diff --git "a/3702.jsonl" "b/3702.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3702.jsonl"
@@ -0,0 +1,738 @@
+{"seq_id":"66038561","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import *\nfrom .forms import *\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'index.html')\n\ndef mostrar_empresas(request):\n empresas = Empresa.objects.all()\n return render(request, 'Empresas.html',\n {'empresas': empresas})\n\ndef nova_empresa(request):\n if request.method == \"POST\":\n form = EmpresaForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('mostrar_empresas')\n else:\n form = EmpresaForm()\n return render(request, 'nova_empresa.html',\n {'form': form})\n\ndef mostrar_acoes(request):\n acoes = Acao.objects.all()\n return render(request, 'Acoes.html',\n {'acoes': acoes})\n\ndef nova_acao(request):\n if request.method == \"POST\":\n form = AcaoForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('mostrar_acoes')\n else:\n form = AcaoForm()\n return render(request, 'nova_acao.html',\n {'form': form})\n\ndef mostrar_cotacoes(request):\n cotacoes = Cotacao.objects.all()\n\n return render(request, 'Cotacao.html',\n {'cotacoes': cotacoes})\n\ndef nova_cotacao(request):\n if request.method == \"POST\":\n form = CotacaoForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('mostrar_cotacoes')\n else:\n form = CotacaoForm()\n return render(request, 'nova_cotacao.html',\n {'form': form})","sub_path":"valores/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"1460074","text":"from scipy import misc\nimport numpy as np\nimport numdifftools as nd\nimport random\nimport time\n\ndef simp(x,j,max_iter,max_time): \n y=lambda x:c+np.dot(x,b)+np.dot(x,np.dot(a,np.transpose([x])))\n iteration=0\n tr=x\n start = time.time()\n while max_iter>iteration and time.time() <= start + max_time:\n grad=nd.Gradient(y)([tr])\n t1=tr-grad*beta\n print(\"Iteration \",iteration+1,\"\\n new x:\\n \",t1)\n print(\"J(x):\\n\",y(tr))\n if y(t1)<=j:\n break\n tr=t1\n iteration+=1\n return t1,y(t1)\n\ndef newton(x,j,max_iter,max_time):\n f=lambda x:c+np.dot(x,b)+np.dot(x,np.dot(a,np.transpose([x])))\n iteration=0\n start=time.time()\n while iteration .*?title=\"([\\s\\S]*?)\"[\\s\\S]*? ([\\s\\S]*?) ([\\s\\S]*?)
\\n' % \\\n mistune.escape(code)\n formatter = HtmlFormatter()\n return highlight(code, lexer, formatter)\n\n\n@impl\ndef flaskbb_load_post_markdown_class():\n return FlaskBBRenderer\n\n\n@impl\ndef flaskbb_load_nonpost_markdown_class():\n return FlaskBBRenderer\n\n\n@impl\ndef flaskbb_jinja_directives(app):\n render_classes = app.pluggy.hook.flaskbb_load_post_markdown_class(app=app)\n app.jinja_env.filters['markup'] = make_renderer(render_classes)\n\n render_classes = app.pluggy.hook.flaskbb_load_nonpost_markdown_class(\n app=app\n )\n app.jinja_env.filters['nonpost_markup'] = make_renderer(render_classes)\n\n\ndef make_renderer(classes):\n RenderCls = type('FlaskBBRenderer', tuple(classes), {})\n\n markup = mistune.Markdown(renderer=RenderCls(escape=True, hard_wrap=True))\n return lambda text: Markup(markup.render(text))\n","sub_path":"flaskbb/markup.py","file_name":"markup.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"304725252","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\nimport json\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import HttpResponse, HttpResponseRedirect, render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import User, Post, Comment, Follower, Like\n\n\n#def index(request):\n# return render(request, \"network/index.html\")\n\n\ndef index(request):\n # If no user is signed in, return to login page:\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"login\"))\n return render(request, \"network/index.html\")\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n@csrf_exempt\n@login_required\ndef plus_like(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n #content = data.get(\"content\")\n w_post = data.get(\"w_post\")\n post = Post.objects.get(pk=w_post)\n user = request.user\n if Like.objects.filter(user=user,post=post).exists():\n Like.objects.filter(user=user, post=post).delete()\n liked = False\n else:\n like = Like(\n user=user,\n post=post\n )\n like.save()\n liked = True\n counter = Like.objects.filter(post=post).count()\n return JsonResponse({\n \"Success\": \"like added\",\n \"likecount\":str(counter),\n \"liked\":liked\n }, status=200)\n return JsonResponse({\n \"Error\": \"get methode\"\n }, status=400)\n\n ###############################\n\n@csrf_exempt\n@login_required\ndef cancel_like(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n #content = data.get(\"content\")\n w_post = data.get(\"w_post\")\n post = Post.objects.get(pk=w_post)\n user = request.user\n\n like = Like(\n user=user,\n post=post\n )\n like.delete()\n counter = Like.objects.filter(post=post).count()\n return JsonResponse({\n \"Success\": \"like canceled\",\n \"likecount\":str(counter)\n }, status=200)\n return JsonResponse({\n \"Error\": 
\"get methode\"\n }, status=400)\n\n ###############################\n\n@csrf_exempt\n@login_required\ndef likecounter(request,id):\n post = Post.objects.get(pk=id)\n counter = Like.objects.filter(post=post).count()\n liked = False\n if Like.objects.filter(user=request.user, post=post).exists():\n liked = True\n\n return JsonResponse({\n \"likecount\":str(counter),\n \"liked\":liked\n }, status=200)\n\n@csrf_exempt\n@login_required\ndef like_button(request,id):\n #post = Post.objects.get(pk=id)\n post = Post.objects.get(pk=id)\n #counter = Like.objects.filter(post=post).count()\n like_button = Like.objects.filter(post=post).filter(user=request.user).count()\n return JsonResponse({\n \"like_button\":str(like_button)\n }, status=200)\n\n@csrf_exempt\n@login_required\ndef compose(request):\n\n # Composing a new email must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Check recipient emails\n data = json.loads(request.body)\n\n content = data.get(\"content\")\n\n if content == \"\":\n return JsonResponse({\n \"error\": \"Empty post is not permitted.\"\n }, status=400)\n\n\n creator = User.objects.get(username=request.user.username)\n post = Post(\n creator=creator,\n content=content\n )\n\n post.save()\n\n return JsonResponse({\"message\": \"Post sent successfully.\"}, status=201)\n\n ######################################################################\n\ndef all_posts(request):\n post_list = Post.objects.all()\n post_list = post_list.order_by(\"-time_of_creation\").all()\n paginator = Paginator(post_list, 10) # Show 10 contacts per page.\n page_number = request.GET.get('page', 1)\n page_obj = paginator.get_page(page_number)\n return render(request, 'network/all_posts.html', {'page_obj': page_obj})\n\n#########################################################################3\n\ndef comment(request, post_id):\n post = Post.objects.get(id=post_id)\n #comments_0 = Comment.objects.all()\n comments = Comment.objects.all().filter(item_id=post.id)\n return render(request, \"network/comment.html\", {\n \"post\": post,\n \"comments\": comments\n })\n\ndef edit(request, post_id):\n post = Post.objects.get(id=post_id)\n return render(request, \"network/edit.html\", {\n \"post\": post\n })\n\n@csrf_exempt\n@login_required\ndef edit_2(request, post_id):\n # Query for requested post\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found.\"}, status=404)\n\n # Return post contents\n if request.method == \"GET\":\n return JsonResponse(post.serialize())\n\n # Update whether email is read or should be archived\n elif request.method == \"PUT\":\n data = json.loads(request.body.decode(\"utf-8\"))\n print(post.content)\n post.content = data[\"content\"]\n post.save()\n return JsonResponse({\n \"Success\": \"Update\"\n }, status=200)\n\n # Post must be via GET or PUT\n else:\n return JsonResponse({\n \"error\": \"GET or PUT request required.\"\n }, status=400)\n\n\ndef comment_add(request, post_id):\n username = request.user.username\n user = User.objects.get(username=username)\n post = Post.objects.get(id=post_id)\n if request.method == \"POST\":\n comment = request.POST[\"comment\"]\n comments = Comment.objects.create(user=user, post=post, comment=comment, item_id=post.id)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return HttpResponse(\"Invalid Input\")\n\n\ndef profile(request, creator_id):\n username = request.user.username\n user = 
User.objects.get(username=username)\n user2 = User.objects.get(id=creator_id)\n #Airport.objects.filter(city=\"New York\")\n post_list = Post.objects.filter(creator=user2)\n post_list = post_list.order_by(\"-time_of_creation\").all()\n paginator = Paginator(post_list, 10) # Show 10 contacts per page.\n page_number = request.GET.get('page',)\n page_obj = paginator.get_page(page_number)\n wells = Follower.objects.all().filter(user=user)\n y_cont = Follower.objects.all().filter(user=user).count()\n w_cont = Follower.objects.all().filter(user=user2).count()\n cont = 0\n for well in wells:\n if well.following == user2:\n cont = cont + 1\n\n x_cont = user.followed.all().count()\n z_cont = user2.followed.all().count()\n #for well in wells:\n # if well.following == user2:\n # w_cont = W_cont + 1\n return render(request, 'network/profile.html', {'page_obj': page_obj, 'user': user, 'user2': user2, \"w_cont\": int(w_cont), \"x_cont\": int(x_cont), \"z_cont\": int(z_cont), 'y_cont': int(y_cont), 'cont': int(cont)})\n\n\ndef follower_add(request, following_id):\n following = User.objects.get(id=following_id)\n username = request.user.username\n user = User.objects.get(username=username)\n #follower_items = Follower.objects.all()\n item_count = Follower.objects.all().filter(user=user, following=following).count()\n if item_count == 1:\n return HttpResponse(\"Following\")\n follower_item = Follower.objects.create(user=user, following=following)\n #my_follower = Follower.objects.all().get(user=user)\n return HttpResponseRedirect(reverse(\"index\"))\n #return render(request, \"network/follower_add.html\", {\n # \"message\": \"from now on, following this user, as long as you don't decide to stop following it\"\n # \"my_follower\": my_follower\n #})\n\n\ndef follower_index(request):\n username = request.user.username\n user = User.objects.get(username=username)\n wells = Follower.objects.all().filter(user=user)\n post_list = Post.objects.all()\n w_list = []\n for well in wells:\n follower = well.following\n w_list.append(follower)\n post_list = Post.objects.filter(creator__in=w_list)\n #post_list = Post.objects.filter(creator=user2)\n #Blog.objects.filter(pk__in=[1, 4, 7])\n post_list = post_list.order_by(\"-time_of_creation\").all()\n paginator = Paginator(post_list, 10) # Show 10 contacts per page.\n page_number = request.GET.get('page',)\n page_obj = paginator.get_page(page_number)\n return render(request, 'network/follower_index.html', {'page_obj': page_obj, 'user': user})\n\n\n\ndef follower_del(request, following_id):\n following = User.objects.get(id=following_id)\n username = request.user.username\n user = User.objects.get(username=username)\n well = Follower.objects.get(user=user, following=following)\n well.delete()\n return HttpResponseRedirect(reverse(\"index\"))\n","sub_path":"network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"111352527","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n################################################################################\n##\n## inorder+Preorder/Postorder/Levelorder can define a binary Tree\n##\n################################################################################\n\n## https://www.geeksforgeeks.org/if-you-are-given-two-traversal-sequences-can-you-construct-the-binary-tree/\n\nclass Solution:\n def findDuplicateSubtrees(self, root: Optional[TreeNode]) -> List[Optional[TreeNode]]:\n def preorder_encode_tree(root,preorder_dic):\n if not root:\n return tuple([-1]) # this is important, return an empty tuple will make trouble\n preorder_dic[root]= tuple([root.val])+preorder_encode_tree(root.left,preorder_dic)+preorder_encode_tree(root.right,preorder_dic)\n return preorder_dic[root] \n \n def inorder_encode_tree(root,inorder_dic):\n if not root:\n return tuple([-1])\n inorder_dic[root]= inorder_encode_tree(root.left,inorder_dic)+tuple([root.val])+inorder_encode_tree(root.right,inorder_dic)\n return inorder_dic[root]\n preorder_dic={}\n inorder_dic={}\n preorder_encode_tree(root,preorder_dic)\n inorder_encode_tree(root,inorder_dic)\n coded_node={}\n out = set()\n for node in preorder_dic:\n if (preorder_dic[node],inorder_dic[node]) not in coded_node:\n coded_node[(preorder_dic[node],inorder_dic[node])]=node\n else:\n out.add(coded_node[(preorder_dic[node],inorder_dic[node])])\n return out\n \n","sub_path":"Problem652_Find_Duplicate_Subtrees.py","file_name":"Problem652_Find_Duplicate_Subtrees.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"449655206","text":"from pssplot.pssfigure import Pssfigure\nfrom pssplot.pssplot import test_pssplot\nfrom collections import defaultdict\n\nFILE_PATH = 'test.py'\nfigure = Pssfigure(FILE_PATH)\n\nprint(figure._plots)\n\nprint(figure._plots['test'])\nfigure._plots['test'] = 'hello'\nprint(figure._plots['test'])\n\n# Create test plots\nplt1 = test_pssplot('plt1')\nplt2 = test_pssplot('plt2')\nplt3 = test_pssplot('plt3')\nplt4 = test_pssplot('plt3')\n# plt2 = dict(file_path='test2.py')\n# plt3 = dict(file_path='test3.py', other_var=5)\n# plt4 = dict(file_path='test3.py', other_var=4)\n\nplots = [plt1, plt2, plt3, plt4]\n\ntest_dict = defaultdict(list)\n\nfor plot in plots:\n value = plot.name\n test_dict[value].append(plot)\n\nfor plot, figure in test_dict.items():\n print(plot)\n for plt in figure:\n plt.test_plot()","sub_path":"test_pssfigure.py","file_name":"test_pssfigure.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"526274449","text":"'''\nrefer : https://github.com/mbr/tinyrpc/tree/master/examples\nimport gevent\nimport tinyrpc\nimport gevent-websocket\n'''\n\nfrom tinyrpc.protocols.jsonrpc import JSONRPCProtocol\nfrom tinyrpc.transports.http import HttpPostClientTransport\nfrom tinyrpc import RPCClient\n\nrpc_client = RPCClient(\n JSONRPCProtocol(),\n HttpPostClientTransport('http://127.0.0.1:5000/')\n)\n\nremote_server = rpc_client.get_proxy()\n\n# call a method called 'reverse_string' with a single string argument\nresult = remote_server.reverse_string('Hello, World!')\n\nprint(\"Server answered: \", result)","sub_path":"ZMQ/zmq_tinyrpc_http_client.py","file_name":"zmq_tinyrpc_http_client.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"312236111","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# script for auditing and formatting postal codes in an OSM file\nimport re\n\nnonalpha_re = re.compile('\\W')\n\n# returns the set of postal codes used in an osm file\ndef audit_postal_code(osmfile):\n osm_file = open(osmfile, \"r\")\n postal_codes = set()\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\n\n if elem.tag == \"node\" or elem.tag == \"way\":\n for tag in elem.iter(\"tag\"):\n if tag.attrib['k'] == \"addr:postcode\":\n postal_codes.add(tag.attrib['v'])\n\n osm_file.close()\n return postal_codes\n\n# takes a postal code and returns it in the proper format or returns None if given an invalid postal code\ndef format_postal_code(code):\n # strip any BC prefix\n if code[0:2].upper() == 'BC':\n code = code[2:]\n # strip all non alphanumeric characters\n code = re.sub(nonalpha_re, \"\", code)\n # insert a space to the middle of the code\n code = code[0:3] + ' ' + code[3:]\n return code.upper()[0:7] if len(code) > 6 else None","sub_path":"Cleaning Vancouver OSM Data/audit_postal_codes.py","file_name":"audit_postal_codes.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"511823074","text":"# 組み込みのClassifierやTrainerを使わず,単純パーセプトロンを最適化する\nimport time\nimport sys\n\nimport numpy as np\n\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import report, computational_graph\nfrom chainer import optimizers, serializers\nfrom chainer import Chain\n\nfrom net import SimplePerceptron\nfrom net import Classifier\n\nbatchsize = 100\nn_epoch = 100\n\n# training data size\nN = int(input())\n\n# データセットを標準入力から読み込む\ntrain_data_raw = []\ntrain_target_raw = []\nfor i in range(0, N):\n i0, i1, t = input().split()\n train_data_raw.append([i0, i1])\n train_target_raw.append(t)\n# NumPy配列に変換\ntrain_data = np.array(train_data_raw, dtype=\"float32\")\ntrain_target = np.array(train_target_raw, dtype=\"int32\")\n\n# モデルインスタンスの作成\nmodel = Classifier(SimplePerceptron())\n\n# オプティマイザの初期化\noptimizer = optimizers.Adam()\noptimizer.setup(model)\n\n# トレーニングループ\nfor epoch in range(0, n_epoch):\n print ('epoch', epoch+1)\n\n # ランダムに訓練データを並べ替える\n perm = np.random.permutation(N)\n sum_accuracy = 0\n sum_loss = 0\n\n start = time.time()\n for i in range(0, N, batchsize):\n # データを取り出し\n x = chainer.Variable(np.asarray(train_data[perm[i:i + batchsize]]))\n t = chainer.Variable(np.asarray(train_target[perm[i:i + batchsize]]))\n\n # オプティマイズ\n optimizer.update(model, x, t)\n\n # グラフ出力(一回だけ)\n if epoch == 0 and i == 0:\n with open('graph.dot', 'w') as o:\n variable_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0',\n 'style': 'filled'}\n function_style = {'shape': 'record', 'fillcolor': '#6495ED',\n 'style': 'filled'}\n g = computational_graph.build_computational_graph(\n (model.loss, ),\n variable_style=variable_style,\n function_style=function_style)\n o.write(g.dump())\n print('graph generated')\n\n sum_loss += float(model.loss.data) * len(t.data)\n sum_accuracy += float(model.accuracy.data) * len(t.data)\n end = time.time()\n elapsed_time = end - start\n throughput = N / elapsed_time\n\n print('train mean loss={}, accuracy={}, throughput={} images/sec'.format(\n sum_loss / N, sum_accuracy / N, throughput))\n\nprint('save the model') \nserializers.save_npz('linear.model', model)\n\n","sub_path":"simple-perceptron/train_linear.py","file_name":"train_linear.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"232489243","text":"__author__ = 'Tianlu.Shi'\n\nclass RICs:\n\tdef __init__(self):\n\t\tself.Template = 0\n\t\tself.PRC ='Not Allow'\n\t\t# Double price information\n\t\t# Format:\n\t\t# [SRCRowNum,SRCRowColumn,LayoutNum,FormatColumn]\n\t\tself.DPrice = []\n\t\t# Single price information\n\t\t# Format:\n\t\t# [SRCRowNum,SRCRowColumn,LayoutNum,FormatColumn]\n\t\t# If blank then insert None\n\t\tself.SingleFid = []\n\t\t# MarketRule information:\n\t\t# Format:\n\t\t# [StrategyIDColumn,SubRuleName,SubRuleColumn,CalendarColumn]\n\t\tself.MarketRule = []\n\t\t# Dynamic FidList:\n\t\t# Format:\n\t\t# [DoublePrice,Single1,Single2,...]\n\t\t# Doesn't filter out the same name FID or FIDs not in the template.\n\t\tself.FidList = []\n\t\t# StaticFids End Column\n\t\tself.StaticEnd = 0\n\t\t# PriceFid Dict:\n\t\t# key: RowNum\n\t\t# Value: list of Price FID index in SingleFID\n\t\t# E.g: {10:[0,3], 11:[1,4]}\n\t\tself.PriceFid = {}\n\t\t# Price Fid Condition String\n\t\tself.PriceString = {}\n\n","sub_path":"RICs.py","file_name":"RICs.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"234583192","text":"import json\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom ..models import Film\n\n# Get an instance of a logger\nlogger = logging.getLogger(\"django\")\n\n\n@require_http_methods([\"POST\"])\ndef datatable_search(request):\n \"\"\" Search films from db \"\"\"\n\n args = json.loads(\n request.body) if request.method == 'POST' and request.body else {}\n logger.debug(\"Input args: %s\", args)\n\n data = Film.datatable_search(args)\n\n films = []\n for film in data['films']:\n film_dict = film.row2dict()\n film_dict['language'] = film.language.row2dict()\n film_dict['categories'] = [category.row2dict()\n for category in film.categories.all()]\n film_dict['actors'] = [actor.row2dict()\n for actor in film.actors.all()]\n films.append(film_dict)\n\n response = {\n 'fetch_id': args.get('fetch_id'),\n 'records_total': data['records_total'],\n 'records_filtered': data['records_filtered'],\n 'data': films,\n }\n\n return JsonResponse(response, json_dumps_params={'indent': 2})\n","sub_path":"api/controllers/film.py","file_name":"film.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"89226720","text":"import discord\nfrom discord.ext import commands\n\nclient = discord.Client()\ntoken = open(\"token.txt\", \"r\").read()\n\n@client.event\nasync def on_ready():\n print(f\"We are live as {client.user}\")\n\n@client.event\nasync def on_message(msg):\n channelName = str(msg.channel)\n\n if channelName != \"commands\" or channelName != \"bot-construction\":\n return\n\n cmd = msg.content.lower()\n\n switchcase = {\n\n \"twitch()\": await msg.channel.send(\"Go Follow Melkeydev over at https://www.twitch.tv/melkeydev\"),\n\n \"test()\": await msg.channel.send(\"Test worked you didnt break it yet\"),\n\n \"schedule()\": await msg.channel.send(\"Melkey streams start on Mondays, Wednesdays, and Fridays at 9PM EST\"),\n\n \"project()\": await msg.channel.send(\"Melkey is working on a NBA app written in react to search, and compare player stats!\"),\n\n \"pow()\": await msg.channel.send(\"The pow is a sacred technique practice by the ancient tribes of Konoha.\"),\n\n \"crash()\": await msg.channel.send(\"Nice try I am impossible to crash\"),\n\n \"dot()\": await msg.channel.send(\"Check out my dotfiles at https://github.com/Amokstakov/NvimConfig\")\n\n }\n return switchcase.get(cmd, None)\nif __name__ == \"__main__\":\n client.run(token)\nelse:\n print('Do not import.')\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"70712837","text":"import copy\n\nclass Action:\n def __init__(self, pos, text, from_version, to_version):\n self.pos = pos\n self.text = text\n self.from_version = from_version\n self.to_version = to_version\n\n @staticmethod\n def _insert(text, text_diff, pos):\n if pos <= len(text):\n return text[0:pos] + str(text_diff) + text[pos:]\n raise ValueError\n\n @staticmethod\n def _replace(text, text_diff, pos):\n replace_length = len(str(text_diff))\n return text[:pos] + str(text_diff) + text[pos+replace_length:]\n\n\nclass TextHistory:\n DEFAULT_VERSION = 0\n BUFFER = dict()\n act_buff = []\n\n def __init__(self, text=None, version=0):\n self._text = text or ''\n self._version = version\n self.BUFFER[self._version] = self._text\n\n @property\n def text(self):\n return self._text\n\n @property\n def version(self):\n return self._version\n\n def _writing_buffer(self, diff, pos, method):\n self.BUFFER[self._version] = {\n 'text': self.text,\n 'diff': diff,\n 'pos': pos,\n 'method': method\n }\n return self.BUFFER\n\n def _write_act_buff(self, act):\n self.act_buff.append(act)\n \n def insert(self, diff_text, pos=None):\n if pos is None:\n pos = len(self._text)\n if pos > len(self._text) or pos < 0:\n raise ValueError\n action = InsertAction(pos=pos, text=diff_text, from_version=self.version, to_version=self.update_version())\n self.action(action)\n self._writing_buffer(diff_text, pos, 'insert')\n return self._version\n\n def replace(self, diff_text, pos=None):\n if pos is None:\n pos = len(self._text)\n if pos > len(self._text) or pos < 0:\n raise ValueError\n action = ReplaceAction(pos=pos, text=diff_text, from_version=self.version, to_version=self.update_version())\n self.action(action)\n self._writing_buffer(diff_text, pos, 'replace')\n return self._version\n\n def delete(self, pos=None, length=0):\n if pos is None:\n pos = len(self._text)\n print(f'pos {pos}, length {length}')\n if pos > len(self._text) or (pos + length) > len(self._text) or pos < 0:\n raise ValueError\n action = DeleteAction(pos=pos, length=length, from_version=self.version, to_version=self.update_version())\n self.action(action)\n self._writing_buffer(pos=pos, diff=length, method='delete')\n return self._version\n\n def update_version(self, to_version=0):\n to_version = to_version or self._version\n if to_version > self._version:\n return to_version\n elif to_version == self._version:\n return to_version + 1\n else:\n raise ValueError\n return self._version\n\n def action(self, act):\n if act.to_version <= act.from_version:\n raise ValueError\n\n self._text = act.apply(self.text)\n self._write_act_buff(act)\n self._version = self.update_version(act.to_version)\n return self._version\n\n def optimization_act_buf(self):\n print(f'before optimization:\\n{self.act_buff}')\n # Оптимизация работает только для объектов InsertAction and ReplaceAction\n # Работает так: если есть два однотипных экшна подряд, к примеру:\n # h.insert('a')\n # h.insert('bc')\n # то их можно представить одним h.insert('abc')\n i = 0\n g = 0\n action_list = copy.copy(self.act_buff)\n for i in range(0, len(self.act_buff)):\n if i < len(self.act_buff):\n act_type = self.act_buff[i].__class__\n for g in range(i + 1, len(self.act_buff)):\n if g < len(self.act_buff):\n if isinstance(self.act_buff[g], act_type) and \\\n (self.act_buff[g].pos + len(self.act_buff[g].text)) == \\\n (self.act_buff[i].pos + len(self.act_buff[i].text) + \\\n len(self.act_buff[g].text)):\n action_list[i].text = self.act_buff[i].text + self.act_buff[g].text\n 
action_list[i].to_version = self.act_buff[g].to_version\n action_list.pop(g)\n else:\n i = g\n break\n else:\n break\n g += 1\n i += 1\n print(f'after optimization:\\n{action_list}')\n return action_list\n\n # Оптимизации________________________________________________\n\n def get_actions(self, from_version=0, to_version=None):\n if to_version is None:\n to_version = len(self.act_buff)\n if not (0<= from_version <= to_version <= len(self.act_buff)):\n raise ValueError\n self.act_buff = self.optimization_act_buf()\n return self.act_buff[from_version:to_version]\n\nclass InsertAction(Action):\n def apply(self, text):\n return self._insert(text, self.text, self.pos)\n\n\nclass ReplaceAction(Action):\n def apply(self, text):\n return self._replace(text, self.text, self.pos)\n\n\nclass DeleteAction(Action):\n def __init__(self, pos, length, from_version, to_version):\n self.pos = pos\n self.length = length\n self.from_version = from_version\n self.to_version = to_version\n\n @staticmethod\n def _delete(text, length, pos):\n return text[:pos] + text[pos + length:]\n\n def apply(self, text):\n return self._delete(text, self.length, self.pos)\n\n\ndef main():\n h = TextHistory()\n h.insert('a')\n h.insert('bc')\n h.replace('B', pos=1)\n h.delete(pos=0, length=1)\n\n actions = h.get_actions(1)\n print(actions)\n print(len(actions))\n print('_____________')\n\n insert, replace, delete = actions\n # insert\n print(insert.from_version)\n print(insert.to_version)\n print(insert.text)\n print(insert.pos)\n print('_____________')\n # replace\n print(replace.from_version)\n print(replace.to_version)\n print(replace.text)\n print(replace.pos)\n print('_____________')\n # delete\n print(delete.from_version)\n print(delete.to_version)\n print(delete.pos)\n print(delete.length)\n\n\n\n\n\nif __name__==\"__main__\":\n main()","sub_path":"009_text_history/text_history.py","file_name":"text_history.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"584897095","text":"import datetime, json, logging, uuid\nfrom . import LANGUAGES, RE_CHALLENGE_ID, RE_USER_ID\nfrom .StorageHelper import StorageKeys, get_redis, wait_for_redis\n\n# -------------------------------------------------------------------\n\n@wait_for_redis\ndef CreateSubmission(lang, user_id, challenge_id, code, simulation=None):\n\t# Language\n\tlang = lang.strip()\n\tif not lang in LANGUAGES:\n\t\tlogging.error('Language is invalid')\n\t\treturn None\n\n\t# User\n\tuser_id = user_id.strip()\n\tif not RE_USER_ID.match(user_id):\n\t\tlogging.error('User is invalid')\n\t\treturn None\n\tuser = get_redis().hget(StorageKeys.Users, user_id)\n\tif not user:\n\t\tlogging.error('User is Unknown')\n\t\treturn None\n\n\t# Challenge\n\tchallenge_id = challenge_id.strip()\n\tif not RE_CHALLENGE_ID.match(challenge_id):\n\t\tlogging.error('Challenge is invalid')\n\t\treturn None\n\tfrom .ChallengeHelper import LoadChallenge\n\tif not LoadChallenge(challenge_id):\n\t\tlogging.error('Challenge is unknown')\n\t\treturn None\n\n\t# Code\n\tcode = code.replace('\\r', '')\n\tif not code:\n\t\tlogging.error('Code is invalid')\n\t\treturn None\n\n\t# Execute\n\tsubmission_id = str(uuid.uuid4())\n\tsubmission = {\n\t\t'challenge_id': challenge_id,\n\t\t'code': code,\n\t\t'id': submission_id,\n\t\t'lang': lang,\n\t\t'stamp': datetime.datetime.utcnow().timestamp(),\n\t\t'user_id': user_id\n\t}\n\tif simulation != None:\n\t\tif not isinstance(simulation, int):\n\t\t\tlogging.error('Simulation is invalid: %s', simulation)\n\t\t\treturn None\n\t\tsubmission['simulation'] = simulation\n\n\tpipe = get_redis().pipeline()\n\tpipe.hset(StorageKeys.Submissions, submission_id, json.dumps(submission))\n\tpipe.lpush(StorageKeys.SubmissionsQueue, submission_id)\n\tpipe.execute()\n\n\treturn submission\n\n# -------------------------------------------------------------------\n\n@wait_for_redis\ndef LoadSubmissions():\n\treturn [ json.loads(submission) for submission_id, submission in get_redis().hgetall(StorageKeys.Submissions).items() ]\n\n# -------------------------------------------------------------------\n\n@wait_for_redis\ndef LoadSubmission(submission_id):\n\tsubmission = get_redis().hget(StorageKeys.Submissions, submission_id)\n\treturn json.loads(submission) if submission else None\n\n# -------------------------------------------------------------------\n\n@wait_for_redis\ndef WaitSubmission():\n\tlogging.info('Wait submission')\n\tsubmission_id = get_redis().brpoplpush(StorageKeys.SubmissionsQueue, StorageKeys.SubmissionsQueueWIP, 0)\n\tlogging.info('Load submission %s', submission_id)\n\treturn json.loads(get_redis().hget(StorageKeys.Submissions, submission_id))\n\n# -------------------------------------------------------------------\n","sub_path":"app/helpers/SubmissionHelper.py","file_name":"SubmissionHelper.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"227816894","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport re\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_model_freezing_configs(_C):\n _C.MODEL.FROZEN_LAYER_REG_EXP = []\n\n\ndef set_requires_grad(model, reg_exps, value):\n total_num_parameters = 0\n unmatched_parameters = []\n unmatched_parameter_names = []\n matched_parameters = []\n matched_parameter_names = []\n for name, parameter in model.named_parameters():\n total_num_parameters += 1\n matched = False\n for frozen_layers_regex in reg_exps:\n if re.match(frozen_layers_regex, name):\n matched = True\n parameter.requires_grad = value\n matched_parameter_names.append(name)\n matched_parameters.append(parameter)\n break\n if not matched:\n unmatched_parameter_names.append(name)\n unmatched_parameters.append(parameter)\n logger.info(\"Matched layers (require_grad={}): {}\".format(\n value, matched_parameter_names))\n logger.info(\"Unmatched layers: {}\".format(unmatched_parameter_names))\n return matched_parameter_names, unmatched_parameter_names\n","sub_path":"d2go/modeling/model_freezing_utils.py","file_name":"model_freezing_utils.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"375750666","text":"\n\"\"\"\nO(n^2) implementation of suffix tree construction\n\"\"\"\nimport collections\n\nclass SuffixTreeNode:\n \"\"\"\n Node for the suffix tree.\n Instead of storing substrings in edges, store them\n in nodes instead.\n\n :param length: The length of the text in the node\n :param end: The ending index of the text in the node\n :param children: List of SuffixTreeNode children\n :param leaf: The suffix for which this is a leaf of. -1\n if not a leaf\n \"\"\"\n\n def __init__(self, length=0, end=0, children=[], leaf = -1):\n self.length = length\n self.end = end\n self.children = [x for x in children]\n self.leaf = leaf\n \n def add_child(self, child):\n \"\"\"\n Add a child to this node\n\n :param child: The child node to add\n \"\"\"\n self.children.append(child)\n\n def get_text(self, text):\n \"\"\"\n Return the text stored in the node\n\n :param text: The full text that end and length refer to\n :return: The substring in the node\n \"\"\"\n return text[self.end - self.length : self.end]\n\nclass SuffixTree:\n \"\"\"\n Create a Suffix Tree from the given text\n\n :param text: Text to create the suffix tree from\n \"\"\"\n\n def __init__(self, text, suffix=True):\n self.text = text\n self.__suffix = suffix\n self.__create_tree(text)\n\n def search(self, pattern):\n \"\"\"\n Search for the pattern in the suffix tree\n\n :param pattern: The pattern to search for\n :return: [start indices]\n \"\"\"\n node = self.root\n # Start index of unfound items in pattern\n index = 0\n while index < len(pattern):\n valid = False\n for child in node.children:\n if child.leaf == -1:\n start = child.end - child.length\n length = min(child.length, len(pattern) - index)\n child_text = self.text[start : start+length]\n pattern_text = pattern[index : index+length]\n if child_text == pattern_text:\n valid = True\n index = index + length\n node = child\n break\n if valid == False:\n return []\n return self.__leaves(node)\n\n def __leaves(self, node):\n \"\"\"\n Get the leaf nodes of the current node\n\n :param node: The node to get the leaves of\n :return: [leaf node indices]\n \"\"\"\n leaves = []\n queue = collections.deque()\n queue.append(node)\n while len(queue) > 0:\n node = queue.pop()\n if node.leaf != -1:\n leaves.append(node.leaf)\n for child in node.children:\n queue.append(child)\n leaves.sort()\n return leaves\n\n def __create_tree(self, text):\n \"\"\"\n Create the suffix tree\n\n :param text: Text to create the suffix tree from\n \"\"\"\n keyword = self.__create_keyword(text)\n if self.__suffix:\n self.root = self.__condense_keyword(keyword)\n else:\n self.root = keyword\n\n def __create_keyword(self, text):\n \"\"\"\n Create the keyword tree\n\n :param text: Text to create the keyword tree from\n :return: The root of the created keyword tree\n \"\"\"\n # Create the keyword tree for all suffixes\n temp_root = SuffixTreeNode()\n for i in range(len(text)):\n self.__add_to_keyword(temp_root, i)\n return temp_root\n\n def __add_to_keyword(self, root, index):\n \"\"\"\n Add the given suffix to the tree\n\n :param root: The root of the tree\n :param index: The starting index of the suffix to add\n \"\"\"\n node = root\n for i in range(index, len(self.text)):\n cont = True\n for child in node.children:\n if child.leaf == -1:\n if child.get_text(self.text) == self.text[i]:\n node = child\n cont = False\n break\n if cont:\n new_node = SuffixTreeNode(1, i+1)\n node.add_child(new_node)\n node = new_node\n new_node = SuffixTreeNode(leaf=index)\n node.add_child(new_node)\n\n 
def __condense_keyword(self, keyword):\n \"\"\"\n Condense the keyword tree into a suffix tree\n\n :param keyword: The keyword tree \n \"\"\"\n queue = collections.deque()\n for child in keyword.children:\n queue.append(child)\n while len(queue) > 0:\n node = queue.popleft()\n if node.leaf == -1:\n while len(node.children) == 1:\n child = node.children[0]\n if child.leaf == -1:\n node.length = node.length + child.length\n node.end = child.end\n node.children = child.children\n else:\n break\n for child in node.children:\n queue.append(child)\n return keyword\n\n","sub_path":"suffix_tree.py","file_name":"suffix_tree.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"619464472","text":"#!/usr/bin/env python3\n\n# Copyright © 2021 Helmholtz Centre Potsdam GFZ German Research Centre for Geosciences, Potsdam, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\n\"\"\"\nUnit tests for modelprop.\n\"\"\"\n\nimport unittest\n\nimport modelprop\n\nfrom test_cmd import *\n\n\nclass TestAll(unittest.TestCase):\n \"\"\"\n Unit test class.\n \"\"\"\n\n def test_get_supported_schemas(self):\n \"\"\"\n Extracts which schemas are supported.\n :return: None\n \"\"\"\n supported_schemas = modelprop.get_supported_schemas()\n\n assumed_schemas = [\n \"HAZUS_v1.0\",\n \"SARA_v1.0\",\n \"SUPPASRI2013_v2.0\",\n \"Mavrouli_et_al_2014\",\n \"Torres_Corredor_et_al_2017\",\n \"Medina_2019\",\n ]\n\n self.assertTrue(supported_schemas)\n\n self.assertEqual(len(assumed_schemas), len(supported_schemas))\n\n for schema in assumed_schemas:\n self.assertIn(schema, supported_schemas)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"293370860","text":"\n# ### 수작업으로 만들었음...\n# decBook = {\n# \"2\": \"H\",\n# \"3\": \"e\",\n# \"1\": \"l\",\n# \"4\": \"o\",\n# \"9\": \"W\"\n# \"8\": \"r\"\n# \"7\": \"d\"\n# }\n\n\n# 자동으로 decBook을 만드는 함수...\ndef makeDecCodeBook(encBook):\n decBook = {}\n for k in encBook:\n val = encBook[k]\n decBook[val] = k\n return decBook\n\n## encryption 과정\n## input : msg, encBook\n## output : output\n\ndef encWithCodeBook(msg, encBook):\n output = \"\"\n for m in msg:\n if m in encBook: # 만약 msg 의 값 한개 씩 읽어와서 encBook 에 있으면 그것을 변환해서 output 에 붙여준다.\n output += encBook[m]\n else:\n output += m\n return output\n\ndef encWithCodeBook2(msg, encBook):\n for m in msg:\n if m in encBook: # 다른 방식의 인코딩\n msg = msg.replace(m, encBook[m])\n else:\n msg += m\n return msg\n\n\n\n## decryption 과정\n# input : output, decBook\n# output : PlainText\n\ndef decWithCodeBook(output, decBook):\n PlainText = \"\"\n for m in output:\n if m in decBook:\n PlainText += decBook[m]\n else:\n PlainText += m\n return PlainText\n\ndef decWithCodeBook2(output, decBook):\n for m in output:\n if m in decBook:\n output = output.replace(m, decBook[m])\n else:\n output += m\n return output\n\n\ndef encdecWithCodeBook(input, codeBook): ## encode decode 과정을 한번에 합친 함수\n for m in input:\n if m in codeBook:\n input = input.replace(m, codeBook[m])\n else:\n input += m\n return input\n\n\n# main...\n\nencBook = {\n \"H\": \"2\",\n \"e\": \"3\",\n \"l\": \"1\",\n \"o\": \"4\",\n \"W\": \"9\",\n \"r\": \"8\",\n \"d\": \"7\"\n}\n\ndecBook = makeDecCodeBook(encBook)\nprint(decBook)\n\nmsg = \"Hello World\"\n\ncipher = encWithCodeBook(msg, encBook)\nprint(cipher)\n\ncipher2 = encWithCodeBook2(msg, encBook)\nprint(cipher2)\n\nplaintext = decWithCodeBook(cipher, decBook)\nprint(plaintext)\n\nplaintext2 = decWithCodeBook2(cipher2, decBook)\nprint(plaintext2)\n\n\ncipher3 = encdecWithCodeBook(msg, encBook)\nprint(cipher3)\n\nplaintext3 = encdecWithCodeBook(cipher3, decBook)\nprint(plaintext3)\n","sub_path":"SecurityPythonCode/SecuPro/CodeBookCipher.py","file_name":"CodeBookCipher.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"359483240","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport os\nimport sys\nimport warnings\n\nimport ansible.constants\nimport ansible.errors\nimport ansible.utils\nimport pytest\nfrom pprint import pprint\nfrom ibm_zos_ims.tests.functional.module_utils.ims_test_gen_utils import DBDInputParameters as ip\n\n__metaclass__ = type\n\nGEN_SUCCESS_MSG = 'DBDGEN execution was successful.'\nBATCH_SUCCESS_RETURN_TEXT = 'success'\n\n\ndef test_ims_dbd_gen_sample(ansible_zos_module):\n hosts = ansible_zos_module\n source = ip.SOURCE\n\n dest = ip.DESTINATION\n sys_lib = ip.SYSLIB\n results = hosts.all.ims_dbd_gen(src=source, location=\"DATA_SET\", replace=True, member_list=['DEDBJN21', 'DEDBJN21'], dest=dest, sys_lib=sys_lib)\n\n for result in results.contacted.values():\n pprint(result)\n assert result['changed']\n # Check return code for array of output for each source\n assert result['rc'] == 0\n\n # Check for success message (if we remove return codes)\n assert result['msg'] == GEN_SUCCESS_MSG\n\n\ndef test_ims_dbd_gen_sample_batch(ansible_zos_module):\n hosts = ansible_zos_module\n source = ip.SOURCE\n dest = ip.DESTINATION\n sys_lib = ip.SYSLIB\n batch_list = [{\n 'src': source,\n 'location': 'DATA_SET',\n 'replace': True,\n 'member_list': 'DEDBJN21'}]\n results = hosts.all.ims_dbd_gen(batch=batch_list, dest=dest, sys_lib=sys_lib)\n\n for result in results.contacted.values():\n pprint(result)\n assert result['changed']\n # Check return code for array of output for each source\n assert result['rc'] == 0\n # Check for success message (if we remove return codes)\n assert result['msg'] == GEN_SUCCESS_MSG\n\n for src_result in result['batch_result']:\n assert src_result['return_text'] == BATCH_SUCCESS_RETURN_TEXT\n","sub_path":"tests/functional/modules/ims_dbd_gen/test_ims_dbd_gen_sample2.py","file_name":"test_ims_dbd_gen_sample2.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"179505175","text":"#!/usr/local/bin/python3.6\n#-*-encoding:utf-8-*-\n#题目:\n#作者:luohu\n#时间:2018-09\n#目的:\nfrom tkinter import *\nfrom tkinter.messagebox import showinfo\ndef reply(name):\n\tshowinfo(title=\"回复\",message=\"Hello %s!\" %name)\n\ntop = Tk()\ntop.title(\"天龙八部\")\ntop.iconbitmap(\"haha.svg\")\n\n#Label控件:Label 控件用以显示文字和图片. Label 通常被用来展示信息, 而非与用户交互. \nLabel(top,text=\"请输出你的名字:\").pack(side=TOP)\n#Entry 是 Tkinter 用来接收字符串等输入的控件. 该控件允许用户输入一行文字.\n\nent = Entry(top)\nent.pack(side=TOP)\nbtn = Button(top,text=\"提交\",command=(lambda:reply(ent.get())))\nbtn.pack(side=LEFT)\n\ntop.mainloop()\n\n","sub_path":"python_program/01.Preview/tkinter103.py","file_name":"tkinter103.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"426878609","text":"started = False\r\nname=input(\"Enter your name: \")\r\nprint(f\"Hii {name} let's play a game\")\r\ncommand = \"\"\r\nwhile command != \"quit\":\r\n input_command= input(\"> \")\r\n if input_command.upper() == \"HELP\":\r\n print('''\r\nstart - to start the car\r\nstop - to stop the car\r\nquit - to exit\r\n ''')\r\n elif input_command.upper() == \"START\":\r\n if started :\r\n print(\"Car is alreay started ... what are doing\")\r\n else :\r\n started = True\r\n print(\"Car started... Ready to go!\")\r\n elif input_command.upper() == \"STOP\" :\r\n if not started:\r\n print(\"you already stop ... what are you doing\")\r\n else :\r\n started = False\r\n print(\"Car stopped\")\r\n\r\n\r\n elif input_command.upper() == \"QUIT\" :\r\n exit_command = input('''\r\ndo u want to quit \r\n (y)es and (N)o\r\n > ''' ).lower()\r\n if exit_command == \"y\" :\r\n command = 'quit'\r\n else :\r\n print(\"I don't understand that ...\")\r\n\r\n\r\n\r\n","sub_path":"Python_code/Game_start.py","file_name":"Game_start.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"462036079","text":"#!/c/Python35/python.exe\r\n\r\nimport sys\r\nimport os\r\nimport shutil\r\n\r\ndef main(argv):\r\n\tscript_path = str(os.path.dirname(os.path.abspath(__file__)))\r\n\r\n\tif len(argv) == 0:\r\n\t\tprint(\"Script Usage: post-build-event.py \\\"$(TargetDir)\\\"\")\r\n\telse:\r\n\t\ttarget_dir = argv[0]\r\n\t\t\r\n\t\tprint(\"Post-Buid Event script path:\", script_path)\r\n\t\tprint(\"Post-Buid Event target path:\", target_dir)\r\n\t\t\r\n\t\tif not os.path.exists(target_dir):\r\n\t\t\tprint(\"The specified target directory was not found.\")\r\n\t\t\tsys.exit(1)\r\n\t\tcopy_dependencies(script_path, target_dir)\r\n\r\ndef copy_dependencies(root_path, target_dir):\r\n\tif not root_path.endswith('\\\\'):\r\n\t\troot_path += '\\\\'\r\n\r\n\tprint(\"Copying libcurl.dll...\")\r\n\tshutil.copy2(root_path + \"..\\\\..\\\\curl\\\\lib\\\\Debug\\\\libcurl.dll\", target_dir)\r\n\tprint(\"Dependencies copied.\")\r\n\r\nif __name__ == \"__main__\":\r\n\tmain(sys.argv[1:])\r\n","sub_path":"books/cpp-tdd/TestDoubles/post-build-event.py","file_name":"post-build-event.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"68133404","text":"# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# ALLOWED_HOSTS must be correct in production!\n# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['*']\n\n# Databases\n#DATABASES['default']['NAME'] = 'wcs'\n#DATABASES['default']['USER'] = 'wcs'\n#DATABASES['default']['PASSWORD'] = 'wcspass'\n#DATABASES['default']['HOST'] = 'db'\n#DATABASES['default']['PORT'] = '5432'\n\n# Zone\nLANGUAGE_CODE = 'fr-fr'\nTIME_ZONE = 'Europe/Paris'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] %(name)s %(levelname)s %(message)s',\n 'datefmt': '%d/%b/%Y %H:%M:%S'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n },\n 'loggers': {\n\t'':{\n 'handlers': ['console'],\n 'level': 'INFO',\n 'disabled': False\n },\n },\n}\n\n# Email configuration\n# EMAIL_SUBJECT_PREFIX = '[combo] '\n# SERVER_EMAIL = 'root@combo.example.org'\n# DEFAULT_FROM_EMAIL = 'webmaster@combo.example.org'\n\n# SMTP configuration\nEMAIL_HOST = 'smtp'\n# EMAIL_HOST_USER = ''\n# EMAIL_HOST_PASSWORD = ''\nEMAIL_PORT = 1025\n\n# HTTPS Security\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\n\n","sub_path":"wcs/wcs.settings.py","file_name":"wcs.settings.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"154851439","text":"import torch\nfrom torch import nn\n\nfrom CLS_GM.feature_extractor import FeatureExtractor\n\n\nclass Net_Class(torch.nn.Module):\n def __init__(self):\n super(Net_Class, self).__init__()\n num_class = 20\n self.feature_extractor = FeatureExtractor()\n self.classifier = nn.Sequential(\n nn.Linear(2048, 1024),\n torch.nn.Dropout(p=0.5),\n nn.ReLU(),\n nn.Linear(1024, num_class)\n )\n\n def forward(\n self,\n images,\n points,\n graphs,\n n_points,\n perm_mats,\n n_label,\n c_label,\n visualize_flag=False,\n visualization_params=None\n ):\n _, global_list = self.feature_extractor(images,\n points,\n graphs,\n n_points,\n perm_mats,\n visualize_flag,\n visualization_params)\n return [self.classifier(global_feat) for global_feat in global_list]\n","sub_path":"CLS_GM/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"536365374","text":"#! /usr/bin/python\n# -*- coding: utf8 -*-\nimport time\nimport tensorlayer as tl\nimport progressbar\nimport zipfile\nimport os\nimport urllib.request\n\npbar = None\n\ndef show_progress(block_num, block_size, total_size):\n global pbar\n if pbar is None:\n pbar = progressbar.ProgressBar(maxval=total_size)\n\n downloaded = block_num * block_size\n if downloaded < total_size:\n pbar.update(downloaded)\n else:\n pbar.finish()\n pbar = None\n\ndef download_vgg():\n url = \"https://www.dropbox.com/s/7mmianmwcj2qyl5/vgg16.npy?dl=1\"\n urllib.request.urlretrieve(url, 'vgg/vgg16.npy', show_progress)\n\ndef download_models():\n url = \"https://www.dropbox.com/s/f51f795qq7of9rt/Reproducible%20challenge.zip?dl=1\"\n urllib.request.urlretrieve(url, 'Precomputed_weights.zip', show_progress)\n print(\"Finished Download. Starting unzipping of the file:\")\n with zipfile.ZipFile(\"Precomputed_weights.zip\",\"r\") as zip_ref:\n zip_ref.extractall(\"Precomputed_weights\")\n os.remove(\"Precomputed_weights.zip\")\n print(\"Finished Unzipping. Weights of the model located in Precomputed_weights folder\")\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--mode', type=str, default='vgg', help='gdepth')\n args = parser.parse_args()\n\n tl.global_flag['mode'] = args.mode\n\n if tl.global_flag['mode'] == 'vgg':\n print(\"Downloading vgg-16 weights\")\n download_vgg()\n elif tl.global_flag['mode'] == 'weights':\n print(\"Downloading precomputed weights for each model\")\n download_models()\n #pass\n else:\n raise Exception(\"Unknow --mode\")\n","sub_path":"download_weights.py","file_name":"download_weights.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"545853646","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\":Mod: rules\n\n:Synopsis:\n\n:Author:\n servilla\n\n:Created:\n 6/4/18\n\"\"\"\nimport daiquiri\n\nfrom eml2_1_1.exceptions import MPLRuleError\nfrom mom.model import Node\n\n\nlogger = daiquiri.getLogger('rules: ' + __name__)\n\nREQUIRED = True\nOPTIONAL = False\nINFINITY = None\n\n\ndef access_rule(node: Node):\n rules = [\n ['allow', 'deny', 1, INFINITY]\n ]\n process_rules(rules, node)\n attributes = {\n 'id': OPTIONAL,\n 'system': OPTIONAL,\n 'scope': OPTIONAL,\n 'order': OPTIONAL,\n 'authSystem': REQUIRED\n }\n process_attributes(attributes, node)\n if 'order' in node.attributes:\n allowed = ['allowFirst', 'denyFirst']\n if node.attributes['order'] not in allowed:\n msg = '\"{0}:order\" attribute must be one of \"{1}\"'.format(node.rank, allowed)\n raise MPLRuleError(msg)\n\n\ndef additional_metadata_rule(node: Node):\n rules = [\n ['describes', 0, INFINITY],\n ['metadata', 1, 1]\n ]\n process_rules(rules, node)\n attributes = {\n 'id': OPTIONAL\n }\n process_attributes(attributes, node)\n\n\ndef allow_rule(node: Node):\n rules = [\n ['principal', 1, INFINITY],\n ['permission', 1, INFINITY]\n ]\n process_rules(rules, node)\n\n\ndef any_name_rule(node: Node):\n rules = [\n ['value', 0, INFINITY]\n ]\n process_rules(rules, node)\n attributes = {\n 'lang': OPTIONAL\n }\n process_attributes(attributes, node)\n if node.content is not None and type(node.content) is not str:\n msg = 'Node \"{0}\" content should be type string, not \"{1}\"'.format(node.rank, type(node.content))\n raise MPLRuleError(msg)\n if len(node.children) == 0 and node.content is None:\n msg = 'Node \"{0}\" content should not be empty'.format(node.rank)\n raise MPLRuleError(msg)\n\ndef dataset_rule(node: Node):\n pass\n\n\ndef deny_rule(node: Node):\n rules = [\n ['principal', 1, INFINITY],\n ['permission', 1, INFINITY]\n ]\n process_rules(rules, node)\n\n\ndef eml_rule(node: Node):\n rules = [\n ['access', 0, 1],\n ['dataset', 'citation', 'software', 'protocol', 1, 1],\n ['additionalMetadata', 0, INFINITY]\n ]\n process_rules(rules, node)\n attributes = {\n 'packageId': REQUIRED,\n 'system': REQUIRED,\n 'scope': OPTIONAL,\n 'lang': OPTIONAL\n }\n process_attributes(attributes, node)\n\n\ndef individual_name_rule(node: Node):\n rules = [\n ['salutation', 0, INFINITY],\n ['givenName', 0, INFINITY],\n ['surName', 1, 1]\n ]\n process_rules(rules, node)\n\n\ndef metadata_rule(node: Node):\n if len(node.children) != 0:\n msg = 'Node \"{0}\" should not have children'.format(node.rank)\n raise MPLRuleError(msg)\n if type(node.content) is not str:\n msg = 'Node \"{0}\" content should be type string, not \"{1}\"'.format(node.rank, type(node.content))\n raise MPLRuleError(msg)\n\n\ndef permission_rule(node: Node):\n if len(node.children) != 0:\n msg = 'Node \"{0}\" should not have children'.format(node.rank)\n raise MPLRuleError(msg)\n allowed = ['read', 'write', 'changePermission', 'all']\n if node.content not in allowed:\n msg = 'Node \"{0}\" content should be one of \"{1}\", not \"{2}\"'.format(node.rank, allowed, node.content)\n raise MPLRuleError(msg)\n\n\ndef principal_rule(node: Node):\n if len(node.children) != 0:\n msg = 'Node \"{0}\" should not have children'.format(node.rank)\n raise MPLRuleError(msg)\n if type(node.content) is not str:\n msg = 'Node content should be type string, not \"{0}\"'.format(type(node.content))\n raise MPLRuleError(msg)\n\n\ndef responsible_party_rule(node: Node):\n rules = [\n ['individualName', 
'organizationName', 'positionName', 1, INFINITY],\n ['address', 0, INFINITY],\n ['phone', 0, INFINITY],\n ['electronicMailAddress', 0, INFINITY],\n ['onlineUrl', 0, INFINITY],\n ['userId', 0, INFINITY]\n ]\n process_rules(rules, node)\n attributes = {\n 'id': OPTIONAL,\n 'system': OPTIONAL,\n 'scope': OPTIONAL\n }\n process_attributes(attributes, node)\n\n\ndef title_rule(node: Node):\n if node.content is not None and type(node.content) is not str:\n msg = 'Node \"{0}\" content should be type string, not \"{1}\"'.format(node.rank, type(node.content))\n raise MPLRuleError(msg)\n rules = [\n ['value', 0, INFINITY]\n ]\n process_rules(rules, node)\n attributes = {\n 'lang': OPTIONAL\n }\n process_attributes(attributes, node)\n\n\ndef value_rule(node: Node):\n if node.content is None:\n msg = 'Node \"{0}\" content cannot be empty'.format(node.rank)\n raise MPLRuleError(msg)\n if type(node.content) is not str:\n msg = 'Node \"{0}\" content should be type string, not \"{1}\"'.format(node.rank, type(node.content))\n raise MPLRuleError(msg)\n attributes = {\n 'xml:lang': REQUIRED,\n }\n process_attributes(attributes, node)\n\n\ndef process_rules(rules, node: Node):\n i = 0\n max_i = len(node.children)\n for rule in rules:\n rank = rule[:-2]\n min = rule[-2]\n max = rule[-1]\n cnt = 0\n while i < max_i:\n child_rank = node.children[i].rank\n if child_rank in rank:\n cnt += 1\n if max is not INFINITY and cnt > max:\n msg = 'Maximum occurrence of \"{0}\" exceeded for \"{1}\"'.format(rank, node.rank)\n raise MPLRuleError(msg)\n i += 1\n else: break\n if cnt < min:\n msg = 'Minimum occurrence of \"{0}\" not met for \"{1}\"'.format(rank, node.rank)\n raise MPLRuleError(msg)\n if i < max_i:\n child_rank = node.children[i].rank\n msg = 'Child \"{0}\" not allowed for \"{1}\"'.format(child_rank, node.rank)\n raise MPLRuleError(msg)\n\n\ndef process_attributes(attributes, node: Node):\n for attribute in attributes:\n required = attributes[attribute]\n if required and attribute not in node.attributes:\n msg = '\"{0}\" is a required attribute of node \"{1}\"'.format(attribute, node.rank)\n raise MPLRuleError(msg)\n for attribute in node.attributes:\n if attribute not in attributes:\n msg = '\"{0}\" is not a recognized attributes of node \"{1}\"'.format(attribute, node.rank)\n raise MPLRuleError(msg)\n\n\n\nrules = {\n 'access': access_rule,\n 'additionalMetadata': additional_metadata_rule,\n 'allow': allow_rule,\n 'contact': responsible_party_rule,\n 'creator': responsible_party_rule,\n 'dataset': dataset_rule,\n 'deny': deny_rule,\n 'eml': eml_rule,\n 'givenName': any_name_rule,\n 'individualName': individual_name_rule,\n 'metadata': metadata_rule,\n 'organizationName': any_name_rule,\n 'permission': permission_rule,\n 'positionName': any_name_rule,\n 'principal': principal_rule,\n 'salutation': any_name_rule,\n 'surName': any_name_rule,\n 'title': title_rule,\n 'value': value_rule,\n}\n\n\ndef main():\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/eml2_1_1/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"472094544","text":"import gzip\nimport numpy as np\nfrom collections import namedtuple\n\nfrom .subblocks import (\n parse_run_header,\n parse_event_header,\n parse_event_end,\n parse_cherenkov_photons,\n parse_particle_data,\n parse_longitudinal,\n parse_run_end,\n get_version,\n)\nfrom .subblocks.longitudinal import longitudinal_header_dtype\nfrom .subblocks.data import mmcs_cherenkov_photons_dtype\nfrom .io import read_block, read_buffer_size\n\nfrom .constants import BLOCK_SIZE_BYTES, EVTH_VERSION_POSITION\n\nEvent = namedtuple('Event', ['header', 'data', 'longitudinal', 'end'])\nPhotonEvent = namedtuple('PhotonEvent', ['header', 'photons', 'longitudinal', 'end'])\nParticleEvent = namedtuple('ParticleEvent', ['header', 'particles', 'longitudinal', 'end'])\n\n\ndef is_gzip(f):\n pos = f.tell()\n f.seek(0)\n b1, b2 = f.read(2)\n f.seek(pos)\n\n return (b1 == 0x1f) and (b2 == 0x8b)\n\n\nclass CorsikaFile:\n\n def __init__(self, path):\n self.EventClass = Event\n\n self._f = open(path, 'rb')\n if is_gzip(self._f):\n self._f = gzip.open(path)\n\n self._buffer_size = read_buffer_size(self._f)\n\n runh_bytes = self.read_block()\n if not runh_bytes[:4] == b'RUNH':\n raise ValueError('File does not start with b\"RUNH\"')\n\n self.run_header = parse_run_header(runh_bytes)[0]\n self.version = round(float(self.run_header['version']), 4)\n self._run_end = None\n\n @property\n def run_end(self):\n if self._run_end is None:\n pos = self._f.tell()\n\n if self._buffer_size is None:\n self._f.seek(0, 2)\n else:\n self._f.seek(-4, 2)\n\n self._f.seek(-BLOCK_SIZE_BYTES, 1)\n block = self.read_block()\n while block[:4] != b'RUNE':\n self._f.seek(-2 * BLOCK_SIZE_BYTES, 1)\n block = self.read_block()\n\n self._run_end = parse_run_end(block)[0]\n self._f.seek(pos)\n\n return self._run_end\n\n def __next__(self):\n block = self.read_block()\n\n if block[:4] == b'RUNE':\n self._run_end = parse_run_end(block)\n raise StopIteration()\n\n if len(block) < BLOCK_SIZE_BYTES:\n raise StopIteration\n\n if block[:4] != b'EVTH':\n raise IOError('EVTH block expected but found {}'.format(block[:4]))\n\n event_header = parse_event_header(block)[0]\n\n block = self.read_block()\n data_bytes = bytearray()\n long_bytes = bytearray()\n\n while block[:4] != b'EVTE':\n\n if block[:4] == b'LONG':\n long_bytes += block[longitudinal_header_dtype.itemsize:]\n else:\n data_bytes += block\n\n block = self.read_block()\n\n event_end = parse_event_end(block)[0]\n data = self.parse_data_blocks(data_bytes)\n longitudinal = parse_longitudinal(long_bytes)\n\n return self.EventClass(event_header, data, longitudinal, event_end)\n\n @classmethod\n def parse_data_blocks(cls, data_bytes):\n array = np.frombuffer(data_bytes, dtype='float32').reshape(-1, 7)\n return array[np.any(array != 0, axis=1)]\n\n def __iter__(self):\n return self\n\n def read_headers(self):\n pos = self._f.tell()\n self._f.seek(0)\n\n block = self.read_block()\n event_header_data = bytearray()\n end_found = True\n\n while block:\n if block[:4] == b'RUNE':\n self._run_end = parse_run_end(block)[0]\n break\n\n if block[:4] == b'EVTH' and get_version(block, EVTH_VERSION_POSITION) == self.version:\n if not end_found:\n raise IOError('Expected EVTE block before next EVTH')\n\n event_header_data += block\n\n end_found = False\n elif block[:4] == b'EVTE':\n end_found = True\n\n block = self.read_block()\n\n self._f.seek(pos)\n\n event_headers = parse_event_header(event_header_data)\n\n return self.run_header, event_headers, self._run_end\n\n def read_block(self):\n 
return read_block(self._f, self._buffer_size)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def close(self):\n self._f.close()\n\n\nclass CorsikaCherenkovFile(CorsikaFile):\n\n def __init__(self, path, mmcs=False):\n super().__init__(path)\n\n self.EventClass = PhotonEvent\n self.mmcs = mmcs\n\n def parse_data_blocks(self, data_bytes):\n photons = parse_cherenkov_photons(data_bytes)\n if not self.mmcs:\n return photons\n\n mmcs = np.empty(len(photons), dtype=mmcs_cherenkov_photons_dtype)\n\n for col in ('x', 'y', 'u', 'v', 't', 'production_height'):\n mmcs[col] = photons[col]\n\n mmcs['n_photons'] = 1.0\n mmcs['wavelength'] = photons['n_photons'] % 1000\n mmcs['mother_particle'] = photons['n_photons'] // 100000\n\n return mmcs\n\n\nclass CorsikaParticleFile(CorsikaFile):\n\n def __init__(self, path):\n super().__init__(path)\n self.EventClass = ParticleEvent\n\n def parse_data_blocks(self, data_bytes):\n return parse_particle_data(data_bytes)\n","sub_path":"corsikaio/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
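A hedged usage sketch for the CorsikaFile reader in the corsikaio record above; the file name is made up, and both the top-level import and the event_number field name are assumptions about the package rather than facts shown in the record.

```python
# Usage sketch under the stated assumptions; not part of the dataset record.
from corsikaio import CorsikaFile  # assumes the class is re-exported at package level

with CorsikaFile("DAT000001") as f:      # "DAT000001" is a hypothetical CORSIKA file
    print(f.run_header["version"])       # structured run header parsed in __init__
    for event in f:                      # iteration yields Event namedtuples
        # event.data is the (n, 7) float32 block built by parse_data_blocks
        print(event.header["event_number"], len(event.data))
```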
+{"seq_id":"298279645","text":"class XLSUploadAPI(APIView):\r\n def post(self,request):\r\n form=XLSUploadForm(request.POST,request.FILES)\r\n if form.is_valid():\r\n file=form.cleaned_data[\"xlsfile\"]\r\n sysname=form.cleaned_data[\"sysname\"]\r\n chartname=form.cleaned_data[\"chartname\"]\r\n if Chart.objects.filter(chartname=chartname,system_sysname=sysname).exists():\r\n return self.error('Chart already exists')\r\n system=System.objects.get(sysname=sysname)\r\n chart=Chart.objects.create(system=system,chartname=chartname)\r\n db=oracle.connect('usr_exgdba/4f583b94@172.16.11.16/ywxtdb')\r\n cursor=db.cursor()\r\n xlsfile=xlrd.open_workbook(filename=None,file_contents=file)\r\n sheet=xlsfile.sheet_by_index(0)\r\n realnames=sheet.row_values(0)\r\n colnames=[]\r\n for i in realnames:\r\n name=''.join(lazy_pinyin(i))\r\n if name in colnames:\r\n j=0\r\n while name in colnames:\r\n name=name+str(j)\r\n j=j+1\r\n colnames.append(name)\r\n col=Column.objects.create(chart=chart,realname=i,colname=name)\r\n\r\n sql=\"create table %s(id number(10) primary key\" % chartname\r\n for i in colnames:\r\n sql=sql+\",%s varchar2(200)\" % i\r\n sql=sql+')'\r\n result=cursor.excute(sql)\r\n return self.success()","sub_path":"backupfororacle.py","file_name":"backupfororacle.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"287919119","text":"def shipping_ground(weight):\n if weight <= 2:\n price_pound = 1.50\n elif weight <= 6:\n price_pound = 3.00\n elif weight <= 10:\n price_pound = 4.00\n else:\n price_pound = 4.75\n return (weight * price_pound) + 20.00\nprint(shipping_ground(8.4))\n\nshipping_ground_premium = 125.00\n\ndef shipping_dron(weight):\n if weight <= 2:\n price_pound = 4.50\n elif weight <= 6:\n price_pound = 9.00\n elif weight <= 10:\n price_pound = 12.00\n else:\n price_pound = 14.25\n return (weight * price_pound)\nprint(shipping_dron(1.5))\n \n \ndef shipping_cheapest(weight):\n ground = shipping_ground(weight)\n dron = shipping_dron(weight)\n premium = shipping_ground_premium(weight)\n if ground < dron and ground < premium:\n return ground\n elif dron < ground and dron < premium:\n return shipping_dron\n else:\n return premium\n \n cheapest = shipping_cheapest(4.8)\n print(\"cheapest: \", cheapest)\n \n \n ","sub_path":"section-python/section-function-lists/function-shipping.py","file_name":"function-shipping.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"56095855","text":"import numpy as np\r\nfrom scipy.sparse import rand\r\nfrom skfeature.function.structure import group_fs\r\n\r\n\r\ndef main():\r\n n_samples = 50 # specify the number of samples in the simulated data\r\n n_features = 100 # specify the number of features in the simulated data\r\n\r\n # simulate the dataset\r\n X = np.random.rand(n_samples, n_features)\r\n\r\n # simulate the feature weight\r\n w_orin = rand(n_features, 1, 1).toarray()\r\n w_orin[0:50] = 0\r\n\r\n # obtain the ground truth of the simulated dataset\r\n noise = np.random.rand(n_samples, 1)\r\n y = np.dot(X, w_orin) + 0.01 * noise\r\n y = y[:, 0]\r\n\r\n z1 = 0.1 # specify the regularization parameter of L1 norm\r\n z2 = 0.1 # specify the regularization parameter of L2 norm for the non-overlapping group\r\n\r\n # specify the group structure among features\r\n idx = np.array([[1, 20, np.sqrt(20)], [21, 40, np.sqrt(20)], [41, 50, np.sqrt(10)],\r\n [51, 70, np.sqrt(20)], [71, 100, np.sqrt(30)]]).T\r\n idx = idx.astype(int)\r\n\r\n # perform feature selection and obtain the feature weight of all the features\r\n w, obj, value_gamma = group_fs.group_fs(X, y, z1, z2, idx, verbose=True)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_group_fs.py","file_name":"test_group_fs.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"307907497","text":"from django import forms\nfrom django_summernote.widgets import SummernoteWidget\nfrom board.models import Post, Comment\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title','content')\n #exclude = ('created_at','updated_at')\n widgets = {\n 'title': forms.TextInput(attrs={'placeholder': '제목'}),\n 'content': SummernoteWidget(),\n }\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ('content',)\n\n","sub_path":"udecide/board/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"576130807","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 22 17:07:43 2019\n\n@author: Bill\n\"\"\"\n\nimport asyncio\nimport time\n\nclass status():\n def __init__(self):\n self.done = False\n\ndef fire_and_forget(f):\n def wrapped(*args, **kwargs):\n return asyncio.get_event_loop().run_in_executor(None, f, *args, *kwargs)\n\n return wrapped\n#\n# I do not understand why, but I can only get this to work using a decorator. I kind of\n# find decorators annoying. But I can't get the arguments to work correctly otherwise. \n# At this point I guess I can just decorate the call to trainer, like below, and then \n# I should be able to send commands to it from the Matplotlib interface. \n# \n# Here what happens is \n#\n@fire_and_forget\ndef foo(st):\n print('done is',st.done)\n while not st.done:\n time.sleep(1.0)\n print('looping...done is',st.done)\n print(\"foo() completed\")\n return\n\n\nstat = status()\nprint(\"Hello\")\n#fire_and_forget(foo,stat)\nfoo(stat)\n#foo = fire_and_forget(foo)\n\nprint(\"I didn't wait for foo()\")\n","sub_path":"fire_and_forget.py","file_name":"fire_and_forget.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"214860288","text":"from pytube import YouTube\r\nimport sys\r\n\r\nlink_file = open('/nfs/home/ryan0507/ybigta/link_list2.txt')\r\nline = link_file.readline().rstrip('\\n')\r\ncnt = 21\r\nlabel_file = open('/nfs/home/ryan0507/ybigta/original_files.txt', mode = \"w\")\r\nwhile line:\r\n yt = YouTube(line)\r\n yt = yt.streams.first()\r\n print('Downloading ' + line)\r\n yt.download(output_path='/nfs/home/ryan0507/bmt/sample/', filename='test_'+str(cnt))\r\n print(line , 'downloaded with test_' + str(cnt))\r\n label_file.write('test_' + str(cnt) + ' : ' + line + '\\n')\r\n cnt += 1\r\n line = link_file.readline().rstrip('\\n')\r\n\r\n\r\n","sub_path":"model/video-captioning/bmt/pytube_link2.py","file_name":"pytube_link2.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"46372316","text":"\nimport sys\nsys.path.append(\"../../../pygra\") # add pygra library\n\n# Compute the Gap of a honeycomb lattice as a function of the sublattice\n# imbalance\n\nimport geometry\nimport gap\nimport numpy as np\n\nms = np.linspace(0.,0.3,30)\ngs = [] # storage for the gaps\nfor m in ms:\n g = geometry.honeycomb_lattice()\n h = g.get_hamiltonian(has_spin=True)\n h.add_sublattice_imbalance(m)\n gg = gap.indirect_gap(h)\n gs.append(gg) # append gap\n print(m,gg,gg/m)\n\n\n\nimport matplotlib.pyplot as plt\nplt.plot(ms,gs)\nplt.xlabel(\"mass\")\nplt.ylabel(\"gap\")\nplt.show()\n\n","sub_path":"examples/2d/gap/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"296459918","text":"from oslo_config import cfg\nimport oslo_messaging as msg\n\n\nclass client(object):\n def __init__(self, transport, target):\n self.transport = transport\n self.target = target\n self._client = msg.RPCClient(self.transport, self.target)\n\n def test(self):\n self._client.call(ctxt={}, method = 'test', arg=\"Hey. This is testing my coding skills\")\n\n# Create Messaging Transport\ntransport = msg.get_transport(cfg.CONF)\n# Create Target\ntarget = msg.Target(topic='trungnv')\n\n# Create RPC client\nrpc_client = client(transport,target)\n\n# Call function\nrpc_client.test()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"26094350","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/c24b/projets/crawtext/newspaper/videos/extractors.py\n# Compiled at: 2014-11-06 08:50:32\nfrom .videos import Video\nVIDEOS_TAGS = [\n 'iframe', 'embed', 'object', 'video']\nVIDEO_PROVIDERS = ['youtube', 'vimeo', 'dailymotion', 'kewego']\n\nclass VideoExtractor(object):\n \"\"\"Extracts a list of video from Article top node\n \"\"\"\n\n def __init__(self, config, top_node):\n self.config = config\n self.parser = self.config.get_parser()\n self.top_node = top_node\n self.candidates = []\n self.movies = []\n\n def get_embed_code(self, node):\n return ('').join([ line.strip() for line in self.parser.nodeToString(node).splitlines()\n ])\n\n def get_embed_type(self, node):\n return self.parser.getTag(node)\n\n def get_width(self, node):\n return self.parser.getAttribute(node, 'width')\n\n def get_height(self, node):\n return self.parser.getAttribute(node, 'height')\n\n def get_src(self, node):\n return self.parser.getAttribute(node, 'src')\n\n def get_provider(self, src):\n if src:\n for provider in VIDEO_PROVIDERS:\n if provider in src:\n return provider\n\n return\n\n def get_video(self, node):\n \"\"\"Create a video object from a video embed\n \"\"\"\n video = Video()\n video.embed_code = self.get_embed_code(node)\n video.embed_type = self.get_embed_type(node)\n video.width = self.get_width(node)\n video.height = self.get_height(node)\n video.src = self.get_src(node)\n video.provider = self.get_provider(video.src)\n return video\n\n def get_iframe_tag(self, node):\n return self.get_video(node)\n\n def get_video_tag(self, node):\n \"\"\"Extract html video tags\n \"\"\"\n return Video()\n\n def get_embed_tag(self, node):\n parent = self.parser.getParent(node)\n if parent is not None:\n parent_tag = self.parser.getTag(parent)\n if parent_tag == 'object':\n return self.get_object_tag(node)\n return self.get_video(node)\n\n def get_object_tag(self, node):\n child_embed_tag = self.parser.getElementsByTag(node, 'embed')\n if child_embed_tag and child_embed_tag[0] in self.candidates:\n self.candidates.remove(child_embed_tag[0])\n src_node = self.parser.getElementsByTag(node, tag='param', attr='name', value='movie')\n if not src_node:\n return None\n else:\n src = self.parser.getAttribute(src_node[0], 'value')\n provider = self.get_provider(src)\n if not provider:\n return None\n video = self.get_video(node)\n video.provider = provider\n video.src = src\n return video\n\n def get_videos(self):\n self.candidates = self.parser.getElementsByTags(self.top_node, VIDEOS_TAGS)\n for candidate in self.candidates:\n tag = self.parser.getTag(candidate)\n attr = 'get_%s_tag' % tag\n if hasattr(self, attr):\n movie = getattr(self, attr)(candidate)\n if movie is not None and movie.provider is not None:\n self.movies.append(movie)\n\n return list(self.movies)","sub_path":"pycfiles/crawtext-4.1.1-py2-none-any/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"290075782","text":"import threading\nfrom tkinter import *\nimport json\nimport tkinter.scrolledtext as tkscrolled\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nfrom matplotlib.figure import Figure\n\nwith open(\"configuration.json\", 'r+') as configuration_file:\n elasticsearch_directory = json.load(configuration_file)[\"elasticsearch_directory\"]\n configuration_file.close()\n\nimport sys\n\nsys.path.append(elasticsearch_directory)\n\ntry:\n from search_api import *\nexcept:\n print(\"No Elasticsearch module found\")\n exit(1)\n\n\nclass App(Frame):\n \"\"\"\n This class contains the main functionality of the Kibana GUI.\n \"\"\"\n\n class Constants():\n \"\"\"\n This class contains the constants used in the GUI creation\n \"\"\"\n START_OF_FILE = 1.0\n TOP_FRAME_HEIGHT = 50\n\n def __init__(self, master):\n \"\"\"\n This parameter initializes the main App frame (and configures it) and calls the method to create\n all of the app widgets.\n :param master: Main Window\n \"\"\"\n super().__init__()\n self.master = master\n self.figure = Figure()\n\n self.default_query = \"16042018 http clientIP all size_response all ORDER clientIP\"\n #self.default_query = \"16042018 http clientIP all GROUP\"\n #self.default_query = \"16042018 http clientIP 131.174.154.24 size_response all COUNT\"\n self.last_query = \"\"\n\n self.search_engine = Elasticsearch()\n\n self.has_been_just_launched = True\n self.createWidgets(self.master)\n\n def update_text_ui(self, original_logs, bottom):\n \"\"\"\n This method updates the ScrollableText Widget that displays the logs that are a match with the query.\n\n :param original_logs: logs that are a match with the query\n :param bottom: ScrollableText widget that contains the logs\n :return: The function does not return anything.\n \"\"\"\n\n new_text = \"\\n\\n\"\n log_counter = 0\n for log in original_logs:\n new_text += \"Log number \" + str(log_counter) + \": \\n\\n\"\n log_counter += 1\n\n for key in log.keys():\n if str(log[key]) != \"-\":\n new_text = new_text + key + \": \" + str(log[key]) + \"\\n\"\n\n new_text += \"\\n\\n\"\n\n bottom.config(state=NORMAL)\n bottom.delete(self.Constants.START_OF_FILE, END)\n bottom.insert(self.Constants.START_OF_FILE, new_text)\n bottom.config(state=DISABLED)\n\n def update_graphic_ui(self, query, top, results):\n \"\"\"\n This method updates the Frame Widget that displays the graphic result.\n\n :param query: query performed\n :param top: frame that contains the graphic/s\n :param results: result of the query\n :return: The method does not return anything\n \"\"\"\n\n split_query = query.split(\" \")\n _, _, _, pairs, action = self.search_engine.getQueryInfo(elasticsearch_directory, split_query)\n\n if action == \"COUNT\":\n # Create graphic based on the result of the COUNT action result structure\n x_axis_title = \"\"\n for pair in pairs:\n x_axis_title += pair[0] + \": \" + pair[1] + \" \"\n\n y_pos = np.arange(1)\n\n plt.rcdefaults()\n fig, ax = plt.subplots(figsize=(5, 1))\n\n ax.barh(results, results, align='center')\n ax.set_yticks(y_pos)\n ax.set_xlabel(\"Count - \" + x_axis_title)\n ax.set_title('Query Results')\n\n fig.tight_layout()\n\n # Now we integrate the graphic with the Graphic UI\n self.figure = fig\n top.figure = self.figure\n top.draw()\n top.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)\n\n elif action == \"GROUP\":\n # Create graphic based on the 
result of the GROUP action result structure\n y_axis_name = \"Count\"\n\n results.keys()\n\n fig = plt.figure(figsize=(12, 3))\n s = fig.add_subplot(111)\n\n y_pos = np.arange(len(results.keys()))\n y_axis_values = []\n\n objects = []\n\n for key in results.keys():\n y_axis_values.append(results[key])\n objects.append(key)\n\n s.bar(y_pos, y_axis_values, align='center')\n plt.xticks(y_pos, objects)\n s.set_xticklabels(objects)\n s.set_ylabel(y_axis_name)\n s.set_title('Query Results')\n\n for ax in fig.axes:\n matplotlib.pyplot.sca(ax)\n plt.xticks(rotation=45)\n\n fig.tight_layout()\n\n # Now we integrate the graphic with the Graphic UI\n self.figure = fig\n top.figure = self.figure\n top.draw()\n top.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)\n\n elif action == \"ORDER\":\n # Create graphic based on the result of the ORDER action result structure\n\n # Check that the there are 2 field-value pairs in the query, just in case\n if len(pairs) == 2:\n fig, ax1 = plt.subplots(figsize=(12, 5))\n\n y_axis_name = \"Count\"\n objects = results.keys()\n y_pos = np.arange(len(objects))\n y_axis_values = []\n\n data = []\n for key in results.keys():\n y_axis_values.append(results[key][\"count\"])\n data.append(results[key][pairs[-1][0]])\n\n plt.bar(y_pos, y_axis_values, align='center')\n plt.xticks(y_pos, objects)\n plt.xticks(rotation=-90)\n plt.ylabel(y_axis_name)\n plt.title('Query Results')\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:red'\n ax2.set_ylabel([pairs[-1][0]], color=color) # we already handled the x-label with ax1\n ax2.plot(objects, data, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n\n # Now we integrate the graphic with the Graphic UI\n self.figure = fig\n top.figure = self.figure\n top.draw()\n top.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)\n else:\n pass\n\n def update_ui(self, top, bottom, original_logs, results, elasticsearch_directory, query):\n \"\"\"\n This method updates the UI with the result of the query --graphic and text---.\n\n :param top: Frame that contains the graphic\n :param bottom: ScrollableText widget that contains the logs\n :param original_logs: logs that are a match with the query\n :param results: result of the query, that is the action performed over the logs that are a match with the\n query.\n :param elasticsearch_directory:\n :return: This function does not return anything.\n \"\"\"\n\n print(\"Updating UI in update_ui method\")\n\n # Update Text UI in thread, so UI doesn't freeze\n new_text_thread = threading.Thread(\n target=self.update_text_ui(original_logs, bottom))\n new_text_thread.run()\n\n # Update Graphic UI in thread, so UI doesn't freeze\n new_graphic_thread = threading.Thread(\n target=self.update_graphic_ui(query, top,results))\n new_graphic_thread.run()\n\n print(\"Leaving update_ui method\")\n\n def search(self, top, bottom, search, query=None):\n \"\"\"\n This method is the bridge between the UI, the Elasticsearch API and the UI Updater\n\n :param top: Frame widget that contains the graphic\n :param bottom: ScrollableText widget that contains the logs that are a match with the query.\n :param search: SearchBar widget\n :param query: query that will be performed in the Elasticsearch API\n :return: this function does not return anything\n \"\"\"\n\n print(\"Entering search method in Kibana GUI\\n\")\n\n with open(\"configuration.json\", 'r+') as configuration_file:\n 
elasticsearch_directory = json.load(configuration_file)[\"elasticsearch_directory\"]\n configuration_file.close()\n\n if query is not None:\n original_logs, results = self.search_engine.search(elasticsearch_directory, query)\n else:\n original_logs, results = self.search_engine.search(elasticsearch_directory, search.get())\n query = search.get()\n\n new_thread = threading.Thread(\n target=self.update_ui(top, bottom, original_logs, results, elasticsearch_directory, query))\n new_thread.run()\n\n print(\"Leaving search method in Kibana GUI\")\n\n def createWidgets(self, master):\n \"\"\"\n This method creates all of the Tkinter widgets that are part of the UI\n :param master: Main Window of the App, that will contain all the widgets created\n :return: The method does not return anything\n \"\"\"\n\n with open(\"configuration.json\", 'r+') as configuration_file:\n elasticsearch_directory = json.load(configuration_file)[\"elasticsearch_directory\"]\n configuration_file.close()\n\n # Create all of the main containers\n dark_grey = '#898686'\n light_grey = '#c1c1c1'\n\n top_frame = Frame(master, bg=dark_grey, width=master.winfo_width(), height=self.Constants.TOP_FRAME_HEIGHT)\n center_bottom_frame = Frame(master, bg=light_grey, width=master.winfo_width())\n\n # layout all of the main containers\n master.grid_rowconfigure(1, weight=1)\n master.grid_columnconfigure(0, weight=1)\n\n top_frame.grid(row=0, sticky=\"ew\")\n center_bottom_frame.grid(row=1, sticky=\"nsew\")\n\n # create the center widgets\n center_bottom_frame.grid_rowconfigure(0, weight=1)\n center_bottom_frame.grid_columnconfigure(1, weight=1)\n\n ctr_bottom_left = Frame(center_bottom_frame, bg=dark_grey, width=254)\n ctr_bottom_mid_right = Frame(center_bottom_frame)\n ctr_bottom_mid_right_top = FigureCanvasTkAgg(self.figure, master=ctr_bottom_mid_right)\n ctr_bottom_mid_right_bottom = Frame(ctr_bottom_mid_right, bg=\"white\")\n\n ctr_bottom_left.grid(row=0, column=0, sticky=\"ns\")\n ctr_bottom_mid_right_top.draw()\n ctr_bottom_mid_right_top.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)\n ctr_bottom_mid_right.grid(row=0, column=1, sticky=\"nsew\")\n ctr_bottom_mid_right_bottom.pack(fill=BOTH)\n\n # Create and configure the query result label (title)\n query_result_title = Label(ctr_bottom_mid_right_bottom)\n query_result_title.config(text=\"Logs that are a match with the query\", font='Helvetica 14 bold',\n bg=\"white\", width=164, anchor=W, justify=LEFT)\n query_result_title.grid(row=0, column=0)\n\n text_wrapper = Frame(ctr_bottom_mid_right_bottom)\n text_wrapper.config(height=500)\n text_wrapper.grid(row=1, column=0, sticky=\"nswe\")\n\n bottom_text = tkscrolled.ScrolledText(text_wrapper, bg=\"white\")\n bottom_text.grid(row=0)\n bottom_text['font'] = ('Helvetica', '12')\n\n # Create left panel label (title)\n left_label_title = Label(ctr_bottom_left)\n left_label_title.config(text=\"Available Indexes\", bg=\"white\", font='Helvetica 14 bold', width=32,\n justify=LEFT)\n left_label_title.place(x=ctr_bottom_left.winfo_x(), y=ctr_bottom_left.winfo_y())\n\n # Create left panel list (body) and scrollbar\n available_indexes = self.search_engine.available_indexes(elasticsearch_directory)\n\n listbox = Listbox(ctr_bottom_left, bg=\"white\", width=int(master.winfo_width()*0.025), height=500,\n selectmode=MULTIPLE)\n listbox.place(x=left_label_title.winfo_x(), y=left_label_title.winfo_y() + 22)\n\n for i in range(0, len(available_indexes)):\n listbox.insert(i + 1, available_indexes[i])\n\n # Create and configure the search bar\n 
search_bar = Entry(top_frame, bg=\"white\", text=\"Search Here\", width=int(master.winfo_width()*0.1118))\n search_bar.grid(row=0, column=0)\n search_button = Button(top_frame, bg='white', text=\"Search\", width=int(master.winfo_width()*0.01),\n command=lambda top=ctr_bottom_mid_right_top, bottom=bottom_text,\n search=search_bar:\n self.search(top, bottom, search))\n search_button.grid(row=0, column=1)\n\n # When the UI is launched for the first time, we show a default query so that the UI is not empty.\n if self.has_been_just_launched:\n self.search(ctr_bottom_mid_right_top, bottom_text, search_bar, query=self.default_query)\n self.has_been_just_launched = False\n\n\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Log Management System v1\")\n canvas = Canvas(root, width=2000, height=1000)\n canvas.pack()\n canvas.update()\n my_gui = App(canvas)\n my_gui.mainloop()","sub_path":"Log Manager in Python/kibana/kibana_ui.py","file_name":"kibana_ui.py","file_ext":"py","file_size_in_byte":13365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
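The kibana_ui.py record above relies on the standard Matplotlib/Tkinter embedding pattern. A minimal standalone sketch of just that pattern follows; the window contents and bar data are arbitrary placeholders.

```python
# Minimal FigureCanvasTkAgg embedding sketch; names are local to this example.
from tkinter import Tk, TOP, BOTH
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = Tk()
fig = Figure(figsize=(4, 3))
ax = fig.add_subplot(111)
ax.bar(["a", "b", "c"], [3, 1, 2])
ax.set_title("Query Results")

canvas = FigureCanvasTkAgg(fig, master=root)    # wrap the Figure in a Tk widget
canvas.draw()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)
root.mainloop()
```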
+{"seq_id":"502501537","text":"\nimport requests\nimport re\nimport json\nimport time\nimport random\n\nMAXSLEEPTIME = 3\nMINSLEEPTIME = 1\nSTATUS_OK = 200\nMAX_PAGE_NUM = 10\nSERVER_ERROR_MIN = 500\nSERVER_ERROR_MAX = 600\nCLIENT_ERROR_MIN = 400\nCLIENT_ERROR_MAX = 500\n\n#1.对URL发起HTTP请求http request,得到相应的http response响应,response响应体中有我们需要的数据内容。\ndef get_one_page(URL,num_retry=5): #https://maoyan.com/board/4?offset=0\n if num_retry == 0:\n return None\n ua_headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36\"}\n response = requests.get(URL,headers=ua_headers)\n if response.status_code == STATUS_OK:\n return response.text\n elif SERVER_ERROR_MIN <= response.status_code < SERVER_ERROR_MAX:\n time.sleep(MAXSLEEPTIME)\n get_one_page(URL,num_retry-1)\n elif CLIENT_ERROR_MIN <= response.status_code < CLIENT_ERROR_MAX:\n #正确的做法是写日志\n if response.status_code == 404:\n print(\"Page not find!\") \n elif response.status_code == 403:\n print(\"Forbidden!\")\n else:\n pass\n return None\n\n#print(get_one_page(\"https://maoyan.com/board/4?offset=0\"))\n\n#2.使用正则表达式,XPath,bs4精确的获取数据。\ndef parse_one_page(html):\n pattern = re.compile('%s
')\n return Markup(value)\n\n\ndef formatfiltercount(value):\n try:\n value = int(value)\n if value > 0:\n return 'yes, %i filter(s)' % value\n else:\n return 'none'\n except Exception:\n return 'unknown'\n\n\ndef formatBugLinks(value):\n def addLink(match):\n linkApp = match.group(1)\n if linkApp != None:\n linkApp = linkApp.lower()\n linkType = match.group(2).lower()\n linkNum = int(match.group(3))\n if linkType == 'topic':\n link = 'https://adblockplus.org/forum/viewtopic.php?t=%i' % linkNum\n elif linkApp == None and linkType == 'issue':\n link = 'https://issues.adblockplus.org/ticket/%i' % linkNum\n elif linkApp == 'webkit':\n link = 'https://bugs.webkit.org/show_bug.cgi?id=%i' % linkNum\n elif linkApp != None:\n link = 'http://code.google.com/p/chromium/issues/detail?id=%i' % linkNum\n else:\n link = 'https://bugzilla.mozilla.org/show_bug.cgi?id=%i' % linkNum\n return '%s' % (link, match.group(0))\n\n regexp = re.compile(r'(https?://\\S+?)([.,:;!?\"\\']?(?:\\s|$))', re.I | re.U)\n regexp2 = re.compile(r'(?:\\b(WebKit|Chrome|Chromium)\\s+)?\\b(bug|issue|topic)\\s+(\\d+)', re.I | re.U)\n value = unicode(Markup.escape(value))\n value = re.sub(regexp, r'\\1\\2', value)\n value = re.sub(regexp2, addLink, value)\n return Markup(value)\n\n\ndef urlencode(value):\n return urllib.quote(value.encode('utf-8'), '')\n\n\ndef subscriptionSort(value, prioritizeRecommended=True):\n value = value[:] # create a copy of the list\n if prioritizeRecommended:\n value.sort(\n lambda a, b:\n cmp(a.type, b.type) or\n cmp(a.deprecated, b.deprecated) or\n cmp(b.catchall, a.catchall) or\n cmp(b.recommendation != None, a.recommendation != None) or\n cmp(a.name.lower(), b.name.lower()),\n )\n else:\n value.sort(\n lambda a, b:\n cmp(a.type, b.type) or\n cmp(a.deprecated, b.deprecated) or\n cmp(a.name.lower(), b.name.lower()),\n )\n return value\n\n\ndef formatmime(text):\n # See http://bugs.python.org/issue5871 (not really fixed), Header() will\n # happily accept non-printable characters including newlines. 
Make sure to\n # remove them.\n text = re.sub(r'[\\x00-\\x1F]', '', text)\n return email.header.Header(text).encode()\n\n\ndef ljust(value, width=80):\n return unicode(value).ljust(width)\n\n\ndef rjust(value, width=80):\n return unicode(value).rjust(width)\n\n\ndef ltruncate(value, length=255, end='...'):\n value = unicode(value)\n if len(value) <= length:\n return value\n return end + value[len(value) - length:len(value)]\n\n\ndef formatweekday(value):\n return time.strftime('%a', (0, 0, 0, 0, 0, 0, value, 0, 0))\n\n\ndef formatbytes(value):\n if value == 0:\n return '0'\n\n value = float(value)\n unit = 'Bytes'\n if value > 1024:\n value /= 1024\n unit = 'KB'\n if value > 1024:\n value /= 1024\n unit = 'MB'\n if value > 1024:\n value /= 1024\n unit = 'GB'\n return '%.2f %s' % (value, unit)\n\n\ndef toJSON(value, **args):\n return re.sub(r'', r'<\\/script>', json.dumps(value, **args))\n\n\nfilters = {\n 'formattime': formattime,\n 'timerelative': formatrelativetime,\n 'url': formaturl,\n 'keepnewlines': formatnewlines,\n 'filtercount': formatfiltercount,\n 'buglinks': formatBugLinks,\n 'urlencode': urlencode,\n 'subscriptionSort': subscriptionSort,\n 'mime': formatmime,\n 'emailaddr': email.utils.formataddr,\n 'ljust': ljust,\n 'rjust': rjust,\n 'ltruncate': ltruncate,\n 'weekday': formatweekday,\n 'bytes': formatbytes,\n 'json': toJSON,\n}\n","sub_path":"sitescripts/templateFilters.py","file_name":"templateFilters.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
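The templateFilters.py record above ends by collecting its filters into a dict, but does not show where that dict gets registered. The following is only a hedged illustration of wiring such a dict into a Jinja2 environment, with a simplified stand-in for one of the filters.

```python
# Illustration of registering a filter dict on a Jinja2 environment; the
# formatbytes below is a simplified stand-in, not the record's exact filter.
from jinja2 import Environment

def formatbytes(value):
    value = float(value)
    for unit in ("Bytes", "KB", "MB", "GB"):
        if value < 1024:
            return "%.2f %s" % (value, unit)
        value /= 1024
    return "%.2f TB" % value

env = Environment()
env.filters.update({"bytes": formatbytes})
print(env.from_string("{{ 123456 | bytes }}").render())  # 120.56 KB
```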
+{"seq_id":"651851451","text":"import json\nimport logging\n\nimport sys\nfrom optparse import OptionParser\n\nimport requests\n\n\nclass ZabbixAPIException(Exception):\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass MonitorDescription:\n zabbix_servers = {\n \"JMX\": \"java.zabbix.dooioo.com\",\n \"SERVER\": \"server.zabbix.dooioo.com\"\n }\n\n default_template_names = {\n \"JMX\": [\"Template JMX Generic\"],\n \"SERVER\": [\"Template OS Linux\"]\n }\n\n default_host_groups = {\n \"JMX\": ['JMX'],\n \"SERVER\": ['Linux']\n }\n\n def __init__(self, ip, interface_port=10811, monitor_type='JMX'\n , host_group_names=None, monitor_template_names=None\n , http_test_resource='/api/it/ping', http_test_port=9600):\n self.ip = ip\n self.interface_port = interface_port\n self.monitor_type = monitor_type if monitor_type else 'JMX'\n if self.monitor_type == 'JMX':\n self.interface_type = 4\n else:\n self.interface_type = 1\n self.host_group_names = host_group_names if host_group_names else self.default_host_groups[monitor_type]\n if monitor_template_names:\n self.monitor_template_names = self.default_template_names[\"JMX\"]\n self.monitor_template_names.extend(monitor_template_names)\n else:\n self.monitor_template_names = self.default_template_names['JMX']\n self.http_test_resource = http_test_resource\n self.http_test_port = http_test_port\n self.build_zabbix_server()\n self.host_group_ids = []\n self.template_ids = []\n\n def build_zabbix_server(self):\n try:\n self.api_url = 'http://{0}/api_jsonrpc.php' \\\n .format(self.zabbix_servers[self.monitor_type])\n except Exception as e:\n raise ZabbixAPIException(\"检查你输入的监控类型是否正确,目前支持:JMX, SERVER两种类型\")\n\n\nclass ZabbixClient:\n \"\"\"\n Generic Zabbix API Client\n - login\n - get auth\n \"\"\"\n global_header = {\n 'Content-Type': 'application/json-rpc',\n 'User-Agent': 'python-zabbix-client',\n 'Cache-Control': 'no-cache'\n }\n\n def __init__(self, monitor_description, user_name=None, password=None, timeout=None):\n self.user_name = user_name if user_name else '110863'\n self.password = password if password else '123456'\n self.session = requests.session()\n self.session.headers.update(self.global_header)\n self.auth = ''\n self.id = 0\n self.timeout = timeout if timeout else 20\n self.zabbix_api_url = monitor_description.api_url\n self.monitor_description = monitor_description\n\n def login(self):\n \"\"\"\n login with given user_name and password, if None, use default user\n :param user_name:\n :param password:\n :return: result,auth key\n \"\"\"\n self.auth = self.user.login(user=self.user_name, password=self.password)\n\n def api_version(self):\n return self.apiinfo.version()\n\n def get_exiting_host_group_ids(self):\n \"\"\"\n get exiting host group name to avoid host group not existing error\n :param host_group_names:\n :return:\n \"\"\"\n host_group = self.hostgroup.get(\n filter={\n 'name': self.monitor_description.host_group_names\n })\n logger.info(str(host_group) + \"is found\")\n if len(host_group) == 0:\n raise ZabbixAPIException(self.host_group_names + \" is not existing\")\n host_group_ids = []\n for item in host_group:\n host_group_ids.append({\"groupid\": item['groupid']})\n self.monitor_description.host_group_ids = host_group_ids\n return host_group_ids\n\n def get_existing_templates(self):\n \"\"\"\n get exiting monitor templates\n :param monitor_template:\n :return:\n \"\"\"\n templates = self.template.get(filter={\n \"host\": self.monitor_description.monitor_template_names\n })\n for item in templates:\n 
self.monitor_description.template_ids.append({\"templateid\": item['templateid']})\n return templates\n\n def create_host(self):\n \"\"\"\n create new host\n \"\"\"\n created_host = self.host.create(\n host=self.monitor_description.ip,\n interfaces=[{\n \"type\": self.monitor_description.interface_type,\n \"main\": 1,\n \"useip\": 1,\n \"ip\": self.monitor_description.ip,\n \"dns\": \"\",\n \"port\": self.monitor_description.interface_port\n }],\n groups=self.monitor_description.host_group_ids\n )\n new_host_id = created_host['hostids'][0]\n return new_host_id\n\n def link_template_to_host(self, host_id):\n \"\"\"\n\n :param host_id:\n :return:\n \"\"\"\n if host_id:\n template = self.template.massadd(\n templates=self.monitor_description.template_ids,\n hosts=[{\"hostid\": host_id}]\n )\n return template\n\n def create_monitor(self):\n self.login()\n self.get_exiting_host_group_ids()\n self.get_existing_templates()\n host_id = self.create_host()\n self.link_template_to_host(host_id)\n return host_id\n\n def httptest_create(self, host_id, monitor_description):\n \"\"\"\n create http test like ping /api/it/ping\n :param host_id:\n :param name:\n :param ip:\n :param port:\n :param http_test_resource:\n :return:\n \"\"\"\n\n if host_id:\n httptest_request = {\n 'retries': '1',\n 'status': '0',\n 'agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '\n 'AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8',\n 'steps': [\n {\n 'no': '1',\n 'status_codes': '200',\n 'posts': '',\n 'variables': '',\n 'timeout': '15',\n 'url': 'http://' + self.monitory_description.ip + ':' + str(\n self.monitory_description.http_test_port) + monitor_description.http_test_resource,\n 'required': '',\n 'name': self.monitory_description.ip + '_' + str(self.monitory_description.http_test_port)\n }\n ],\n 'authentication': '0',\n 'macros': '',\n 'hostid': host_id,\n 'variables': '',\n 'delay': '30',\n 'http_password': '',\n 'name': self.monitory_description.ip + '_' + str(self.monitory_description.http_test_port)\n }\n\n http_test = self.httptest.create(httptest_request)\n return http_test\n\n def do_request(self, method, params=None):\n request_json = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params or {},\n 'id': self.id,\n }\n\n if method != 'apiinfo.version' and self.auth:\n request_json['auth'] = self.auth\n\n logger.info(\"sending: %s\", json.dumps(request_json, indent=4, separators=(',', ':')))\n response = self.session.post(\n self.monitor_description.api_url,\n data=json.dumps(request_json),\n timeout=self.timeout\n )\n logger.info(\"Response Code : %s\", str(response.status_code))\n\n response.raise_for_status()\n\n if not len(response.text):\n raise ZabbixAPIException(\"没有返回值\")\n try:\n response_json = json.loads(response.text)\n except ValueError:\n raise ZabbixAPIException(\"不能解析JSON %s\" % response.text)\n\n logger.info(\"sending: %s\", json.dumps(request_json, indent=4, separators=(',', ':')))\n\n self.id += 1\n\n if 'error' in response_json:\n if 'data' not in response_json['error']:\n response_json['error']['data'] = 'No Data'\n msg = \"Error {code}: {message},{data}\".format(\n code=response_json['error']['code'],\n message=response_json['error']['message'],\n data=response_json['error']['data']\n )\n raise ZabbixAPIException(msg, response_json['error']['code'])\n return response_json\n\n def __getattr__(self, item):\n \"\"\"\n auto create Zabbix API Client\n :param item:\n :return:\n \"\"\"\n return ZabbixAPIObjectClass(item, self)\n\n\nclass ZabbixAPIObjectClass(object):\n 
\"\"\"\n Zabbix API Object for API client\n \"\"\"\n\n def __init__(self, name, parent):\n self.name = name\n self.parent = parent\n\n def __getattr__(self, item):\n \"\"\"\n dynamic create a method (get,create,update,delete, or others)\n :param item:\n :return:\n \"\"\"\n\n def fn(*args, **kwargs):\n if args and kwargs:\n raise TypeError('只能输入一种参数,value或者 key=value形式')\n\n return self.parent.do_request('{0}.{1}'.format(self.name, item),\n args or kwargs)['result']\n\n return fn\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-t\", '--monitor_template_names',\n help=\"输入模版名称,默认为JMX:Template JMX Generic,SERVER:Template OS Linux\")\n parser.add_option(\"-i\", '--ip', help=\"输入HOST的IP地址\")\n parser.add_option(\"-m\", '--monitor_type', help=\"监控类型:JMX or SERVER\", default='JMX')\n parser.add_option(\"-a\", '--interface_port', help=\"监控代理端口:agent interface port\", default=10811)\n parser.add_option(\"-g\", '--host_group_names', help=\"host group names\")\n parser.add_option(\"-p\", '--http_test_resource', help=\"服务检查URL,默认为/api/it/ping\", default='/api/it/ping')\n parser.add_option(\"-P\", '--http_test_port', help=\"服务检查端口,默认为9600端口\", default=9600)\n\n (options, args) = parser.parse_args()\n print(options)\n monitor_description = MonitorDescription(ip=options.ip\n , interface_port=options.interface_port\n , monitor_type=options.monitor_type\n , host_group_names=options.host_group_names\n , monitor_template_names=options.monitor_template_names\n , http_test_resource=options.http_test_resource\n , http_test_port=options.http_test_port)\n\n client = ZabbixClient(monitor_description)\n client.create_monitor()\n","sub_path":"python_client/zabbix/zabbixclient.py","file_name":"zabbixclient.py","file_ext":"py","file_size_in_byte":10917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"319170332","text":"\n# this renders the home page which is start.html\nfrom app.allImports import *\n@app.route(\"/post/\"\"\")\n\n html.append(\"\"\"\n
\"\"\")\n html.append(\"\"\"\n \"\"\")\n","sub_path":"app/matrice.py","file_name":"matrice.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"151751941","text":"import pipeline\nimport workers\n\n\ndef hello_pipeline():\n pipeline.Pipeline([\n pipeline.Task(HelloWorldProducer, 1),\n pipeline.Task(PrintConsumer, 1)\n ]).execute()\n\n\nclass HelloWorldProducer(workers.Producer):\n def process(self):\n self.out_queue.put('{}: Hello World'.format(self.name))\n\n\nclass PrintConsumer(workers.Consumer):\n def process(self, item):\n print(\"{}: '{}'\".format(self.name, str(item)))\n\n\nif __name__ == '__main__':\n hello_pipeline()\n","sub_path":"hello_pipeline.py","file_name":"hello_pipeline.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"363242474","text":"import torch\nimport numpy as np\nfrom datasets import test_data\nfrom torchvision import transforms\ndevice=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nto_pil_image = transforms.ToPILImage()\ndef print_img(image):\n image = image.cpu().squeeze(0)\n img = to_pil_image(image[:])\n img.convert('RGB')\n img.show()\nimg,target,label = test_data[0]\nprint_img(img)\n\nprint(label)\nt_con_mask = torch.ByteTensor(target.size()).to(device)\nt_con_mask.zero_()\nt_con_mask[:, :, 4] = 1\n\nt_confidence = target[t_con_mask].cpu().numpy()\nt_sorted_idx = np.argsort(-t_confidence)\nt_sorted_con = np.sort(-t_confidence)\n\nt_i = 0\nt_rst = []\nwhile True:\n if -t_sorted_con[t_i] < 1:\n break\n t_row = t_sorted_idx[t_i] // 28\n t_line = t_sorted_idx[t_i] % 28\n t_r = target[ t_row, t_line, 5]\n t_rst.append(t_r.item())\n t_i += 1\nprint(t_rst)","sub_path":"fixerror.py","file_name":"fixerror.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"122274889","text":"\"\"\"The class used to manipulate lists of Atoms\n\"\"\"\nimport numpy as np\n# from copy import copy\nfrom copy import deepcopy\n\nfrom fromage.utils.atom import Atom\nimport fromage.io.edit_file as ef\n\ndef try_ismol(to_test):\n \"\"\" Raise exception if the argument is not a Mol object\"\"\"\n if not isinstance(to_test, Mol):\n raise TypeError(\"Cannot cast \" +\n type(to_test).__name__ + \" to Mol object\")\n\ndefault_thresh = {'dis' : 1.8,\n 'cov' : 0.2,\n 'vdw' : -0.3}\n\nclass Mol(object):\n \"\"\"\n Object representing a list of atoms.\n\n This class can be used to reflect any number of molecules, point charges or\n unit cells. Although Mol shares many methods with list, it deliberately does\n not inherit it in order to avoid nonsensical operations such as Mol1 > Mol2\n\n Attributes\n ----------\n atoms : list of Atom objects\n Member atoms of Mol\n vectors : 3 x 3 numpy array\n Lattice vectors of the unit cell\n bonding : string 'dist, 'cov' or 'vdw'\n The method for detecting bonding in this molecule.\n 'dis' : distance between atoms < threshold\n 'cov' : distance - (cov radius of atom a + of atom b) < threshold\n 'vdw' : distance - (vwd radius of atom a + of atom b) < threshold\n thresh : float, optional\n Threshold for the detection. If None, use defaults\n\n \"\"\"\n\n\n\n def __init__(self, in_atoms=[], vectors=np.zeros((3, 3)), bonding = 'dis', thresh = 1.8):\n # In case the user feeds a lone atom:\n if isinstance(in_atoms, Atom):\n in_atoms = [in_atoms]\n self.atoms = in_atoms\n self.vectors = vectors\n self.bonding = bonding\n self.thresh = thresh\n def __repr__(self):\n out_str = \"\"\n for atom in self.atoms:\n out_str += atom.__str__() + \"\\n\"\n return out_str\n\n def __str__(self):\n return self.__repr__()\n\n def set_bonding(self, bonding='dis', thresh=None):\n \"\"\"\n Set the type of bonding detection used in this Mol\n\n Parameters\n ----------\n bonding : string 'dis', 'cov' or 'vdw'\n The method for detecting bonding in this molecule.\n 'dis' : distance between atoms < threshold\n 'cov' : distance - (cov radius of atom a + of atom b) < threshold\n 'vdw' : distance - (vwd radius of atom a + of atom b) < threshold\n thresh : float, optional\n Threshold for the detection. If None, use defaults:\n 'dis' -> 1.8\n 'cov' -> 0.2\n 'vdw' -> -0.3\n\n \"\"\"\n if bonding not in default_thresh:\n raise TypeError(\"Unrecognised bonding type: \"+ bonding)\n self.bonding = bonding\n if thresh:\n self.thresh = thresh\n else:\n self.thresh = default_thresh[bonding]\n return\n\n def set_bonding_str(self, in_str):\n \"\"\"\n Set the type of bonding and threshold with one string\n\n The string is of the type \"cov0.2\" or \"dis1.7\" etc. 
But giving just the\n threshold or just the bonding gives the default for the ommitted part.\n The order of the bonding and threshold does not matter, so \"vdw2.2\" is\n the same as \"2.2vdw\"\n\n Parameters\n ----------\n in_str : str\n The string which determines the bonding where the threshold and the\n distance are set to default if none are supplied\n\n \"\"\"\n bondings = default_thresh.keys()\n bonding = ''\n thresh_str = ''\n # check if bonding has been specified\n for i_bonding in bondings:\n if i_bonding in in_str:\n bonding = i_bonding\n # if there is bonding, try to find threshold\n if bonding:\n stripped = in_str.replace(bonding,'')\n # if there is still a thresh in this string\n if stripped:\n thresh_str = stripped\n # if only the thresh is specified\n elif in_str:\n thresh_str = in_str\n # if both present\n if bonding and thresh_str:\n self.set_bonding(bonding=bonding,thresh=float(thresh_str))\n # if only bonding\n if bonding and not thresh_str:\n self.set_bonding(bonding=bonding)\n # if only thresh\n if thresh_str and not bonding:\n self.set_bonding(thresh=float(thresh_str))\n if not thresh_str and not bonding:\n self.set_bonding()\n return\n\n def bonded(self, atom_a, atom_b):\n \"\"\"\n Check if atom_a is bonded to atom_b given the bonding settings\n\n Parameters\n ----------\n atom_a, atom_b : Atom objects\n The atoms to be compared\n Returns\n -------\n bonded_bool : bool\n True if the atoms are bonded and False if not\n \"\"\"\n bonded_bool = atom_a.dist(atom_b, ref=self.bonding) <= self.thresh\n return bonded_bool\n\n def per_bonded(self, atom_a, atom_b):\n \"\"\"\n Check if atom_a is bonded to atom_b given lattice conditions\n\n Parameters\n ----------\n atom_a, atom_b : Atom objects\n The atoms to be compared\n Returns\n -------\n bonded_bool : bool\n True if the atoms are bonded and False if not\n \"\"\"\n bonded_bool = atom_a.per_dist(atom_b, self.vectors, ref=self.bonding) <= self.thresh\n return bonded_bool\n\n # list-y behaviour\n def append(self, element):\n self.atoms.append(element)\n\n def extend(self, other_mol):\n self.atoms.extend(other_mol.atoms)\n\n def insert(self, i, element):\n self.atoms.insert(i, element)\n\n def remove(self, element):\n self.atoms.remove(element)\n\n def index(self, element):\n return self.atoms.index(element)\n\n def pop(self, i=-1):\n return self.atoms.pop(i)\n\n def clear(self):\n self.atoms.clear()\n\n def count(self, element):\n return self.atoms.count()\n\n def __add__(self, other_mol):\n try_ismol(other_mol)\n return Mol(deepcopy(self).atoms + other_mol.atoms, vectors = self.vectors, bonding = self.bonding, thresh = self.thresh)\n\n def __len__(self):\n return len(self.atoms)\n\n def __eq__(self, other):\n return self.atoms == other.atoms\n\n def __getitem__(self, index):\n return self.atoms[index]\n\n def __setitem__(self, index, value):\n self.atoms[index] = value\n return\n\n def __contains__(self, elem):\n return self.atoms.__contains__(elem)\n\n def copy(self):\n return deepcopy(self)\n\n def write_xyz(self, name):\n \"\"\"Write an xyz file of the Mol\"\"\"\n ef.write_xyz(name, self.atoms)\n\n def empty_mol(self):\n \"\"\"Return an empty mol with the same properties\"\"\"\n new_mol = deepcopy(self)\n new_mol.atoms = []\n return new_mol\n\n def select(self, labels):\n \"\"\"\n Return a molecule out of the current Mol.\n\n The function returns a new Mol of selected atoms atoms. The selection is\n done by measuring by how much adjacent vdw spheres overlap. 
The returned\n Mol's attributes are new objects obtained via a deep copy.\n\n Parameters\n ----------\n label : int or list of ints\n The number of the atoms from which the molecules are generated.\n\n Returns\n -------\n selected : Mol object\n The selected molecule\n \"\"\"\n\n # Make sure that labels is a list\n if isinstance(labels, int):\n labels = [labels]\n\n # Check for duplicate labels\n if len(labels) > len(set(labels)):\n raise TypeError(\"Some labels are repeated\")\n\n selected = self.copy()\n selected.atoms = deepcopy([self[i] for i in labels])\n remaining = self.copy()\n for atom in selected:\n if atom in remaining:\n remaining.remove(atom)\n\n old_atoms = selected.copy()\n\n # While there are atoms to add\n cont = True\n while cont:\n cont = False\n new_atoms = Mol([])\n for old in old_atoms:\n tmp_remaining = remaining.copy()\n for rem in remaining:\n if self.bonded(old,rem):\n new_atoms.append(rem)\n selected.append(rem)\n tmp_remaining.remove(rem)\n cont = True # An atom was added so continue loop\n remaining = tmp_remaining\n old_atoms = new_atoms\n return selected\n\n def per_select(self, labels, old_pos=False):\n \"\"\"\n Select a molecule out of a Mol in a periodic system.\n\n Parameters\n ----------\n labels : int or list of ints\n The number of the atoms from which the molecules are generated\n old_pos : bool\n Option to print the selected molecule at its original coordinates\n\n Returns\n -------\n selected_img : Mol object\n The atoms belonging to the molecule which is selected with certain\n atoms translated so that the molecule is fully connected without\n periodic boundaries\n selected_old : Mol object (optional)\n The atoms belonging to the molecule which is selected before\n translations\n\n \"\"\"\n\n # Make sure that labels is a list\n if isinstance(labels, int):\n labels = [labels]\n\n # Check for duplicate labels\n if len(labels) > len(set(labels)):\n raise TypeError(\"Some labels are repeated\")\n\n # Mol of selected atoms from the unit cell\n selected_old = self.copy()\n selected_old.atoms = [self[i] for i in labels]\n\n # Mol of selected atoms where the periodic image\n # atoms are translated back to form a molecule\n selected_img = selected_old.copy()\n\n remaining = self.copy()\n for atom in selected_old:\n if atom in remaining:\n remaining.remove(atom)\n\n old_atoms = selected_old.copy()\n\n # While there are atoms to add\n cont = True\n while cont == True:\n cont = False\n new_atoms = Mol([])\n for old in old_atoms:\n tmp_remaining = remaining.copy()\n for rem in remaining:\n # contains the distance from the point or image and the\n # coordinates of the point or image\n dist, per_img = old.per_dist(rem, self.vectors, ref=self.bonding, new_pos=True)\n # if the atom is close enough to be part of the molecule\n if dist <= self.thresh:\n new_atoms.append(per_img)\n selected_old.append(rem)\n selected_img.append(per_img)\n tmp_remaining.remove(rem)\n cont = True # An atom was added so continue loop\n remaining = tmp_remaining\n old_atoms = new_atoms\n\n\n if old_pos:\n return selected_img, selected_old\n else:\n return selected_img\n\n def segregate(self):\n \"\"\"Separate current Mol in a list of Mols of different molecules\"\"\"\n molecules = [] # list of molecules\n remaining = self.copy()\n\n while len(remaining) > 0:\n molecule = remaining.select(0)\n molecules.append(molecule)\n for atom in molecule:\n remaining.remove(atom)\n return molecules\n\n def complete_mol(self, labels):\n \"\"\"\n Take a cell and complete certain molecules\n\n The 
objective is to end up with a unit cell where the molecules of interest\n are complete. The rest of the atoms of the cell must remain intact. Note that\n the input atoms are transformed and are the same as are present in the\n output.\n\n Parameters\n ----------\n labels : int or list of ints\n The number of the atoms from which the molecules are generated\n Returns\n -------\n new_mol : Mol object\n The now complete molecule\n new_cell : Mol object\n The cell with the completed molecule\n \"\"\"\n new_mol, scattered_mol = self.per_select(labels, old_pos=True)\n new_cell_atoms = deepcopy(\n [a for a in self.atoms if a not in scattered_mol])\n new_cell = self.copy()\n new_cell.atoms = new_cell_atoms\n\n for atom in new_mol:\n new_cell.append(atom.copy())\n return new_mol, new_cell\n\n def complete_cell(self):\n \"\"\"\n Return a cell where atoms have been translated to complete all molecules of\n the cell\n\n Returns\n -------\n out_cell : Mol object\n The new untruncated cell\n full_mol_l : list of Mol objects\n Each molecule in the untruncated cell\n\n \"\"\"\n full_mol_l = []\n remaining = self.copy()\n\n while len(remaining) != 0:\n full_mol, cell = remaining.complete_mol(0)\n full_mol_l.append(full_mol)\n remaining = cell\n for atom in full_mol:\n if atom in remaining:\n remaining.remove(atom)\n\n # Convinently, remaining is now an empty Mol\n out_cell = remaining\n for mol in full_mol_l:\n out_cell.extend(mol)\n return out_cell, full_mol_l\n\n def centroid(self):\n \"\"\"Return np array of the centroid\"\"\"\n N = len(self.atoms)\n centro = np.array([0.0, 0.0, 0.0])\n for atom in self.atoms:\n centro[0] += atom.x\n centro[1] += atom.y\n centro[2] += atom.z\n centro = centro / N\n return centro\n\n def center_mol(self):\n \"\"\"Translate molecules to center\"\"\"\n cen = self.centroid()\n for atom in self.atoms:\n atom.v_translate(-cen)\n return\n\n def translate(self, vector):\n \"\"\"\n Translate Mol by a vector\n\n Parameters\n ----------\n vector : 3 x 1 numpy array\n Translation vector\n\n \"\"\"\n for atom in self.atoms:\n atom.v_translate(vector)\n return\n\n def supercell(self, trans):\n \"\"\"\n Return a supercell of I x J x K\n\n Parameters\n ----------\n trans : array-like of length 3\n Multiplications of the primitive cell\n Returns\n -------\n supercell : Mol object\n New supercell with adjusted lattice vectors\n\n \"\"\"\n # make the input into a np array\n trans = np.array(trans)\n\n new_cell = self.empty_mol()\n for a_mult in range(trans[0]):\n for b_mult in range(trans[1]):\n for c_mult in range(trans[2]):\n vector = a_mult * \\\n self.vectors[0] + b_mult * \\\n self.vectors[1] + c_mult * self.vectors[2]\n new_atoms = Mol([i.v_translated(vector)\n for i in self.atoms])\n new_cell += new_atoms\n out_vec = (self.vectors.T * trans.transpose()).T\n new_cell.vectors = out_vec\n return new_cell\n\n def centered_supercell(self, trans, from_origin=False):\n \"\"\"\n Make a bigger supercell out of an input cell.\n\n The cell is multiplied positively and negatively through each lattice\n vector so that the supercluster ends up being\n (1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the\n input is 1,1,1 for a cubic unit cell, the output will be the original unit\n cell surrounded by 26 other unit cells forming a total 3x3x3 cube.\n\n Alternatively, the multiplication can be centered around the origin, a corner of the\n unit cell, instead of the centre. 
In that case the supercluster ends up being\n only (2*trans[0])*(2*trans[1])*(2*trans[2])\n\n Parameters\n ----------\n trans : numpy array of length 3\n Multiplications of the primitive cell\n from_origin : bool\n Determines the kind of multiplication. True is corner of the cell as\n the center, False is middle of the cell.\n\n Returns\n -------\n mega_cell : Mol object\n The resulting supercell\n\n \"\"\"\n trans_series = [0, 0, 0]\n for i, tra in enumerate(trans):\n if from_origin:\n trans_series[i] = list(range(-tra, tra))\n else:\n trans_series[i] = list(range(-tra, tra + 1))\n trans_series = np.array(trans_series)\n\n new_cell = self.empty_mol()\n for a_mult in trans_series[0]:\n for b_mult in trans_series[1]:\n for c_mult in trans_series[2]:\n vector = a_mult * \\\n self.vectors[0] + b_mult * \\\n self.vectors[1] + c_mult * self.vectors[2]\n new_atoms = Mol([i.v_translated(vector)\n for i in self.atoms])\n new_cell += new_atoms\n out_vec = (self.vectors.T * trans.transpose()).T\n new_cell.vectors = out_vec\n return new_cell\n\n def trans_from_rad(self, clust_rad):\n \"\"\"\n Generate the translations necessary to encapsulate a sphere of given rad\n\n Parameters\n ----------\n clust_rad : float\n Radius defining a sphere\n\n Returns\n -------\n trans_count : 3 x 1 numpy array\n The translations required for the unit cell to contain the sphere\n\n \"\"\"\n\n # determine how many unit cells we need\n vectors = deepcopy(self.vectors)\n\n # vectors normal to faces\n a_perp = np.cross(vectors[1], vectors[2])\n b_perp = np.cross(vectors[2], vectors[0])\n c_perp = np.cross(vectors[0], vectors[1])\n\n # the three normalised unit vectors\n perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /\n np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])\n\n trans_count = np.array([1, 1, 1])\n\n # distances from faces\n distances = np.array([0.0, 0.0, 0.0])\n\n new_vectors = deepcopy(vectors)\n\n for comp in range(3):\n while True:\n trans_count[comp] += 1\n distances[comp] = np.dot(new_vectors[comp], perp[comp])\n new_vectors[comp] = trans_count[comp] * vectors[comp]\n if distances[comp] > clust_rad:\n break\n trans_count -= np.array([1, 1, 1])\n return trans_count\n\n def make_cluster(self, clust_rad, mode = 'exc', central_mol = None):\n \"\"\"\n Generate a cluster of molecules from a primitive cell\n\n This first makes a supercell of the correct size which will contain with\n one additional buffer shell. Then the sphere is generated from this new\n supercell by connectivity.\n\n A central molecule can also be supplied which will turn the spheres\n defining the clusters into the union of spheres stemming from each atom\n of the central molecule.\n\n Parameters\n ----------\n clust_rad : float\n Radius defining a sphere. All molecules with atoms in the sphere are\n to be grabbed\n mode : str\n Switches between inclusive and exclusive selecting. 
Inclusive,\n 'inc', selects all molecules which have atoms within the radius.\n Exclusive, 'exc', selects all molecules fully in the radius.\n Default: false\n central_mol : Mol\n If this is supplied, the central molecule will act as a kernel for\n the cluster which will end up being of the appropriate shape.\n Returns\n -------\n cluster : Mol object\n Spherical cluster of molecules from their crystal positions\n\n \"\"\"\n # if there is a central mol, account for nearest neighbour molecules\n # bleeding out of the original radius\n if central_mol:\n central_rad = 0\n for atom in central_mol:\n dis = atom.v_dist([0,0,0])\n if dis < central_rad:\n central_rad = dis\n trans = self.trans_from_rad(clust_rad + central_rad)\n # get the translations necessary to enclose the required mols\n else:\n trans = self.trans_from_rad(clust_rad)\n # if the cluster is inclusive, then extra mols might be required from\n # an additional layer of the supercell\n if mode == 'inc':\n trans += np.array([1,1,1]) # one buffer cell layer\n supercell = self.centered_supercell(trans, from_origin=True)\n\n seed_atoms = Mol([])\n\n # get seedatoms in the shape of the central mol if pertinent\n if central_mol:\n for atom_i in supercell:\n for atom_j in central_mol:\n if atom_i.dist(atom_j) < clust_rad:\n seed_atoms.append(atom_i)\n break\n # get spherical seedatoms\n else:\n for atom in supercell:\n if atom.v_dist([0, 0, 0]) < clust_rad:\n seed_atoms.append(atom)\n\n \n max_mol_len = 0\n if mode == 'exc':\n while len(seed_atoms) > 0:\n mol = seed_atoms.select(0)\n if len(mol) > max_mol_len:\n max_mol_len = len(mol)\n clust_atoms = Mol([])\n if len(mol) == max_mol_len:\n clust_atoms += mol\n for atom in mol:\n seed_atoms.remove(atom)\n if mode == 'inc':\n clust_atoms = Mol([])\n max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))\n\n while len(seed_atoms) > 0:\n mol_tmp = seed_atoms.select(0) # The part of the mol detected in seed_atoms\n if len(mol_tmp) < max_mol_len:\n # The whole mol, which could potentially include even more seed_atoms\n mol = supercell.select(supercell.index(seed_atoms[0]))\n else:\n mol = mol_tmp\n clust_atoms += mol\n for atom in mol_tmp:\n seed_atoms.remove(atom)\n for atom in mol:\n supercell.remove(atom)\n # remove all atoms of the mol which are part of seed_atoms\n try:\n seed_atoms.remove(atom)\n except ValueError:\n pass\n\n return clust_atoms\n\n def remove_duplicates(self, thresh=0.001):\n \"\"\"Remove the duplicate atoms\"\"\"\n purged_mol = Mol([self.atoms[0]])\n for atom_a in self[1:]:\n unique = True\n for atom_b in purged_mol:\n if atom_a.very_close(atom_b, thresh=thresh):\n unique = False\n break\n if unique:\n purged_mol.append(atom_a)\n self.atoms = purged_mol\n return\n\n def dir_to_frac_pos(self):\n \"\"\"Move all atoms to fractional coordinates\"\"\"\n\n out_mol = self.copy()\n # transpose to get the transformation matrix\n M = np.transpose(self.vectors)\n # inverse transformation matrix\n U = np.linalg.inv(M)\n\n for atom in out_mol:\n # change of basis transformation\n dir_pos = atom.get_pos()\n frac_pos = np.dot(U, dir_pos)\n for i, coord in enumerate(frac_pos):\n # if the coordinate is out of range\n if coord < 0 or coord > 1:\n # translate it to the range [0,1]\n frac_pos[i] = coord % 1\n atom.set_pos(frac_pos)\n return out_mol\n\n def frac_to_dir_pos(self):\n \"\"\"Move all atoms to direct coordinates\"\"\"\n out_mol = self.copy()\n for atom in out_mol:\n new_pos = np.matmul(self.vectors.T, atom.get_pos())\n atom.set_pos(new_pos)\n\n return out_mol\n\n def 
confined(self):\n \"\"\"Move all atoms to fit inside the primitive cell\"\"\"\n frac_mol = self.dir_to_frac_pos()\n out_mol = frac_mol.frac_to_dir_pos()\n\n return out_mol\n\n def centered_mols(self, labels, return_trans = False):\n \"\"\"\n Return the molecules translated at the origin with a corresponding cell\n\n Parameters\n ----------\n labels : int or list of ints\n The labels of the atoms to select\n print_centro : bool\n Print the translation vector which was detected as -centroid\n Returns\n -------\n mol : Mol object\n The selected molecules with their centroid at the origin\n mod_cell : Mol object\n The new confined cell corresponding to the now translated molecules\n\n \"\"\"\n mol, mod_cell = self.complete_mol(labels)\n centro = mol.centroid()\n mol.translate(-centro)\n mod_cell.translate(-centro)\n mod_cell = mod_cell.confined()\n\n if return_trans:\n return mol, mod_cell, -centro\n else:\n return mol, mod_cell\n\n\n def es_pot(self, position):\n \"\"\"\n Return the electorstatic potential generated by this Mol\n\n Parameters\n ----------\n position : 3x1 np array\n The point at which the potential should be evaluated\n Returns\n -------\n tot_pot : float\n The total potential\n\n \"\"\"\n tot_pot = 0\n for atom in self:\n tot_pot += atom.es_pot(position)\n return tot_pot\n\n def change_charges(self, charges):\n \"\"\"\n Change all of the charges of the constituent atoms at once\n\n Parameters\n ----------\n charges : array-like of floats\n Contains all of the new charges. IMPORTANT: they need to be in the\n order corresponding to self.atoms\n\n \"\"\"\n for i, atom in enumerate(self.atoms):\n atom.q = charges[i]\n return\n\n def charges(self):\n \"\"\"Return an array of charges\"\"\"\n l_char = []\n for atom in self.atoms:\n l_char.append(atom.q)\n arr_char = np.array(l_char)\n return arr_char\n\n def raw_assign_charges(self, charges):\n \"\"\"Assign the charges from an array-like to the atoms\"\"\"\n for char,at in zip(charges,self.atoms):\n at.q=char\n return\n\n def populate(self, reference_mol):\n \"\"\"\n Assign charges to the Mol by comparing to the connectivity of a\n reference\n\n Parameters\n ----------\n reference_mol : Mol object\n Charged molecule or cell\n\n \"\"\"\n # This is a naughty in-function import to prevent a circular dependency.\n # The reason is that assign_charges functions are grouped up with the\n # executable script which needs to read_file and in turn use mol.py\n # Some careful refactoring should fix this\n import fromage.scripts.assign_charges as ac\n ac.assign_charges(reference_mol, self)\n pass\n","sub_path":"fromage/utils/mol.py","file_name":"mol.py","file_ext":"py","file_size_in_byte":27343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
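The mol.py record above builds superclusters by counting how many lattice translations are needed before a sphere of a given radius is enclosed (trans_from_rad, then centered_supercell). The following standalone sketch illustrates that counting idea with plain numpy; the lattice vectors and radius are hypothetical example values, not data taken from the record.

import numpy as np

def translations_for_radius(vectors, clust_rad):
    """Count cell repeats per axis needed so the cell faces clear a sphere of radius clust_rad."""
    # unit normals of the three pairs of cell faces
    perp = np.array([
        np.cross(vectors[1], vectors[2]),
        np.cross(vectors[2], vectors[0]),
        np.cross(vectors[0], vectors[1]),
    ])
    perp = perp / np.linalg.norm(perp, axis=1)[:, None]
    counts = np.ones(3, dtype=int)
    for i in range(3):
        # add repeats until the stack of cells projects past the radius
        while np.dot(counts[i] * vectors[i], perp[i]) <= clust_rad:
            counts[i] += 1
    return counts

if __name__ == "__main__":
    cell = np.array([[10.0, 0.0, 0.0],
                     [0.0, 12.0, 0.0],
                     [0.0, 0.0, 8.0]])
    print(translations_for_radius(cell, 25.0))  # -> [3 3 4] for this example cell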
+{"seq_id":"442725203","text":"from django.urls import path\n\nfrom .views import (CardCreateView, DeckListView, CardListView, DeckUpdateView,\n CardUpdateView, CardDeleteView, DeckDeleteView, deck_create)\n\napp_name = 'words'\n\nurlpatterns = [\n path('decks/add/', deck_create, name='deck_add'),\n path('decks/', DeckListView.as_view(), name='deck_list'),\n path('\"\"\")\n\n html.append(\"\"\"\n \"\"\")\n\n for t1 in cst.tous_les_types:\n html.append(\"\"\"\n \"\"\")\n for t in cst.tous_les_types:\n if len(t) > 0:\n html.append(\"\"\"\n {} \"\"\".format(t.title()))\n\n html.append(\"\"\"\n \"\"\")\n if len(t1) > 0:\n html.append(\"\"\"\n \"\"\")\n\n html.append(\"\"\"\n {} \"\"\".format(t1.title()))\n else:\n html.append(\"\"\"\n Empty \"\"\")\n\n for t2 in cst.tous_les_types:\n if len(t2) > 0:\n if len(t1) > 0:\n nombre = len(cst.combinaisons_types[(t1, t2)])\n else:\n nombre = len(cst.combinaisons_types[(t2, \"\")])\n if nombre > 0:\n couleur = calcule_couleur_dégradé(\n nombre, cst.nombre_pokemon_maxi)\n # url = Universal Remote Location\n if len(t1) > 0:\n url = \"/categories?type1={}&type2={}\".format(t1, t2)\n else:\n url = \"/type/{}\".format(t2)\n html.append(\n \"\"\"\n {} \"\"\".format(url, couleur, nombre))\n else:\n html.append(\"\"\"\n \"\"\")\n\n html.append(\"\"\"\n
', '') for w in versions_list]\n versions_list = [w.replace('', '') for w in versions_list]\n\n return versions_list\n\n################################################################\n","sub_path":"crawlers/JetReports.py","file_name":"JetReports.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"599546767","text":"from time import sleep\nfrom selenium import webdriver\nclass Test_File:\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n def teardown(self):\n self.driver.quit()\n def test_file(self):\n self.driver.get('https://www.baidu.com/')\n self.driver.find_element_by_xpath('//*[@id=\"form\"]/span[1]/span[1]').click() #点击按图片搜索按钮\n ele = self.driver.find_element_by_xpath('//*[@id=\"form\"]/div/div[2]/div[2]/input') #定位到上传文件按钮\n ele.send_keys('/home/ouyi/goods02.jpg') #上传图片\n sleep(3)","sub_path":"web/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"347447346","text":"import string\nclass Level:\n '''\n Parse maps.\n '''\n def __init__(self,lines):\n GOAL = '.'\n GOAL_AND_STONE = '@'\n STONE = '$'\n BLOCK = '#'\n PLAYER = string.digits\n PLAYER_AND_GOAL = string.ascii_uppercase\n\n self.lines = lines\n self.M = len(self.lines);self.N = max([len(line) for line in lines])\n self.at_player = {}\n self.is_goal = {}\n self.at_stone = {}\n self.is_clear = {}\n self.stone_num = 1\n for i in range(1,len(self.lines)+1):\n line = lines[i-1]\n for j in range(1,len(line)+1):\n c = line[j-1]\n if c in GOAL_AND_STONE:\n self.is_goal[(i,j)] = True\n self.at_stone[(i,j)] = self.stone_num\n self.stone_num += 1\n elif c in GOAL:\n self.is_goal[(i,j)] = True\n self.is_clear[(i,j)] = True\n elif c in STONE:\n self.at_stone[(i,j)] = self.stone_num\n self.stone_num += 1\n elif c in PLAYER_AND_GOAL:\n self.is_goal[(i,j)] = True\n self.at_player[(i,j)] = int(ord(c) - ord('A'))+1\n elif c in PLAYER:\n self.at_player[(i,j)] = int(c)\n elif not c in BLOCK:\n self.is_clear[(i,j)] = True\n","sub_path":"clients/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"175565533","text":"import os\nimport shutil\n\ngetOSType = os.name\n\ndef gettingDataFromTheWindows():\n file = \"C:/Users/HP/Desktop/in\"\n file2 = \"C:/Users/HP/Desktop\"\n list = os.listdir(file)\n list2 = os.listdir(file2)\n for i in list:\n shutil.copy2(file + \"/\" + i, \"C:/Users/HP/Desktop/out\")\n\n for i in list2:\n if i.endswith(\".zip\"):\n shutil.copy2(file2 + \"/\" + i, \"C:/Users/HP/Desktop/out\")\n os.remove(file2 + \"/\" + i)\n else:\n continue\n\n\ndef copyToExternalDirection():\n file1 = \"C:/Users/HP/Desktop/out\"\n list1 = os.listdir(file1)\n\n for i in list1:\n if not os.path.exists(\"G:/NewFile\"):\n os.makedirs(\"G:/NewFile\")\n shutil.copy2(file1 + \"/\" + i, \"G:/NewFile\")\n else:\n shutil.copy2(file1 + \"/\" + i, \"G:/NewFile\")\n\nif str(getOSType) == \"posix\":\n print(\"MacOS\")\n\nelif str(getOSType) == \"nt\":\n print(\"Windows\")\n #gettingDataFromTheWindows()\n copyToExternalDirection()\n","sub_path":"SpywareTest_0.0.01.py","file_name":"SpywareTest_0.0.01.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"118115143","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nimport datetime\nfrom openerp.exceptions import Warning\n\nSELECTION_MONTH = []\n\nfor month in range(1, 13):\n if month < 10:\n value = '0%d' % month\n else:\n value = '%d' % month\n SELECTION_MONTH.append((month, value))\n\n\nclass jj_workplan(models.Model):\n _name = 'jj_workplan.jj_workplan'\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n\n # 查表找到当前用户\n def _employee_get(self):\n resource = self.env['resource.resource'].search([('user_id', '=', self.env.user.id)])\n employee = self.env['hr.employee'].search([('resource_id', '=', resource.id)])\n return employee\n\n # 查表找到当前用户所在部门\n def _department_get(self):\n return self._employee_get().department_id\n\n # 实时计算工作计划进度条\n @api.depends('time_schedule')\n def _compute_time(self):\n if self.plan_start_date and self.plan_end_date:\n plan_start_date = datetime.datetime.strptime(self.plan_start_date, '%Y-%m-%d %H:%M:%S')\n plan_end_date = datetime.datetime.strptime(self.plan_end_date, '%Y-%m-%d %H:%M:%S')\n estimate_delta = plan_end_date - plan_start_date\n estimate_before = estimate_delta.days * 24 * 60 * 60 + estimate_delta.seconds\n if self.actual_finish_time:\n actual_finish_time = datetime.datetime.strptime(self.actual_finish_time, '%Y-%m-%d %H:%M:%S')\n actual_delta = actual_finish_time - plan_start_date\n actual_before = actual_delta.days * 24 * 60 * 60 + actual_delta.seconds\n self.time_schedule = float(actual_before) / float(estimate_before) * 100\n else:\n now_delta = datetime.datetime.now() - plan_start_date\n now_before = now_delta.days * 24 * 60 * 60 + now_delta.seconds\n self.time_schedule = float(now_before) / float(estimate_before) * 100\n\n # 判断当前用户是否是创建用户\n # 如果是 在根据状态来让能修改的地方\n def _is_employee(self):\n if self.env.user == self.employee_id.user_id:\n if self.state == 'Draft':\n self.is_employee = 'Draft'\n if self.state == 'Conduct':\n self.is_employee = 'Conduct'\n else:\n self.is_employee = 'False'\n\n name = fields.Char(string=\"事项\", required=True)\n employee_id = fields.Many2one('hr.employee', string=\"创建人\", readonly=True, default=_employee_get)\n department_id = fields.Many2one('hr.department', string=\"部门\", default=_department_get)\n plan_start_date = fields.Datetime(string=\"计划开始时间\", required=True)\n plan_end_date = fields.Datetime(string=\"计划结束时间\", required=True)\n actual_finish_time = fields.Datetime(string=\"实际完成时间\")\n description = fields.Text(string=\"描述\")\n is_employee = fields.Char(default='Draft', compute=_is_employee)\n cc = fields.Many2many('res.users', string='抄送')\n time_schedule = fields.Integer(string='所耗时间占比', compute=_compute_time)\n state = fields.Selection([('Draft', '草稿'), ('Conduct', '进行中'), ('Done', '完成')], default=\"Draft\", string='状态')\n month = fields.Selection(SELECTION_MONTH, string='月份',\n default=datetime.datetime.today().month,\n automatic=True, required=True)\n\n # 发消息\n @api.multi\n def send_followers(self, body):\n followers = [x.partner_id.id for x in self.message_follower_ids]\n self.message_post(body=body, type=\"notification\", subtype=\"mt_comment\", parnter_ids=followers)\n return True\n\n @api.model\n def create(self, vals):\n # 开始时间最多能提前5个小时\n if datetime.datetime.strptime(vals['plan_start_date'],\n '%Y-%m-%d %H:%M:%S') < datetime.datetime.now() - datetime.timedelta(\n hours=5) or datetime.datetime.strptime(vals['plan_start_date'],\n '%Y-%m-%d %H:%M:%S') > datetime.datetime.strptime(\n vals['plan_end_date'], '%Y-%m-%d %H:%M:%S'):\n raise Warning('时间不正确')\n 
vals['state'] = 'Conduct'\n self.send_followers(\"开始\")\n a = self.search([('id', '!=', 0)], order='id desc', limit=1)\n # 将授权用户拉入关注中\n if len(vals['cc']) > 0:\n for u in vals['cc'][0][-1]:\n res_users = self.env['res.users'].search([('id', '=', u)])\n mail = {u'partner_id': res_users.partner_id.id,\n u'res_model': 'jj_workplan.jj_workplan',\n u'res_id': a.id + 1}\n self.env['mail.followers'].sudo().create(mail)\n return super(jj_workplan, self).create(vals)\n\n @api.multi\n def write(self, vals):\n # 修改预计时间后 发消息\n if self.plan_end_date:\n if vals.get('plan_end_date', False):\n context = str(datetime.datetime.strptime(self.plan_end_date, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(\n hours=8)) + '更改为' + str(\n datetime.datetime.strptime(vals['plan_end_date'], '%Y-%m-%d %H:%M:%S') + datetime.timedelta(\n hours=8))\n self.send_followers('预计完成时间由' + context)\n # 如果新增授权用户 拉入关注中\n if 'cc' in vals.keys():\n if vals['cc'][0][-1]:\n for u in vals['cc'][0][-1]:\n res_users = self.env['res.users'].search([('id', '=', u)])\n mail = self.env['mail.followers'].search(\n [('partner_id', '=', res_users.partner_id.id), ('res_model', '=', 'jj_workplan.jj_workplan'),\n ('res_id', '=', self.id)])\n if not mail:\n mail_followers = {u'res_model': 'jj_workplan.jj_workplan',\n u'res_id': self.id,\n u'partner_id': res_users.partner_id.id\n }\n self.env['mail.followers'].sudo().create(mail_followers)\n return super(jj_workplan, self).write(vals)\n\n # @api.one\n # def submit_button(self):\n # \tself.state = 'Conduct'\n # \tself.send_followers(\"开始\")\n\n @api.one\n def done_button(self):\n if self.time_schedule > 100:\n over_time = '超时' + str(int(self.time_schedule) - 100) + '%'\n else:\n over_time = 'good,在预期内完成了任务。'\n self.actual_finish_time = datetime.datetime.now()\n self.state = 'Done'\n self.send_followers(\"完成\" + \"\\n\" + over_time)\n","sub_path":"jj_workplan/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"573244849","text":"# https://www.kaggle.com/chmaxx/quick-regression/code\n# https://www.kaggle.com/chmaxx/train-12-regressors-with-just-one-line-of-code\n\nimport numpy as np \nimport pandas as pd \n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import make_pipeline\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import FunctionTransformer\n\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.svm import LinearSVR\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\n\nimport xgboost as xgb\nimport lightgbm as lgb\n\nimport time\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n\ndef log_transform(x):\n return np.log1p(x)\n\ndef inverse_log_transform(x):\n return np.expm1(x)\n\n\ndef get_classifiers():\n\n \"\"\"\n Provide lists of regression classifiers and their names.\n \"\"\"\n n_jobs = -1\n random_state = 1\n\n classifiers = [\n DummyRegressor(), \n LinearRegression(n_jobs=n_jobs), \n Ridge(random_state=random_state), \n Lasso(random_state=random_state), \n ElasticNet(random_state=random_state),\n KernelRidge(),\n SGDRegressor(random_state=random_state),\n SVR(kernel=\"linear\"),\n LinearSVR(random_state=1),\n DecisionTreeRegressor(random_state=random_state),\n RandomForestRegressor(n_jobs=n_jobs, random_state=random_state),\n GradientBoostingRegressor(random_state=random_state),\n lgb.LGBMRegressor(n_jobs=n_jobs, random_state=random_state),\n xgb.XGBRegressor(objective=\"reg:squarederror\", n_jobs=n_jobs, random_state=random_state),\n ]\n\n clf_names = [\n \"DummyRegressor \",\n \"LinearRegression \", \n \"Ridge \",\n \"Lasso \",\n \"ElasticNet \",\n \"KernelRidge \",\n \"SGDRegressor \",\n \"SVR \",\n \"LinearSVR \",\n \"DecisionTreeRegressor\",\n \"RandomForest \", \n \"GBMRegressor \", \n \"LGBMRegressor \", \n \"XGBoostRegressor \",\n ]\n\n return clf_names, classifiers\n\n\n\ndef prepare_data(df, target_name):\n\n \"\"\"\n Separate descriptive variables and target variable.\n Separate numerical and categorical columns.\n \"\"\"\n\n if target_name is not None:\n X = df.drop(target_name, axis=1)\n y = df[target_name]\n else:\n X = df\n y = None\n\n # get list of numerical & categorical columns in order to process these separately in the pipeline \n num_cols = X.select_dtypes(\"number\").columns\n cat_cols = X.select_dtypes(\"object\").columns\n \n return X, y, num_cols, cat_cols\n\n\ndef get_pipeline(classifier, num_cols, cat_cols, impute_strategy, log_x, log_y):\n\n \"\"\"\n Create Pipeline with a separate pipe for categorical and numerical data.\n Automatically impute missing values, scale and then one hot encode.\n \"\"\"\n\n # the numeric transformer gets the numerical data acording to num_cols\n # first step: the imputer imputes all missing values to the provided strategy 
argument\n # second step: all numerical data gets stanadard scaled \n if log_x == False:\n numeric_transformer = Pipeline(steps=[\n ('imputer', make_pipeline(SimpleImputer(strategy=impute_strategy))),\n ('scaler', StandardScaler())])\n # if log_x is \"True\" than log transform feature values\n else:\n numeric_transformer = Pipeline(steps=[\n ('imputer', make_pipeline(SimpleImputer(strategy=impute_strategy))),\n ('log_transform', FunctionTransformer(np.log1p)),\n ('scaler', StandardScaler()),\n ])\n \n # the categorical transformer gets all categorical data according to cat_cols\n # first step: imputing missing values\n # second step: one hot encoding all categoricals\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n \n # the column transformer creates one Pipeline for categorical and numerical data each\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, num_cols),\n ('cat', categorical_transformer, cat_cols)])\n \n # return the whole pipeline for the classifier provided in the function call\n if log_y == False:\n return Pipeline(steps=[('preprocessor', preprocessor), ('classifier', classifier)])\n # if log_y is \"True\" than use a TransformedTargetRegressor with log and inverse log functions for \"y\"\n else:\n transformed_classifier = TransformedTargetRegressor(regressor=classifier, \n func=log_transform, inverse_func=inverse_log_transform)\n return Pipeline(steps=[('preprocessor', preprocessor), ('classifier', transformed_classifier)])\n\n\ndef score_models(df, target_name, sample_size=None, \n impute_strategy=\"mean\", scoring_metric=\"r2\", log_x=False, log_y=False, verbose=True):\n\n \"\"\"\n This function yields error scores for a large variety of common regression classifiers on provided training data. \n\n Function separates numerical and categorical data based on dtypes of the dataframe columns. Missing values are imputed. Categorical data is one hot encoded, numerical data standard scaled. 
All classifiers are used with default settings and crossvalidated.\n \n The function returns a dataframe with error scores of all classifiers as well as the mean of all results in the last row of the dataframe.\n\n Parameters\n ----------\n df : Pandas dataframe \n Pandas dataframe with your training data\n target_name : str\n Column name of target variable\n sample_size : int, default \"None\" (score on all available samples)\n Number of samples for scoring the model\n impute_strategy : str, default \"mean\" \n Strategy for SimpleImputer, can be \"mean\" (default), \"median\", \"most_frequent\" or \"constant\"\n scoring_metric : str, default \"r2\"\n scoring metric for regressor: \"r2\" (default), \"explained_variance\", \"max_error\", \n \"neg_mean_absolute_error\", \"neg_mean_squared_error\", \"neg_mean_squared_log_error\", \"neg_median_absolute_error\"\n log_x : bool, default \"False\" \n Log transform features variable(s)\n log_y : bool, default \"False\" \n Log transform target variable\n verbose : bool, default \"True\" \n Print results during crossvalidation\n \n Returns\n -------\n DataFrame\n 1st column : Name of classifier\n 2nd column : scoring result\n\n Example\n -------\n X, y = sklearn.datasets.make_regression()\n X, X_test, y, _ = train_test_split(X, y)\n\n df = pd.DataFrame(X)\n df[\"target_variable\"] = y\n df_test = pd.DataFrame(X_test)\n\n scores_dummy = score_models(df, \"target_variable\")\n display(scores_dummy)\n \n # further use: train and predict\n pipelines = train_models(df, \"target_variable\")\n predictions = predict_from_models(df_test, pipelines)\n predictions.head()\n\n \"\"\"\n\n \n if sample_size is not None:\n df = df.sample(sample_size)\n \n # retrieve X, y and separated columns names for numerical and categorical data\n X, y, num_cols, cat_cols = prepare_data(df, target_name)\n\n scores = []\n\n clf_names, classifiers = get_classifiers()\n if verbose == True:\n print(f\"Classifier Metric ({scoring_metric})\")\n print(\"-\"*30)\n for clf_name, classifier in zip(clf_names, classifiers):\n start_time = time.time()\n \n # create a pipeline for each classifier\n clf = get_pipeline(classifier, num_cols, cat_cols, impute_strategy, log_x, log_y)\n \n # crossvalidate classifiers on training data\n cv_score = cross_val_score(clf, X, y, cv=3, scoring=scoring_metric)\n \n if verbose == True:\n print(f\"{clf_name} {cv_score.mean(): .4f} | {(time.time() - start_time):.2f} secs\")\n \n scores.append([clf_name.strip(), cv_score.mean()])\n\n scores = pd.DataFrame(scores, columns=[\"Classifier\", scoring_metric]).sort_values(scoring_metric, ascending=False)\n \n # just for good measure: add the mean of all scores to dataframe\n scores.loc[len(scores) + 1, :] = [\"mean_all\", scores[scoring_metric].mean()]\n\n return scores.reset_index(drop=True)\n \n\n\ndef train_models(df, target_name, \n impute_strategy=\"mean\", log_x=False, log_y=False, verbose=True): \n\n \"\"\"\n This function trains a large variety of common regression classifiers on provided training data. It separates numerical and categorical data based on dtypes of the dataframe columns. Missing values are imputed. Categorical data is one hot encoded, numerical data standard scaled. 
Each classifier is then trained with default settings.\n \n The function returns a list of fitted scikit-learn Pipelines.\n\n Parameters\n ----------\n df : Pandas dataframe \n Pandas dataframe with your training data\n target_name : str\n Column name of target variable\n sample_size : int, default \"None\" (score on all available samples)\n Number of samples for scoring the model\n impute_strategy : str, default \"mean\" \n Strategy for SimpleImputer, can be \"mean\" (default), \"median\", \"most_frequent\" or \"constant\"\n log_x : bool, default \"False\" \n Log transform features variable(s)\n log_y : bool, default \"False\" \n Log transform target variable\n verbose : bool, default \"True\" \n Print results during crossvalidation\n \n Returns\n -------\n List of fitted scikit-learn Pipelines\n\n Example:\n X, y = sklearn.datasets.make_regression()\n X, X_test, y, _ = train_test_split(X, y)\n\n df = pd.DataFrame(X)\n df[\"target_variable\"] = y\n df_test = pd.DataFrame(X_test)\n\n scores_dummy = score_models(df, \"target_variable\")\n display(scores_dummy)\n \n pipelines = train_models(df, \"target_variable\")\n\n # further use: predict from pipelines\n predictions = predict_from_models(df_test, pipelines)\n predictions.head()\n \n \"\"\"\n\n X, y, num_cols, cat_cols = prepare_data(df, target_name)\n\n pipelines = []\n\n if verbose == True:\n print(f\"Classifier Training time\")\n print(\"-\"*35)\n \n clf_names, classifiers = get_classifiers()\n for clf_name, classifier in zip(clf_names, classifiers):\n start_time = time.time()\n clf = get_pipeline(classifier, num_cols, cat_cols, impute_strategy, log_x, log_y)\n clf.fit(X, y)\n if verbose == True:\n print(f\"{clf_name} {(time.time() - start_time):.2f} secs\")\n pipelines.append(clf)\n \n return pipelines\n\n\n\ndef predict_from_models(df_test, pipelines):\n\n \"\"\"\n This function makes predictions with a list of pipelines. Test data is treated in the same pipeline the classifiers were trained on. \n \n The function returns a dataframe with all predictions ordered columnwise. Each column is named with the respective classifiers.\n\n Parameters\n ----------\n df_test : Pandas dataframe \n Dataframe with test data\n pipelines: array\n List of scikit-learn pipelines (preferably from train_models())\n\n Returns\n -------\n Pandas dataframe with prediction from each classifier, ordered columnwise. \n 1 column = results of 1 classifier.\n \n Example:\n X, y = sklearn.datasets.make_regression()\n X, X_test, y, _ = train_test_split(X, y)\n\n df = pd.DataFrame(X)\n df[\"target_variable\"] = y\n df_test = pd.DataFrame(X_test)\n\n scores_dummy = score_models(df, \"target_variable\")\n display(scores_dummy)\n \n pipelines = train_models(df, \"target_variable\")\n\n # further use: predict from pipelines\n predictions = predict_from_models(df_test, pipelines)\n predictions.head()\n \n \"\"\"\n \n X_test, _ , _, _ = prepare_data(df_test, None)\n predictions = []\n \n for pipeline in pipelines:\n preds = pipeline.predict(X_test)\n predictions.append(preds)\n \n df_predictions = pd.DataFrame(predictions).T\n clf_names, _ = get_classifiers()\n df_predictions.columns = [clf_name.strip() for clf_name in clf_names]\n\n return df_predictions\n","sub_path":"Scripts/quick_regression/quick_regression.py","file_name":"quick_regression.py","file_ext":"py","file_size_in_byte":13338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
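quick_regression.py above optionally wraps every classifier in a TransformedTargetRegressor so that the target is fitted in log space (log_transform / inverse_log_transform). Below is a minimal, self-contained sketch of that pattern; the synthetic dataset and the Ridge pipeline are arbitrary stand-ins, not the module's own defaults.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_regression(n_samples=300, n_features=10, noise=5.0, random_state=0)
y = np.abs(y) + 1.0  # keep the target positive so log1p/expm1 are safe

model = TransformedTargetRegressor(
    regressor=make_pipeline(StandardScaler(), Ridge()),
    func=np.log1p,           # applied to y before fitting
    inverse_func=np.expm1,   # applied to predictions afterwards
)
print(cross_val_score(model, X, y, cv=3, scoring="r2").mean())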
+{"seq_id":"232733970","text":"import json\nimport logging\nfrom datetime import datetime, timezone\n\nimport requests\n\nfrom config import config\n\nlogger = logging.getLogger()\n\n\ndef _get_mac():\n\ttry:\n\t\ttry:\n\t\t\tmac = open(\"/sys/class/net/ppp0/address\").read()\n\t\texcept:\n\t\t\tmac = open(\"/sys/class/net/eth0/address\").read()\n\texcept:\n\t\tmac = \"00:00:00:00:00:00\"\n\treturn mac[0:17]\n\n\ndef send_data(sensor_id, timestamp, value):\n\t\"\"\"\n\tSends data was read from sensor to M4M server\n\n\t:type sensor_id int\n\t:param sensor_id: sensor id\n\t:type timestamp: datetime.datetime or str\n\t:type value: dict\n\t:param value: Value to send\n\t\"\"\"\n\n\tif not isinstance(timestamp, str):\n\t\tif isinstance(timestamp, datetime):\n\t\t\ttimestamp = timestamp.replace(tzinfo=timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S\")\n\t\telse:\n\t\t\traise Exception(\"timestamp argument must be string or datetime\")\n\n\tdata = {\n\t\t'controller_mac': _get_mac(),\n\t\t'sensor_id': int(sensor_id),\n\t\t'value': json.dumps(value),\n\t\t'hash': \"some_hash\",\n\t\t'timestamp': timestamp,\n\t}\n\tresponse = requests.post('{}/sensor.addRecord'.format(config['m4m_server']['receiver_uri']), json=data)\n\tif response.status_code != 200:\n\t\tlogger.error(\n\t\t\t\"Failed to send data %s to the M4M server. Status code: %s. Response: \",\n\t\t\ttimestamp,\n\t\t\tresponse.status_code,\n\t\t\tresponse.text,\n\t\t)\n\t\traise Exception(\"Failed to send data to the M4M server\")\n\tlogger.info(\"Data %s was send to the M4M server\", timestamp)\n","sub_path":"m4m_client/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"453519508","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClean up Titanic files\n\"\"\"\n\nimport pandas as pd\n\ndef process_file(infilename, outfilename,\n add_survival=False):\n # Open up the csv file in to a Python object\n df = pd.read_csv(infilename, header=0)\n df.info()\n \n if add_survival:\n names = map(lambda name: name.replace('\"', ''), df_all.name)\n survived_dict = dict(zip(names, df_all.survived))\n cols = ['Survived'] + list(df.keys())\n df['Survived'] = df['Name'].map(lambda name: survived_dict[name.replace('\"', '')])\n df = df.reindex(columns=cols)\n \n # Remap sex to binary value\n df['Sex'] = df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)\n \n # Remap embarked to integer value\n df = df[df['Embarked'].notnull()]\n df['Embarked'] = df['Embarked'].map( {'C': 0, 'Q': 1, 'S': 2} ).astype(int)\n \n # Drop features\n df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)\n \n # Filter missing values\n #for key in df.keys():\n # df = df[df[key].notnull()]\n df = df.dropna()\n df.info()\n \n # Write out to file\n df.to_csv(outfilename, index=False)\n\ndf_all = pd.read_excel('../titanic_data/titanic3.xls', header=0)\n\nprocess_file('../titanic_data/kaggle/train.csv', '../data/titanic_train.csv')\nprocess_file('../titanic_data/kaggle/test.csv', '../data/titanic_test.csv')\nprocess_file('../titanic_data/kaggle/test.csv', '../data/titanic_test.answers.csv', add_survival=True)","sub_path":"CS158/Projects/ps2/ps2/source/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"434787404","text":"import numpy as np\nimport statistics\nimport math\nimport torch\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom DataLoading import *\nfrom Preprocessing import *\nfrom SiameseNetwork import *\n\n#trial = 3 # trial 0-3\n#kid = 24 # kids from 0-44\n#move = 0 # moves from 0-17 or 0-13\n#seq = 1 #Sequence 1 or 3\n\nmask_testGold = np.load('../../dance_results/mask_testGold.npy')\n#mask_testChild = np.load('../../dance_results/mask_testChild.npy')\n#mask_trainChild = np.load('../../dance_results/mask_trainChild.npy')\nmask_trainGold = np.load('../../dance_results/mask_trainGold.npy')\ntestChild = np.load('../../dance_results/padded_testChild.npy')\ntestGold = np.load('../../dance_results/padded_testGold.npy')\ntrainChild = np.load('../../dance_results/padded_trainChild.npy')\ntrainGold = np.load('../../dance_results/padded_trainGold.npy')\ntrainScores = np.load('../../dance_results/trainScores.npy')\ntestScores = np.load('../../dance_results/testScores.npy')\n\n#revtestChild = np.flip(testChild, 1)\n#newtestChild = np.concatenate((testChild, revtestChild))\n\n#revtestGold = np.flip(testGold, 1)\n#newtestGold = np.concatenate((testGold, revtestGold))\n\n#revtrainChild = np.flip(trainChild, 1)\n#newtrainChild = np.concatenate((trainChild, revtrainChild))\n\n#revtrainGold = np.flip(trainGold, 1)\n#newtrainGold = np.concatenate((trainGold, revtrainGold))\n\n#revmask_trainGold = np.flip(mask_trainGold, 1)\n#newtrainGold = np.concatenate((mask_trainGold, revmask_trainGold))\n\n#revmask_testGold = np.flip(mask_testGold, 1)\n#newtestGold = np.concatenate((mask_testGold, revmask_testGold))\n\n#revtestScores = np.flip(testScores, 0)\n#revtrainScores = np.flip(trainScores, 0)\n#newtestScores = np.concatenate((testScores, revtestScores))\n#newtrainScores = np.concatenate((trainScores, revtrainScores))\n\n\n#train_data = myDataset(newtrainChild, newtrainGold, newmask_trainGold, newtrainScores)\n#test_data = myDataset(newtestChild, newtestGold, newmask_testGold, newtestScores)\n\ntrain_data = myDataset(trainChild, trainGold, mask_trainGold, trainScores)\ntest_data = myDataset(testChild, testGold, mask_testGold, testScores)\n\n###################\n# Hyper Parameters#\n###################\nbatch_size = 25\nhidden_size = 64\nlr_rate = .001\n\n# train and testing parameters for batching\ntrain_params = {'batch_size': batch_size,\n\t\t'shuffle': True,\n\t\t'num_workers': 0}\ntest_params = {'batch_size': batch_size,\n\t\t'shuffle': False,\n\t\t'num_workers': 0}\n\n#batch generators\ntrain_gen = DataLoader(train_data, **train_params)\ntest_gen = DataLoader(test_data, **test_params)\n\nhyper_params = {'input_size': 60,\n\t\t'hidden_size': hidden_size,\n\t\t'batch_size': batch_size}\n\n#device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#intializing my model of Siamese NN\nmymodel = mySiameseNetwork(**hyper_params)\nmymodel = mymodel.to(device)\n\n# defining metric loss and optimizer\ncriterion = myContrastiveLoss()\n#criterion = myBasicLoss()\noptimizer = torch.optim.Adam(mymodel.parameters(), lr=lr_rate)\n\nnum_epochs = 50\n\n#saving intermediate infromation\nlosses = []\ntrain_accuracies = []\ntest_accuracies = []\n\nfor i in range(num_epochs):\n\tmymodel.train(True)\n\tfor child, gold, weights, scores in train_gen:\n\t\tchild = child.to(device)\n\t\tgold = gold.to(device)\n\t\tscores = scores.to(device)\n\t\t#clearing gradients\n\t\toptimizer.zero_grad()\n\t\t#forward pass\n\t\toutput1, output2= mymodel(child, 
gold)\n\t\toutput1 = torch.stack([output1[j, w-1, :] for j, w in enumerate(weights)])\n\t\toutput2 = torch.stack([output2[j, w-1, :] for j, w in enumerate(weights)])\n\t\t# Use the weight matrix\n\t\tloss, y_pred = criterion(output1, output2, scores)\n\t\tacc = newaccuracy(y_pred, scores)\n\t\tloss.backward()\n\t\toptimizer.step()\n\tprint(y_pred.t(), scores)\t\n\tmymodel.train(False)\n\tfor child, gold, weights, scores in train_gen:\n\t\tchild = child.to(device)\n\t\tgold = gold.to(device)\n\t\tscores = scores.to(device)\n\t\toutput1, output2 = mymodel(child, gold)\n\t\toutput1 = torch.stack([output1[j, w-1, :] for j, w in enumerate(weights)])\n\t\toutput2 = torch.stack([output2[j, w-1, :] for j, w in enumerate(weights)])\n\t\tloss, y_pred = criterion(output1, output2, scores)\n\t\ttest_acc = newaccuracy(y_pred, scores)\n\n\tprint(\"Epoch number {}\\n Current loss {}\\n Current accuracy {}\\n Test accuracy {}\".format(i+1, loss.item(), acc, test_acc))\n\ttrain_accuracies.append(acc)\n\tlosses.append(loss.item())\n\ttest_accuracies.append(test_acc)\ntest = 1\nnp.save('../../dance_results/losses{}'.format(test), losses)\t\nnp.save('../../dance_results/acc{}'.format(test), train_accuracies)\nnp.save('../../dance_results/testacc{}'.format(test), test_accuracies)\ntorch.save(mymodel.state_dict(), 'noAugmentDataSNN{}'.format(test))\n","sub_path":"Dance_Imitation/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
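In the Siamese training loop above, torch.stack combined with the per-sample lengths ("weights") pulls each sequence's last valid timestep out of the padded RNN output. A small self-contained illustration of that indexing, with arbitrary stand-in shapes rather than the dance data:

import torch

batch, seq_len, hidden = 4, 7, 3
output = torch.arange(batch * seq_len * hidden, dtype=torch.float32).view(batch, seq_len, hidden)
lengths = torch.tensor([7, 3, 5, 1])  # number of valid (unpadded) timesteps per sequence
last_steps = torch.stack([output[i, l - 1, :] for i, l in enumerate(lengths)])
print(last_steps.shape)  # torch.Size([4, 3])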
+{"seq_id":"273081635","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n\nimport time\nimport pandas as pd\nimport operator\nimport subprocess\nimport sys\n\ndef install(package):\n #install a module \n #Reference https://www.activestate.com/resources/quick-reads/how-to-install-python-packages-using-a-script/\n\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package]) \n\ntry:\n # try to import fuzzywuzzy\n from fuzzywuzzy import fuzz\n \nexcept ModuleNotFoundError:\n #if not installed then install \n install('fuzzywuzzy')\n from fuzzywuzzy import fuzz\n \n \nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\n#List of data (City, month and WeekName)\nlist_city=['chicago', 'new york city', 'washington']\nlist_month=['january', 'february', 'march', 'april', 'may', 'june','all']\nlist_week_name=['sunday','monday','tuesday','wednesday','thursday','friday','saturday','all'] \n\n\ndef choice(prompt, choices=('yes', 'no')):\n \"\"\"Return a valid input from the user given an array of possible answers.\n \n Args:\n (str)prompt - prompt with input request\n (tup)choices - tuple with elements of possibles answers\n \"\"\"\n\n while True:\n choice = input(prompt).lower()\n if choice in choices:\n break\n prompt = (\"\\nInvalid Response.Please choise again!\\n\")\n return choice\n\n\ndef similarity(input_data,data_list):\n #This function checks the similarity ratio between the input and our data list\n #Reference https://towardsdatascience.com/fuzzy-string-matching-in-python-68f240d910fe\n input_data=input_data.lower()\n ratio={}\n \n #ratio calculation \n for i in data_list:\n r=fuzz.ratio(input_data,i.lower())\n ratio[i]=r\n most_r=max(ratio.items(),key=operator.itemgetter(1))[0]\n \n if ratio[most_r] == 100:\n return (input_data,most_r,'exact')\n elif ratio[most_r] < 50:\n return (input_data,most_r,'no found')\n else:\n return (input_data,most_r,'similar')\n\n\ndef check(data): \n if data[2]=='exact':\n return data[1]\n elif data[2]=='similar':\n i=choice('Do you mean \\'{}\\' ? Enter yes or no\\n'.format(data[1].title()))\n if i=='yes':\n return data[1]\n else:\n print('\\nInvalid Response.Please choise again!\\n ')\n else:\n print('\\nInvalid Response.Please choise again!\\n ')\n return None \n\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('\\nHello!Let\\'s explore some US bikeshare data!\\n')\n city=None\n month=None\n day=None\n while True:\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while city == None:\n city=input('Please enter a city (chicago, new york city, washington):')\n city_sim=similarity(city,list_city)\n city=check(city_sim)\n\n # get user input for month (all, january, february, ... , june)\n while month==None:\n month=input('\\nwhich month?(january,february,march,april or all(for all months)): ')\n month_sim=similarity(month,list_month)\n month=check(month_sim)\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\n while day==None:\n day=input('\\nwhich day?(monday, tuesday, wednesday, thursday, friday, saturday, sunday or all(for all days)): ')\n day_sim=similarity(day,list_week_name)\n day=check(day_sim) \n # confirm the user input\n confirm = choice(\"\\nPlease confirm that you would like to apply \"\n \"the following filter to the bikeshare data.\"\n \"\\n\\n City: {}\\n Month: {}\\n Weekday\"\n \": {}\\n\\nEnter yes or no.\\n>\"\n .format(city, month, day ))\n if confirm.lower() == 'yes':\n break\n else:\n city=None\n month=None\n day=None\n print(\"\\nLet's try this again!\")\n print('-'*40)\n return city,month,day \n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df=pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Week_day']=df['Start Time'].dt.day_name()\n \n # filter by day of week if applicable\n if day!='all':\n # filter by day of week to create the new dataframe\n df=df[df['Week_day']==day.title()]\n \n # filter by month if applicable\n if month!='all':\n # use the index of the months list to get the corresponding int\n month = list_month.index(month) + 1\n # filter by month to create the new dataframe\n df=df[df['Month']==month]\n \n return df\n\n\ndef time_stats(df,day,month):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n if month=='all':\n #if month is given then we don't need to display the most common month\n common_month=df['Month'].mode()[0]\n print('The Most Common Month: {}'.format(str(list_month[common_month-1].title())))\n\n # display the most common day of week\n if day=='all':\n #if day is given then we don't need to display the most common day\n common_day = df['Week_day'].mode()[0]\n print('The Most Common Day Of week: {}'.format(common_day.title()))\n\n # display the most common start hour\n common_start_hour=df['Start Time'].dt.hour.mode()[0]\n print('The Most Common Start Hour: {}'.format(common_start_hour))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Commonly used Start Station : {}'.format(df['Start Station'].mode()[0]))\n \n\n # display most commonly used end station\n print('Most Commonly used End Station : {}'.format(df['End Station'].mode()[0]))\n\n # display most frequent combination of start station and end station trip\n df_trip=(df['Start Station']+ ' -> ' +df['End Station'])\n print('Most Commonly Used Station in End and Start Station: {}'.format(df_trip.mode()[0]))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # 
display total travel time\n print('Total Travel Time:')\n total_travel=df['Trip Duration'].sum()\n days=int(total_travel//86400)\n hours=int((total_travel% 86400)//3600)\n minutes=int(((total_travel% 86400) % 3600)//60)\n seconds=int(((total_travel% 86400) % 3600) % 60)\n print('{} days {} hours {} minutes {} seconds , total in second= {}'.format(days,hours,minutes,seconds,total_travel))\n print('_'*40)\n \n # display mean travel time\n print('Average Travel Time:')\n mean_travel=df['Trip Duration'].mean()\n days=int(mean_travel//86400)\n hours=int((mean_travel % 86400)//3600)\n minutes=int(((mean_travel % 86400) % 3600)//60)\n seconds=int(((mean_travel % 86400) % 3600) % 60)\n print('{} days {} hours {} minutes {} seconds , Average in seconds= {}'.format(days,hours,minutes,seconds,mean_travel))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n \ndef user_stats(df,city):\n \n \"\"\"Displays statistics on bikeshare users.\"\"\"\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Numbers of user by type :')\n print(df['User Type'].value_counts().to_string())\n print('_'*40)\n\n # Display counts of gender \n if \"Gender\" in df.columns:\n print('Numbers of user by gender :')\n print(df['Gender'].value_counts().to_string())\n print('_'*50)\n else :\n print(\"We're sorry! There is no data of user genders for {}.\"\n .format(city.title()))\n\n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df.columns:\n earliest=df['Birth Year'].min()\n recent=df['Birth Year'].max()\n mode=df['Birth Year'].mode()[0]\n print('The earliest year of birth: {}'.format(int(earliest)))\n print('The most common year of birth: {}'.format(int(mode)))\n print('The most recent year of birth: {}'.format(int(recent)))\n else :\n print(\"We're sorry! There is no data of birth year for {}.\"\n .format(city.title()))\n\n print(\"\\nThis took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n \ndef display_data(df, ref):\n \"\"\"Display 5 line of sorted raw data each time.\"\"\"\n\n print(\"\\nYou choosed to view raw data.\")\n\n # sort data by column\n ref = 0\n sort_df = choice(\"\\nHow would you like to sort the display of data?\\n\" \n \"Press Enter to view unsorted.\\n \\n \"\n \"st: Start Time\\n et: End Time\\n \"\n \"td: Trip Duration\\n ss: Start Station\\n \"\n \"es: End Station\\n\",\n ('st', 'et', 'td', 'ss', 'es', ''))\n\n order = choice(\"\\nWould you like it to be sorted ascending or \"\n \"descending? 
\\n asc: Ascending\\n desc: Descending\"\n \"\\n\",\n ('asc', 'desc'))\n\n if order == 'asc':\n order = True\n elif order == 'desc':\n order = False\n\n if sort_df == 'st':\n df = df.sort_values(['Start Time'], ascending=order)\n elif sort_df == 'et':\n df = df.sort_values(['End Time'], ascending=order)\n elif sort_df == 'td':\n df = df.sort_values(['Trip Duration'], ascending=order)\n elif sort_df == 'ss':\n df = df.sort_values(['Start Station'], ascending=order)\n elif sort_df == 'es':\n df = df.sort_values(['End Station'], ascending=order)\n elif sort_df == '':\n pass\n\n # each loop displays 5 lines of raw data\n while True:\n for i in range(ref, len(df.index)):\n print(\"\\n\")\n print(df.iloc[ref:ref+5].to_string())\n print(\"\\n\")\n ref += 5\n\n if choice(\"Do you want to see more?\"\n \" Enter yes or no.\\n\") == 'yes':\n continue\n else:\n break\n break\n\n return ref\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df=load_data(city,month,day)\n ref = 0\n while True:\n select_data = choice(\"\\nWhat information would you \"\n \"like to obtain?\\n\\n ts: Time Stats\\n ss: \"\n \"Station Stats\\n tds: Trip Duration Stats\\n \"\n \"us: User Stats\\n dd: Display Data\\n \"\n \"r: Restart\\n\",\n ('ts', 'ss', 'tds', 'us', 'dd', 'r'))\n if select_data == 'ts':\n time_stats(df,day,month)\n elif select_data == 'ss':\n station_stats(df)\n elif select_data == 'tds':\n trip_duration_stats(df)\n elif select_data == 'us':\n user_stats(df, city)\n elif select_data == 'dd':\n ref = display_data(df, ref)\n elif select_data == 'r':\n break\n\n restart = choice(\"\\nWould you like to restart?Enter yes or no.\\n\")\n if restart != 'yes':\n break\n\nif __name__ == \"__main__\":\n main()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":12619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
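bikeshare.py above matches free-text city/month/day input against the allowed values with fuzzywuzzy ratios plus a threshold. The same idea can be sketched with only the standard library's difflib; the acceptance threshold below is illustrative, not the 50/100 cut-offs used in the record.

from difflib import SequenceMatcher

def best_match(user_input, choices, accept=0.5):
    # score the input against every allowed value and keep the best one
    scores = {c: SequenceMatcher(None, user_input.lower(), c.lower()).ratio()
              for c in choices}
    best = max(scores, key=scores.get)
    if scores[best] == 1.0:
        return best, "exact"
    if scores[best] >= accept:
        return best, "similar"
    return None, "no match"

print(best_match("chcago", ["chicago", "new york city", "washington"]))
# -> ('chicago', 'similar')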
+{"seq_id":"478095347","text":"'''\nGiven a string containing digits from 2-9 inclusive,\nreturn all possible letter combinations that the number could represent.\nA mapping of digit to letters (just like on the telephone buttons) is given below.\nNote that 1 does not map to any letters.\n'''\n\n\nclass Solution:\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n match = {'1':'', '2':'abc', '3':'def', '4':'ghi', '5':'jkl', '6':'mno',\n '7':'pqrs', '8':'tuv', '9':'wxyz'}\n res = [digits]\n n = len(digits)\n\n for i in range(n):\n new_res = []\n for s in res:\n t = s\n for j in match[s[i]]:\n t = t[:i] + j + t[i + 1:]\n new_res.append(t)\n res = new_res\n return res\n\ntest = Solution()\nprint(test.letterCombinations(''))\n\n\n","sub_path":"Letter Combinations of a Phone Number.py","file_name":"Letter Combinations of a Phone Number.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"447513373","text":"#!/usr/bin/env python3\n\n\ndef get_value_suffix(value_type):\n suffix = None\n if value_type == 'OP_INTEGER':\n suffix = \"\"\n if value_type == 'OP_DECIMAL':\n suffix = \".0\"\n\n return suffix\n\n\ndef operation_to_symbol(operation):\n symbol = None\n if operation == \"PLUS_OR_MINUS\":\n symbol = \"-\" # This is on purpose since \"+\" on floats does not return the correct type\n if operation == \"MULTIPLY\":\n symbol = \"*\"\n if operation == \"DIVIDE\":\n symbol = \"/\"\n if operation == \"COMPARE\":\n symbol = \"<\"\n\n return symbol\n\n\ndef generate_state():\n print(\"struct State {\")\n print(\" OP_INTEGER_placeholder : int64\")\n print(\" OP_DECIMAL_placeholder : float64\")\n print(\"}\\n\")\n\n\ndef generage_arithmetic_fun(row_num, data_type, operation):\n suffix = get_value_suffix(data_type)\n fun_name = \"{}{}Row{}\".format(data_type, operation, row_num)\n print(\"fun {}(execCtx: *ExecutionContext, state: *State) -> nil {{\".format(fun_name))\n print(\" @execCtxStartResourceTracker(execCtx, 3)\")\n\n step_size = 100\n\n print(\" for (var i = 0; i < {}; i = i + {}) {{\".format(row_num, step_size))\n print(\" var a = state.{}_placeholder - 1000000000{}\".format(data_type, suffix))\n for i in range(step_size):\n if operation == \"COMPARE\":\n if i != 0:\n print(\" if (a {} 5{}) {{\".format(operation_to_symbol(operation), suffix))\n print(\" a = 10{}\".format(suffix))\n print(\" } else {\")\n print(\" a = 1{}\".format(suffix))\n print(\" }\")\n else:\n print(\" a = a {} 3{}\".format(operation_to_symbol(operation), suffix))\n print(\" state.{}_placeholder = a\".format(data_type))\n print(\" }\")\n\n print(\" @execCtxEndResourceTracker(execCtx, @stringToSql(\\\"{}_{}, {}\\\"))\".format(\n data_type, operation, row_num))\n print(\"}\")\n\n print()\n\n return fun_name\n\n\ndef generate_main_fun(fun_names):\n print(\"fun main(execCtx: *ExecutionContext) -> int32 {\")\n print(\" var state: State\")\n for fun_name in fun_names:\n print(\" {}(execCtx, &state)\".format(fun_name))\n print(\" return state.OP_INTEGER_placeholder\")\n print(\"}\")\n\n\ndef generate_all():\n fun_names = []\n row_nums = list(range(10000, 100000, 10000)) + list(range(100000, 1000000, 100000))\n data_types = [\"OP_INTEGER\", \"OP_DECIMAL\"]\n operations = [\"PLUS_OR_MINUS\", \"MULTIPLY\", \"DIVIDE\", \"COMPARE\"]\n\n generate_state()\n\n for row_num in row_nums:\n for data_type in data_types:\n for operation in operations:\n fun_names.append(generage_arithmetic_fun(row_num, data_type, operation))\n\n generate_main_fun(fun_names)\n\n\nif __name__ == '__main__':\n generate_all()\n","sub_path":"script/model/execution_engine_runner/generate_arithmetic.py","file_name":"generate_arithmetic.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"307041487","text":"import sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom skimage.measure import compare_mse, compare_psnr, compare_ssim\nfrom collections import OrderedDict\n\n\nROWS = 288 # 192 288\nCOLS = 360 # 240 360\n\n\ndef make_iter0_png(root='train_save_all'):\n first_batch_num = None\n for filename in os.listdir(root):\n img_path = os.path.join(root, filename)\n img_name = os.path.splitext(filename)\n if not os.path.isfile(img_path):\n continue\n\n if img_name[1] != '.png':\n continue\n\n substr = 'batch'\n p = img_name[0].find(substr)\n batch_num = img_name[0][p + len(substr):p + len(substr) + 6]\n\n if first_batch_num != batch_num and first_batch_num is None:\n image = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n image[:, COLS:2 * COLS] = image[:, :COLS]\n image[:, 3 * COLS:4 * COLS] = np.abs(\n image[:, COLS:2 * COLS].astype(np.int32) - image[:, 2 * COLS:3 * COLS].astype(np.int32))\n cv2.imwrite(img_path.replace(str(batch_num), '000000'), image)\n first_batch_num = batch_num\n continue\n\n if batch_num == first_batch_num:\n image = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n image[:, COLS:2 * COLS] = image[:, :COLS]\n image[:, 3 * COLS:4 * COLS] = np.abs(\n image[:, COLS:2 * COLS].astype(np.int32) - image[:, 2 * COLS:3 * COLS].astype(np.int32))\n cv2.imwrite(img_path.replace(str(batch_num), '000000'), image)\n\n\ndef calc_ssim_psnr(image_path, sp_list, use_training_gmap_batch=True):\n img = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n\n if use_training_gmap_batch:\n p0 = image_path.find('test')\n p1 = image_path.find('.png')\n gmaps_name = 'batch_' + image_path[p0: p1] + '_gmap.npy'\n path = os.path.split(image_path)[0]\n gmaps = np.load(os.path.join(path, gmaps_name))\n\n idx_col = img.shape[1] // COLS - 3\n batch_size = img.shape[0] // ROWS\n\n mean_psnr_pred = 0.0\n mean_ssim_pred = 0.0\n\n for i in range(batch_size):\n pred = img[i * ROWS:(i + 1) * ROWS, 1 * COLS:2 * COLS]\n label = img[i * ROWS:(i + 1) * ROWS, 2 * COLS:3 * COLS]\n\n pred = pred.astype(np.float32) / 255.\n label = label.astype(np.float32) / 255.\n\n if use_training_gmap_batch:\n gmap = np.squeeze(gmaps[i])\n pred = np.clip(pred * gmap, 0.0, 1.0)\n label = np.clip(label * gmap, 0.0, 1.0)\n\n mean_psnr_pred += compare_psnr(label, pred)\n mean_ssim_pred += compare_ssim(label, pred)\n\n mean_psnr_pred /= batch_size\n mean_ssim_pred /= batch_size\n\n if len(sp_list) == 0:\n sp_list.append(mean_psnr_pred)\n sp_list.append(mean_ssim_pred)\n else:\n sp_list[0] += mean_psnr_pred\n sp_list[1] += mean_ssim_pred\n\n\ndef gen_label(path):\n if path.endswith(os.path.sep) or path.endswith(os.path.altsep):\n path = path[:-1]\n\n label = os.path.split(path)[1]\n label = label.replace('train_save_all_', '')\n\n return label\n\n\ndef make_psnr_ssim_curve(root='train_save_all', use_gmap=True):\n ps_dict = OrderedDict()\n img_cnt_per_iter = 0\n\n for img in os.listdir(root):\n img_path = os.path.join(root, img)\n img_name = os.path.splitext(img)\n if not os.path.isfile(img_path):\n continue\n\n if img_name[1] != '.png':\n continue\n\n substr = 'batch'\n p = img_name[0].find(substr)\n iter = int(img_name[0][p + len(substr):p + len(substr) + 6])\n\n if iter not in ps_dict.keys():\n ps_dict[iter] = list()\n\n calc_ssim_psnr(img_path, ps_dict[iter], use_training_gmap_batch=use_gmap)\n\n img_cnt_per_iter += 1\n\n img_cnt_per_iter /= len(ps_dict.keys())\n\n for k, val in ps_dict.items():\n for i, _ in 
enumerate(val):\n ps_dict[k][i] /= img_cnt_per_iter\n\n label = gen_label(root)\n print('Done: ', label)\n\n return ps_dict, label\n\n\ndef draw_psnr_ssim(ps_dict_list, labels):\n fig = plt.figure()\n ax_psnr = fig.add_subplot(2, 1, 1)\n ax_psnr.set_ylabel(\"PSNR\")\n\n ax_ssim = fig.add_subplot(2, 1, 2)\n ax_ssim.set_ylabel(\"SSIM\")\n\n ax_ssim.set_xlabel(\"iteration\")\n\n marker_styles = ['bs', 'gp', 'c+', 'mx', 'k*', 'yo', 'm.', 'bh']\n\n for i, ps_dict in enumerate(ps_dict_list):\n x = []\n y_psnr = []\n y_ssim = []\n for k, val in ps_dict.items():\n x.append(k)\n y_psnr.append(val[0])\n y_ssim.append(val[1])\n\n ax_psnr.plot(x, y_psnr, marker_styles[i], label=labels[i])\n ax_ssim.plot(x, y_ssim, marker_styles[i])\n\n ax_psnr.legend(loc=0)\n\n save_png_name = time.strftime('diagram/ps_%Y%m%d%H%M%S.png', time.localtime(time.time()))\n fig.savefig(save_png_name)\n plt.show()\n plt.close()\n\n\nif __name__ == '__main__':\n '''\n # [ori dataset] [in/G, label/G] \"ps_model_2.png\"\n # psnr(label/G, out) -> psnr(label, out*G) -> psnr(label, out*G){*diff data location}\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0518_1730', use_gmap=False)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0518_1730', use_gmap=True)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1])\n '''\n\n '''\n # [gl dataset] \"ps_model_3.png\"\n # 3 * net, 2 * block -> 1 * net, 3 * block -> 1 * net, 4 * block\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0519_1425', use_gmap=False)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0519_2000', use_gmap=False)\n psnr_ssim_2 = make_psnr_ssim_curve('train_save_all_0521_1200', use_gmap=False)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, psnr_ssim_2])\n '''\n\n '''\n # [ori dataset] [without Gmap] a little better, robuster \"ps_model_5.png\"\n # output := output -> output := input - output\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_noG', use_gmap=False)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0521_1630', use_gmap=False)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, ])\n \n # [ori dataset] [with Gmap] a little better, robuster \"ps_model_6.png\"\n # output := output -> output := input - output\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0518_1730', use_gmap=True)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0521_1830', use_gmap=True)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, ])\n '''\n\n '''\n # [ori dataset] [with Gmap] using lrelu, a little better \"ps_model_7.png\"\n # relu -> lrelu\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0521_1830', use_gmap=True)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0522_1130', use_gmap=True)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, ])\n '''\n\n '''\n # [ori dataset] [with Gmap] remove dc operation \"ps_model_8.png\"\n # relu -> lrelu\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0521_1830', use_gmap=True)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0522_1350', use_gmap=True)\n psnr_ssim_2 = make_psnr_ssim_curve('train_save_all_0522_1350', use_gmap=False)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, psnr_ssim_2])\n '''\n\n '''\n # [ori dataset] new\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0522_1900', use_gmap=False)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0522_2030', use_gmap=False)\n psnr_ssim_2 = make_psnr_ssim_curve('train_save_all_0522_2230', use_gmap=False)\n psnr_ssim_3 = make_psnr_ssim_curve('train_save_all_0522_1930', use_gmap=False)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, psnr_ssim_2, psnr_ssim_3])\n '''\n\n '''\n # 
[ori dataset] unet densenet \"ps_model_20.png\"\n # unet + 1x1 convs -> unet + 1x1 convs + rm last 3 bn -> unet + rm last several bns\n psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_0524_1250', use_gmap=False)\n psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_0524_1255', use_gmap=False)\n psnr_ssim_2 = make_psnr_ssim_curve('train_save_all_0524_1609', use_gmap=False)\n psnr_ssim_3 = make_psnr_ssim_curve('train_save_all_0522_1930', use_gmap=False)\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1, psnr_ssim_2, psnr_ssim_3])\n '''\n\n data_dir0 = 'D:/Dataset/MRI/Thrive/0803/tfrecords/thrive_train_norm3_intep_a4_n060/train_save_all_intep_a4_n060'\n psnr_ssim_0, label0 = make_psnr_ssim_curve(data_dir0, use_gmap=False)\n\n data_dir1 = 'D:/Dataset/MRI/Thrive/0803/tfrecords/thrive_train_norm3_intep_a4_n060/train_save_all_intep_a4_n060_unet'\n psnr_ssim_1, label1 = make_psnr_ssim_curve(data_dir1, use_gmap=False)\n\n data_dir2 = 'D:/Dataset/MRI/Thrive/0803/tfrecords/thrive_train_norm3_intep_a2_n080/train_save_all'\n # psnr_ssim_2, label2 = make_psnr_ssim_curve(data_dir2, use_gmap=False)\n\n data_dir3 = 'D:/Dataset/MRI/Thrive/0803/tfrecords/thrive_train_norm3_intep_a4_n050-100/train_save_all'\n # psnr_ssim_3, label3 = make_psnr_ssim_curve(data_dir3, use_gmap=False)\n\n draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1],\n [label0, label1])\n\n # use gan\n # psnr_ssim_0 = make_psnr_ssim_curve('train_save_all_2rnn', use_gmap=False)\n # psnr_ssim_1 = make_psnr_ssim_curve('train_save_all_3rnn', use_gmap=False)\n # draw_psnr_ssim([psnr_ssim_0, psnr_ssim_1])\n\n # make_iter0_png()\n print('done')\n","sub_path":"denoise_py/snr_demo.py","file_name":"snr_demo.py","file_ext":"py","file_size_in_byte":9353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"531759701","text":"r\"\"\"\n\n在对隐藏层使用丢弃法,假设其中 h2, h5被清零,那么输出值的计算不再依赖于 h2, h5,在反向传播时,与这两个隐藏层相关的权重的梯度均为0.\n由于在训练中,每个神经元都有概率被清零,因此输出层的计算无法过度依赖于h1,...,h5中的任意一个,从而在训练模型时起到正则化的作用,并可\n能用力应对过拟合。在测试模型时,我们为了得到更加确定的结果,一般不使用丢弃法。\n\"\"\"\n\nfrom d2lzh import d2l\nfrom mxnet.gluon import loss as gloss, data as gdata\nfrom mxnet import autograd, nd\n\n\ndef dropout(X, drop_prob):\n r'''\n 公式:\n h_i' = (ξ_i / (1 - drop_prob)) * h_i\n 其中 ξ_i 是丢弃神经元的概率,设 ξ_i 为 0 和 1 的概率分别为 p 和 1-p。\n\n 期望算法:\n E(h_i') = (E(ξ_i) / (1 - drop_prob)) * E(h_i)\n = ((1 - drop_prob) / (1 - drop_prob)) * E(h_i)\n = E(h_i)\n 期望不变\n\n :param X:\n :param drop_prob:\n :return:\n '''\n assert 0 < drop_prob < 1\n keep_prob = 1 - drop_prob\n if keep_prob == 0:\n return nd.zeros_like()\n mask = nd.random.uniform(0, 1, X.shape) < keep_prob\n X_final = mask * X / keep_prob\n # print('mask : {}'.format(mask))\n return X_final\n\n\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens1))\nb1 = nd.zeros(num_hiddens1)\nW2 = nd.random.normal(scale=0.01, shape=(num_hiddens1, num_hiddens2))\nb2 = nd.zeros(num_hiddens2)\nW3 = nd.random.normal(scale=0.01, shape=(num_hiddens2, num_outputs))\nb3 = nd.zeros(num_outputs)\n\nparams = [W1, b1, W2, b2, W3, b3]\n\n\ndef net(X):\n for param in params:\n param.attach_grad()\n\n drop_prob1, drop_prob2 = 0.2, 0.5\n\n X = X.reshape(-1, num_inputs)\n H1 = (nd.dot(X, W1) + b1).relu()\n if autograd.is_training(): # 只有在训练时,使用drop_out\n keep_prob = 1 - drop_prob1\n mask = nd.random.normal(0, 1, H1.shape) < keep_prob\n H1 = mask * H1 / keep_prob\n H2 = (nd.dot(H1, W2) + b2).relu()\n if autograd.is_training():\n keep_prob = 1 - drop_prob2\n mask = nd.random.normal(0, 1, H2.shape) < keep_prob\n H2 = H2 * mask / keep_prob\n return nd.dot(H2, W3) + b3\n\n\ndef train():\n num_epochs, lr, batch_size = 5, 0.5, 256\n loss = gloss.SoftmaxCrossEntropyLoss()\n train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params, lr)\n\n\ndef dropout_test():\n X = nd.arange(16).reshape((2, 8))\n X_d = dropout(X, 0.2)\n print('X_d : {}'.format(X_d))\n X_d = dropout(X, 0.5)\n print('X_d : {}'.format(X_d))\n X_d = dropout(X, 0.8)\n print('X_d : {}'.format(X_d))\n\n\nif __name__ == '__main__':\n train()\n","sub_path":"chapter3/C_3_13_dropout.py","file_name":"C_3_13_dropout.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"444617464","text":"import numpy as np\n\nfrom doctr.models import detection\n\n\ndef test_dbpostprocessor():\n postprocessor = detection.DBPostProcessor(rotated_bbox=False)\n r_postprocessor = detection.DBPostProcessor(rotated_bbox=True)\n mock_batch = np.random.rand(2, 512, 512).astype(np.float32)\n out, _ = postprocessor(mock_batch)\n r_out, _ = r_postprocessor(mock_batch)\n # Batch composition\n assert isinstance(out, list)\n assert len(out) == 2\n assert all(isinstance(sample, np.ndarray) for sample in out)\n assert all(sample.shape[1] == 5 for sample in out)\n assert all(sample.shape[1] == 6 for sample in r_out)\n # Relative coords\n assert all(np.all(np.logical_and(sample[:, :4] >= 0, sample[:, :4] <= 1)) for sample in out)\n assert all(np.all(np.logical_and(sample[:, :4] >= 0, sample[:, :4] <= 1)) for sample in r_out)\n # Repr\n assert repr(postprocessor) == 'DBPostProcessor(box_thresh=0.1)'\n # Edge case when the expanded points of the polygon has two lists\n issue_points = np.array([\n [869, 561],\n [923, 581],\n [925, 595],\n [915, 583],\n [889, 583],\n [905, 593],\n [882, 601],\n [901, 595],\n [904, 604],\n [876, 608],\n [915, 614],\n [911, 605],\n [925, 601],\n [930, 616],\n [911, 617],\n [900, 636],\n [931, 637],\n [904, 649],\n [932, 649],\n [932, 628],\n [918, 627],\n [934, 624],\n [935, 573],\n [909, 569],\n [934, 562]], dtype=np.int32)\n out = postprocessor.polygon_to_box(issue_points)\n r_out = r_postprocessor.polygon_to_box(issue_points)\n assert isinstance(out, tuple) and len(out) == 4\n assert isinstance(r_out, tuple) and len(r_out) == 5\n\n\ndef test_linknet_postprocessor():\n postprocessor = detection.LinkNetPostProcessor()\n r_postprocessor = detection.LinkNetPostProcessor(rotated_bbox=True)\n mock_batch = np.random.rand(2, 512, 512).astype(np.float32)\n out, _ = postprocessor(mock_batch)\n r_out, _ = r_postprocessor(mock_batch)\n # Batch composition\n assert isinstance(out, list)\n assert len(out) == 2\n assert all(isinstance(sample, np.ndarray) for sample in out)\n assert all(sample.shape[1] == 5 for sample in out)\n assert all(sample.shape[1] == 6 for sample in r_out)\n # Relative coords\n assert all(np.all(np.logical_and(sample[:4] >= 0, sample[:4] <= 1)) for sample in out)\n","sub_path":"tests/common/test_models_detection.py","file_name":"test_models_detection.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"342703501","text":"import pygame\nimport logic\nimport surface\nimport service\n\nSCREEN_DIM = (1180, 675)\n\n\npygame.init()\ngame_display = pygame.display.set_mode(SCREEN_DIM)\npygame.display.set_caption('Arena')\n\n\nservice.service_init()\n\n\ndef create_game(is_new=False):\n global engine, drawer\n if is_new:\n engine = logic.GameEngine()\n drawer = surface.ArenaSurface(\n (1180, 675),\n pygame.SRCALPHA,\n (0, 0),\n surface.ScreenHandle((0, 0))\n )\n\n drawer.connect_engine(engine)\n service.reload_game(engine)\n\n\ncreate_game(True)\n\nwhile engine.working:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n engine.working = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n engine.working = engine.game_process\n engine.game_process = False\n if engine.game_process:\n if event.key == pygame.K_SPACE:\n engine.start_fighting()\n if event.key == pygame.K_RETURN:\n engine.end_fighting()\n\n game_display.blit(drawer, (0, 0))\n drawer.draw(game_display)\n pygame.display.update()\n\npygame.display.quit()\npygame.quit()\nexit(0)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"328525366","text":"from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException\nimport string\nimport random\n\n\n# method for dropdown grid category view\nfrom selenium_tests.models import TestRunsExecution\n\n\ndef drop_down_list_element_finder(dropdowngrid_pageobject):\n a = False\n while a is False:\n try:\n if dropdowngrid_pageobject.get_single_element_from_drop_down_list(\n \"Exercise\",\n \"Ea\",\n \"1.1\",\n ).is_displayed() is True:\n a = True\n else:\n raise NoSuchElementException\n except (NoSuchElementException, ElementNotVisibleException):\n try:\n print(\"first level exception\")\n dropdowngrid_pageobject.get_scroll_down_button().click()\n except (NoSuchElementException, ElementNotVisibleException):\n print(\"second level exception\")\n break\n\n\ndef random_string_generator(size):\n output =str(\"\")\n for x in range(0,size,1):\n output+=(random.choice(string.ascii_letters))\n return output.lower()\n\n\ndef slower_than_prevoius_run(test_run_minutes, test_run_seconds):\n single_value_from_actual_run = float(test_run_minutes * 60) + test_run_seconds\n try:\n last_run_minutes = TestRunsExecution.objects.values().last().get(\n \"execution_time_minutes\"\n )\n last_run_seconds = TestRunsExecution.objects.values().last().get(\n \"execution_time_seconds\"\n )\n except AttributeError:\n last_run_minutes = 99\n last_run_seconds = 99\n\n single_value_from_previous_run = (last_run_minutes * 60) + last_run_seconds\n if single_value_from_actual_run > single_value_from_previous_run:\n return True\n return False\n","sub_path":"selenium_tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"299948804","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String\nfrom turtlesim.msg import Pose\nfrom math import pow,atan2,sqrt\nfrom PlannedPath import PlannedPath\nfrom ControllerBase import ControllerBase\nimport sys\nimport time\nimport math\n\nclass Move2GoalController(ControllerBase):\n\n def __init__(this, occupancyGrid):\n ControllerBase.__init__(this, occupancyGrid)\n\n def get_distance(self, goal_x, goal_y):\n distance = sqrt(pow((goal_x - self.pose.x), 2) + pow((goal_y - self.pose.y), 2))\n return distance\n\n def get_angle(self, goal_x, goal_y):\n toAngle = atan2(goal_y - self.pose.y, goal_x - self.pose.x)\n fromAngle = self.pose.theta\n delta = toAngle - fromAngle\n while (delta < -math.pi): delta += 2.0*math.pi\n while (delta > math.pi): delta -= 2.0*math.pi\n return delta\n\n def driveToWaypoint(this, waypoint):\n vel_msg = Twist()\n\n # Calculate initial distance and angle error\n distanceError = this.get_distance(waypoint[0], waypoint[1])\n angleError = this.get_angle(waypoint[0], waypoint[1])\n\n this.distanceTravelled += distanceError\n this.angleTurned += math.fabs(angleError)\n\n # Move to waypoint\n while ((distanceError >= this.distance_tolerance) & (not rospy.is_shutdown())):\n \n # Only move when angle error is reduced below 1e-3\n if (math.fabs(angleError) < 1e-3):\n vel_msg.linear.x = 20 * distanceError\n\n # Continue reducing angle error even when moving\n vel_msg.angular.z = 20 * angleError\n\n this.velocityPublisher.publish(vel_msg)\n this.rate.sleep()\n \n # Update errors\n distanceError = this.get_distance(waypoint[0], waypoint[1])\n angleError = this.get_angle(waypoint[0], waypoint[1])\n\n # Stop moving\n vel_msg.linear.x = 0\n vel_msg.angular.z =0\n this.velocityPublisher.publish(vel_msg)\n","sub_path":"src/planner_controller/scripts/controller/Move2GoalController.py","file_name":"Move2GoalController.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"135357338","text":"# media, mediana, moda\nimport statistics as st\n\n\ndef media(lista):\n media = sum(lista) / len(lista)\n return media\n\ndef media_stats(lista):\n media = st.mean(lista)\n return media\n\ndef mediana(lista):\n lista_ordenada = sorted(lista)\n tamanho = len(lista_ordenada)\n\n if tamanho % 2 == 0:\n mediana = (lista_ordenada[int(tamanho/2)] + lista_ordenada[int((tamanho/2) - 1)]) / 2\n elif tamanho % 2 == 1:\n mediana = lista_ordenada[int(tamanho/2)]\n return mediana\n\ndef mediana_stats(lista):\n mediana = st.median(lista)\n return mediana\n\ndef moda(lista):\n numeros = {}\n for num in lista:\n if num not in numeros:\n numeros[num] = 1\n else:\n numeros[num] += 1\n\n maior_repeticao = max(numeros.values())\n for i in numeros:\n if numeros[i] == maior_repeticao:\n moda = i\n return moda\n\ndef moda_stats(lista):\n moda = st.mode(lista)\n return moda","sub_path":"python-intermediary/modularizacao/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"535084900","text":"from logging import getLogger, StreamHandler, INFO, DEBUG\n\nimport cv2\nfrom PyQt5.QtCore import QObject, pyqtSignal\n\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nlogger = getLogger(__name__)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nclass UploadWorker(QObject):\n\n uploaded = pyqtSignal(object)\n\n def __init__(self, parent, config, api, name):\n QObject.__init__(self)\n self.parent = parent\n self.config = config\n self.api = api\n self.name = name\n\n def upload(self):\n self.api.uploadPhoto(self.name, caption=self.config.instagram_caption)\n self.uploaded.emit(True)\n","sub_path":"gui/upload_worker.py","file_name":"upload_worker.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"265928597","text":"from collections import Counter, namedtuple\nimport csv\nimport re\nfrom pprint import pprint\n\nimport requests\n\nMARVEL_CSV = 'https://raw.githubusercontent.com/pybites/marvel_challenge/master/marvel-wikia-data.csv' # noqa E501\n\nCharacter = namedtuple('Character', 'pid name sid align sex appearances year')\n\n\n# csv parsing code provided so this Bite can focus on the parsing\n\ndef _get_csv_data():\n \"\"\"Download the marvel csv data and return its decoded content\"\"\"\n with requests.Session() as session:\n return session.get(MARVEL_CSV).content.decode('utf-8')\n\n\ndef load_data():\n \"\"\"Converts marvel.csv into a sequence of Character namedtuples\n as defined above\"\"\"\n content = _get_csv_data()\n reader = csv.DictReader(content.splitlines(), delimiter=',')\n for row in reader:\n name = re.sub(r'(.*?)\\(.*', r'\\1', row['name']).strip()\n yield Character(pid=row['page_id'],\n name=name,\n sid=row['ID'],\n align=row['ALIGN'],\n sex=row['SEX'],\n appearances=row['APPEARANCES'],\n year=row['Year'])\n\n\ncharacters = list(load_data())\n\n\n# start coding\n\ndef get_appearances(c : Character) -> int:\n try:\n return int(c.appearances)\n except ValueError as _:\n return 0\n\ndef most_popular_characters(characters=characters, top=5):\n \"\"\"Get the most popular character by number of appearances,\n return top n characters (default 5)\n \"\"\"\n name_ctr = Counter({c: int(get_appearances(c))\n for c in characters})\n \n return [c[0].name for c in name_ctr.most_common(top)]\n\n\n\ndef max_and_min_years_new_characters(characters=characters):\n \"\"\"Get the year with most and least new characters introduced respectively,\n use either the 'FIRST APPEARANCE' or 'Year' column in the csv\n characters, or the 'year' attribute of the namedtuple, return a tuple\n of (max_year, min_year)\n \"\"\"\n intro_ctr = Counter(c.year for c in characters if c.year)\n ranking = intro_ctr.most_common()\n return ranking[0][0], ranking[-1][0]\n\n\ndef get_percentage_female_characters(characters=characters):\n \"\"\"Get the percentage of female characters as percentage of all genders\n over all appearances.\n Ignore characters that don't have gender ('sex' attribue) set\n (in your characters data set you should only have Male, Female,\n Agender and Genderfluid Characters.\n Return the result rounded to 2 digits\n \"\"\"\n sexes = [c.sex for c in characters if c.sex]\n sex_ctr = Counter(sexes)\n females = sex_ctr['Female Characters']\n total = sum(sex_ctr.values())\n return round(100 * females / total,2)","sub_path":"124/marvel.py","file_name":"marvel.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"398558274","text":"#%% Import relevant modules\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport keras as K\n\n#%% Load train data\nfolderData = 'Data\\\\'\nfolderResults = 'Results\\\\'\nfolderModels = 'TrainedModels\\\\'\n\ntrainData = pd.read_csv(folderData+'train.csv')\n# Get image and label from trainData\nlabelTrain = trainData['label']\nimageTrain = trainData.drop(\"label\",axis=1)\n# One hot encoding of the labels\nlabelTrainOneHot = K.utils.to_categorical(labelTrain)\n#%% Choose some image to displays\nnTrain = imageTrain.shape[0]\nnRow = 4\nnCol = 7\nidx = np.random.choice(np.arange(nTrain), nRow*nCol)\n# Plot somes images\nplt.figure(figsize=(13,12))\nfor i in range(nRow * nCol):\n plt.subplot(nRow, nCol, i + 1)\n plt.imshow(imageTrain.values[idx[i],:].reshape(28,28),cmap='gray')\n title_text = 'Image ' + str(i + 1) + ' labeled ' + str(labelTrain[idx[i]])\n plt.title(title_text, size=6.5)\n plt.xticks(())\n plt.yticks(())\nplt.show()\n\n#%% Setup a DNN\n# Get input dimension\ndInput = imageTrain.shape[1]\n# Number of units for the hidden layer\nnUnits = 1000\n# Number of classes to predict\nnClasses = 10\n\n# Construct the DNN : one input layer, one hidden layer and one output layer\ninputLayer = K.layers.Input((dInput,),name='inputLayer')\ndenseLayer_1 = K.layers.Dense(nUnits,activation='relu')(inputLayer)\ndenseLayer_2 = K.layers.Dense(nClasses,activation='relu')(denseLayer_1)\n# Add softmax layer for class probability computation\noutputLayer = K.layers.Softmax()(denseLayer_2)\n# Gather the layers into a keras model\nmodel = K.Model(inputs=inputLayer, outputs=outputLayer)\n# Compile the model with ADAM optimizer and binary crossentropy loss\nmodel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy'])\n# Fit the DNN model by batch of 128 samples and for 10 epochs\nmodel.fit(imageTrain,labelTrainOneHot,batch_size=128,epochs=10)\n# Save the trained model\nmodel.save('TrainedModel\\\\myDNN.h5')\n\n#%% Lood test data\ntestData = pd.read_csv(folderData+'test.csv')\n# Get image from testData\nimageTest = testData\n#%% Predict on test data\nlabelPredicted = model.predict(imageTest)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"158795111","text":"NumeroFinalDoIntervalo = int(input(\"Qual numero você quer verificar os numeros primos no intervalo de [1,X], com X valendo? \" ))\nwhile NumeroFinalDoIntervalo <= 1 or NumeroFinalDoIntervalo > 1000:\n print(\"Numero invalido, seu numero deve ser igual ou maior a 2 e menor ou igual a 1000\")\n NumeroFinalDoIntervalo = int(input(\"Qual numero você quer verificar os numeros primos no intervalo de [1,X], com X valendo? \" ))\nprint(\"Entre 1 e\",NumeroFinalDoIntervalo,\"são primos os números:\")\nNumerosAVerificarDoMeioDoIntervalo = 2\nwhile NumerosAVerificarDoMeioDoIntervalo <= NumeroFinalDoIntervalo-1:\n NumeroDeDivisoresPossivel = 0\n NumeroParaComparacaoInicial = 1\n while NumeroParaComparacaoInicial < 1000:\n if NumerosAVerificarDoMeioDoIntervalo % NumeroParaComparacaoInicial == 0:\n NumeroDeDivisoresPossivel = NumeroDeDivisoresPossivel + 1\n NumeroParaComparacaoInicial = NumeroParaComparacaoInicial + 1 \n if NumeroDeDivisoresPossivel == 2:\n print(NumerosAVerificarDoMeioDoIntervalo,end=\" \") \n NumerosAVerificarDoMeioDoIntervalo = NumerosAVerificarDoMeioDoIntervalo + 1","sub_path":"Lista 3/lista03ex21.py","file_name":"lista03ex21.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"647702326","text":"\"\"\"\nrepoaccess.txt editing functions for repoedit.\n\"\"\"\n\nimport sys, re, os\n\nclass RepoObject:\n\t\"\"\" Generic Repo object as contained in repository entries. \"\"\"\n\tdef __init__(self, name, rights, parent=None):\n\t\tself.name = name\n\t\tself.access_rights = rights\n\t\tself.repo = parent\n\n\tdef set_rights(rights):\n\t\t\"\"\" Sets the rights of the object. Valid values are \"rw\" and \"r\". \"\"\"\n\t\tif rights != \"rw\" or rights != \"r\":\n\t\t\traise Exception(\"Invalid access right type %s.\" % rights)\n\t\telse:\n\t\t\tself.access_rights = rights\n\t\t\treturn True\n\n\tdef output(self):\n\t\t\"\"\" Returns the string representation as it should look in\n\t\trepoaccess.txt. MUST BE OVERRIDDEN! \"\"\"\n\t\traise Exception(\"Base class called. Bad!\")\n\nclass RepoGroup(RepoObject):\n\t\"\"\" The RepoGroup is access for a group. They aren't currently used, but are\n\tfully supported by RepoEdit. \"\"\"\n\tdef output(self):\n\t\t\"\"\" Returns the string representation as it should look in\n\t\trepoaccess.txt \"\"\"\n\t\treturn \"@%s = %s\" % (self.name, self.access_rights)\n\nclass RepoUser(RepoObject):\n\t\"\"\" Represents a single user for a specific repository. \"\"\"\n\tdef output(self):\n\t\t\"\"\" Returns the string representation as it should look in\n\t\trepoaccess.txt \"\"\"\n\t\treturn \"%s = %s\" % (self.name, self.access_rights)\n\nclass GroupDef:\n\t\"\"\" Group definition defines a name of a group and the users that belong to\n\tit. \"\"\"\n\tdef __init__(self, name, members=None):\n\t\t\"\"\" Creates a new GroupDef object. If members is passed and is iterable,\n\t\tthe strings within are automatically added a members. \"\"\"\n\t\tself.name = name\n\t\tself.members = []\n\t\tif members is not None:\n\t\t\ttry:\n\t\t\t\titerator = iter(members)\n\n\t\t\t\tfor member in members:\n\t\t\t\t\tself.add_member(member)\n\t\t\texcept TypeError:\n\t\t\t\t# Cannot iterate. Ignore.\n\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\t# Finalized.\n\t\t\t\tdel iterator\n\n\tdef add_member(self, name):\n\t\t\"\"\" Adds a member to the group. Returns False if the member was already\n\t\tthere, True, otherwise. Case SENSITIVE. \"\"\"\n\t\tif name not in self.members:\n\t\t\tself.members.append(name)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef del_member(self, name):\n\t\t\"\"\" Removes a member from the group. Returns False if the member was not\n\t\tfound, True otherwise. \"\"\"\n\t\tif name not in self.members:\n\t\t\treturn False\n\t\telse:\n\t\t\tself.members.remove(name)\n\t\t\treturn True\n\nclass Repo:\n\t\"\"\" Represents a single repository. \"\"\"\n\tdef __init__(self, name, path):\n\t\tself.name = name # The repo name.\n\t\tself.path = path # The repo's relative path.\n\t\tself.entries = {} # Dictionary of RepoEntry objects (either RepoUser or RepoGroup).\n\tdef set_user(self, user, rights):\n\t\t\"\"\" Sets (or adds) access rights for a *user* on this repo. \"\"\"\n\t\tif user in self.entries:\n\t\t\tif type(self.entries[user]) is not RepoUser:\n\t\t\t\tprint(\"Warning! Changing non-user as if they were a user!\")\n\t\t\t# Edit.\n\t\t\tself.entries[user].set_rights(rights)\n\t\telse:\n\t\t\tself.entries[user] = RepoUser(user, rights, self)\n\n\tdef set_group(self, group, rights):\n\t\t\"\"\" Sets (or adds) access rights for a *group* on this repo. \"\"\"\n\t\tif group in self.entries:\n\t\t\tif type(self.entries[user]) is not RepoGroup:\n\t\t\t\tprint(\"Warning! 
Changing non-group as if they were a group!\")\n\t\t\t# Edit.\n\t\t\tself.entries[group].set_rights(rights)\n\t\telse:\n\t\t\tself.entries[group] = RepoGroup(group, rights, self)\n\n\tdef del_user(self, user):\n\t\t\"\"\" Removes a user from the access list. Returns True if succesful,\n\t\tFalse if the group was not found. \"\"\"\n\t\tif user in self.entries and type(self.entries[user]) is RepoUser:\n\t\t\tdel self.entries[user]\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef del_group(self, group):\n\t\t\"\"\" Removes a group from the access list. Returns True if succesful,\n\t\tFalse if the group was not found. \"\"\"\n\t\tif group in self.entries and type(self.entries[group]) is RepoGroup:\n\t\t\tdel self.entries[group]\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\nclass RepoAccess:\n\t\"\"\" Entire repoaccess representation. \"\"\"\n\tuser_regex = re.compile(\"(\\w+@[\\w\\.]\\.aau\\.dk) = (\\w*)\") # Matches a user access right.\n\tgroup_acl_regex = re.compile(\"(@\\w+) = (\\w*)\") # Matches a group access right..\n\tgroup_def_regex = re.compile(\"(\\w+) = (\\w+)(,\\s*\\w+)+\") # Matches a group definition.\n\trepo_def_regex = re.compile(\"\\[(\\w+):([\\w/]+)\\]\") # Matches a repo definition.\n\tdef __init__(self, path):\n\t\t\"\"\" Opens and parses a repoaccess.txt file. \"\"\"\n\t\ttry:\n\t\t\tfile = open(path, \"rt\")\n\t\tfinally:\n\t\t\tlines = file.readlines()\n\t\t\tfile.close()\n\t\t\tdel file # Done with you!\n\n\t\t# Set initial values.\n\t\tself.groups = {} # Dictionary of all defined acl groups.\n\t\tself.repos = {} # Dictionary of all defined repositories.\n\n\t\tlevel = 0 # 0 = root, 1 = within groups section, 2 = within a repo section.\n\t\tcurrent_repo = None\n\n\t\t# Parse-o-rama\n\t\tfor line in lines:\n\t\t\tparsed = False\n\t\t\twhile not parsed: # Allows us to return to level 0 parsing if an end-of-section was found.\n\t\t\t\tif level == 0:\n\t\t\t\t\t# Find next section type.\n\t\t\t\t\tuser = try_parse_repo(line)\n\t\t\t\t\tif user is not False:\n\t\t\t\t\t\tprint(\"Found repository %s.\" % user)\n\t\t\t\t\t\tcurrent_repo = self.repos[user]\n\t\t\t\t\t\tlevel = 2\n\t\t\t\t\t\tparsed = True\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tgroup = try_parse_group_section(line)\n\t\t\t\t\tif group is not False:\n\t\t\t\t\t\tprint(\"Found group definitions\")\n\t\t\t\t\t\tlevel = 1\n\t\t\t\t\t\tparsed = True\n\t\t\t\t\t\tcontinue # Force breaking from \n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Nothing found: '%s'. Moving on.\" % line)\n\t\t\t\t\t\t# Imperative we don't mess with parsed var here. This is the key continue point.\n\t\t\t\telif level == 1:\n\t\t\t\t\t# Find next group definition.\n\t\t\t\t\tgroup = try_parse_group_def(line)\n\t\t\t\t\tif group is not False:\n\t\t\t\t\t\tprint(\"Found new group %s.\" % group)\n\t\t\t\t\t\tparsed = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Could not parse a new group. Assuming end of section by '%s'.\" % line)\n\t\t\t\t\t\tlevel = 0\n\t\t\t\t\t\tparsed = False\n\t\t\t\telif level == 2:\n\t\t\t\t\t# Find next repo entry or nothing.\n\t\t\t\t\tgroup = try_parse_group(line, current_repo)\n\t\t\t\t\tif group is not False:\n\t\t\t\t\t\tprint(\"Added group %s to repo.\" % group)\n\t\t\t\t\t\tparsed = True\n\t\t\t\t\tuser = try_parse_user(line, current_repo)\n\t\t\t\t\tif user is not False:\n\t\t\t\t\t\tprint(\"Added user %s to repo.\" % user)\n\t\t\t\t\t\tparsed = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"No match found in '%s'. 
Assuming end of section.\" % line)\n\t\t\t\t\t\tparsed = False\n\t\t\t\t\t\tlevel = 0\n\n\tdef try_parse_user(self, line, repo):\n\t\t\"\"\" Attempts to parse the line as a user access right. Returns name if \n\t\tthis succeeded, False otherwise. \"\"\"\n\t\tmatch = re.search(RepoAccess.user_regex, line)\n\t\tif match:\n\t\t\tself.repos[repo.name].set_user(match.group(1), match.group(2))\n\t\t\treturn match.group(1)\n\t\telse:\n\t\t\treturn False\n\n\tdef try_parse_group(self, line, repo):\n\t\t\"\"\" Attempts to parse the line as a group access right. Returns name if \n\t\tthis succeeded, False otherwise. \"\"\"\n\t\tmatch = re.search(RepoAccess.group_acl_regex, line)\n\t\tif match:\n\t\t\tself.repos[repo.name].set_group(match.group(1), match.group(2))\n\t\t\treturn match.group(1)\n\t\telse:\n\t\t\treturn False\n\n\tdef try_parse_repo(self, line):\n\t\t\"\"\" Attempts to parse a line from repoaccess.txt as a repo definition.\n\t\tReturns the name of the repo if successful, False otherwise. \"\"\"\n\t\tmatch = re.search(RepoAccess.repo_def_regex, line)\n\t\tif match:\n\t\t\tself.repos[match.group(1)] = Repo(match.group(1), match.group(2))\n\t\t\treturn match.group(1)\n\t\telse:\n\t\t\treturn False\n\n\tdef try_parse_group_def(self, line):\n\t\t\"\"\" Attempts to parse a group definition. Returns the name of the group\n\t\tif successful, False otherwise. \"\"\"\n\t\tmatch = re.search(RepoAccess.group_def_regex, line)\n\t\tif match:\n\t\t\tgroup = GroupDef(match.group(1))\n\t\t\t# Add all members.\n\t\t\tfor iterator in range(2,match.lastindex):\n\t\t\t\tmember = match.group(iterator)\n\t\t\t\tgroup.add_member(member)\n\t\t\tself.groups[match.group(1)] = group\n\t\t\treturn group.name\n\t\telse:\n\t\t\treturn False\n\n\tdef try_parse_group_section(self, line):\n\t\t\"\"\" Determines if a group section follows. \"\"\"\n\t\treturn \"groups\" in line\n\n\tdef strip_username(self, name):\n\t\t\"\"\" Strips a username of invalid characters such as ',' and whitespaces.\n\t\tThis will not necessarily sanitize the name. \"\"\"\n\t\treturn name.strip(\", \\t\").rstrip(\", \\t\")\n","sub_path":"lib/repoaccess.py","file_name":"repoaccess.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"414497224","text":"from __future__ import division\nimport pdb\nimport numpy as np\n\nclass DecisionTreeNode:\n '''This class encapsulates a simple tree node, which contains information like\n the split attribute used, the valid values for that split attribute, the branches\n connected to this node, and a class label (if this node is a leaf node).'''\n def __init__(self):\n '''This method initializes the node.'''\n self.split_attr = -1\n self.split_attr_type = \"DISCRETE\"\n self.attr_values = []\n self.label = -1\n self.branches = []\n self.instances = None\n\n def add_branch(self, node):\n '''This method adds a branch to this node.'''\n self.branches.append(node)\n\n def __str__(self):\n '''This method pretty prints the node.\n Note: nodes are printed differently depending on if they \n are an interior or leaf node.'''\n if len(self.branches) == 0:\n node_str = \"class label: %d\" % self.label\n else:\n node_str = 'split attribute: %d, split attribute type: %s, split values: %s, split branches length: %d, split number of instances: %d' % \\\n (self.split_attr, self.split_attr_type,\n str(self.attr_values), len(self.branches), len(self.instances))\n return node_str\n\n def __repr__(self):\n '''This method pretty prints the node during debugging.'''\n return self.__str__()\n","sub_path":"Project 4/decision_tree_node.py","file_name":"decision_tree_node.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"311676430","text":"import copy\nHelper = projectRelativeImport('helpers', 'app/util', 'Helper')\n\n# Individual items being held in scrape.py Data collector\nclass CollectorItem:\n # Initializer\n def __init__(self, parentId, url):\n if type(parentId) is not int:\n raise TypeError('invalid type of parentId in CollectorItem __init__(), should be int')\n elif type(url) is not str:\n raise TypeError('invalid type of url input in CollectorItem __init__(), should be str')\n elif parentId < -1:\n raise ValueError('invalid value of parentId in CollectorItem __init__(), must be greater than or equal to negative one')\n\n # Values\n self.content = [] # array normally with only one value, but selector could return multiple values\n self.attempted = False\n self.title = ''\n self.headerOne = []\n self.saved = False\n self.parents = [parentId]\n self.children = []\n self.url = url\n self.id = None\n\n # returns a stringified clone, doesn't effect original\n def stringifyTags(self):\n clone = copy.copy(self)\n temp = ''\n for contentPart in clone.content:\n temp = temp + self.__cleanMe(contentPart)\n\n clone.content = temp\n clone.title = Helper.xstr(clone.title)\n\n temp = ''\n for tag in clone.headerOne:\n if tag is not None and tag != '':\n temp = temp + ' - ' + tag.get_text().strip()\n\n clone.headerOne = temp\n return clone\n\n # private tag to clean string for content\n def __cleanMe(self, soup):\n for script in soup([\"script\", \"style\"]): # remove all javascript and stylesheet code\n script.extract()\n # get text\n text = soup.get_text()\n # break into lines and remove leading and trailing white space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = '\\n'.join(chunk for chunk in chunks if chunk)\n return text\n","sub_path":"app/modules/scraping/scrapedItem.py","file_name":"scrapedItem.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"542534905","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTesting file for ipfs-scripting module.\n\"\"\"\n\nimport unittest\nimport json\n\nfrom src import hive_setting\nfrom tests.utils.http_client import HttpClient\nfrom tests import init_test\nfrom tests.utils_v1 import test_common\n\n\nclass IpfsScriptingTestCase(unittest.TestCase):\n def __init__(self, method_name='runTest'):\n super().__init__(method_name)\n init_test()\n self.cli = HttpClient(f'/api/v2/vault')\n self.cli2 = HttpClient(f'/api/v2/vault', is_did2=True)\n self.file_name = 'ipfs-scripting/test.txt'\n self.file_content = 'File Content: 1234567890'\n # Owner's did and application did.\n self.did = self.cli.get_current_did()\n self.app_did = test_common.app_id\n\n @staticmethod\n def _subscribe():\n HttpClient(f'/api/v2').put('/subscription/vault')\n HttpClient(f'/api/v2', is_did2=True).put('/subscription/vault')\n\n @classmethod\n def setUpClass(cls):\n cls._subscribe()\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def __register_script(self, script_name, body):\n response = self.cli.put(f'/scripting/{script_name}', body)\n self.assertEqual(response.status_code, 200)\n return json.loads(response.text)\n\n def __call_script(self, script_name, body=None, is_raw=False):\n if body is None:\n body = dict()\n body['context'] = {\n 'target_did': self.did,\n 'target_app_did': self.app_did,\n }\n response = self.cli2.patch(f'/ipfs-scripting/{script_name}', body)\n self.assertEqual(response.status_code, 200)\n return response.text if is_raw else json.loads(response.text)\n\n def __set_and_call_script(self, name, set_data, run_data):\n self.__register_script(name, set_data)\n return self.__call_script(name, run_data)\n\n def __call_script_for_transaction_id(self, script_name, check_anonymous=False):\n response_body = self.__call_script(script_name, {\n \"params\": {\n \"path\": self.file_name\n }\n })\n self.assertEqual(type(response_body), dict)\n self.assertTrue(script_name in response_body)\n self.assertEqual(type(response_body[script_name]), dict)\n self.assertTrue('transaction_id' in response_body[script_name])\n if check_anonymous:\n self.assertTrue('anonymous_url' in response_body[script_name])\n self.assertTrue(response_body[script_name]['anonymous_url'].startswith(hive_setting.IPFS_PROXY_URL))\n return response_body[script_name]['transaction_id']\n\n def test01_file_upload(self):\n name = 'ipfs_upload_file'\n self.__register_script(name, {\n \"executable\": {\n \"output\": True,\n \"name\": name,\n \"type\": \"fileUpload\",\n \"body\": {\n \"path\": \"$params.path\"\n }\n }\n })\n response = self.cli2.put(f'/ipfs-scripting/stream/{self.__call_script_for_transaction_id(name)}',\n self.file_content.encode(), is_json=False)\n self.assertEqual(response.status_code, 200)\n\n def test02_file_download(self):\n name = 'ipfs_download_file'\n self.__register_script(name, {\n \"executable\": {\n \"output\": True,\n \"name\": name,\n \"type\": \"fileDownload\",\n \"body\": {\n \"path\": \"$params.path\"\n }\n }\n })\n response = self.cli2.get(f'/ipfs-scripting/stream/{self.__call_script_for_transaction_id(name)}')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, self.file_content)\n\n def test03_file_properties_without_params(self):\n name = 'ipfs_file_properties'\n body = self.__set_and_call_script(name, {'executable': {\n 'name': name,\n 'type': 'fileProperties',\n 'output': True,\n 'body': {\n 'path': self.file_name\n }}}, None)\n self.assertTrue(name in body)\n 
self.assertEqual(body[name]['size'], len(self.file_content))\n\n def test04_file_hash(self):\n name = 'ipfs_file_hash'\n body = self.__set_and_call_script(name, {'executable': {\n 'name': name,\n 'type': 'fileHash',\n 'output': True,\n 'body': {\n 'path': '$params.path'\n }}}, {'params': {\n 'path': self.file_name}})\n self.assertIsNotNone(body)\n\n def test05_get_anonymous_file(self):\n name = 'ipfs_get_anonymous_file'\n self.__register_script(name, {\n \"executable\": {\n \"output\": True,\n \"name\": name,\n \"type\": \"fileDownload\",\n \"body\": {\n \"path\": \"$params.path\"\n }\n },\n \"allowAnonymousUser\": True,\n \"allowAnonymousApp\": True\n })\n self.__call_script_for_transaction_id(name, check_anonymous=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/ipfs_scripting_test.py","file_name":"ipfs_scripting_test.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"28593187","text":"# Author: Berat Kurar\n# Date: 01/01/2021\n\nimport math as m\nfrom PIL import Image\n\nRo = 600.0\nRi = 520.0\n\ncir = [[0 for x in range(int(Ro * 2))] for y in range(int(Ro * 2))]\n\nimage = Image.open('h_3600.png')\npixels = image.load()\nwidth, height = image.size\n\ndef morph_img(img):\n list_image = [item for sublist in img for item in sublist]\n new_image = Image.new(\"L\", (len(img[0]), len(img)))\n new_image.putdata(list_image)\n new_image.save(\"circled_text_image.png\",\"PNG\")\n\nfor i in range(int(Ro)):\n # outer_radius = Ro*m.cos(m.asin(i/Ro))\n outer_radius = m.sqrt(Ro*Ro - i*i)\n for j in range(-int(outer_radius),int(outer_radius)):\n if i < Ri:\n # inner_radius = Ri*m.cos(m.asin(i/Ri))\n inner_radius = m.sqrt(Ri*Ri - i*i)\n else:\n inner_radius = -1\n if j < -inner_radius or j > inner_radius:\n x = Ro+j\n y = Ro-i\n angle = m.atan2(y-Ro,x-Ro)/2\n distance = m.sqrt((y-Ro)*(y-Ro) + (x-Ro)*(x-Ro))\n distance = m.floor((distance-Ri+1)*(height-1)/(Ro-Ri))\n cir[int(y)][int(x)] = pixels[int(width*angle/m.pi) % width, height-distance-1]\n y = Ro+i\n angle = m.atan2(y-Ro,x-Ro)/2\n distance = m.sqrt((y-Ro)*(y-Ro) + (x-Ro)*(x-Ro))\n distance = m.floor((distance-Ri+1)*(height-1)/(Ro-Ri))\n cir[int(y)][int(x)] = pixels[int(width*angle/m.pi) % width, height-distance-1]\n\nmorph_img(cir)","sub_path":"cb55_experiment/morph_text_image_to_circle.py","file_name":"morph_text_image_to_circle.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"405528044","text":"import discord\nfrom discord.ext import commands\nfrom util import send_embed_message, search_youtube\nfrom googletrans import Translator\nimport requests\n\nTRANSLATOR = Translator()\n\n\ndef setup(bot):\n bot.add_cog(General(bot))\n\n\nclass General(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.last_corona_virus_data = {}\n\n @commands.command(name=\"ysearch\")\n async def youtubeSearch(self, ctx, *searchStr):\n searchStr = ' '.join(map(str, searchStr))\n await ctx.send(f\"Searching \\'{searchStr}\\' on youtube...\")\n video_link = search_youtube(searchStr)\n if video_link != None:\n await ctx.send(f\"Found : {video_link}\")\n else:\n await ctx.send(\"I couldn't find anything\")\n\n @commands.command(pass_context=True, description=\"Example usage:\\n h!translate \\'I love you\\' german\")\n async def translate(self, ctx, toTranslate: str = \"\", toTranslateLanguage: str = \"en\"):\n if toTranslate == \"\":\n await send_embed_message(ctx, \"Gimme something to translate\")\n try:\n detect_language = TRANSLATOR.detect(toTranslate).lang\n print(f\"Language detected from {toTranslate}, {detect_language}\")\n translated = TRANSLATOR.translate(\n toTranslate, src=detect_language, dest=toTranslateLanguage).text\n await send_embed_message(ctx, f\"This:\\n{toTranslate.upper()}\\nMeans:\\n{translated.upper()}\")\n except ValueError:\n await send_embed_message(ctx, \"Error, type h!help translate\")\n\n @commands.command()\n async def quote(self, ctx, amount=1):\n if amount > 10:\n amount = 10\n\n for i in range(amount):\n data = requests.get(\"https://api.quotable.io/random\").json()\n await send_embed_message(ctx, author_name=data[\"author\"], content=data[\"content\"])","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"176997598","text":"print(\"Welcome to the magic 8-ball! May the furtunes ever be in your favor\")\n\nimport random\n\npredictions = [\"It is certain\", \"It is decidedly so\", \"Without a doubt\", \"Yes Definitely\", \"You may rely on it\", \"As I see it, yes\", \"Most likely\", \"Outlook good\", \"Yes\", \"Signs point to yes\", \"Reply hazy try again\", \"Ask again later\", \"Better not to tell you\", \"Cannot predict now\", \"Concentrate and ask again\", \"Don't count on it\", \"My reply is no\", \"My sources say no\", \"Outlook not so good\", \"Very doubtful\"]\n\nask_question = input(\"Do you want to ask the Magic 8-Ball a question? (yes) or (no)\\n> \")\n\ncondition = True\n\nwhile condition:\n while ask_question.lower() not in [\"yes\", \"no\", \"done\"]:\n ask_question = input(\"I'm looking for a (yes) or (no/done)\\n> \")\n if ask_question.lower() == \"yes\":\n user_question = input(\"What question do you dare ask the Magic 8-Ball!?\\n> \")\n print(\"...\\n...\\n...\")\n print(f\"You have asked {user_question}; the Magic 8-Ball says: {random.choice(predictions)}.\")\n ask_question = input(\"Do you want to try again? or are you done?\\n> \")\n if ask_question.lower() == \"done\" or \"no\":\n print(\"Not everyone is ready for answers\")\n condition = False\n","sub_path":"python labs/lab4-magic_8_ball.py","file_name":"lab4-magic_8_ball.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"637273092","text":"'''\nCreated on Oct 21, 2014\n\n@author: JBlackmore\n'''\nimport sys\nfrom Tkinter import *\nfrom gold.models.board import Board, IllegalMove\nfrom gold.models.life import determineLife\nfrom gold.models.MoveTreeParser import MoveTreeParser\n\nDEFAULT_WIDTH = 400\nDEFAULT_HEIGHT = 400\nDEFAULT_MARGIN = 12\nDEFAULT_SPACES = 19\n\n''' Places a stone of color 'color' on the\n space nearest (x, y) on Canvas C.\n'''\nclass ResizingCanvas(Canvas):\n def __init__(self, parent, launcher, **kwargs):\n Canvas.__init__(self, parent, **kwargs)\n self.parent=parent\n self.bind('
delimiters\n for i in range(len(mixed_df)):\n if len(mixed_df.loc[i].false) == 1:\n mixed_df.loc[i, \"false\"] = mixed_df.loc[i, \"false\"][0]\n else:\n string = mixed_df.loc[i, \"false\"][0]\n for element in mixed_df.loc[i, \"false\"][1:]:\n string += \"
\" + element\n mixed_df.loc[i, \"false\"] = string\n\n # capitalize the first letter\n for i in range(len(mixed_df)):\n stem = mixed_df.loc[i].stem[0].capitalize() + mixed_df.loc[i].stem[1:]\n mixed_df.loc[i, \"stem\"] = stem\n\n # write to file as .parquet\n mixed_df.to_parquet(\n \"../../data/ingested_data/en-fact-completion-3-21-23.parquet\",\n index=False,\n )\n\n # order by language popularity\n\n # Optionally upload final parquet to HuggingFace\n if args.hugging_face:\n data_files = {\n \"English\": \"../../data/ingested_data/en-fact-completion-3-21-23.parquet\",\n \"Spanish\": \"../../data/ingested_data/translated_versions/es-fact-completion-4-8-23.parquet\",\n \"French\": \"../../data/ingested_data/translated_versions/fr-fact-completion-4-5-23.parquet\",\n \"Russian\": \"../../data/ingested_data/translated_versions/ru-fact-completion-4-7-23.parquet\",\n \"Portuguese\": \"../../data/ingested_data/translated_versions/pt-fact-completion-4-8-23.parquet\",\n \"German\": \"../../data/ingested_data/translated_versions/de-fact-completion-4-7-23.parquet\",\n \"Italian\": \"../../data/ingested_data/translated_versions/it-fact-completion-4-9-23.parquet\",\n \"Ukrainian\": \"../../data/ingested_data/translated_versions/uk-fact-completion-4-9-23.parquet\",\n \"Polish\": \"../../data/ingested_data/translated_versions/pl-fact-completion-4-12-23.parquet\",\n \"Romanian\": \"../../data/ingested_data/translated_versions/ro-fact-completion-4-5-23.parquet\",\n \"Czech\": \"../../data/ingested_data/translated_versions/cs-fact-completion-4-10-23.parquet\",\n \"Bulgarian\": \"../../data/ingested_data/translated_versions/bg-fact-completion-4-10-23.parquet\",\n \"Swedish\": \"../../data/ingested_data/translated_versions/sv-fact-completion-4-10-23.parquet\",\n \"Serbian\": \"../../data/ingested_data/translated_versions/sr-fact-completion-4-12-23.parquet\",\n \"Hungarian\": \"../../data/ingested_data/translated_versions/hu-fact-completion-4-12-23.parquet\",\n \"Croatian\": \"../../data/ingested_data/translated_versions/hr-fact-completion-4-12-23.parquet\",\n \"Danish\": \"../../data/ingested_data/translated_versions/da-fact-completion-4-12-23.parquet\",\n \"Slovenian\": \"../../data/ingested_data/translated_versions/sl-fact-completion-4-12-23.parquet\",\n \"Dutch\": \"../../data/ingested_data/translated_versions/nl-fact-completion-4-12-23.parquet\",\n \"Catalan\": \"../../data/ingested_data/translated_versions/ca-fact-completion-4-12-23.parquet\",\n }\n dataset = load_dataset(\"parquet\", data_files=data_files)\n\n # This reads the environment variables inside .env\n load_dotenv()\n # Logs into HF hub\n login(os.getenv(\"HF_TOKEN\"))\n # push to hub\n dataset.push_to_hub(\"Polyglot-or-Not/Fact-Completion\")\n # test loading from hub\n load_dataset(\"Polyglot-or-Not/Fact-Completion\")\n\n return None\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--hugging_face\",\n type=bool,\n default=False,\n help=\"Whether or not to write to Hugging Face (access required)\",\n )\n\n args = parser.parse_args()\n main(args)\n","sub_path":"src/dataset_caching_scripts/cache_fact_completion_dataset.py","file_name":"cache_fact_completion_dataset.py","file_ext":"py","file_size_in_byte":41884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"217706647","text":"from django.test import TestCase\nfrom . import views\nfrom . import models\nfrom django.contrib.auth.models import User as us\nimport uuid\n\n# Create your tests here.\n\n\nclass Apartment(TestCase):\n def setUp(self) -> None:\n models.Apartment(address='amir', number_of_units=5,\n apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n\n def test_create_apartment(self):\n address = 'Tehran'\n number_of_units = 20\n res = views.create_apartment(address, number_of_units)\n self.assertTrue('Failed', res)\n\n def test_delete_apartment(self):\n apartment_id = '7y12312y2t'\n res = views.remove_apartment(apartment_id)\n self.assertTrue('Failed', res)\n\n def test_get_apartment(self):\n apartment_id = uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')\n res = views.get_apartment(apartment_id)\n self.assertEqual(True, res)\n\n\nclass User(TestCase):\n def setUp(self) -> None:\n a = us.objects.create_user(username = 'amirreza@yahoo.com', password='Amir1376')\n print('alex', a)\n def test_signup(self):\n email = 'test@example.com'\n password = 'tester'\n first_name = 'test'\n last_name = 'testian'\n phone_number = +989331532578\n unit = 4\n res = views.signupuser(email, password, first_name, last_name, phone_number, unit)\n self.assertTrue('Failed', res)\n\n def test_login(self):\n email = 'amirreza@yahoo.com'\n password = 'Amir1376'\n\n res = views.loginTest(email, password)\n print(res)\n self.assertEqual(True, res)\n\n def test_edit_user(self):\n user_id = 3\n email = 'test@example.com'\n password = 'tester'\n first_name = 'test'\n last_name = 'testian'\n phone_number = 989331532578\n unit = 4\n result = views.edit(user_id, email, password, first_name, last_name, phone_number, unit)\n self.assertTrue('Failed', result)\n\n def test_get_user(self):\n user_id = 'amirreza@yahoo.com'\n res = views.get_user(user_id)\n print(res)\n self.assertEqual(True, res)\n\n\nclass Unit(TestCase):\n def setUp(self) -> None:\n models.Apartment(address='nanaz', number_of_units=5, apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n models.Unit(unit_number=2, apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81'), phone=23154, number_of_people=5).save()\n\n def test_create_unit(self):\n unit_number = 2\n number_of_people = 3\n phone = 982166198045\n apartment = '7y12312y'\n res = views.create_unit(unit_number, number_of_people, phone, apartment)\n self.assertTrue('Failed', res)\n\n def test_edit_unit(self):\n unit_id = 1\n unit_number = 2\n number_of_people = 3\n phone = 982166198045\n res = views.edit_unit(unit_id, unit_number, number_of_people, phone)\n self.assertTrue('Failed', res)\n\n def test_get_unit(self):\n unit_id = 2\n res = views.get_unit(unit_id)\n self.assertEqual(True, res)\n\n\nclass Services(TestCase):\n def test_created_services(self):\n tracking_code = '7t5d6'\n unit = 3\n water = 20000\n gas = 30000\n electricity = 10000\n repairs = 20000\n cleaning = 30000\n res = views.create_services(tracking_code, unit, water, gas, electricity, repairs, cleaning)\n self.assertTrue('Failed', res)\n\n def test_edit_services(self):\n tracking_code = '7t5d6'\n unit = 3\n water = 20000\n gas = 25000\n electricity = 10000\n repairs = 52000\n cleaning = 36000\n res = views.edit_services(tracking_code, unit, water, gas, electricity, repairs, cleaning)\n self.assertTrue('Failed', res)\n\n def test_delete_services(self):\n tracking_code = '7t5d6'\n res = views.delete_services(tracking_code)\n self.assertTrue('Failed', res)\n\n def 
test_get_services(self):\n tracking_code = '7t5d6'\n res = views.get_services(tracking_code)\n self.assertTrue('Failed', res)\n\n\nclass Notification(TestCase):\n def setUp(self) -> None:\n models.Apartment(address='nanaz', number_of_units=5, apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n models.Notification(id=5, title='Amir', body='dwwd', apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n def test_create_notification(self):\n title = 'maintain'\n body = 'the elevator is crash'\n apartment = '3tu87123ol'\n res = views.create_notification(title, body, apartment)\n self.assertTrue('Failed', res)\n\n def test_edit_notification(self):\n notification_id = 23\n title = 'maintain'\n body = 'the elevator is crash'\n res = views.edit_notification(notification_id, title, body)\n self.assertTrue('Failed', res)\n\n def test_delete_notification(self):\n notification_id = 25\n res = views.delete_notification(notification_id)\n self.assertTrue('Failed', res)\n\n def test_get_notification(self):\n notification_id = 5\n res = views.get_notification(notification_id)\n self.assertEqual(True, res)\n\n\nclass Suggestion(TestCase):\n def test_send_suggest(self):\n title = 'bela bela'\n body = 'bela bela bela'\n res = views.send_suggest(title, body)\n self.assertTrue('Failed', res)\n\n def test_delete_suggest(self):\n suggestion_id = '123n24'\n res = views.delete_suggest(suggestion_id)\n self.assertTrue('Failed', res)\n\n def test_get_suggest(self):\n suggestion_id = 8\n res = views.get_suggest(suggestion_id)\n self.assertEqual(True, res)\n\n\nclass Vote(TestCase):\n def setUp(self) -> None:\n models.Vote(id=88, title='soal', body='body soal', date='2011-01-02', apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n models.Apartment(address='nanaz', number_of_units=5, apartment_id=uuid.UUID('c9686293-2ac1-4c96-b9a4-aa7002939d81')).save()\n\n def test_create_vote(self):\n title = 'fixing'\n body = 'What?'\n apartment_id = '4352433356'\n res = views.create_vote(title, body, apartment_id)\n self.assertTrue('Failed', res)\n\n def test_edit_vote(self):\n title = 'fixing'\n body = 'What?'\n res = views.edit_vote(title, body)\n self.assertTrue('Failed', res)\n\n def test_remove_vote(self):\n vote_id = 12\n res = views.remove_vote(vote_id)\n self.assertTrue('Failed', res)\n\n def test_get_vote(self):\n vote_id = 88\n res = views.get_vote(vote_id)\n self.assertEqual(True, res)\n","sub_path":"server/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"406808080","text":"from __future__ import print_function\n\nfrom .base import maybe_requirement_list\nfrom .fetcher import PyPIFetcher\nfrom .http import Crawler\nfrom .interpreter import PythonInterpreter\nfrom .obtainer import Obtainer\nfrom .platforms import Platform\nfrom .translator import Translator\n\nfrom pkg_resources import (\n Environment,\n WorkingSet,\n find_distributions,\n)\n\n\nclass ResolverEnvironment(Environment):\n def can_add(self, dist):\n return Platform.distribution_compatible(dist, python=self.python, platform=self.platform)\n\n\nclass ResolverBase(WorkingSet):\n def __init__(self, cache=None):\n self._cached_entries = set(find_distributions(cache)) if cache else set()\n self._entries = set()\n super(ResolverBase, self).__init__(entries=[])\n\n def make_installer(self, requirements, interpreter, platform):\n return None\n\n def resolve(self, requirements, interpreter=None, platform=None):\n requirements = maybe_requirement_list(requirements)\n interpreter = interpreter or PythonInterpreter.get()\n platform = platform or Platform.current()\n env = ResolverEnvironment([d.location for d in (self._entries | self._cached_entries)],\n python=interpreter.python,\n platform=platform)\n added = set()\n for dist in super(ResolverBase, self).resolve(requirements, env=env,\n installer=self.make_installer(requirements, interpreter, platform)):\n if dist not in self._entries:\n added.add(dist)\n self._entries.add(dist)\n return added\n\n def distributions(self):\n return self._entries\n\n\nclass Resolver(ResolverBase):\n def __init__(self, cache=None, crawler=None, fetchers=None, install_cache=None,\n conn_timeout=None):\n self._crawler = crawler or Crawler()\n self._fetchers = fetchers or [PyPIFetcher()]\n self._install_cache = install_cache\n self._conn_timeout = conn_timeout\n super(Resolver, self).__init__(cache=cache)\n\n def make_installer(self, reqs, interpreter, platform):\n obtainer = Obtainer(self._crawler, self._fetchers,\n Translator.default(self._install_cache, interpreter=interpreter, platform=platform,\n conn_timeout=self._conn_timeout))\n return obtainer.obtain\n","sub_path":"src/python/twitter/common/python/resolver.py","file_name":"resolver.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"551578985","text":"import os, fnmatch, Versionamento\n\ndef find(pattern, path):\n\tresult = []\n\tfor root, dirs, files in os.walk(path):\n\t\tfor name in files:\n\t\t\tif fnmatch.fnmatch(name, pattern):\n\t\t\t\tresult.append(os.path.join(root, name))\n\treturn result\n\n\nprojetos = list()\nwith open(\"/home/gabriel/Documentos/ic2/analiseDosProjetosGerais/RQ1/selecaoDosProjetosAleatoriamente/projetosAleatorios.csv\") as entrada:\n for linha in entrada:\n linha=linha.replace(\"\\n\",\"\")\n projetos.append(linha.split(\",\"))\n\nprint(\"framework,projeto,caminho,minSdkVersion/springboot,targetSdkVersion\")\n\nfor projeto in projetos:\n tem_import = False\n if (projeto[0] == \"Android\"):\n gradles = find (\"build.gradle\", \"/home/gabriel/Documentos/ic2/analiseDosProjetosGerais/repositorios/\"+projeto[1])\n for gradle in gradles:\n arquivo_gradle = open(gradle).read()\n if(\"minSdkVersion\" in arquivo_gradle and \"targetSdkVersion\" in arquivo_gradle):\n tem_import = True\n\n posicao_inicial = arquivo_gradle.find(\"minSdkVersion\")\n \n minSdkVersion = arquivo_gradle[posicao_inicial:]\n\n posicao_inicial = minSdkVersion.find(\" \")\n\n minSdkVersion = minSdkVersion[posicao_inicial+1:]\n\n posicao_final = minSdkVersion.find(\"\\n\")\n \n minSdkVersion = minSdkVersion[:posicao_final]\n \n # target\n \n posicao_inicial = arquivo_gradle.find(\"targetSdkVersion\")\n \n targetSdkVersion = arquivo_gradle[posicao_inicial:]\n\n posicao_inicial = targetSdkVersion.find(\" \")\n\n targetSdkVersion = targetSdkVersion[posicao_inicial+1:]\n\n posicao_final = targetSdkVersion.find(\"\\n\")\n \n targetSdkVersion = targetSdkVersion[:posicao_final]\n\n print(projeto[0]+\",\"+projeto[1]+\",\"+gradle+\",\"+minSdkVersion+\",\"+targetSdkVersion)\n if(tem_import == False):\n print(projeto[0]+\",\"+projeto[1]+\",\"+\"Nao encontrado\")\n\n if(projeto[0]==\"Spring\"):\n poms = find(\"pom.xml\", \"/home/gabriel/Documentos/ic2/analiseDosProjetosGerais/repositorios/\"+projeto[1])\n for pom in poms:\n arquivo_pom = open(pom).read()\n if(\"org.springframework.boot\" in arquivo_pom):\n tem_import = True\n posicao = arquivo_pom.find(\"org.springframework.boot\")\n\n arquivo = arquivo_pom[posicao:]\n\n posicao= arquivo.find(\"version\")\n\n arquivo = arquivo[posicao:]\n\n posicao = arquivo.find(\">\")\n\n arquivo = arquivo[posicao+1:]\n\n posicao = arquivo.find(\"<\")\n\n versao = arquivo[:posicao]\n print(projeto[0]+\",\"+projeto[1]+\",\"+pom+\",\"+versao)\n if(tem_import == False):\n print(projeto[0]+\",\"+projeto[1]+\",\"+\"Nao encontrado\")","sub_path":"analiseDosProjetosGerais/RQ2/extraindoVersaoAtualDoFramework/pega_caminho_dos_pom_e_gradle.py","file_name":"pega_caminho_dos_pom_e_gradle.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"359459574","text":"\r\n\r\n\r\n\r\n\r\n\r\nlat_data_file = open(\"data/path.csv\")\r\ndata = lat_data_file.readlines()\r\n\r\nloc_json = open(\"path_data.json\", 'w')\r\nloc_json.write('{\\n\\t\"AllData\": \\n\\t[')\r\n\r\nfirst = True\r\n\r\nfor line in data[1:]:\r\n if not first:\r\n loc_json.write(\",\\n\")\r\n else:\r\n first = False\r\n line_data = line.strip().split(',')\r\n loc_json.write(\" \\n\\t\\t{\\n\\t\\t\\\"Data\\\": \\n\\t\\t[\\n\\t\\t\\t\")\r\n for i in range(len(line_data)):\r\n \tif line_data[i] != 'nan':\r\n \t\ttemp = line_data[i]\r\n \t\tif temp.endswith('.0'):\r\n \t\t\ttemp = temp[:-2]\r\n \t\tloc_json.write(\"\\\"\" + temp + \"\\\"\")\r\n \tif i < len(line_data) - 1 and line_data[i+1] != 'nan':\r\n \t\tloc_json.write(\", \")\r\n loc_json.write(\"\\n\\t\\t\\t]\\n\\t\\t}\")\r\n # print (line_data)\r\n # print (line_data)\r\n # loc_json.write(\" \\n\\t\\t{\\n\\t\\t\\\"Data\\\": \\n\\t\\t\\t[\\n\\t\\t\\t\\\"\" + line_data[1] + \"\\\", \\\"\" + line_data[2] +\r\n # \"\\\", \\\"\" + line_data[3] + \"\\\"\\n\\t\\t\\t]\\n\\t\\t}\")\r\n\r\nloc_json.write('\\n\\t]\\n}')\r\nloc_json.close()\r\n\r\nlat_data_file.close()\r\n","sub_path":"make_path_json.py","file_name":"make_path_json.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"297065736","text":"import numpy as np\nimport torch\n\nfrom collections import defaultdict\nfrom hyperopt import STATUS_OK\nfrom scvi.core.modules import SCANVAE\nfrom scvi.core.trainers import SemiSupervisedTrainer\nfrom sklearn.model_selection import train_test_split\nfrom scvi.data._anndata import get_from_registry\nfrom scvi import _CONSTANTS\n\n\ndef custom_objective_hyperopt(\n space, is_best_training=False, dataset=None, n_epochs=None\n):\n \"\"\"Custom objective function for advanced autotune tutorial.\"\"\"\n space = defaultdict(dict, space)\n model_tunable_kwargs = space[\"model_tunable_kwargs\"]\n trainer_tunable_kwargs = space[\"trainer_tunable_kwargs\"]\n train_func_tunable_kwargs = space[\"train_func_tunable_kwargs\"]\n\n trainer_specific_kwargs = {}\n model_specific_kwargs = {}\n train_func_specific_kwargs = {}\n trainer_specific_kwargs[\"use_cuda\"] = bool(torch.cuda.device_count())\n train_func_specific_kwargs[\"n_epochs\"] = n_epochs\n\n # add hardcoded parameters\n # disable scVI progbar\n trainer_specific_kwargs[\"silent\"] = True\n trainer_specific_kwargs[\"frequency\"] = 1\n\n # merge params with fixed param precedence\n model_tunable_kwargs.update(model_specific_kwargs)\n trainer_tunable_kwargs.update(trainer_specific_kwargs)\n train_func_tunable_kwargs.update(train_func_specific_kwargs)\n\n scanvi = SCANVAE(\n dataset.uns[\"_scvi\"][\"summary_stats\"][\"n_vars\"],\n dataset.uns[\"_scvi\"][\"summary_stats\"][\"n_batch\"],\n dataset.uns[\"_scvi\"][\"summary_stats\"][\"n_labels\"],\n **model_tunable_kwargs\n )\n trainer_scanvi = SemiSupervisedTrainer(scanvi, dataset, **trainer_tunable_kwargs)\n batch_indices = get_from_registry(dataset, _CONSTANTS.BATCH_KEY)\n trainer_scanvi.unlabelled_set = trainer_scanvi.create_scvi_dl(\n indices=(batch_indices == 1)\n )\n trainer_scanvi.unlabelled_set.to_monitor = [\"reconstruction_error\", \"accuracy\"]\n indices_labelled = batch_indices == 0\n\n if not is_best_training:\n # compute k-fold accuracy on a 20% validation set\n k = 5\n accuracies = np.zeros(k)\n indices_labelled = batch_indices == 0\n for i in range(k):\n indices_labelled_train, indices_labelled_val = train_test_split(\n indices_labelled.nonzero()[0], test_size=0.2\n )\n trainer_scanvi.labelled_set = trainer_scanvi.create_scvi_dl(\n indices=indices_labelled_train\n )\n trainer_scanvi.labelled_set.to_monitor = [\n \"reconstruction_error\",\n \"accuracy\",\n ]\n trainer_scanvi.validation_set = trainer_scanvi.create_scvi_dl(\n indices=indices_labelled_val\n )\n trainer_scanvi.validation_set.to_monitor = [\"accuracy\"]\n trainer_scanvi.train(**train_func_tunable_kwargs)\n accuracies[i] = trainer_scanvi.history[\"accuracy_unlabelled_set\"][-1]\n return {\"loss\": -accuracies.mean(), \"space\": space, \"status\": STATUS_OK}\n else:\n trainer_scanvi.labelled_set = trainer_scanvi.create_scvi_dl(\n indices=indices_labelled\n )\n trainer_scanvi.labelled_set.to_monitor = [\"reconstruction_error\", \"accuracy\"]\n trainer_scanvi.train(**train_func_tunable_kwargs)\n return trainer_scanvi\n","sub_path":"tests/notebooks/utils/autotune_advanced_notebook.py","file_name":"autotune_advanced_notebook.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"37308753","text":"from typing import Tuple\n\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef cast_rays(\n height: int,\n width: int,\n focal: float,\n pose: torch.Tensor,\n device: torch.device = \"cpu\",\n):\n ii, jj = torch.meshgrid(\n torch.arange(width, device=device),\n torch.arange(height, device=device),\n indexing=\"xy\",\n )\n directions = torch.stack(\n [\n (ii - width * 0.5) / focal,\n -(jj - height * 0.5) / focal,\n -torch.ones_like(ii),\n ],\n dim=-1,\n )\n directions = torch.sum(directions[..., None, :] * pose[:3, :3], dim=-1)\n origins = pose[:3, -1].expand(directions.shape)\n return origins, directions\n\n\ndef sample_points(\n origins: torch.Tensor,\n directions: torch.Tensor,\n z_near: float,\n z_far: float,\n num_samples: int,\n random: bool = True,\n device: torch.device = \"cpu\",\n):\n z = torch.linspace(z_near, z_far, num_samples, device=device)\n if random:\n noise = torch.rand(\n origins.shape[0], origins.shape[1], num_samples, device=device\n )\n z = z + noise * (z_far - z_near) / num_samples\n points = origins[..., None, :] + directions[..., None, :] * z[..., :, None]\n return points, z\n\n\ndef positional_encoding(\n x: torch.Tensor, encoding_size: int = 6, include_input: bool = True\n):\n encoding = [x] if include_input else []\n for i in range(encoding_size):\n for fn in [torch.sin, torch.cos]:\n encoding.append(fn((2**i) * x))\n return torch.cat(encoding, dim=-1)\n\n\ndef render_volume(\n samples: torch.Tensor, z: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\n rgb = torch.sigmoid(samples[..., :3])\n sigma_a = F.relu(samples[..., 3])\n dist = z[..., 1:] - z[..., :-1]\n dist = torch.cat([dist, torch.full_like(z[..., :1], 1e10)], dim=-1)\n alpha = 1.0 - torch.exp(-sigma_a * dist)\n weights = alpha * torch.cumprod(1.0 - alpha + 1e-10, dim=-1)\n rgb = (weights[..., None] * rgb).sum(dim=-2)\n return rgb\n\n\nclass NeRF(nn.Module):\n def __init__(self, encoding_size: int = 6, hidden_channels: int = 256):\n super().__init__()\n in_channels = 3 + 3 * 2 * encoding_size\n self.fc1 = nn.Linear(in_channels, hidden_channels)\n self.fc2 = nn.Linear(hidden_channels, hidden_channels)\n self.fc3 = nn.Linear(hidden_channels, hidden_channels)\n self.fc4 = nn.Linear(hidden_channels, hidden_channels // 2)\n self.fc5 = nn.Linear(hidden_channels // 2, hidden_channels // 2)\n self.alpha = torch.nn.Linear(hidden_channels, 1)\n self.rgb = torch.nn.Linear(hidden_channels // 2, 3)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n alpha = self.alpha(x)\n x = F.relu(self.fc4(x))\n x = F.relu(self.fc5(x))\n rgb = self.rgb(x)\n return torch.cat([rgb, alpha], dim=1)\n\n\nif __name__ == \"__main__\":\n device = \"cuda\"\n num_iters = 20000\n z_near = 2.0\n z_far = 6.0\n num_samples = 64 # Number of depth samples per ray\n chunk_size = 4096\n encoding_size = 10\n lr = 5e-3\n eval_freq = 100\n\n data = np.load(\"tinynerf.npz\")\n images = torch.from_numpy(data[\"images\"])\n poses = torch.from_numpy(data[\"poses\"])\n focal = torch.tensor(data[\"focal\"])\n\n height, width = images.shape[1:3]\n dataset_size = images.shape[0]\n\n model = NeRF(encoding_size).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n pbar = tqdm(total=num_iters)\n for i in range(num_iters):\n choice = np.random.randint(100)\n image = images[choice].to(device)\n pose = poses[choice].to(device)\n origins, directions = cast_rays(height, width, focal, pose, 
device=device)\n points, z = sample_points(\n origins, directions, z_near, z_far, num_samples, device=device\n )\n points = points.reshape(-1, 3)\n encoding = positional_encoding(points, encoding_size)\n batches = torch.split(encoding, chunk_size)\n samples = [model(batch) for batch in batches]\n samples = torch.cat(samples, dim=0)\n samples = samples.reshape(height, width, num_samples, 4)\n rgb = render_volume(samples, z)\n\n optimizer.zero_grad()\n loss = F.mse_loss(rgb, image)\n loss.backward()\n optimizer.step()\n\n pbar.set_postfix(loss=loss.item())\n pbar.update()\n pbar.close()\n","sub_path":"nerf/nerf.py","file_name":"nerf.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"619996915","text":"from collections import defaultdict as dd\nfrom random import randint, uniform\nfrom math import log\nfrom sys import exit\nfrom tqdm import tqdm\n\nNUM_TOPICS = 3\nFILEPATH = \"../files/data/wiki-en-documents.word\"\n\nALPHA = 0.01\nBETA = 0.01\n\ndef sample_one(probs):\n z = sum(probs)\n remaining = uniform(0, z)\n for i in range(len(probs)):\n remaining -= probs[i]\n if remaining <= 0:\n return i\n exit()\n\ndef add_counts(word, topic, doc_id, amounts, xcnts, ycnts):\n xcnts[f\"{topic}\"] += 1\n xcnts[f\"{word}|{topic}\"] += 1\n\n ycnts[f\"{doc_id}\"] += 1\n ycnts[f\"{topic}|{doc_id}\"] += 1\n\n\ndef initialize():\n xcorpus, ycorpus = [], []\n xcounts, ycounts = dd(int), dd(int)\n unique_words = set()\n\n for line in open(FILEPATH, \"r\", encoding=\"utf-8\"):\n doc_id = len(xcorpus)\n words = line.strip().split()\n topics = []\n\n for word in words:\n unique_words.add(word)\n topic = randint(0, NUM_TOPICS-1)\n topics.append(topic)\n\n add_counts(word, topic, doc_id, 1, xcounts, ycounts)\n\n xcorpus.append(words)\n ycorpus.append(topics)\n\n num_words = len(unique_words)\n return xcorpus, ycorpus, xcounts, ycounts, num_words\n\ndef sampling():\n xcorps, ycorps, xcnts, ycnts, num_words = initialize()\n ll = 0\n\n for i in tqdm(range(len(xcorps)), desc=\"sent\"):\n for j in range(len(xcorps[i])):\n x, y = xcorps[i][j], ycorps[i][j]\n add_counts(x, y, i, -1, xcnts, ycnts)\n\n probs = []\n for k in range(NUM_TOPICS):\n x_prob = (xcnts[f\"{x}|{k}\"] + ALPHA) / (xcnts[k] + ALPHA * num_words)\n y_prob = (ycnts[f\"{k}|{i}\"] + BETA) / (ycnts[i] + BETA * NUM_TOPICS)\n probs.append(x_prob * y_prob)\n new_y = sample_one(probs)\n ll += log(probs[new_y])\n add_counts(x, new_y, i, 1, xcnts, ycnts)\n ycorps[i][j] = new_y\n\n print(ll)\n\nif __name__ == \"__main__\":\n sampling()","sub_path":"takahashi/tutorial09/learn_lda.py","file_name":"learn_lda.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"584702788","text":"#running incremental ILP for incremental number of RRHs\nimport simpy\nimport functools\nimport random as np\nimport time\nfrom enum import Enum\nimport numpy\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\nimport batch_teste as lp\nimport pureBatchILP as plp\nimport copy\nimport simDynamicTemporalRRH as sim\n\n#util class\nutil = sim.Util()\n#keep the power consumption\npower_consumption = []\ninc_power_consumption = []\n#to count the activated resources\nactivated_nodes, activated_lambdas, activated_dus, activated_switchs, redirected = ([] for i in range(5))\n#to count the activated resources\ninc_activated_nodes, inc_activated_lambdas, inc_activated_dus, inc_activated_switchs, inc_redirected = ([] for i in range(5))\n#to control the number of RRHs in each run\nr = range(1,50)\n\n#method of the batch sequential scheduling\ndef seqBatch():\n\tcount_nodes, count_lambdas, count_dus, count_switches = (0 for i in range(4))\n\t#list of RRHs to be scheduled\n\trrhs = []\n\t#create the RRHs\n\tfor i in r:\n\t\trrhs.append(util.createRRHs(i,[],[],[]))\n\t#calls the ILP for each set of RRHs\n\tfor i in rrhs:\n\t\tilp = plp.ILP(i, range(len(i)), plp.nodes, plp.lambdas)\n\t\ts = ilp.run()\n\t\t#print(lp.nodeState)\n\t\tif s != None:\n\t\t\t#print(\"Optimal solution is: {}\".format(s.objective_value))\n\t\t\tsol = ilp.return_solution_values()\n\t\t\tilp.updateValues(sol)\n\t\t\tif redirected:\n\t\t\t\tredirected.append(sum((redirected[-1], len(sol.var_k))))\n\t\t\telse:\n\t\t\t\tredirected.append(len(sol.var_k))\n\t\t\tpower_consumption.append(util.getPowerConsumption(plp))\n\t\t\t#counts the current activated nodes, lambdas, DUs and switches\n\t\t\tfor i in plp.nodeState:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_nodes += 1\n\t\t\tactivated_nodes.append(count_nodes)\n\t\t\tfor i in plp.lambda_state:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_lambdas += 1\n\t\t\tactivated_lambdas.append(count_lambdas)\n\t\t\tfor i in plp.du_state:\n\t\t\t\tfor j in i:\n\t\t\t\t\tif j == 1:\n\t\t\t\t\t\tcount_dus += 1\n\t\t\tactivated_dus.append(count_dus)\n\t\t\tfor i in plp.switch_state:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_switches += 1\n\t\t\tactivated_switchs.append(count_switches)\n\t\t\tilp.resetValues()\n\t\t\tcount_nodes = 0\n\t\t\tcount_lambdas = 0\n\t\t\tcount_dus = 0\n\t\t\tcount_switches = 0\n\tilp.resetValues()\n\n#method for the sequential incremental\ndef seqInc():\n\tcount_nodes, count_lambdas, count_dus, count_switches = (0 for i in range(4))\n\trrhs = util.createRRHs(max(r),[],[],[])\n\tnp.shuffle(rrhs)\n\t#calls the ilp for each rrh on rrhs\n\tfor i in rrhs:\n\t\trrh_list = []\n\t\trrh_list.append(i)\n\t\t#print(\"Matrix is {}\".format(i.rrhs_matrix))\n\t\tilp = plp.ILP(rrh_list, range(0,1), plp.nodes, plp.lambdas)\n\t\ts = ilp.run()\n\t\tif s != None:\n\t\t\t#print(\"Optimal solution is: {}\".format(s.objective_value))\n\t\t\tsol = ilp.return_solution_values()\n\t\t\tilp.updateValues(sol)\n\t\t\tif inc_redirected:\n\t\t\t\tinc_redirected.append(sum((inc_redirected[-1], len(sol.var_k))))\n\t\t\telse:\n\t\t\t\tinc_redirected.append(len(sol.var_k))\n\t\t\tinc_power_consumption.append(util.getPowerConsumption(plp))\n\t\t\t#counts the current activated nodes, lambdas, DUs and switches\n\t\t\tfor i in plp.nodeState:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_nodes += 1\n\t\t\tinc_activated_nodes.append(count_nodes)\n\t\t\tfor i in plp.lambda_state:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_lambdas += 1\n\t\t\tinc_activated_lambdas.append(count_lambdas)\n\t\t\tfor i in 
plp.du_state:\n\t\t\t\tfor j in i:\n\t\t\t\t\tif j == 1:\n\t\t\t\t\t\tcount_dus += 1\n\t\t\tinc_activated_dus.append(count_dus)\n\t\t\tfor i in plp.switch_state:\n\t\t\t\tif i == 1:\n\t\t\t\t\tcount_switches += 1\n\t\t\tinc_activated_switchs.append(count_switches)\n\t\t\tcount_nodes = 0\n\t\t\tcount_lambdas = 0\n\t\t\tcount_dus = 0\n\t\t\tcount_switches = 0\n\t\telse:\n\t\t\tprint(\"noooo\")\n\nseqBatch()\nseqInc()\nprint(redirected)\nprint(inc_redirected)\nmin_power = min(min(power_consumption), min(inc_power_consumption))\nmax_power = max(max(power_consumption), max(inc_power_consumption))\nmin_lambdas = min(min(activated_lambdas), min(inc_activated_lambdas))\nmax_lambdas = max(max(activated_lambdas), max(inc_activated_lambdas))\nmin_nodes = min(min(activated_nodes), min(inc_activated_nodes))\nmax_nodes = max(max(activated_nodes), max(inc_activated_nodes))\nmin_dus = min(min(activated_dus), min(inc_activated_dus))\nmax_dus = max(max(activated_dus), max(inc_activated_dus))\nmin_switch = min(min(activated_switchs), min(inc_activated_switchs))\nmax_switch = max(max(activated_switchs), max(inc_activated_switchs))\nmin_redirected = min(min(redirected), min(inc_redirected))\nmax_redirected = max(max(redirected), max(inc_redirected))\n\n#generate the plots for power consumption\nplt.plot(power_consumption, label = \"Batch ILP\")\nplt.plot(inc_power_consumption, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(range(len(power_consumption))), max(range(len(power_consumption))), 5))\nplt.yticks(numpy.arange(min_power, max_power, 500))\nplt.ylabel('Power Consumption')\nplt.xlabel(\"Number of ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/power_consumption.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated lambdas\nplt.plot(activated_lambdas, label = \"Batch ILP\")\nplt.plot(inc_activated_lambdas, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(r), max(r), 5))\nplt.yticks(numpy.arange(min_lambdas, max_lambdas+1, 1))\nplt.ylabel('Activated Lambdas')\nplt.xlabel(\"Number of ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_lambdas.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated nodes\nplt.plot(activated_nodes, label = \"Batch ILP\")\nplt.plot(inc_activated_nodes, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(r), max(r), 5))\nplt.yticks(numpy.arange(min_nodes, max_nodes+1, 1))\nplt.ylabel('Activated Nodes')\nplt.xlabel(\"Number of ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_nodes.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated DUs\nplt.plot(activated_dus, label = \"Batch ILP\")\nplt.plot(inc_activated_dus, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(r), max(r), 5))\nplt.yticks(numpy.arange(min_dus, max_dus, 5))\nplt.ylabel('Activated DUs')\nplt.xlabel(\"Number of ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_DUs.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated Switches\nplt.plot(activated_switchs, label = \"Batch ILP\")\nplt.plot(inc_activated_switchs, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(r), max(r), 5))\nplt.yticks(numpy.arange(min_switch, max_switch+1, 1))\nplt.ylabel('Activated Switches')\nplt.xlabel(\"Number of 
ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_switches.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for redirected DUs\nplt.plot(redirected, label = \"Batch ILP\")\nplt.plot(inc_redirected, label = \"Inc ILP\")\nplt.xticks(numpy.arange(min(r), max(r), 5))\nplt.yticks(numpy.arange(min_redirected, max_redirected, 2))\nplt.ylabel('Redirected RRHs')\nplt.xlabel(\"Number of ONUs\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/redirected_rrhs.png', bbox_inches='tight')\nplt.clf()\n","sub_path":"inc_staticRRHs.py","file_name":"inc_staticRRHs.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"66504479","text":"from codewars.test import Test\n\ndef find_it(seq):\n num_count = {}\n for idx, num in enumerate(seq):\n if num in num_count:\n continue\n else:\n num_count[num] = seq.count(num)\n\n for key, val in num_count.items():\n if val % 2 != 0:\n return int(key)\n\ntest = Test()\n# test.describe(\"Example\")\ntest.assert_equals(find_it([20,1,-1,2,-2,3,3,5,5,1,2,4,20,4,-1,-2,5]), 5)\n","sub_path":"codewars/6kyu/findodd.py","file_name":"findodd.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"361328756","text":"#========================================================================\n# argv[1]: code\n# argv[2]: valid_path\n# argv[3]: rank\n# argv[4]: session / user\n#========================================================================\nimport sys\ntry:\n code = int(sys.argv[1])\nexcept IndexError:\n code=0\nexcept ValueError:\n pass\nwin_path = f'../features/4_winner/'\nsecond_path = '../features/2_second_valid/'\ngdrive_path = '../features/9_gdrive/'\nignore_list = []\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport shutil\nimport glob\nimport re\nHOME = os.path.expanduser('~')\nsys.path.append(f\"{HOME}/kaggle/data_analysis/library/\")\nimport utils\nfrom utils import logger_func\n\n# path rename\n# win_path = '../features/4_winner/*.gz'\n# path_list = glob.glob(win_path)\n# for path in path_list:\n# tmp = utils.read_pkl_gzip(path)\n# if path.count('107_his_train_'):\n# utils.to_pkl_gzip(path=path.replace(r'107_his_train_', '107_his_train_his_'), obj=tmp)\n# elif path.count('107_his_test_'):\n# utils.to_pkl_gzip(path=path.replace(r'107_his_test_', '107_his_test_his_'), obj=tmp)\n# sys.exit()\n\n\ndef to_win_dir_Nfeatures(path='../features/1_first_valid/*.gz', N=100):\n path_list = glob.glob(path)\n np.random.seed(1208)\n np.random.shuffle(path_list)\n path_list = path_list[:N]\n for path in path_list:\n try:\n shutil.move('train_'+path, win_path)\n shutil.move('test_'+path, win_path)\n except shutil.Error:\n shutil.move('train_'+path, '../features/9_delete')\n shutil.move('test_'+path, '../features/9_delete')\n\n\ndef move_to_second_valid(best_select=[], path='', rank=0, key_list=[]):\n logger = logger_func()\n if len(best_select)==0:\n try:\n if path=='':\n path = sys.argv[2]\n except IndexError:\n pass\n best_select = pd.read_csv(path)\n try:\n if rank==0:\n rank = int(sys.argv[3])\n except IndexError:\n pass\n best_feature = best_select.query(f\"rank>={rank}\")['feature'].values\n try:\n best_feature = [col for col in best_feature if col.count(sys.argv[4])]\n except IndexError:\n best_feature = [col for col in best_feature if col.count('')]\n\n if len(best_feature)==0:\n sys.exit()\n\n path_list = glob.glob('../features/4_winner/*')\n\n for feature in best_feature:\n move_path = []\n for path in path_list:\n filename = re.search(r'/([^/.]*).gz', path).group(1)\n # if path.count(feature) and feature not in ignore_list:\n # if feature==filename:\n if feature==filename.replace('stan_', ''):\n # print(f\"{filename} | {feature}\")\n move_path.append(path)\n\n for move in move_path:\n try:\n shutil.move(move, second_path)\n except FileNotFoundError:\n logger.info(f'FileNotFoundError: {feature}')\n except shutil.Error:\n logger.info(f'Shutil Error: {feature}')\n print(f'move to third_valid:{len(best_feature)}')\n\n\ndef move_to_use():\n\n try:\n path = sys.argv[2]\n except IndexError:\n path = ''\n best_select = pd.read_csv(path)\n best_feature = best_select['feature'].values\n\n win_list = glob.glob(win_path + '*')\n first_list = glob.glob('../features/1_first_valid/*')\n second_list = glob.glob('../features/2_second_valid/*')\n third_list = glob.glob('../features/3_third_valid/*')\n tmp_list = glob.glob('../features/5_tmp/*')\n path_list = third_list\n # path_list = third_list + tmp_list + win_list\n # path_list = first_list + second_list + third_list + tmp_list + win_list\n\n done_list = []\n for feature in best_feature:\n for path in path_list:\n try:\n filename = re.search(r'/([^/.]*).gz', path).group(1)\n except AttributeError:\n continue\n # 
if path.count(feature):\n # if filename==feature:\n if filename.replace('stan_', '')==feature:\n try:\n shutil.move(path, win_path)\n # filename = re.search(r'/([^/.]*).gz', path).group(1)\n done_list.append(filename)\n except shutil.Error:\n pass\n # shutil.move(path, gdrive_path)\n except FileNotFoundError:\n pass\n # shutil.move(path, gdrive_path)\n\n logger = logger_func()\n best_feature = [f for f in best_feature]\n\n loss_list = set(list(best_feature)) - set(done_list)\n logger.info(f\"Loss List:\")\n for loss in loss_list:\n logger.info(f\"{loss}\")\n\n\ndef move_feature(feature_name, move_path='../features/9_delete'):\n\n try:\n shutil.move(f'../features/4_winner/{feature_name}.gz', move_path)\n except FileNotFoundError:\n print(f'FileNotFound. : {feature_name}.gz')\n pass\n\n\ndef main():\n if code==0:\n move_to_second_valid()\n elif code==1:\n move_to_use()\n elif code==2:\n move_file()\n elif code==4:\n to_win_dir_Nfeatures(N=int(sys.argv[2]))\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"py/select_feature.py","file_name":"select_feature.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"80437305","text":"# *****************************************************************************\n# Copyright (c) 2019, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\n\n| :class:`pandas.SeriesGroupBy` functions and operators implementations in SDC\n| Also, it contains Numba internal operators which are required for :class:`pandas.SeriesGroupBy` type handling\n\n\"\"\"\n\n\nimport numpy\nimport pandas\n\nfrom numba import types\nfrom numba.extending import overload_method\nfrom numba.errors import TypingError\n\nfrom sdc.datatypes.hpat_pandas_seriesgroupby_types import SeriesGroupByType\n\n\n@overload_method(SeriesGroupByType, 'count')\ndef hpat_pandas_seriesgroupby_count(self):\n \"\"\"\n Pandas Series method :meth:`pandas.core.groupby.GroupBy.count` implementation.\n\n .. only:: developer\n\n Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_groupby_count\n\n Parameters\n -----------\n self: :obj:`pandas.core.groupby.SeriesGroupBy`\n The object this method is working on\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object with count of values within each group\n \"\"\"\n\n _func_name = 'Method seriesgroupby.count().'\n\n if not isinstance(self, SeriesGroupByType):\n raise TypingError('{} The object must be a pandas.seriesgroupby. 
Given: {}'.format(_func_name, self))\n\n def hpat_pandas_seriesgroupby_count_impl(self):\n \"\"\"\n Pandas algorithm:\n https://github.com/pandas-dev/pandas/blob/b1049540fe207f8d8071ebfbd44e8f5224c98bad/pandas/core/groupby/generic.py#L1339\n \"\"\"\n\n # is not implemented yet.\n # return self._data.value_counts()\n #\n # workaround\n freq = {}\n for x in self._data:\n if x not in freq:\n freq[x] = 1\n else:\n freq[x] += 1\n\n # Numba requires to translate dict() into list()\n keys = []\n values = []\n for key, value in freq.items():\n keys.append(key)\n values.append(value)\n\n return pandas.Series(values, keys)\n\n return hpat_pandas_seriesgroupby_count_impl\n","sub_path":"sdc/datatypes/hpat_pandas_seriesgroupby_functions.py","file_name":"hpat_pandas_seriesgroupby_functions.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"99055669","text":"import ROOT\n\n\ndef prtStable(pid):\n return abs(pid) in (211, 321, 11, 13, 2212)\n\n\ndef heavyFlavor(pid):\n return abs(pid) in (411, 421, 431, 4122, 511, 521, 531, 5122)\n\n\n# Writer class.\nclass Writer:\n def __init__(self):\n from collections import OrderedDict\n\n self.vars = OrderedDict()\n self.null = ROOT.vector(\"double\")(1, 0)\n\n def init(self, tree):\n for key, val in self.vars.iteritems():\n tree.Branch(key, val)\n\n def add(self, var):\n self.vars[var] = ROOT.vector(\"double\")()\n\n def var(self, var, val=None, idx=-2):\n if not var in self.vars:\n return self.null.back()\n var = self.vars[var]\n if idx < -1:\n var.push_back(0 if val == None else val)\n if idx < 0:\n idx = var.size() - 1\n elif idx >= var.size():\n idx = -1\n if idx < 0:\n return self.null[0]\n if val != None:\n var[idx] = val\n return var[idx]\n\n def size(self, var):\n return self.vars[var].size()\n\n def clear(self):\n for key, val in self.vars.iteritems():\n val.clear()\n\n\ndef hitSel(mhit, fhit, pz):\n hit = mhit\n hit_type = -1\n if mhit.T() != 0:\n hit_type = 0\n if mhit.T() == 0 and fhit.T() != 0:\n hit = fhit\n hit_type = 1\n elif fhit.T() != 0 and (pz / abs(pz)) * fhit.Z() < (pz / abs(pz)) * mhit.Z():\n hit = fhit\n hit_type = 1\n return [hit.X(), hit.Y(), hit.Z(), hit_type]\n\n\ndef Hits(module, rffoil, scatter, prt):\n hits = []\n p = prt.pAbs()\n if p == 0:\n return hits\n vx, vy, vz = prt.xProd(), prt.yProd(), prt.zProd()\n px, py, pz = prt.px() / p, prt.py() / p, prt.pz() / p\n p3 = ROOT.TVector3(prt.px(), prt.py(), prt.pz())\n nrf = 0\n mhit = module.intersect(vx, vy, vz, px, py, pz)\n fhit = rffoil.intersect(vx, vy, vz, px, py, pz)\n hit = hitSel(mhit, fhit, pz)\n while hit[3] >= 0:\n vx, vy, vz = [hit[0], hit[1], hit[2]]\n if hit[3] == 0:\n hits += [[vx, vy, vz]]\n fx0 = 0.01\n if hit[3] > 0:\n nrf += 1\n fx0 = 0.005\n p3 = scatter.smear(p3, fx0)\n px, py, pz = p3.X() / p3.Mag(), p3.Y() / p3.Mag(), p3.Z() / p3.Mag()\n vx, vy, vz = vx + px * 0.1, vy + py * 0.1, vz + pz * 0.1\n mhit = module.intersect(vx, vy, vz, px, py, pz)\n fhit = rffoil.intersect(vx, vy, vz, px, py, pz)\n hit = hitSel(mhit, fhit, pz)\n return hits\n","sub_path":"gen/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"307937575","text":"#!/apollo/sbin/envroot \"$ENVROOT/python3.6/bin/python3.6\"\n\nimport re\nimport sys\nimport os\nimport argparse\nimport git\nimport logging\nfrom dxd_tools_dev.modules import mcm\nfrom dxd_tools_dev.modules import mcm_variables\n\nlogging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)\n\ndef parse_args(fake_args=None):\n main_parser = argparse.ArgumentParser(prog='acl_manage_mcm.py')\n main_parser.add_argument(\"-pl\", \"--prefix_list_devices\", type=str, dest=\"prefix_list_devices\", required=True, help=\"Comma separated vc-car/vc-dar device names\")\n main_parser.add_argument(\"-no_plu\", \"--no_prefix_list_update\", default=True, action='store_false', dest=\"no_prefix_list_update\", help=\"Specify -no_plu flag to not update prefix lists. Prefix list will be updated by default (if -no_plu is not specified)\")\n main_parser.add_argument(\"-vccor\", \"--vc_cor_brick\", type=str, dest=\"vc_cor_brick\", help=\"VC_COR brick on which ACLs need to be deployed\")\n main_parser.add_argument(\"-brtra\", \"--br_tra_devices\", type=str, dest=\"br_tra_devices\", help=\"Comma separated br-tra devices on which ACLs need to be deployed\")\n main_parser.add_argument(\"-ae\", \"--ae_interfaces\", type=str, dest=\"ae_interfaces\", help=\"Comma separated ae interfaces - e.g ae30,ae31\")\n main_parser.add_argument(\"-dae\", \"--dx2_ae_interfaces\", type=str, dest=\"dx2_ae_interfaces\", help=\"Comma separated ae interfaces toward vc-bdr - e.g ae30,ae31\")\n main_parser.add_argument(\"-bp\", \"--bgp_prestaged\", type=str, default=\"True\", help=\"set to True if BGP is prestaged, else False. Default is True\")\n main_parser.add_argument(\"-sb\", \"--ssh_bastion\", type=str, dest=\"ssh_bastion\", required=True, help=\"SSH BASTION e.g neteng-bastion-iad-6004.iad6.amazon.com\")\n main_parser.add_argument(\"-adv\", \"--add_devices\", default=False, action='store_true', dest=\"add_devices\", help=\"Optional - use this flag for adding devices to ACL Manage\")\n return main_parser.parse_args()\n\ndef git_clone(package, path= \"/home/\" + os.getlogin() + \"/\"):\n full_path = path + package\n full_url = 'ssh://git.amazon.com/pkg/' + package\n try:\n repo = git.Repo.clone_from(full_url ,to_path = f'{full_path}')\n return repo\n except:\n logging.error('Could not clone {}. Exception {}'.format(package, sys.exc_info()))\n return None\n\ndef main():\n cli_arguments = parse_args()\n\n logging.info('Checking User provided input devices')\n\n if not cli_arguments.ae_interfaces and not cli_arguments.dx2_ae_interfaces:\n print( '-ae/--ae_interfaces and -dae/--dx2_ae_interfaces are missing. you need to specify atleast one')\n raise\n\n if cli_arguments.vc_cor_brick and cli_arguments.br_tra_devices:\n logging.error('vc-cor and br-tra both passed as arguments. Please specify either vc-cor or br-tra and re-run the script. Existing')\n sys.exit()\n\n for device in cli_arguments.prefix_list_devices.split(','):\n if not re.match('.*-vc-(car|dar|bdr)-.*', device):\n raise ValueError('Device not supported. {} must be VC-CAR or VC-DAR or VC-BDR'.format(cli_arguments.vc_car_dar_devices))\n\n if cli_arguments.vc_cor_brick:\n if not re.search('^[a-z][a-z][a-z][0-9][0-9]?-vc-cor-b[0-9]$',cli_arguments.vc_cor_brick):\n raise ValueError('Specify vc-cor brick. 
It should follow {site}-vc-cor-b{brick_number} standard')\n deploy_acl_devices = cli_arguments.vc_cor_brick + '-r1,' + cli_arguments.vc_cor_brick + '-r2,' + cli_arguments.vc_cor_brick + '-r3,' + cli_arguments.vc_cor_brick + '-r4'\n\n if cli_arguments.br_tra_devices:\n for device in cli_arguments.br_tra_devices.split(','):\n if not re.match('.*-br-tra-.*', device):\n raise ValueError('Device not supported. {} must be comma separated br-tra'.format(cli_arguments.br_tra_devices))\n deploy_acl_devices = cli_arguments.br_tra_devices\n\n\n\n\n logging.info('Creating variable file')\n ae_interfaces = ''\n dx2_ae_interfaces = ''\n \n devices_vc_car_dar_bdr = cli_arguments.prefix_list_devices\n ssh_bastion = cli_arguments.ssh_bastion\n add_devices = cli_arguments.add_devices\n if cli_arguments.ae_interfaces:\n ae_interfaces = cli_arguments.ae_interfaces\n if cli_arguments.dx2_ae_interfaces:\n dx2_ae_interfaces = cli_arguments.dx2_ae_interfaces\n bgp_prestaged = cli_arguments.bgp_prestaged\n\n car_dar_counter = 0\n bdr_counter = 0\n\n for device in devices_vc_car_dar_bdr.split(','):\n if 'car' in device or 'dar' in device:\n car_dar_counter += 1\n elif 'bdr' in device:\n bdr_counter += 1\n\n if car_dar_counter != 0:\n no_prefix_list_update = cli_arguments.no_prefix_list_update\n else:\n no_prefix_list_update = False\n\n variable_file = mcm_variables.create_variables_vc_cor_acl_manage(no_prefix_list_update,devices_vc_car_dar_bdr,deploy_acl_devices,ae_interfaces,dx2_ae_interfaces,ssh_bastion,add_devices,bgp_prestaged)\n logging.info('variable file created')\n\n logging.info('Creating MCM')\n\n if bdr_counter == 0:\n mcm_info = mcm.mcm_creation(\"acl_manage\",no_prefix_list_update,devices_vc_car_dar_bdr,deploy_acl_devices)\n else: \n mcm_info = mcm.mcm_creation(\"dx2_acl_manage\",no_prefix_list_update,devices_vc_car_dar_bdr,deploy_acl_devices)\n mcm_id = mcm_info[0]\n mcm_uid = mcm_info[1]\n mcm_overview = mcm_info[2]\n logging.info('https://mcm.amazon.com/cms/{} created'.format(mcm_id))\n\n logging.info('Updating variable file with MCM number')\n variable_file_updated = variable_file.replace('MCM_NUMBER',mcm_id)\n logging.info('variable file updated')\n\n # git operations\n logging.info('Performing git operations')\n username = os.getlogin()\n if os.path.exists(f'/home/{username}/DxVpnCM2014/') == True:\n repo = git.Repo(f'/home/{username}/DxVpnCM2014')\n origin = repo.remote('origin')\n logging.info('DxVpnCM2014 repo exists')\n if os.path.exists(f'/home/{username}/DxVpnCM2014/cm/{username}') == True:\n logging.info('{} exists under DxVpnCM2014/cm directory'.format(username))\n logging.info('Performing git pull')\n origin.pull()\n else:\n logging.info('{} does not exists under DxVpnCM2014/cm directory'.format(username))\n os.mkdir(f'/home/{username}/DxVpnCM2014/cm/{username}')\n logging.info('User {} successfully created user directory under DxVpnCM2014/cm'.format(username))\n logging.info('Performing git pull')\n origin.pull()\n else:\n logging.info('DxVpnCM2014 repo does not exist')\n logging.info('Performing git clone on DxVpnCM2014')\n cloned = git_clone('DxVpnCM2014')\n\n if cloned:\n logging.info('git clone successful for DxVpnCM2014')\n repo = git.Repo(f'/home/{username}/DxVpnCM2014')\n origin = repo.remote('origin')\n if os.path.exists(f'/home/{username}/DxVpnCM2014/cm/{username}') == True:\n logging.info('{} exists under DxVpnCM2014/cm directory'.format(username))\n logging.info('Performing git pull')\n origin.pull()\n else:\n logging.info('{} does not exists under DxVpnCM2014/cm 
directory'.format(username))\n os.mkdir(f'/home/{username}/DxVpnCM2014/cm/{username}')\n logging.info('User {} successfully created user directory under DxVpnCM2014/cm'.format(username))\n logging.info('Performing git pull')\n origin.pull()\n else:\n logging.error('git clone failed for DxVpnCM2014. Clone DxVpnCM2014 manually and re-run the script')\n sys.exit()\n\n os.mkdir(f'/home/{username}/DxVpnCM2014/cm/{username}/{mcm_id}')\n with open(f'/home/{username}/DxVpnCM2014/cm/{username}/{mcm_id}/{mcm_id}.var','w') as var_file:\n var_file.write(variable_file_updated)\n var_file.close()\n\n logging.info(f'Created variable file for Daryl /home/{username}/DxVpnCM2014/cm/{username}/{mcm_id}/{mcm_id}.var')\n logging.info('Prepping for variable file to be pushed to DxVpnCM2014 repo')\n repo = git.Repo(f'/home/{username}/DxVpnCM2014')\n logging.info('git add')\n repo.index.add([f'/home/{username}/DxVpnCM2014/cm/{username}/{mcm_id}/{mcm_id}.var'])\n logging.info('git status\\n{}\\n'.format(repo.git.status()))\n logging.info('git commit')\n repo.index.commit(f'variable file for {mcm_id}')\n origin = repo.remote('origin')\n logging.info('git push')\n origin.push()\n logging.info('variable file /home/{}/DxVpnCM2014/cm/{}/{}/{}.var successfully pushed to DxVpnCM2014 repo'.format(username,username,mcm_id,mcm_id))\n\n mcm_overview_append = f\"\"\"\n###Lock MCM\n```\n/apollo/env/Daryl/bin/darylscriptc --lock --cm {mcm_id}\n ```\n\n###Dry-run\n```\n/apollo/env/Daryl/bin/daryl.pl --cm {mcm_id} --mode dryrun --no-auto-dashboard --no-hds\n ```\n\n###Execute MCM\n```\n/apollo/env/Daryl/bin/daryl.pl --cm {mcm_id} --mode execute\n```\n\n###Variable File\n\nhttps://code.amazon.com/packages/DxVpnCM2014/blobs/mainline/--/cm/{username}/{mcm_id}/{mcm_id}.var\n\n \"\"\"\n # update MCM overview and steps\n mcm_overview_final = mcm_overview + mcm_overview_append\n mcm_steps = [{'title':'Daryl Info','time':300,'description':f'Daryl URL: brazil://DxVpnCM2014/cm/{username}/{mcm_id}/{mcm_id}.var'}]\n mcm.mcm_update(mcm_id,mcm_uid,mcm_overview_final,mcm_steps)\n logging.info('{} successfully updated, please lock the MCM through Daryl and submit for approvals\\n'.format(mcm_id))\n\nif __name__ == '__main__':\n main()\n","sub_path":"aws/acl_manage_mcm.py","file_name":"acl_manage_mcm.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"573185452","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ja', '0003_auto_20160125_1353'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='articles',\n options={'verbose_name': 'Artykuł', 'verbose_name_plural': 'Artykuły', 'ordering': ['date']},\n ),\n ]\n","sub_path":"ja/migrations/0004_auto_20160125_1354.py","file_name":"0004_auto_20160125_1354.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"319029473","text":"#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), 'code'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n\n#%%\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport pytesseract\nimport cv2\nimport os\nimport glob\nimport matplotlib.pyplot as plt\n\nos.chdir('..')\nos.chdir('Images')\nf = open('./list.txt', 'r')\ncategories = f.read().split('\\n')[:-1]\ncategories.sort()\n\n\n#%%\nfor i, category in enumerate(categories):\n print(category)\n for name in glob.glob('./' + category + '/*'):\n img = cv2.imread(name)\n \n b, g, r = cv2.split(img)\n img = cv2.merge([r, g, b]) \n \n plt.imshow(img)\n plt.show()\n print(pytesseract.image_to_string(img, lang='Hangul'))\n\n\n#%%\n\n\n\n\n","sub_path":"code/.ipynb_checkpoints/ocr_notebook-checkpoint.py","file_name":"ocr_notebook-checkpoint.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"168800597","text":"import hashlib\nimport os\n\ndef md5(fname):\n \"\"\"\n Creates a md5sum hash from filepath to be used for checksum\n \"\"\"\n hashmd5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hashmd5.update(chunk)\n return hashmd5.hexdigest()\n\ndef compareMD5(file1, file2):\n \"\"\"\n Compares between 2 MD5SUM hash to compare between file\n \"\"\"\n if file1 == file2:\n return True\n else:\n return False\n\ndef grab_files(directory):\n \"\"\"\n Traverses from the top directory and returns a list containing the file\n name and file path\n \"\"\"\n arrPath = []\n for root, dirs, files in os.walk(directory):\n for name in files:\n filedir = os.path.join(root,name) \n arrPath.append((filedir))\n return arrPath\n\ndef createFile(arrPath,name='MD5SUM.txt'):\n \"\"\"\n Create file containing name and md5sum value\n \"\"\"\n filetext = open(name, mode='w+')\n for name, filedir in arrPath:\n print(name, filedir, file=filetext)\n\ndef compareLocalMD5(path = None, checksum1 = 'MD5SUM.txt'):\n \"\"\"\n Compares local file with latest MD5SUM. Input must be in the form of string\n ex: .\\thisFolder\n \"\"\"\n # Creates the md5 of local content\n files_in_dir = grab_files(path)\n local_list = [] # List of filenames and its md5 value\n for i in files_in_dir:\n y = md5(i)\n local_list.append((i,y))\n local_dict = dict(local_list) # Change it to a dictionary\n\n # Open the previously created MD5SUM\n fileMD = open(checksum1, \"r\").read()\n listMD = fileMD.split(\"\\n\")\n temp =[]\n for i in listMD:\n temp.append(i[:-33])\n temp.append(i[-32:])\n listMD = temp\n dictMD = dict(zip(*[iter(listMD)]*2))\n try:\n dictMD.pop(\"\",None)\n except:\n pass\n \n \n # Determine which files are missing/created anew\n result = []\n for key in dictMD:\n if key in local_dict:\n if dictMD[key] == local_dict[key]: \n result.append((key, \"MATCH\"))\n elif dictMD[key] != local_dict[key]:\n result.append((key, \"UPDATE\"))\n if key not in local_dict:\n result.append((key, \"DELETE\"))\n for key in local_dict:\n if key not in dictMD:\n result.append((key, \"UPLOAD\"))\n return result\n \n \ndef compareFileDifference(checksum1 = None, checksum2 = None):\n \"\"\"\n Compare actual file difference and\n return a list with status of file\n \"\"\"\n # Open Files\n fileMD = open(checksum1, \"r\").read()\n fileMDOutput = open(checksum2, \"r\").read()\n # Split 'em and put 'em as dicks\n listMD = fileMD.split(\"\\n\")\n listMDOutput = fileMDOutput.split(\"\\n\")\n\n temp =[]\n for i in listMD:\n temp.append(i[:-33])\n temp.append(i[-32:])\n listMD = temp\n temp =[]\n for i in listMDOutput:\n temp.append(i[:-33])\n temp.append(i[-32:])\n listMDOutput = temp\n\n dictMD = dict(zip(*[iter(listMD)]*2))\n dictMDOutput = dict(zip(*[iter(listMDOutput)]*2))\n \n \n # Determine which have the same key and same md5 value\n result = []\n for key in dictMD:\n if key in dictMDOutput:\n if dictMD[key] == dictMDOutput[key]: \n result.append((key, \"MATCH\"))\n elif dictMD[key] != dictMDOutput[key]:\n result.append((key, \"MISMATCH\"))\n if key not in dictMDOutput:\n result.append((key, \"MISSINGCLIENT\"))\n for key in dictMDOutput:\n if key not in dictMD:\n result.append((key, \"MISSINGSERVER\"))\n return result\n\n\n\n## x = md5sum.grab_files(FILE_PATH)\n## lista = []\n## for i in x:\n## y = md5sum.md5(i)\n## lista.append((i,y))\n## md5sum.createFile(lista)\n\n\n\n 
\n","sub_path":"Desktop/Server/md5sum.py","file_name":"md5sum.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"576291581","text":"def main():\r\n fruits = ['banana','apple','pear','waxberry']\r\n fruits += ['pineapple','greap','mango']\r\n\r\n for fruit in fruits:\r\n print(fruit.title(), end = ' ')\r\n print()\r\n\r\n #sort函数不会修改传入列表\r\n fruits_sort1 = sorted(fruits)\r\n fruits_sort2 = sorted(fruits, reverse = True)\r\n fruits_sort3 = sorted(fruits, key = len)\r\n\r\n print(fruits_sort1)\r\n print(fruits_sort2)\r\n print(fruits_sort3)\r\n\r\n #在原列表上进行排序\r\n fruits.sort(reverse = True)\r\n print(fruits)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"python_study/基础结构与模块/列表排序.py","file_name":"列表排序.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"605565135","text":"from django.shortcuts import render, get_object_or_404,redirect\nfrom orders.models import OrderItem\nfrom orders.forms import OrderCreateForm\nfrom cart.cart import Cart\n\nfrom pages.models import Products,Sku\n\nfrom django.http import HttpResponse\nfrom django.core.mail import EmailMessage\nfrom django.core.mail import send_mail\n\n\n\n\nimport stripe # new\n\nfrom django.conf import settings\nfrom django.views.generic.base import TemplateView\nfrom django.shortcuts import render # new\n\nstripe.api_key = settings.STRIPE_SECRET_KEY # new\n\n\n\nclass HomePageView(TemplateView):\n template_name = 'homep.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['key'] = settings.STRIPE_PUBLISHABLE_KEY\n return context\n\n\ndef charge(request): # new\n cart = Cart(request)\n amt=cart.get_total_price2()\n print(amt)\n if request.method == 'POST':\n\n charge = stripe.Charge.create(\n amount=int(amt),\n currency='usd',\n description='A Django charge',\n source=request.POST['stripeToken']\n \n )\n\n\n\n \n\n\n\n\n\n\n status='success'\n context={'status':status,}\n \n #return render(request, 'charge.html',context)\n return redirect('payments:order_create')\n\ndef callback(request):\n \n #profile = UserProfile.objects.get(user=request.user)\n\n r = request.POST('https://connect.stripe.com/oauth/token', params={\n 'client_secret': settings.STRIPE_SECRET_KEY\n \n }).json()\n\n try:\n access_token = r['access_token']\n refresh_token = r['refresh_token']\n publishable_key = r['stripe_publishable_key']\n #profile.save()\n\n messages.success(request, \"Your account was successfully connected to Stripe.\")\n except KeyError:\n messages.error(request, \"Unable to connect your account to Stripe.\")\n\n return redirect('homep')\n\ndef order_create(request):\n #cart=scart\n \n cart = Cart(request)\n\n \n print(cart.cart)\n #request.session['cart']=cart\n if request.method == 'POST':\n print('start to work on db')\n form = OrderCreateForm(request.POST)\n if form.is_valid():\n order = form.save(False)\n for item in cart:\n print(item)\n product_id=item['product_id']\n quantity=item['quantity'] \n product = get_object_or_404(Products, id=product_id)\n sproduct = Sku.objects.filter(product= product,colour=item['color'],size=item['size'])\n if sproduct.count() == 0:\n stock=0\n \n else:\n \n order = form.save()\n stock=sproduct[0].stock\n OrderItem.objects.create(\n order=order,\n product=product,\n price=item['price'],\n size=item['size'],\n colour=item['color'],\n quantity=item['quantity'],\n )\n print(sproduct[0].stock)\n id=sproduct[0].id\n print(id)\n newq=Sku.objects.get(id=id)\n newstock=int(sproduct[0].stock)-quantity\n newq.stock=newstock\n newq.save()\n print('after:'+str(newstock))\n \n \n \n cart.clear()\n #return redirect('payments:homep')\n return render(request, 'orders/order/created.html', {'order': order, 'cart':cart})\n else:\n form = OrderCreateForm()\n print('blank')\n return render(request, 'homep.html', {'form': form, 'cart':cart,})","sub_path":"payments/viewsb4updatetran.py","file_name":"viewsb4updatetran.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"464128851","text":"import time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport utils\n\n# logging setup\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nclass Trainer(object):\n\n def __init__(self, model, eta, mom, no_loss_reg, vec_dim, cuda=False):\n # set the random seeds for every instance of trainer.\n # needed to ensure reproduction of random word vectors for out of vocab terms\n torch.manual_seed(1234)\n np.random.seed(1234)\n self.cuda = cuda\n self.unk_term = np.random.uniform(-0.25, 0.25, vec_dim)\n\n self.reg = 1e-5\n self.no_loss_reg = no_loss_reg\n self.model = model\n self.criterion = nn.CrossEntropyLoss()\n #self.criterion = nn.NLLLoss()\n self.optimizer = optim.SGD(self.model.parameters(), lr=eta, momentum=mom, \\\n weight_decay=(0 if no_loss_reg else self.reg))\n\n self.data_splits = {}\n self.embeddings = {}\n self.vec_dim = vec_dim\n\n\n def load_input_data(self, dataset_root_folder, word_vectors_cache_file, \\\n train_set_folder, dev_set_folder, test_set_folder, load_ext_feats=True):\n for set_folder in [test_set_folder, dev_set_folder, train_set_folder]:\n if set_folder:\n questions, sentences, labels, maxlen_q, maxlen_s, vocab = \\\n utils.read_in_dataset(dataset_root_folder, set_folder)\n\n self.data_splits[set_folder] = [questions, sentences, labels, maxlen_q, maxlen_s]\n\n default_ext_feats = [np.zeros(4)] * len(self.data_splits[set_folder][0])\n self.data_splits[set_folder].append(default_ext_feats)\n\n utils.load_cached_embeddings(word_vectors_cache_file, vocab, self.embeddings,\n [] if \"train\" in set_folder else self.unk_term)\n\n\n def regularize_loss(self, loss):\n\n flattened_params = []\n\n for p in self.model.parameters():\n f = p.data.clone()\n flattened_params.append(f.view(-1))\n\n fp = torch.cat(flattened_params)\n\n loss = loss + 0.5 * self.reg * fp.norm() * fp.norm()\n\n # for p in self.model.parameters():\n # loss = loss + 0.5 * self.reg * p.norm() * p.norm()\n\n return loss\n\n\n def _train(self, xq, xa, ext_feats, ys):\n\n self.optimizer.zero_grad()\n output = self.model(xq, xa, ext_feats)\n loss = self.criterion(output, ys)\n # logger.debug('loss after criterion {}'.format(loss))\n\n # NOTE: regularizing location 1\n if not self.no_loss_reg:\n loss = self.regularize_loss(loss)\n # logger.debug('loss after regularizing {}'.format(loss))\n\n loss.backward()\n\n # logger.debug('AFTER backward')\n #logger.debug('params {}'.format([p for p in self.model.parameters()]))\n # logger.debug('params grads {}'.format([p.grad for p in self.model.parameters()]))\n\n # NOTE: regularizing location 2. 
It would seem that location 1 is correct?\n #if not self.no_loss_reg:\n # loss = self.regularize_loss(loss)\n # logger.debug('loss after regularizing {}'.format(loss))\n\n self.optimizer.step()\n\n # logger.debug('AFTER step')\n #logger.debug('params {}'.format([p for p in self.model.parameters()]))\n # logger.debug('params grads {}'.format([p.grad for p in self.model.parameters()]))\n\n return loss.data[0], self.pred_equals_y(output, ys)\n\n\n def pred_equals_y(self, pred, y):\n _, best = pred.max(1)\n best = best.data.long().squeeze()\n return torch.sum(y.data.long() == best)\n\n\n def test(self, set_folder, batch_size):\n logger.info('----- Predictions on {} '.format(set_folder))\n\n questions, sentences, labels, maxlen_q, maxlen_s, ext_feats = \\\n self.data_splits[set_folder]\n word_vectors, vec_dim = self.embeddings, self.vec_dim\n\n self.model.eval()\n\n batch_size = 1\n\n total_loss = 0.0\n total_correct = 0.0\n num_batches = np.ceil(len(questions)/batch_size)\n y_pred = np.zeros(len(questions))\n ypc = 0\n\n for k in range(int(num_batches)):\n batch_start = k * batch_size\n batch_end = (k+1) * batch_size\n # convert raw questions and sentences to tensors\n batch_inputs, batch_labels = self.get_tensorized_inputs(\n questions[batch_start:batch_end],\n sentences[batch_start:batch_end],\n labels[batch_start:batch_end],\n ext_feats[batch_start:batch_end],\n word_vectors, vec_dim\n )\n\n xq, xa, x_ext_feats = batch_inputs[0]\n y = batch_labels[0]\n\n pred = self.model(xq, xa, x_ext_feats)\n loss = self.criterion(pred, y)\n pred = torch.exp(pred)\n total_loss += loss\n # total_correct += self.pred_equals_y(pred, y)\n\n y_pred[ypc] = pred.data.squeeze()[1]\n # ^ we want to score for relevance, NOT the predicted class\n ypc += 1\n\n # logger.info('{}_correct {}'.format(set_folder, total_correct))\n # logger.info('{}_loss {}'.format(set_folder, total_loss.data[0]))\n logger.info('{} total {}'.format(set_folder, len(labels)))\n # logger.info('{}_loss = {:.4f}, acc = {:.4f}'.format(set_folder, total_loss.data[0]/len(labels), float(total_correct)/len(labels))\n #logger.info('{}_loss = {:.4f}'.format(set_folder, total_loss.data[0]/len(labels)))\n\n return y_pred\n\n\n def train(self, set_folder, batch_size, debug_single_batch):\n train_start_time = time.time()\n\n questions, sentences, labels, maxlen_q, maxlen_s, ext_feats = \\\n self.data_splits[set_folder]\n word_vectors, vec_dim = self.embeddings, self.vec_dim\n\n # set model for training modep\n self.model.train()\n\n train_loss, train_correct = 0., 0.\n num_batches = np.ceil(len(questions)/float(batch_size))\n\n for k in range(int(num_batches)):\n batch_start = k * batch_size\n batch_end = (k+1) * batch_size\n\n # convert raw questions and sentences to tensors\n batch_inputs, batch_labels = self.get_tensorized_inputs(\n questions[batch_start:batch_end],\n sentences[batch_start:batch_end],\n labels[batch_start:batch_end],\n ext_feats[batch_start:batch_end],\n word_vectors, vec_dim\n )\n\n xq, xa, x_ext_feats = batch_inputs[0]\n\n ys = batch_labels[0]\n\n batch_loss, batch_correct = self._train(xq, xa, x_ext_feats, ys)\n\n # logger.debug('batch_loss {}, batch_correct {}'.format(batch_loss, batch_correct))\n train_loss += batch_loss\n # train_correct += batch_correct\n if debug_single_batch:\n break\n\n # logger.info('train_correct {}'.format(train_correct))\n logger.info('train_loss {}'.format(train_loss))\n logger.info('total training batches = {}'.format(num_batches))\n logger.info('train_loss = {:.4f}'.format(\n train_loss/num_batches\n 
))\n logger.info('training time = {:.3f} seconds'.format(time.time() - train_start_time))\n return train_correct/num_batches\n\n\n def make_input_matrix(self, sentence, word_vectors, vec_dim):\n terms = sentence.strip().split()[:60]\n # NOTE: we are truncating the inputs to 60 words.\n\n word_embeddings = torch.zeros(len(terms), vec_dim).type(torch.DoubleTensor)\n for i in range(len(terms)):\n word = terms[i]\n emb = torch.from_numpy(word_vectors[word])\n word_embeddings[i] = emb\n\n input_tensor = torch.zeros(1, vec_dim, len(terms))\n input_tensor[0] = torch.transpose(word_embeddings, 0, 1)\n if self.cuda and torch.cuda.is_available():\n input_tensor = input_tensor.cuda()\n return input_tensor\n\n\n def get_tensorized_inputs(self, batch_ques, batch_sents, batch_labels, batch_ext_feats, \\\n word_vectors, vec_dim):\n batch_size = len(batch_ques)\n # NOTE: ideal batch size is one, because sentences are all of different length.\n # In other words, we have no option but to feed in sentences one by one into the model\n # and compute loss at the end.\n\n # TODO: what if the sentences in a batch are all of different lengths?\n # - should be have the longest sentence as 2nd dim?\n # - would zero endings work for other smaller sentences?\n\n y = torch.LongTensor(batch_size).type(torch.LongTensor)\n if self.cuda and torch.cuda.is_available():\n y = y.cuda()\n\n tensorized_inputs = []\n for i in range(len(batch_ques)):\n xq = Variable(self.make_input_matrix(batch_ques[i], word_vectors, vec_dim))\n xs = Variable(self.make_input_matrix(batch_sents[i], word_vectors, vec_dim))\n ext_feats = torch.FloatTensor(batch_ext_feats[i])\n if self.cuda and torch.cuda.is_available():\n ext_feats = ext_feats.cuda()\n ext_feats = Variable(ext_feats)\n ext_feats = torch.unsqueeze(ext_feats, 0)\n y[i] = batch_labels[i]\n tensorized_inputs.append((xq, xs, ext_feats))\n\n return tensorized_inputs, Variable(y)\n","sub_path":"sm_cnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"338220733","text":"class AbstractGroup(object):\n \"\"\"base class for containers of sprites\n\n AbstractGroup does everything needed to behave as a normal group. You can\n easily subclass a new group class from this or the other groups below if\n you want to add more features.\n\n Any AbstractGroup-derived sprite groups act like sequences and support\n iteration, len, and so on.\n\n \"\"\"\n\n # dummy val to identify sprite groups, and avoid infinite recursion\n _spritegroup = True\n\n def __init__(self):\n self.spritedict = {}\n self.lostsprites = []\n\n def sprites(self):\n \"\"\"get a list of sprites in the group\n\n Group.sprite(): return list\n\n Returns an object that can be looped over with a 'for' loop. (For now,\n it is always a list, but this could change in a future version of\n pygame.) Alternatively, you can get the same information by iterating\n directly over the sprite group, e.g. 'for sprite in group'.\n\n \"\"\"\n return list(self.spritedict)\n\n def add_internal(self, sprite):\n self.spritedict[sprite] = 0\n\n def remove_internal(self, sprite):\n r = self.spritedict[sprite]\n if r:\n self.lostsprites.append(r)\n del self.spritedict[sprite]\n\n def has_internal(self, sprite):\n return sprite in self.spritedict\n\n def copy(self):\n \"\"\"copy a group with all the same sprites\n\n Group.copy(): return Group\n\n Returns a copy of the group that is an instance of the same class\n and has the same sprites in it.\n\n \"\"\"\n return self.__class__(self.sprites())\n\n def __iter__(self):\n return iter(self.sprites())\n\n def __contains__(self, sprite):\n return self.has(sprite)\n\n def add(self, *sprites):\n \"\"\"add sprite(s) to group\n\n Group.add(sprite, list, group, ...): return None\n\n Adds a sprite or sequence of sprites to a group.\n\n \"\"\"\n for sprite in sprites:\n # It's possible that some sprite is also an iterator.\n # If this is the case, we should add the sprite itself,\n # and not the iterator object.\n if isinstance(sprite, Sprite):\n if not self.has_internal(sprite):\n self.add_internal(sprite)\n sprite.add_internal(self)\n else:\n try:\n # See if sprite is an iterator, like a list or sprite\n # group.\n self.add(*sprite)\n except (TypeError, AttributeError):\n # Not iterable. This is probably a sprite that is not an\n # instance of the Sprite class or is not an instance of a\n # subclass of the Sprite class. Alternately, it could be an\n # old-style sprite group.\n if hasattr(sprite, '_spritegroup'):\n for spr in sprite.sprites():\n if not self.has_internal(spr):\n self.add_internal(spr)\n spr.add_internal(self)\n elif not self.has_internal(sprite):\n self.add_internal(sprite)\n sprite.add_internal(self)\n\n def remove(self, *sprites):\n \"\"\"remove sprite(s) from group\n\n Group.remove(sprite, list, or group, ...): return None\n\n Removes a sprite or sequence of sprites from a group.\n\n \"\"\"\n # This function behaves essentially the same as Group.add. It first\n # tries to handle each argument as an instance of the Sprite class. If\n # that failes, then it tries to handle the argument as an iterable\n # object. If that failes, then it tries to handle the argument as an\n # old-style sprite group. 
Lastly, if that fails, it assumes that the\n # normal Sprite methods should be used.\n for sprite in sprites:\n if isinstance(sprite, Sprite):\n if self.has_internal(sprite):\n self.remove_internal(sprite)\n sprite.remove_internal(self)\n else:\n try:\n self.remove(*sprite)\n except (TypeError, AttributeError):\n if hasattr(sprite, '_spritegroup'):\n for spr in sprite.sprites():\n if self.has_internal(spr):\n self.remove_internal(spr)\n spr.remove_internal(self)\n elif self.has_internal(sprite):\n self.remove_internal(sprite)\n sprite.remove_internal(self)\n\n def has(self, *sprites):\n \"\"\"ask if group has a sprite or sprites\n\n Group.has(sprite or group, ...): return bool\n\n Returns True if the given sprite or sprites are contained in the\n group. Alternatively, you can get the same information using the\n 'in' operator, e.g. 'sprite in group', 'subgroup in group'.\n\n \"\"\"\n return_value = False\n\n for sprite in sprites:\n if isinstance(sprite, Sprite):\n # Check for Sprite instance's membership in this group\n if self.has_internal(sprite):\n return_value = True\n else:\n return False\n else:\n try:\n if self.has(*sprite):\n return_value = True\n else:\n return False\n except (TypeError, AttributeError):\n if hasattr(sprite, '_spritegroup'):\n for spr in sprite.sprites():\n if self.has_internal(spr):\n return_value = True\n else:\n return False\n else:\n if self.has_internal(sprite):\n return_value = True\n else:\n return False\n\n return return_value\n\n def update(self, *args):\n \"\"\"call the update method of every member sprite\n\n Group.update(*args): return None\n\n Calls the update method of every member sprite. All arguments that\n were passed to this method are passed to the Sprite update function.\n\n \"\"\"\n for s in self.sprites():\n s.update(*args)\n\n def draw(self, surface):\n \"\"\"draw all sprites onto the surface\n\n Group.draw(surface): return None\n\n Draws all of the member sprites onto the given surface.\n\n \"\"\"\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(spr.image, spr.rect)\n self.lostsprites = []\n\n def clear(self, surface, bgd):\n \"\"\"erase the previous position of all sprites\n\n Group.clear(surface, bgd): return None\n\n Clears the area under every drawn sprite in the group. The bgd\n argument should be Surface which is the same dimensions as the\n screen surface. 
The bgd could also be a function which accepts\n the given surface and the area to be cleared as arguments.\n\n \"\"\"\n if callable(bgd):\n for r in self.lostsprites:\n bgd(surface, r)\n for r in self.spritedict.values():\n if r:\n bgd(surface, r)\n else:\n surface_blit = surface.blit\n for r in self.lostsprites:\n surface_blit(bgd, r, r)\n for r in self.spritedict.values():\n if r:\n surface_blit(bgd, r, r)\n\n def empty(self):\n \"\"\"remove all sprites\n\n Group.empty(): return None\n\n Removes all the sprites from the group.\n\n \"\"\"\n for s in self.sprites():\n self.remove_internal(s)\n s.remove_internal(self)\n\n def __nonzero__(self):\n return truth(self.sprites())\n\n def __len__(self):\n \"\"\"return number of sprites in group\n\n Group.len(group): return int\n\n Returns the number of sprites contained in the group.\n\n \"\"\"\n return len(self.sprites())\n\n def __repr__(self):\n return \"<%s(%d sprites)>\" % (self.__class__.__name__, len(self))\n\n\nclass Group(AbstractGroup):\n \"\"\"container class for many Sprites\n\n pygame.sprite.Group(*sprites): return Group\n\n A simple container for Sprite objects. This class can be subclassed to\n create containers with more specific behaviors. The constructor takes any\n number of Sprite arguments to add to the Group. The group supports the\n following standard Python operations:\n\n in test if a Sprite is contained\n len the number of Sprites contained\n bool test if any Sprites are contained\n iter iterate through all the Sprites\n\n The Sprites in the Group are not ordered, so the Sprites are drawn and\n iterated over in no particular order.\n\n \"\"\"\n\n def __init__(self, *sprites):\n AbstractGroup.__init__(self)\n self.add(*sprites)\n\n\nRenderPlain = Group\nRenderClear = Group\n","sub_path":"game/collision_test/pygame_legacy/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"92564254","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import cohen_kappa_score, recall_score, precision_score, f1_score, accuracy_score, confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nfrom xgboost import XGBClassifier\nle = preprocessing.LabelEncoder()\n\ndata_dir = '/home/pramod/pramod_work/pdc_hackathon/data/pdc_ml_hackathon_2019-master/data/'\ntrain_data = pd.read_json(data_dir+'train.json', encoding='ascii')\ntrain_data.to_csv(data_dir+'train.csv', index=False)\nvalidation_data = pd.read_json(data_dir+'validation.json', encoding='ascii')\nvalidation_data.to_csv(data_dir+'validation.csv', index=False)\ntrain_data = pd.read_csv(data_dir+\"train.csv\")\ntrain_y1 = train_data.petition_category\ntrain_y2 = train_data.petition_is_victory\ndel train_data['petition_category']\ndel train_data['petition_is_victory']\nclf = RandomForestClassifier(max_depth=2, random_state=21)\ntrain_y1 = le.fit_transform(train_y1)\ntrain_y2 = le.fit_transform(train_y2)\n#initially tring for some of features following taking each feature at a time\ntrain_data[[\"_score\",\"petition_calculated_goal\",\"petition_displayed_signature_count\",\"petition_progress\",\"petition_total_signature_count\",\"petition_weekly_signature_count\",\"petition_primary_target_is_person\",\"petition_primary_target_publicly_visible\",\"petition_primary_target_type\",\"_source_coachable\",\"_source_discoverable\",\"_source_sponsored_campaign\"]]\nclf = LogisticRegression(random_state= 21)\ncolumns = []\naccuracy = []\ntrain_y = train_y2\nc=0\nfor col in train_x.columns:\n print(col)\n columns.append(col)\n train_x1 = train_x[col]\n if(c<5):\n if((any(train_x1.isnull()))|(any(train_x1.isna()))):\n train_x1[np.where(train_x1=='None')[0]] = (pd.DataFrame(train_x1)).median().iloc[0,0]\n if(c>5):\n if(train_x1.dtype!='bool'):\n if(any(train_x1=='None')):\n train_x1[np.where(train_x1=='None')[0]] = (pd.DataFrame(train_x1)).mode().iloc[0,0]\n X_train, X_test, y_train, y_test = train_test_split(train_x1, train_y, test_size=0.33, random_state=42)\n X_train = X_train.reshape(-1,1)\n X_test = X_test.reshape(-1,1)\n model= clf.fit(X_train,y_train)\n y_pred = model.predict(X_test)\n print(f1_score(y_test, y_pred))\n accuracy.append(f1_score(y_test,y_pred))\n c=c+1\n\n########undersampling majority class examples to deal with data imbalance########\nindices = np.concatenate([random.sample(np.where(train_y2==0)[0],len(np.where(train_y2==1)[0])),np.where(train_y2==1)[0]])\npd.DataFrame(indices).to_csv(data_dir+\"indices_usedafterUnderSampling.csv\")\ntrain_data_undersampled = train_data.iloc[indices,:]\n#,\"petition_primary_target_type\",\"petition_user_country_code\"\ntrain_y2_undersampled = train_y2[indices]\ntrain_x = train_data_undersampled[[\"_score\",\"petition_calculated_goal\",\"petition_displayed_signature_count\",\"petition_progress\",\"petition_total_signature_count\",\"petition_weekly_signature_count\",\"petition_primary_target_is_person\",\"petition_primary_target_publicly_visible\",\"_source_coachable\",\"petition_primary_target_type\",]]\n#clf = LogisticRegression(random_state= 21)\ncolumns = []\naccuracy = []\ntrain_y = train_y2_undersampled\nc=0\ntrain_data_undersampled_preprocessed = pd.DataFrame()\nfor col in train_x.columns:\n 
print(col)\n columns.append(col)\n train_x1 = train_x[col]\n if(c<5):\n if((any(train_x1.isnull()))|(any(train_x1.isna()))):\n train_x1[np.where(train_x1=='None')[0]] = (pd.DataFrame(train_x1)).median().iloc[0,0]\n if(c>5):\n if(train_x1.dtype!='bool'):\n if(any(train_x1=='None')):\n train_x1[np.where(train_x1=='None')[0]] = (pd.DataFrame(train_x1)).mode().iloc[0,0]\n if(col==\"petition_primary_target_type\"):\n train_x1 = le.fit_transform(train_x1)\n X_train, X_test, y_train, y_test = train_test_split(train_x1, train_y, test_size=0.33, random_state=42)\n X_train = X_train.reshape(-1,1)\n X_test = X_test.reshape(-1,1)\n model= clf.fit(X_train,y_train)\n y_pred = model.predict(X_test)\n print(f1_score(y_test, y_pred))\n accuracy.append(f1_score(y_test,y_pred))\n c=c+1\n train_data_undersampled_preprocessed = pd.concat([train_data_undersampled_preprocessed,train_x1],axis=1)\ntrain_data_undersampled_preprocessed = pd.concat([train_data_undersampled_preprocessed,train_y2_undersampled],axis=1)\ntrain_data_undersampled_preprocessed.to_csv(data_dir +\"train_data_undersampled_preprocessed_8.csv\")\npd.DataFrame(train_x1).to_csv(data_dir+\"undersampled_preprocessed_9thFeature.csv\")\n\n#overall results\nclf = RandomForestClassifier(max_depth=2, random_state=21)\ntrain_data_undersampled_preprocessed = pd.read_csv(data_dir+\"train_data_undersampled_preprocessed_8.csv\")\ntrain_y = train_data_undersampled_preprocessed.petition_is_victory\ndel train_data_undersampled_preprocessed['petition_is_victory']\ntrain_x = train_data_undersampled_preprocessed\nX_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.33, random_state=42)\n#X_train = X_train.reshape(-1,1)\n#X_test = X_test.reshape(-1,1)\nmodel= clf.fit(X_train,y_train)\ny_pred = model.predict(X_test)\nprint(f1_score(y_test, y_pred))\nconfusion_matrix(y_test,y_pred)\naccuracy_score(y_test,y_pred)\nc=0\ntest_data = pd.read_csv(data_dir+\"validation.csv\")\ntest_data = test_data[[\"_score\",\"petition_calculated_goal\",\"petition_displayed_signature_count\",\"petition_progress\",\"petition_total_signature_count\",\"petition_weekly_signature_count\",\"petition_primary_target_publicly_visible\",\"_source_coachable\",\"petition_primary_target_type\",]]\ntest_data_prepro=pd.DataFrame()\nfor col in test_data.columns:\n print(col)\n test_x1 = test_data[col]\n if(c<5):\n if((any(test_x1.isnull()))|(any(test_x1.isna()))):\n train_x1[np.where(test_x1=='None')[0]] = (pd.DataFrame(test_x1)).median().iloc[0,0]\n if(c>5):\n if(test_x1.dtype!='bool'):\n if(any(test_x1=='None')):\n test_x1[np.where(test_x1=='None')[0]] = (pd.DataFrame(test_x1)).mode().iloc[0,0]\n if(col==\"petition_primary_target_type\"):\n test_x1 = le.fit_transform(test_x1)\n test_data_prepro = pd.concat([test_data_prepro,test_x1],axis=1)\n c=c+1\ntest_data_prepro.to_csv(data_dir+\"validation_prepro.csv\")\npd.DataFrame(test_x1).to_csv(data_dir+\"validation_9.csv\")\ntest_data_prepro = pd.read_csv(data_dir+\"validation_prepro.csv\")\ntest_data_prepro.columns = X_test.columns\ny_pred = model.predict(test_data_prepro)\npd.DataFrame(y_pred).to_csv(data_dir+\"outputclass.csv\")\n","sub_path":"code/isvictory_final_approach.py","file_name":"isvictory_final_approach.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"164753183","text":"#!/usr/bin/python3\nimport re\nfrom nltk import pos_tag\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.util import bigrams\nimport sys\nimport getopt\nimport string\nimport os\nimport csv\nimport _pickle as pickle\nimport math\nfrom queue import PriorityQueue\nfrom utility import tokenize, is_valid\n\npunc = string.punctuation\nblock_count = 0 # running count of the number of blocks\nmax_len = 0\nBLOCKS = \"blocks\"\nDICTIONARY = {} # stores (key, value) as (doc_id, doc_len)\nRELEVANT = {}\nREL_TAGS = ['JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\n\ndef usage():\n print(\"usage: \" + sys.argv[0] + \" -i directory-of-documents -d dictionary-file -p postings-file\")\n\n\ndef build_index(in_dir, out_dict, out_postings):\n '''\n Builds index from documents stored in the input directory,\n then output the dictionary file and postings file\n '''\n print('indexing...')\n # This is an empty method\n # Pls implement your code in below\n csv.field_size_limit(sys.maxsize)\n limit = 20\n os.makedirs(BLOCKS, exist_ok=True)\n\n with open(in_dir, 'r+', encoding=\"utf-8\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n doc_list = list(csv_reader)\n doc_list = doc_list[1:] # ignore header line\n doc_chunks = [doc_list[i * limit:(i + 1) * limit] for i in range((len(doc_list) + limit - 1) // limit)]\n for chunk in doc_chunks:\n spimi_invert(chunk)\n f = open(out_dict, 'w+', encoding=\"utf-8\")\n f.close()\n f = open(out_postings, 'w+', encoding=\"utf-8\")\n f.close()\n offset = record_doc_length(out_dict, out_postings)\n merge(BLOCKS, out_dict, out_postings)\n write_rel_to_disk()\n\n\ndef record_doc_length(out_dict, out_postings):\n '''\n Records docIDs and their respective normalised doc lengths\n '''\n global DICTIONARY\n result = ''\n\n accum = 0 # for gap encoding\n for doc_id, doc_len in sorted(DICTIONARY.items()):\n gap = doc_id - accum\n result += str(gap) + '-' + str(doc_len) + ' '\n accum += gap\n\n # (doc_frequency, absolute_offset, accumulative_offset)\n dict_expr = \"* 0 \" + str(len(result)) + \"\\n\"\n\n write_to_file(out_dict, dict_expr)\n write_to_file(out_postings, result)\n\n\ndef write_to_file(file, content):\n '''\n Writes out lines to disk for search phase later\n '''\n fw = open(file, 'a', encoding=\"utf-8\")\n fw.write(''.join(content))\n fw.close()\n\n\ndef spimi_invert(chunk):\n '''\n Executes SPIMI Invert algorithm for each chunk of documents\n For each chunk, store a master index\n For each entry in the chunk, collect term frequencies and calculate the weights (for normalised doc length)\n Add [doc id, term freq] to the master index and log the normalised document length\n '''\n global block_count, DICTIONARY\n print('block:', block_count)\n index = {} # index for the whole chunk\n\n for entry in chunk:\n entry_index = {}\n doc_id, title, content, date, court = entry[0], entry[1], entry[2], entry[3], entry[4]\n\n # process title words\n title_words = []\n gen_unigram(entry_index, doc_id, title.rstrip(), title_words, 0)\n gen_bigram(entry_index, doc_id, title_words, 0)\n\n # process content words\n content_words = []\n for sent in sent_tokenize(content): # content\n gen_unigram(entry_index, doc_id, sent, content_words, 1)\n gen_bigram(entry_index, doc_id, content_words, 1)\n\n # process dates\n gen_unigram(entry_index, doc_id, date.rstrip(), [], 2)\n\n # process court words\n court_words = []\n gen_unigram(entry_index, doc_id, court.rstrip(), court_words, 3)\n 
gen_bigram(entry_index, doc_id, court_words, 3)\n\n doc_len = 0\n for token, posting_list in entry_index.items():\n doc_len += (1 + math.log10(posting_list[1]))**2\n if token not in index:\n index[token] = [posting_list]\n else:\n curr_posting = index[token]\n curr_posting.append(posting_list)\n index[token] = curr_posting\n DICTIONARY[int(doc_id)] = float(\"{:.2f}\".format(math.sqrt(doc_len)))\n trim_rel_dict_entry(doc_id, entry_index)\n block_count += 1\n output_file = \"block\" + str(block_count) + \".txt\"\n write_block_to_disk(index, output_file)\n\n\ndef gen_unigram(entry_index, doc_id, section_content, section_words, zone_index):\n '''\n Generates unigrams based on given text\n '''\n rel_words = []\n for word in word_tokenize(section_content):\n if is_valid(word):\n tagged_word = pos_tag([word])\n tag = tagged_word[0][1]\n tokenized = tokenize(word)\n if tag in REL_TAGS and tokenized not in rel_words:\n rel_words.append(tokenized)\n section_words.append(tokenized)\n if tokenized not in entry_index:\n zones = [0, 0, 0, 0]\n zones[zone_index] += 1 # add title zone\n entry_index[tokenized] = [int(doc_id), 1, zones]\n else:\n curr_count = entry_index[tokenized][1]\n zones = entry_index[tokenized][2]\n zones[zone_index] += 1\n entry_index[tokenized] = [int(doc_id), curr_count + 1, zones]\n # Adding relevant terms to relevant dictionary\n if doc_id not in RELEVANT:\n RELEVANT[doc_id] = rel_words\n else:\n existing_rel_word = RELEVANT[doc_id]\n for word in rel_words:\n if word not in existing_rel_word:\n existing_rel_word.append(word)\n RELEVANT[doc_id] = existing_rel_word\n\n\ndef gen_bigram(entry_index, doc_id, section_words, zone_index):\n '''\n Generates bigrams based on given text\n '''\n for entry in list(bigrams(section_words)):\n if is_valid(entry[0]) and is_valid(entry[1]):\n bigram = tokenize(entry[0]) + \"_\" + tokenize(entry[1])\n if bigram not in entry_index:\n zones = [0, 0, 0, 0]\n zones[zone_index] += 1\n entry_index[bigram] = [int(doc_id), 1, zones]\n else:\n curr_count = entry_index[bigram][1]\n zones = entry_index[bigram][2]\n zones[zone_index] += 1\n entry_index[bigram] = [int(doc_id), curr_count + 1, zones]\n\n\ndef trim_rel_dict_entry(doc_id, entry_index):\n '''\n Trims the relevant terms for the given document to the top 5\n '''\n terms = RELEVANT[doc_id]\n\n num_terms = len(terms)\n\n # use tf calculation to rank the terms\n tf_scores = {}\n for term in terms:\n # example of entry_index entry: 'court': [246407, 3, [0, 2, 0, 1]]\n term_freq = entry_index[term][1]\n tf_scores[term] = 1 + math.log(term_freq, 10)\n \n tf_scores.update((term, score / num_terms) for term, score in tf_scores.items())\n\n # sort tf_scores from highest to lowest\n sorted_tf_scores = dict(sorted(tf_scores.items(), key=lambda item: item[1], reverse=True))\n\n # trim relevant dictionary entry to top 5 scoring items\n RELEVANT[doc_id] = list(sorted_tf_scores.keys())[:5]\n\n\ndef write_block_to_disk(index, output_file):\n '''\n Writes out a block to disk in /blocks folder\n '''\n global max_len\n index_items = index.items()\n max_len = max(max_len, len(index_items))\n for key, value in index_items: # sorting each postings list\n value.sort() # sort by doc_id\n index_items = sorted(index_items) # sort terms\n output = open(os.path.join(BLOCKS, output_file), 'wb')\n for item in index_items:\n pickle.dump(item, output)\n output.close()\n\n\ndef write_rel_to_disk():\n '''\n Writes the dictionary of relevant terms for each document to disk\n '''\n rel_items = RELEVANT.items()\n output = 
open('rel.txt', 'wb')\n for item in rel_items:\n pickle.dump(item, output)\n output.close()\n\n\ndef merge(in_dir, out_dict, out_postings):\n '''\n Performs n-way merge, reading limit-number of entries from each block at a time\n '''\n print(\"merge\")\n global max_len\n limit = 5\n opened_files = {}\n removed_files = []\n\n # open all files and store in list\n for entry in os.listdir(in_dir):\n opened_files[entry] = open(os.path.join(in_dir, entry), 'rb')\n\n # initialising PQ\n pq = PriorityQueue()\n for i in range(limit):\n for block_name, file_read in opened_files.items():\n if block_name not in removed_files:\n try:\n temp_item = list(pickle.load(file_read))\n # block where the item of (term, docID) is from\n temp_item.append(block_name)\n pq.put(temp_item)\n except EOFError as error:\n removed_files.append(block_name)\n\n term_to_write = ''\n posting_list_to_write = []\n\n while not pq.empty():\n item = pq.get()\n term, posting_list, block_name = item[0], item[1], item[2]\n if term_to_write == '': # first term we are processing\n term_to_write = term\n posting_list_to_write = posting_list\n elif term_to_write != term: # time to write our current term to to disk because we encountered a new term\n posting_list_to_write.sort()\n posting_list_to_write = gap_encoding(posting_list_to_write)\n posting_list_str = posting_to_str(posting_list_to_write)\n\n # (doc_frequency, absolute_offset, accumulative_offset)\n\n\n dict_entry = term_to_write + \" \" + str(len(posting_list_to_write)) + \" \" + str(len(posting_list_str)) + \"\\n\"\n write_to_file(out_dict, dict_entry)\n write_to_file(out_postings, posting_list_str)\n \n # resetting variables for new term\n term_to_write = term\n posting_list_to_write = posting_list\n else: # curr_term == term\n posting_list_to_write.extend(posting_list)\n\n if block_name not in removed_files:\n try:\n unpickler = pickle.Unpickler(opened_files[block_name])\n temp_item = list(unpickler.load())\n # block where the item of (term, docID) is from\n temp_item.append(block_name)\n pq.put(temp_item)\n except EOFError as error:\n removed_files.append(block_name)\n\n\ndef gap_encoding(posting_list):\n '''\n Compresses posting list by adopting gap encoding for doc-IDs\n Example input: [[247336, 1, [0, 1, 0, 0]], [247336, 1, [0, 1, 0, 0]], [2140544, 1, [0, 1, 0, 0]]]\n '''\n final_posting = []\n accum = 0\n for posting in posting_list:\n doc_id = posting[0]\n gap = doc_id - accum\n final_posting.append([gap, posting[1], posting[2]])\n accum += gap\n return final_posting\n\n\ndef posting_to_str(posting_list):\n '''\n Converts a posting list to string form of docID-termFreq-zones\n '''\n result = ''\n for posting in posting_list:\n separator = ''\n zones_lst = posting[2]\n zones_str = ''\n for i in range(len(zones_lst) - 1):\n if i == len(zones_lst) - 1:\n if zones_lst[i] != 0:\n zones_str = separator.join([zones_str, str(zones_lst[i])])\n if zones_lst[i] == 0:\n zones_str = separator.join([zones_str, ','])\n else:\n zones_str = separator.join([zones_str, str(zones_lst[i]) ,','])\n result = separator.join([result, str(posting[0]), '-', zones_str, ' '])\n return result\n\ninput_directory = output_file_dictionary = output_file_postings = None\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')\nexcept getopt.GetoptError:\n usage()\n sys.exit(2)\n\nfor o, a in opts:\n if o == '-i': # input directory\n input_directory = a\n elif o == '-d': # dictionary file\n output_file_dictionary = a\n elif o == '-p': # postings file\n output_file_postings = a\n else:\n assert False, 
\"unhandled option\"\n\nif input_directory == None or output_file_postings == None or output_file_dictionary == None:\n usage()\n sys.exit(2)\n\nbuild_index(input_directory, output_file_dictionary, output_file_postings)\n","sub_path":"HW4/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":12245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"404658360","text":"import matplotlib.pyplot as plt\n\nf = open(\"./wrench_y_list.txt\",\"r\")\nlist_row = []\n\nfor x in f:\n list_row.append(float(x.rstrip(\"\\n\")))\nf.close()\n\ny = list_row\nx = range(1,len(y)+1)\n\nprint(y)\nprint(x)\n\nplt.plot(x, y)\nplt.show()\n\n","sub_path":"vegs_recognition/scripts/list_to_graph_y.py","file_name":"list_to_graph_y.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"603861863","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom constants import const\nimport h5py\nimport sys\n\n\ndef pltmap(H, pcA, pcB):\n\n C = const()\n\n fig = plt.figure(figsize=[7.5, 5])\n\n \"\"\"define the colors of interest\"\"\"\n n_col = len(C['set_id_cal'] + C['set_id_val'])\n colormat = cm.rainbow(np.linspace(0, 1, n_col))\n gray = [.7, .7, .7]\n\n f_red = h5py.File(\"spatial_reduced_L%s.hdf5\" % H, 'r')\n\n \"\"\"plot SVE sets for cal\"\"\"\n\n c = 0\n\n for ii in xrange(len(C['set_id_cal'])):\n\n set_id = C['set_id_cal'][ii]\n\n reduced = f_red.get('reduced_%s' % set_id)[...]\n meanA = reduced[:, pcA].mean()\n meanB = reduced[:, pcB].mean()\n\n plt.text(meanA, meanB+4, C['names_cal'][ii],\n horizontalalignment='center',\n verticalalignment='center')\n\n plt.plot(reduced[:, pcA], reduced[:, pcB],\n marker='s', markersize=6, color=colormat[c, :],\n alpha=0.4, linestyle='')\n plt.plot(meanA, meanB,\n marker='s', markersize=8, color=colormat[c, :],\n linestyle='')\n\n # varmat = np.var(reduced, axis=0)\n # msg = \"total variance for %s: %s\" % (set_id, varmat.sum())\n # rr.WP(msg, C['wrt_file'])\n\n c += 1\n\n \"\"\"plot SVE sets for val\"\"\"\n\n for ii in xrange(len(C['set_id_val'])):\n\n set_id = C['set_id_val'][ii]\n\n reduced = f_red.get('reduced_%s' % set_id)[...]\n meanA = reduced[:, pcA].mean()\n meanB = reduced[:, pcB].mean()\n\n plt.text(meanA, meanB+4, C['names_val'][ii],\n horizontalalignment='center',\n verticalalignment='center')\n\n # plt.text(txtm[ii, 0], txtm[ii, 1], C['names_val'][ii],\n # horizontalalignment='center',\n # verticalalignment='center')\n\n plt.plot(reduced[:, pcA], reduced[:, pcB],\n marker='o', markersize=6, color=colormat[c, :],\n alpha=0.4, linestyle='')\n plt.plot(meanA, meanB,\n marker='o', markersize=8, color=colormat[c, :],\n linestyle='')\n\n # varmat = np.var(reduced, axis=0)\n # msg = \"total variance for %s: %s\" % (set_id, varmat.sum())\n # rr.WP(msg, C['wrt_file'])\n\n c += 1\n\n plt.margins(.2)\n\n plt.xlabel(\"PC%s\" % str(pcA+1))\n plt.ylabel(\"PC%s\" % str(pcB+1))\n\n plt.grid(linestyle='-', alpha=0.15)\n\n \"\"\"create a legend based on points not plotted\"\"\"\n p1 = plt.plot(0, 0, marker='s', markersize=6,\n color=gray, linestyle='', label='calibration')\n p2 = plt.plot(0, 0, marker='o', markersize=6,\n color=gray, linestyle='', label='validation')\n plt.legend(loc='upper left', shadow=True, fontsize='medium', ncol=2)\n p1[0].remove()\n p2[0].remove()\n\n fig.tight_layout()\n\n # plt.legend(bbox_to_anchor=(1.02, 1), loc=2, shadow=True, fontsize='medium')\n # fig.tight_layout(rect=(0, 0, .7, 1))\n\n f_red.close()\n\n fig_name = 'pc%s_pc%s_L%s.png' % (pcA+1, pcB+1, H)\n fig.canvas.set_window_title(fig_name)\n plt.savefig(fig_name)\n\n\nif __name__ == '__main__':\n H = np.int64(sys.argv[1])\n pcA = np.int64(sys.argv[2])\n pcB = np.int64(sys.argv[3])\n\n pltmap(H, pcA, pcB)\n\n plt.show()\n","sub_path":"fip_collab/2016_08_11_polycrystal_FIP/plot_pc_map.py","file_name":"plot_pc_map.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"216191829","text":"import os\nimport sys\nimport time\n# Use the built-in version of scandir/stat if possible, otherwise\n# use the scandir module version\ntry:\n from os import scandir, stat # noqa # pylint: disable=unused-import\nexcept ImportError:\n from scandir import scandir, stat # noqa # pylint: disable=unused-import\n\nfrom graphite.intervals import Interval, IntervalSet\nfrom graphite.carbonlink import CarbonLink\nfrom graphite.logger import log\nfrom django.conf import settings\n\ntry:\n import whisper\nexcept ImportError:\n whisper = False\n\n# The parser was repalcing __readHeader with the ():')\n network = construct_network(file)\n input_val = network.degree_between('a@dra.net', 'z@dra.net')\n expected_output = float('inf')\n self.assertEqual(input_val, expected_output, 'Wrong degree between')\n \n def test_multiple_friends_one_path(self: 'TestDegree') -> None:\n \"\"\"\n A graph with 9 nodes are created. Then 2 specific nodes will be \n selected such that only one path exists between the two nodes. \n Some paths will lead to a deadend. Test to see if even if it are \n deadends in the graph, degree_between can still find the path \n to target node from the beginning node.\n \"\"\"\n \n file = io.StringIO('A Dylan(Uni of Toronto):b@dra.net\\n'\n 'B Ba():c@dra.net,e@dra.net,g@dra.net\\n'\n 'C Ca':\n\t\t\tbreak\n\treturn s\n\n#Setting device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nchencherry = nltk.translate.bleu_score.SmoothingFunction()\npath = sys.path[0]\n\n#Hyperparameter\nencoder_path = os.path.join(path, 'trained', 'EncoderCNN', 'v12')\ndecoder_path = os.path.join(path, 'trained', 'DecoderLSTM', 'v12')\ntrained_encoder = os.path.join(encoder_path, 'encoder-20-final.ckpt')\ntrained_decoder = os.path.join(decoder_path, 'decoder-20-final.ckpt')\nvocabulary_path = os.path.join(path, 'dataset', 'vocab.pkl')\nimage_path = os.path.join(path, 'dataset', 'train_set')\ntitle_path = os.path.join(path, 'dataset', 'train_set.csv')\nval_image_path = os.path.join(path, 'dataset', 'val_set')\nval_title_path = os.path.join(path, 'dataset', 'val_set.csv')\nlogs_path = os.path.join(path, 'logs', 'v12')\nembedding_size = 512\nmomentum = 0.0001\nlearning_rate = 0.0001\nhidden_size = 1024\nnum_layers = 1\nresize = 224\nmax_length = 40\nbatch_size = 8\nnum_workers = 0\nnum_epochs = 26\nstep = 5\n\n#Create folders\nif not os.path.exists(encoder_path):\n\tos.makedirs(encoder_path)\nif not os.path.exists(decoder_path):\n\tos.makedirs(decoder_path)\nif not os.path.exists(logs_path):\n\tos.makedirs(logs_path)\n\n#Create trasforms\ntransforms = transforms.Compose([transforms.Resize(resize), transforms.RandomCrop(resize), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\n#Load vocabulary\nwith open(vocabulary_path, 'rb') as f:\n\tvocabulary = pickle.load(f)\nprint('Vocabulary loaded. 
Size:', len(vocabulary))\n\n#Prepare product title list\ntrain_titles = pd.read_csv(title_path)\ntrain_titles = train_titles['title'].tolist()\nval_titles = pd.read_csv(val_title_path)\nval_titles = val_titles['title'].tolist()\n\n#Prepare data loader\ndata_loader = getLoader(image_path, train_titles, vocabulary, transforms, batch_size, True, num_workers)\nval_loader = getLoader(val_image_path, val_titles, vocabulary, transforms, batch_size, True, num_workers)\nbleu_train_loader = getLoader(image_path, train_titles, vocabulary, transforms, 1, False, num_workers)\nbleu_val_loader = getLoader(val_image_path, val_titles, vocabulary, transforms, 1, False, num_workers)\n\n#Create model\nencoder = EncoderCNN(embedding_size, momentum).to(device)\ndecoder = DecoderLSTM(embedding_size, hidden_size, len(vocabulary), num_layers, max_length).to(device)\n\nencoder.load_state_dict(torch.load(trained_encoder))\ndecoder.load_state_dict(torch.load(trained_decoder))\n\n#Configure optimizer\ncriterion = nn.CrossEntropyLoss()\nparams = list(decoder.parameters()) + list(encoder.parameters())\noptimizer = torch.optim.Adam(params, lr=learning_rate)\n\n#Training\ntotal_step = len(data_loader)\ntotal_val = len(val_loader)\ntotal_bleu_train = len(bleu_train_loader)\ntotal_bleu_val = len(bleu_val_loader)\n\nfor epoch in range(21,num_epochs):\n\tloss_train = 0\n\tloss_val = 0\n\tencoder.train()\n\tdecoder.train()\n\t\n\tprint('Start Train')\n\tfor i, (indexes, images, titles, lengths) in enumerate(data_loader):\n\n\t\timages = images.to(device)\n\t\ttitles = titles.to(device)\n\t\ttargets = pack_padded_sequence(titles, lengths, batch_first=True)[0]\n\n\t\tfeatures = encoder(images)\n\t\toutputs = decoder(features, titles, lengths)\n\n\t\tloss = criterion(outputs, targets)\n\t\tdecoder.zero_grad()\n\t\tencoder.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tloss_train += loss.item() * images.size(0)\n\n\t\tif i % (len(data_loader)//4) == 0:\n\t\t\tprint('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}'.format(epoch, num_epochs, i, total_step, loss.item()))\n\t\t\n\ttorch.save(decoder.state_dict(), os.path.join(decoder_path, 'decoder-{}-final.ckpt'.format(epoch)))\n\ttorch.save(encoder.state_dict(), os.path.join(encoder_path, 'encoder-{}-final.ckpt'.format(epoch)))\n\n\twith open(os.path.join(logs_path, 'train_epoch_loss.txt'), 'a') as f:\n\t\tf.write(str(loss_train/len(train_titles)) + '\\n')\n\n\twith torch.no_grad():\n\t\tencoder.eval()\n\t\tdecoder.eval()\n\n\t\tprint('Start Validate')\n\t\tfor i, (indexes, images, titles, lengths) in enumerate(val_loader):\n\t\t\t\n\t\t\timages = images.to(device)\n\t\t\ttitles = titles.to(device)\n\t\t\ttargets = pack_padded_sequence(titles, lengths, batch_first=True)[0]\n\n\t\t\tfeatures = encoder(images)\n\t\t\toutputs = decoder(features, titles, lengths)\n\n\t\t\tloss = criterion(outputs, targets)\n\n\t\t\tloss_val += loss.item() * images.size(0)\n\n\t\t\tif i % (len(val_loader)//4) == 0:\n\t\t\t\tprint('Epoch [{}/{}], Step [{}/{}], Validation Loss: {:.4f}'.format(epoch, num_epochs, i, total_val, loss.item()))\n\n\t\twith open(os.path.join(logs_path, 'val_epoch_loss.txt'), 'a') as f:\n\t\t\tf.write(str(loss_val/len(val_titles)) + '\\n')\n\n\t\tprint('Epoch [{}/{}], Epoch Train Loss: {:.4f}, Epoch Validation Loss: {:.4f}'.format(epoch, num_epochs, loss_train/len(train_titles), loss_val/len(val_titles)))\n\n\t\tif epoch % step == 0:\n\t\t\tprint('Calculate BLEU Train')\n\t\t\tbleu1_train_score = 0\n\t\t\tfor i, (indexes, images, titles, lengths) in 
enumerate(bleu_train_loader):\n\n\t\t\t\timages = images.to(device)\n\t\t\t\t\n\t\t\t\tfeatures = encoder(images)\n\t\t\t\tsampled_ids = decoder.greedySearch(features)\n\n\t\t\t\tsampled_ids = sampled_ids[0].cpu().numpy()\n\n\t\t\t\ttitles = titles.detach().cpu().numpy()\n\t\t\t\tground_truth = []\n\t\t\t\tfor title in titles:\n\t\t\t\t\tground_truth.append(idToWord(title))\n\n\t\t\t\tgenerated = idToWord(sampled_ids)\n\n\t\t\t\ttemp_score = 0\n\t\t\t\tif len(generated) > 1:\n\t\t\t\t\ttemp_score = nltk.translate.bleu_score.sentence_bleu(ground_truth,generated,weights=(1., 0, 0, 0),smoothing_function=chencherry.method7)\n\t\t\t\tbleu1_train_score += temp_score\n\n\t\t\t\tif i % (len(bleu_train_loader)//4) == 0:\n\t\t\t\t\tprint('Epoch [{}/{}], Step [{}/{}], BLEU-1 Train: {:.4f}'.format(epoch, num_epochs, i, total_bleu_train, temp_score))\n\n\t\t\tavg_bleu1_train = bleu1_train_score/total_bleu_train\n\t\t\twith open(os.path.join(logs_path, 'train_epoch_bleu.txt'), 'a') as f:\n\t\t\t\tf.write(str(avg_bleu1_train) + '\\n')\n\n\t\t\tprint('Calculate BLEU Validation')\n\t\t\tbleu1_val_score = 0\n\t\t\tfor i, (indexes, images, titles, lengths) in enumerate(bleu_val_loader):\n\n\t\t\t\timages = images.to(device)\n\t\t\t\t\n\t\t\t\tfeatures = encoder(images)\n\t\t\t\tsampled_ids = decoder.greedySearch(features)\n\n\t\t\t\tsampled_ids = sampled_ids[0].cpu().numpy()\n\n\t\t\t\ttitles = titles.detach().cpu().numpy()\n\t\t\t\tground_truth = []\n\t\t\t\tfor title in titles:\n\t\t\t\t\tground_truth.append(idToWord(title))\n\n\t\t\t\tgenerated = idToWord(sampled_ids)\n\n\t\t\t\ttemp_score = 0\n\t\t\t\tif len(generated) > 1:\n\t\t\t\t\ttemp_score = nltk.translate.bleu_score.sentence_bleu(ground_truth,generated,weights=(1., 0, 0, 0),smoothing_function=chencherry.method7)\n\t\t\t\tbleu1_val_score += temp_score\n\n\t\t\t\tif i % (len(bleu_val_loader)//4) == 0:\n\t\t\t\t\tprint('Epoch [{}/{}], Step [{}/{}], BLEU-1 Validation: {:.4f}'.format(epoch, num_epochs, i, total_bleu_val, temp_score))\n\n\t\t\tavg_bleu1_val = bleu1_val_score/total_bleu_val\n\t\t\twith open(os.path.join(logs_path, 'val_epoch_bleu.txt'), 'a') as f:\n\t\t\t\tf.write(str(avg_bleu1_val) + '\\n')\n\n\t\t\tprint('Epoch [{}/{}], Epoch Train BLEU-1: {:.4f}, Epoch Validation BLEU-1: {:.4f}'.format(epoch, num_epochs, avg_bleu1_train, avg_bleu1_val))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"158214230","text":"\n\"\"\"\nSVM Classifier for model 2 using VGG 19 features\nPaper: Ravi, Aravind, Harshwin Venugopal, Sruthy Paul, and Hamid R. Tizhoosh. \n\"A Dataset and Preliminary Results for Umpire Pose Detection Using SVM Classification of Deep Features.\" \narXiv preprint arXiv:1809.06217 (2018).\n\n\"\"\"\nimport numpy as np\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import LeaveOneOut\nimport pickle\nimport time\n\nlayers_to_extract = [\"fc1\",\"fc2\"]\n\nlayer_num = 0\n\n#Loading the Features\nX1 = np.load(\"class_cricket\"+layers_to_extract[layer_num]+\"1vgg19_data.npy\")\nX2 = np.load(\"class_cricket\"+layers_to_extract[layer_num]+\"2vgg19_data.npy\")\nX3 = np.load(\"class_cricket\"+layers_to_extract[layer_num]+\"3vgg19_data.npy\")\nX4 = np.load(\"class_cricket\"+layers_to_extract[layer_num]+\"4vgg19_data.npy\")\nX5 = np.load(\"class_cricket\"+layers_to_extract[layer_num]+\"5vgg19_data.npy\")\n\n\n#Concatenate into single matrix\nX_data = np.append(X1,X2,axis=0)\nX_data = np.append(X_data,X3,axis=0)\nX_data = np.append(X_data,X4,axis=0)\nX_data = np.append(X_data,X5,axis=0)\n\n#Labels\nY_data = X_data[:,(X_data.shape[1]-1)]\n\n#Training Data\nX_data = X_data[:,0:(X_data.shape[1]-1)]\n\n#Train Test Split 80-20\nx_tr,x_ts,y_tr,y_ts = train_test_split(X_data, Y_data, test_size=0.2,random_state=157)\n\n#Classifier SVM Linear Kernel \nclf = LinearSVC(C=10)\n\nstart_time = time.time()\n\nclf = clf.fit(x_tr,y_tr)\npredictions_tr = (clf.predict(x_ts))\n\n#10-Fold Cross-validation Accuracy\nscores = cross_val_score(clf, x_tr, y_tr, cv=10)\nprint(layers_to_extract[layer_num])\nprint(\"Training Accuracy: %0.4f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n#Leave One Out or Jack-Knife Crossvalidation\nloo_train_acc=[]\nloo = LeaveOneOut()\nfor train_index, test_index in loo.split(x_tr):\n X_train, X_test = x_tr[train_index], x_tr[test_index]\n y_train, y_test = y_tr[train_index], y_tr[test_index]\n clf = clf.fit(X_train,y_train)\n predictions = (clf.predict(X_test))\n loo_train_acc.append(accuracy_score(y_test,predictions))\n\nloo_train_accuracy = np.asarray(loo_train_acc)\nprint(\"LOO Accuracy: %0.4f\" % loo_train_accuracy.mean())\n\n#20% Test Data Accuracy\ntest_acc = accuracy_score(y_ts,predictions_tr)\nprint(\"Test Accuracy: %0.4f\" % test_acc)\n\n#Save the PCA parameters and SVM Model\npickle.dump(clf, open('FER_vgg19fc2_model2net_ck_transfer_only_svm.sav', 'wb'))\n","sub_path":"Code/VGG19_Method/vgg19_classifier_model2.py","file_name":"vgg19_classifier_model2.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"166668351","text":"import socket\n\naddress = \"peering.nano.org\"\nport = 7075\n\nenum_network = {}\nfor i, network_id in enumerate(\n [\"network_test\", \"network_beta\", \"network_live\"], start=ord(\"A\")\n):\n enum_network[network_id] = i.to_bytes(1, byteorder=\"big\")\n\nenum_msgtype = {}\nfor i, msgtype in enumerate(\n [\n \"invalid\",\n \"not_a_type\",\n \"keepalive\",\n \"publish\",\n \"confirm_req\",\n \"confirm_ack\",\n \"bulk_pull\",\n \"bulk_push\",\n \"frontier_req\",\n \"bulk_pull_blocks\",\n \"node_id_handshake\",\n \"bulk_pull_account\",\n ]\n):\n enum_msgtype[msgtype] = i.to_bytes(1, byteorder=\"big\")\n\nenum_blocktype = {}\nfor i, blocktype in enumerate(\n [\"invalid\", \"not_a_block\", \"send\", \"receive\", \"open\", \"change\", \"state\"]\n):\n enum_blocktype[blocktype] = i.to_bytes(1, byteorder=\"big\")\n\nmagic = b\"R\"\nnetwork_id = enum_network[\"network_live\"]\nversion_max = (16).to_bytes(1, byteorder=\"big\")\nversion_using = (16).to_bytes(1, byteorder=\"big\")\nversion_min = (13).to_bytes(1, byteorder=\"big\")\nmessage_type = enum_msgtype[\"keepalive\"]\nextensions = (0x0000).to_bytes(2, byteorder=\"big\")\n# ~ extensions = (0xffff & 0x0001).to_bytes(2, byteorder='big')\n# ~ extensions = (0xffff & 0x0002).to_bytes(2, byteorder='big')\n# ~ extensions = ((0xffff & 0x0f00)>>8).to_bytes(2, byteorder='big')\n# ~ print(account_key('nano_3ooycog5ejbce9x7nmm5aueui18d1kpnd74gc4s67nid114c5bp4g9nowusy'))\n# ~ start = bytes.fromhex(\n# ~ 'd6be555c36452a61fa5a4e6346d9b800cb04ad45944e50b242d20b0004a1a6c2')\n# ~ age = bytes.fromhex('ffffffff')\n# ~ count = bytes.fromhex('ffffffff')\n# ~ body = start + age + count\npeer = bytes.fromhex(\"000000000000000000000000000000000000\")\nbody = peer\nfor i in range(7):\n body += peer\n\nsock = socket.socket(\n socket.AF_INET, socket.SOCK_DGRAM\n) # SOCK_DGRAM - UDP # SOCK_STREAM - TCP\n# ~ sock.bind(('', 7075))\n# ~ sock.connect((address, port))\nmsg = (\n magic\n + enum_network[\"network_live\"]\n + version_max\n + version_using\n + version_min\n + message_type\n + extensions\n + body\n)\n# ~ sock.send(msg)\nsock.sendto(msg, (address, port))\nprint(msg.hex())\nresponse = sock.recv(1024)\nprint(response.hex())\n","sub_path":"nanopy/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"522026351","text":"import model1\n\nmodel_save_path1 = \"model/model.ckpt\"\nmodel_save_path2 = \"model/model2.ckpt\"\nmodel_save_path3 = \"model/model3.ckpt\"\nmodel_save_path4 = \"model/model4.ckpt\"\nmodel_save_path5 = \"model/model5.ckpt\"\nmodel_save_path6 = \"model/model6.ckpt\"\nmodel_save_path7 = \"model/model7.ckpt\"\n\n#Three of the first model\nresults1 = model1.LoadAndRun(model_save_path1)\n\n\nwith open(\"results/results.csv\", 'w') as file:\n file.write(\"ImageId,Label\\n\")\n for idx in range(len(results1)):\n pred1 = int(results1[idx])\n\n nums = np.zeros(10)\n nums[pred1] = nums[pred1] + 1 \n\n prediction = np.argmax(nums)\n\n file.write(str(idx + 1))\n file.write(\",\")\n file.write(str(prediction))\n file.write(\"\\n\")","sub_path":"CatVsDogs/CatVsDogs/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"65793582","text":"# Copyright (c) hoduchieu01\nclass Solution:\n def shortestDistance(self, words: List[str], word1: str, word2: str) -> int:\n word_dict = defaultdict(list)\n for i, w in enumerate(words):\n if w in (word1, word2): \n word_dict[w].append(i)\n minDistance = float('inf')\n for i in word_dict[word1]:\n for j in word_dict[word2]:\n minDistance = min(minDistance, abs(i-j))\n return minDistance","sub_path":"interview/LeetCode/DecemberLeetCodingChallenge/Week1/Shortest_Word_Distance.py","file_name":"Shortest_Word_Distance.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"439466641","text":"from bot import Bot\nimport random\nfrom constants import *\n\n\nclass TJBotOld(Bot):\n def __init__(self):\n self.Dynamite = 0\n self.repeat_flag = False\n self.repeat_value = ''\n self.buffer = ['', '', '']\n\n def make_move(self, gamestate):\n\n self.check_4_repeat(gamestate)\n if self.repeat_flag:\n Move = TJBotOld.beat_const_bot(self)\n else:\n Move = TJBotOld.make_random_move(self)\n\n return Move\n\n def check_4_repeat(self, gamestate):\n length = len(gamestate[\"rounds\"])\n if length >= 1:\n # shift and fill in the the 3 item long list 'buffer'\n self.buffer[0] = self.buffer[1]\n self.buffer[1] = self.buffer[2]\n self.buffer[2] = gamestate[\"rounds\"][length - 1][\"p2\"]\n if self.buffer[0] == self.buffer[1] and self.buffer[0] == self.buffer[2]:\n self.repeat_flag = True\n self.repeat_value = self.buffer[2]\n else:\n self.repeat_flag = False\n\n def beat_const_bot(self):\n if self.repeat_value == 'R':\n Move = 'P'\n elif self.repeat_value == 'P':\n Move = 'S'\n elif self.repeat_value == 'S':\n Move = 'R'\n elif self.repeat_value == 'D':\n Move = 'W'\n else:\n Move = 'R'\n\n return Move\n\n def make_random_move(self):\n moveIndex = random.choice([0, 1, 2, 3, 4])\n if moveIndex == 4:\n self.Dynamite += 1\n\n if self.Dynamite >= NUM_DYNAMITE:\n moveIndex = random.choice([0, 1, 2, 3])\n\n Move = VALID_MOVES[moveIndex]\n return Move\n","sub_path":"tjbotold.py","file_name":"tjbotold.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"40514607","text":"from django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.utils.html import mark_safe\n\nfrom .models import Thing\n\n\ndef test_misc(request):\n return render(request, \"django_functest/tests/test_misc.html\",\n {'name': request.session.get('name', None)})\n\n\nclass ThingForm(forms.ModelForm):\n def __init__(self, add_spacers=False, **kwargs):\n super(ThingForm, self).__init__(**kwargs)\n self.add_spacers = add_spacers\n\n def as_p(self):\n retval = super(ThingForm, self).as_p()\n if self.add_spacers:\n # Hack to help test interacting with elements\n # that aren't in view.\n retval = mark_safe(retval.replace('
' + i + '
').xpath('//p')[0].xpath('string(.)') for i in all_p]\n s = '\\n'.join(r)\n return s.strip()\n\n\n # 替换一些html的特殊字符\n def substring(self, text):\n if text:\n text = re.sub(' ', ' ', text)\n text = re.sub('>', '>', text)\n text = re.sub('<', '<', text)\n return text\n\n\n # 本类主函数, 通用型网页正文提取\n def extract_content(self, response):\n try:\n content = self.get_main_block(response)\n content = self.remove_empty_line(self.remove_js_css(content))\n left, right, x, y = self.method_1(content)\n\n content = '\\n'.join(content.split('\\n')[left:right])\n content = Utils.transform_coding(content)\n content = self.rearrage_paragraph(content)\n content = self.substring(content)\n return content\n\n except:\n return ''\n\n\n\n# 通用的、提取item信息\nclass CommonExtract(object):\n\n def __init__(self):\n pass\n\n\n def parse_response(self, response, item):\n ec = ExtractContent()\n content = ec.extract_content(response)\n item.content = content.strip('\\n')\n\n\n","sub_path":"extract/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"549560734","text":"import scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scraping.scraping.items import ForecastItem\n\n\nclass ForecastSpider(CrawlSpider):\n name = \"forecast\"\n allowed_domains = [\"tenki.jp\"]\n start_urls = [\"https://tenki.jp/indexes/dress/\"]\n\n rules = (\n Rule(LinkExtractor(allow=r\"/indexes/dress/\\d+/\\d+/\"), callback=\"parse_item\"),\n )\n\n def parse_item(self, response):\n item = ForecastItem()\n\n place = response.css(\"#delimiter ol li a span::text\").extract()\n item[\"area\"] = place[-2]\n item[\"prefecture\"] = place[-1]\n\n item[\"update_time\"] = response.css(\".date-time ::text\").extract_first()\n\n item[\"clothes_info\"] = response.css(\".map-wrap ul span ::text\").extract()\n\n for forecast in response.css(\".sub-column-forecast-pickup\"):\n item[\"weather_city\"] = forecast.css(\".name ::text\").extract()\n item[\"weather\"] = forecast.css(\"img::attr(alt)\").extract()\n\n item[\"highest_temp\"] = forecast.css(\n \".date-value .high-temp ::text\"\n ).extract()\n item[\"lowest_temp\"] = forecast.css(\".date-value .low-temp ::text\").extract()\n\n item[\"rain_chance\"] = forecast.css(\".precip ::text\").extract()\n yield item\n","sub_path":"services/web/scraping/scraping/spiders/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566195739","text":"# encoding: utf-8\n\"\"\"\nNOTE:\n 存在重复的三元组,这里没有去掉。\n ConceptNet格式是relation, entity, predict,其他KG是entity, relation, predict\n\"\"\"\n\nimport sys\n\nif __name__ == '__main__':\n while True:\n line = sys.stdin.readline().strip()\n if line:\n items = line.split('\\t')\n relation = items[1]\n entity = items[2]\n predict = items[3]\n relation = relation.split('/')[-1]\n entity_lang, entity = entity.split('/')[2:4]\n predict_lang, predict = predict.split('/')[2:4]\n if entity_lang == 'zh' or predict_lang == 'zh':\n print('\\t'.join((relation, entity, predict,)))\n else:\n break\n","sub_path":"corpus_processor/kg/concept_net_extractor.py","file_name":"concept_net_extractor.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"453587303","text":"import os\n\n\ndef getWordList(filepath):\n assert 
os.path.isfile(filepath), 'Must be a file'\n txt = open(filepath).read().strip().split('\\n')\n if ':' in open(filepath).read():\n for line in txt:\n if ':' not in line:\n txt[txt.index(line)] = line + ':'\n words = {}\n for line in txt:\n index = line.split(':')[0]\n words[index] = line.split(':')[1].split(',')\n for syn in words[index]:\n if syn == '':\n words[index].remove(syn)\n else:\n words = []\n for word in txt:\n words.append(word.strip())\n return words\n\n\nverbs = getWordList('src/dictionary/verbs.txt')\nnouns = getWordList('src/dictionary/nouns.txt')\nextras = getWordList('src/dictionary/extras.txt')\n","sub_path":"src/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"446070895","text":"import random\n\nimport pytest\n\nfrom day19 import full_beacon_list, matcher, max_manhattan_between_scanners, verify\nfrom geometry_utils import Vector, all_cubic_group_transformations, invert\nfrom helpers import parse_file\n\n\n@pytest.fixture\ndef data_test():\n data = parse_file(\"test.txt\")\n assert len(data) == 5\n return data\n\n\n@pytest.fixture\ndef real_data():\n data = parse_file(\"input.txt\")\n return data\n\n\ndef test_match_scanner_1_and_scanner_2(data_test):\n result = matcher(data_test[0], data_test[1])\n assert result\n translation, rotation = result\n assert (\n len(\n set(data_test[0]).intersection(\n set((rotation.apply(b) + translation) for b in data_test[1])\n )\n )\n == 12\n )\n\n\ndef test_full_beacon_list(data_test):\n all_beacons, all_pos = full_beacon_list(data_test)\n assert len(all_beacons) == 79\n assert max_manhattan_between_scanners(all_pos) == 3621\n\n\ndef test_with_real_data(real_data):\n all_beacons, positions = full_beacon_list(real_data)\n print(\"number of beacons:\", len(all_beacons))\n assert len(all_beacons) == 405\n d_max = max_manhattan_between_scanners(positions)\n print(d_max)\n\n\n@pytest.mark.parametrize(\"rotation\", list(all_cubic_group_transformations))\ndef test_matcher(rotation):\n initial_source = {\n Vector(\n (random.randint(0, 1000), random.randint(0, 999), random.randint(0, 999))\n )\n for _ in range(30)\n }\n translation_vector = Vector((345, 789, 123))\n other_beacon = {rotation.apply(b + translation_vector) for b in initial_source}\n tr, rot = verify(\n initial_source,\n other_beacon,\n Vector((-1) * v for v in translation_vector),\n invert(rotation),\n )\n assert tr, rot == matcher(initial_source, other_beacon)\n","sub_path":"2021/day19/test_algorithm_solution.py","file_name":"test_algorithm_solution.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"484327410","text":"import random\nfrom freq import freq\nfrom cons import cons\nfrom lib_vowels import lib_vowels\n\ndef get_rand_letter(type):\n\tif type == 'all':\n\t\tx = random.random() * 26\n\t\txmod = x % 1\n\t\tselect = int(x-xmod)\n\t\tall_letters = freq\n\t\tselection = all_letters[select]\n\t\t\n\tif type == 'vow':\n\t\tx = random.random() * 5\n\t\txmod = x % 1\n\t\tselect = int(x-xmod)\n\t\tselection = lib_vowels[select]\n\t\t\n\tif type == 'cons':\n\n\t\tx = random.random() * 21\n\t\txmod = x % 1\n\t\tselect = int(x-xmod)\n\t\tselection = cons[select]\n\treturn selection\n\nif __name__ == 
'__main__':\n\tget_rand_letter(type)","sub_path":"hist/hist4/prod/lib/get_rand_letter.py","file_name":"get_rand_letter.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"311444048","text":"\n\n#calss header\nclass _EYESTRAIN():\n\tdef __init__(self,): \n\t\tself.name = \"EYESTRAIN\"\n\t\tself.definitions = [u'tired or painful eyes as a result of too much reading, looking at a computer screen, etc.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_eyestrain.py","file_name":"_eyestrain.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"152551275","text":"antall_kvinner = 0\nantall_menn = 0\nantall_fag = 0\nantall_itgk = 0\nantall_timer_lekser = 0\n\nkjønn = 0\nage = 0\nfag = 0\n#hvis noen sier hade så vil while løkken avsluttes.\n\ndef whilekjonn():\n kjønn = input(\"Er du mann eller kvinne?\\n\").lower()\n if kjønn == \"m\" or kjønn == \"mann\" or kjønn == \"k\" or kjønn == \"kvinne\" or kjønn == \"hade\":\n return kjønn\n else:\n print (\"Ugyldig kjønn\")\n whilekjonn()\n\ndef alder():\n age = int(input(\"Hvor gammel er du?\\n\"))\n return age\n\ndef fager():\n fag = input(\"Tar du ett fag?\\n\").lower()\n if fag == \"ja\" or fag == \"j\" or fag == \"nei\" or fag == \"n\":\n return fag\n else:\n print(\"Ugyldig svar\")\n fager()\n\ndef itgk(age):\n global antall_itgk\n antall_itgk += 1\n if age <= 22:\n print (\"Tar du ITGK?\")\n else:\n print (\"Tar du særr ITGK?\")\n\n\nwhile True:\n kjønn = whilekjonn()\n if kjønn == \"m\" or kjønn == \"mann\":\n print (\"Du er en mann\")\n antall_menn += 1\n elif kjønn == \"k\" or kjønn == \"kvinne\":\n print (\"Du er en kvinne\")\n antall_kvinner += 1\n elif kjønn == \"hade\":\n break\n\n age = alder()\n if 16 <= age <= 25:\n print(\"Du passer perf.\")\n fag = fager()\n if fag == \"ja\" or fag == \"j\":\n itgk(age)\n antall_fag += 1\n timer_lekser = float(input(\"Hvor mange timer i uka bruker du i uka på lekser da?\\n\"))\n antall_timer_lekser += timer_lekser\n\n elif fag == \"hade\":\n break\n else:\n print(\"Du burde ta ITGK.\")\n else:\n print(\"Du er ikke innenfor aldersgruppen vi søker, beklager\")\n\nprint(\"Kvinner {}, Menn {}, fag {}, itgk {}, timer lekser {}\".format(antall_kvinner, antall_menn, antall_fag, antall_itgk, antall_timer_lekser))\n","sub_path":"Øving 5/spørre.py","file_name":"spørre.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"591765509","text":"#!/usr/bin/env python3\n\ndef flips_needed(stack):\n flips = 0\n for i in range(len(stack) - 1):\n if stack[i] != stack[i + 1]:\n flips += 1\n if stack[-1] == '-':\n flips += 1\n return flips\n\nfor case_number in range(int(input())):\n print('Case #{}: {}'.format(case_number + 1, flips_needed(input())))\n","sub_path":"codes/CodeJamCrawler/16_0_2/afg/problem-b.py","file_name":"problem-b.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"418684099","text":"from api.app import app \nfrom api.app import mongo\nfrom flask import request,Response,jsonify\nfrom bson.json_util import loads, dumps, ObjectId\nimport 
json\n\n@app.route('/') #doy la bienvenida solo con la url original\ndef hola_mundo():\n return \".*?)(?P\\[\\/plain\\])',\n re.MULTILINE|re.DOTALL\n )\n\nclass ForgeExtension(markdown.Extension):\n\n def __init__(self, wiki=False, email=False, macro_context=None):\n markdown.Extension.__init__(self)\n self._use_wiki = wiki\n self._is_email = email\n self._macro_context = macro_context\n\n def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n self.forge_processor = ForgeProcessor(self._use_wiki, md, macro_context=self._macro_context)\n self.forge_processor.install()\n md.preprocessors['fenced-code'] = FencedCodeProcessor()\n md.preprocessors.add('plain_text_block', PlainTextPreprocessor(md), \"_begin\")\n md.inlinePatterns['autolink_1'] = AutolinkPattern(r'(http(?:s?)://[a-zA-Z0-9./\\-_0%?&=+#;~:]+)')\n md.treeprocessors['br'] = LineOrientedTreeProcessor(md)\n # Sanitize HTML\n md.postprocessors['sanitize_html'] = HTMLSanitizer()\n # Rewrite all relative links that don't start with . to have a '../' prefix\n md.postprocessors['rewrite_relative_links'] = RelativeLinkRewriter(\n make_absolute=self._is_email)\n # Put a class around markdown content for custom css\n md.postprocessors['add_custom_class'] = AddCustomClass()\n md.postprocessors['mark_safe'] = MarkAsSafe()\n\n def reset(self):\n self.forge_processor.reset()\n\nclass PlainTextPreprocessor(markdown.preprocessors.Preprocessor):\n\n def run(self, lines):\n text = \"\\n\".join(lines)\n while 1:\n res = PLAINTEXT_BLOCK_RE.finditer(text)\n for m in res:\n code = self._escape(m.group('code'))\n placeholder = self.markdown.htmlStash.store(code, safe=True)\n text = '%s%s%s'% (text[:m.start()], placeholder, text[m.end():])\n break\n else:\n break\n return text.split(\"\\n\")\n\n def _escape(self, txt):\n \"\"\" basic html escaping \"\"\"\n txt = txt.replace('&', '&')\n txt = txt.replace('<', '<')\n txt = txt.replace('>', '>')\n txt = txt.replace('\"', '"')\n return txt\n\nclass FencedCodeProcessor(markdown.preprocessors.Preprocessor):\n pattern = '~~~~'\n\n def run(self, lines):\n in_block = False\n new_lines = []\n for line in lines:\n if line.lstrip().startswith(self.pattern):\n in_block = not in_block\n continue\n if in_block:\n new_lines.append(' ' + line)\n else:\n new_lines.append(line)\n return new_lines\n\nclass ForgeProcessor(object):\n alink_pattern = r'(?= len(stash): return ''\n return stash[id]\n\n def compile(self):\n from allura import model as M\n if self.stash['artifact'] or self.stash['link']:\n try:\n self.alinks = M.Shortlink.from_links(*self.stash['artifact'])\n self.alinks.update(M.Shortlink.from_links(*self.stash['link']))\n except:\n self.alinks = {}\n self.stash['artifact'] = map(self._expand_alink, self.stash['artifact'])\n self.stash['link'] = map(self._expand_link, self.stash['link'])\n self.stash['macro'] = map(macro.parse(self._macro_context), self.stash['macro'])\n\n def reset(self):\n self.stash = dict(\n artifact=[],\n macro=[],\n link=[])\n self.alinks = {}\n self.compiled = False\n\n def _expand_alink(self, link):\n new_link = self.alinks.get(link, None)\n if new_link:\n return '[%s]' % (\n new_link.url, link)\n elif self._use_wiki and ':' not in link:\n return '[%s]' % (\n h.urlquote(link), link)\n else:\n return link\n\n def _expand_link(self, link):\n reference = self.alinks.get(link)\n mailto = u'\\x02amp\\x03#109;\\x02amp\\x03#97;\\x02amp\\x03#105;\\x02amp\\x03#108;\\x02amp\\x03#116;\\x02amp\\x03#111;\\x02amp\\x03#58;'\n if not reference and not link.startswith(mailto) and '#' not in 
link:\n return 'notfound'\n else:\n return ''\n\nclass ForgeInlinePattern(markdown.inlinepatterns.Pattern):\n\n def __init__(self, parent, pattern):\n self.parent = parent\n markdown.inlinepatterns.Pattern.__init__(\n self, pattern, parent.markdown)\n\n def handleMatch(self, m):\n return self.parent.store(m.group(2))\n\nclass ForgePostprocessor(markdown.postprocessors.Postprocessor):\n\n def __init__(self, parent):\n self.parent = parent\n markdown.postprocessors.Postprocessor.__init__(\n self, parent.markdown)\n\n def run(self, text):\n self.parent.compile()\n def repl(mo):\n return self.parent.lookup(mo.group(1), int(mo.group(2)))\n return self.parent.placeholder_re.sub(repl, text)\n\nclass ForgeTreeProcessor(markdown.treeprocessors.Treeprocessor):\n '''This flags intra-wiki links that point to non-existent pages'''\n\n def __init__(self, parent):\n self.parent = parent\n\n def run(self, root):\n for node in root.getiterator('a'):\n href = node.get('href')\n if not href: continue\n if '/' in href: continue\n classes = node.get('class', '').split() + [ self.parent._store('link', href) ]\n node.attrib['class'] = ' '.join(classes)\n return root\n\nclass MarkAsSafe(markdown.postprocessors.Postprocessor):\n\n def run(self, text):\n return h.html.literal(text)\n\nclass AddCustomClass(markdown.postprocessors.Postprocessor):\n\n def run(self, text):\n return '%s' % text\n\nclass RelativeLinkRewriter(markdown.postprocessors.Postprocessor):\n\n def __init__(self, make_absolute=False):\n self._make_absolute = make_absolute\n\n def run(self, text):\n try:\n if not request.path_info.endswith('/'): return text\n except:\n # Must be being called outside the request context\n pass\n soup = BeautifulSoup(text)\n if self._make_absolute:\n rewrite = self._rewrite_abs\n else:\n rewrite = self._rewrite\n for link in soup.findAll('a'):\n rewrite(link, 'href')\n for link in soup.findAll('img'):\n rewrite(link, 'src')\n return unicode(soup)\n\n def _rewrite(self, tag, attr):\n val = tag.get(attr)\n if val is None: return\n if ' ' in val:\n # Don't urllib.quote to avoid possible double-quoting\n # just make sure no spaces\n val = val.replace(' ', '%20')\n tag[attr] = val\n if '://' in val:\n if 'sf.net' in val or 'sourceforge.net' in val:\n return\n else:\n tag['rel']='nofollow'\n return\n if val.startswith('/'): return\n if val.startswith('.'): return\n if val.startswith('mailto:'): return\n if val.startswith('#'): return\n tag[attr] = '../' + val\n\n def _rewrite_abs(self, tag, attr):\n self._rewrite(tag, attr)\n val = tag.get(attr)\n val = urljoin(config.get('base_url', 'http://sourceforge.net/'),val)\n tag[attr] = val\n\nclass HTMLSanitizer(markdown.postprocessors.Postprocessor):\n\n def run(self, text):\n try:\n p = feedparser._HTMLSanitizer('utf-8')\n except TypeError: # $@%## pre-released versions from SOG\n p = feedparser._HTMLSanitizer('utf-8', '')\n p.feed(text.encode('utf-8'))\n return unicode(p.output(), 'utf-8')\n\nclass LineOrientedTreeProcessor(markdown.treeprocessors.Treeprocessor):\n '''Once MD is satisfied with the etree, this runs to replace \\n with
\n within s.\n '''\n\n def __init__(self, md):\n self._markdown = md\n \n def run(self, root):\n for node in root.getiterator('p'):\n if not node.text: continue\n if '\\n' not in node.text: continue\n text = self._markdown.serializer(node)\n text = self._markdown.postprocessors['raw_html'].run(text)\n text = text.strip().encode('utf-8')\n if '\\n' not in text: continue\n new_text = (text\n .replace('
<br />', '<br/>')\n                        .replace('\\n', '<br/>
'))\n new_node = None\n try:\n new_node = markdown.etree.fromstring(new_text)\n except SyntaxError:\n try:\n new_node = markdown.etree.fromstring(unicode(BeautifulSoup(new_text)))\n except:\n log.exception('Error adding
tags: new text is %s', new_text)\n pass\n if new_node:\n node.clear()\n node.text = new_node.text\n node[:] = list(new_node)\n return root\n\nclass AutolinkPattern(markdown.inlinepatterns.LinkPattern):\n\n def handleMatch(self, mo):\n old_link = mo.group(2)\n result = markdown.etree.Element('a')\n result.text = old_link\n result.set('href', old_link)\n return result\n\n","sub_path":"Allura/allura/lib/markdown_extensions.py","file_name":"markdown_extensions.py","file_ext":"py","file_size_in_byte":11415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"468581628","text":"from standalone import pddl\nfrom standalone import statistics\n\nfrom standalone.task import PlanningStatusEnum, Task\nimport standalone.globals as global_vars\n\nfrom standalone import config\nlog = config.logger(\"mapsim\")\n\nstatistics_defaults = dict(\n execution_time=0.0,\n failed_execution_attempts=0,\n physical_actions_executed=0,\n sensor_actions_executed=0,\n speech_acts_executed=0,\n total_plan_cost=0,\n reward=0\n )\n\n\ntask_id = 0\ndef next_id():\n global task_id\n task_id += 1\n return task_id-1\n\ndef loggingScope(f):\n def new_f(self, *args, **kwargs):\n oldscope = config.logfile_scope\n if global_vars.mapsim_config.separate_logs:\n config.logfile_scope = self.name\n rval = f(self, *args, **kwargs)\n config.logfile_scope = oldscope\n return rval\n \n new_f.__name__ = f.__name__\n return new_f\n\nclass BaseAgent(object):\n def __init__(self, simulator):\n self.simulator = simulator\n self.running = False\n\n def run(self):\n self.running = True\n \n def execute(self, action, args):\n self.simulator.schedule(action, args, self)\n\n def done(self):\n self.running = False\n self.simulator.signal_done(self)\n\n def is_running(self):\n return self.running\n\n\nclass Agent(BaseAgent):\n def __init__(self, name, mapltask, planner, simulator):\n BaseAgent.__init__(self, simulator)\n \n self.name = name\n self.planner = planner\n self.statistics = statistics.Statistics(defaults = statistics_defaults)\n self.last_action = None\n\n self.new_task(mapltask)\n\n def get_state(self):\n return self.task.get_state()\n\n def update_state(self, svar, val):\n self.get_state()[svar] = val\n\n def new_task(self, mapltask):\n self.mapltask = mapltask.copy()\n\n prob_state = pddl.prob_state.ProbabilisticState.from_problem(self.mapltask)\n state = prob_state.determinized_state(0.1, 0.9)\n \n if global_vars.mapsim_config.add_assertions:\n self.task = Task(next_id(), self.mapltask, add_assertions=True)\n self.task.set_state(state)\n else:\n self.task = Task(next_id(), self.mapltask)\n self.task.set_state(state)\n \n self.planner.register_task(self.task)\n\n @loggingScope\n def run(self):\n BaseAgent.run(self)\n self.task.replan()\n self.execute_plan(self.task)\n\n def write_plan(self, plan):\n if global_vars.mapsim_config.write_dotfiles:\n G = plan.to_dot()\n G.write(\"plan.dot\")\n if global_vars.mapsim_config.write_pdffiles:\n G = plan.to_dot() # a bug in pygraphviz causes write() to delete all node attributes when using subgraphs. 
So create a new graph.\n G.layout(prog='dot')\n G.draw(\"plan.pdf\")\n \n \n @loggingScope\n @statistics.time_method_for_statistics(\"execution_time\")\n def execute_plan(self, task):\n if task.planning_status == PlanningStatusEnum.PLANNING_FAILURE:\n return\n\n def action_cmp(pnode1, pnode2):\n if pnode1.action.is_pure_sensor() and not pnode2.action.is_pure_sensor():\n return -1\n elif not pnode1.action.is_pure_sensor() and pnode2.action.is_pure_sensor():\n return 1\n\n for s1, s2 in [(pnode1.action.name, pnode2.action.name)] + zip(map(str, pnode1.args), map(str, pnode2.args)):\n if cmp(s1, s2) != 0:\n return cmp(s1, s2)\n return 0\n \n plan = task.get_plan()\n self.write_plan(plan)\n #all_funcs = set(self.mapltask.functions) | set(self.mapltask.predicates)\n # print \"instantiate:\"\n #mapl.sas_translate.to_sas(self.mapltask)\n # print \"executable:\"\n # for a in self.mapltask.actions:\n # a = pddl.mapl.MAPLObjectFluentNormalizer().translate(a, domain=self.mapltask)\n \n # for c in pddl.sas_translate.instantiate_action(a, task.get_state(), all_funcs):\n # print \"(%s %s)\" % (a.name, \" \".join(o.name for o in c))\n\n executable = sorted(plan.executable(), cmp=action_cmp)\n log.info(\"executable actions: %s\", \" \".join(map(str, executable)))\n if executable:\n log.debug(\"trying to execute (%s %s)\", executable[0].action.name, \" \".join(map(str, executable[0].args)))\n self.last_action = executable[0]\n self.execute(executable[0].action.name, executable[0].full_args)\n else:\n log.debug(\"nothing more to do.\")\n self.done()\n\n \n @loggingScope\n def updateTask(self, new_facts, action_status=None):\n plan = self.task.get_plan()\n\n if plan is not None and action_status:\n self.last_action.status = action_status\n \n for f in new_facts:\n self.task.get_state().set(f)\n \n self.task.mark_changed()\n self.task.replan()\n self.execute_plan(self.task)\n\n def collect_statistics(self):\n \"\"\" return all stats collected by this agent (usually to the simulation) \"\"\"\n return self.statistics.merge(self.task.statistics)\n","sub_path":"subarchitectures/planner.sa/branches/surprises/src/python/mapsim/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"502056742","text":"# ----------------------------------------------------------\n# Introdução a Programação de Computadores - IPC\n# Universidade do Estado do Amazonas - UEA\n# Prof. Jucimar Jr\n# tiago ferreira aranha \t 1715310047\n# Luiz Daniel Raposo Nunes de Mello\t 1715310049\n# Gabriel nascimento de Oliveira 1715310052\n# Wilbert Luís Evangelista Marins 1715310055\n# Mackson Garcez Moreno de Oliveira júnior 1215090300\n# Edson de Lima Barros 1715310043\n#\n# 5. Desenhar uma estrela de 5 pontas\n# ----------------------------------------------------------\n\nimport turtle\nlados = 10\n\np = turtle.Pen()\n\nfor x in range(lados):\n p.forward(50)\n p.left(360/lados)\n p.forward(50)\n p.right(360/lados*2)\n\n\n\nturtle.mainloop()\n","sub_path":"lista02/lista02_exercicio05.py","file_name":"lista02_exercicio05.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"162663535","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.forum, name='forum'),\n\turl(r'^block/(?P\\d+)/$', views.block, name='block'),\n\turl(r'^check/', views.checking, name='check'),\n\turl(r'^post/(?P\\d+)/$', views.post_detail, name='forum_post_detail'),\n\turl(r'^post/add/(?P\\d+)/$', views.post_add, name='forum_post_add'),\n\turl(r'^ajax/post_freeze$', views.close, name='post_freeze'),\n\turl(r'^ajax/post_check$', views.check, name='post_check'),\n\turl(r'^ajax/post_delete$', views.delete, name='post_delete'),\n]","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"434225341","text":"# use something with NLTK enabled\n\nimport nltk\nfrom response import *\n\nfd = open(\"pos-nouns\", \"r\")\nposSubjects=fd.read().splitlines()\nfd.close()\nfd = open(\"neg-nouns\", \"r\")\nnegSubjects=fd.read().splitlines()\nfd.close\n\nprint(\"Welcome to your interview with Pres. Trump. Please keep your questions simple.\")\n\nwhile True:\n\tsentence = input(\"> \")\n\n\t# hey, that's a common use case, might as well make it work\n\tif sentence.casefold() in [\"hello\",\"hi\"] :\n\t\tprint(sayHello())\n\t\tcontinue\n\t\t\n\tif sentence.casefold().startswith(\"bye\"):\n\t\tprint(sayBye())\n\t\tbreak\n\n\ttokens = nltk.word_tokenize(sentence)\n\ttagged = nltk.tag.pos_tag(tokens)\n\t\n\t# trees are too complicated for now\n\t\"\"\"\n\ttaggedStr=\"(\"\n\tfor i in tagged:\n\t\ttaggedStr+=str(i)\n\ttaggedStr+=\")\"\n\t\n\tsenTree = nltk.tree.Tree.fromstring(taggedStr)\n\t\n\tsenTree.subtrees()\n\t#print(senTree)\n\t\"\"\"\n\t# we try to find a proper noun (NNP) to talk about. If we can't, we take an noun (NN)\n\t\n\tsubject=\"none\"\n\t\n\t# it would be very interesting to analyse a corpus of trump's speech to get a list of negative or positive terms\n\t#negSubjects=[\"democrats\", \"obama\", \"clinton\", \"mccain\", \"cnn\", \"taxes\", \"obamacare\", \"ACA\"]\n\t#posSubjects=[\"ivanka\", \"bannon\", \"kushner\", \"breitbart\", \"fox\", \"nuclear\", \"rich\", \"science\", \"guns\"]\n\n\t# this should do AS LONG AS WE DO NOT HAVE COMPLEX SENTENCES\n\tfor i in tagged:\n\t\tif i[1] == \"NNP\":\n\t\t\tsubject=i\n\tif subject == \"none\":\n\t\tfor i in tagged:\n\t\t\tif i[1] == \"NN\" or i[1] == \"NNS\":\n\t\t\t\tsubject=i\n\t\n\t\n\tif subject==\"none\":\n\t\tprint(rambling())\n\t# the way we generate our pos/neg nouns mean they can appear in both positive and negative lists. Currently, we just favor the negative.\n\telif subject[0].casefold() in negSubjects:\n\t\tprint(negativeResponse(subject[0], subject[1]))\n\telif subject[0].casefold() in posSubjects:\n\t\t#positive response\n\t\tprint(positiveResponse(subject[0], subject[1]))\n\telse:\n\t\t#neutral response\n\t\tprint(neutralResponse(subject[0], subject[1]))\n\t\n\t# now we have the subject, we should work on getting a rough \"sentence type\", to know what type of answer we need to give.\n\t\n\t#print(\"I'm too intelligent for this.\")\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"221897425","text":"\"\"\"\nMIT License\nCopyright (c) 2020 GamingGeek\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software\nand associated documentation files (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\nFOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom logging.handlers import TimedRotatingFileHandler\nfrom jishaku.modules import resolve_extensions\nimport core.coloredformat as colorformat\nfrom discord.ext import commands, tasks\nfrom aioinflux import InfluxDBClient\nfrom sentry_sdk import push_scope\nfrom .context import Context\nfrom .config import Config\nimport functools\nimport traceback\nimport sentry_sdk\nimport aiofiles\nimport aiohttp\nimport datetime\nimport discord\nimport asyncpg\nimport logging\nimport typing\nimport json\nimport sys\n\n\nclass Fire(commands.Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.launchtime = datetime.datetime.utcnow()\n\n # COMMON ATTRIBUTES\n self.config: dict = json.load(open('config.json', 'r'))\n self.configs = {}\n self.overrides: dict = json.load(open('overrides.json', 'r'))\n self.override_save.start()\n self.tips = json.load(open('tips.json', 'r'))\n self.premiumGuilds = []\n self.db: asyncpg.pool.Pool = None\n self.realtime_members = True\n self.dev = kwargs.pop('dev', False)\n\n # CRAB\n self.crab = '🦀'\n\n # LOGGING\n logging.basicConfig(filename='bot.log', level=logging.INFO)\n self.logger = logging.getLogger('Fire')\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setLevel(logging.INFO)\n COLOR_FORMAT = colorformat.formatter_message(\"[$BOLD%(name)s$RESET][%(levelname)s] %(message)s $RESET($BOLD%(filename)s$RESET:%(lineno)d)\")\n stdout.setFormatter(colorformat.ColoredFormatter(COLOR_FORMAT))\n self.logger.addHandler(stdout)\n\n # SENTRY\n if 'sentry' in self.config:\n sentry_sdk.init(self.config['sentry'])\n\n # INFLUX\n if 'influx_user' in self.config and 'influx_pass' in self.config:\n self.influx = InfluxDBClient(\n db='firedev' if self.dev else 'fire',\n username=self.config['influx_user'],\n password=self.config['influx_pass']\n )\n\n # MODULES\n self.load_modules()\n\n # COMMANDS\n self.load_commands()\n self.cmdresp = {}\n\n # EVENTS\n self.load_events()\n\n # CUSTOM PERMISSIONS\n # self.permissions = {}\n\n async def get_context(self, message: discord.Message, **kwargs):\n silent = False\n if message.content and message.content.endswith(' --silent'):\n message.content = message.content[:-9]\n silent = True\n if 'cls' not in kwargs:\n ctx = await super().get_context(message, cls=Context, **kwargs)\n else:\n ctx = await super().get_context(message, **kwargs)\n if ctx.valid and silent:\n try:\n await message.delete()\n 
except Exception:\n pass\n return ctx\n\n def get_message(self, mid: int):\n if not self.cached_messages:\n return None\n found = [m for m in self.cached_messages if m.id == mid]\n if not found:\n return None\n return found[0]\n\n def isadmin(self, user: typing.Union[discord.User, discord.Member]) -> bool:\n if str(user.id) not in self.config['admins']:\n admin = False\n else:\n admin = True\n return admin\n\n def load_commands(self):\n try:\n # raise Exception('Chatwatch is temporarily disabled')\n self.load_extension('core.chatwatch')\n except Exception as e:\n # errortb = ''.join(traceback.format_exception(\n # type(e), e, e.__traceback__))\n self.logger.error(f'$REDError while loading $CYANChatwatch', exc_info=e)\n try:\n self.load_extension('jishaku')\n except Exception as e:\n # errortb = ''.join(traceback.format_exception(\n # type(e), e, e.__traceback__))\n self.logger.error(f'$REDError while loading $CYANJishaku', exc_info=e)\n for ext in resolve_extensions(self, 'commands.*'):\n try:\n self.load_extension(ext)\n except Exception as e:\n # errortb = ''.join(traceback.format_exception(\n # type(e), e, e.__traceback__))\n self.logger.error(f'$REDError while loading $CYAN{ext}', exc_info=e)\n\n def load_events(self):\n for ext in resolve_extensions(self, 'events.*'):\n try:\n self.load_extension(ext)\n except Exception as e:\n # errortb = ''.join(traceback.format_exception(\n # type(e), e, e.__traceback__))\n self.logger.error(f'$REDError while loading {ext}', exc_info=e)\n\n def load_modules(self):\n for ext in resolve_extensions(self, 'modules.*'):\n try:\n self.load_extension(ext)\n except Exception as e:\n # errortb = ''.join(traceback.format_exception(\n # type(e), e, e.__traceback__))\n self.logger.error(f'$REDError while loading {ext}', exc_info=e)\n\n def sentry_exc(self, error: commands.CommandError, userscope: dict, exclevel: str, extra: dict):\n with push_scope() as scope:\n scope.user = userscope\n scope.level = exclevel\n for key in extra:\n scope.set_tag(key, extra[key])\n sentry_sdk.capture_exception(error)\n\n @tasks.loop(minutes=2)\n async def override_save(self):\n await self.wait_until_ready()\n try:\n f = await aiofiles.open('overrides.json', 'w')\n await f.write(json.dumps(self.overrides))\n await f.close()\n except Exception:\n pass\n\n async def haste(self, content, fallback: bool=False):\n url = 'hst.sh'\n if fallback:\n url = 'h.inv.wtf'\n async with aiohttp.ClientSession().post(f'https://{url}/documents', data=content) as r:\n if r.status != 200 and not fallback:\n return await self.haste(content, fallback=True)\n j = await r.json()\n return f'https://{url}/' + j['key']\n\n async def is_team_owner(self, user: typing.Union[discord.User, discord.Member]):\n if user.id == self.owner_id:\n return True\n else:\n return False\n","sub_path":"core/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"619105178","text":"from django.shortcuts import render, redirect\r\nfrom . import models\r\nimport datetime\r\n# Create your views here.\r\n\r\n\r\ndef index(request):\r\n boardList = models.Board.objects.all()\r\n return render(request, 'board/index.html', {'boards': boardList})\r\n\r\n\r\ndef submit(request):\r\n name = request.POST.get('name')\r\n content = request.POST.get('content')\r\n date = str(datetime.datetime.now())\r\n new_board = models.Board(name=name, content=content, date=date)\r\n new_board.save()\r\n return redirect('/index')\r\n","sub_path":"每天练习/0023/message/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"552945745","text":"import pandas as pd\n#from decimal import Decimal\nteamDF = pd.read_csv('BBLTeams.csv')\nbatDF = pd.read_csv('BattingData.csv')\n#print(batDF)\nbowlDF = pd.read_csv('BowlingData.csv')\n\nbowlDF.set_index('Name', inplace = 'true')\nbatDF.set_index('Name', inplace = 'true')\nteamDF = teamDF.values\n\n#Teams\n#Adelaide Strikers\n#Brisbane Heat\n#Hobart Hurricanes\n#Melbourne Renegades\n#Melbourne Stars\n#Perth Scorchers\n#Sydney Sixers\n#Sydney Thunder\n\n#Team1 = -1\n#Team2 = -1\nBowlStat1 = 0\nBatStat1 = 0\nBowlStat2 = 0\nBatStat2 = 0\nTeam1 = -1\nTeam2 = -1\nwhile Team1 == -1:\n Team1 = input('Input home team: ')\n if Team1 == 'A':\n Team1 = 0\n elif Team1 == 'B':\n Team1 = 1\n elif Team1 == 'H':\n Team1 = 2\n elif Team1 == 'MR':\n Team1 = 3\n elif Team1 == 'MS':\n Team1 = 4\n elif Team1 == 'P':\n Team1 = 5\n elif Team1 == 'SS':\n Team1 = 6\n elif Team1 == 'ST':\n Team1 = 7\n else :\n print('Invalid Team')\n Team1=-1\nplayersNotPlaying1 = []\nppName = '_'\nppTF = input('For Home Team are there any players that are not playing?')\nwhile ppTF == 'y' or ppTF == 'yes' or ppTF == 'YES' :\n ppName = input('Player full name : ')\n playersNotPlaying1.append(ppName)\n ppTF = input('Any more players not playing? ')\n \n\n \n \n\nwhile Team2 == -1:\n Team2 = input('Away Team: ')\n if Team2 == 'A':\n Team2 = 0\n elif Team2 == 'B':\n Team2 = 1\n elif Team2 == 'H':\n Team2 = 2\n elif Team2 == 'MR':\n Team2 = 3\n elif Team2 == 'MS':\n Team2 = 4\n elif Team2 == 'P':\n Team2 = 5\n elif Team2 == 'SS':\n Team2 = 6\n elif Team2 == 'ST':\n Team2 = 7\n else :\n print('Invalid Team')\n Team2=-1\nplayersNotPlaying2 = []\nppName = '_'\nppTF = input('For Away Team are there any players that are not playing?')\nwhile ppTF == 'y' or ppTF == 'yes' or ppTF == 'YES' :\n ppName = input('Player full name : ')\n playersNotPlaying2.append(ppName)\n ppTF = input('Any more players not playing? 
')\ncount2 = 0\nbowlCount1 = 0\nbatCount1 = 0\nbowlCount2 = 0\nbatCount2 = 0\n#home team calc\nwhile count2 <19:\n #print (row)\n #print(Team1)\n #print(teamDF.loc[:Team1])\n row1 = teamDF[count2,Team1]\n #row1 = row[Team1]#.strip('.')\n \n #print(row1)\n if count2 == 0:\n row2 = row1.rstrip(' (Captain)')\n else :\n row2 = row1\n print(row2)\n #print(row2 in bowlDF.index)\n if row2 not in playersNotPlaying1:\n if row2 in bowlDF.index :\n #BowlStat1 = float(BowlStat1) + float(bowlDF.loc[row2,'Ave'])*float(bowlDF.loc[row2,'Econ'])\n #insert bowl annalysis\n #print(type(bowlDF.loc[row2,'Ave']))\n bowlCount1 += 1\n if isinstance(bowlDF.loc[row2,'Ave'],str):\n try :\n BowlStat1 += float(bowlDF.loc[row2,'Ave'])*float(bowlDF.loc[row2,'Econ'])\n except:\n bowlCount1 = bowlCount1\n #print(float(bowlDF.loc[row2,'Ave'])*float(bowlDF.loc[row2,'Econ']))\n else :\n #print(float(bowlDF.loc[row2,'Ave'].values[0])*float(bowlDF.loc[row2,'Econ'].values[0]))\n BowlStat1 += float(bowlDF.loc[row2,'Ave'].values[0])*float(bowlDF.loc[row2,'Econ'].values[0])\n #AdeSix.loc[row1] = bowlDF.loc[row2]\n print('Bowl Stats :')\n print(BowlStat1)\n if row2 in batDF.index:\n # #insert bat annalysis\n #print('true')\n #print(type(batDF.loc[row2,'Ave'])) \n batCount1 += 1\n BatStat1 += batDF.loc[row2,'Ave']*batDF.loc[row2,'SR']\n print('Bat Stats: ')\n print(BatStat1)\n #print(BatStat1)\n # if isinstance(batDF.loc[row2,'Ave'],str):\n # \n # print(float(batDF.loc[row2,'Ave'])*float(batDF.loc[row2,'SR']))\n # BatStat1 += float(batDF.loc[row2,'Ave'])*float(batDF.loc[row2,'SR'])\n # else :\n # print(float(batDF.loc[row2,'Ave'].values[0])*float(batDF.loc[row2,'SR'].values[0]))\n # BatStat1 += float(batDF.loc[row2,'Ave'].values[0])*float(batDF.loc[row2,'SR'].values[0])\n #BatStat1 += batDF.loc[row2,'Ave']*batDF.loc[row2,'SR']\n count2 +=1\n \n \ncount2 = 0 \n#away team calc \nwhile count2 <19:\n row1 = teamDF[count2,Team2]\n if count2 == 0:\n row2 = row1.rstrip(' (Captain)')\n else :\n row2 = row1\n print(row2)\n if row2 not in playersNotPlaying2:\n if row2 in bowlDF.index :\n bowlCount2 += 1\n if isinstance(bowlDF.loc[row2,'Ave'],str):\n try :\n BowlStat2 += float(bowlDF.loc[row2,'Ave'])*float(bowlDF.loc[row2,'Econ'])\n except:\n bowlCount2 = bowlCount2\n else :\n BowlStat2 += float(bowlDF.loc[row2,'Ave'].values[0])*float(bowlDF.loc[row2,'Econ'].values[0])\n print('Bowl Stats :')\n print(BowlStat2)\n if row2 in batDF.index:\n batCount2 += 1\n BatStat2 += batDF.loc[row2,'Ave']*batDF.loc[row2,'SR']\n print('Bat Stats: ')\n print(BatStat2)\n count2 +=1\nBowlStat1 = BowlStat1/bowlCount1\nBatStat1 = BatStat1/batCount1\nBowlStat2 = BowlStat2/bowlCount2\nBatStat2 = BatStat2/batCount2\nprint(BowlStat1)\nprint(BatStat1)\nprint(BowlStat2)\nprint(BatStat2)\nprint(BowlStat2 * BatStat1)\nprint(BowlStat1 * BatStat2)\nHome = (45.5,38.1,50.0,66.67,40.91,62.5,46.45,18.18)\nAway = (57.14,52.17,43.48,64.0,52.38,60.87,53.55,31.82)\nif Home[Team1] < Away[Team2]:\n adjust = (Away[Team2] - Home[Team1])/100 + 1\n result = (BowlStat2 * BatStat1)/(adjust) - (BowlStat1 * BatStat2*adjust)\nelse :\n adjust = (Home[Team1] - Away[Team2])/100 + 1\n result = (BowlStat2 * BatStat1*adjust) - (BowlStat1 * BatStat2)/adjust\n###enter in adjust part\nprint('result is: ')\nprint(result)\nprint(playersNotPlaying1)\nprint('Team Two')\nprint(playersNotPlaying2)","sub_path":"Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"303729759","text":"from django.shortcuts import render\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom django_redis import get_redis_connection\nfrom django.utils import timezone\nfrom django.db import transaction\nfrom apps.users.models import Address\nfrom apps.goods.models import SKU\nimport json\n\nfrom .models import OrderInfo,OrderGoods\n\nfrom decimal import Decimal\n# Create your views here.\n\n\n\n\n\nclass OrderSettlementView(View):\n\n def get(self,request):\n user = request.user\n\n try:\n addresses = Address.objects.filter(user=request.user,is_deleted=False)\n except Exception as e:\n addresses = None\n\n conn = get_redis_connection('carts')\n item_dict = conn.hgetall(f'carts_{user.id}')\n cart_selected = conn.smembers(f'selected_{user.id}')\n cart = {}\n for sku_id in cart_selected:\n cart[int(sku_id)] = int(item_dict[sku_id])\n\n sku_list = []\n\n skus = SKU.objects.filter(id__in=cart.keys())\n for sku in skus:\n sku_list.append({\n 'id':sku.id,\n 'name':sku.name,\n 'default_image_url':sku.default_image_url,\n 'count': cart[sku.id],\n 'price':sku.price\n })\n\n freight = Decimal('10.00')\n\n list = []\n for address in addresses:\n list.append({\n 'id':address.id,\n 'province':address.province.name,\n 'city':address.city.name,\n 'district':address.district.name,\n 'place':address.place,\n 'receiver':address.receiver,\n 'mobile':address.mobile\n })\n\n context = {\n 'addresses':list,\n 'skus':sku_list,\n 'freight':freight,\n }\n\n return JsonResponse({'code':0,'errmsg':'ok','context':context})\n\n\nclass OrderCommitView(View):\n\n def post(self,request):\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n\n if not all([address_id,pay_method]):\n return JsonResponse({'code':400,'errmsg':'缺少必传参数'})\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n return JsonResponse({'code':400,'errmsg':'参数address_id有误'})\n\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'],OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:\n return JsonResponse({'code':400,'errmsg':'参数pay_method'})\n\n user = request.user\n\n order_id = timezone.localtime().strftime('%Y%m%H%M%S') + ('{:0>9d}'.format(user.id))\n\n\n with transaction.atomic():\n save_id = transaction.savepoint()\n\n\n order = OrderInfo.objects.create(\n order_id = order_id,\n user = user,\n address = address,\n total_count = 0,\n total_amount = Decimal('10.00'),\n freight = Decimal('0'),\n pay_method=pay_method,\n status = OrderInfo.ORDER_STATUS_ENUM['UNPAID']\n if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY']\n else OrderInfo.ORDER_STATUS_ENUM['UNSEND']\n )\n\n conn = get_redis_connection('carts')\n item_dict = conn.hgetall(f'carts_{user.id}')\n cart_selected = conn.smembers(f'selected_{user.id}')\n carts = {}\n for sku_id in cart_selected:\n carts[int(sku_id)] = int(item_dict[sku_id])\n\n sku_ids = carts.keys()\n\n\n for sku_id in sku_ids:\n sku = SKU.objects.get(id=sku_id)\n sku_count = carts[sku.id]\n if sku_count>sku.stock:\n return JsonResponse({'code':400,'errmsg':'库存不足'})\n\n sku.stock -= sku_count\n sku.sales += sku_count\n sku.save()\n\n OrderGoods.objects.create(\n order = order,\n sku=sku,\n count = sku_count,\n price=sku.price,\n )\n\n order.total_count += sku_count\n order.total_amount += (sku_count* sku.price)\n\n order.total_amount += order.freight\n order.save()\n\n transaction.savepoint_commit(save_id)\n\n\n pl = conn.pipeline()\n pl.hdel(f'carts_{user.id}',*cart_selected)\n 
pl.srem(f'selected_{user.id}',*cart_selected)\n pl.execute()\n\n return JsonResponse({'code':0,'errmsg':'ok','order_id':order.order_id})\n\n\n\n\n\n","sub_path":"meiduo_mall/apps/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"543279990","text":"if __name__ == '__main__':\n N = int(raw_input())\n l=[]\n y = \",\"\n for x in range(N):\n s= raw_input().split()\n value = s[0]\n value1= s[1:]\n if value!=\"print\":\n value+=\"(\"+y.join(value1)+\")\"\n eval(\"l.\"+value)\n else:\n print(l)\n","sub_path":"Programs/Basic Data Types/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"319582086","text":"\"\"\"\nBackground\n\nThe Genomics program requires stable research IDs (RIDs). This is a script that will\nadd only pid/rid mappings for participants that don't currently exist in the\nprimary pid_rid_mapping table.\n\nThese records will be appended to the pipeline_tables.pid_rid_mapping table in BigQuery.\nDuplicate mappings are not allowed.\n\"\"\"\n# Python imports\nimport argparse\nimport inspect\nimport logging\nimport time\n\n# Third party imports\nfrom google.cloud import bigquery\n\n# Project imports\nfrom common import (JINJA_ENV, MAX_DEID_DATE_SHIFT, PID_RID_MAPPING,\n PIPELINE_TABLES)\nfrom utils import auth, bq\n\nLOGGER = logging.getLogger(__name__)\n\nSCOPES = [\n 'https://www.googleapis.com/auth/bigquery',\n 'https://www.googleapis.com/auth/devstorage.read_write',\n]\n\nGET_NEW_MAPPINGS = JINJA_ENV.from_string(\"\"\"\nINSERT INTO `{{primary.project}}.{{primary.dataset_id}}.{{primary.table_id}}`\n(person_id, research_id, shift)\nSELECT\n person_id\n , research_id\n-- generates random shifts between 1 and max_shift inclusive --\n , CAST(FLOOR({{max_shift}} * RAND() + 1) AS INT64) as shift\nFROM `{{rdr_table.project}}.{{rdr_table.dataset_id}}.{{rdr_table.table_id}}`\nWHERE person_id not in (\n SELECT person_id\n FROM `{{primary.project}}.{{primary.dataset_id}}.{{primary.table_id}}`)\n-- This is just to make sure we don't duplicate either person_id OR research_id --\nAND research_id not in (\n SELECT research_id\n FROM `{{primary.project}}.{{primary.dataset_id}}.{{primary.table_id}}`)\n\"\"\")\n\n\ndef store_to_primary_mapping_table(fq_rdr_mapping_table,\n client=None,\n run_as=None):\n \"\"\"\n Store the provided mappings and create required date shifts.\n\n Curation must maintain a stable pid/rid mapping for participants, as well\n as a date shift integer. Curation gets the pid/rid mapping table from the\n RDR team as part of their ETL process. Curation must identify new pid/rid\n mapping pairs, create random date shifts for each pair, and store the three\n tuple to the pipeline_tables.pid_rid_mapping table.\n\n This script requires either a client object be passed as a parameter or an\n email address to impersonate be provided. If both are missing, the script\n will not execute!\n\n The script assumes the newly provided mapping table exists in the same\n project as the primary mapping table.\n\n :param fq_rdr_mapping_table: a dot separated fully qualified name of the\n recently imported pid_rid_mapping table.\n :param client: a client object to use for querying both tables\n :param run_as: the email address of the service account to run as. if\n impersonation is already set up, pass the existing client object instead.\n\n :return: None\n :raises: RuntimeError if client and run_as are both None. 
BigQuery errors.\n \"\"\"\n project, dataset, table = fq_rdr_mapping_table.split('.')\n\n LOGGER.info(\n f'RDR mapping info: project -> {project}\\tdataset -> {dataset}\\ttable -> {table}'\n )\n LOGGER.info(f'Primary mapping info: project -> {project}\\t'\n f'dataset -> {PIPELINE_TABLES}\\ttable -> {PID_RID_MAPPING}')\n\n if not client and not run_as:\n LOGGER.error('Run cannot proceed without proper credentials')\n raise RuntimeError(\n 'Provide either a client or a service account to impersonate.')\n\n # set up an impersonated client if one is not provided\n if not client:\n LOGGER.info(\n 'Using impersonation credentials and creating a new client.')\n # get credentials and create client\n impersonation_creds = auth.get_impersonation_credentials(run_as, SCOPES)\n\n client = bq.get_client(project, credentials=impersonation_creds)\n else:\n LOGGER.info('Client object provided and being used.')\n\n # rdr table ref\n dataset_ref = bigquery.DatasetReference(project, dataset)\n rdr_table = bigquery.TableReference(dataset_ref, table)\n\n # primary table ref\n dataset_ref = bigquery.DatasetReference(project, PIPELINE_TABLES)\n primary_mapping_table = bigquery.TableReference(dataset_ref,\n PID_RID_MAPPING)\n\n # Query job config\n labels = {\n 'rdr_mapping_table':\n '-'.join(fq_rdr_mapping_table.lower().split('.')[1:])[-63:],\n 'module_name':\n __file__.lower().replace('/', '-').replace('.', '-')[-63:]\n }\n\n job_prefix = inspect.currentframe().f_code.co_name\n query = GET_NEW_MAPPINGS.render(rdr_table=rdr_table,\n primary=primary_mapping_table,\n max_shift=MAX_DEID_DATE_SHIFT)\n\n LOGGER.info(f'Preparing to run query:\\n{query}')\n\n config = bigquery.job.QueryJobConfig(labels=labels)\n\n new_mappings_job = client.query(query,\n job_config=config,\n job_id_prefix=job_prefix)\n\n # wait for the query to finish\n LOGGER.info('Waiting for pid/rid/shift storage query to finish.')\n new_mappings_job.result()\n LOGGER.info('Query has finished.')\n\n LOGGER.info(f'{new_mappings_job.num_dml_affected_rows} mapping records '\n f'added to {primary_mapping_table}')\n\n # check if errors were encountered and report any\n if new_mappings_job.errors:\n LOGGER.error(f'Query job finished with errors. See details of job '\n f'with job_id_prefix {job_prefix} and labels {labels}')\n else:\n LOGGER.info('Query job finished without errors.')\n\n\ndef check_table_name(name_str):\n \"\"\"\n Make sure the tablename provided follows the fully qualified format.\n\n If the table name cannot be split into three sections by splitting on a\n dot, '.', then reject the provided name as incomplete.\n\n :param name_str: The name of the table as provided by the end user.\n\n :return: a fully qualified table name string.\n :raises: ValueError if the name cannot be split.\n \"\"\"\n name_parts = name_str.split('.')\n if len(name_parts) != 3:\n raise ValueError(f'A fully qualified table name must be of the form '\n f'.. . You '\n f'provided {name_str}')\n\n return name_str\n\n\ndef check_email_address(address_str):\n \"\"\"\n Make sure the string provided looks like an email address.\n\n If the string does not contain `@`, then reject the provided string.\n\n :param address_str: The email address as provided by the end user.\n\n :return: a validated email address.\n :raises: ValueError if the address does not contain `@`.\n \"\"\"\n if '@' not in address_str:\n raise ValueError(f'An email address must be specified. 
'\n f'You supplied {address_str}')\n\n return address_str\n\n\ndef process_mappings(raw_args=None):\n \"\"\"\n Allow mapping arguments to be validated from other python modules.\n\n Use parser to validate arguments and then run mapping storage. This\n is not strictly required, but will help ensure the\n `store_to_primary_mapping_table` function works as designed.\n\n :params raw_args: If provided, a list of arguments and values.\n If not provided, defaults to command line values.\n \"\"\"\n LOGGER.info(\"Beginning pid/rid/shift storage process.\")\n\n parser = argparse.ArgumentParser(\n description='Add new mappings to our primary pid/rid mapping table.',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '-r',\n '--fq_rdr_mapping',\n action='store',\n dest='rdr_mapping',\n help=('The fully qualified rdr mapping table name. '\n 'The project_id will be extracted from this table name.'),\n type=check_table_name,\n required=True)\n parser.add_argument(\n '-i',\n '--run_as',\n action='store',\n dest='run_as',\n help=('The email address of the service account to impersonate.'),\n type=check_email_address)\n args = parser.parse_args()\n\n store_to_primary_mapping_table(args.rdr_mapping, run_as=args.run_as)\n\n LOGGER.info(\"Finished pid/rid/shift storage process.\")\n\n\nif __name__ == '__main__':\n from utils import pipeline_logging\n\n pipeline_logging.configure()\n process_mappings()\n","sub_path":"data_steward/tools/store_mappings.py","file_name":"store_mappings.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"294908721","text":"def cria_matriz(num_linhas, num_colunas):\n matriz = [] #lista vazia\n for i in range(num_linhas):\n linha = []\n for j in range(num_colunas):\n linha.append(0)\n matriz.append(linha)\n\n for i in range(num_colunas):\n for j in range(num_linhas):\n matriz[j][i] = int(input(\"Digite o elemento [\" + str(j) + \"][\" + str(i) + \"]: \"))\n\n return matriz\n\ncria_matriz(2,3)","sub_path":"Exercicios_Resolvidos/Parte 2/Treino/exercicio_matriz-3.py","file_name":"exercicio_matriz-3.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"434477061","text":"import numpy as np\nimport cv2\n\nMIN_MATCH_COUNT = 10\n\nimg1 = cv2.imread('C:/Users/User/PycharmProjects/untitled1/heart.png',1) # queryImage\nimg2 = cv2.imread('C:/Users/User/PycharmProjects/untitled1/gg.jpg',1) # trainImage\n\nimg1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\nimg2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('loadimage', img2)\ncv2.waitKey(0)\n\n# Initiate SURF detector\nsurf = cv2.xfeatures2d.SURF_create()\n\n# find the keypoints and descriptors with SURF\nkp1, des1 = surf.detectAndCompute(img1_gray,None)\nkp2, des2 = surf.detectAndCompute(img2_gray,None)\n\n# FLANN parameters\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks=50) # or pass empty dictionary\n\nflann = cv2.FlannBasedMatcher(index_params,search_params)\n\nmatches = flann.knnMatch(des1,des2,k=2)\n\n# store all the good matches as per Lowe's ratio test.\ngood = []\nfor m,n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n\nif len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n h,w = img1_gray.shape\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,M)\n\n img2_gray = cv2.polylines(img2_gray,[np.int32(dst)],True,255,3, cv2.LINE_AA)\n\nelse:\n print(\"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT))\n matchesMask = None\n\ndraw_params = dict(matchColor = (0,255,0), # draw matches in green color\n singlePointColor = None,\n matchesMask = matchesMask, # draw only inliers\n flags = 2)\n\nimg3 = cv2.drawMatches(img1_gray,kp1,img2_gray,kp2,good,None,**draw_params)\n\ncv2.imshow('SURF', img3)\ncv2.waitKey(0)\n\npoint = np.int32(dst)\npoint1 = point[0][0]\npoint2 = point[1][0]\npoint3 = point[2][0]\npoint4 = point[3][0]\n\nx = int((point1[0]+point2[0])/2)\ny = int((point1[1]+point4[1])/2)\nw = int((point3[0]+point4[0])/2)\nh = int((point2[1]+point3[1])/2)\n\nimg_trim = img2[y:h, x:w]\ncv2.imshow('trim', img_trim)\ncv2.waitKey(0)\n\nimg_trim_hsv = cv2.cvtColor(img_trim, cv2.COLOR_BGR2HSV)\n\nlower_blue = np.array([110, 30, 100])\nupper_blue = np.array([130, 255, 255])\nlower_green = np.array([50, 30, 100])\nupper_green = np.array([70, 255, 255])\nlower_red = np.array([-10, 30, 100])\nupper_red = np.array([255, 255, 255])\n\nmask_red = cv2.inRange(img_trim_hsv, lower_red, upper_red)\n\nres = cv2.bitwise_and(img_trim, img_trim, mask=mask_red )\n\ncv2.imshow('red', res)\ncv2.waitKey(0)\n\nres2 = img_trim-res\ncv2.imshow('res',res2)\ncv2.waitKey(0)\n\nres2_ycrcb = cv2.cvtColor(res2, cv2.COLOR_BGR2YCR_CB)\ny, cr, cb = cv2.split(res2_ycrcb)\n\ny_mean = np.mean(y)\ncr_mean = np.mean(cr)\ncb_mean = np.mean(cb)\n\nym = int(round(239 - y_mean))\ncrm = int(round(128 - cr_mean))\ncbm = int(round(128 - cb_mean))\n\ny2 = cv2.add(y, ym)\ncr2 = cv2.add(cr, crm)\ncb2 = cv2.add(cb, cbm)\n\nres2_ycrcb = cv2.merge((y2, cr2, cb2))\nres2 = cv2.cvtColor(res2_ycrcb, cv2.COLOR_YCR_CB2BGR)\n\ncv2.imshow('loadimage2', res2)\ncv2.waitKey(0)\n\nimg2_ycrcb = cv2.cvtColor(img2, cv2.COLOR_BGR2YCR_CB)\nimg2_y, img2_cr, img2_cb = cv2.split(img2_ycrcb)\n\nimg2_y2 = cv2.add(img2_y, ym)\nimg2_cr2 = cv2.add(img2_cr, crm)\nimg2_cb2 = cv2.add(img2_cb, cbm)\n\nimg2_ycrcb = cv2.merge((img2_y, img2_cr2, 
img2_cb2))\nimg2 = cv2.cvtColor(img2_ycrcb, cv2.COLOR_YCR_CB2BGR)\n\ncv2.imshow('loadimage3', img2)\ncv2.waitKey(0)\n\n","sub_path":"opencv/matching3.py","file_name":"matching3.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"16630706","text":"# Root Routing Configuration for Channels.\n# It is similar to a Django URLconf in that it tells Channels\n# what code to run\n# when an HTTP request is received by the Channels server.\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nimport chat.routing\n\n# ProtocolTypeRouter checks if it is a WebSocket connection\n# AuthMiddlewareStack populates the connection’s scope\n# with a reference to the currently authenticated user.\n# URLRouter examines the HTTP path of the connection\n# to route it to a particular consumer\n\napplication = ProtocolTypeRouter({\n # (http->django views is added by default)\n 'websocket': AuthMiddlewareStack(\n URLRouter(\n chat.routing.websocket_urlpatterns\n )\n ),\n})","sub_path":"lifebase/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"564484840","text":"'''\nAuthor: Chuck Stewart\nDate: February 24, 2016\n\nPurpose: A module to read in the Social Security Administration's top\n 250 baby names for each year from 1880 up to and including 2014.\n\nUsage: \n 1. Import the module\n\n 2. Call read_names.read_from_file(fn) where fn is the file\n containing the names. This returns a boolean, which for the\n purposes of the homework can be safely ignored.\n\n 3. (names, counts) = read_names.top_in_year(year, f_or_m) where\n year is an int and f_or_m is a single character to indicate\n whether female ('F' or 'f') names are requested or male ('M' or\n 'm') names are requested. This function returns a tuple of\n lists, where the first list is the names and the second list is\n the count of occurrences of that name.\n'''\n\nimport sys\n\n'''\nThe following are the global lists and int storing all of the\ninformation read in. All of this really should be in a Python class\nobject, but at this point in the semester we have not studied these.\n'''\nall_female_names = []\nall_female_counts = []\nall_male_names = []\nall_male_counts = []\nfirst_year = -1\n\n\ndef read_from_file( file_name ):\n '''\n Read from file_name. The format is the year, followed by the top\n 250 female names,followed by the top 250 male names. This repeats\n for each year in order. Here are the first four lines to show the\n form of each line \n 1880\n Mary,F,7065\n Anna,F,2604\n Emma,F,2003\n '''\n in_f = open(file_name,'r')\n\n ''' These are the counts for one year... '''\n female_names = []\n female_counts = []\n male_names = []\n male_counts = []\n year = -1 # this is reset when the very first year is read in\n line_num = 0\n \n ''' Tell Python to use these outside variables '''\n global all_female_names\n global all_female_counts\n global all_male_names\n global all_male_counts\n global first_year\n\n ''' Handle one line of input at a time '''\n for line in in_f:\n line = line.strip().split(',')\n line_num += 1\n\n # Handle the special case of the very first line and year\n if first_year == -1 and len(line) == 1 and line[0].isdigit():\n first_year = int(line[0])\n year = first_year\n\n # Error check on the format of the first line\n elif first_year == -1:\n print(\"Error: initial format on line number\", line_num)\n return False\n\n # After the first line we'll end up here each time. 
This\n # line is to test for the start of the next year and will\n # succeed only after 500 names have been read\n elif len(line) == 1 and line[0].isdigit(): \n # Add the names from the year just completely read to the\n # end of the global lists of lists\n all_female_names.append( female_names )\n all_female_counts.append( female_counts )\n all_male_names.append( male_names )\n all_male_counts.append( male_counts )\n\n # Reset the lists for the next year\n female_names = []\n female_counts = []\n male_names = []\n male_counts = []\n year = int(line[0])\n\n # Check for a well-formatted line for a female name\n elif len(line)==3 and line[1].lower() == 'f' and line[2].isdigit():\n female_names.append( line[0] )\n female_counts.append( int(line[2]) )\n\n # Check for a well-formatted line for a male name\n elif len(line)==3 and line[1].lower() == 'm' and line[2].isdigit():\n male_names.append( line[0] )\n male_counts.append( int(line[2]) )\n\n # If we get here there is a formatting error somewhere and we\n # don't know what else to do so we quit\n else:\n print(\"Error: internal format on line number\", line_num)\n return False\n\n # We get to here after the entire file has been read. We now\n # need to save the last year's lists to the global lists.\n all_female_names.append( female_names )\n all_female_counts.append( female_counts )\n all_male_names.append( male_names )\n all_male_counts.append( male_counts )\n\n # We are done!\n return True\n \n\ndef top_in_year( year, f_or_m ):\n ''' For the given year, access the list of names and the list of\n counts for the names. Return empty lists if the year is out\n of range.\n '''\n if year < first_year or year > 2014:\n return ([], [])\n\n index = year - first_year\n if f_or_m.lower() == 'f':\n return (all_female_names[index], all_female_counts[index])\n else:\n return (all_male_names[index], all_male_counts[index])\n\n\n'''\nThe following code is only run if the module is being executed as a program.\n'''\nif __name__ == \"__main__\":\n fn = \"top_names_1880_to_2014.txt\"\n\n if read_from_file( fn ):\n print(\"Successful read\")\n else:\n print(\"Read failed\")\n sys.exit()\n\n (names,counts) = top_in_year( 1883, 'F')\n for i in range(10):\n print(i, names[i], counts[i])\n\n print()\n (names,counts) = top_in_year( 1885, 'M')\n for i in range(10):\n print(i, names[i], counts[i])\n\n \n \n","sub_path":"CSCI_1100/Week_5/HW_3/read_names.py","file_name":"read_names.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"616075483","text":"#coding:utf-8\n\nimport sys\nimport numpy as np\nfrom DataPreProcessing import (\n test_features,test_targets\n , train_features,train_targets\n , val_features, val_targets\n , scaled_features)\nfrom strategy import NeuralNetwork\n\nepochs = 100\nlearning_rate = 0.1\nhidden_nodes = 2\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\n\ndef MSE(y, Y):\n return np.mean((y-Y)**2)\n\nlosses = {'train':[], 'validation':[]}\nfor e in range(epochs):\n batch = np.random.choice(train_features.index, size=128)\n for record,target in zip(train_features.ix[batch].values\n , train_targets.ix[batch]['cnt']):\n network.train(record, target)\n\n train_loss = MSE(network.run(train_features), train_targets)\n val_loss = MSE(network.run(val_features), val_targets)\n sys.stdout.write(\"\\rProgress: \" + str(100 * e/float(epochs))[:4] \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)\n\nfig, ax = plt.subplots(figsize=(8,4))\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features)*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)","sub_path":"BikeSharing/BikeSharing.py","file_name":"BikeSharing.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"189772281","text":"\"\"\"This script shows how to calculate the number of reaches upstream of a target\"\"\"\n\nimport os\nimport sys\nsys.path.append('attribution_and_accumulation')\n\nfrom attribution_and_accumulation import Navigator\n\nREGION_ID = '07'\nTOPOLOGY_FILE = \"upstream_{}.npz\".format(REGION_ID)\nTOPOLOGY_FILE = os.path.join(\"WatershedTopology\", TOPOLOGY_FILE)\n\nREGION = Navigator(TOPOLOGY_FILE)\n\nTEST_REACH = 4867727\n\nN = len(REGION.all_upstream(TEST_REACH))\n\nprint(N)\n","sub_path":"Tools/navigator_demo.py","file_name":"navigator_demo.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"1332782","text":"from django import forms\nfrom django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import CreateView, ListView, UpdateView, DeleteView, DetailView\nfrom django.db import transaction\nfrom django.db.models import Count, Avg\n\n\nfrom account.decorators import teacher_required\nfrom ..models import Quiz, Question, Answer\nfrom ..forms import QuestionForm, BaseAnswerInlineFormSet\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuizCreateView(CreateView):\n model = Quiz\n fields = ('name',)\n template_name = 'quiz/add_quiz.html'\n\n def form_valid(self, form):\n quiz = form.save(commit=False)\n quiz.owner = self.request.user\n quiz.save()\n messages.success(\n self.request, 'The quiz was created with success! Go ahead and add some questions now.')\n return redirect('quiz:question_add', quiz.pk)\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuizListView(ListView):\n model = Quiz\n ordering = ('name', )\n context_object_name = 'quizzes'\n template_name = 'quiz/quiz_list.html'\n\n def get_queryset(self):\n queryset = self.request.user.quizzes \\\n .annotate(questions_count=Count('questions', distinct=True))\n return queryset\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuizUpdateView(UpdateView):\n model = Quiz\n fields = ('name', )\n context_object_name = 'quiz'\n template_name = 'quiz/quiz_update.html'\n\n def get_context_data(self, **kwargs):\n kwargs['questions'] = self.get_object().questions.annotate(\n answers_count=Count('answers'))\n return super().get_context_data(**kwargs)\n\n def get_queryset(self):\n '''\n This method is an implicit object-level permission management\n This view will only match the ids of existing quizzes that belongs\n to the logged in user.\n '''\n return self.request.user.quizzes.all()\n\n def get_success_url(self):\n return reverse('quiz:quiz_update', kwargs={'pk': self.object.pk})\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuizDeleteView(DeleteView):\n model = Quiz\n context_object_name = 'quiz'\n template_name = 'quiz/quiz_delete_confirm.html'\n success_url = reverse_lazy('quiz:quiz_list')\n\n def delete(self, request, *args, **kwargs):\n quiz = self.get_object()\n messages.success(\n request, 'The quiz %s was deleted with success!' % quiz.name)\n return super().delete(request, *args, **kwargs)\n\n def get_queryset(self):\n return self.request.user.quizzes.all()\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuestionDeleteView(DeleteView):\n model = Question\n context_object_name = 'question'\n template_name = 'quiz/question_delete_confirm.html'\n pk_url_kwarg = 'question_pk'\n\n def get_context_data(self, **kwargs):\n question = self.get_object()\n kwargs['quiz'] = question.quiz\n return super().get_context_data(**kwargs)\n\n def delete(self, request, *args, **kwargs):\n question = self.get_object()\n messages.success(\n request, 'The question %s was deleted with success!' 
% question.text)\n return super().delete(request, *args, **kwargs)\n\n def get_queryset(self):\n return Question.objects.filter(quiz__owner=self.request.user)\n\n def get_success_url(self):\n question = self.get_object()\n return reverse('quiz:quiz_update', kwargs={'pk': question.quiz_id})\n\n\n@method_decorator([login_required, teacher_required], name='dispatch')\nclass QuizResultsView(DetailView):\n model = Quiz\n context_object_name = 'quiz'\n template_name = 'quiz/teachers/quiz_results.html'\n\n def get_context_data(self, **kwargs):\n quiz = self.get_object()\n taken_quizzes = quiz.taken_quizzes.select_related(\n 'student__user').order_by('-date')\n total_taken_quizzes = taken_quizzes.count()\n quiz_score = quiz.taken_quizzes.aggregate(average_score=Avg('score'))\n extra_context = {\n 'taken_quizzes': taken_quizzes,\n 'total_taken_quizzes': total_taken_quizzes,\n 'quiz_score': quiz_score\n }\n kwargs.update(extra_context)\n return super().get_context_data(**kwargs)\n\n def get_queryset(self):\n return self.request.user.quizzes.all()\n\n\n@login_required\n@teacher_required\ndef question_add(request, pk):\n\n quiz = get_object_or_404(Quiz, pk=pk, owner=request.user)\n\n if request.method == 'POST':\n form = QuestionForm(request.POST)\n if form.is_valid():\n question = form.save(commit=False)\n question.quiz = quiz\n question.save()\n messages.success(\n request, 'You may now add answers/options to the question.')\n return redirect('quiz:question_update', quiz.pk, question.pk)\n # quiz.pk, question.pk\n else:\n form = QuestionForm()\n\n return render(request, 'quiz/question_add.html', {'quiz': quiz, 'form': form})\n\n\n@login_required\n@teacher_required\ndef question_update(request, quiz_pk, question_pk):\n\n quiz = get_object_or_404(Quiz, pk=quiz_pk, owner=request.user)\n question = get_object_or_404(Question, pk=question_pk, quiz=quiz)\n\n AnswerFormSet = forms.inlineformset_factory(\n Question, # parent model\n Answer, # base model\n formset=BaseAnswerInlineFormSet,\n fields=('text', 'is_correct'),\n min_num=2,\n validate_min=True,\n max_num=10,\n validate_max=True\n )\n\n if request.method == 'POST':\n form = QuestionForm(request.POST, instance=question)\n formset = AnswerFormSet(request.POST, instance=question)\n if form.is_valid() and formset.is_valid():\n with transaction.atomic():\n form.save()\n formset.save()\n messages.success(\n request, 'Question and answers saved with success!')\n return redirect('quiz:quiz_update', quiz.pk)\n else:\n form = QuestionForm(instance=question)\n formset = AnswerFormSet(instance=question)\n\n return render(request, 'quiz/question_update.html', {\n 'quiz': quiz,\n 'question': question,\n 'form': form,\n 'formset': formset\n })\n","sub_path":"quiz/views/teachers.py","file_name":"teachers.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"502049591","text":"import matplotlib\nmatplotlib.use(\"TKAgg\")\nfrom Tkinter import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\n\nclass plot_region(Frame):\n def __init__(self, master=None, **opt):\n Frame.__init__(self, master, opt)\n self.pack()\n self.plot_area()\n\n def plot_area(self):\n self.fig = Figure(figsize=(5, 5), dpi=150)\n self.canv = FigureCanvasTkAgg(self.fig, self)\n self.canv.show()\n self.canv.get_tk_widget().pack()\n\n def get_canvas(self):\n return (self.canv, self.fig)\n\n\nif __name__ == '__main__':\n plot_region().mainloop()","sub_path":"Ph2150/ps9_preq/plot_frame.py","file_name":"plot_frame.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"412771406","text":"#LJ\nimport json\nimport requests\n\nclass RunMain():\n '''\n RunMain类是为了get和post请求,\n 初始化实例之后通过run_main来判断请求类型\n '''\n\n #实例化后直接调用run_main()\n def __init__(self,url,method,data=None):\n self.res = self.run_main(url,method,data)\n\n\n #get请求\n def send_get(self,url,data):\n res = requests.get(url=url,data=data).json()\n return json.dumps(res,indent=2,sort_keys=True)\n\n\n #post请求\n def send_post(self,url,data):\n res = requests.get(url=url, data=data).json()\n return json.dumps(res, indent=2, sort_keys=True)\n\n def run_main(self,url,method,data=None):\n res = None\n if method == 'GET':\n res = self.send_get(url,data)\n else:\n res = self.send_post(url,data)\n return res\n\nif __name__ == '__main__':\n url= 'http://127.0.0.1:8000/web/login/'\n data = {\n 'username': 'lj',\n 'password': '123456'\n }\n run = RunMain(url,'GET',data)\n print(run.res)","sub_path":"interface/interfaceDemo.py","file_name":"interfaceDemo.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"291148187","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .models import Generator\n\n\ndef at_random(request, generator_number=None):\n try:\n if generator_number is not None:\n generator = Generator.objects.get(number=generator_number)\n next_card_number = Generator.objects.order_by('?').first().number\n else:\n generators = Generator.objects.order_by('?')\n generator = generators[0]\n next_card_number = generators[1].id\n\n context = {\n 'generator': generator,\n 'next_card_number': next_card_number\n }\n\n except ObjectDoesNotExist:\n context = {\n 'generator': None,\n 'next_card_number': None\n }\n\n return render(request, 'generators/at_random.html', context)\n","sub_path":"generators/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"207702800","text":"from Imdb_Etl.BucketManager import BucketManager\n\n\nclass TitlePrincipalsManager(BucketManager):\n def __init__(self, initiation_time):\n super().__init__(initiation_time)\n self.set_path_to_process(\"s3a://imdbtitleprincipalsz/processing/*/*/*/*/*.parquet\")\n self.set_bucket_name(BucketManager.get_bucket_name_from_config(\"title_principals\"))\n self.set_s3_resource()\n self.parse_bucket()\n self.change_files_to_processing_status()\n","sub_path":"Imdb_Etl/TitlePrincipalsManager.py","file_name":"TitlePrincipalsManager.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"9216891","text":"# $Id: SQLDDDB-Oracle.py,v 1.3 2008-09-30 13:17:36 marcocle Exp $\n__author__ = \"Marco Clemencic \"\n\nfrom Gaudi.Configuration import *\nfrom DetCond.Configuration import *\n\n##########################################################################\n# Technology dependent options to use the Conditions database\n##########################################################################\nfrom Configurables import COOLConfSvc\nCOOLConfSvc(UseLFCReplicaSvc = True)\n\nCondDBAccessSvc(\"DDDB\",\n ConnectionString = \"CondDB/DDDB\",\n CacheHighLevel = 1700\n )\n\nCondDBAccessSvc(\"LHCBCOND\",\n ConnectionString = \"CondDB/LHCBCOND\",\n CacheHighLevel = 200\n )\n\nCondDBAccessSvc(\"SIMCOND\",\n ConnectionString = \"CondDB/SIMCOND\",\n CacheHighLevel = 200\n )\n\nCondDBAccessSvc(\"DQFLAGS\",\n ConnectionString = \"CondDB/DQFLAGS\",\n\t\tCacheLowLevel = 5,\n CacheHighLevel = 10\n )\n\nCondDBAccessSvc(\"ONLINE\",\n ConnectionString = \"CondDBOnline/ONLINE\",\n )\n","sub_path":"DBASE/Det/SQLDDDB/options/SQLDDDB-Oracle.py","file_name":"SQLDDDB-Oracle.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"495909303","text":"import logging\nimport os\n\nfrom pyxus.resources.repository import DomainRepository, OrganizationRepository, InstanceRepository, SchemaRepository, ContextRepository\nfrom pyxus.utils.http_client import HttpClient\n\nLOGGER = logging.getLogger(__package__)\n\nENV_VAR_NEXUS_ENDPOINT = \"NEXUS_ENDPOINT\"\nENV_VAR_NEXUS_PREFIX = \"NEXUS_PREFIX\"\nENV_VAR_NEXUS_NAMESPACE = \"NEXUS_NAMESPACE\"\n\nclass NexusClient(object):\n SUPPORTED_VERSIONS = ['0.8.14']\n\n def __init__(self, scheme=None, host=None, prefix=None, alternative_namespace=None, token=None):\n self.version = None\n self.namespace = alternative_namespace if alternative_namespace is not None else \"{}://{}\".format(scheme, host)\n self.env = None\n self.config = NexusConfig(scheme, host, prefix, alternative_namespace)\n self._http_client = HttpClient(self.config.NEXUS_ENDPOINT, self.config.NEXUS_PREFIX, token=token)\n self.domains = DomainRepository(self._http_client)\n self.contexts = ContextRepository(self._http_client)\n self.organizations = OrganizationRepository(self._http_client)\n self.instances = InstanceRepository(self._http_client)\n self.schemas = SchemaRepository(self._http_client)\n\n def version_check(self, supported_versions=SUPPORTED_VERSIONS):\n server_metadata_url = '{}/'.format(self.config.NEXUS_ENDPOINT)\n\n response = self._http_client.get(server_metadata_url)\n\n if response is not None:\n service_name = response.get('name')\n self.version = response.get('version')\n self.env = response.get('env')\n if service_name == 'kg' and self.version in supported_versions:\n LOGGER.info('Version supported : %s\\nenv: %s',\n self.version, self.env)\n return True\n else:\n LOGGER.error('**Version unsupported**: %s\\nenv: %s',\n self.version, self.env)\n return True\n else:\n raise NexusException(response.reason)\n\n def get_fullpath_for_entity(self, entity):\n return \"{}{}\".format(self.config.NEXUS_NAMESPACE, entity.path)\n\n\nclass NexusConfig(object):\n\n def __init__(self, scheme=None, host=None, nexus_prefix=None, nexus_namespace=None):\n if host is None and scheme is None and ENV_VAR_NEXUS_ENDPOINT in os.environ:\n self.NEXUS_ENDPOINT = os.environ.get(ENV_VAR_NEXUS_ENDPOINT)\n elif host is not None and scheme is not None:\n self.NEXUS_ENDPOINT = \"{}://{}\".format(scheme, host)\n else:\n self.NEXUS_ENDPOINT = None\n self.NEXUS_PREFIX = os.environ.get(ENV_VAR_NEXUS_PREFIX) if nexus_prefix is None and ENV_VAR_NEXUS_PREFIX in os.environ else nexus_prefix\n if nexus_namespace is None and ENV_VAR_NEXUS_NAMESPACE in os.environ:\n self.NEXUS_NAMESPACE = os.environ.get(ENV_VAR_NEXUS_NAMESPACE)\n else:\n self.NEXUS_NAMESPACE = nexus_namespace\n self._validate()\n\n def _validate(self):\n if self.NEXUS_ENDPOINT is None:\n raise ValueError(\"The Nexus endpoint is not set!\")\n if self.NEXUS_PREFIX is None:\n raise ValueError(\"The Nexus prefix is not set!\")\n if self.NEXUS_NAMESPACE is None:\n raise ValueError(\"The Nexus namespace is not set!\")\n\n\nclass NexusException(Exception):\n \"\"\"Exception raised when a Nexus call fails\n\n Attributes:\n http_status_code -- code returned by the API\n message -- message for the exception\n \"\"\"\n def __init__(self, message):\n self.message = message\n","sub_path":"pyxus/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"360958633","text":"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-18 18:49:15\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-08-27 11:37:42\nimport numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Process,Lock\nfrom multiprocessing import Pool\nthread_num=10\ndef crop(object):\n ground=np.array((object==1).nonzero())\n x1=np.min(ground[0,:])\n x2=np.max(ground[0,:])\n y1=np.min(ground[1,:])\n y2=np.max(ground[1,:])\n size=np.sum(object)\n return x1,y1,x2+1,y2+1,size\ndef pre_matching(start,end):\n left_dir=r'/home/lidong/Documents/datasets/Driving/train_data_clean_pass/left/'\n right_dir=r'/home/lidong/Documents/datasets/Driving/train_data_clean_pass/right/'\n match_dir=r'/home/lidong/Documents/datasets/Driving/train_data_clean_pass/match/'\n left_files=os.listdir(left_dir)\n left_files.sort()\n right_files=os.listdir(right_dir)\n right_files.sort()\n # s_index=int(np.floor(length/thread_num*index))-2\n # e_index=int(np.floor(length/thread_num*(index+1)))+2\n # if e_index>length:\n # e_index=length\n # if s_index<0:\n # s_index=0\n for i in range(int(start),int(end)):\n left=np.load(os.path.join(left_dir,left_files[i]))[...,8]\n right=np.load(os.path.join(right_dir,right_files[i]))[...,8]\n pre=[]\n pre2=[]\n l_box=[]\n r_box=[]\n match=[]\n start_time=time.time()\n for m in range(int(np.max(left)+1)):\n object=np.where(left==m,1,0)\n if np.sum(object)>0:\n l_box.append(crop(object))\n else:\n l_box.append((0,0,1,1,1))\n l_box=np.array(l_box)\n for m in range(int(np.max(right)+1)):\n object=np.where(right==m,1,0)\n if np.sum(object)>0:\n r_box.append(crop(object))\n else:\n r_box.append((0,0,1,1,1))\n r_box=np.array(r_box)\n for m in range(int(np.max(left)+1)):\n x1,y1,x2,y2,size=l_box[m]\n if size==1:\n match.append(-1)\n continue\n left_object_m=np.where(left==m,1,-1)[x1:x2,y1:y2]\n x_matching=(np.min([np.ones_like(r_box[:,2])*x2,r_box[:,2]],0)-np.max([np.ones_like(r_box[:,0])*x1,r_box[:,0]],0))/(x2-x1)\n x_matching=np.where(x_matching>0.8,1,0)\n y_matching=np.min([(r_box[:,3]-r_box[:,1]),np.ones_like(r_box[:,0])*(y2-y1)],0)/np.max([(r_box[:,3]-r_box[:,1]), \\\n np.ones_like(r_box[:,0])*(y2-y1)],0)\n y_matching=np.where(y_matching>0.5,1,0)\n y_check=np.where(r_box[:,1]<=y2,1,0)\n y_matching=y_matching*y_check\n matching=x_matching*y_matching\n overlap=[]\n for n in range(matching.shape[0]):\n if matching[n]==1:\n r_x1,r_y1,r_x2,r_y2,r_size=r_box[n]\n right_object_n=np.where(right==n,1,0)[x1:x2,r_y1:r_y2]\n if (r_y2-r_y1)>(y2-y1):\n shift=[]\n for k in range((r_y2-r_y1)-(y2-y1)+1):\n tmp_overelap=left_object_m-right_object_n[:,k:k+y2-y1]\n shift.append(np.sum(np.where(tmp_overelap==0,1,0))/np.max([size,r_size]))\n else:\n shift=[]\n for k in range((y2-y1)-(r_y2-r_y1)+1):\n tmp_overelap=right_object_n-left_object_m[:,k:k+r_y2-r_y1]\n shift.append(np.sum(np.where(tmp_overelap==0,1,0))/np.max([size,r_size]))\n overlap.append(np.max(shift))\n else:\n overlap.append(-1)\n if np.max(overlap)>0:\n match.append(np.argmax(overlap))\n else:\n match.append(-1)\n match=np.array(match)\n pre.append([l_box,r_box,match])\n #min_d=np.array(np.max([np.where(match==-1,0,r_box[match,1]+l_box[:,1]-l_box[:,3]),np.zeros_like(match)],0))\n #max_d=np.array(np.min([np.where(match==-1,l_box[:,3],r_box[match,3]+l_box[:,3]-l_box[:,1]),min_d+300],0))\n variance_d=np.floor((l_box[:,3]-l_box[:,1])/10).astype(np.int)\n min_d=np.where(match==-1,0,np.max([l_box[:,1]-r_box[match,1]-variance_d,np.zeros_like(match)],0))\n 
max_d=np.where(match==-1,192,np.min([l_box[:,3]-r_box[match,3]+variance_d,np.ones_like(match)*192],0))\n # if min_d>l_box[:,2]:\n # min_d=0\n # else:\n # min_d=np.max([min_d,0])\n\n min_d=np.where(min_d>l_box[:,3],0,np.max([min_d,np.zeros_like(match)],0))\n max_d=np.where(max_d<=min_d,min_d+192,max_d)\n max_d=np.min([max_d,l_box[:,3]],0)\n pre2.append(np.array([min_d,max_d]))\n pre_match=np.array([pre,pre2])\n np.save(os.path.join(match_dir,left_files[i]),pre_match)\n print('thread:%d,doing:%d,time:%.3f' % (end/440,i,time.time()-start_time))\n\n\nprocess = []\nleft_dir=r'/home/lidong/Documents/datasets/Driving/train_data_clean_pass/left/'\nleft_files=os.listdir(left_dir)\nleft_files.sort()\nlength=len(left_files)\nstart=[]\nend=[]\np = Pool(thread_num)\nfor z in range(thread_num):\n start.append(z*length/10)\n end.append((z+1)*length/10)\nfor z in range(thread_num):\n p.apply_async(pre_matching, args=(start[z],end[z]))\n\np.close()\np.join()\n#pre_matching(0,1)\nprint('end')\n","sub_path":"back of PSSM/PSSM/pre_matching-20180827142110.py","file_name":"pre_matching-20180827142110.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"512895182","text":"import cv2, os\r\nimport numpy as np\r\nfrom fd import *\r\n#from pprint import pprint\r\n\r\ndef normalize_intensity(image):\r\n \"\"\" This method normalizes the size and pixel intensity of an image.\"\"\"\r\n is_color = len(image.shape) == 3\r\n if is_color:\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image = cv2.equalizeHist(image)\r\n return image\r\n\r\n\r\ndef read_face(face, size=(100, 100)):\r\n \"\"\" This method open and resize the image into size specified\"\"\"\r\n if isinstance(face, basestring):\r\n face = cv2.imread(face, 0)\r\n face_norm = normalize_intensity(face)\r\n else:\r\n face_norm = normalize_intensity(face)\r\n if face_norm.shape < size:\r\n face_resized = cv2.resize(face_norm, size, interpolation=cv2.INTER_AREA)\r\n else:\r\n face_resized = cv2.resize(face_norm, size, interpolation=cv2.INTER_CUBIC)\r\n return face_resized\r\n\r\ndef get_labels_faces(dataset_dir):\r\n \"\"\" This method generate the labels and faces\"\"\"\r\n labels_ppl = {}\r\n images = []\r\n labels = []\r\n i = 0\r\n for path, dir, file in os.walk(dataset_dir):\r\n for d in dir:\r\n for f in os.listdir(os.path.join(dataset_dir, d)):\r\n images.append(read_face(os.path.join(dataset_dir, d, f)))\r\n labels.append(i)\r\n labels_ppl[i] = d\r\n i+=1\r\n return labels, images, labels_ppl\r\n\r\n\r\ndef main():\r\n labels, images, labels_ppl = get_labels_faces(\"data/face_dataset\")\r\n recognizer = cv2.face.EigenFaceRecognizer_create()\r\n threshold = 10000\r\n recognizer.train(images, np.array(labels))\r\n eigenvectors = recognizer.getEigenVectors()\r\n eigenfaces = np.transpose(eigenvectors*255)\r\n for i,eigenface in enumerate(eigenfaces):\r\n eigenface = (eigenface-np.min(eigenface))/(np.max(eigenface)-np.min(eigenface))\r\n cv2.imwrite('ef%d.jpg'%i,(eigenface*255).astype(np.uint8).reshape(100,100))\r\n\r\n raw_input(\"Press ENTER to start recognition\")\r\n\r\n fd = CascadeClassifier(\"classifier/haarcascade_frontalface_default.xml\", \"classifier/haarcascade_eye.xml\")\r\n video_capture = cv2.VideoCapture(0)\r\n while True:\r\n # Capture frame-by-frame\r\n ret, frame = video_capture.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces_coord, faces_img = fd.detectMultiScale(gray)\r\n faces = [read_face(face) for face in faces_img]\r\n if len(faces_coord):\r\n for i, face in enumerate(faces):\r\n pred, conf = recognizer.predict(face)\r\n print (\"Prediction: \" + str(pred))\r\n print ('Confidence: ' + str(round(conf)))\r\n print ('Threshold: ' + str(threshold))\r\n\r\n if conf < threshold:\r\n cv2.putText(frame, labels_ppl[pred].capitalize(),\r\n (faces_coord[i][0], faces_coord[i][1] - 2),\r\n cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,\r\n cv2.LINE_AA)\r\n if len(faces_coord[i]) == 4:\r\n x,y,w,h = faces_coord[i]\r\n cv2.rectangle(frame, (x,y), (x+w,y+h), (206, 0, 209), thickness=2)\r\n elif len(faces_coord[i]) == 8:\r\n pts = map(tuple, np.array(faces_coord[i]).reshape(4,2).tolist())\r\n cv2.polylines(frame, np.int32([pts]), True, (206, 0, 209), 2)\r\n else:\r\n cv2.putText(frame, \"Unknown\",\r\n (faces_coord[i][0], faces_coord[i][1]),\r\n cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209), 2,\r\n cv2.LINE_AA)\r\n\r\n\r\n # Display the resulting frame\r\n cv2.imshow('Video', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Face 
Recogition/Face_recogition.py","file_name":"Face_recogition.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"376282390","text":"from torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data_loader23 import transformer_loader\nimport math\nimport time\nimport copy\nimport random\nimport os\nclass Trans(nn.Module):\n def __init__(self):\n super(Trans, self).__init__()\n self.encoder_layer = nn.TransformerEncoderLayer(d_model=100, nhead=2,dropout=0.3,dim_feedforward=1024)\n self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=6,norm=\"T\")\n self.linear = nn.Sequential(nn.Flatten(),\n nn.Linear(100*20, 512),\n nn.Dropout(0.1),\n # nn.Linear(1024, 512),\n # nn.Dropout(0.1),\n nn.Linear(512, 3),\n nn.Dropout(0.1),\n nn.Softmax())\n\n def forward(self,x):\n # print(x.size())\n x=self.transformer_encoder(x)\n # print(x.size())\n x=self.linear(x)\n return x\n\ndef binary_acc(preds, y):\n\n # preds = torch.round(preds)\n score_p, prediction = torch.max(preds, 1)\n score_t, target = torch.max(y, 1)\n # print(\"hhhhhhhhhhhhhhh\")\n # print(prediction)\n # print(target)\n\n correct = torch.eq(prediction, target).float()\n acc = correct.sum() /( len(correct))\n return acc\n\n\n#训练函数\ndef train(model, iterator, optimizer, criteon):\n\n avg_loss = []\n avg_acc = []\n model.train() #表示进入训练模式\n # print(\"-------------------\")3\n # print(len(iterator))\n for i, (data,label,mask) in enumerate(iterator):\n # print(\"*****************\")\n # print(data.size)\n # print(label.size())\n # print(mask.size)\n data=Variable(data).float().cuda()\n mask=Variable(mask).float().cuda()\n label=Variable(label).float().cuda()\n\n # data = Variable(data).type(torch.LongTensor)\n pred = model(data)\n\n # print(pred.size())\n # print(\"hhhhhhhhhhhh\")\n # similarity = torch.cosine_similarity(pred, label)\n #\n # loss = 1 - similarity\n loss = criteon(pred, label)\n acc = binary_acc(pred, label).item() #计算每个batch的准确率\n avg_loss.append(loss.item())\n avg_acc.append(acc)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n avg_acc = np.array(avg_acc).mean()\n avg_loss = np.array(avg_loss).mean()\n return avg_loss, avg_acc\n\n\n#评估函数\ndef evaluate(model, iterator, criteon):\n\n avg_loss = []\n avg_acc = []\n model.eval() #表示进入测试模式\n\n with torch.no_grad():\n for (data,label,mask) in iterator:\n data = Variable(data).float().cuda()\n mask = Variable(mask).float().cuda()\n label = Variable(label).float().cuda()\n pred = model(data)\n # score_p, prediction = torch.max(pred, 1)\n # score_t, target = torch.max(label, 1)\n # print(\"hhhhhhhhhhhhhhh\")\n # print(prediction)\n # print(target)\n loss = criteon(pred,label)\n acc = binary_acc(pred, label).item()\n avg_loss.append(loss.item())\n avg_acc.append(acc)\n\n avg_loss = np.array(avg_loss).mean()\n avg_acc = np.array(avg_acc).mean()\n return avg_loss, avg_acc\nif __name__==\"__main__\":\n\n from scipy.io import loadmat\n import numpy as np\n import random\n import os\n\n data = loadmat(\"MCAD_AFQ_competition.mat\", mat_dtype=True)\n # print(data.keys())\n # print(data[\"train_set\"])\n # print(data[\"train_set\"]0][0][.shape)\n train_set = data[\"train_set\"]\n train_diagnose = data[\"train_diagnose\"]\n all_data_list=[]\n # for i,item in enumerate(train_diagnose):\n # if(item[0]>1):\n # all_data_list.append(str(i)+\".npy\")\n\n\n all_data_list = os.listdir(\"./new_data/\")\n len_train_list=len(all_data_list)\n random.shuffle(all_data_list)\n 
val1,val2,val3,val4,val5=all_data_list[:int(len_train_list*0.2)],all_data_list[int(len_train_list*0.2):int(len_train_list*0.4)],all_data_list[int(len_train_list*0.4):int(len_train_list*0.6)],all_data_list[int(len_train_list*0.6):int(len_train_list*0.8)],all_data_list[int(len_train_list*0.8):]\n list_vals=[val1,val2,val3,val4,val5]\n for item in list_vals:\n val_list=item\n print(val_list)\n # val_list = random.sample(all_data_list, int(700 * 0.2))\n train_list = [item for item in all_data_list if item not in val_list]\n # random.shuffle(train_list)\n train_dataset = transformer_loader(train_list,\n transform=transforms.Compose([\n transforms.ToTensor(),\n\n ]))\n train_iterator = DataLoader(train_dataset, batch_size=32,\n shuffle=True, num_workers=1)\n print(len(train_list))\n print(len(train_iterator))\n dev_dataset = transformer_loader(val_list,\n transform=transforms.Compose([\n transforms.ToTensor(),\n ]))\n\n dev_iterator = DataLoader(dev_dataset, batch_size=32,\n shuffle=False, num_workers=1)\n\n # model=resnet18()\n # model =MyTransformerModel(p_drop=0.12).cuda()\n model=Trans().cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=1e-5)\n criteon = nn.BCELoss()\n accs=[]\n losses_list=[]\n best_valid_acc = float('-inf')\n for epoch in range(100):\n start_time = time.time()\n train_loss, train_acc = train(model, train_iterator, optimizer, criteon)\n dev_loss, dev_acc = evaluate(model, dev_iterator, criteon)\n end_time = time.time()\n epoch_mins, epoch_secs = divmod(end_time - start_time, 60)\n if dev_acc > best_valid_acc: #只要模型效果变好,就保存\n best_valid_acc = dev_acc\n torch.save(model.state_dict(),'wordavg-model23.pt')\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs:.2f}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n print(f'\\t Val. Loss: {dev_loss:.3f} | Val. Acc: {dev_acc*100:.2f}%')\n accs.append(dev_acc)\n losses_list.append(dev_loss)\n import matplotlib.pyplot as plt\n plt.plot(range(100),losses_list)\n plt.show()\n plt.plot(range(100),accs)\n plt.show()\n","sub_path":"transformer123.py","file_name":"transformer123.py","file_ext":"py","file_size_in_byte":6908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"465429261","text":"# if we try to get input in function parameter right away, it is a bit harder to really use exceptions to test input\ndef convert_str_to_float():\n\t'''\n\twhen function called:\n\t\ttry: asks user for input.\n\t\t\twill return input if input can be converted to a float\n\t\texcept ValueError:\n\t\t\ttells user to try again. ValueError found; input invalid.\n\t'''\n\twhile 1 > 0:\n\t\ttry:\n\t\t\tuinput=float(input('enter a float >>>'))\n\t\t\treturn uinput\n\t\texcept ValueError:\n\t\t\tprint('please try again')\nprint(convert_str_to_float())\n# have to print([function called]) in order to print result\n","sub_path":"IntroToProgramming/ch4questions_functions/convstringtofloat.py","file_name":"convstringtofloat.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"295191947","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 12 17:29:56 2017\n\n@author: xueyanmei\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import Lasso\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score\nfrom sklearn.linear_model import Ridge, RidgeCV\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\n\n\n# read the test date\ndata = pd.read_excel(\"/Users/xueyanmei/Downloads/test data1.xlsx\")\n\nminutes = data[\"minutes\"]\nn = np.logical_and(minutes > 10, minutes < 120)\nnew_data = data[n]\n\n#Get useful features\ndf = new_data[['day of the week', 'Age', 'sex', 'minutes', 'anatomy', 'patient type',\n 'pregnant', 'time', 'resource id', 'insured relation', 'hispanic', 'Ethnic code']]\n\n#Split target variable and features\ny_data = df['minutes'].values\nx_data = df.drop('minutes', axis = 1).values\n\n#split the train and test set\n(x_train, x_test, y_train, y_test) = train_test_split(x_data, y_data, test_size = 0.25)\n\n#build the lasso model\nlasso = Lasso(alpha =0.001, normalize=True)\nlasso.fit(x_train, y_train)\ncvscore_lasso = cross_val_score(lasso, x_data, y_data, cv=10)\ny_pred_lasso=lasso.predict(x_test)\nprint(\"R^2: {}\".format(lasso.score(x_test, y_test)))\nrmse_lasso = np.sqrt(mean_squared_error(y_test, y_pred_lasso))\nprint(\"Root Mean Squared Error: {}\".format(rmse_lasso))\n\n# build linear regression with regularization\nreg = RidgeCV(alphas = [0.1, 1.0, 10.0])\nreg.fit(x_train, y_train)\nreg.alpha_ \nest = Pipeline([('poly', PolynomialFeatures(2)), \n ('linear', Ridge(alpha=1))])\nest.fit(x_train, y_train)\nest_pred = est.predict(x_test)\nprint(\"R^2: {}\".format(lasso.score(x_test, est_pred)))\nrmse_ridge = np.sqrt(mean_squared_error(y_test, est_pred))\nprint(\"Root Mean Squared Error: {}\".format(rmse_ridge))\n\n#buid decision tree model\ndtr= DecisionTreeRegressor(max_depth=10)\ndtr.fit(x_train, y_train)\ny_pred_dtr = dtr.predict(x_test)\nrmse_dtr = np.sqrt(mean_squared_error(y_test, y_pred_dtr))\nprint(\"Root Mean Squared Error: {}\".format(rmse_dtr))\n\n","sub_path":"pyqt/studytime/trainingscript/studytimeRegression.py","file_name":"studytimeRegression.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"18530752","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom apps.loginregister.models import *\nfrom datetime import datetime\nimport bcrypt\nfrom apps.Courses.models import *\n\ndef index(request):\n if 'loggedid' in request.session:\n return redirect('/courses/')\n if 'errors' not in request.session:\n request.session['errors']={}\n context={'colleges':College.objects.all(),'departments':Dept.objects.all(),'errors':request.session['errors']}\n return render(request,'login.html',context)\n\ndef regval(request):\n validator= ValidationManager()\n if request.method=='POST':\n request.session['errors']={}\n errors= validator.validatereg(request.POST)\n if not len(errors):\n pwd=bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n pwdcrpt=pwd.decode('utf-8')\n print(pwdcrpt)\n x=User.objects.create(first_name=request.POST['first_name'],last_name=request.POST['last_name'],email=request.POST['email'], username=request.POST['username'], passwordhash=pwdcrpt)\n x.college.add(College.objects.get(name=str(request.POST['college'])))\n x.save()\n request.session['loggedid']=User.objects.last().id\n return redirect('/courses/')\n else:\n request.session['errors']=errors\n return redirect('/login/')\n return redirect('/login/')\n\ndef loginval(request):\n validator= ValidationManager()\n request.session['errors']={}\n if request.method=='POST':\n errors= validator.validatelogin(request.POST)\n if not len(errors):\n request.session['loggedid']=User.objects.get(username=request.POST['username']).id\n return redirect ('/courses/')\n else:\n request.session['errors']=errors\n return redirect('/login/')\n\ndef logout(request):\n request.session.clear()\n return redirect('/login/')\n\ndef edituser(request):\n if 'errors' not in request.session:\n request.session['errors']={}\n x=User.objects.get(id=request.session['loggedid'])\n context={'errors':request.session['errors'], 'user':x, 'admin':x.accesslevel, 'courses':Course.objects.all(), 'departments':Dept.objects.all(), 'colleges':College.objects.all()}\n return render (request,'useredit.html',context)\n\ndef updateuser(request):\n validator= ValidationManager()\n if request.method=='POST':\n request.session['errors']={}\n errors= validator.validateupdate(request.POST)\n request.session['errors']=errors\n if len(errors)>0:\n return redirect('/login/profile/')\n x=User.objects.get(id=request.session['loggedid'])\n x.first_name=request.POST['first_name']\n x.last_name=request.POST['last_name']\n x.email=request.POST['email']\n x.username=request.POST['username']\n x.save()\n return redirect('/courses/')\n return redirect('/login/')","sub_path":"apps/loginregister/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"334798159","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.mail import EmailMessage\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.db.models import Case, When, Value, BooleanField, Count\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom .forms import PostReplyForm, PrivateQuestionForm, QuestionForm, QuestionDeleteForm, PostReplyDeleteForm\nfrom .models import Question, Notice, FAQ, NewsLetter, Thesis, PostReply, SUBJECT_MENOPAUSE, SUBJECT_STUDY_OPERATION\nfrom examinees.models import Examinee\n#from tracking.models import Visitor\n\nimport datetime\nimport smtplib\n\nEMAIL_ID = settings.EMAIL_HOST_USER\nEMAIL_PASSWD = settings.EMAIL_HOST_PASSWORD\nEMAIL_HOST = settings.EMAIL_HOST\nEMAIL_PORT = settings.EMAIL_PORT\n\nMASTER_PW = settings.MASTER_PW\n\ndef index(request):\n\tend_time = datetime.datetime.now()\n\tstart_time = datetime.datetime(2019, 6, 1)\n\n\t#total_visit_count = Visitor.objects.stats(start_time, end_time)['total']\n\ttotal_visit_count = 0\n\ttotal_examinee_count = Examinee.objects.filter(is_tester=0).count()\n\n\tcontext = {\n\t\t'total_visit_count': '{}명'.format(format(total_visit_count, ',')),\n\t\t'total_examinee_count': '{}명'.format(format(total_examinee_count, ','))\n\t}\n\n\treturn render(request, 'index.html', context)\n\n\ndef intro_study_purpose(request):\n\treturn render(request, 'intro/study_purpose.html')\n\n\ndef intro_study_goal(request):\n\treturn render(request, 'intro/study_goal.html')\n\n\ndef study_method_examinees_status(request):\n\treturn render(request, 'study_method/examinees_status.html')\n\n\ndef study_method_collect_data(request):\n\treturn render(request, 'study_method/collect_data.html')\n\n\ndef study_result_thesis(request):\n\ttoday = datetime.datetime.today()\n\tthree_days_ago = today - datetime.timedelta(days=3)\n\tthesis_list = Thesis.objects.filter(is_open=True).order_by('-created_at').annotate(\n\t\treplies_count=Count('thesis_replies'),\n\t\tis_brand_new=Case(When(created_at__gte=three_days_ago, then=Value(True)),\n\t\t\t\t\t\t default=Value(False),\n\t\t\t\t\t\t output_field=BooleanField()))\n\tpaginator = Paginator(thesis_list, 10)\n\tpage = request.GET.get('page', 1)\n\tthesiss = paginator.page(page)\n\n\tcontext = {\n\t\t'thesiss': thesiss,\n\t}\n\n\treturn render(request, 'study_result/main_thesis.html', context)\n\n\ndef study_result_news_letter(request):\n\ttoday = datetime.datetime.today()\n\tthree_days_ago = today - datetime.timedelta(days=3)\n\tnews_letter_list = NewsLetter.objects.filter(is_open=True).order_by('-created_at').annotate(\n\t\treplies_count=Count('news_letter_replies'),\n\t\tis_brand_new=Case(When(created_at__gte=three_days_ago, then=Value(True)),\n\t\t\t\t\t\t default=Value(False),\n\t\t\t\t\t\t output_field=BooleanField()))\n\tpaginator = Paginator(news_letter_list, 10)\n\tpage = request.GET.get('page', 1)\n\tnews_letters = paginator.page(page)\n\n\tcontext = {\n\t\t'news_letters': news_letters,\n\t}\n\n\treturn render(request, 'study_result/news_letter.html', context)\n\n\ndef post_detail(request):\n\tdef do_error():\n\t\treturn HttpResponseRedirect(\"//kscs-kbsmc.net/app/\")\n\n\tcontent_type = request.GET.get('type', '')\n\tcontent_id = request.GET.get('id', '')\n\n\tif not content_type or not content_id:\n\t\treturn do_error()\n\n\tneed_show_reply = True\n\n\ttry:\n\t\tif 
content_type == \"notice\":\n\t\t\ttitle = \"공지사항\"\n\t\t\tpost = Notice.objects.get(id=content_id)\n\t\t\treplies = PostReply.objects.filter(notice=post)\n\n\t\telif content_type == \"faq\":\n\t\t\ttitle = \"FAQ\"\n\t\t\tpost = FAQ.objects.get(id=content_id)\n\t\t\treplies = PostReply.objects.none()\n\t\t\tneed_show_reply = False\n\n\t\telif content_type == \"question\":\n\t\t\ttitle = \"1:1문의\"\n\t\t\tpost = Question.objects.get(id=content_id)\n\t\t\treplies = PostReply.objects.filter(question=post)\n\n\t\telif content_type == \"thesis\":\n\t\t\ttitle = \"주요 논문\"\n\t\t\tpost = Thesis.objects.get(id=content_id)\n\t\t\treplies = PostReply.objects.filter(thesis=post)\n\n\t\telif content_type == \"news_letter\":\n\t\t\ttitle = \"뉴스레터\"\n\t\t\tpost = NewsLetter.objects.get(id=content_id)\n\t\t\treplies = PostReply.objects.filter(news_letter=post)\n\n\t\telse:\n\t\t\traise Exception()\n\texcept:\n\t\treturn do_error()\n\n\tcontext = {\n\t\t'comment_form': PostReplyForm(),\n\t\t'comment_delete_form': PostReplyDeleteForm(),\n\t\t'content_type': content_type,\n\t\t'title': title,\n\t\t'post': post,\n\t\t'replies': replies,\n\t\t'need_show_reply': need_show_reply\n\t}\n\n\tif content_type == \"question\":\n\t\tcontext['post_delete_form'] = QuestionDeleteForm()\n\n\tif request.method == 'POST':\n\t\tif 'content' in request.POST:\n\t\t\tform = PostReplyForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tpost_reply = form.save(commit=False)\n\n\t\t\t\thref = \"\"\n\t\t\t\tif content_type == \"notice\":\n\t\t\t\t\tpost_reply.notice = post\n\t\t\t\t\thref = \"https://kscs-kbsmc.net/user_app/notice/{}/change/\".format(\n\t\t\t\t\t\tpost.id,\n\t\t\t\t\t\tpost.id\n\t\t\t\t\t)\n\n\t\t\t\telif content_type == \"question\":\n\t\t\t\t\tpost_reply.question = post\n\t\t\t\t\thref = \"https://kscs-kbsmc.net/user_app/question/{}/change/\".format(\n\t\t\t\t\t\tpost.id,\n\t\t\t\t\t\tpost.id\n\t\t\t\t\t)\n\n\t\t\t\telif content_type == \"thesis\":\n\t\t\t\t\tpost_reply.thesis = post\n\t\t\t\t\thref = \"https://kscs-kbsmc.net/user_app/thesis/{}/change/\".format(\n\t\t\t\t\t\tpost.id,\n\t\t\t\t\t\tpost.id\n\t\t\t\t\t)\n\n\t\t\t\telif content_type == \"news_letter\":\n\t\t\t\t\tpost_reply.news_letter = post\n\t\t\t\t\thref = \"https://kscs-kbsmc.net/user_app/newsletter/{}/change/\".format(\n\t\t\t\t\t\tpost.id,\n\t\t\t\t\t\tpost.id\n\t\t\t\t\t)\n\n\t\t\t\tpost_reply.save()\n\n\t\t\t\tsend_email(\"[홈페이지] 새댓글 알림\", \"여성건강연구 홈페이지에 새로운 댓글이 올라왔습니다.
작성자: {}
내용: {}
작성시간: {}
{}\".format(\n\t\t\t\t\tpost_reply.name,\n\t\t\t\t\tpost_reply.content,\n\t\t\t\t\tpost_reply.created_at,\n\t\t\t\t\thref\n\t\t\t\t))\n\n\t\t\t\tmessages.error(request, \"댓글이 저장되었습니다.\")\n\n\t\telif 'post_reply_id' in request.POST:\n\t\t\tform = PostReplyDeleteForm(request.POST)\n\n\t\t\tif form.is_valid():\n\t\t\t\tpost_reply_id = request.POST.get('post_reply_id')\n\t\t\t\tpassword = request.POST.get('password')\n\n\t\t\t\ttry:\n\t\t\t\t\tpost_reply = PostReply.objects.get(id=post_reply_id, password=password)\n\t\t\t\t\tpost_reply.delete()\n\t\t\t\t\tmessages.error(request, \"댓글이 삭제되었습니다.\")\n\t\t\t\texcept:\n\t\t\t\t\tmessages.error(request, \"비밀번호가 올바르지 않습니다.\")\n\n\t\telif 'question_id' in request.POST:\n\t\t\tform = QuestionDeleteForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tquestion_id = request.POST.get('question_id')\n\t\t\t\tpassword = request.POST.get('password')\n\n\t\t\t\ttry:\n\t\t\t\t\tquestion = Question.objects.get(id=question_id, password=password)\n\t\t\t\t\tquestion.delete()\n\t\t\t\t\tmessages.error(request, \"게시글이 삭제되었습니다.\")\n\t\t\t\texcept:\n\t\t\t\t\tmessages.error(request, \"비밀번호가 올바르지 않습니다.\")\n\n\t\t\t\treturn HttpResponseRedirect(\"//kscs-kbsmc.net/app/question/inquiry/\")\n\n\t\treturn HttpResponseRedirect('https://kscs-kbsmc.net/app/question/detail/?id={}&type={}'.format(\n\t\t\tcontent_id, content_type\n\t\t))\n\n\treturn render(request, 'post_detail.html', context)\n\n\ndef question_write(request):\n\tcontext = {\n\t\t'form': QuestionForm()\n\t}\n\n\tif request.method == 'POST':\n\t\tform = QuestionForm(request.POST)\n\n\t\t# try:\n\t\tif form.is_valid():\n\t\t\tquestion = form.save(commit=False)\n\t\t\tquestion.save()\n\t\t\tmessages.error(request, \"문의글이 저장되었습니다.\")\n\n\t\t\tsend_email(\"[홈페이지] 새글 알림\", \"여성건강연구 홈페이지에 새로운 글이 올라왔습니다.
작성자: {}
문의종류: {}
작성시간: {}
{}\".format(\n\t\t\t\tquestion.name,\n\t\t\t\tquestion.get_subject_display(),\n\t\t\t\tquestion.created_at,\n\t\t\t\t\"https://kscs-kbsmc.net/user_app/question/{}/change/\".format(question.id, question.id)\n\t\t\t))\n\t\t\treturn HttpResponseRedirect(\"//kscs-kbsmc.net/app/question/inquiry\")\n\t\telse:\n\t\t\tmessages.error(request, \"유효하지 않은 요청입니다.\")\n\t\t# except:\n\t\t# \tmessages.error(request, \"서버 오류가 발생하였습니다.\")\n\t\t# \treturn HttpResponseRedirect(\"//kscs-kbsmc.net/app/question/inquiry/\")\n\n\treturn render(request, 'question_write.html', context)\n\n\ndef question_notice(request):\n\ttoday = datetime.datetime.today()\n\tthree_days_ago = today - datetime.timedelta(days=3)\n\tnotice_list = Notice.objects.filter(is_open=True).order_by('-created_at').annotate(\n\t\treplies_count=Count('notice_replies'),\n\t\tis_brand_new=Case(When(created_at__gte=three_days_ago, then=Value(True)),\n\t\t\t\t\t\t default=Value(False),\n\t\t\t\t\t\t output_field=BooleanField()))\n\tpaginator = Paginator(notice_list, 10)\n\tpage = request.GET.get('page', 1)\n\tnotices = paginator.page(page)\n\n\tcontext = {\n\t\t'notices': notices,\n\t}\n\n\treturn render(request, 'question/notice.html', context)\n\n\ndef question_faq(request):\n\ttoday = datetime.datetime.today()\n\tthree_days_ago = today - datetime.timedelta(days=3)\n\n\tmenopause_faq_list = FAQ.objects.filter(is_open=True, subject=SUBJECT_MENOPAUSE).order_by('-created_at').annotate(\n\t\tis_brand_new=Case(When(created_at__gte=three_days_ago, then=Value(True)),\n\t\t\t\t\t\t default=Value(False), output_field=BooleanField()))\n\n\tstudy_faq_list = FAQ.objects.filter(is_open=True, subject=SUBJECT_STUDY_OPERATION).order_by('-created_at').annotate(\n\t\t\tis_brand_new=Case(When(created_at__gte=three_days_ago, then=Value(True)),\n\t\t\t\t\t\t\t default=Value(False), output_field=BooleanField()))\n\n\tpage = request.GET.get('page', 1)\n\tsubject = request.GET.get('subject', 'menopause')\n\n\tmenopause_paginator = Paginator(menopause_faq_list, 10)\n\tstudy_paginator = Paginator(study_faq_list, 10)\n\n\tif subject == 'study':\n\t\tmenopause_page = 1\n\t\tstudy_page = page\n\n\telse:\n\t\tmenopause_page = page\n\t\tstudy_page = 1\n\n\tmenopause_faqs = menopause_paginator.page(menopause_page)\n\tstudy_faqs = study_paginator.page(study_page)\n\n\tcontext = {\n\t\t'menopause_faqs': menopause_faqs,\n\t\t'study_faqs': study_faqs\n\t}\n\treturn render(request, 'question/faq.html', context)\n\n\ndef question_inquiry(request):\n\ttoday = datetime.datetime.today()\n\tyesterday = today - datetime.timedelta(days=1)\n\tquestion_list = Question.objects.all().order_by('-created_at').annotate(\n\t\treplies_count=Count('question_replies'),\n\t\tis_brand_new=Case(When(created_at__gte=yesterday, then=Value(True)),\n\t\t\t\t\t\t default=Value(False),\n\t\t\t\t\t\t output_field=BooleanField()))\n\tpaginator = Paginator(question_list, 10)\n\tpage = request.GET.get('page', 1)\n\tquestions = paginator.page(page)\n\n\tcontext = {\n\t\t'form': PrivateQuestionForm(),\n\t\t'questions': questions,\n\t}\n\n\tif request.method == 'POST':\n\t\tform = PrivateQuestionForm(request.POST)\n\n\t\ttry:\n\t\t\tif form.is_valid():\n\t\t\t\tquestion_id = int(form.cleaned_data.get('question_id'))\n\t\t\t\tpassword = form.cleaned_data.get('password')\n\n\t\t\t\tif password == MASTER_PW:\n\t\t\t\t\tquestion = Question.objects.get(\n\t\t\t\t\t\tid=question_id,\n\t\t\t\t\t)\n\n\t\t\t\telse:\n\t\t\t\t\tquestion = 
Question.objects.get(\n\t\t\t\t\t\tid=question_id,\n\t\t\t\t\t\tpassword=password\n\t\t\t\t\t)\n\n\t\t\t\treturn HttpResponseRedirect(\n\t\t\t\t\t\"//kscs-kbsmc.net/app/question/detail/?id={}&type=question\".format(question.id))\n\n\t\t\telse:\n\t\t\t\traise Exception()\n\t\texcept:\n\t\t\tmessages.error(request, \"비밀번호가 올바르지 않습니다.\")\n\t\t\treturn HttpResponseRedirect(\"//kscs-kbsmc.net/app/question/inquiry/\")\n\n\treturn render(request, 'question/inquiry.html', context)\n\n\ndef question_contact(request):\n\treturn render(request, 'question/contact.html')\n\n\ndef send_email(title, content):\n\tmessage = MIMEMultipart()\n\tmessage['From'] = EMAIL_ID\n\tmessage['To'] = \"kscs.kbsmc@samsung.com\"\n\t# message['To'] = \"elysiu_m@naver.com\"\n\tmessage['Subject'] = title\n\tmessage.attach(MIMEText(content, \"html\", \"utf-8\"))\n\n\temail_object = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)\n\temail_object.ehlo()\n\temail_object.starttls()\n\temail_object.ehlo()\n\temail_object.login(EMAIL_ID, EMAIL_PASSWD)\n\ttry:\n\t\temail_object.sendmail(EMAIL_ID, [message['To']], message.as_string())\n\t\temail_object.close()\n\texcept:\n\t\tpass","sub_path":"user_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"225405314","text":"# -*- coding: utf-8 -*-\n\nimport os, sys\nimport struct\nfrom collections import namedtuple\n\nFunction = namedtuple('Function', ['entry', 'end', 'name'])\nfunctions = dict()\nlabels = dict()\n\ndef get_registry_value(key, subkey, value):\n\timport _winreg\n\tkey = getattr(_winreg, key)\n\thandle = _winreg.OpenKey(key, subkey)\n\t(value, type) = _winreg.QueryValueEx(handle, value)\n\treturn value\n\ncputype = get_registry_value(\n\t\"HKEY_LOCAL_MACHINE\", \n\t\"HARDWARE\\\\DESCRIPTION\\\\System\\\\CentralProcessor\\\\0\",\n\t\"ProcessorNameString\")\n\ncpuspeed = get_registry_value(\n\t\"HKEY_LOCAL_MACHINE\", \n\t\"HARDWARE\\\\DESCRIPTION\\\\System\\\\CentralProcessor\\\\0\",\n\t\"~MHz\")\n\nclass TraceData:\n\tdef __init__(self, data, base_ts):\n\t\tself.data = data\n\t\tself.ts = (data['ts'] - base_ts)/(cpuspeed * 1000000.0)\n\n\tdef __repr__(self):\n\t\ts = ''\n\n\t\tif self.data['code'] == 0:\n\t\t\ts = 'trace'\n\t\t\n\t\ts = \"%.3f %s<%x>\" % (self.ts, s, self.data['thread'])\n\t\treturn s\n\n\tdef is_trace(self):\n\t\treturn self.data['code'] == 0\n\n\tdef get_count(self):\n\t\treturn self.data['count']\n\n\tdef unpack(self, start=0):\n\t\tlength = self.get_count() - start\n\t\tif length <= 0: return\n\t\t\n\t\tfor entry in struct.unpack_from('<%dI' % (length,), self.data['data'], start*4):\n\t\t\tyield entry\n\n\tdef get_trace(self, i):\n\t\td = struct.unpack('>2\n rotamer_trials_renew_taskpack(pose)\n minmover.apply(pose)\n pmm.apply(pose)\n if mc.boltzmann(pose):\n num_accepts+=1\n pmm.apply(pose)\n pack_mover_min_renew_taskpack(pose)\n jd.output_decoy(pose)\n \n'''# Acceptance Rate\n# log_acceptance_rate\nf=open('log.txt','a')\nacceptance_rate=float(num_accepts)/float(mc.total_trials())\nacceptance_rate=float('%.4f'%acceptance_rate)\nf.write('count \\t num_accepts \\t acceptance_rate \\n')\nf.write(str(count)+'\\t'+str( num_accepts)+'\\t\\t' +str(acceptance_rate)+'\\n')\nf.close()'''\n","sub_path":"PyRosetta/Code/1207movie.py","file_name":"1207movie.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"332448771","text":"import argparse\nimport os\nimport sys\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import DataLoader\nimport torch.optim as opt\nfrom tqdm import tqdm\nfrom transformers import BartTokenizer, get_linear_schedule_with_warmup\nimport datasets\nsys.path.insert(0, os.path.abspath('..'))\nfrom model.Seq2Seq import *\nfrom preprocessing.translation_data import *\n\nos.chdir('../')\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\n '--gpu',\n type=int,\n default=0,\n help=f'Specify which gpu to use'\n)\n\narg_parser.add_argument(\n '-e', '--epoch',\n type=int,\n default=10,\n help=f'Specify number of training epochs'\n)\narg_parser.add_argument(\n '-b', '--batch',\n type=int,\n default=6,\n help=f'Specify batch size'\n)\nargs = arg_parser.parse_args()\n\n# device\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nif device == 'cuda':\n torch.cuda.set_device(args.gpu) # use an unoccupied GPU\n\n# hyperparameter\nNUM_EPOCH = args.epoch\nBATCH_SIZE = args.batch\nEMBEDDING_DIM = 128\nHIDDEN_DIM = 128\nNUM_LAYERS = 2\nDROPOUT = 0.1\n\n# model saving\nos.makedirs(os.path.dirname('model_weights' + '/'), exist_ok=True)\nMODEL_NAME = f'seq2seq_{BATCH_SIZE}'\nlog_file = open(os.path.join('model_weights', f'{MODEL_NAME}.log'), 'w')\nprint(f\"training seq2seq with batch size {BATCH_SIZE} for {NUM_EPOCH} epochs\")\n\n# model setup\ntokenizer = BartTokenizer.from_pretrained('facebook/bart-base')\nmodel = Seq2Seq(embed_size=EMBEDDING_DIM,\n hidden_size=HIDDEN_DIM,\n num_layers=NUM_LAYERS,\n dropout=DROPOUT).to(device)\noptimizer = opt.Adam(model.parameters())\n\n# record these for each epoch\nloss_record = []\nppl_record = []\n# training loop\nfor epo in range(NUM_EPOCH):\n model.train()\n total_loss = 0\n\n '''\n DataLoader\n '''\n dataset = TranslationData(data='train')\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=BATCH_SIZE,\n shuffle=True\n )\n\n # training\n train_iterator_with_progress = tqdm(data_loader)\n idx = 0\n for batch in train_iterator_with_progress:\n try:\n # input encoding\n input_encoding = tokenizer(batch['en'], return_tensors='pt', padding=True, truncation=True)\n input_ids = input_encoding['input_ids']\n input_ids = torch.transpose(input_ids, 0, 1).to(device) # shape: (input_len, batch_size)\n\n # target encoding\n target_encoding = tokenizer(batch['de'], return_tensors='pt', padding=True, truncation=True)\n target_ids = target_encoding['input_ids']\n target_ids = torch.transpose(target_ids, 0, 1).to(device) # shape: (target_len, batch_size)\n\n # zero-out gradient\n optimizer.zero_grad()\n\n # forward pass\n outputs, _ = model(x=input_ids, y=target_ids) # outputs.shape: (target_len, batch_size, vocab_size)\n\n # prepare labels for cross entropy by removing the first time stamp ()\n labels = target_ids[1:, :] # shape: (target_len - 1, batch_size)\n labels = labels.reshape(-1).to(device) # shape: ((target_len - 1) * batch_size)\n\n # prepare model predicts for cross entropy by removing the last timestamp and merge first two axes\n outputs = outputs[:-1, ...] 
# shape: (target_len - 1, batch_size, vocab_size)\n outputs = outputs.reshape(-1, outputs.shape[-1]).to(device)\n # shape: ((target_len - 1) * batch_size, vocab_size)\n\n # compute loss and perform a step\n criterion = nn.CrossEntropyLoss(ignore_index=1) # ignore padding index\n loss = criterion(outputs, labels)\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # gradient clipping\n optimizer.step()\n # scheduler.step()\n\n # if idx % 1000 == 0:\n # print(f'epoch: {epo}, batch: {idx}, memory reserved {torch.cuda.memory_reserved(DEVICE_ID) / 1e9} GB')\n # print(f'epoch: {epo}, batch: {idx}, memory allocated {torch.cuda.memory_allocated(DEVICE_ID) / 1e9} GB')\n idx += 1\n\n total_loss += float(loss)\n train_iterator_with_progress.set_description(f'Epoch {epo}')\n train_iterator_with_progress.set_postfix({'Loss': loss.item()})\n except Exception as e:\n print(e)\n\n loss_record.append(total_loss)\n print(f'Loss in epoch {epo}: {total_loss}')\n log_file.write(f'Epoch:{epo} ')\n log_file.write(f'Loss:{total_loss} ')\n\n # evaluation\n model.eval()\n with torch.no_grad():\n '''\n DataLoader\n '''\n valid_dataset = TranslationData(data='test')\n valid_data_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True\n )\n\n batch_num = 0\n total_loss = 0\n metric_bleu = datasets.load_metric('sacrebleu')\n for batch in valid_data_loader:\n # input encoding\n input_encoding = tokenizer(batch['en'], return_tensors='pt', padding=True, truncation=True)\n input_ids = input_encoding['input_ids']\n input_ids = torch.transpose(input_ids, 0, 1).to(device) # shape: (input_len, batch_size)\n\n # target encoding\n target_encoding = tokenizer(batch['de'], return_tensors='pt', padding=True, truncation=True)\n target_ids = target_encoding['input_ids']\n target_ids = torch.transpose(target_ids, 0, 1).to(device) # shape: (target_len, batch_size)\n\n # forward pass\n outputs, _ = model(x=input_ids, y=target_ids) # outputs.shape: (target_len, batch_size, vocab_size)\n\n # prepare labels for cross entropy by removing the first time stamp ()\n labels = target_ids[1:, :] # shape: (target_len - 1, batch_size)\n labels = labels.reshape(-1).to(device) # shape: ((target_len - 1) * batch_size)\n\n # prepare model predicts for cross entropy by removing the last timestamp and merge first two axes\n outputs = outputs[:-1, ...] 
# shape: (target_len - 1, batch_size, vocab_size)\n outputs = outputs.reshape(-1, outputs.shape[-1]).to(device)\n # shape: ((target_len - 1) * batch_size, vocab_size)\n\n # compute loss and perform a step\n criterion = nn.CrossEntropyLoss(ignore_index=1) # ignore padding index\n loss = criterion(outputs, labels)\n\n total_loss += float(loss)\n batch_num += 1\n\n input_ids = torch.transpose(input_ids, 0, 1).to(device)\n model_res_ids = []\n for source in input_ids:\n length = torch.sum(source != 1)\n model_res_ids.append(model.generate(source.reshape(-1, 1)[:length]))\n predictions = [tokenizer.decode(g, skip_special_tokens=True) for g in model_res_ids]\n\n tmp_predictions, tmp_targets = [], []\n for prediction, target in zip(predictions, batch['de']):\n if len(target) > 0:\n tmp_predictions.append(prediction)\n tmp_targets.append(target)\n predictions, targets = tmp_predictions, tmp_targets\n references = [[r] for r in targets]\n metric_bleu.add_batch(predictions=predictions, references=references)\n\n perplexity = np.exp(total_loss / batch_num)\n ppl_record.append(perplexity)\n score_bleu = metric_bleu.compute()\n print(f'Perplexity: {perplexity}')\n print(f'BLEU: {round(score_bleu[\"score\"], 1)} out of {round(100., 1)}')\n log_file.write(f'Perplexity:{perplexity}')\n log_file.write(f'BLEU: {round(score_bleu[\"score\"], 1)} out of {round(100., 1)}\\n')\n\n SAVE_PATH = os.path.join('model_weights', f'{MODEL_NAME}_epoch_{epo+1}.pt')\n # save model after training for one epoch\n torch.save(model.state_dict(), SAVE_PATH)\n\n# close log file\nlog_file.close()\n\n# plot loss and ppl\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))\n\nepochs = list(range(NUM_EPOCH))\nax[0].plot(epochs, loss_record)\nax[0].set_title('Loss', fontsize=20)\nax[0].set_xlabel('Epoch', fontsize=15)\nax[0].set_ylabel('Loss', fontsize=15)\n\nax[1].plot(epochs, ppl_record)\nax[1].set_title('Perplexity', fontsize=20)\nax[1].set_xlabel('Epoch', fontsize=15)\nax[1].set_ylabel('Perplexity', fontsize=15)\n\nos.makedirs(os.path.dirname('figures' + '/'), exist_ok=True)\nfig.savefig(os.path.join('figures', f'{MODEL_NAME}'))\n\n\n","sub_path":"train/train_seq2seq.py","file_name":"train_seq2seq.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"619607024","text":"from typing import List, Mapping, Optional\n\nfrom app.data_models.answer_store import AnswerStore\nfrom app.data_models.list_store import ListStore\nfrom app.data_models.progress_store import ProgressStore\nfrom app.questionnaire.location import Location\nfrom app.questionnaire.questionnaire_schema import QuestionnaireSchema\nfrom app.questionnaire.routing_path import RoutingPath\nfrom app.questionnaire.rules import (\n evaluate_goto,\n evaluate_skip_conditions,\n is_goto_rule,\n)\n\n\nclass PathFinder:\n def __init__(\n self,\n schema: QuestionnaireSchema,\n answer_store: AnswerStore,\n list_store: ListStore,\n progress_store: ProgressStore,\n metadata: Mapping,\n ):\n self.answer_store = answer_store\n self.metadata = metadata\n self.schema = schema\n self.progress_store = progress_store\n self.list_store = list_store\n\n def routing_path(\n self, section_id: str, list_item_id: Optional[str] = None\n ) -> RoutingPath:\n \"\"\"\n Visits all the blocks in a section and returns a path given a list of answers.\n \"\"\"\n blocks: List[Mapping] = []\n routing_path_block_ids = []\n current_location = Location(section_id=section_id, list_item_id=list_item_id)\n section = self.schema.get_section(section_id)\n list_name = self.schema.get_repeating_list_for_section(\n current_location.section_id\n )\n\n if section:\n for group in section[\"groups\"]:\n if \"skip_conditions\" in group:\n if evaluate_skip_conditions(\n group[\"skip_conditions\"],\n self.schema,\n self.metadata,\n self.answer_store,\n self.list_store,\n current_location=current_location,\n ):\n continue\n\n blocks.extend(group[\"blocks\"])\n\n if blocks:\n routing_path_block_ids = self._build_routing_path_block_ids(\n blocks, current_location\n )\n\n return RoutingPath(routing_path_block_ids, section_id, list_item_id, list_name)\n\n @staticmethod\n def _block_index_for_block_id(blocks, block_id):\n return next(\n (index for (index, block) in enumerate(blocks) if block[\"id\"] == block_id),\n None,\n )\n\n def _build_routing_path_block_ids(self, blocks, current_location):\n # Keep going unless we've hit the last block\n routing_path_block_ids = []\n block_index = 0\n repeating_list = self.schema.get_repeating_list_for_section(\n current_location.section_id\n )\n\n while block_index < len(blocks):\n block = blocks[block_index]\n\n is_skipping = block.get(\"skip_conditions\") and evaluate_skip_conditions(\n block[\"skip_conditions\"],\n self.schema,\n self.metadata,\n self.answer_store,\n self.list_store,\n current_location=current_location,\n routing_path_block_ids=routing_path_block_ids,\n )\n\n if not is_skipping:\n block_id = block[\"id\"]\n if repeating_list and current_location.list_item_id:\n this_location = Location(\n section_id=current_location.section_id,\n block_id=block_id,\n list_name=repeating_list,\n list_item_id=current_location.list_item_id,\n )\n else:\n this_location = Location(\n section_id=current_location.section_id, block_id=block_id\n )\n\n if block_id not in routing_path_block_ids:\n routing_path_block_ids.append(block_id)\n\n # If routing rules exist then a rule must match (i.e. 
default goto)\n routing_rules = block.get(\"routing_rules\")\n if routing_rules:\n block_index = self._evaluate_routing_rules(\n this_location,\n blocks,\n routing_rules,\n block_index,\n routing_path_block_ids,\n )\n if block_index:\n continue\n\n return routing_path_block_ids\n\n # Last block so return routing_path_block_ids\n if block_index == len(blocks) - 1:\n return routing_path_block_ids\n\n # No routing rules, so step forward a block\n block_index = block_index + 1\n\n def _evaluate_routing_rules(\n self, this_location, blocks, routing_rules, block_index, routing_path_block_ids\n ):\n for rule in filter(is_goto_rule, routing_rules):\n should_goto = evaluate_goto(\n rule[\"goto\"],\n self.schema,\n self.metadata,\n self.answer_store,\n self.list_store,\n current_location=this_location,\n routing_path_block_ids=routing_path_block_ids,\n )\n\n if should_goto:\n if rule[\"goto\"].get(\"section\") == \"End\":\n return None\n\n next_block_id = self._get_next_block_id(rule)\n next_block_index = PathFinder._block_index_for_block_id(\n blocks, next_block_id\n )\n next_precedes_current = (\n next_block_index is not None and next_block_index < block_index\n )\n\n if next_precedes_current:\n self._remove_rule_answers(rule[\"goto\"], this_location)\n routing_path_block_ids.append(next_block_id)\n return None\n\n return next_block_index\n\n def _get_next_block_id(self, rule):\n if \"group\" in rule[\"goto\"]:\n return self.schema.get_first_block_id_for_group(rule[\"goto\"][\"group\"])\n return rule[\"goto\"][\"block\"]\n\n def _remove_rule_answers(self, goto_rule, this_location):\n # We're jumping backwards, so need to delete all answers from which\n # route is derived. Need to filter out conditions that don't use answers\n if \"when\" in goto_rule.keys():\n for condition in goto_rule[\"when\"]:\n if \"meta\" not in condition.keys():\n self.answer_store.remove_answer(condition[\"id\"])\n\n self.progress_store.remove_completed_location(location=this_location)\n","sub_path":"app/questionnaire/path_finder.py","file_name":"path_finder.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"427077370","text":"import os\nfrom functools import reduce\nfrom configparser import ConfigParser\nfrom urllib.parse import urlparse\n\nimport dataset\nimport requests\nimport html2text\nimport progressbar\nfrom readability import Document\n\nPATH_DATA_BROWSER = os.path.join(os.environ['HOME'],\n '.mozilla/firefox/')\n\n\nclass BrowserWorker:\n def __init__(self, browser_path=None, tag='read later'):\n self.browser_path = browser_path\n self.tag = tag\n\n def profile_path(self):\n profile_ini = os.path.join(self.browser_path, 'profiles.ini')\n\n if not os.path.isfile(profile_ini):\n raise Exception('File does not exist <{file_path}>'\n .format(file_path=profile_ini))\n\n profiles = ConfigParser()\n profiles.read(profile_ini)\n\n profile_abs_path = None\n\n for profile in profiles._sections:\n _prof = profiles._sections.get(profile)\n if 'default' in _prof.keys() and _prof.get('default') is '1':\n profile_abs_path = _prof['path']\n break\n\n if not profile_abs_path:\n raise Exception('No profile found <{file_path}>'\n .format(file_path=profile_ini))\n\n return os.path.join(self.browser_path, profile_abs_path,\n 'places.sqlite')\n\n def _tags(self):\n places_sqlite = self.profile_path()\n data = dataset.connect('sqlite:///{}'.format(places_sqlite))\n\n return data['moz_bookmarks'].distinct('id', 'title', type=2, parent=4)\n\n def _tag_by_label(self, label=None):\n tags = [dict(tag).get('id')\n for tag in self._tags() if tag['title'] == label]\n\n tag_id = None\n if tags and len(tags) is 1:\n tag_id = tags[0]\n\n return tag_id\n\n def articles(self):\n places_sqlite = self.profile_path()\n data = dataset.connect('sqlite:///{}'.format(places_sqlite))\n\n tag_id = self._tag_by_label('read later')\n articles_key = [article for article in data['moz_bookmarks'].distinct('fk', parent=tag_id)]\n\n articles = [list(data['moz_places'].distinct('url',\n id=article.get('fk')))[0]['url']\n for article in articles_key]\n return articles\n\n\ndef article_folder_archive(file_title):\n\n archive_folder = os.path.join(os.environ.get('HOME'), 'Documents',\n 'Archive')\n if not os.path.isdir(archive_folder):\n os.makedirs(archive_folder)\n\n return os.path.join(archive_folder, file_title)\n\n\ndef articles():\n data_articles = BrowserWorker(PATH_DATA_BROWSER)\n articles = data_articles.articles()\n\n for article_url in progressbar.progressbar(articles):\n url = urlparse(article_url)\n response = requests.get(article_url)\n doc = Document(response.text)\n article_markdown = html2text.html2text(doc.summary())\n\n\n file_title = '{}.md'.format(url.path.split('/')[-1])\n file_save = article_folder_archive(file_title)\n with open(file_save, 'a') as article_file:\n article_file.write(article_markdown)\n\nif __name__ == '__main__':\n articles()\n","sub_path":"readings.py","file_name":"readings.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"651720691","text":"from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Опрос\")\r\nroot.geometry(\"720x800\")\r\n\r\n\r\n\r\n\r\nbtn = Button(text=\"Посмотреть результаты\", # текст кнопки\r\n background=\"#555\", # фоновый цвет кнопки\r\n foreground=\"#ccc\", # цвет текста\r\n padx=\"20\", # отступ от границ до содержимого по горизонтали\r\n pady=\"8\", # отступ от границ до содержимого по вертикали\r\n font=\"16\") # высота шрифта\r\nbtn.pack(side=BOTTOM, padx=0, pady=50)\r\nlabel1 = Label(text=\"Какой язык программирования вам больше всего нравится ?\")\r\nlabel1.pack(side=TOP,padx=0,pady=10)\r\nPossible_answer = IntVar()\r\n\r\npython_checkbutton = Radiobutton(text=\"Java\",variable=Possible_answer, value=1, padx=15, pady=10)\r\npython_checkbutton.pack(side=TOP,padx=0,pady=10)\r\npython_checkbutton = Radiobutton(text=\"Python\",variable=Possible_answer, value=2, padx=15, pady=10)\r\npython_checkbutton.pack(side=TOP,padx=0,pady=10)\r\n\r\nlabel1 = Label(text=\"Какой язык программирования вам больше всего нравится ?\")\r\nlabel1.pack(side=TOP,padx=0,pady=10)\r\nanswer = IntVar()\r\n\r\npython_checkbutton = Radiobutton(text=\"Java\",variable=answer, value=3, padx=15, pady=10)\r\npython_checkbutton.pack(side=TOP,padx=0,pady=10)\r\npython_checkbutton = Radiobutton(text=\"Python\",variable=answer, value=4, padx=15, pady=10)\r\npython_checkbutton.pack(side=TOP,padx=0,pady=10)\r\n\r\n\r\n\r\n\r\n\r\n\r\nroot.mainloop()","sub_path":"pythonProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"256384715","text":"with open('action.csv', 'r') as input_item:\n lines = input_item.readlines()\n\nnew_lines = []\nfor line in lines:\n items = line.split(\"_!_\") # 22780_!_611_!_2_!_0_!_1617488831_!_1\n # 250def1d50bf_!_1690341_!_1621319827_!_1_!_0_!_1\n new_line = [items[0], items[1], items[4], items[2], items[3], items[5]]\n new_lines.append(\"_!_\".join(new_line))\n\nwith open('action_2.csv', 'w') as out:\n out.writelines(new_lines)\n","sub_path":"src/offline/movie/data-scraping/filter_movie.py","file_name":"filter_movie.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"344865293","text":"# -*- coding: utf-8 -*-\n\nfrom geoalchemy2.elements import _SpatialElement\n\nfrom pyramid_oereb.lib import b64\nfrom pyramid_oereb.lib.records.image import ImageRecord\nfrom pyramid_oereb.lib.sources import BaseDatabaseSource\nfrom geoalchemy2.shape import to_shape\n\nfrom pyramid_oereb.lib.sources.municipality import MunicipalityBaseSource\n\n\nclass DatabaseSource(BaseDatabaseSource, MunicipalityBaseSource):\n\n def read(self, params, fosnr=None):\n \"\"\"\n Central method to read a municipality by it's id_bfs identifier.\n\n Args:\n params (pyramid_oereb.views.webservice.Parameter): The parameters of the extract request.\n fosnr (int or None): The federal number of the municipality defined by the statistics office.\n \"\"\"\n session = self._adapter_.get_session(self._key_)\n try:\n self.records = list()\n if fosnr:\n results = session.query(self._model_).filter(self._model_.fosnr == fosnr).all()\n else:\n results = session.query(self._model_).all()\n for result in results:\n logo = ImageRecord(b64.decode(result.logo))\n self.records.append(self._record_class_(\n result.fosnr,\n result.name,\n result.published,\n logo,\n geom=to_shape(result.geom).wkt if isinstance(\n result.geom, _SpatialElement) else None,\n ))\n finally:\n session.close()\n","sub_path":"pyramid_oereb/standard/sources/municipality.py","file_name":"municipality.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"200042469","text":"from schema import Schema\n\nschema = Schema(\n {\n \"userId\": str,\n \"paymentMethod\": str,\n \"total\": str,\n \"product\": [{\"id\": str, \"type\": str, \"name\": str, \"price\": str}],\n },\n ignore_extra_keys=True,\n)\n\ndef validate(event):\n return schema.validate(event)","sub_path":"src/order/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"302667538","text":"import dropbox\r\n\r\nclass TransferData:\r\n def __init__(self, access_token):\r\n self.access_token=access_token\r\n\r\n def upload(self, file_from,file_to):\r\n db=dropbox.Dropbox(self.access_token)\r\n\r\n f=open(file_from,'rb')\r\n db.files_upload(f.read(), file_to)\r\n\r\ndef main():\r\n access_token=\"JhbcWnh-C6cAAAAAAAAAAT70P7DuQoK_-rbd5DKTvTAdqMql-uFFmm-cppm-mIxy\"\r\n TransferData(access_token)\r\n\r\n file_from=input(\"Enter the file path to transfer: \")\r\n file_to=input(\"Enter the full file path to transfer: \")\r\n\r\n TransferData.upload(file_from,file_to)\r\n print(\"File has been moved.\")\r\n\r\n \r\n\r\n","sub_path":"dropbox.py","file_name":"dropbox.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"344818669","text":"import time\r\nimport sys\r\nimport colored\r\n#印出棋盤\r\ndef printBoard(board,width):\r\n print(\"\\t\",end=\"\")\r\n for i in range(size):\r\n print(str(i+1),end=\" \"*width)\r\n print(\"\\n\")#換兩行==print()print()\r\n for rowNum in range(size):\r\n print(str(rowNum+1),end=\"\\t\")\r\n for pieceNum in range(size):\r\n piece=board[rowNum][pieceNum]\r\n if piece==\"O\":\r\n THISfcolor=colored.fg(\"green\")\r\n elif piece==\"X\":\r\n THISfcolor=colored.fg(\"red_1\")\r\n elif piece==\".\":\r\n THISfcolor=fcolor\r\n print(THISfcolor+bcolor+piece+reset,end=\"\")\r\n if not pieceNum==size-1: #不是最後一個旗子\r\n print(fcolor+bcolor+\" \" * width+reset,end=\"\")\r\n print()\r\n\r\n #print(fcolor+bcolor+(\" \"*width).join(board[rowNum])+reset)\r\n\r\n print(\"\\t\"+fcolor+bcolor+\" \"*(width*(size-1)+size)+reset)\r\n#設定棋盤大小\r\ndef setSize():\r\n print(\"plz enter the size of board.(n in size(nxn))(allowed size=5 to 20)\")\r\n while True:\r\n a=input()\r\n try:\r\n a=int(a)\r\n except ValueError:\r\n print(\"not num!\")\r\n continue\r\n if a>20:\r\n print(\"too BIG!!!\")\r\n continue\r\n elif a<5:\r\n print(\"too SMALL!!!\")\r\n continue\r\n return a\r\n#清空畫面(感謝徐晧倫)\r\ndef clear():\r\n for i in range(100):\r\n print()\r\n#玩家下棋\r\ndef player():\r\n print(\"plz enter ur pos.\")\r\n while True:\r\n a=input()\r\n if a.find(\" \")==-1 or len(a) > 5:\r\n print(\"wrong!!\")\r\n continue\r\n try:\r\n x,y = [int(i)-1 for i in a.split()]\r\n except ValueError:\r\n print(\"not num!\")\r\n continue\r\n #x,y=====0~size-1\r\n if x<0 or y<0 or x>size-1 or y>size-1:\r\n print(\"UR WEIRD NUM NOT ALLOWED!!!\")\r\n continue\r\n if not checkNoPiece(y,x):\r\n print(\"There is already a piece.\")\r\n continue\r\n break\r\n board[y][x]=\"O\"\r\n#檢查是否下在沒棋子之處\r\ndef checkNoPiece(y,x):\r\n return board[y][x]==\".\"\r\n#檢查棋盤是否已滿\r\ndef checkBoardFull(board):\r\n isFull = True\r\n for row in board:\r\n for piece in row:\r\n if piece==\".\":\r\n isFull=False\r\n return isFull\r\n#以minimax搜索最佳下棋位置\r\ndef DeepCheck(checkDepth,onlyCheck):\r\n \r\n if checkBoardFull(board):\r\n gameover(\"tie\")\r\n\r\n positionsss=dict()\r\n depth=0\r\n newBoard=[[i for i in row] for row in board]\r\n positionsss=AllCheck(\"AI\",depth,newBoard,checkDepth,onlyCheck,0,False)\r\n return positionsss\r\n#檢查一層的所有可下棋位置\r\ndef AllCheck(who,depth,newBoard,checkDepth,onlyCheck,alpha,checkAplhaBeta):\r\n cut=False\r\n beta=\"no\"\r\n positions=dict()\r\n #所有可下棋位置\r\n for y in range(size):\r\n for x in range(size):\r\n if newBoard[y][x]==\".\":\r\n #計算下在各格的分數\r\n score=check(x,y,positions,who,depth,checkDepth,newBoard,onlyCheck,beta,beta!=\"no\")\r\n #存入dictionary\r\n positions[score]=(y,x)\r\n #存較大層之最大or最小分數\r\n if who==\"AI\":\r\n beta=max(positions.keys())\r\n elif who==\"playerAI\":\r\n beta=min(positions.keys())\r\n #alpha-beta剪枝\r\n if checkAplhaBeta:\r\n if who==\"AI\" and beta>alpha and checkAplhaBeta:\r\n cut=True\r\n break\r\n elif who==\"playerAI\" and beta=checkDepth:\r\n return pos\r\n else:\r\n return AllCheck(\"AI\",depth+1,aWholeNewBoard,checkDepth,onlyCheck,beta,checkAplhaBeta)\r\n elif who==\"AI\":\r\n if end or checkBoardFull(aWholeNewBoard) or depth+1>=checkDepth:\r\n return pos\r\n else:\r\n return AllCheck(\"playerAI\",depth+1,aWholeNewBoard,checkDepth,onlyCheck,beta,checkAplhaBeta)\r\n\r\n#直排\r\ndef vertical(b,pos,who,depth,onlyCheck):\r\n for x in range(size):\r\n line = [b[y][x] for y in range(size)]\r\n piecePos=[(y,x) for y in range(size)]\r\n try:\r\n line = \"\".join(line)\r\n except:\r\n 
print(\"sdszdsd\")\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#橫排\r\ndef horizontal(b,pos,who,depth,onlyCheck):\r\n for y in range(size):\r\n line = [b[y][x] for x in range(size)]\r\n piecePos=[(y,x) for x in range(size)]\r\n try:\r\n line = \"\".join(line)\r\n except:\r\n print(\"sdszdsd\")\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#斜排\r\ndef slideUP(b,pos,who,depth,onlyCheck):\r\n for k in range(0+4,(size*2-1)-4):\r\n line = [b[y][x] for x in range(size) for y in range(size) if x+y==k]\r\n piecePos=[(y,x) for x in range(size) for y in range(size) if x+y==k]\r\n try:\r\n line = \"\".join(line)\r\n except:\r\n print(\"sdszdsd\")\r\n \r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#斜排\r\ndef slideDOWN(b,pos,who,depth,onlyCheck):\r\n for k in range((-(size-1))+4,(size)-4):\r\n line = [b[y][x] for x in range(size) for y in range(size) if x-y==k]\r\n piecePos=[(y,x) for x in range(size) for y in range(size) if x-y==k]\r\n try:\r\n line = \"\".join(line)\r\n except:\r\n print(\"sdszdsd\")\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#分析情勢再加減分數\r\ndef analyze(line,b,piecePos,pos,who,depth,onlyCheck):\r\n #playerAI = O AI = X\r\n \r\n\r\n s,e=\"X\",\"O\"\r\n\r\n\r\n how=(1/10)**depth#越下層之分數越接近0,較不會影響總分\r\n checkend=False\r\n\r\n #判斷情勢\r\n if e*5 in line:\r\n if depth==0 and onlyCheck:\r\n gameover(\"win\")#玩家贏了\r\n pos-=10000000000 * how\r\n print(\"too bad\")\r\n checkend=True\r\n if e*4 in line:\r\n start=line.find(e*4)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" or endPiece==\".\":\r\n pos-=10000000 * how\r\n\r\n \r\n if e*3 in line:\r\n start=line.find(e*3)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos-=5000 * how\r\n\r\n \r\n if e*2 in line:\r\n start=line.find(e*2)-1\r\n end=start+3\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=100 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos-=50 * how\r\n\r\n \r\n if e*1 in line:\r\n start=line.find(e*1)-1\r\n end=start+2\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=3 * how\r\n elif startPiece==\".\":\r\n pos-=1 * how\r\n elif endPiece==\".\":\r\n pos-=1 * how\r\n\r\n\r\n\r\n if s*5 in line:\r\n if depth==0 and onlyCheck:\r\n gameover(\"lose\")#AI贏了\r\n pos+=10000000000 * how\r\n print(\"too good\")\r\n checkend=True\r\n if s*4 in line:\r\n 
start=line.find(s*4)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=10000000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=5000 * how\r\n\r\n \r\n if s*3 in line:\r\n start=line.find(s*3)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=1000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=10 * how\r\n\r\n \r\n if s*2 in line:\r\n start=line.find(s*2)-1\r\n end=start+3\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=5 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=2 * how\r\n\r\n\r\n if s*1 in line:\r\n start=line.find(s*1)-1\r\n end=start+2\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=2 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=1 * how\r\n\r\n\r\n\r\n\r\n if e+\".\"+e in line:\r\n start=line.find(e+\".\"+e)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=500000 * how\r\n elif startPiece==\".\":\r\n pos-=5000 * how\r\n elif endPiece==\".\":\r\n pos-=5000 * how\r\n\r\n\r\n\r\n\r\n \r\n if e+e+\".\"+e in line:\r\n start=line.find(e+e+\".\"+e)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\":\r\n pos-=10000 * how\r\n elif endPiece==\".\":\r\n pos-=10000 * how\r\n if e+\".\"+e+e in line:\r\n start=line.find(e+\".\"+e+e)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\":\r\n pos-=10000 * how\r\n elif endPiece==\".\":\r\n pos-=10000 * how\r\n if e+e+\".\"+e+e in line:\r\n\r\n \r\n pos-=10000000 * how\r\n if e+e+e+\".\"+e in line:\r\n \r\n pos-=10000000 * how\r\n if e+\".\"+e+e+e in line:\r\n\r\n \r\n pos-=10000000 * how\r\n\r\n\r\n\r\n\r\n return 
pos,checkend\r\n#AI下棋\r\ndef ai(positions,board):\r\n if positions!=dict():\r\n besty,bestx=positions[max(positions.keys())]\r\n\r\n board[besty][bestx]=\"X\"\r\n\r\n else:\r\n print(\"the AI of this game is too stupid to decide where to place its pawn.\")\r\n print(\"So it's time for u to defeat it.\")\r\n return board\r\n#遊戲結束\r\ndef gameover(text):\r\n if text==\"win\":\r\n print(\"U win!!!\")\r\n elif text==\"lose\":\r\n print(\"haha u lose!!!\")\r\n elif text==\"tie\":\r\n print(\"the board is full!!!\")\r\n print(\"LOOK WHAT U'VE DONE!!!\")\r\n time.sleep(5)\r\n print(\"end!\")\r\n sys.exit()#end script\r\n\r\n\r\n#初始化\r\nsize=setSize()\r\nCheckDepth=1\r\n\r\nboard=[[\".\" for i in range(size)] for i in range(size)]\r\nboardWidth=3\r\nfcolor=colored.fg(\"black\")\r\nbcolor=colored.bg(\"white\")\r\nreset=colored.attr(\"reset\")\r\nboardSpaceAmount=size**2\r\nprint(f\"boardSpaceAmount:{boardSpaceAmount}\")\r\nprintBoard(board,boardWidth)\r\n#循環進行遊戲\r\nwhile 1:\r\n #玩家下棋\r\n player()\r\n boardSpaceAmount-=1\r\n printBoard(board,boardWidth)\r\n positions=DeepCheck(1,True)\r\n \r\n\r\n \r\n #訂定AI搜尋深度\r\n CheckDepth=int(-0.05 * boardSpaceAmount +7)#or+6\r\n print(f\"CheckDepth : {CheckDepth}\")\r\n #AI下棋\r\n print(\"loading...\")\r\n positions=DeepCheck(CheckDepth,False)\r\n clear()\r\n board=ai(positions,board)\r\n boardSpaceAmount-=1\r\n printBoard(board,boardWidth)\r\n positions=DeepCheck(1,True)\r\n","sub_path":"minimax_fixed.py","file_name":"minimax_fixed.py","file_ext":"py","file_size_in_byte":16812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"467557233","text":"import reframe as rfm\nimport reframe.utility.sanity as sn\n\n\n@rfm.required_version('>=2.14')\n@rfm.parameterized_test(\n ['dom', 'PrgEnv-gnu', 'gcc/7.3.0'],\n ['dom', 'PrgEnv-gnu', 'gcc/8.3.0'],\n ['dom', 'PrgEnv-intel', 'intel/18.0.2.199'],\n ['dom', 'PrgEnv-intel', 'intel/19.0.1.144'],\n ['dom', 'PrgEnv-cray', 'cce/8.7.10'],\n # \n ['daint', 'PrgEnv-gnu', 'gcc/4.9.3'],\n ['daint', 'PrgEnv-gnu', 'gcc/5.3.0'],\n ['daint', 'PrgEnv-gnu', 'gcc/6.2.0'],\n ['daint', 'PrgEnv-gnu', 'gcc/7.3.0'],\n ['daint', 'PrgEnv-intel', 'intel/17.0.4.196'],\n ['daint', 'PrgEnv-intel', 'intel/18.0.2.199'],\n ['daint', 'PrgEnv-cray', 'cce/8.6.1'],\n ['daint', 'PrgEnv-cray', 'cce/8.7.4'],\n ['daint', 'PrgEnv-pgi', 'pgi/17.5.0'],\n ['daint', 'PrgEnv-pgi', 'pgi/18.5.0'],\n ['daint', 'PrgEnv-pgi', 'pgi/18.10.0'],\n)\nclass SphExaMiniAppSquarepatch(rfm.CompileOnlyRegressionTest):\n \"\"\"\n cd sph-exa_mini-app.git/scripts/reframe/\n reframe --system dom:mc --exec-policy=async --keep-stage-files \\\n --prefix=$SCRATCH/reframe/ -r -c ./miniapp.py\n \"\"\"\n def __init__(self, sysname, prgenv, compilerversion):\n super().__init__()\n self.name = 'sphexa_' + sysname + \"_\" + compilerversion.replace('/', '')\n self.descr = 'compilation only check'\n self.valid_systems = ['%s:gpu' % sysname, '%s:mc' % sysname]\n self.valid_prog_environs = [prgenv]\n self.modules = [compilerversion]\n self.prgenv_flags = {\n 'PrgEnv-gnu': ['-I./include', '-std=c++14', '-O3', '-g',\n '-fopenmp', '-D_JENKINS'],\n 'PrgEnv-intel': ['-I./include', '-std=c++14', '-O3', '-g',\n '-qopenmp', '-D_JENKINS'],\n 'PrgEnv-cray': ['-I./include', '-hstd=c++14', '-O3', '-g',\n '-homp', '-D_JENKINS'],\n 'PrgEnv-pgi': ['-I./include', '-std=c++14', '-O3', '-g',\n '-mp', '-D_JENKINS'],\n }\n self.variables = {\n 'CRAYPE_LINK_TYPE': 'dynamic'\n }\n self.build_system = 'SingleSource'\n self.testname = 'sqpatch'\n self.sourcepath = '%s.cpp' % self.testname\n self.executable = '%s.exe' % self.testname\n self.rpt = '%s.rpt' % self.testname\n self.maintainers = ['JG']\n self.tags = {'pasc'}\n self.postbuild_cmd = ['file %s &> %s' % (self.executable, self.rpt)]\n self.sanity_patterns = sn.assert_found(\n 'ELF 64-bit LSB executable, x86-64', self.rpt)\n\n def setup(self, partition, environ, **job_opts):\n super().setup(partition, environ, **job_opts)\n environ_name = self.current_environ.name\n prgenv_flags = self.prgenv_flags[environ_name]\n self.build_system.cxxflags = prgenv_flags\n","sub_path":"scripts/reframe/miniapp.py","file_name":"miniapp.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"505453166","text":"from PyQt5.QtWidgets import QPushButton, QLineEdit, QRadioButton, QWidget, QGroupBox, QVBoxLayout, QDialogButtonBox, QLabel, QHBoxLayout\nfrom PyQt5.QtCore import pyqtSignal\n\n\nclass Editwidget(QWidget):\n sendCommand = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super(Editwidget, self).__init__(parent)\n\n self.setGeometry(300, 300, 800, 600)\n self.max_time = QLineEdit(self)\n self.type_number = QRadioButton(\"Numer\", self)\n self.type_string = QRadioButton(\"Tekst\", self)\n self.max_input_size = QLineEdit(self)\n self.empty_input_yes = QRadioButton(\"Tak\", self)\n self.empty_input_no = QRadioButton(\"Nie\", self)\n self.hide_input_yes = QRadioButton(\"Tak\", self)\n self.hide_input_no = QRadioButton(\"Nie\", self)\n self.keyboard_edit_yes = QRadioButton(\"Tak\", self)\n self.keyboard_edit_no = QRadioButton(\"Nie\", self)\n self.barcode_reader_yes = QRadioButton(\"Tak\", self)\n self.barcode_reader_no = QRadioButton(\"Nie\", self)\n self.edit_field_title = QLineEdit(self)\n self.begin_value = QLineEdit(self)\n self.attributes_field = QLineEdit(self)\n self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(lambda: self.close())\n self.attributes_us_btn = QPushButton(\"US\")\n self.title_us_btn = QPushButton(\"US\")\n\n self.attributes_us_btn.clicked.connect(\n lambda: self.attributes_field.setText(self.attributes_field.text() + \"\\x1f\"))\n self.title_us_btn.clicked.connect(\n lambda: self.edit_field_title.setText(self.edit_field_title.text() + \"\\x1f\"))\n\n groupBox = QGroupBox()\n groupBox1 = QGroupBox()\n groupBox2 = QGroupBox()\n groupBox3 = QGroupBox()\n groupBox4 = QGroupBox()\n\n radio_layout = QVBoxLayout()\n radio_layout.addWidget(self.type_number)\n radio_layout.addWidget(self.type_string)\n\n radio_layout_label = QHBoxLayout()\n radio_layout_label.addLayout(radio_layout)\n radio_layout_label.addWidget(QLabel(\"Typ wprowadzanej wartości\"))\n groupBox.setLayout(radio_layout_label)\n\n radio_layout1 = QVBoxLayout()\n radio_layout1.addWidget(self.empty_input_yes)\n radio_layout1.addWidget(self.empty_input_no)\n\n radio_layout_label1 = QHBoxLayout()\n radio_layout_label1.addLayout(radio_layout1)\n radio_layout_label1.addWidget(QLabel(\"Wprowadzenie pustej wartości\"))\n groupBox1.setLayout(radio_layout_label1)\n\n radio_layout2 = QVBoxLayout()\n radio_layout2.addWidget(self.hide_input_yes)\n radio_layout2.addWidget(self.hide_input_no)\n\n radio_layout_label2 = QHBoxLayout()\n radio_layout_label2.addLayout(radio_layout2)\n radio_layout_label2.addWidget(QLabel(\"Maskowanie wprowadzanej treści\"))\n groupBox2.setLayout(radio_layout_label2)\n\n radio_layout3 = QVBoxLayout()\n radio_layout3.addWidget(self.keyboard_edit_yes)\n radio_layout3.addWidget(self.keyboard_edit_no)\n\n radio_layout_label3 = QHBoxLayout()\n radio_layout_label3.addLayout(radio_layout3)\n radio_layout_label3.addWidget(QLabel(\"Edycja z klawiatury\"))\n groupBox3.setLayout(radio_layout_label3)\n\n radio_layout4 = QVBoxLayout()\n radio_layout4.addWidget(self.barcode_reader_yes)\n radio_layout4.addWidget(self.barcode_reader_no)\n\n radio_layout_label4 = QHBoxLayout()\n radio_layout_label4.addLayout(radio_layout4)\n radio_layout_label4.addWidget(QLabel(\"Czytnik kodów kreskowych\"))\n groupBox4.setLayout(radio_layout_label4)\n\n row1 = QHBoxLayout()\n row1.addWidget(self.edit_field_title)\n row1.addWidget(self.title_us_btn)\n\n row3 = QHBoxLayout()\n 
row3.addWidget(self.attributes_field)\n row3.addWidget(self.attributes_us_btn)\n\n main_layout = QVBoxLayout()\n main_layout.addWidget(self.max_time)\n main_layout.addWidget(QLabel(\"Maksymalny czas trwania interakcji w sekundach (0 = bez limitu czasowego)\"))\n main_layout.addWidget(groupBox)\n main_layout.addWidget(self.max_input_size)\n main_layout.addWidget(QLabel(\"Maksymalny rozmiar wprowadzanej wartości\"))\n main_layout.addWidget(groupBox1)\n main_layout.addWidget(groupBox2)\n main_layout.addWidget(groupBox3)\n main_layout.addWidget(groupBox4)\n main_layout.addWidget(QLabel(\"Nazwa edytowalnego pola\"))\n main_layout.addLayout(row1)\n main_layout.addWidget(QLabel(\"Wartość początkowa pola edycyjnego\"))\n main_layout.addWidget(self.begin_value)\n main_layout.addWidget(QLabel(\"Dodatkowe atrybuty\"))\n main_layout.addLayout(row3)\n main_layout.addWidget(self.buttonBox)\n self.setLayout(main_layout)\n\n def accept(self):\n timeout = self.max_time.text()\n type_of_field = \"T\" if self.type_string.isChecked() else \"N\"\n max_size = self.max_input_size.text()\n empty = \"1\" if self.empty_input_yes.isChecked() else \"0\"\n mask = \"1\" if self.hide_input_yes.isChecked() else \"0\"\n keyboard = \"1\" if self.keyboard_edit_yes.isChecked() else \"0\"\n codereader = \"1\" if self.barcode_reader_yes.isChecked() else \"0\"\n title = self.edit_field_title.text()\n beginText = self.begin_value.text()\n attributes = self.attributes_field.text()\n print(self.edit_field_title.text())\n comm = f\"\\x1cK7\\x1c{timeout}\\x1c{type_of_field}\\x1c{max_size}\\x1c{empty}\\x1c{mask}\\x1c{keyboard}\\x1c{codereader}\\x1c{title}\\x1c{beginText}\\x1c{attributes}\\x03\"\n self.sendCommand.emit(comm)\n","sub_path":"editwidget.py","file_name":"editwidget.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"277640373","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 27 13:00:07 2022\r\n\r\n@author: youssef\r\n\"\"\"\r\n\r\nimport os\r\nos.environ['SNOPT_LICENSE'] = '/home/youssef/snopt/snopt7.lic'\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport floris.tools as wfct\r\nimport floris.tools.optimization.pyoptsparse as opt\r\nimport pdb\r\nfrom itertools import product, permutations\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef sort_boundaries(boundaries):\r\n bnd=pd.DataFrame(boundaries, columns=['x','y'])\r\n bnd.mean()\r\n A=np.degrees(np.arctan2(bnd.y-bnd.mean()[1],bnd.x-bnd.mean()[0]))\r\n A %= 360\r\n A.sort_values()\r\n boundaries=bnd.reindex(A.sort_values().index).values.tolist()\r\n return boundaries\r\n\r\n\r\ndef ConcentricCirclesLayout(N_circles,spc,D):\r\n spc=spc*D\r\n y_coordinates=np.array(0)\r\n x_coordinates=np.array(0)\r\n for i in range(N_circles):\r\n i=i+1\r\n N_turbines=math.floor(2*np.pi*i)\r\n angles=np.arange(0,2*np.pi,2*np.pi/N_turbines)\r\n x_coordinates=np.append(x_coordinates, i*spc*np.cos(angles))\r\n y_coordinates=np.append(y_coordinates, i*spc*np.sin(angles))\r\n x_coordinates=np.round(x_coordinates)\r\n y_coordinates=np.round(y_coordinates)\r\n layout= (x_coordinates.tolist(), y_coordinates.tolist())\r\n \r\n return layout\r\n\r\n\r\ndef SNOPTlayoutoptimization(fi,layout0,wd, ws, freq,plot):\r\n model = opt.layout.Layout(fi, boundaries, wdir=wd, wspd=ws, wfreq=freq)\r\n # optOptions={\"Major feasibility tolerance\": 1e-6, \"Verify level\": 3, \"Scale option\":2 ,\"Major optimality tolerance\": 5e-5}\r\n tmp = opt.optimization.Optimization(model=model, solver=\"SNOPT\") \r\n sol = tmp.optimize()\r\n if plot==1: \r\n model.plot_layout_opt_results(sol)\r\n plt.show() \r\n \r\n layout=(sol.getDVs()[\"x\"].tolist(),sol.getDVs()[\"y\"].tolist())\r\n \r\n return layout\r\n\r\ndef savelayout(layout,path,filename):\r\n layoutdf=pd.DataFrame(layout[0],columns=['x'])\r\n layoutdf['y']=layout[1]\r\n layoutdf.to_csv(path+filename, index=False)\r\n\r\n\r\n\r\n\r\n# Initialize the FLORIS interface fi\r\nfi = wfct.floris_interface.FlorisInterface(\"FLORIS_15MW.json\")\r\nD = fi.floris.farm.turbines[0].rotor_diameter\r\n\r\nN_circles=2\r\nspacing=5*D\r\nspc=5\r\nangles=np.arange(0,360,1)\r\nboundaries_x=np.round(N_circles*spacing*np.cos(np.radians(angles)))\r\nboundaries_y=np.round(N_circles*spacing*np.sin(np.radians(angles)))\r\nboundaries = [[x,y] for x, y in zip(boundaries_x, boundaries_y)]\r\nboundaries=sort_boundaries(boundaries)\r\nlayout0=ConcentricCirclesLayout(N_circles,spc,D) \r\n\r\nwd=[0., 22.5, 45., 67.5, 90., 112.5, 135., 157.5, 180., 202.5, 225., 247.5, 270., 292.5, 315., 337.5]\r\nws=[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\r\nfreq= [.025, .024, .029, .036,.063, .065, .100, .122,.063, .038, .039, .083, .213, .046, .032, .022]\r\n\r\nfi.reinitialize_flow_field(layout_array=layout0)\r\nAEP_initial=fi.get_farm_AEP(np.array(wd), np.array(ws), np.array(freq)) * 1e-9\r\nprint(\"=====================================================\")\r\nprint('AEP_initial='+str(AEP_initial))\r\nprint(\"=====================================================\")\r\n\r\nlayout=SNOPTlayoutoptimization(fi,layout0,wd, ws, freq,1)\r\n\r\nfi.reinitialize_flow_field(layout_array=layout)\r\nAEP_optimized=fi.get_farm_AEP(np.array(wd), np.array(ws), np.array(freq)) * 
1e-9\r\nprint(\"=====================================================\")\r\nprint('AEP_current='+str(AEP_optimized))\r\nprint(\"=====================================================\")\r\n\r\nsavelayout(layout,'','SNOPTlayoutnew.csv')","sub_path":"Linux_pyoptsparse/SNOPT_allwindfarm_fixed.py","file_name":"SNOPT_allwindfarm_fixed.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"162931301","text":"#!/usr/bin/env python\n\nimport sys\nimport rospy\nfrom testmsgsrv.srv import *\nfrom testmsgsrv.msg import *\nfrom std_srvs.srv import Empty,EmptyResponse\nfrom threading import *\n\nclass Myclasss():\n\tdef __init__(self):\n\t\trospy.init_node('fibo_client_service', anonymous=True)\n\t\trospy.Subscriber('/action', fiboo, self.callback)\n\n\tdef babooo(self,x):\n\t\trospy.wait_for_service('get_fibonacci')\n\t\ttry:\n\t\t\tfff = rospy.ServiceProxy('get_fibonacci', Fibonaccii)\n\t\t\trespp = fff(x)\n\t\t\tprint(\"Final Fibonacci Series = %s\"%(str(respp.sequence)))\n\t\texcept rospy.ServiceException as e:\n\t\t\tprint(\"Service call failed: %s\"%e)\n\n\tdef callback(self,msg):\n\t\tprint(\"Feedback : Sequence : \",msg.sequence)\n\t\t# if eval(msg.sequence)[-1] == 144:\n\t\t# \tprint('asdfghjkl')\n\t\t# \tself.preempt()\n\n\tdef preempt(self):\n\t\trospy.wait_for_service('preempt_fibonacci')\n\t\ttry:\n\t\t\tfff = rospy.ServiceProxy('preempt_fibonacci', Empty)\n\t\t\trespp = fff()\n\t\texcept rospy.ServiceException as e:\n\t\t\tprint(\"Service call failed: %s\"%e)\n\n\nif __name__ == \"__main__\":\n\tx = sys.argv[1]\n\tobbject = Myclasss()\n\n\tt1 = Thread(target=obbject.babooo, args=(x,))\n\tt1.start()\n\n\tprint('threading success')\n","sub_path":"testmsgsrv/scripts/fibo_using_ROSserivce/fibo_client_service.py","file_name":"fibo_client_service.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"187590218","text":"_KNOWN_CLASSES = {\n 'HTTP': 'minemeld.ft.http.HttpFT',\n 'AggregatorIPv4': 'minemeld.ft.ipop.AggregateIPv4FT',\n 'Aggregator': 'minemeld.ft.op.AggregateFT',\n 'RedisSet': 'minemeld.ft.redis.RedisSet'\n}\n\n\ndef _dynamic_load(classname):\n if '.' not in classname:\n raise ValueError('invalid absolute classname %s' % classname)\n\n modname, classname = classname.rsplit('.', 1)\n t = __import__(modname, globals(), locals(), [classname])\n cls = getattr(t, classname)\n return cls\n\n\ndef factory(classname, name, chassis, config):\n classname = _KNOWN_CLASSES.get(classname, classname)\n\n return _dynamic_load(classname)(\n name=name,\n chassis=chassis,\n config=config\n )\n\n\nclass ft_states(object):\n READY = 0\n CONNECTED = 1\n REBUILDING = 2\n RESET = 3\n INIT = 4\n STARTED = 5\n CHECKPOINT = 6\n IDLE = 7\n STOPPED = 8\n","sub_path":"minemeld/ft/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"113088353","text":"# Entrypoint: com.google.android.apps.chrome.firstrun.FirstRunActivity.onCreate(Landroid/os/Bundle;)V\n# Target: invokevirtual < Application, Landroid/app/Activity, startActivity(Landroid/content/Intent;)V >@110\n\nIAAv0 = Real('IAAv0') # \nIAAv5 = Int('IAAv5') # Pointer<805177838>.this$0.access$100()\nIAAv8 = Int('IAAv8') # Pointer<805177838>.this$0.access$400().getFirstRunFlowComplete()\nIAAv1 = Int('IAAv1') # Pointer<582524242>.getBoolean()\nIAAv7 = Int('IAAv7') # Pointer<805177838>.this$0.access$500().checkAnyUserHasSeenToS()\nIAAv4 = Real('IAAv4') # Pointer<805177838>.mHasChildAccount\nIAAv2 = Int('IAAv2') # Pointer<-1202501681>.getInstance().checkHasChildAccount().SDK_INT\nIAAv6 = Int('IAAv6') # Pointer<805177838>.this$0.access$000()\nIAAv3 = Real('IAAv3') # Pointer<805177838>.mIsAndroidEduDevice\nIAAv9 = Real('IAAv9') # Pointer<805177838>.this$0.mObserver\n\ns.add(And(And(And(And(And(Or((IAAv0 == 0), (IAAv0 != 0)), (IAAv1 != 0)), (IAAv2 < 18)), And((IAAv3 != 0), (IAAv4 != 0))), And(And(Or(Or(And((IAAv5 == 0), (IAAv6 != 0)), And((IAAv5 == 0), (IAAv6 == 0))), (IAAv5 != 0)), (IAAv7 == 0)), (IAAv8 == 0))), Or((IAAv9 == 0), (IAAv9 != 0))))\n\n","sub_path":"static/playdrone/Reference_ref/com.android.chrome-2125114/constraints7_0.py","file_name":"constraints7_0.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"552733435","text":"import csv\n\ntimes = []\n# checks csv and turns everything into a float for calculations\ndef processCSV():\n with open('onsetimes.csv') as csvFile:\n reader = csv.reader(csvFile)\n for row in reader:\n timestring = str(row)\n timestring = timestring.replace(\"['\",\"\")\n timestring = timestring.replace(\"']\",\"\")\n timesint = float(timestring)\n times.append(timesint)\n\n# get between time\ndef getBetweenTime():\n btwtimes = []\n for i in times:\n try:\n timeindex1 = times.index(i)\n btwtime = times[timeindex1+1]-times[timeindex1]\n except IndexError:\n btwtime = 0\n btwtimes.append(str(btwtime)+\"F\")\n btwtimestr = str(btwtimes)\n btwtimestr = btwtimestr.replace(\"]\",\"\")\n btwtimestr = btwtimestr.replace(\"[\",\"\")\n btwtimestr = btwtimestr.replace(\"'\",\"\")\n return btwtimestr\n\nname = input(\"Name the script: \")\n# Copies header template onto the script and adds the times\ndef createTheScript():\n with open(\"unity3DScripts/SampleSongTemplateHeader.txt\") as template:\n with open(name+\".cs\", \"w\") as script:\n for line in template:\n script.write(line)\n script.write(\"public static float[] Times = {\"+str(getBetweenTime())+\"};}\")\n\n\n\n#functions\nprocessCSV()\ncreateTheScript()\n","sub_path":"_createScript.py","file_name":"_createScript.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"162932830","text":"#\n# @lc app=leetcode id=35 lang=python3\n#\n# [35] Search Insert Position\n#\n# https://leetcode.com/problems/search-insert-position/description/\n#\n# algorithms\n# Easy (41.41%)\n# Likes: 1782\n# Dislikes: 214\n# Total Accepted: 512.8K\n# Total Submissions: 1.2M\n# Testcase Example: '[1,3,5,6]\\n5'\n#\n# Given a sorted array and a target value, return the index if the target is\n# found. If not, return the index where it would be if it were inserted in\n# order.\n# \n# You may assume no duplicates in the array.\n# \n# Example 1:\n# \n# \n# Input: [1,3,5,6], 5\n# Output: 2\n# \n# \n# Example 2:\n# \n# \n# Input: [1,3,5,6], 2\n# Output: 1\n# \n# \n# Example 3:\n# \n# \n# Input: [1,3,5,6], 7\n# Output: 4\n# \n# \n# Example 4:\n# \n# \n# Input: [1,3,5,6], 0\n# Output: 0\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n #Time Complexity: O(logn)\n #Space Complexity O(1)\n def searchInsert(self, nums, target):\n if not nums:\n return 0\n start = 0\n end = len(nums) - 1\n while start + 1 < end:\n mid = start + (end - start) // 2\n selected_ele = nums[mid]\n\n if selected_ele > target:\n end = mid\n elif selected_ele == target:\n return mid\n else:\n start = mid\n \n if target <= nums[start]:\n return start\n elif target <= nums[end]:\n return end\n else:\n return end + 1\n ","sub_path":"leetcode/Binary Search/35. Search Insert Position.py","file_name":"35. Search Insert Position.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"94879843","text":"from typing import Dict, Tuple, List\r\n\r\nEncodedStr = List[int]\r\nCodeToWordDict = Dict[int, str]\r\nWordToCodeDict = Dict[str, int]\r\n\r\n# Encapsulates the type for dictionary encoded text\r\nDictionaryEncoding = Tuple[CodeToWordDict, EncodedStr]\r\n\r\n\r\ndef dictionary_encode(text: str) -> DictionaryEncoding:\r\n \"\"\"\r\n Encodes a string of text using Dictionary Encoding\r\n :param text: The input string to encode\r\n :return: A tuple containing the dictionary of words by their ID,\r\n and a series of ID's which make up the original text\r\n \"\"\"\r\n # Calculate a word dictionary, and it's inverse,\r\n code_dict: CodeToWordDict = dict()\r\n word_dict: WordToCodeDict = dict()\r\n output_str: List[int] = []\r\n\r\n # Codes will be assigned as simple one-up numbers\r\n next_code: int = 0\r\n for word in text.split(\" \"):\r\n # If we haven't seen this word before, add it to the dictionary\r\n if word not in word_dict:\r\n code_dict[next_code] = word\r\n word_dict[word] = next_code\r\n next_code += 1\r\n\r\n # Lookup the code using our inverse dictionary\r\n code: int = word_dict.get(word)\r\n\r\n # Write the code to the output stream\r\n output_str.append(code)\r\n\r\n # Return the code dictionary and the output string.\r\n return code_dict, output_str\r\n\r\n\r\ndef dictionary_decode(encoded: DictionaryEncoding) -> str:\r\n \"\"\"\r\n Decode a dictionary encoded string into it's original form\r\n :param encoded: The dictionary encoded string\r\n :return: The original string\r\n \"\"\"\r\n # Read the parts of the encoded info\r\n input_dict, input_str = encoded\r\n\r\n # Convert the list of integers to a list of words, using the dictionary lookup\r\n output_chars = [input_dict.get(x) for x in input_str]\r\n\r\n # Join all the output words into a single string\r\n return \" \".join(output_chars)\r\n","sub_path":"CORE_DataRepresentation/DictionaryEncoding/dictionary_encode.py","file_name":"dictionary_encode.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"550167602","text":"import numpy as np\nfrom keras.applications.vgg16 import VGG16\nfrom scipy import misc\nimport pickle\nimport os\n\ntest_img_names = os.listdir('data/test')\ntrain_img_names = os.listdir('data/train')\n\ndef load_img(filename):\n img = misc.imread(filename)\n img = misc.imresize(img, size=(224,224,3))\n img = img/255\n return img\n\n#Load test and training images\ntest_imgs = [load_img(os.path.join('data/test', name)) for name in test_img_names]\ntest_imgs = np.stack(test_imgs)\n\ntrain_imgs = [load_img(os.path.join('data/train', name)) for name in train_img_names]\ntrain_imgs = np.stack(train_imgs)\n\nwith open('data/train_labels.pkl', 'rb') as f:\n train_labels = pickle.load(f)\n\n#Load the pretrained InceptionV3 model\nModel = vgg16(include_top=False, input_shape=(224, 224, 3), weights='imagenet')\n\nprint('loaded the model')\n\nfeaturized_train_data = Model.predict(train_imgs, verbose=1)\nfeaturized_test_data = Model.predict(test_imgs, verbose=1)\n\n#Save featurized images\nwith open('featurized_train_imgs.pkl', 'wb') as f:\n pickle.dump(featurized_train_data, f)\nwith open('featurized_test_imgs.pkl', 'wb') as f:\n pickle.dump(featurized_test_data, f)\n\n","sub_path":"featurize.py","file_name":"featurize.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"603174485","text":"import argparse\nimport os\nimport json\nimport fcntl\nimport errno\nimport subprocess\nimport time\nimport signal\n\n\nSIMULATE_TASK_RUNNER_KILLED = False\nSIMULATE_TASK_PROCESS_KILLED = False\nSIMULATE_SLOW_FINISH = False\n\nCONFIG_FILE = 'task_config.json'\n\nTASK_DIR = 'task'\n\nSTARTED_STATUS = 'STARTED'\nFINISHED_STATUS = 'FINISHED'\nBAD_STATUS = 'BAD'\n\n\ndef get_task_dir(task_id):\n return os.path.join(TASK_DIR, task_id)\n\n\ndef get_workers_path(task_id):\n return os.path.join(get_task_dir(task_id), 'workers')\n\n\ndef get_workers_lock(task_id):\n return os.path.join(get_task_dir(task_id), 'workers.lock')\n\n\ndef make_dir_exist(name):\n try:\n os.makedirs(name)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef flock_nb(fd, operation):\n try:\n fcntl.flock(fd, operation | fcntl.LOCK_NB)\n return True\n except OSError as e:\n if e.errno not in [errno.EACCES, errno.EAGAIN]:\n raise\n else:\n return False\n\n\ndef pid_exists(pid):\n try:\n os.kill(pid, 0)\n except OSError as e:\n if e.errno == errno.ESRCH:\n return False\n if e.errno == errno.EPERM:\n # There's a process (probably), but access was denied\n return True\n raise\n return True\n\n\n\ndef readline(f, discard_newline=True):\n line = f.readline()\n if line.endswith('\\n'):\n return line[:-1]\n else:\n return line\n\n\nclass TaskRunner:\n def __init__(self, task_id, command, dependency_ids):\n self.task_id = task_id\n self.command = command\n self.dependency_ids = dependency_ids\n self.locked = False\n self.workers_lines = None\n self.workers_lock = None\n self.dep_workers_lines = None\n self.dep_workers_locks = {}\n self.bad_dep_workers = None\n\n def run(self, timeout=10, bad_dep_timeout=4, delay=0.05):\n \"\"\"\n timeout is the total timeout to verify statuses of dependencies and conflicts.\n bad_dep_timeout is the timeout to wait for a task to set finished status after its pid does not exist.\n Negative timeout means to ignore the timeout.\n \"\"\"\n start_time = time.time()\n bad_dep_start_times = {}\n run_task = False\n while True:\n self.try_lock_files()\n\n if self.locked:\n self.load_workers()\n\n dep_check = self.check_dependencies()\n if dep_check:\n # Dependencies satisfied, start running task\n run_task = True\n break\n\n if dep_check is not None:\n # Do not run task because of dependencies\n print('Failed dependency check')\n break\n\n # There are bad workers from the dependencies\n if bad_dep_timeout >= 0:\n for dep_id, worker_id in self.bad_dep_workers:\n if (dep_id, worker_id) in bad_dep_start_times:\n if time.time() - bad_dep_start_times[(dep_id, worker_id)] >= bad_dep_timeout:\n # timeout on waiting for the bad worker has been reached\n print('Bad dependency timeout reached, setting BAD_STATUS for', dep_id, worker_id)\n self.update_dep_worker(dep_id, [BAD_STATUS, str(worker_id)])\n del bad_dep_start_times[(dep_id, worker_id)]\n else:\n print('Waiting for status (pid does not exist) of', dep_id, worker_id)\n bad_dep_start_times[(dep_id, worker_id)] = time.time()\n\n self.unlock_files()\n time.sleep(delay)\n if timeout >= 0 and time.time() - start_time >= timeout:\n raise RuntimeError('Timeout exceeded')\n\n if run_task:\n print('Running', self.task_id)\n max_worker_id = -1\n for line in self.workers_lines:\n row = line.split()\n if row[0] == STARTED_STATUS:\n worker_id = int(row[1])\n if worker_id > max_worker_id:\n max_worker_id = worker_id\n\n worker_id = max_worker_id + 1\n proc = subprocess.Popen(self.command, shell=True, start_new_session=True)\n 
self.update_worker([STARTED_STATUS, str(worker_id), str(proc.pid)])\n else:\n print('Not running')\n\n self.unlock_files()\n\n if run_task:\n if SIMULATE_TASK_PROCESS_KILLED:\n print('killing', proc.pid)\n os.kill(proc.pid, signal.SIGKILL)\n\n if SIMULATE_TASK_RUNNER_KILLED:\n print('killing self', os.getpid())\n os.kill(os.getpid(), signal.SIGKILL)\n \n ret_val = proc.wait()\n\n if SIMULATE_SLOW_FINISH:\n print('Finishing slow')\n time.sleep(3)\n\n\n workers_lock = os.open(get_workers_lock(self.task_id), os.O_CREAT | os.O_RDONLY)\n fcntl.flock(workers_lock, fcntl.LOCK_EX)\n self.update_worker([FINISHED_STATUS, str(worker_id), str(ret_val)])\n fcntl.flock(workers_lock, fcntl.LOCK_UN)\n os.close(workers_lock)\n print('Finished')\n \n\n def try_lock_files(self):\n if self.workers_lock is not None or self.dep_workers_locks:\n raise RuntimeError('Locks already open')\n\n self.workers_lock = os.open(get_workers_lock(self.task_id), os.O_CREAT | os.O_RDONLY)\n if not flock_nb(self.workers_lock, fcntl.LOCK_EX):\n return\n\n dep_ids_set = set(self.dependency_ids)\n dep_ids_set.discard(self.task_id)\n for dep_id in dep_ids_set:\n dep_lock = os.open(get_workers_lock(dep_id), os.O_CREAT | os.O_RDONLY)\n self.dep_workers_locks[dep_id] = dep_lock\n if not flock_nb(dep_lock, fcntl.LOCK_EX):\n return\n\n self.locked = True\n\n def unlock_files(self):\n self.locked = False\n\n if self.workers_lock is not None:\n fcntl.flock(self.workers_lock, fcntl.LOCK_UN)\n os.close(self.workers_lock)\n self.workers_lines = None\n self.workers_lock = None\n\n for dep_lock in self.dep_workers_locks.values():\n fcntl.flock(dep_lock, fcntl.LOCK_UN)\n os.close(dep_lock)\n self.dep_workers_lines = None\n self.dep_workers_locks = {}\n self.bad_dep_workers = None\n\n def load_workers(self):\n if not self.locked:\n raise RuntimeError('Workers not locked')\n\n if os.path.isfile(get_workers_path(self.task_id)):\n with open(get_workers_path(self.task_id)) as f:\n self.workers_lines = f.readlines()\n else:\n self.workers_lines = []\n\n self.dep_workers_lines = {}\n dep_ids_set = set(self.dependency_ids)\n for dep_id in dep_ids_set:\n if os.path.isfile(get_workers_path(dep_id)):\n with open(get_workers_path(dep_id)) as f:\n self.dep_workers_lines[dep_id] = f.readlines()\n else:\n self.dep_workers_lines[dep_id] = []\n \n self.bad_dep_workers = []\n\n def update_worker(self, row):\n line = ' '.join(row) + '\\n'\n with open(get_workers_path(self.task_id), 'a') as f:\n f.write(line)\n\n def update_dep_worker(self, dep_id, row):\n line = ' '.join(row) + '\\n'\n with open(get_workers_path(dep_id), 'a') as f:\n f.write(line)\n\n def check_dependencies(self):\n \"\"\"\n dependencies must have no workers running and the last task\n must have finished successfully\n \"\"\"\n\n dependency_check = True\n\n for dep_id in self.dependency_ids:\n if not self.dep_workers_lines[dep_id]:\n # No workers for the dependency have started yet\n dependency_check = False\n continue\n\n unfinished_workers = {}\n\n for line in self.dep_workers_lines[dep_id]:\n rows = line.split()\n if rows[0] == STARTED_STATUS:\n worker_id = int(rows[1])\n pid = int(rows[2])\n if worker_id in unfinished_workers:\n raise RuntimeError('Invalid worker state')\n unfinished_workers[worker_id] = pid\n elif rows[0] == FINISHED_STATUS:\n worker_id = int(rows[1])\n ret_val = int(rows[2])\n del unfinished_workers[worker_id]\n elif rows[0] == BAD_STATUS:\n worker_id = int(rows[1])\n del unfinished_workers[worker_id]\n else:\n raise RuntimeError('Invalid status')\n\n if 
unfinished_workers:\n for worker_id, pid in unfinished_workers.items():\n if self.check_bad_pid(dep_id, worker_id, pid):\n if dependency_check:\n dependency_check = None\n else:\n dependency_check = False\n\n else:\n # check that the last worker finished with a 0 status\n last_worker_rows = self.dep_workers_lines[dep_id][-1].split()\n if last_worker_rows[0] != FINISHED_STATUS or last_worker_rows[2] != '0':\n print('Last status not finished 0 for', dep_id)\n dependency_check = False\n else:\n # check there are no bad statuses from when the worker started\n for line in reversed(self.dep_workers_lines[dep_id][:-1]):\n rows = line.split()\n if rows[0] == STARTED_STATUS:\n if rows[1] == last_worker_rows[1]:\n break\n elif rows[0] == FINISHED_STATUS:\n if rows[2] != '0':\n dependency_check = False\n elif rows[0] == BAD_STATUS:\n dependency_check = False\n else:\n raise RuntimeError('Unstarted worker')\n\n return dependency_check\n\n def check_bad_pid(self, dep_id, worker_id, pid):\n # Worker had started, but if the task was killed, then\n # the status would not have updated\n # Check if the pid exists, not a great solution according to\n # http://mywiki.wooledge.org/ProcessManagement#The_risk_of_letting_the_parent_die\n # but the safer alternative is not viable because we are assuming\n # that the parent process runner can be killed\n if pid_exists(pid):\n return False\n\n # Assume that the task had finished, mark it as bad\n self.bad_dep_workers.append((dep_id, worker_id))\n\n return True\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('task_id')\n parser.add_argument('--kill-runner', action='store_true')\n parser.add_argument('--kill-process', action='store_true')\n parser.add_argument('--slow-finish', action='store_true')\n args = parser.parse_args()\n\n global SIMULATE_TASK_RUNNER_KILLED\n SIMULATE_TASK_RUNNER_KILLED = args.kill_runner\n global SIMULATE_TASK_PROCESS_KILLED\n SIMULATE_TASK_PROCESS_KILLED = args.kill_process\n global SIMULATE_SLOW_FINISH\n SIMULATE_SLOW_FINISH = args.slow_finish\n\n task_id = args.task_id\n\n with open(CONFIG_FILE) as f:\n config = json.load(f)\n\n task = None\n\n for config_task in config:\n if config_task['id'] == task_id:\n task = config_task\n break\n\n if task is None:\n raise RuntimeError('Task id not configured')\n\n for config_task in config:\n make_dir_exist(get_task_dir(config_task['id']))\n\n runner = TaskRunner(task['id'], task['command'], task['dependencies'])\n runner.run()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"task_runner.py","file_name":"task_runner.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"78319436","text":"'''Implementation of counting sort'''\ndef countingsort(unsorted_numbers, max):\n numbers_to_count = [0]*(max+1)\n for number in unsorted_numbers:\n numbers_to_count[number] += 1\n sorted_numbers = []\n for number, count in enumerate(numbers_to_count):\n for number_of_times in range(count):\n sorted_numbers.append(number)\n return sorted_numbers\nprint(countingsort([4,6,2,2,7,3,8,9],9))\nprint(countingsort([4,6,2,7,3,8,9],9))\n","sub_path":"Searching&Sorting/CountingSort.py","file_name":"CountingSort.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"550154132","text":"import random\r\nimport time\r\n\r\ndef quitGame():\r\n print(\"3\")\r\n time.sleep(1)\r\n print(\"2\")\r\n time.sleep(1)\r\n print(\"1\")\r\n time.sleep(1)\r\n print(\"bye bye\")\r\n exit()\r\n\r\ndef playGame():\r\n\r\n winner = False\r\n loser = False\r\n\r\n while winner == loser:\r\n roundOne = 0\r\n roundTwo = 0\r\n roundThree = 0\r\n\r\n options = [\"rock\", \"paper\", \"scissors\"]\r\n numOptions = [1, 2, 3]\r\n\r\n while roundOne == 0:\r\n print(\"Round One!!\")\r\n print(\"In a moment, you will choose either rock, paper, or scissors with **A NUMBER**\")\r\n time.sleep(1)\r\n playerChoice = input(\"Please choose rock[1], paper[2], scissors[3]: \")\r\n try:\r\n playerChoice = int(playerChoice)\r\n if playerChoice in numOptions:\r\n playerChoice = options[(playerChoice - 1)]\r\n except:\r\n print(\"You must have put in a bad input. Let's try this round again!\")\r\n\r\n opponentChoice = options[random.randint(0, 2)]\r\n\r\n if playerChoice == opponentChoice:\r\n print(f\"You both had {playerChoice}!\")\r\n print(\"Let's try this round again!\")\r\n roundOne = 0\r\n else:\r\n print(f\"You chose {playerChoice}, and your opponent chose {opponentChoice}.\")\r\n if (playerChoice == \"rock\") and (opponentChoice == \"scissors\"):\r\n print(\"win\")\r\n roundOne = 1\r\n elif (playerChoice == \"scissors\") and (opponentChoice == \"paper\"):\r\n print(\"win\")\r\n roundOne = 1\r\n elif (playerChoice == \"paper\") and (opponentChoice == \"rock\"):\r\n print(\"win\")\r\n roundOne = 1\r\n else:\r\n print(\"lose\")\r\n roundOne = 2\r\n\r\n\r\n while roundTwo == 0:\r\n print(\"Round Two!!\")\r\n print(\"In a moment, you will choose either rock, paper, or scissors with **A NUMBER**\")\r\n time.sleep(1)\r\n playerChoice = input(\"Please choose rock[1], paper[2], scissors[3]: \")\r\n try:\r\n playerChoice = int(playerChoice)\r\n if playerChoice in numOptions:\r\n playerChoice = options[(playerChoice - 1)]\r\n except:\r\n print(\"You must have put in a bad input. Let's try this round again!\")\r\n\r\n opponentChoice = options[random.randint(0, 2)]\r\n\r\n if playerChoice == opponentChoice:\r\n print(f\"You both had {playerChoice}!\")\r\n print(\"Let's try this round again!\")\r\n roundTwo = 0\r\n else:\r\n print(f\"You chose {playerChoice}, and your opponent chose {opponentChoice}.\")\r\n if (playerChoice == \"rock\") and (opponentChoice == \"scissors\"):\r\n print(\"win\")\r\n roundTwo = 1\r\n elif (playerChoice == \"scissors\") and (opponentChoice == \"paper\"):\r\n print(\"win\")\r\n roundTwo = 1\r\n elif (playerChoice == \"paper\") and (opponentChoice == \"rock\"):\r\n print(\"win\")\r\n roundTwo = 1\r\n else:\r\n print(\"lose\")\r\n roundTwo = 2\r\n\r\n if roundOne == roundTwo:\r\n roundThree = roundOne\r\n\r\n while roundThree == 0:\r\n print(\"Round Three!!\")\r\n print(\"In a moment, you will choose either rock, paper, or scissors with **A NUMBER**\")\r\n time.sleep(1)\r\n playerChoice = input(\"Please choose rock[1], paper[2], scissors[3]: \")\r\n try:\r\n playerChoice = int(playerChoice)\r\n if playerChoice in numOptions:\r\n playerChoice = options[(playerChoice - 1)]\r\n except:\r\n print(\"You must have put in a bad input. 
Let's try this round again!\")\r\n\r\n opponentChoice = options[random.randint(0, 2)]\r\n\r\n if playerChoice == opponentChoice:\r\n print(f\"You both had {playerChoice}!\")\r\n print(\"Let's try this round again!\")\r\n roundThree = 0\r\n else:\r\n print(f\"You chose {playerChoice}, and your opponent chose {opponentChoice}.\")\r\n if (playerChoice == \"rock\") and (opponentChoice == \"scissors\"):\r\n print(\"win\")\r\n roundThree = 1\r\n elif (playerChoice == \"scissors\") and (opponentChoice == \"paper\"):\r\n print(\"win\")\r\n roundThree = 1\r\n elif (playerChoice == \"paper\") and (opponentChoice == \"rock\"):\r\n print(\"win\")\r\n roundThree = 1\r\n else:\r\n print(\"lose\")\r\n roundThree = 2\r\n\r\n if (roundOne == 1 and roundTwo == 1) or (roundOne == 1 and roundThree == 1) or (roundTwo == 1 and roundThree == 1):\r\n winner = True\r\n print(\"You're the winner!\")\r\n time.sleep(1)\r\n elif (roundOne == 2 and roundTwo == 2) or (roundOne == 2 and roundThree == 2) or (roundTwo == 2 and roundThree == 2):\r\n loser = True\r\n print(\"Sorry, you lost.\")\r\n\r\n\r\n print(\"\")\r\n startGame = False\r\n\r\nprint(\"Let's play rock, paper, scissors!\")\r\ntime.sleep(.5)\r\n\r\nstartGame = False\r\n\r\nrpsrules = \"\"\r\nwhile rpsrules == \"\":\r\n rpsrules = input(\"Do you know how to play (yes/no): \")\r\n if rpsrules.lower() == \"yes\":\r\n wannaPlay = input(\"Would you like to play (yes/no): \")\r\n if wannaPlay.lower() == \"yes\":\r\n startGame = True\r\n elif wannaPlay.lower() == \"no\":\r\n print(\"Okay...\")\r\n quitGame()\r\n else:\r\n print(\"seems you mistyped...\")\r\n print(\"Let the games begin, anyway!\")\r\n startGame = True\r\n elif rpsrules.lower() == \"no\":\r\n print(\"In rock, paper, scissors you and your opponent want to win each round.\")\r\n time.sleep(2.5)\r\n print(\"Each round you will choose one of three choices: rock, paper, or scissors.\")\r\n time.sleep(2.5)\r\n print(\"In rock, paper, scissors: rock > scissors; paper > rock; scissors > paper.\")\r\n time.sleep(2.5)\r\n print(\"Your goal is to win at least 2 of 3 rounds.\")\r\n time.sleep(5)\r\n print(\"Let's begin!\")\r\n startGame = True\r\n time.sleep(.5)\r\n else:\r\n print(\"You mistyped.\")\r\n rpsrules = \"\"\r\n\r\nwhile startGame:\r\n playGame()\r\n startGame = False\r\n\r\nprint(\"Thanks for playing!\")\r\n","sub_path":"rockPaperScizzors.py","file_name":"rockPaperScizzors.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"540317646","text":"import json\nimport os\nimport tempfile\nimport numpy as np\nimport numba\nfrom enum import IntEnum\n\nimport strax\nimport straxen\nfrom straxen.common import pax_file, get_resource, first_sr1_run\nexport, __all__ = strax.exporter()\nfrom .pulse_processing import HE_PREAMBLE\n\n\n@export\n@strax.takes_config(\n strax.Option('n_top_pmts', default=straxen.n_top_pmts, infer_type=False,\n help=\"Number of top PMTs\"),\n strax.Option('check_peak_sum_area_rtol', default=None, track=False, infer_type=False,\n help=\"Check if the sum area and the sum of area per \"\n \"channel are the same. If None, don't do the \"\n \"check. To perform the check, set to the desired \"\n \" rtol value used e.g. '1e-4' (see np.isclose).\"),\n)\nclass PeakBasics(strax.Plugin):\n \"\"\"\n Compute the basic peak-properties, thereby dropping structured\n arrays.\n NB: This plugin can therefore be loaded as a pandas DataFrame.\n \"\"\"\n __version__ = \"0.1.0\"\n parallel = True\n depends_on = ('peaks',)\n provides = 'peak_basics'\n dtype = [\n (('Start time of the peak (ns since unix epoch)',\n 'time'), np.int64),\n (('End time of the peak (ns since unix epoch)',\n 'endtime'), np.int64),\n (('Weighted center time of the peak (ns since unix epoch)',\n 'center_time'), np.int64),\n (('Peak integral in PE',\n 'area'), np.float32),\n (('Number of PMTs contributing to the peak',\n 'n_channels'), np.int16),\n (('PMT number which contributes the most PE',\n 'max_pmt'), np.int16),\n (('Area of signal in the largest-contributing PMT (PE)',\n 'max_pmt_area'), np.float32),\n (('Total number of saturated channels',\n 'n_saturated_channels'), np.int16),\n (('Width (in ns) of the central 50% area of the peak',\n 'range_50p_area'), np.float32),\n (('Width (in ns) of the central 90% area of the peak',\n 'range_90p_area'), np.float32),\n (('Fraction of area seen by the top array '\n '(NaN for peaks with non-positive area)',\n 'area_fraction_top'), np.float32),\n (('Length of the peak waveform in samples',\n 'length'), np.int32),\n (('Time resolution of the peak waveform in ns',\n 'dt'), np.int16),\n (('Time between 10% and 50% area quantiles [ns]',\n 'rise_time'), np.float32),\n (('Hits within tight range of mean',\n 'tight_coincidence'), np.int16),\n (('PMT channel within tight range of mean',\n 'tight_coincidence_channel'), np.int16),\n (('Classification of the peak(let)',\n 'type'), np.int8)\n ]\n\n def compute(self, peaks):\n p = peaks\n r = np.zeros(len(p), self.dtype)\n for q in 'time length dt area type'.split():\n r[q] = p[q]\n r['endtime'] = p['time'] + p['dt'] * p['length']\n r['n_channels'] = (p['area_per_channel'] > 0).sum(axis=1)\n r['range_50p_area'] = p['width'][:, 5]\n r['range_90p_area'] = p['width'][:, 9]\n r['max_pmt'] = np.argmax(p['area_per_channel'], axis=1)\n r['max_pmt_area'] = np.max(p['area_per_channel'], axis=1)\n r['tight_coincidence'] = p['tight_coincidence']\n r['n_saturated_channels'] = p['n_saturated_channels']\n\n n_top = self.config['n_top_pmts']\n area_top = p['area_per_channel'][:, :n_top].sum(axis=1)\n # Recalculate to prevent numerical inaccuracy #442\n area_total = p['area_per_channel'].sum(axis=1)\n # Negative-area peaks get NaN AFT\n m = p['area'] > 0\n r['area_fraction_top'][m] = area_top[m]/area_total[m]\n r['area_fraction_top'][~m] = float('nan')\n r['rise_time'] = -p['area_decile_from_midpoint'][:, 1]\n\n if self.config['check_peak_sum_area_rtol'] is not None:\n self.check_area(area_total, p, self.config['check_peak_sum_area_rtol'])\n # Negative or zero-area 
peaks have centertime at startime\n r['center_time'] = p['time']\n r['center_time'][m] += self.compute_center_times(peaks[m])\n return r\n\n @staticmethod\n @numba.njit(cache=True, nogil=True)\n def compute_center_times(peaks):\n result = np.zeros(len(peaks), dtype=np.int32)\n for p_i, p in enumerate(peaks):\n t = 0\n for t_i, weight in enumerate(p['data']):\n t += t_i * p['dt'] * weight\n result[p_i] = t / p['area']\n return result\n\n @staticmethod\n def check_area(area_per_channel_sum, peaks, rtol) -> None:\n \"\"\"\n Check if the area of the sum-wf is the same as the total area\n (if the area of the peak is positively defined).\n\n :param area_per_channel_sum: the summation of the\n peaks['area_per_channel'] which will be checked against the\n values of peaks['area'].\n :param peaks: array of peaks.\n :param rtol: relative tolerance for difference between\n area_per_channel_sum and peaks['area']. See np.isclose.\n :raises: ValueError if the peak area and the area-per-channel\n sum are not sufficiently close\n \"\"\"\n positive_area = peaks['area'] > 0\n if not np.sum(positive_area):\n return\n\n is_close = np.isclose(area_per_channel_sum[positive_area],\n peaks[positive_area]['area'],\n rtol=rtol,\n )\n\n if not is_close.all():\n for peak in peaks[positive_area][~is_close]:\n print('bad area')\n strax.print_record(peak)\n\n p_i = np.where(~is_close)[0][0]\n peak = peaks[positive_area][p_i]\n area_fraction_off = 1 - area_per_channel_sum[positive_area][p_i] / peak['area']\n message = (f'Area not calculated correctly, it\\'s '\n f'{100*area_fraction_off} % off, time: {peak[\"time\"]}')\n raise ValueError(message)\n\n\n@export\nclass PeakBasicsHighEnergy(PeakBasics):\n __doc__ = HE_PREAMBLE + PeakBasics.__doc__\n __version__ = '0.0.2'\n depends_on = 'peaks_he'\n provides = 'peak_basics_he'\n child_ends_with = '_he'\n\n def compute(self, peaks_he):\n return super().compute(peaks_he)\n\n\n@export\n@strax.takes_config(\n strax.Option(\n 'nn_architecture', infer_type=False,\n help='Path to JSON of neural net architecture',\n default_by_run=[\n (0, pax_file('XENON1T_tensorflow_nn_pos_20171217_sr0.json')),\n (first_sr1_run, straxen.aux_repo + '3548132b55f81a43654dba5141366041e1daaf01/strax_files/XENON1T_tensorflow_nn_pos_20171217_sr1_reformatted.json')]), # noqa\n strax.Option(\n 'nn_weights', infer_type=False,\n help='Path to HDF5 of neural net weights',\n default_by_run=[\n (0, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr0.h5')),\n (first_sr1_run, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr1.h5'))]), # noqa\n strax.Option('min_reconstruction_area',\n help='Skip reconstruction if area (PE) is less than this',\n default=10, infer_type=False,),\n strax.Option('n_top_pmts', default=straxen.n_top_pmts, infer_type=False,\n help=\"Number of top PMTs\")\n)\nclass PeakPositions1T(strax.Plugin):\n \"\"\"Compute the S2 (x,y)-position based on a neural net.\"\"\"\n dtype = [('x', np.float32,\n 'Reconstructed S2 X position (cm), uncorrected'),\n ('y', np.float32,\n 'Reconstructed S2 Y position (cm), uncorrected')\n ] + strax.time_fields\n depends_on = ('peaks',)\n provides = \"peak_positions\"\n\n # Parallelization doesn't seem to make it go faster\n # Is there much pure-python stuff in tensorflow?\n # Process-level paralellization might work, but you'd have to do setup\n # in each process, which probably negates the benefits,\n # except for huge chunks\n parallel = False\n\n __version__ = '0.1.1'\n\n def setup(self):\n import tensorflow as tf\n keras = tf.keras\n nn_conf = 
get_resource(self.config['nn_architecture'], fmt='json')\n # badPMTList was inserted by a very clever person into the keras json\n # file. Let's delete it to prevent future keras versions from crashing.\n # Do NOT try `del nn_conf['badPMTList']`! See get_resource docstring\n # for the gruesome details.\n bad_pmts = nn_conf['badPMTList']\n nn = keras.models.model_from_json(json.dumps({\n k: v\n for k, v in nn_conf.items()\n if k != 'badPMTList'}))\n self.pmt_mask = ~np.in1d(np.arange(self.config['n_top_pmts']),\n bad_pmts)\n\n # Keras needs a file to load its weights. We can't put the load\n # inside the context, then it would break on Windows,\n # because there temporary files cannot be opened again.\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(get_resource(self.config['nn_weights'],\n fmt='binary'))\n fname = f.name\n nn.load_weights(fname)\n os.remove(fname)\n self.nn = nn\n\n def compute(self, peaks):\n result = np.ones(len(peaks), dtype=self.dtype)\n result['time'], result['endtime'] = peaks['time'], strax.endtime(peaks)\n result['x'] *= float('nan')\n result['y'] *= float('nan')\n\n # Keep large peaks only\n peak_mask = peaks['area'] > self.config['min_reconstruction_area']\n if not np.sum(peak_mask):\n # Nothing to do, and .predict crashes on empty arrays\n return result\n\n # Input: normalized hitpatterns in good top PMTs\n _in = peaks['area_per_channel'][peak_mask, :]\n _in = _in[:, :self.config['n_top_pmts']][:, self.pmt_mask]\n with np.errstate(divide='ignore', invalid='ignore'):\n _in /= _in.sum(axis=1).reshape(-1, 1)\n\n # Output: positions in mm (unfortunately), so convert to cm\n _out = self.nn.predict(_in) / 10\n\n # Set output in valid rows. Do NOT try result[peak_mask]['x']\n # unless you want all NaN positions (boolean masks make a copy unless\n # they are used as the last index)\n result['x'][peak_mask] = _out[:, 0]\n result['y'][peak_mask] = _out[:, 1]\n return result\n\n\n@export\n@strax.takes_config(\n strax.Option('min_area_fraction', default=0.5, infer_type=False,\n help='The area of competing peaks must be at least '\n 'this fraction of that of the considered peak'),\n strax.Option('nearby_window', default=int(1e7), infer_type=False,\n help='Peaks starting within this time window (on either side)'\n 'in ns count as nearby.'),\n strax.Option('peak_max_proximity_time', default=int(1e8), infer_type=False,\n help='Maximum value for proximity values such as '\n 't_to_next_peak [ns]'))\nclass PeakProximity(strax.OverlapWindowPlugin):\n \"\"\"\n Look for peaks around a peak to determine how many peaks are in\n proximity (in time) of a peak.\n \"\"\"\n depends_on = ('peak_basics',)\n dtype = [\n ('n_competing', np.int32,\n 'Number of nearby larger or slightly smaller peaks'),\n ('n_competing_left', np.int32,\n 'Number of larger or slightly smaller peaks left of the main peak'),\n ('t_to_prev_peak', np.int64,\n 'Time between end of previous peak and start of this peak [ns]'),\n ('t_to_next_peak', np.int64,\n 'Time between end of this peak and start of next peak [ns]'),\n ('t_to_nearest_peak', np.int64,\n 'Smaller of t_to_prev_peak and t_to_next_peak [ns]')\n ] + strax.time_fields\n\n __version__ = '0.4.0'\n\n def get_window_size(self):\n return self.config['peak_max_proximity_time']\n\n def compute(self, peaks):\n windows = strax.touching_windows(peaks, peaks,\n window=self.config['nearby_window'])\n n_left, n_tot = self.find_n_competing(\n peaks,\n windows,\n fraction=self.config['min_area_fraction'])\n\n t_to_prev_peak = (\n np.ones(len(peaks), 
dtype=np.int64)\n * self.config['peak_max_proximity_time'])\n t_to_prev_peak[1:] = peaks['time'][1:] - peaks['endtime'][:-1]\n\n t_to_next_peak = t_to_prev_peak.copy()\n t_to_next_peak[:-1] = peaks['time'][1:] - peaks['endtime'][:-1]\n\n return dict(\n time=peaks['time'],\n endtime=strax.endtime(peaks),\n n_competing=n_tot,\n n_competing_left=n_left,\n t_to_prev_peak=t_to_prev_peak,\n t_to_next_peak=t_to_next_peak,\n t_to_nearest_peak=np.minimum(t_to_prev_peak, t_to_next_peak))\n\n @staticmethod\n @numba.jit(nopython=True, nogil=True, cache=True)\n def find_n_competing(peaks, windows, fraction):\n n_left = np.zeros(len(peaks), dtype=np.int32)\n n_tot = n_left.copy()\n areas = peaks['area']\n\n for i, peak in enumerate(peaks):\n left_i, right_i = windows[i]\n threshold = areas[i] * fraction\n n_left[i] = np.sum(areas[left_i:i] > threshold)\n n_tot[i] = n_left[i] + np.sum(areas[i + 1:right_i] > threshold)\n\n return n_left, n_tot\n\n@export\n@strax.takes_config(\n strax.Option(name='pre_s2_area_threshold', default=1000,\n help='Only take S2s larger than this into account '\n 'when calculating PeakShadow [PE]'),\n strax.Option(name='deltatime_exponent', default=-1.0,\n help='The exponent of delta t when calculating shadow'),\n strax.Option('time_window_backward', default=int(3e9),\n help='Search for S2s causing shadow in this time window [ns]'),\n strax.Option(name='electron_drift_velocity',\n default=('electron_drift_velocity', 'ONLINE', True),\n help='Vertical electron drift velocity in cm/ns (1e4 m/ms)'),\n strax.Option(name='max_drift_length', default=straxen.tpc_z,\n help='Total length of the TPC from the bottom of gate to the '\n 'top of cathode wires [cm]'),\n strax.Option(name='exclude_drift_time', default=False,\n help='Subtract max drift time to avoid peak interference in '\n 'a single event [ns]'))\nclass PeakShadow(strax.OverlapWindowPlugin):\n \"\"\"\n This plugin can find and calculate the previous S2 shadow at peak level,\n with time window backward and previous S2 area as options.\n It also gives the area and position information of these previous S2s.\n \"\"\"\n\n __version__ = '0.1.0'\n depends_on = ('peak_basics', 'peak_positions')\n provides = 'peak_shadow'\n save_when = strax.SaveWhen.EXPLICIT\n\n def setup(self):\n self.time_window_backward = self.config['time_window_backward']\n if self.config['exclude_drift_time']:\n electron_drift_velocity = straxen.get_correction_from_cmt(\n self.run_id,\n self.config['electron_drift_velocity'])\n drift_time_max = int(self.config['max_drift_length'] / electron_drift_velocity)\n self.n_drift_time = drift_time_max\n else:\n self.n_drift_time = 0\n self.s2_threshold = self.config['pre_s2_area_threshold']\n self.exponent = self.config['deltatime_exponent']\n\n def get_window_size(self):\n return 3 * self.config['time_window_backward']\n\n def infer_dtype(self):\n dtype = [('shadow', np.float32, 'previous s2 shadow [PE/ns]'),\n ('pre_s2_area', np.float32, 'previous s2 area [PE]'),\n ('shadow_dt', np.int64, 'time difference to the previous s2 [ns]'),\n ('pre_s2_x', np.float32, 'x of previous s2 peak causing shadow [cm]'),\n ('pre_s2_y', np.float32, 'y of previous s2 peak causing shadow [cm]')]\n dtype += strax.time_fields\n return dtype\n\n def compute(self, peaks):\n roi_shadow = np.zeros(len(peaks), dtype=strax.time_fields)\n roi_shadow['time'] = peaks['center_time'] - self.time_window_backward\n roi_shadow['endtime'] = peaks['center_time'] - self.n_drift_time\n\n mask_pre_s2 = peaks['area'] > self.s2_threshold\n mask_pre_s2 &= 
peaks['type'] == 2\n split_peaks = strax.touching_windows(peaks[mask_pre_s2], roi_shadow)\n res = np.zeros(len(peaks), self.dtype)\n res['pre_s2_x'] = np.nan\n res['pre_s2_y'] = np.nan\n if len(peaks):\n self.compute_shadow(peaks, peaks[mask_pre_s2], split_peaks, self.exponent, res)\n\n res['time'] = peaks['time']\n res['endtime'] = strax.endtime(peaks)\n return res\n\n @staticmethod\n @numba.njit\n def compute_shadow(peaks, pre_s2_peaks, touching_windows, exponent, res):\n \"\"\"\n For each peak in peaks, check if there is a shadow-casting S2 peak\n and check if it casts the largest shadow\n \"\"\"\n for p_i, p_a in enumerate(peaks):\n # reset for every peak\n new_shadow = 0\n s2_indices = touching_windows[p_i]\n for s2_idx in range(s2_indices[0], s2_indices[1]):\n s2_a = pre_s2_peaks[s2_idx]\n if p_a['center_time'] - s2_a['center_time'] <= 0:\n continue\n new_shadow = s2_a['area'] * (\n p_a['center_time'] - s2_a['center_time'])**exponent\n if new_shadow > res['shadow'][p_i]:\n res['shadow'][p_i] = new_shadow\n res['pre_s2_area'][p_i] = s2_a['area']\n res['shadow_dt'][p_i] = p_a['center_time'] - s2_a['center_time']\n res['pre_s2_x'][p_i] = s2_a['x']\n res['pre_s2_y'][p_i] = s2_a['y']\n\n\n@export\nclass VetoPeakTags(IntEnum):\n \"\"\"Identifies by which detector peak was tagged.\n \"\"\"\n # Peaks are not inside any veto interval\n NO_VETO = 0\n # Peaks are inside a veto interval issued by:\n NEUTRON_VETO = 1\n MUON_VETO = 2\n BOTH = 3\n\n\n@export\nclass PeakVetoTagging(strax.Plugin):\n \"\"\"\n Plugin which tags S1 peaks according to muon and neutron-vetos.\n Tagging S2s is does not make sense as they occur with a delay.\n However, we compute for both S1/S2 the time delay to the closest veto\n region.\n\n * untagged: 0\n * neutron-veto: 1\n * muon-veto: 2\n * both vetos: 3\n \"\"\"\n __version__ = '0.0.1'\n depends_on = ('peak_basics', 'veto_regions_nv', 'veto_regions_mv')\n provides = ('peak_veto_tags')\n save_when = strax.SaveWhen.TARGET\n\n dtype = strax.time_fields + [\n ('veto_tag', np.int8,\n 'Veto tag for S1 peaks. unatagged: 0, nveto: 1, mveto: 2, both: 3'),\n ('time_to_closest_veto', np.int64, 'Time to closest veto interval boundary in ns (can be '\n 'negative if closest boundary comes before peak.). 
')\n ]\n\n def get_time_difference(self, peaks, veto_regions_nv, veto_regions_mv):\n \"\"\"\n Computes time differences to closest nv/mv veto signal.\n\n It might be that neutron-veto and muon-veto signals overlap\n Hence we compute first the individual time differences to the\n corresponding vetos and keep afterwards the smallest ones.\n \"\"\"\n dt_nv = get_time_to_closest_veto(peaks, veto_regions_nv)\n dt_mv = get_time_to_closest_veto(peaks, veto_regions_mv)\n\n dts = np.transpose([dt_nv, dt_mv])\n ind_axis1 = np.argmin(np.abs(dts), axis=1)\n return self._get_smallest_value(dts, ind_axis1)\n\n @staticmethod\n @numba.njit(cache=True, nogil=True)\n def _get_smallest_value(time_differences, index):\n res = np.zeros(len(time_differences), np.int64)\n for res_ind, (ind, dt) in enumerate(zip(index, time_differences)):\n res[res_ind] = dt[ind]\n return res\n\n def compute(self, peaks, veto_regions_nv, veto_regions_mv):\n touching_mv = strax.touching_windows(peaks, veto_regions_mv)\n touching_nv = strax.touching_windows(peaks, veto_regions_nv)\n\n tags = np.zeros(len(peaks))\n tags = tag_peaks(tags, touching_nv, straxen.VetoPeakTags.NEUTRON_VETO)\n tags = tag_peaks(tags, touching_mv, straxen.VetoPeakTags.MUON_VETO)\n\n dt = self.get_time_difference(peaks, veto_regions_nv, veto_regions_mv)\n return {'time': peaks['time'],\n 'endtime': strax.endtime(peaks),\n 'veto_tag': tags,\n 'time_to_closest_veto': dt,\n }\n\n\n@numba.njit(cache=True, nogil=True)\ndef tag_peaks(tags, touching_windows, tag_number):\n \"\"\"Tags every peak which are within the corresponding touching window\n with the defined tag number.\n\n :param tags: numpy.array in which the tags should be stored. Should\n be of length peaks.\n :param touching_windows: Start/End index of tags to be set to tag\n value.\n :param tag_number: integer representing the tag.\n :return: Updated tags.\n \"\"\"\n pre_tags = np.zeros(len(tags), dtype=np.int8)\n for start, end in touching_windows:\n pre_tags[start:end] = tag_number\n tags += pre_tags\n return tags\n\n\ndef get_time_to_closest_veto(peaks, veto_intervals):\n \"\"\"Computes time difference between peak and closest veto interval.\n\n The time difference is always computed from peaks-time field to\n the time or endtime of the veto_interval depending on which distance\n is smaller.\n \"\"\"\n vetos = np.zeros(len(veto_intervals)+2, strax.time_fields)\n vetos[1:-1]['time'] = veto_intervals['time']\n vetos[1:-1]['endtime'] = strax.endtime(veto_intervals)\n vetos[-1]['time'] = straxen.INFINITY_64BIT_SIGNED\n vetos[-1]['endtime'] = straxen.INFINITY_64BIT_SIGNED\n vetos[0]['time'] = -straxen.INFINITY_64BIT_SIGNED\n vetos[0]['endtime'] = -straxen.INFINITY_64BIT_SIGNED\n return _get_time_to_closest_veto(peaks, vetos)\n\n\n@numba.njit(cache=True, nogil=True)\ndef _get_time_to_closest_veto(peaks, vetos):\n res = np.zeros(len(peaks), dtype=np.int64)\n veto_index = 0\n for ind, p in enumerate(peaks):\n for veto_index in range(veto_index, len(vetos)):\n if veto_index+1 == len(vetos):\n # If we reach here all future peaks are closest to last veto:\n res[ind] = np.abs(vetos[-1]['time'] - p['time'])\n break\n\n # Current interval can be before or after current peak, hence\n # we have to check which distance is smaller.\n dt_current_veto = min(np.abs(vetos[veto_index]['time'] - p['time']),\n np.abs(vetos[veto_index]['endtime'] - p['time'])\n )\n # Next interval is always further in the future as the\n # current one, hence we only have to compute distance with \"time\".\n dt_next_veto = 
np.abs(vetos[veto_index+1]['time'] - p['time'])\n\n # Next veto is closer so we have to repeat in case next + 1\n # is even closer.\n if dt_current_veto >= dt_next_veto:\n veto_index += 1\n continue\n\n # Now compute time difference for real:\n dt_time = vetos[veto_index]['time'] - p['time']\n dt_endtime = vetos[veto_index]['endtime'] - p['time']\n\n if np.abs(dt_time) < np.abs(dt_endtime):\n res[ind] = dt_time\n else:\n res[ind] = dt_endtime\n break\n\n return res\n","sub_path":"straxen/plugins/peak_processing.py","file_name":"peak_processing.py","file_ext":"py","file_size_in_byte":23690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"290822987","text":"import itertools\nimport logging\nimport math\nimport numpy as np\nimport pandas as pd \n\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\nimport scipy.stats as stats\nimport seaborn as sns\nimport warnings\n\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.impute import SimpleImputer\n\nimport pdb\nimport warnings\n\nwarnings.filterwarnings('ignore')\nlogging.basicConfig(level=logging.WARNING)\nlog = logging.getLogger(__name__)\nstyle.use('bmh') ## style for charts\n\n\nclass autoEDA:\n\n def __init__(self, df, eda_type, target=None, max_categories=None):\n DEFAULT_MAX_CATEGORIES = 20\n max_categories = max_categories if max_categories is not None else DEFAULT_MAX_CATEGORIES\n\n self._validate_input_df(df)\n self._validate_input_target(df, target)\n self._validate_input_params(max_categories)\n\n numeric_cols, categorical_cols, combined_cols, all_cols = self._col_types(df, target)\n df = df[all_cols] #remove any non-numeric, non-categorical fields (ie dates)\n df = self._format_df_target(df, target)\n df = self._df_bin_max_categories(df, categorical_cols, max_categories)\n \n self.df = df\n self.target = target\n self.eda_type = eda_type\n self.numeric_cols = numeric_cols\n self.categorical_cols = categorical_cols\n self.combined_cols = combined_cols\n self.all_cols = all_cols\n self.max_categories = max_categories \n self._ranked_numeric_cols = self._rank_numeric_cols(df, target, numeric_cols)\n self._ranked_categorical_cols = self._rank_categorical_cols(df, target, categorical_cols)\n self._bar_lineplot_reference = None\n\n def _validate_input_df(self, df):\n \"\"\" Validate the input of class instantiation \"\"\"\n if not isinstance(df, pd.DataFrame): \n raise ValueError('Invalid input, please input a pandas DataFrame')\n if df.shape[1] < 2: raise ValueError('Dataframe must have at least 2 columns')\n if df.shape[0] < 10: raise ValueError('Dataframe must have at least 10 rows') \n\n def _validate_input_target(self, df, target):\n raise NotImplementedError\n\n # for binary classification:\n #if not isinstance(target, str): raise ValueError('Invalid target: {t}'.format(t=target))\n #if target not in df.columns: raise ValueError('Target not in dataframe: {t}'.format(t=target))\n\n def _validate_input_params(self, max_categories):\n if max_categories and not isinstance(max_categories, int): \n raise ValueError('Invalid max_categories parameter, must be int')\n if max_categories and max_categories < 2: raise ValueError('Max categories must be greater than 1')\n\n def _col_types(self, df, target):\n df_na_filled = df.fillna(value=0)\n numeric_cols = set(df_na_filled.select_dtypes(include=np.number).columns)\n categorical_cols_overlap = set(df.select_dtypes(include=['object','bool','category']).columns)\n categorical_cols = categorical_cols_overlap - numeric_cols\n\n numeric_cols.discard(target)\n categorical_cols.discard(target)\n\n combined_cols = numeric_cols.union(categorical_cols)\n all_cols = combined_cols.union([target]) if target is not None else combined_cols\n unusable_cols = list(set(df.columns) - set(all_cols))\n if len(unusable_cols) > 0:\n log.warning('Unable to use the following colunms: {uc}'.format(uc=unusable_cols))\n\n log.info('Using the following numeric columns: {n}'.format(n=numeric_cols))\n log.info('Using the following categorical columns: {c}'.format(c=categorical_cols))\n\n 
return numeric_cols, categorical_cols, combined_cols, all_cols\n\n def _format_df_target(self, df, target):\n raise NotImplementedError\n\n # for binary classification:\n #df[target] = pd.get_dummies(df[target], drop_first=True) # converts the target to 1 or 0\n\n def _df_bin_max_categories(self, df, categorical_cols, max_categories):\n \"\"\" Cap the max number of categories in categorical fields for readability \"\"\"\n for col in categorical_cols:\n df[col].fillna(\"Unknown\", inplace = True)\n top_categories = df[col].value_counts().nlargest(max_categories-1).index\n # set values with ranked counts below max_categories to \"Other(Overflow)\"\n df.loc[~df[col].isin(top_categories), col] = \"Other(Overflow)\"\n \n return df\n\n def _rank_numeric_cols(self, df, target, numeric_cols):\n raise NotImplementedError\n\n def _rank_categorical_cols(self, df, target, categorical_cols):\n raise NotImplementedError\n\n def _is_listlike(self, parameter):\n return isinstance(parameter, (list, tuple, set, pd.Series, pd.Index))\n\n def _validate_min_numeric_cols(self, cols, min_cols):\n \"\"\" Validate that at least n colunms are numeric in cols list (n=min_cols)\"\"\"\n if cols is False: \n numeric_count = len(self.numeric_cols)\n else:\n numeric_count = len(set(cols).intersection(self.numeric_cols))\n if numeric_count < min_cols:\n raise ValueError(\"Need at least {n} numeric columns\".format(n=min_cols))\n\n def _validate_min_categorical_cols(self, cols, min_cols):\n \"\"\" Validate that at least n colunms are categorical in cols list (n=min_cols)\"\"\"\n if cols is False: \n categorical_count = len(self.categorical_cols)\n else:\n categorical_count = len(set(cols).intersection(self.categorical_cols))\n if categorical_count < min_cols:\n raise ValueError(\"Need at least {n} categorical columns\".format(n=min_cols)) \n\n\n def _balance_df(self, df, target):\n if self.eda_type == 'classification':\n count_class_0, count_class_1 = df[target].value_counts()\n class_0, class_1 = df[target].value_counts().index\n max_sample = min(count_class_0, count_class_1)\n\n df_class_0 = df[df[target] == class_0]\n df_class_1 = df[df[target] == class_1]\n df_class_0_under = df_class_0.sample(max_sample)\n df_class_1_under = df_class_1.sample(max_sample)\n\n df = pd.concat([df_class_0_under, df_class_1_under], axis=0)\n\n return df\n\n def _get_best_numeric_cols(self, cols, max_plots):\n \"\"\" Find top n ranked numeric columns in cols list (n=max_plots)\"\"\"\n self._validate_min_numeric_cols(cols, min_cols=1)\n ranked_plot_cols = [col for col in self._ranked_numeric_cols if col in cols]\n max_plots = max_plots if max_plots < len(ranked_plot_cols) else len(ranked_plot_cols)\n return ranked_plot_cols[0:max_plots]\n\n def _get_best_categorical_cols(self, cols, max_plots):\n \"\"\" Find top n ranked categorical columns in cols list (n=max_plots)\"\"\"\n self._validate_min_categorical_cols(cols, min_cols=1)\n ranked_plot_cols = [col for col in self._ranked_categorical_cols if col in cols]\n max_plots = max_plots if max_plots < len(ranked_plot_cols) else len(ranked_plot_cols)\n\n return ranked_plot_cols[0:max_plots]\n\n def _get_best_col_pairs(self, ranked_cols, max_plots):\n \"\"\" Find top n pairs of columns in ranked_cols list (n=max_plots)\"\"\"\n # n is how many columns are needed to satisfy the pairs criteria\n n=2; m=1;\n while m < max_plots:\n m += n\n n += 1 \n\n # if the number of columns is less than n, use them all\n if len(ranked_cols) <= n: n = len(ranked_cols)\n plot_cols = ranked_cols[0:n]\n weakest_col = 
plot_cols[n-1]\n\n # get all possible pairs \n col_pairs = list(itertools.combinations(plot_cols, 2))\n # remove the excess using the weakest column (the nth column)\n while len(col_pairs) > max_plots:\n i = 0\n for col_pair in col_pairs:\n if col_pair[0] == weakest_col or col_pair[1] == weakest_col:\n break\n i += 1\n col_pairs.pop(i)\n\n return col_pairs \n\n def _get_best_numeric_pairs(self, cols, max_plots):\n \"\"\" Find top n pairs of ranked numeric columns in cols list (n=max_plots)\"\"\"\n self._validate_min_numeric_cols(cols, min_cols=2)\n ranked_cols = [col for col in self._ranked_numeric_cols if col in cols]\n return self._get_best_col_pairs(ranked_cols, max_plots) \n\n def _get_best_categorical_pairs(self, cols, max_plots):\n \"\"\" Find top n pairs of ranked categorical columns in cols list (n=max_plots)\"\"\"\n self._validate_min_categorical_cols(cols, min_cols=2)\n ranked_cols = [col for col in self._ranked_categorical_cols if col in cols]\n return self._get_best_col_pairs(ranked_cols, max_plots) \n\n def _get_best_numeric_categorical_pairs(self, cols, max_plots):\n \"\"\" Find top n ranked pairs of (numeric, categorical) columns in cols list (n=max_plots)\"\"\"\n self._validate_min_categorical_cols(cols, min_cols=1)\n self._validate_min_numeric_cols(cols, min_cols=1)\n ranked_categorical_cols = [col for col in self._ranked_categorical_cols if col in cols]\n ranked_numeric_cols = [col for col in self._ranked_numeric_cols if col in cols]\n\n ## Find best numeric-categorical pairs based on correlation and logistic regression score\n # try to get an even split, preferring categorical\n num_categoricals = math.ceil(math.sqrt(max_plots))\n if num_categoricals > len(ranked_categorical_cols): num_categoricals = len(ranked_categorical_cols) \n num_numeric = math.ceil(max_plots/num_categoricals)\n if num_numeric > len(ranked_numeric_cols): num_numeric = len(ranked_numeric_cols) \n\n categorical_pair_cols = ranked_categorical_cols[0:num_categoricals]\n numeric_pair_cols = ranked_numeric_cols[0:num_numeric]\n\n weakest_numeric_col = numeric_pair_cols[num_numeric-1]\n\n cat_num_pairs = [pair for pair in itertools.product(numeric_pair_cols, categorical_pair_cols)]\n # if over max_plots limit, pop off pairs with the worst numerical col one at a time\n while len(cat_num_pairs) > max_plots:\n i = 0\n for col_pair in cat_num_pairs:\n if col_pair[0] == weakest_numeric_col or col_pair[1] == weakest_numeric_col:\n break\n i += 1\n cat_num_pairs.pop(i)\n\n return cat_num_pairs\n\n def _log_transform_df(self, df, log_transform):\n \"\"\" Take log base 10 of the specified columns in the log_transform parameter \"\"\"\n logged_cols = []\n\n # log_transform can be: True, a string, or an iterable of cols to transform\n if log_transform is True:\n transform_cols = self.numeric_cols.intersection(set(df.columns))\n elif isinstance(log_transform, str):\n transform_cols = [log_transform]\n elif self._is_listlike(log_transform):\n transform_cols = log_transform\n else: raise ValueError('Invalid argument to log_tranform parameter: {l}'.format(l=log_transform))\n\n for col in transform_cols:\n if col not in self.numeric_cols: \n log.warning(\"Unable to log transform non-numeric column: {c}\".format(c=col))\n\n for col in df:\n # only positive values can be logged\n if col in transform_cols and col in self.numeric_cols and min(df[col]) >= 0:\n df[col] = np.log10(df[col] + 1)\n logged_cols.append(col)\n elif col in transform_cols and col in self.numeric_cols and min(df[col]) < 0:\n log.warning(\"Unable to log 
transform column with negative values: {c}\".format(c=col))\n\n return df, logged_cols\n\n def _create_transformed_plot_df(self, plot_cols, log_transform):\n \"\"\" Create local copy of df for plot and log transform \"\"\"\n plot_df = self.df.copy()\n # wrap in DataFrame() to ensure single index doesn't become Series\n if self.target:\n plot_df = pd.DataFrame(plot_df[ list(plot_cols) + [self.target] ])\n else: plot_df = pd.DataFrame(plot_df[plot_cols])\n\n if log_transform: \n plot_df, logged_cols = self._log_transform_df(plot_df, log_transform)\n else: logged_cols = []\n\n return plot_df, logged_cols\n\n def _filter_cols_to_plot(self, possible_cols, specified_cols, exclude, filter_function, max_plots): \n \"\"\" Apply parameters specified by the user to find list of column/bivariates to plot \"\"\"\n if not isinstance(max_plots, int): raise ValueError('Max_plots must be an integer')\n\n if specified_cols: \n if isinstance(specified_cols, str):\n specified_cols = [specified_cols]\n if not self._is_listlike(specified_cols): \n raise ValueError('Invalid cols argument: {c}'.format(c=specified_cols))\n invalid_col = list(set(specified_cols) - self.all_cols)\n if len(invalid_col) > 0:\n log.error('Invalid colums passed to cols parameter: {i}'.format(i=invalid_col))\n possible_cols = specified_cols\n\n if exclude:\n if isinstance(exclude, str):\n exclude = [exclude]\n if not self._is_listlike(exclude): \n raise ValueError('Invalid cols argument: {c}'.format(c=exclude))\n invalid_exclude = list(set(exclude) - self.combined_cols)\n if len(invalid_exclude) > 0:\n log.error('Invalid colums passed to exclude parameter: {i}'.format(i=invalid_exclude))\n if self.target and self.target in exclude: log.warning(\"Can't exclude target column\")\n possible_cols = [col for col in possible_cols if col not in exclude]\n\n cols_to_plot = filter_function(possible_cols, max_plots)\n return cols_to_plot\n\n def _param_plot_categorical(self, cols=False, exclude=None, max_plots=150, chart_params=None):\n \"\"\" Plot the catgegorical columns against target (if provided) \"\"\"\n plot_cols = self._filter_cols_to_plot(\n possible_cols = self.categorical_cols, \n specified_cols = cols, \n exclude = exclude, \n filter_function = self._get_best_categorical_cols, \n max_plots = max_plots\n )\n self._validate_min_categorical_cols(plot_cols, min_cols=1)\n \n for col in plot_cols:\n self._plot_categorical_col(col=col, chart_params=chart_params)\n\n def _param_plot_numeric(\n self, \n cols=False, \n exclude=None, \n max_plots=150, \n log_transform=False, \n chart_params=None\n ):\n \"\"\" Plot the numeric columns against target (if provided) \"\"\"\n plot_cols = self._filter_cols_to_plot(\n possible_cols = self.numeric_cols, \n specified_cols = cols, \n exclude = exclude, \n filter_function = self._get_best_numeric_cols, \n max_plots = max_plots\n )\n self._validate_min_numeric_cols(plot_cols, min_cols=1)\n plot_df, logged_cols = self._create_transformed_plot_df(plot_cols, log_transform)\n\n for col in plot_cols:\n self._plot_numeric_col(\n plot_df = plot_df, \n col = col, \n logged_cols = logged_cols, \n chart_params = chart_params,\n )\n\n def _param_plot_numeric_pairs(\n self, \n cols=False, \n exclude=None, \n log_transform=False, \n max_plots=40, \n chart_params=None\n ):\n \"\"\" Plot pairs of numeric columns colored by target (if provided)\"\"\"\n numeric_pairs = self._filter_cols_to_plot(\n possible_cols = self.numeric_cols, \n specified_cols = cols, \n exclude = exclude, \n filter_function = 
self._get_best_numeric_pairs, \n max_plots = max_plots\n )\n plot_cols = set([col for pair in numeric_pairs for col in pair])\n self._validate_min_numeric_cols(plot_cols, min_cols=2)\n plot_df, logged_cols = self._create_transformed_plot_df(plot_cols, log_transform)\n if chart_params['balance'] is True: \n plot_df = self._balance_df(plot_df, self.target)\n \n for pair in numeric_pairs:\n self._plot_numeric_pair(\n plot_df = plot_df, \n pair = pair, \n logged_cols = logged_cols, \n chart_params = chart_params,\n )\n\n def _param_plot_categorical_pairs(self, cols=False, exclude=None, max_plots=50, chart_params=None):\n \"\"\" Plot pairs categorical columns against the target \"\"\"\n categorical_pairs = self._filter_cols_to_plot(\n possible_cols = self.categorical_cols, \n specified_cols = cols, \n exclude = exclude, \n filter_function = self._get_best_categorical_pairs, \n max_plots = max_plots\n )\n plot_cols = set([col for pair in categorical_pairs for col in pair])\n self._validate_min_categorical_cols(plot_cols, min_cols=2)\n \n for pair in categorical_pairs:\n self._plot_categorical_pair(pair=pair, chart_params=chart_params)\n\n def _param_plot_numeric_categorical_pairs(\n self, \n cols=False, \n exclude=None, \n log_transform=False,\n max_plots=40,\n chart_params=None\n ):\n \"\"\" Plot pairs of numeric vs categorical columns broken down by the target \"\"\"\n self._validate_min_categorical_cols(cols, min_cols=1)\n self._validate_min_numeric_cols(cols, min_cols=1)\n \n num_cat_pairs = self._filter_cols_to_plot(\n possible_cols = self.combined_cols, \n specified_cols = cols, \n exclude = exclude, \n filter_function = self._get_best_numeric_categorical_pairs, \n max_plots = max_plots\n )\n plot_cols = set([col for pair in num_cat_pairs for col in pair])\n plot_df, logged_cols = self._create_transformed_plot_df(plot_cols, log_transform)\n\n for pair in num_cat_pairs:\n self._plot_numeric_categorical_pair(\n plot_df = plot_df, \n pair = pair, \n logged_cols = logged_cols, \n chart_params = chart_params,\n )\n\n def plot_pca(self, output_components=None):\n \"\"\" Perform PCA and plot variability described the PCs \"\"\"\n if len(self.numeric_cols) < 2: raise ValueError('Need at least 2 numeric cols for PCA')\n pca_df = self.df[self.numeric_cols].copy()\n \n imp=SimpleImputer(missing_values=np.NaN)\n imp_df=pd.DataFrame(imp.fit_transform(pca_df))\n \n pca = PCA(n_components=imp_df.shape[1])\n pca.fit(imp_df)\n\n ## Output error explained by sqrt(n)th term\n if not output_components: output_components = math.floor(math.sqrt(imp_df.shape[1]))\n\n ## Inspect the explained variances to determine how many components to use \n plt.subplots(figsize=(8, 8))\n # use n_components series to make x axis start at 1\n n_components = pd.Series(range(1,len(np.cumsum(pca.explained_variance_ratio_))+1))\n plt.plot(n_components, np.cumsum(pca.explained_variance_ratio_))\n plt.xlabel('Number of Components')\n plt.ylabel('Cumulative Explained Variance')\n\n ## Output the explained variances at output_components # of components\n output_str = 'Cumulative Explained variance at {n} PCA components:'.format(n=output_components)\n print(output_str,sum(pca.explained_variance_ratio_[0:output_components]) )\n \n def plot_corr_heatmap(self, annot=False, figsize=(10,10)):\n \"\"\" Plot grid of numeric columns with a heat map of their correlations \"\"\"\n if len(self.numeric_cols) < 2: raise ValueError('Need at least 2 numeric cols for corr heatmap')\n df_numeric = self.df[self.numeric_cols]\n\n fig, ax = 
plt.subplots(figsize=figsize)\n sns.heatmap(df_numeric.corr(), annot=annot, ax=ax)\n\n def _plot_categorical_col(self, col, chart_params):\n raise NotImplementedError\n\n def _plot_numeric_col(self, plot_df, col, chart_params, logged_cols):\n raise NotImplementedError\n\n def _plot_numeric_pair(self, plot_df, pair, chart_params, logged_cols):\n raise NotImplementedError\n\n def _plot_categorical_pair(self, pair, chart_params):\n raise NotImplementedError\n\n def _plot_numeric_categorical_pair(self, plot_df, pair, chart_params, logged_cols):\n raise NotImplementedError\n\n\n\n\n\nclass ClassificationEDA(autoEDA):\n def __init__(self, df, target=None, max_categories=None):\n super().__init__(df=df, target=target, max_categories=max_categories, eda_type='classification')\n\n def _validate_input_target(self, df, target):\n if not isinstance(target, str): raise ValueError('Invalid target: {t}'.format(t=target))\n if df[target].nunique() != 2: raise ValueError('Target must have 2 unique values')\n\n def _format_df_target(self, df, target):\n df[target] = pd.get_dummies(df[target], drop_first=True) # converts the target to 1 or 0\n return df\n\n def _rank_numeric_cols(self, df, target, numeric_cols):\n correlations = [(col, abs(self.df[self.target].corr(self.df[col]))) for col in self.numeric_cols]\n correlations.sort(key=lambda tup: tup[1], reverse=True)\n ranked_numeric_cols = [col_corr[0] for col_corr in correlations]\n \n return ranked_numeric_cols\n\n def _rank_categorical_cols(self, df, target, categorical_cols):\n \"\"\" Run small batch of logistic regression against target with each categorical col to rank \"\"\"\n sample_df = self.df.copy()\n if self.df.shape[0] > 1000: sample_df = self.df.sample(n=1000)\n\n col_scores = []\n for col in self.categorical_cols:\n y = sample_df[self.target]\n try:\n X_onehot = pd.get_dummies(sample_df[col], drop_first=True)\n except:\n pdb.set_trace()\n\n lr = LogisticRegression(n_jobs=-1, max_iter=999)\n try:\n lr.fit(X_onehot,y)\n except:\n pdb.set_trace()\n y_pred = lr.predict(X_onehot)\n acc = accuracy_score(y_pred, y)\n\n col_scores.append((col, acc))\n\n # rank based on training set accuracy\n col_scores.sort(key=lambda tup: tup[1], reverse=True)\n ranked_categorical_cols = [col_corr[0] for col_corr in col_scores]\n \n return ranked_categorical_cols\n\n\n def _plot_categorical_col(self, col, chart_params):\n \"\"\" Charts the counts of caterogical cols with % of binary response overlaid \"\"\"\n verbose = True if 'verbose' in chart_params and chart_params['verbose'] else False\n\n field_count = self.df[col].value_counts()\n field_count_df = field_count.to_frame()\n field_count_df.columns = ['count']\n\n # Get the % target by category for the line overlay\n field_target_pct = pd.crosstab(self.df[col], self.df[self.target], normalize='index') * 100\n field_target_pct = field_target_pct.reset_index()\n # Try to choose the axis with smaller values to avoid skewed axis\n if not self._bar_lineplot_reference:\n self._bar_lineplot_reference = 1 if field_target_pct[0].median() < field_target_pct[1].median() else 2\n drop_index = self._bar_lineplot_reference\n field_target_pct = field_target_pct.drop(field_target_pct.columns[-drop_index],axis=1)\n\n merged_filed_target_pct = field_target_pct.merge(field_count_df, right_index=True, left_on=col)\n field_target_data = merged_filed_target_pct.sort_values('count', ascending=False).reset_index(drop=True)\n if verbose : print(field_target_data)\n\n fig, ax = plt.subplots(figsize=(10, 6))\n ax.set_xlabel(col)\n ax = 
sns.barplot(\n field_target_data[col], \n field_target_data['count'], \n alpha=0.8,\n order = field_target_data.sort_values('count', ascending=False)[col]\n )\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')\n ax.set_ylabel('count (bars)')\n ax2 = ax.twinx() # dual axis graph\n # line graph of % target in category\n ax2 = sns.pointplot(\n x=field_target_data[col], \n y=field_target_data.iloc[:,-2], \n color='black', \n legend=False\n )\n ax2.set_ylabel('% {t} (line)'.format(t = self.target))\n plt.show()\n\n def _plot_numeric_col(self, plot_df, col, chart_params, logged_cols):\n bins = chart_params['bins']\n\n # prefix 'log_' to the colunm name if it was log transformed\n target_value0 = plot_df[self.target].value_counts().index[0]\n target_value1 = plot_df[self.target].value_counts().index[1]\n\n col_name = 'log_{c}'.format(c=col) if col in logged_cols else col\n fig, ax = plt.subplots(figsize=(10, 6))\n\n sns.distplot(\n plot_df.loc[plot_df[self.target] == target_value0][col],\n label=target_value0, \n bins = bins, \n )\n sns.distplot(\n plot_df.loc[plot_df[self.target] != target_value0][col],\n label=target_value1, \n bins = bins, \n )\n ax.legend(loc='upper right')\n ax.set_title('{c} histogram'.format(c=col_name))\n\n def _plot_numeric_pair(self, plot_df, pair, chart_params, logged_cols):\n alpha = chart_params['alpha']\n\n fig, ax = plt.subplots(figsize=(10, 6))\n prefix0 = 'log_' if pair[0] in logged_cols else ''\n prefix1 = 'log_' if pair[1] in logged_cols else ''\n title = '{p0}{f0} vs {p1}{f1}'.format(p0=prefix0, f0=pair[0], p1=prefix1, f1=pair[1])\n \n sns.scatterplot(\n data=plot_df, \n x=pair[0], \n y=pair[1], \n hue=plot_df[self.target].tolist(), \n alpha=alpha, \n )\n ax.set_title(title)\n\n def _plot_categorical_pair(self, pair, chart_params):\n annot = chart_params['annot']\n\n fig, ax = plt.subplots(figsize=(10, 6))\n sns.heatmap(\n pd.pivot_table(self.df,index=[pair[0]], values=self.target, columns=[pair[1]]),\n annot=annot,\n ) \n\n def _plot_numeric_categorical_pair(self, plot_df, pair, chart_params, logged_cols):\n boxplot_only = chart_params['boxplot_only']\n\n fig, ax = plt.subplots(figsize=(10, 6))\n # prefix 'log_' to the colunm name if it was log transformed\n numeric_col_name = 'log_{c}'.format(c=pair[0]) if pair[0] in logged_cols else pair[0]\n title = '{c} vs {n}'.format(c=pair[1], n=numeric_col_name)\n\n category_count = len(self.df[pair[1]].value_counts())\n if category_count <= 15 and not boxplot_only:\n sns.violinplot(\n x=pair[1], \n y=pair[0], \n hue=self.target, \n data=plot_df, \n split=True, \n inner='quart',\n )\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')\n else:\n sns.boxplot(x=pair[1], y=pair[0], hue=self.target, data=plot_df)\n\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')\n ax.set_title(title)\n\n def plot_categorical(\n self, \n cols=False, \n exclude=None, \n max_plots=150, \n verbose=False\n ):\n if verbose is not True and verbose is not False:\n log.error('Invalid verbose parameter, choose True or False')\n verbose = False\n\n chart_params = {}\n chart_params['verbose'] = verbose\n self._param_plot_categorical(\n cols=cols, \n exclude=exclude, \n max_plots=max_plots, \n chart_params=chart_params\n )\n\n def plot_numeric(\n self, \n cols=False, \n exclude=None, \n max_plots=150,\n log_transform=False,\n bins=None\n ):\n if bins is not None and not isinstance(bins, int):\n log.error('Invalid bins parameter, must be int')\n bins = 
None\n\n chart_params = {}\n chart_params['bins'] = bins\n self._param_plot_numeric(\n cols=cols, \n exclude=exclude, \n max_plots=max_plots,\n log_transform=log_transform,\n chart_params=chart_params\n )\n\n def plot_scatterplots(\n self, \n cols=False, \n exclude=None, \n max_plots=150,\n log_transform=False,\n alpha=0.6,\n balance=False,\n ):\n if alpha is not None and not isinstance(alpha, float):\n log.error('Invalid bins parameter, must be float')\n alpha = 0.6\n if balance is not False and balance is not True:\n log.error('Invalid balance parameter, must be True/False')\n balance=False\n\n chart_params = {}\n chart_params['alpha'] = alpha\n chart_params['balance'] = balance\n self._param_plot_numeric_pairs(\n cols=cols, \n exclude=exclude, \n max_plots=max_plots,\n log_transform=log_transform,\n chart_params=chart_params\n )\n\n def plot_categorical_pairs(\n self, \n cols=False, \n exclude=None, \n max_plots=150,\n annot=False\n ):\n if annot is not False and annot is not True:\n log.error('Invalid annot parameter, must be True/False')\n annot = False\n\n chart_params = {}\n chart_params['annot'] = annot\n self._param_plot_categorical_pairs(\n cols=cols, \n exclude=exclude, \n max_plots=max_plots,\n chart_params=chart_params\n )\n def plot_numeric_categorical_pairs(\n self, \n cols=False, \n exclude=None, \n max_plots=150,\n log_transform=False,\n boxplot_only=False\n ):\n if boxplot_only is not False and boxplot_only is not True:\n log.error('Invalid boxplot_only parameter, must be True/False')\n boxplot_only = False\n\n chart_params = {}\n chart_params['boxplot_only'] = boxplot_only\n self._param_plot_numeric_categorical_pairs(\n cols=cols, \n exclude=exclude, \n max_plots=max_plots,\n log_transform=log_transform,\n chart_params=chart_params\n )\n ","sub_path":"autoEDA.py","file_name":"autoEDA.py","file_ext":"py","file_size_in_byte":30897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"633369283","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\n'''\r\nurl_from_out을 매개변수로 받은 뒤에 soup객체와 함께 페이지의 소스코드를 텍스트화시킨 html_info를 제공받는다. \r\n그후 soup객체를 가공해서 리스트로 만든 뒤 soup_str_translate을 반환한다. \r\nurl_list라는 리스트를 생성하는데 페이지에 존재하는 모든 링크를 리스트에 할당한다. \r\n동시에 http를 포함하는 문자열 중에서 exe가 포함되지 않은 문자열을 url_list라는 리스트로 반환한다. \r\n\r\nfor link in soup.find_all('a'):\r\n print(link.get('href')) \r\n\r\n--> 을 사용하면 쉽게 하이퍼링크만 뽑아낼 수 있다. 근데 이 기능만 사용하면 exe파일과 같이 다른 페이지로 \r\n넘어가는 link가 아닌 다른 link들도 함께 sorting된다. 그러므로 따로 조건을 더 달아줘야함. \r\n'''\r\n\r\ndef to_text(url_from_out):\r\n url = url_from_out \r\n html_info = requests.get(url).text \r\n soup = BeautifulSoup(html_info,'html.parser') \r\n\r\n soup_text = soup.get_text() #soup객체를 텍스트로 만들어서 soup_text에 할당하기. \r\n soup_str_translate = soup_text.split() #soup_text객체를 공백 구분해서 soup_str_translate에 할당. \r\n\r\n url_list = [] #모든 링크는 url_list의 리스트를 통해 접근 가능하다. \r\n for link in soup.find_all('a'): #soup객체 내에 존재하는 모든 링크를 list로 만들어준다. \r\n url_list.append(link.get('href'))\r\n \r\n\r\n new_url_list = [] #새로운 리스트를 생성해서 가공된 url list를 할당한다. \r\n for i in range(len(url_list)): \r\n try:\r\n #http가 있는 문자열 즉 연결 가능한 링크만 따로 모아서 new_url_list에 할당한다.\r\n #가끔 None을 반환하는 링크가 있다. 그럴 경우는 제외시킨다. \r\n #exe 파일도 제외시킨다. \r\n if ((url_list[i][0:4] == \"http\") & (url_list[i] != None)): \r\n if \"exe\" not in url_list[i]: #실행파일이 아닌 경우에만\r\n new_url_list.append(url_list[i]) \r\n except TypeError: #TypeError가 발생하는 경우가 있음. 그럴 경우에는 무시하고 진행\r\n continue\r\n\r\n #해당되는 기호들이 있으면 제거한다. word_list에 정리된 단어들을 할당한다. \r\n word_list = []\r\n for word in soup_str_translate:\r\n symbols = \"\"\"▶↑~!@#$%^&*()_-+={[}]|\\\\;:\"‘'·<>?/.,ⓒ \"\"\" \r\n for i in range(len((symbols))):\r\n\r\n #해당 symbols내의 기호가 있으면 '' 내부의 것으로 바꾼다. \r\n #근데 '' 안에 아무것도 없으니 제거하는것과 마찬가지 기능을 함. \r\n word = word.replace(symbols[i], '') \r\n\r\n if len(word) > 0:\r\n word_list.append(word)\r\n \r\n return word_list,new_url_list #가공된 word에 관한 리스트와 url에 관한 리스트를 반환시켜 준다. 2개 반환..\r\n","sub_path":"bs4/collect_text.py","file_name":"collect_text.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"295035630","text":"import asyncio\nimport time\n\nasync def me_first() -> str:\n await asyncio.sleep(1)\n\n return \"I'm first!\"\n\nasync def me_second() -> str:\n await asyncio.sleep(1)\n\n return \"Then me!\"\n\nasync def me_third() -> str:\n await asyncio.sleep(1)\n\n return \"And finally me!\"\n\nasync def main():\n start = time.perf_counter()\n\n first_result = await me_first()\n second_result = await me_second()\n third_result = await me_third()\n\n end = time.perf_counter()\n\n print(f\"It took {end - start} seconds to complete all operations\\n\")\n\n print(first_result)\n print(second_result)\n print(third_result)\n\n\nasyncio.run(main())","sub_path":"src/coroutine-4.py","file_name":"coroutine-4.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"185853758","text":"from spack import *\n\n\nclass Lgrtk(CMakePackage):\n \"\"\"LGRTK is a tool kit for using the Lagrangian Grid Reconnection\n method to build simulations.\n \"\"\"\n\n homepage = \"https://github.com/SNLComputation/lgrtk\"\n url = \"https://github.com/SNLComputation/lgrtk/archive/v0.0.0.tar.gz\"\n git = \"git@github.com:SNLComputation/lgrtk.git\"\n\n version('master', branch='master')\n\n variant('tests', default=True, description='Compile tests')\n variant('cubit', default=False, description='Point to CUBIT for inline meshing')\n\n depends_on('omega-h')\n depends_on('cubit', when='+cubit')\n depends_on('googletest~pthreads', when='+tests')\n\n def cmake_args(self):\n args = []\n args.append('-DCMAKE_BUILD_TYPE:STRING=')\n args.append('-DBUILD_SHARED_LIBS:BOOL=ON')\n if '+tests' in self.spec:\n args.append('-DBUILD_TESTING:BOOL=ON')\n args.append('-DGTest_PREFIX:PATH={0}'.format(\n self.spec['googletest'].prefix))\n else:\n args.append('-DBUILD_TESTING:BOOL=OFF')\n args.append('-DOmega_h_PREFIX:PATH={0}'.format(\n self.spec['omega-h'].prefix))\n return args\n\n def flag_handler(self, name, flags):\n flags = list(flags)\n if name == 'cxxflags':\n flags.append(self.compiler.cxx11_flag)\n return (None, None, flags)\n\n def root_cmakelists_dir(self):\n return os.path.join(self.stage.source_path, 'v2')\n","sub_path":"packages/lgrtk/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"11639925","text":"import ssl\nimport argparse\nimport OpenSSL\nparser=argparse.ArgumentParser(description='Process args for Host')\nparser.add_argument('-i', type=str, dest='ip', required=True)\nparser.add_argument('-o', type=int ,dest=\"port\", required=False, default=443)\nargs=parser.parse_args()\n\naddr = (args.ip, args.port)\npem = ssl.get_server_certificate(addr)\n\nthumbprint = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)\nbigprint = thumbprint.digest('sha1')\n\n\nprint(bigprint)\n","sub_path":"Thumb.py","file_name":"Thumb.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"317483774","text":"from flask import (\n Blueprint,\n redirect,\n render_template,\n url_for,\n request,\n session\n)\nbp = Blueprint('main', __name__)\n\nfrom app.controllers.User import UserForm\n\n@bp.route('/', methods=['GET','POST'])\ndef home():\n form = UserForm(request.form)\n if request.method == \"POST\":\n return 'ok'\n return render_template('main/index.html', form=form)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"607575971","text":"# **=\t幂赋值运算符\tc **= a 等效于 c = c ** a\n# //=\t取整除赋值运算符\tc //= a 等效于 c = c // a\n\na = 21\nb = 10\nc = 2\n\nc **= a\nprint(\"6 - c 的值为:\", c)\n\nc //= a\nprint(\"7 - c 的值为:\", c)\n\n# 逻辑运算符\n\n# and or not\n\n# 成员运算符\n# in\t如果在指定的序列中找到值返回 True,否则返回 False。\tx 在 y 序列中 , 如果 x 在 y 序列中返回 True。\n# not in\t如果在指定的序列中没有找到值返回 True,否则返回 False。\tx 不在 y 序列中 , 如果 x 不在 y 序列中返回 True。\n\na = 10\nb = 20\nlist = [1, 2, 3, 4, 5];\n\nif (a in list):\n print(\"1 - 变量 a 在给定的列表中 list 中\")\nelse:\n print(\"1 - 变量 a 不在给定的列表中 list 中\")\n\nif (b not in list):\n print(\"2 - 变量 b 不在给定的列表中 list 中\")\nelse:\n print(\"2 - 变量 b 在给定的列表中 list 中\")\n\n# 身份运算符用于比较两个对象的存储单元\n# is 是判断两个标识符是不是引用自一个对象\tx is y, 类似 id(x) == id(y) , 如果引用的是同一个对象则返回 True,否则返回 False\n# is not 是判断两个标识符是不是引用自不同对象\tx is not y , 类似 id(a) != id(b)。如果引用的不是同一个对象则返回结果 True,否则返回 False。\n# id() 函数用于获取对象内存地址。\n\n# is 与 == 区别:\n# is 用于判断两个变量引用对象是否为同一个, == 用于判断引用变量的值是否相等。\n\na = [1, 2, 3]\nb = a\nprint(b is a);\n# True\nprint(b == a);\n# True","sub_path":"basic/7_operator.py","file_name":"7_operator.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"643439962","text":"from api import *\nfrom utils.pastebins import *\n\ndef load():\n \"\"\"Records books for the gentoolib v2.\"\"\"\n dbExecute('''create table if not exists books (\n book_id int auto_increment primary key,\n title text, \n added_by varchar(255) )\n ''')\nregisterModule('Books', load)\n\n@register(\"addbook %S\", syntax=\"addbook booktitle\")\ndef addBook(channel, sender, booktitle):\n \"\"\"Store the book in the database\"\"\"\n if booktitle == \"\":\n sendMessage(sender, \"Enter a proper book title. syntax: addbook booktitle\")\n return\n else:\n log.info('Trying to insert book: %s' % booktitle)\n dbExecute('INSERT INTO books (title, added_by) VALUES (%s, %s)', [booktitle, sender])\n sendMessage(channel, \"Book recorded\")\n\n@register(\"dumpbooks\", syntax=\"dumpbooks\")\ndef allBooks(channel, sender):\n \"\"\"Fetches all books in database, upload them on a pastebin, not featured not scalable fuckyou\"\"\"\n books = dbQuery('SELECT title, added_by FROM books')\n bookList = ''\n for (title, added_by) in books:\n bookList += '\\\"%s\\\" inserted by \\\"%s\\\"\\n' % (title, added_by)\n try:\n url = paste(bookList)\n except Exception:\n sendMessage(channel, \"Uploading book list failed.\")\n return\n sendMessage(channel, \"%s, Books: %s\" % (sender, url))\n","sub_path":"modules/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"404624339","text":"'''\nGiven a non-empty array of integers, every element appears three times except for one, which appears exactly once. Find that single one.\n\nNote:\n\nYour algorithm should have a linear runtime complexity. Could you implement it without using extra memory?\n\nExample 1:\n\nInput: [2,2,3,2]\nOutput: 3\nExample 2:\n\nInput: [0,1,0,1,0,1,99]\nOutput: 99\n\n\nSOLUTION LOGIC 1:\nWe can sum the bits in same positions for all the numbers and take modulo with 3. The bits for which sum is not multiple of 3, are the bits of number with single occurrence.\nLet us consider the example array {5, 5, 5, 8}. The 101, 101, 101, 1000\nSum of first bits%3 = (1 + 1 + 1 + 0)%3 = 0;\nSum of second bits%3 = (0 + 0 + 0 + 0)%0 = 0;\nSum of third bits%3 = (1 + 1 + 1 + 0)%3 = 0;\nSum of fourth bits%3 = (1)%3 = 1;\nHence number which appears once is 1000\n\n'''\nINT_SIZE = 4\ndef single_number(nums) -> int:\n print(nums)\n result = 0\n for i in range(0,INT_SIZE):\n sm = 0\n x = (1 << i)\n for j in range(0,len(nums)):\n print(nums[j],x,nums[j]&x)\n if (nums[j] & x):\n sm += 1\n\n\nif __name__ == '__main__':\n input = [2,2,3,2]\n print(single_number(input))\n","sub_path":"SingleNumberLeet.py","file_name":"SingleNumberLeet.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"69330112","text":"import pandas as pd\nimport numpy as np\nimport pickle\n\n\ndef pickle_read(path):\n with open(path, \"rb\") as f:\n pickle_file = pickle.load(f)\n return pickle_file\n\ndef pickle_write(item, path):\n with open(path, \"wb\") as f:\n pickle.dump(item, f)\n\ndef increasing_debt(row, column, i):\n if i > 1 and row[column + f'{i}'] < row[column + f'{i - 1}'] and row[\"is_streak\"] == 1:\n row[\"debt_streak\"] += 1\n row[\"raw_debt_accum\"] += row[column + f'{i - 1}'] - row[column + f'{i}']\n else:\n row[\"is_streak\"] = 0\n return row\n\n\ndef initiate_placeholders(df):\n df[\"is_streak\"], df[\"debt_streak\"] = 1, 0\n df[\"raw_debt_accum\"] = 0\n return df\n\n\ndef remove_placeholders(df):\n return df.drop(columns=[\"is_streak\", \"raw_debt_accum\"])\n\n\ndef replace_unknowns(df):\n education_dict = {4: 0, 5: 0, 6: 0}\n marriage_dict = {3: 0}\n df[\"EDUCATION\"].replace(education_dict, inplace=True)\n df[\"MARRIAGE\"].replace(marriage_dict, inplace=True)\n return df\n\ndef exclude_columns(looped_cols, start, end):\n \"\"\"Gathers column names to exclude\"\"\"\n looped_exc = []\n for col in looped_cols:\n sing_exc = [col + f\"{i}\" for i in np.arange(start, end)]\n looped_exc.extend(sing_exc)\n return looped_exc\n\n\ndef extract_dummies(df, column, value):\n \"\"\"Creates a column with dummy variables for matches in a value\"\"\"\n\n if np.isnan(value):\n return np.where(df[column].isna().values == True, 1, 0)\n else:\n return np.where(df[column].values == value, 1, 0)\n\n\ndef calculate_utilization(df):\n df[\"avg_utilization\"], df[\"avg_payment_impact\"] = 0, 0\n initiate_placeholders(df)\n for i in np.arange(1, 7):\n df['payment_impact' + f'{i}'] = (df['PAY_AMT' + f'{i}']) / df[\"LIMIT_BAL\"]\n df[\"utilization\" + f'{i}'] = df[\"BILL_AMT\" + f'{i}'] / df[\"LIMIT_BAL\"]\n if i > 1:\n df = df.apply(lambda x: increasing_debt(x, \"utilization\", i), axis=1)\n df[\"avg_utilization\"] += df[\"utilization\" + f'{i}']\n df[\"avg_payment_impact\"] += df[\"payment_impact\" + f'{i}']\n df[\"avg_utilization\"] = df[\"avg_utilization\"] / 6\n df[\"avg_payment_impact\"] = df[\"avg_payment_impact\"] / 6\n df[\"debt_avg_delta\"] = (df[\"raw_debt_accum\"] / df[\"debt_streak\"]).fillna(0)\n df = remove_placeholders(df)\n return df\n\n\ndef split_pay_columns(df):\n \"\"\"Extracts the quantitative information (The number of months of missed payments)\n from the qualitative, the two fields that determined ontime payments\"\"\"\n\n df = df.copy()\n for i in np.arange(0, 1):\n column = \"PAY_\" + f\"{i}\"\n df[column] = df[column].astype(int)\n dflt = df[column].unique().tolist()\n default_vals = dict(zip(dflt, dflt))\n default_vals[-1], default_vals[-2] = 0, 0\n df[f\"{column}N1\"] = extract_dummies(df, column, -1)\n df[f\"{column}N2\"] = extract_dummies(df, column, -1)\n df[column] = df[column].map(default_vals)\n return df\n\ndef combine_pay_columns(df):\n \"\"\"Extracts non correlated information from the past history pay columns. 
Any\n time there is an improvement to this field it is incremented and the sum is added\n to a new column in the dataframe.\"\"\"\n\n df = df.copy()\n before= df[\"PAY_0\"].values\n results = np.zeros(before.size)\n for i in np.arange(2, 7):\n column = \"PAY_\" + f\"{i}\"\n after = df[column].values\n comparison = before < after\n results += comparison.astype(int)\n df[\"payment_improvements\"] = results\n return df","sub_path":"week_7/classification-assessment/working_dir/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"314469489","text":"# -*- coding: utf-8 -*-\r\n\r\nclass ProgressBar(object):\r\n\tdef __init__(self, title, \r\n\t\t\t\tcount = 0.0,\r\n\t\t\t\trun_status = None,\r\n\t\t\t\tfin_status = None,\r\n\t\t\t\ttotal = 100.0,\r\n\t\t\t\tunit = '',\r\n\t\t\t\tseq = '/',\r\n\t\t\t\tchunk_size = 1.0):\r\n\t\tsuper(ProgressBar, self).__init__()\r\n\t\tself.info = '【%s】%s %.2f %s %s %.2f %s'\r\n\t\tself.title = title \r\n\t\tself.total = total \r\n\t\tself.count = count \r\n\t\tself.chunk_size = chunk_size\r\n\t\tself.status = run_status or '' \r\n\t\tself.fin_status = fin_status or ' ' * len(self.status)\r\n\t\tself.unit = unit \r\n\t\tself.seq = seq\r\n\t\r\n\t\r\n\tdef __get_info(self):\r\n\t\t# 【名称】状态 进度 单位 分割线 总数 单位\r\n\t\t_info = self.info % (self.title, self.status, self.count/self.chunk_size,\r\n\t\t\tself.unit, self.seq, self.total/self.chunk_size, self.unit)\r\n\t\treturn _info \r\n\t\r\n\tdef refresh(self, count = 1, status = None):\r\n\t\tself.count += count \r\n\t\t# 状态不为空\r\n\t\tself.status = status or self.status \r\n\t\tend_str = '\\r'\r\n\t\tif self.count >= self.total:\r\n\t\t\tend_str = '\\n'\r\n\t\t\tself.status = status or self.fin_status \r\n\t\tprint(self.__get_info(), end=end_str)","sub_path":"CET6-spider/progressBar.py","file_name":"progressBar.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"42037973","text":"from ..grid.grid import Grid\n\n\ndef get_closest_point(grid, srce, dest, avoid_ennemies = False):\n if avoid_ennemies:\n offsets = Grid.get_closest_points(srce,dest)\n idx = 0\n while ([srce[0] + offsets[idx][0], srce[1] + offsets[idx][1]] not in grid.get_range(srce) or [srce[0] + offsets[idx][0], srce[1] + offsets[idx][1]] in grid.get_ennemy_range()) and idx < len(offsets)-1:\n idx += 1\n return srce[0] + offsets[idx][0], srce[1] + offsets[idx][1]\n else:\n if srce[0] < dest[0]:\n return srce[0] + 1, srce[1]\n elif srce[0] > dest[0]:\n return srce[0] - 1, srce[1]\n elif srce[1] < dest[1]:\n return srce[0], srce[1] + 1\n else:\n return srce[0], srce[1] - 1","sub_path":"algorithm/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"586569430","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom .views import HomeView, ArticleDetailView, AddPostView,UpdatePostView,DeletePostView,AddCategoryView,CategoryView,CategoryListView,AddCommentView,DeleteCommentView\n\napp_name = 'blog'\n\nurlpatterns = [\n \n url(r'^$', HomeView.as_view(), name=\"home\"),\n url(r'^article/(?P\\d+)$', ArticleDetailView.as_view(), name='article-detail'),\n url(r'^add_post/$', AddPostView.as_view(), name='add-post'),\n url(r'^add_category/$', AddCategoryView.as_view(), name='add-category'),\n url(r'^article/edit/(?P\\d+)$', UpdatePostView.as_view(), name='update-post'),\n url(r'^article/(?P\\d+)/remove$', DeletePostView.as_view(), name='delete-post'),\n #url(r'^category/', CategoryView, name='category'),\n path('category//', CategoryView, name='category'),\n #path('category-list', CategoryListView, name='category-list'),\n url(r'^category-list/$', CategoryListView, name='category-list'),\n url(r'^article/(?P\\d+)/comment$', AddCommentView.as_view(), name='add-comment'),\n url(r'^comment/(?P\\d+)$', DeleteCommentView.as_view(), name='delete-comment'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"565472249","text":"import time\n\n\ndef show_date_time():\n print(time.asctime())\n\ndef countdown(t): \n \n while t: \n mins, secs = divmod(t, 60) \n timer = '{:02d}:{:02d}'.format(mins, secs) \n print(timer, end=\"\\r\") \n time.sleep(1) \n t -= 1\n print(\"Time Is Finished\")\n \ndef set_timer():\n Time = int(input(\"Enter the time in a second : \"))\n countdown(Time)\n\ndef count():\n print('Press \"Ctrl + z\" to stop time')\n t = 1\n while True: \n mins, secs = divmod(t, 60) \n timer = '{:02d}:{:02d}'.format(mins, secs) \n print(timer, end=\"\\r\") \n time.sleep(1) \n t += 1\n\ndef main():\n print(\"1.) Show Date And Time\")\n print(\"2.) Set Timer\")\n print(\"3.) Stop Watch\")\n \n user = int(input(\"Choose Options : \"))\n if(user==1):\n show_date_time()\n elif(user==2):\n set_timer()\n elif(user==3):\n count()\n else:\n print(\"invalid key pressed\")\nmain()\n","sub_path":"Clock.py","file_name":"Clock.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"5860188","text":"# -*- coding:gb2312 -*-\r\n# -*- $Id: itemmanager.py 2 2009-04-20 03:10:36Z fengyu05 $ -*-\r\n\r\nimport random\r\nimport math\r\nimport gamectrl.const as const\r\n\r\n\r\nclass ItemType(object):\r\n\r\n\tdef __init__(self, type=0, rate=10, contain=500, life=300, last_time=0,usage=False, name=\"\"):\r\n\t\tself.type = type # 加或者减, 0减, 1加\r\n\t\tself.speed = const.ITEM_SPEED\r\n\t\tself.contain = contain\r\n\t\tself.life = life\r\n\t\tself.rate = rate\r\n\t\tself.last_time = last_time\r\n\t\tself.usage = usage # 是否主动使用\r\n\t\tself.name = name\r\n\r\n\r\nclass Item(object):\r\n\r\n\tdef __init__(self, item_type):\r\n\t\tself.item_type = item_type\r\n\t\tself.life = self.item_type.life\r\n\t\tself.status = const.ITEM_STATUS_CREATE\r\n\t\tself.in_brick = False\r\n\t\tself.index = 0\r\n\t\t# 初始化位置\r\n\t\tposx = random.randint(1, const.GRID_NUM_X - 2)\r\n\t\tself.gridpos = (posx, 0)\r\n\t\tif posx == 1:\r\n\t\t\tposx = const.GRID_SIZE * 3 / 2\r\n\t\telse:\r\n\t\t\tposx = const.GRID_SIZE * posx + const.GRID_SIZE / 2\r\n\t\tself.pos = (posx, 0)\r\n\r\n\t# 获得道具处理自己,后续操作期待中。。。\r\n\tdef on_eaten(self):\r\n\t\tself.status = const.ITEM_STATUS_KILL\r\n\r\n\t# 先无视...\r\n\tdef on_brick_eaten(self):\r\n\t\tpass\r\n\r\n# 道具管理器\r\nclass ItemManager(object):\r\n\r\n\tdef __init__(self):\r\n\t\tself._items_prototype = {}\r\n\t\tself.rate_sum = 0 # 全部类型的概率饼图总值\r\n\t\tself.rate_in_brick = const.ITEM_IN_BRICK_RATE\r\n\r\n\t# 初始化道具类型\r\n\tdef init_item_types(self):\r\n\t\tprototypes = self._items_prototype\r\n\r\n\t\tprototypes[const.ITEM_TYPE_FIRE_BALL] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_FIRE_BALL, const.ITEM_RATE_BALL_TYPE, usage=True)\r\n\t\tprototypes[const.ITEM_TYPE_ICE_BALL] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ICE_BALL,const. ITEM_RATE_BALL_TYPE, usage=True)\r\n\t\tprototypes[const.ITEM_TYPE_THUNDER_BALL] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_THUNDER_BALL, const.ITEM_RATE_BALL_TYPE, usage=True)\r\n\r\n\t\t# 加bar生命的物品\r\n\t\tprototypes[const.ITEM_TYPE_ADD_BAR_LIFE] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ADD_BAR_LIFE, 80, 150)\r\n\t\t# 减bar生命的物品\r\n\t\tprototypes[const.ITEM_TYPE_SUB_BAR_LIFE] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_SUB_BAR_LIFE, 80, -150)\r\n\t\t# 加bar能量的物品\r\n\t\tprototypes[const.ITEM_TYPE_ADD_BAR_EN] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ADD_BAR_EN, 50, 50)\r\n\r\n\t\t# 加ctrlbar的加速度的物品\r\n\t\tprototypes[const.ITEM_TYPE_ADD_BAR_ACC] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ADD_BAR_ACC, 50, last_time=10, usage=True, name='Speed Up')\r\n\t\t# 加bar长度的物品\r\n\t\tprototypes[const.ITEM_TYPE_ADD_BAR_LEN] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ADD_BAR_LEN, 150, last_time=20, usage=True, name='Longest')\r\n\t\t# 时间减慢物品\r\n\t\tprototypes[const.ITEM_TYPE_TIME_SLOW] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_TIME_SLOW, 150, last_time=5, usage=True, name='Time Slow')\r\n\t\t# 威力加强\r\n\t\tprototypes[const.ITEM_TYPE_ADD_DAMAGE] = \\\r\n\t\t\t\tItemType(const.ITEM_TYPE_ADD_DAMAGE, 100, last_time=5, usage=True, name='Damage Up')\r\n\r\n\t\t# #\r\n\t\tfor index in prototypes:\r\n\t\t\tself.rate_sum += prototypes[index].rate\r\n\r\n\tdef init_items(self):\r\n\t\tself.interval = const.ITEM_INTERVAL\r\n\t\tself.create_time = self.interval\r\n\t\tself.itemdict = {}\r\n\t\tself.itemnum = 0\r\n\t\tself.cur_id = 1\r\n\t\tself.level = 1\r\n\r\n\tdef get_item_type(self, id):\r\n\t\tassert id in self._items_prototype\r\n\t\treturn self._items_prototype[id]\r\n\r\n\t# 产生新道具\r\n\tdef create_item(self, type_id):\r\n\t\tif type_id == 0:\r\n\t\t\treturn 
None\r\n\t\tassert(type_id in self._items_prototype)\r\n\t\titem = Item(self._items_prototype[type_id])\r\n\t\treturn item\r\n\r\n\t# 道具落到砖头里面\r\n\tdef on_hit_brick(self, brick, item):\r\n\t\titem.pos = (item.pos[0], item.gridpos[1] * const.GRID_SIZE + const.GRID_SIZE / 2)\r\n\t\tin_rate = random.randint(1, 100)\r\n\t\tif in_rate <= self.rate_in_brick:\r\n\t\t\titem.status = const.ITEM_STATUS_IN_BRICK\r\n\t\t\tbrick.has_item = True\r\n\t\telse:\r\n\t\t\tif item.gridpos[1] < 14:\r\n\t\t\t\titem.gridpos = (item.gridpos[0], item.gridpos[1] + 1)\r\n\t\t\telse:\r\n\t\t\t\titem.gridpos = (item,gridpos[0], 14)\r\n\r\n\t# 道具跟随下压效果\r\n\tdef on_add_line(self):\r\n\t\tfor index in self.itemdict:\r\n\t\t\titem = self.itemdict[index]\r\n\t\t\tif item.status == const.ITEM_STATUS_IN_BRICK:\r\n\t\t\t\titem.gridpos = (item.gridpos[0], item.gridpos[1] + 1)\r\n\t\t\t\titem.pos = (item.pos[0], item.gridpos[1] * const.GRID_SIZE + const.GRID_SIZE / 2)\r\n\r\n\t# 道具消失\r\n\tdef on_kill(self, index):\r\n\t\tself.itemnum -= 1\r\n\t\tdel self.itemdict[index]\r\n\r\n\t# 道具碰到球, 先无视掉\r\n\tdef on_hitball(self):\r\n\t\tpass\r\n\r\n\t# 道具下落\r\n\tdef drop_item_at(self, index_x, index_y):\r\n\t\tfor index in self.itemdict:\r\n\t\t\titem = self.itemdict[index]\r\n\t\t\tif item.gridpos == (index_x, index_y) and item.status == const.ITEM_STATUS_IN_BRICK:\r\n\t\t\t\titem.status = const.ITEM_STATUS_ACTIVE\r\n\t\t\t\treturn index\r\n\r\n\t# 根据几率生成新道具\r\n\tdef new_item(self):\r\n\t\t# 产生一个小于self.rate_sum的随机数\r\n\t\t# 根据随机数减去各种物品中的rate来确定产生哪种物品\r\n\t\tid = random.randint(1, self.rate_sum)\r\n\t\titem = None\r\n\t\tfor index in self._items_prototype:\r\n\t\t\ttype = self._items_prototype[index]\r\n\t\t\tid -= type.rate\r\n\t\t\tif id <= 0:\r\n\t\t\t\titem = self.create_item(type.type)\r\n\t\t\t\tbreak\r\n\r\n\t\tif item == None:\r\n\t\t\treturn None\r\n\t\titem.status = const.ITEM_STATUS_ACTIVE\r\n\t\titem.index = self.cur_id\r\n\t\tself.itemdict[self.cur_id] = item\r\n\t\tself.cur_id += 1\r\n\t\tself.itemnum += 1\r\n\r\n\t\treturn self.cur_id - 1\r\n\r\n\t# 道具管理相应更新\r\n\tdef update(self, level, brickgrids, bottombar, ball, time):\r\n\t\tcreatenew = [] # 新增加的物品\r\n\t\tdrop_item = [] # 掉落出去的物品\r\n\t\teaten_item = [] # 被吃掉的物品\r\n\t\tif self.level != level:\r\n\t\t\tself.rate_in_brick = const.ITEM_IN_BRICK_RATE + level * const.ITEM_IN_RATE_FACTOR\r\n\t\tif self.create_time <= 0:\r\n\t\t\tself.create_time = self.interval\r\n\t\t\t# 是时候产生物品了\r\n\t\t\tif self.itemnum < const.ITEM_MAX_NUM:\r\n\t\t\t\tid = self.new_item()\r\n\t\t\t\tif id:\r\n\t\t\t\t\tcreatenew.append(id)\r\n\t\telse:\r\n\t\t\tself.create_time -= time\r\n\t\t# update物品信息\r\n\t\tfor index in self.itemdict.keys():\r\n\t\t\titem = self.itemdict[index]\r\n\t\t\tif item.status == const.ITEM_STATUS_ACTIVE:\r\n\t\t\t\t# 下一个位置\r\n\t\t\t\tnextposy = item.pos[1] + item.item_type.speed * time\r\n\t\t\t\t# 检测是否到了该grid中心,到了就找是否有砖块\r\n\t\t\t\tif nextposy >= item.gridpos[1] * const.GRID_SIZE + const.GRID_SIZE / 2:\r\n\t\t\t\t\tbrick = brickgrids[item.gridpos[1]][item.gridpos[0]]\r\n\t\t\t\t\tif brick and brick.has_item == False and brick.status != const.BRICK_IGNORE:\r\n\t\t\t\t\t\tself.on_hit_brick(brick, item)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\titem.gridpos = (item.gridpos[0], item.gridpos[1] + 1)\r\n\t\t\t\telse:\r\n\t\t\t\t\titem.pos = (item.pos[0], nextposy)\r\n\t\t\t\t# 检测是否被吃掉\r\n\t\t\t\tif nextposy >= bottombar.pos[1]:\r\n\t\t\t\t\tif item.pos[0] >= bottombar.pos[0] - bottombar.width / 2 \\\r\n\t\t\t\t\t\tand item.pos[0] <= bottombar.pos[0] + bottombar.width 
/2:\r\n\t\t\t\t\t\t\titem.pos = (item.pos[0], bottombar.pos[1])\r\n\t\t\t\t\t\t\tbottombar.on_eate(item)\r\n\t\t\t\t\t\t\titem.on_eaten()\r\n\t\t\t\t\t\t\t# 加入被吃了更新列表\r\n\t\t\t\t\t\t\teaten_item.append(index)\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\titem.gridpos = (item.gridpos[0], 14)\r\n\t\t\t\t\t\titem.status = const.ITEM_STATUS_KILL\r\n\t\t\t\t\t\tdrop_item.append(index)\r\n\t\t\t\t\t\tcontinue\r\n\t\t\tif item.status == const.ITEM_STATUS_KILL:\r\n\t\t\t\tself.on_kill(index)\r\n\t\t\tif item.status == const.ITEM_STATUS_IN_BRICK:\r\n\t\t\t\tcontinue\r\n\t\treturn createnew, drop_item, eaten_item\r\n\r\n","sub_path":"logic/itemmanager.py","file_name":"itemmanager.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"472013852","text":"\"\"\"\n=========================\nMunich Adjustment Example\n=========================\n\nThis example demonstrates how to adjust LDFs by the relationship between Paid\nand Incurred using the MunichAdjustment.\n.\n\"\"\"\n\nimport chainladder as cl\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set_style('whitegrid')\nsns.set_palette('muted')\n\n# Load data\nmcl = cl.load_dataset('mcl')\n# Volume weighted (default) LDFs\ndev = cl.Development().fit_transform(mcl)\n# Traditional Chainladder\ncl_traditional = cl.Chainladder().fit(dev)\n# Munich Adjustment\ndev_munich = cl.MunichAdjustment(paid_to_incurred={'paid':'incurred'}).fit_transform(dev)\ncl_munich = cl.Chainladder().fit(dev_munich)\n\n# Plot data\nfig, (ax, ax2) = plt.subplots(ncols=2, sharex=True, figsize=(10,5))\nplot1_data = cl_munich.ultimate_['paid'].to_frame()\nplot1_data.columns = ['Paid Ultimate']\nplot1_data['Incurred Ultimate'] = cl_munich.ultimate_['incurred'].to_frame()\nplot2_data = (cl_munich.ultimate_['paid']/cl_munich.ultimate_['incurred']).to_frame()\nplot2_data.columns = ['Munich']\nplot2_data['Traditional'] = (cl_traditional.ultimate_['paid']/cl_traditional.ultimate_['incurred']).to_frame()\nplot1_data.plot(kind='bar', ax=ax)\nax.set_ylabel('Ultimate')\nax.set_xlabel('Accident Year')\nax.set_title('Munich Chainladder')\nplot2_data.plot(kind='bar', ax=ax2, ylim=(0,1.25))\nax2.set_title('P/I Ratio Comparison')\nax2.set_xlabel('Accident Year')\ng = plt.ylabel('Paid Ultimate / Incurred Ultimate')\n","sub_path":"examples/plot_munich.py","file_name":"plot_munich.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"32311219","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 11 2n:47:05 2018\n\n@author: zakaria\n\"\"\"\nimport time\nimport math\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter import messagebox\nimport pycosat\nfrom pprint import pprint\n#### Bagian GUI ############\ndef SolveOptionPressed():\n menu.entryconfig(\"Solve\",state=DISABLED)\n Solve(sudoku)\n global index\n #pprint(solusi)\n #print(len(solusi))\n if(jumSol==0):\n msgatbottom.configure(text=\"Solusi 0/0.\")\n messagebox.showinfo(\"Gagal!\", \"Sudoku Tidak Memiliki Solusi\")\n menu.entryconfig(\"Solve\",state=NORMAL)\n elif(jumSol==1):\n msgatbottom.configure(text=\"Solusi 1/1.\")\n messagebox.showinfo(\"Berhasil!\", \"Sudoku Solved\")\n else:\n msgatbottom.configure(text=\"Solusi \"+str(index)+\"/\"+str(jumSol)+\".\")\n messagebox.showinfo(\"Berhasil!\", \"Sudoku Solved\")\n\n index +=1\n menu.entryconfig(\"Next\",state=NORMAL)\ndef Next():\n menu.entryconfig(\"Prev\",state=NORMAL)\n global index\n UpdateBoard(solusi,index)\n msgatbottom.configure(text=\"Solusi \"+str(index)+\"/\"+str(jumSol)+\".\")\n index +=1\n if(index == jumSol+1):\n menu.entryconfig(\"Next\",state=DISABLED)\n\ndef PrebaseN():\n menu.entryconfig(\"Next\",state=NORMAL)\n global index\n UpdateBoard(solusi,index-2)\n msgatbottom.configure(text=\"Solusi \"+str(index-2)+\"/\"+str(jumSol)+\".\")\n index -=1\n if(index == 2):\n menu.entryconfig(\"Prev\",state=DISABLED)\n\n\ndef LoadFile():\n global endflag, origsudoku\n global solusi\n global given\n global index\n index = 1\n given=N*N\n solusi =[]\n filename=askopenfilename()\n sudoku=LoadCSVtoArray(filename)\n endflag=False\n UpdateBoard(sudoku,0)\n menu.entryconfig(\"Next\",state=DISABLED)\n menu.entryconfig(\"Prev\",state=DISABLED)\n menu.entryconfig(\"Solve\",state=NORMAL)\n msgatbottom.configure(text=\"Sudoku siap. 
\"+str(given)+\" Givens\")\n\ndef Sudoku4():\n filemenu.entryconfig(\"Sud4x4\",state=DISABLED)\n filemenu.entryconfig(\"Sud9x9\",state=NORMAL)\n filemenu.entryconfig(\"Sud16x16\",state=NORMAL)\n menu.entryconfig(\"Solve\",state=DISABLED)\n menu.entryconfig(\"Next\",state=DISABLED)\n menu.entryconfig(\"Prev\",state=DISABLED)\n global n,N,N1,orig_sudoku,sudoku,cell\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j].grid_forget()\n n=2\n N1=5\n N=4\n sudoku = [[0 for x in range(N)] for x in range(N)]\n orig_sudoku = [[0 for x in range(N)] for x in range(N)]\n cell = [[1 for x in range(N1)] for x in range(N1)]\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j] = Label(frame, width=6, font=\"bold\", height=3, relief=\"sunken\")\n cell[i][j].grid(row=i,column=j, padx=3, pady=2)\n cell[i][j].configure(bg=GridColor(i,j))\n msgatbottom.configure(text=\"Masukkan Sudoku\")\n\ndef Sudoku9():\n filemenu.entryconfig(\"Sud4x4\",state=NORMAL)\n filemenu.entryconfig(\"Sud9x9\",state=DISABLED)\n filemenu.entryconfig(\"Sud16x16\",state=NORMAL)\n menu.entryconfig(\"Solve\",state=DISABLED)\n menu.entryconfig(\"Next\",state=DISABLED)\n menu.entryconfig(\"Prev\",state=DISABLED)\n global n,N,N1,orig_sudoku,sudoku,cell\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j].grid_forget()\n n=3\n N1=10\n N=9\n sudoku = [[0 for x in range(N)] for x in range(N)]\n orig_sudoku = [[0 for x in range(N)] for x in range(N)]\n cell = [[1 for x in range(N1)] for x in range(N1)]\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j] = Label(frame, width=6, font=\"bold\", height=3, relief=\"sunken\")\n cell[i][j].grid(row=i,column=j, padx=3, pady=2)\n cell[i][j].configure(bg=GridColor(i,j))\n msgatbottom.configure(text=\"Masukkan Sudoku\")\n\ndef Sudoku16():\n filemenu.entryconfig(\"Sud4x4\",state=NORMAL)\n filemenu.entryconfig(\"Sud9x9\",state=NORMAL)\n filemenu.entryconfig(\"Sud16x16\",state=DISABLED)\n menu.entryconfig(\"Solve\",state=DISABLED)\n menu.entryconfig(\"Next\",state=DISABLED)\n menu.entryconfig(\"Prev\",state=DISABLED)\n global n,N,N1,orig_sudoku,sudoku,cell\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j].grid_forget()\n n=4\n N1=17\n N=16\n sudoku = [[0 for x in range(N)] for x in range(N)]\n orig_sudoku = [[0 for x in range(N)] for x in range(N)]\n cell = [[1 for x in range(N1)] for x in range(N1)]\n for i in range(1,N1):\n for j in range(1,N1):\n cell[i][j] = Label(frame, width=2, font=\"bold\", height=1, relief=\"sunken\")\n cell[i][j].grid(row=i,column=j, padx=1, pady=1)\n cell[i][j].configure(bg=GridColor(i,j))\n msgatbottom.configure(text=\"Masukkan Sudoku\")\n\ndef LoadCSVtoArray(filename):\n import csv\n with open(filename) as f:\n reader = csv.reader(f)\n i=0\n for row in reader:\n sudoku[i]=row\n #del sudoku[i][-1]\n i=i+1\n for i in range(0,N):\n for j in range(0,N):\n a=sudoku[i][j]\n if(len(a)==0):\n sudoku[i][j]=orig_sudoku[i][j]=0\n elif(a=='n'):\n sudoku[i][j]=orig_sudoku[i][j]=pop(sudoku[i][j])\n else:\n sudoku[i][j]=orig_sudoku[i][j]=int(sudoku[i][j])\n #print(sudoku)\n return sudoku\n\ndef About():\n txt=\"Masukkan sudoku dengan menekan 'File' pada menu lalu menekan 'Load'. Sudoku harus disimpan dengan format CSV. Tekan 'Solve' untuk memecahkan sudoku. Jika sudoku memiliki lebih dari satu solusi tekan 'Next' untuk melihat solusi lainnya. 
Tekan'Prev' untuk melihat solusi sebelumnya\"\n messagebox.showinfo(\"Help\", txt)\n\ndef UpdateBoard(board,n):\n global given\n if(n<1):\n for i in range(0,N):\n for j in range(0,N):\n a=board[i][j]\n if a==0:\n given -=1\n cell[i+1][j+1].configure(text=\"\")\n else:\n if (orig_sudoku[i][j]==0):\n txtcolor=\"red\"\n else:\n txtcolor=\"black\"\n cell[i+1][j+1].configure(text=a, fg=txtcolor)\n else:\n for i in range(0,N):\n for j in range(0,N):\n a=board[n-1][i][j]\n if a==0:\n cell[i+1][j+1].configure(text=\"\")\n else:\n if (orig_sudoku[i][j]==0):\n txtcolor=\"red\"\n else:\n txtcolor=\"black\"\n cell[i+1][j+1].configure(text=a, fg=txtcolor)\n\n\n\ndef GridColor(i,j):\n if n%2==1:\n z=(n*int((i-1)/n))+1 + int((j-1)/n)\n else:\n z=(int((i-1)/n))+1 + int((j-1)/n)\n if z%2==0:\n z=\"ghostwhite\"\n else:\n z=\"yellow\"\n return z\n\n\n\n########## Bagian Sudoku Solver ##########\n\n\ndef ShowBoard(s):\n global solusi\n solusi.append([])\n for x in range(0,N):\n solusi[jumSol-1].append([])\n #print()\n for y in range(0,N):\n solusi[jumSol-1][x].append(s[x][y])\n #print(s[x][y],end=\"\")\n #print()\n\ndef baseN(i, j, v):\n return N*N* (i - 1) + N * (j - 1) + v\n\n#merubah sudoku menjadi DIMACS\ndef sudoku_clauses():\n res = []\n\n #Setidaknya ada satu angka di setiap entri:\n\n for r in range(1, N1):\n for c in range(1, N1):\n l=[]\n for v in range(1, N1):\n l.append(baseN(r,c,v))\n res.append(l)\n\n #Setiap angka muncul paling banyak satu kali di setiap baris:\n\n for r in range(1, N1):\n for v in range(1, N1):\n for c in range(1, N):\n for i in range(c+1,N1):\n res.append([-baseN(r, c, v), -baseN(r, i, v)])\n\n #Setiap angka muncul paling banyak satu kali di setiap kolom:\n for c in range(1, N1):\n for v in range(1, N1):\n for r in range(1, N):\n for i in range(r+1,N1):\n res.append([-baseN(r, c, v), -baseN(i, c, v)])\n\n #Setiap angka muncul paling banyak sekali dalam setiap sub-grid n x n:\n #blok1=0\n for v in range(1, N1):\n for i in range(n):\n for j in range(n):\n for c in range(1, n+1):\n for r in range(1, n):\n for k in range(r + 1, n+1):\n for l in range(1,n+1):\n #blok1+=1\n #print([-((n*i+r)*100+ (n*j+c)*10+d), -((n*i+k)*100+(n*j+l)*10+d)])\n res.append([-baseN((n*i+r), (n*j+c), v), -baseN((n*i+k), (n*j+l), v)])\n\n #print(blok1)\n\n return res\n\n########################### memecahkan sudoku\ndef Solve(sudoku):\n #waktu CPU mulai\n start = time.time()\n clauses = sudoku_clauses()\n for i in range(1, N1):\n for j in range(1, N1):\n d = sudoku[i - 1][j - 1]\n # untuk setiap given.\n if d:\n clauses.append([baseN(i, j, d)])\n\n def read_cell(i, j):\n # mengembalikan i,j\n for d in range(1, N1):\n if baseN(i, j, d) in sol:\n return d\n # memulai SAT solver\n #print(len(clauses))\n #pprint(clauses)\n sol = set(pycosat.solve(clauses))\n #print(sol)\n checker=len(sol)\n counter=0\n global solusi\n global jumSol\n sudokuSol=[]\n if checker == 5:\n #print('Sudoku tidak memiliki solusi')\n jumSol=counter\n while (checker != 5):\n if(counter==100):\n return\n counter+=1\n jumSol=counter\n #print(counter)\n #print(sol)\n #sol = pycosat.solve(cnf)\n\n for i in range(1, N1):\n for j in range(1, N1):\n sudoku[i - 1][j - 1] = read_cell(i, j)\n\n #print('Answer: '+str(counter))\n #print(numclause)\n ShowBoard(sudoku)\n #print(len(sol))\n #print()\n sudokuSol.append(sudoku)\n clauses.append([-x for x in sol])\n sol = set(pycosat.solve(clauses))\n checker=len(sol)\n #print(sudokuSol[0])\n end = time.time()\n print(\"Time: \"+str(end - start))\n if(counter<2):\n UpdateBoard(sudoku,0)\n else:\n 
UpdateBoard(solusi,1)\n\n\n\n############## Main #########################\nN=16\nn=int(math.sqrt(N))\nN1=N+1\nsudoku = [[0 for x in range(N)] for x in range(N)]\norig_sudoku = [[0 for x in range(N)] for x in range(N)]\ncell = [[1 for x in range(N1)] for x in range(N1)]\nsolusi = []\ngiven = 0\njumSol = 0\nindex = 1\nendflag=False\nroot = Tk()\nroot.title(\"Sudoku Solver\")\n\nmenu = Menu(root)\nroot.config(menu=menu)\nfilemenu = Menu(menu)\nmenu.add_cascade(label=\"File\", menu=filemenu)\n\nactionmenu= Menu(menu)\nmenu.add_command(label=\"Solve\", state=DISABLED, command=SolveOptionPressed)\n\nhelpmenu = Menu(menu)\nmenu.add_cascade(label=\"Help\", menu=helpmenu)\n\nactionmenu= Menu(menu)\nmenu.add_command(label=\"Prev\", state=DISABLED, command=Prev)\n\nactionmenu= Menu(menu)\nmenu.add_command(label=\"Next\", state=DISABLED, command=Next)\n\n\nfilemenu.add_command(label=\"Load\", command=LoadFile)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Sud4x4\", command=Sudoku4)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Sud9x9\", command=Sudoku9)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Sud16x16\", state=DISABLED, command=Sudoku16)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Exit\", command=root.quit)\n\nhelpmenu.add_command(label=\"About...\", command=About)\n\nframe=Frame(root)\nframe.grid(ipady=1,ipadx=1)\n\nfor i in range(1,N1):\n for j in range(1,N1):\n cell[i][j] = Label(frame, width=2, font=\"bold\", height=1, relief=\"sunken\")\n cell[i][j].grid(row=i,column=j, padx=1, pady=1)\n cell[i][j].configure(bg=GridColor(i,j))\nmsgatbottom=Label(frame,height=1)\nmsgatbottom.grid(row=N1,column=1,columnspan=N,sticky=\"we\")\nmsgatbottom.configure(text=\"Masukkan Sudoku\")\nmainloop()\n","sub_path":"sudokuSolver.py","file_name":"sudokuSolver.py","file_ext":"py","file_size_in_byte":11742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"136383156","text":"# region IMPORTS \nimport os\n\nimport yaml\nimport operator\nimport time\nfrom datetime import date, datetime, timedelta\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom functools import reduce\nfrom peewee import *\n# endregion\n\n# region Logger\nimport logging\nfrom debug import setup_logging\n\nlog = logger = logging.getLogger(\"dbo\")\nsetup_logging()\n# endregion\n\n\n# region GLOBALS \nrealpath = os.path.dirname(os.path.realpath(__file__))\nrp = realpath\n\ndb_path = os.path.join(realpath, 'database.db')\ndb = SqliteDatabase(db_path)\n# endregion\n\n# region FUNCTIONS \ndef read_yaml(filename):\n with open(os.path.join(filename), \"r\", encoding=\"utf-8\") as f:\n data = yaml.safe_load(f)\n return data\n\ndef read_config_data():\n return read_yaml(\"config\")\n\ndef read_config(filename):\n data = read_yaml(os.path.join(realpath, 'config', filename + '.yaml'))\n return data\n\ndef write_config(filename, data):\n with open(os.path.join(realpath, 'config', filename + '.yaml'), \"w+\", encoding=\"utf-8\") as f:\n f.write(yaml.dump(data, default_flow_style=False))\n\n\ndef config(*args):\n conf = read_config('config')\n return reduce(operator.getitem, args, conf)\n\ndef set_config(*args, value=None):\n conf = read_config('config')\n config(conf, args[:-1])[args[-1]] = value\n write_config('config', conf)\n\n\n# endregion\n\n\n\nclass Accounting(Model):\n address = CharField()\n date = DateTimeField()\n upload = IntegerField()\n download = IntegerField()\n\n class Meta:\n database = db\n\nclass MontlyArchive(Model):\n address = CharField()\n date = DateField()\n upload = IntegerField()\n download = IntegerField()\n\n class Meta:\n database = db\n\ndef trunc_datetime(someDate):\n return someDate.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n\ndef compare_months(A, B):\n A = trunc_datetime(A)\n B = trunc_datetime(B)\n return A == B\n\ndef cleanup_database():\n '''\n Remove oldest entries, count their sum and record into MonthlyArchive DB.\n '''\n try:\n now = datetime.utcnow().date()\n data = {}\n query = Accounting.select()\n for entry in query:\n entry_date = entry.date.date()\n if entry_date < now - relativedelta(months=config(\"general\", \"keep_months\")):\n if entry_date not in data:\n data[entry_date] = [entry.date, entry.address, entry.download, entry.upload]\n else:\n data[entry_date][1] = entry.download + data[entry_date][1]\n data[entry_date][2] = entry.upload + data[entry_date][2]\n entry.delete_instance()\n for dt, data in sorted(data.items()):\n MontlyArchive.create(\n date = dt,\n address = data[0],\n download = data[1],\n upload = data[2]\n )\n except Exception as e:\n log.error(\"Database cleanup failed.\", exc_info=True)\n\ndef cleanup_database_loop():\n while True:\n cleanup_database()\n # TODO Replace with proper scheduler\n time.sleep(21600) # 21600 second = 6 hours\n\nlog.info(\" \".join([\"Using DB\", str(db), \"At path:\", str(db_path)]))\n\ndb.connect()\ndb.create_tables([Accounting])\n\n","sub_path":"dbo.py","file_name":"dbo.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"149214278","text":"# -*- coding: utf-8 -*-\n\nclass DefineContainer:\n def __init__(self):\n \"\"\"\n dictionary\n +-- Comiket (unicode)\n | +-- number (unicode) data:(int)\n | +-- name (unicode) data:(unicode)\n |\n +-- cutInfo (unicode)\n | +-- width (unicode) data:(int)\n | +-- height (unicode) data:(int)\n | +-- origin_x (unicode) data:(int)\n | +-- origin_y (unicode) data:(int)\n | +-- offset_x (unicode) data:(int)\n | +-- offset_y (unicode) data:(int)\n |\n +-- mapTableInfo (unicode)\n | +-- width (unicode) data:(int)\n | +-- height (unicode) data:(int)\n | +-- origin_x (unicode) data:(int)\n | +-- origin_y (unicode) data:(int)\n |\n +-- ComiketDate (unicode)\n | +-- 20XXXXXX (開催日付)(int)\n | | +-- year (unicode) data:(int)\n | | +-- month (unicode) data:(int)\n | | +-- day (unicode) data:(int)\n | | +-- week (unicode) data:(unicode)\n | | +-- page (unicode) data:(int)\n | +-- 20XXXXXX\n | :\n |\n +-- ComiketMap\n | +-- 東123 (地図名)(unicode)\n | | +-- name (unicode) data:(unicode)\n | | +-- map_key (unicode) data:(unicode)\n | | +-- print_area (unicode) data:(int[X,X,X,X])\n | | +-- small_map_key (unicode) data:(unicode)\n | | +-- fine_print_area (unicode) data:(int[X,X,X,X])\n | | +-- reverse (unicode) data:(bool)\n | +-- 東456\n | :\n |\n +-- ComiketArea\n | +-- 東123壁 (地区名)(unicode)\n | | +-- name (unicode) data:(unicode)\n | | +-- map (unicode) data:(unicode)\n | | +-- block (unicode) data:(unicode)\n | | +-- print_area unicode) data:(int[X,X,X,X])\n | | +-- small_map_key (key=東123壁,東456壁,西1壁,西2壁を除く)(unicode) data:(unicode)\n | | +-- fine_print_area (key=東123壁,東456壁,西1壁,西2壁を除く)(unicode) data:(int[X,X,X,X])\n | +-- 東1\n | :\n |\n +-- ComiketGenre (unicode)\n +-- 100 (key=ジャンルコード(int) value=ジャンル名(unicode))\n +-- 110\n :\n \"\"\"\n\n self.comiket_number = None\n self.comiket_name = None\n self.cut_info = DefineContainer.CutInfoContainer()\n self.map_table_info = DefineContainer.MapTableInfoContainer()\n self.comiket_date = {}\n self.comiket_map = {}\n self.comiket_area = {}\n self.comiket_genre = {}\n\n class CutInfoContainer:\n def __init__(self):\n self.width = None\n self.height = None\n self.origin_x = None\n self.origin_y = None\n self.offset_x = None\n self.offset_y = None\n\n class MapTableInfoContainer:\n def __init__(self):\n self.width = None\n self.height = None\n self.origin_x = None\n self.origin_y = None\n\n class ComiketDateContainer:\n def __init__(self):\n self.year = None\n self.month = None\n self.day = None\n self.week = None\n self.page = None\n\n class ComiketMapContainer:\n def __init__(self):\n self.name = None\n self.map_key = None\n self.print_area = None\n self.small_map_key = None\n self.fine_print_area = None\n self.reverse = None\n\n class ComiketAreaContainer:\n def __init__(self):\n self.name = None\n self.map = None\n self.block = None\n self.print_area = None\n self.small_map_key = None\n self.fine_print_area = None\n\nif __name__ == \"__main__\":\n pass","sub_path":"src/container/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"125102992","text":"import csv;\nimport math;\nimport random;\nfrom timeit import Timer;\n\n#从csv文件中读取数据,并转换为float\ndef load_cdv(filename):\n data = csv.reader(open(filename,'r'))\n dataset = list(data)\n for i in range(len(dataset)):\n dataset[i] = [float(x) for x in dataset[i]]\n return dataset\n\n#将dataset按 radio:(1-radio) 的比例分成训练数据和测试数据两部分\ndef split_dataset(dataset,radio):\n train_data = dataset\n test_data = []\n test_size = int(len(dataset)*(1-radio))\n while len(test_data) str:\n \"\"\"\n Helper function to escape telegram markup symbols.\n Args:\n text (:obj:`str`): The text.\n version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n entity_type (:obj:`str`, optional): For the entity types ``PRE``, ``CODE`` and the link\n part of ``TEXT_LINKS``, only certain characters need to be escaped in ``MarkdownV2``.\n See the official API documentation for details. Only valid in combination with\n ``version=2``, will be ignored else.\n \"\"\"\n if int(version) == 1:\n escape_chars = r\"_*`[\"\n elif int(version) == 2:\n if entity_type in [\"pre\", \"code\"]:\n escape_chars = r\"\\`\"\n elif entity_type == \"text_link\":\n escape_chars = r\"\\)\"\n else:\n escape_chars = r\"_*[]()~`>#+-=|{}.!\"\n else:\n raise ValueError(\"Markdown version must be either 1 or 2!\")\n\n return re.sub(f\"([{re.escape(escape_chars)}])\", r\"\\\\\\1\", text)\n\n\ndef get_user_link(user_id: Text, mention_text: Text):\n return f\"[{mention_text}](tg://user?id={user_id})\"\n","sub_path":"dataset/actions/utils/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"23701038","text":"import markdown\n\nimport sys,os\n\npath = sys.argv[1]\nwrite_target = sys.argv[2]\n\ndef make_html(s, filename):\n template = open(\"html_template.html\", encoding=\"utf-8\").read()\n spl = template.split('---bodysplit---')\n open(filename, 'w', encoding=\"utf-8\").write(spl[0]+'\\n\\n'+s+'\\n\\n'+spl[1])\n\nfor a in os.listdir(path):\n\n if a[-3:] == '.md':\n print(a)\n # htmlfile = open(write_target+\"\\\\\"+a+\".html\",'w', encoding=\"utf-8\")\n md = open(path+\"\\\\\"+a, encoding=\"utf-8\").read()\n md = md.replace(\"```text\", \"\")\\\n .replace(\"```rust,ignore\", \"\")\\\n .replace(\"```rust\", \"\")\\\n .replace(\"```\", \"
\")\n # htmlfile.write(html)\n html = markdown.markdown(md)\n\n make_html(html, write_target+\"\\\\\"+a+\".html\")\n\n","sub_path":"_projlab/_various/rustbook.py","file_name":"rustbook.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"524037302","text":"import sys\r\nimport os\r\nimport json\r\nimport pprint\r\nimport zlib\r\nfrom collections import defaultdict\r\nfrom settings import N_BINS, N_THREAD_BINS, OUT_DIR\r\nfrom multiprocessing import Pool\r\n\r\ndef get_bin(s, n_bins=N_THREAD_BINS):\r\n\treturn zlib.adler32(s.encode('utf-8')) % n_bins \r\n\r\ndef reshuffle_by_thread_bin(bin_idx, node_root_file, indir, outdir):\r\n\tprint(bin_idx)\r\n\twith open(node_root_file) as f:\r\n\t\tnode_to_root = json.load(f)\r\n\t\r\n\tbin_to_emails = defaultdict(dict)\r\n\r\n\twith open(os.path.join(indir, '%02d.json' % bin_idx)) as f:\r\n\t\temail_dict = json.load(f)\r\n\t\tfor key, email in email_dict.items():\r\n\t\t\tif key not in node_to_root: continue\r\n\t\t\temail['thread_root_key'], email['thread_level'] = node_to_root[key]\r\n\t\t\tbin_to_emails[get_bin(email['thread_root_key'])][key] = email \r\n\r\n\tfor thread_bin_idx, email_dict in bin_to_emails.items():\r\n\t\tprint(bin_idx, thread_bin_idx, len(email_dict))\r\n\t\toutfile = os.path.join(outdir, '%02d' % thread_bin_idx, '%02d.json' % bin_idx)\r\n\t\twith open(outfile, 'w') as f:\r\n\t\t\tjson.dump(email_dict, f)\r\n\r\nif __name__ == '__main__':\r\n\troot_indir = os.path.join(OUT_DIR, 'reduce_dedup_extracted/content')\r\n\troot_outdir = os.path.join(OUT_DIR, 'reduce_dedup_thread/content')\r\n\tfor thread_bin_idx in range(N_THREAD_BINS):\r\n\t\tprint(thread_bin_idx)\r\n\t\ttry:\r\n\t\t\tos.mkdir(os.path.join(root_outdir, '%02d' % thread_bin_idx))\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\n\r\n\tnode_root_file = os.path.join(OUT_DIR, 'reduce_dedup_thread/node_to_root.json')\r\n\t\r\n\tpool = Pool(4)\r\n\tpool.starmap(reshuffle_by_thread_bin,\r\n\t\t[(bin_idx, node_root_file, root_indir, root_outdir) for bin_idx in range(N_BINS)])","sub_path":"reshuffle_by_thread_bin.py","file_name":"reshuffle_by_thread_bin.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"35578979","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf1 = pd.read_csv('client.csv', encoding='cp949')\ndf2 = pd.read_csv('campaign_etc.csv', encoding='cp949')\ndf = pd.merge(df1, df2, on='index', how='inner')\ndf.drop(['index', 'poutcome'], axis='columns', inplace=True)\ndf.insert(5, 'housing', '')\ndf.loc[:25131, 'housing'] = 'yes'\ndf.loc[25131:, 'housing'] = 'No'\ndf.rename(columns={'y':'deposit'},inplace=True)\ndf.replace({'yes': 1, 'yes': 1, 'No': 0, 'no':0},inplace=True)\n\n# age 결측치 (12개) 중앙값으로 채우기\nage_median = df['age'].median(axis=0) # age_median = 39\ndf['age'].fillna(age_median, inplace=True)\ndf.isnull().sum() # age 결측치 0개 된 것 확인 가능\n\n# duration 결측치 (123개) 중앙값으로 채우기\nduration_median = df['duration'].median(axis=0) # duration_median = 181\ndf['duration'].fillna(duration_median, inplace=True)\ndf.isnull().sum() # duration 결측치 0개 된 것 확인 가능\n\n# balance 결측치 (16개) 제거\n## 남은 결측치가 balance 결측치뿐이라 전체에 대해 dropna 실행가능\ndf.dropna(inplace=True)\ndf.isnull().sum() # 모든 결측치 사라짐\n\n# age 는 자를 것 없음. 그대로 유지 (범위 자체가 크지 않고 최댓값이 95세로 오류값이라고 보기 어려우므로)\n\n\n# balance 이상치 처리\n# 음수값 제거\nbalance_negative = df['balance'] < 0\ndf.drop(df.loc[balance_negative].index, axis='index', inplace=True)\n\n# IQR 기준 이상치 제거 -> 너무 많이 잘려나가서 기각\n#q1, q3 = df['balance'].quantile(0.25), df['balance'].quantile(0.75)\n#iqr = q3-q1\n#condition = (df['balance'] > q1-1.5*iqr) & (df['balance'] < q3+1.5 *iqr)\n#df['balance'][condition].describe()\n\n# 50000 이상인 값 제거 : boxplot 상으로 시각적으로 판단\nbalance_outlier = df['balance'] > 50000\ndf.drop(df.loc[balance_outlier].index, axis='index', inplace=True)\n\n\n# day 이상치 처리\nday_outlier = (df['day']<0) | (df['day']>31)\ndf.drop(df.loc[day_outlier].index, axis='index', inplace=True)\n\n\n#campaign 는 유지 (25%-1.5IQR, 75%+1.5IQR 기준으로 outlier 확인 했을 때에 3234개가 outlier로 잡히고, 41949개가 남음.상대적으로 outlier 비율 적다고 생각하여 유지)\n\n\n#duration 이상치 처리\n# IQR 기준 이상치 제거\nduration_q1, duration_q3 = df['duration'].quantile(0.25), df['duration'].quantile(0.75)\nduration_iqr = duration_q3-duration_q1\ncondition = (df['duration'] > duration_q1-1.5*duration_iqr) & (df['duration'] < duration_q3+1.5 *duration_iqr)\ndf['duration'][condition].describe()\n\n\n# pdays 이상치 처리\n# 음수값 제거\npdays_negative = df['pdays'] <0\ndf.drop(df.loc[pdays_negative].index, axis='index', inplace=True)\n# 700 이상 제거 : boxplot 상에서 시각적으로 판단\npdays_outlier = df['pdays'] > 700\ndf.drop(df.loc[pdays_outlier].index, axis='index', inplace=True)\n\n\n# previous 이상치 처리\n# 혼자 동떨어져있는 250 이상 값 제거하기 : 혼자 너무 동떨어져있는데 boxplot 상에서 확인되어서 먼저 제거\nprevious_outlier = df['previous'] > 250\ndf.drop(df.loc[previous_outlier].index, axis='index', inplace=True)\n\n# 30 이상 제거: boxplot 상으로 시각적으로 판단\nprevious_outlier2 = df['previous'] > 30\ndf.drop(df.loc[previous_outlier2].index, axis='index', inplace=True)\n\ndf = df.T.drop_duplicates().T\n\n# 최종적으로 7718개의 데이터가 남았음. 
이를 가지고 분석 진행하기로 함.\n\n# age 분석\ndf = df.astype({'age':float})\nage = plt.subplots(figsize=(12, 10))\nage = sns.violinplot(data=df, y='age')\nage.set_title(\"고객층의 age 분포\", size=20)\n\nage1 = plt.subplots(figsize=(12, 10))\nplt.rcParams['font.family'] = 'Malgun Gothic'\nage1 = sns.distplot(df['age'], bins=15)\nage1.set_title(\"고객층의 age 분포\", size=20)\n\n# marital 분석\nmarital = df['marital'].value_counts()\nplt.rc('font', family='Malgun Gothic')\nplt.rcParams['font.size'] = 15\nplt.pie(marital, labels=['married', 'single', 'divorced'], colors=['lightblue', 'darksalmon', 'mediumpurple']\n , autopct='%.1f%%', explode=(0.05, 0, 0.03))\nplt.legend()\nplt.show()\n\n# job 분석\njob = df['job'].value_counts()\nplt.rcParams['font.size'] = 15\nplt.pie(job, labels=['management', 'blue-collar', 'technician', 'admin', 'services', 'retired', 'student', 'self-employed', 'entrepreneur', 'unemployed', 'housemaid', 'unknown']\n , autopct='%.1f%%', explode=(0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01))\nplt.legend(loc='center left', bbox_to_anchor=(1.1, 0.5))\nplt.show()\n\n# pdays 분석\npdays = plt.subplots(figsize=(12, 10))\npdays = df['pdays'].plot(kind='box')\npdays.set_title(\"마지막 contact 으로부터 지난 날의 수\")\ndf['pdays'].quantile(0.25), df['pdays'].quantile(0.5), df['pdays'].quantile(0.75), df['pdays'].mean()\n\n# duration 분석\nduration1 = plt.subplots(figsize=(12, 10))\nplt.rcParams['font.family'] = 'Malgun Gothic'\nduration = sns.distplot(df['duration'], bins=15)\nduration.set_title(\"duration 분포\", size=20)\n\n# age와 duration 관계\nimport numpy as np\n\nsns.set(rc = {'figure.figsize':(12,10)})\nage_duration = sns.stripplot(x='age', y='duration', hue=\"deposit\", palette=[\"green\", \"red\"], data=df, alpha=0.5)\nplt.title(\"customer's age and duration\", size=20)\nplt.xticks(rotation=45)\nplt.xticks(np.arange(0, 95, 5))\nplt.yticks(np.arange(0, 2500, 500))\nplt.legend(loc='center right')\nage_duration\n\n## deposit 성사된 데이터들만 가지고 age와 duration 관계 다시 확인\nsns.set(rc = {'figure.figsize':(12,10)})\nage_duration = sns.stripplot(x='age', y='duration',palette=[\"mediumpurple\"], data=df.loc[df['deposit']==1], alpha=0.5)\nplt.title(\"Yes deposit customer's age and duration\", size=20)\nplt.xticks(rotation=45)\nplt.xticks(np.arange(0, 95, 5))\nplt.yticks(np.arange(0, 2500, 500))\nplt.legend(loc='center right')\nage_duration\n\n# balance와 duration 의 관계\nsns.set(rc = {'figure.figsize':(12,10)})\nbalance_duration = sns.stripplot(x='balance', y='duration', hue=\"deposit\", palette=[\"lightblue\", \"salmon\"], data=df, alpha=0.5)\nplt.title(\"customer's balance and duration\", size=20)\nplt.xticks(rotation=90)\n#balance_duration.set_xticks(range(0,40000, 5000))\nplt.legend(loc='center right')\nbalance_duration\n\n# job에 따른 deposit 성사율\ndeposit_true = df['deposit'] == 1\ndeposit_false = df['deposit'] == 0\ntrue = df.loc[deposit_true]\ntrue_job_groups = true.groupby(\"job\")\ntrue_series = true_job_groups['deposit'].count() # 모든 column에서 데이터 수는 동일하기는 한데 굳이 dataframe으로 같은 값 여러개 띄울 필요 없어서 한 column 지정해서 호출한 것\nfalse = df.loc[deposit_false]\nfalse_job_groups = false.groupby(\"job\")\nfalse_series = false_job_groups['deposit'].count()\n\ndf_concat = pd.concat([true_series, false_series], axis=1)\ndf_concat.columns = ['true', 'false']\ndf_concat['ratio'] = df_concat['true']/(df_concat['true']+df_concat['false'])\n\nplt.rcParams['font.family'] = 'Malgun Gothic'\njob_concat = df_concat.plot(kind='bar', y='ratio')\njob_concat.set_title(\"직업군에 따른 적금 개설 성사 비율\")\n\n# marital에 따른 deposit 성사율\ndeposit_true = 
df['deposit'] == 1\ndeposit_false = df['deposit'] == 0\ntrue = df.loc[deposit_true]\ntrue_marital_groups = true.groupby(\"marital\")\ntrue_series1 = true_marital_groups['deposit'].count() # 모든 column에서 데이터 수는 동일하기는 한데 굳이 dataframe으로 같은 값 여러개 띄울 필요 없어서 한 column 지정해서 호출한 것\nfalse = df.loc[deposit_false]\nfalse_marital_groups = false.groupby(\"marital\")\nfalse_series1 = false_marital_groups['deposit'].count()\n\ndf_concat = pd.concat([true_series1, false_series1], axis=1)\ndf_concat.columns = ['true', 'false']\ndf_concat['ratio'] = df_concat['true']/(df_concat['true']+df_concat['false'])\n\nplt.rcParams['font.family'] = 'Malgun Gothic'\nmarital_concat = df_concat.plot(kind='bar', y='ratio', color='salmon')\nmarital_concat.set_title(\"결혼 상태에 따른 적금 개설 성사 비율\")\n\n# 그 외 요소들의 상관계수 확인\ndf= df.astype({'default':'int','loan':'int','day':'int','campaign':'int','pdays':'int','previous':'int','deposit':'int','housing':'int', 'age':'float', 'balance':'float','duration':'float'})\nsns.heatmap(df.corr(), annot=True)\n\n# 상관관계 clustermap으로 확인\ncorr = df.corr()\nsns.clustermap(corr)\n\n# deposit 과의 상관계수 내림차순 정렬\ndf.corr()['deposit'].sort_values(ascending=False)","sub_path":"codeit/final_project_analysis.py","file_name":"final_project_analysis.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"286838214","text":"#!/usr/bin/env python\nimport time\nimport math\nfrom nav_msgs.msg import OccupancyGrid # Global map \nfrom geometry_msgs.msg import PoseArray, PoseStamped, Pose2D, Pose, Twist # Global path \nimport rospy \nimport sys \nfrom visualization_msgs.msg import Marker, MarkerArray # Debug drawing \nimport tf2_ros \nfrom tf import transformations\n\n#----- Load paramters -----# \n# foot_print = [[-0.57, 0.36],[0.57, 0.36],[0.57, -0.36],[-0.57, -0.36]]\np1 = 1 # V, p controller # How much you care about 'r'\np2 = 1/0.375 # W, p controller # How much you care about 'alpha'\n# p3 = # How much you care about 'beta'\nVel_limit = 0.7 # Should be in class, modified dynamic.\nIS_ENABLE_MOVE_BACKWORD = True \n\nMAX_LINEAR_DEC = 0.7 # m/s^2\nMAX_ANGULAR_DEC = 0.7 # rad/s^2\nSAFTY_DISTANCE = 0.1 # m \nSAFTY_ANGLE = 0.05\n\n\nTOUCH_ZONE_RADIUS = 0.05 # m \nTOUCH_ZONE_ANGLE = 0.017 # rad \nADJUST_ZONE_RADIUS = 0.20 # m \n\n# LVP = LINEAR_VELOCITY_PLANNER()\npub_marker = rospy.Publisher('markers', MarkerArray,queue_size = 1, latch=False )\n\nclass LINEAR_VELOCITY_PLANNER():\n def __init__(self):\n #----- Current Pose ------# \n self.current_position = PoseStamped()\n # ---- Current Goal ------# \n self.goal = Pose2D() # PoseStamped()\n self.goal_mode = \"goal\"# \"waypoint\" # \n # ---- State Machine -----#\n self.state = \"stand_by\" # \"abort\", \"timeout\" , \"moving\"\n #------- #\n self.markerArray = MarkerArray()\n #----- Publisher ------# \n self.pub_cmd_vel = rospy.Publisher('/cmd_vel', Twist ,queue_size = 10, latch=False)\n self.cmd_vel = Twist()\n #----- Counting Star ------# \n self.t_start_moving = None \n # ---- Falgs -------# \n self.was_in_touch_zone = False \n\n def reset (self):\n '''\n Clean Current Task , and reset to init.\n '''\n # ---- Current Goal ------# \n self.goal = Pose2D() # PoseStamped()\n # ---- State Machine -----#\n self.state = \"stand_by\" # \"abort\", \"timeout\" , \"moving\"\n #------- #\n self.markerArray = MarkerArray()\n #----- Counting Star ------# \n self.t_start_moving = None \n # ---- Falgs -------# \n self.was_in_touch_zone = False \n\n def clean_screen (self):\n #------- clean screen -------#\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.action = marker.DELETEALL\n self.markerArray.markers.append(marker)\n pub_marker.publish(self.markerArray)\n\n #------- Debug draw -------# \n self.markerArray = MarkerArray()\n\n def goal_CB(self, goal):\n rospy.loginfo (\"Target : \" + str(goal))\n\n #(self.goal.x, self.goal.y) = (goal.pose.position.x , goal.pose.position.y ) \n #self.goal.theta = transformations.euler_from_quaternion(self.pose_quaternion_2_list(goal.pose.orientation))[2]\n self.goal = goal \n self.state = \"moving\"\n # TODO Do something.\n #self.reset()\n #self.navi_goal = self.XY2idx((navi_goal.pose.position.x, navi_goal.pose.position.y))\n self.t_start_moving = time.time()\n\n \n\n def current_position_CB(self, current_position):\n self.current_position = current_position\n\n def sign (self, x):\n if x >= 0: \n return 1\n else: \n return -1 \n \n def angle_substitution(self, ang):\n '''\n Make sure ang is 0 ~ pi/2 or -0 ~ -pi/2 \n '''\n ans = (abs(ang) % (2* math.pi)) * self.sign(ang) # Make sure not ot exceed 360\n\n if ans > math.pi :\n # return (2* math.pi) - ans \n ans = ans - (2* math.pi) # Negative \n elif ans < -math.pi : \n ans = (2* math.pi) + ans # Positive \n else: \n pass \n return ans \n\n\n def iterateOnce (self):\n '''\n Switch Case \n '''\n if self.state == \"stand_by\":\n pass\n elif 
self.state == \"reached\":\n rospy.loginfo (\"[Linear_velocity_planner] time spend: \" + str(time.time() - self.t_start_moving))\n self.reset()\n\n elif self.state == \"moving\": \n #----- Get r , alpha , beta ------# \n # pos_dx = self.goal.pose.position.x - self.current_position.pose.position.x\n # pos_dy = self.goal.pose.position.y - self.current_position.pose.position.y\n pos_dx = self.goal.x - self.current_position.pose.position.x\n pos_dy = self.goal.y - self.current_position.pose.position.y\n r = math.sqrt(pos_dx*pos_dx + pos_dy*pos_dy)\n\n r_yaw = math.atan2(pos_dy, pos_dx)\n\n cureent_position_yaw = transformations.euler_from_quaternion(self.pose_quaternion_2_list(self.current_position.pose.orientation))[2]\n alpha = self.angle_substitution(cureent_position_yaw - r_yaw)\n\n goal_yaw = self.goal.theta # transformations.euler_from_quaternion(self.pose_quaternion_2_list(self.goal.pose.orientation))[2]\n beta = self.angle_substitution(cureent_position_yaw - goal_yaw) # cureent_position_yaw - goal_yaw \n\n # Don't make it complicate\n # theta = self.angle_substitution(goal_yaw - r_yaw) # r_yaw - goal_yaw \n\n # ------- Check Go Farword or Backword --------#\n linear_direction = 1 \n if IS_ENABLE_MOVE_BACKWORD: \n if abs(alpha) > math.pi/2:\n rospy.loginfo(\"Decide to go BackWord.\")\n cureent_position_yaw = self.angle_substitution(cureent_position_yaw + math.pi)\n alpha = self.angle_substitution(cureent_position_yaw - r_yaw)\n linear_direction = -1 \n \n if r < ADJUST_ZONE_RADIUS: \n rospy.loginfo (\"++++++++++++++++++++++++++\")\n rospy.loginfo (\"cureent_position_yaw = \" + str(cureent_position_yaw))\n rospy.loginfo (\"r_yaw = \" + str(r_yaw))\n rospy.loginfo (\"r = \" + str(r))\n rospy.loginfo (\"alpha = \" + str(alpha))\n rospy.loginfo (\"beta = \" + str(beta))\n\n # Calculate V \n V = abs(p1*r*math.cos(alpha)) * linear_direction\n\n # Calculate W \n if r < TOUCH_ZONE_RADIUS or self.was_in_touch_zone: # Inside touch zone \n self.was_in_touch_zone = True \n alpha_pecentage = 0\n beta_pecentage = 1\n # rospy.loginfo(\"+++++++++++++++++++++++++++++beta adjustment\")\n # W = -p2*beta\n '''\n elif r < ADJUST_ZONE_RADIUS : # Inside adjust zone \n if abs(theta) < math.pi/2:\n alpha_pecentage = math.pow((ADJUST_ZONE_RADIUS - r) , 2)/math.pow((ADJUST_ZONE_RADIUS - TOUCH_ZONE_RADIUS) , 2)\n beta_pecentage = 1 - (math.pow((ADJUST_ZONE_RADIUS - r) , 2)/math.pow((ADJUST_ZONE_RADIUS - TOUCH_ZONE_RADIUS) , 2))\n else: \n alpha_pecentage = 1 \n beta_pecentage = 0\n '''\n else: # Outside\n alpha_pecentage = 1 \n beta_pecentage = 0\n # W = -p2*alpha\n W = -p2 * (alpha*alpha_pecentage + beta*beta_pecentage ) # alpha_pecentage + beta_pecentage = 1 \n\n # Vel conservation\n \n if (abs(V) + abs(W)) > Vel_limit:\n k = Vel_limit / (abs(V) + abs(W))\n V = V * k\n W = W * k\n else: # Allow slower Vel\n pass \n\n #---------------------------------#\n #reached or not \n if self.goal_mode == \"waypoint\" and self.was_in_touch_zone :\n V = 0 \n W = 0\n self.state = \"reached\"\n elif self.goal_mode == \"goal\" and self.was_in_touch_zone and abs(beta) < TOUCH_ZONE_ANGLE: \n V = 0 \n W = 0\n self.state = \"reached\"\n \n #---------------------------------#\n rospy.loginfo (\"V = \" + str(V))\n rospy.loginfo (\"W = \" + str(W))\n self.cmd_vel.linear.x = V \n self.cmd_vel.angular.z = W\n self.pub_cmd_vel.publish(self.cmd_vel)\n else: \n pass \n \n def collision_calculation(self):\n pass \n #dt_linear = / + /\n #dt_angular = / + /\n\n def set_point(self, idx ,r ,g ,b ):\n '''\n Set Point at MarkArray \n '''\n marker 
= Marker()\n marker.header.frame_id = \"/map\"\n marker.id = idx \n marker.ns = \"tiles\"\n marker.header.stamp = rospy.get_rostime()\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.scale.x = 0.05\n marker.scale.y = 0.05\n marker.scale.z = 0.05\n marker.color.a = 1.0\n marker.color.r = r/255.0\n marker.color.g = g/255.0\n marker.color.b = b/255.0\n marker.pose.orientation.w = 1.0\n # (marker.pose.position.x , marker.pose.position.y) = self.idx2XY(idx)\n self.markerArray.markers.append(marker)\n \n def pose_quaternion_2_list(self, quaternion):\n \"\"\"\n This function help transfer the geometry_msgs.msg.PoseStameped \n into (translation, quaternion) <-- lists\n \"\"\"\n return [quaternion.x, quaternion.y, quaternion.z, quaternion.w]\n\n#----- Declare Class -----# \nLVP = LINEAR_VELOCITY_PLANNER()\n\ndef main(args):\n #----- Init node ------# \n rospy.init_node('linear_velocity_planner', anonymous=True)\n rospy.Subscriber('/lucky_navi/goal', Pose2D , LVP.goal_CB) # TODO for testing \n # rospy.Subscriber('/move_base_simple/goal', Pose2D , LVP.goal_CB)\n rospy.Subscriber('/current_position', PoseStamped, LVP.current_position_CB) \n\n\n r = rospy.Rate(10)#call at 10HZ\n while (not rospy.is_shutdown()):\n '''\n if GC.is_need_pub: \n pub_global_costmap.publish(GC.global_costmap)\n GC.is_need_pub = False\n if GP.is_need_pub:\n pub_global_path.publish(GP.global_path)\n GP.is_need_pub = False\n '''\n LVP.iterateOnce()\n r.sleep()\n\nif __name__ == '__main__':\n try:\n main(sys.argv)\n except rospy.ROSInterruptException:\n pass\n\n \n\n","sub_path":"src/linear_velocity_planner.py","file_name":"linear_velocity_planner.py","file_ext":"py","file_size_in_byte":10365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"193081725","text":"import logging\nimport os\nimport pickle\nimport numpy as np\n\nfrom .io import load_jets_from_pickle, save_jets_to_pickle\nfrom .JetDataset import JetDataset\n\ndef load_jets(data_dir, filename, redo=False, preprocess_fn=None):\n\n #preprocessed_dir = os.path.join(data_dir, 'preprocessed')\n\n raw_data_dir = os.path.join(data_dir, 'raw')\n preprocessed_dir = os.path.join(data_dir, 'preprocessed')\n path_to_preprocessed = os.path.join(preprocessed_dir, filename)\n\n if not os.path.exists(path_to_preprocessed) or redo:\n if not os.path.exists(preprocessed_dir):\n os.makedirs(preprocessed_dir)\n\n logging.warning(\"Preprocessing...\")\n\n preprocess_fn(raw_data_dir, preprocessed_dir, filename)\n\n logging.warning(\"Preprocessed the data and saved it to {}\".format(path_to_preprocessed))\n else:\n logging.warning(\"Data at {} and already preprocessed\".format(path_to_preprocessed))\n\n jets = load_jets_from_pickle(path_to_preprocessed)\n logging.warning(\"\\tSuccessfully loaded data\")\n return jets\n\ndef load_train_dataset(data_dir, filename, n_train, n_valid, redo):\n if 'w-vs-qcd' in data_dir:\n from .w_vs_qcd import preprocess, crop_dataset\n elif 'quark-gluon' in data_dir:\n from .quark_gluon import preprocess, crop_dataset\n else:\n raise ValueError('Unrecognized data_dir!')\n #from problem_module import preprocess, crop_dataset\n\n problem = data_dir.split('/')[-1]\n subproblem = filename\n\n logging.warning(\"Loading data...\")\n filename = \"{}-train.pickle\".format(filename)\n\n jets = load_jets(data_dir, filename, redo, preprocess_fn=preprocess)\n logging.warning(\"Found {} jets in total\".format(len(jets)))\n\n if n_train > 0:\n jets = jets[:n_train]\n logging.warning(\"Splitting into train and validation...\")\n #\n train_jets = jets[n_valid:]\n train_dataset = JetDataset(train_jets)\n #\n valid_jets = jets[:n_valid]\n valid_dataset = JetDataset(valid_jets)\n\n # crop validation set and add the excluded data to the training set\n #if 'w-vs-qcd' in data_dir:\n valid_dataset, cropped_dataset = crop_dataset(valid_dataset, pileup=False)\n train_dataset.extend(cropped_dataset)\n\n train_dataset.shuffle()\n ##\n logging.warning(\"Building normalizing transform from training set...\")\n train_dataset.transform()\n\n valid_dataset.transform(train_dataset.tf)\n\n # add cropped indices to training data\n logging.warning(\"\\tfinal train size = %d\" % len(train_dataset))\n logging.warning(\"\\tfinal valid size = %d\" % len(valid_dataset))\n\n return train_dataset, valid_dataset\n\ndef load_test_dataset(data_dir, filename, n_test, redo):\n if 'w-vs-qcd' in data_dir:\n from .w_vs_qcd import preprocess, crop_dataset\n elif 'quark-gluon' in data_dir:\n from .quark_gluon import preprocess, crop_dataset\n else:\n raise ValueError('Unrecognized data_dir!')\n\n train_dataset, _ = load_train_dataset(data_dir, filename, -1, 27000, False)\n logging.warning(\"Loading test data...\")\n filename = \"{}-test.pickle\".format(filename)\n jets = load_jets(data_dir, filename, redo)\n jets = jets[:n_test]\n\n dataset = JetDataset(jets)\n dataset.transform(train_dataset.tf)\n\n # crop validation set and add the excluded data to the training set\n dataset, _ = crop_dataset(dataset, pileup=False)\n\n # add cropped indices to training data\n logging.warning(\"\\tfinal test size = %d\" % len(dataset))\n\n return 
dataset\n","sub_path":"src/jets/data_ops/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"587638528","text":"# -*- coding: utf-8 -*-\nimport h5py\nimport numpy as np\nimport os\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import transforms, models\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import WandbLogger\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport wandb\nwandb.init(\n # mode='offline',\n project=\"prostatex\", \n group=\"wang-tbakd3-t3\",\n config={\n \"loss\": \"binary_cross_entropy\",\n \"metric\": \"accuracy\",\n \"optimizer\": \"Adam\",\n \"lr\":1e-4,\n \"epoch\": 1000,\n \"batch_size\": 16,\n \"augmentation\": {\n \"degrees\": 50,\n \"translate\": (0.9, 0.9),\n \"shear\": [-15, 15, -15, 15],\n },\n \"train_dir\": 'tbakd3_npy/train',\n \"valid_dir\": 'tbakd3_npy/valid_bal'\n })\nwandblogger = WandbLogger()\n\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n\n## Data\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.RandomAffine(**wandb.config.augmentation)\n])\n\ndef npy_loader(path: str) -> np.ndarray:\n return np.load(path)\n\ndatasets = {}\ndatasets['train'] = torchvision.datasets.DatasetFolder(wandb.config.train_dir, extensions='npy', loader=npy_loader, transform=transform)\ndatasets['valid'] = torchvision.datasets.DatasetFolder(wandb.config.valid_dir, extensions='npy', loader=npy_loader, transform=transforms.ToTensor())\n\n\nclass MSDSC(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(16, 16, 3, padding=1, groups=16),\n nn.Conv2d(16, 8, 1)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 16, 5, padding=2, groups=16),\n nn.Conv2d(16, 8, 1)\n )\n self.layer = nn.Sequential(\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n\n def forward(self, x):\n x1 = self.conv1(x)\n x2 = self.conv2(x)\n x = torch.cat((x1, x2), 1)\n x = self.layer(x)\n return x\n\n\nclass WangClassifier(pl.LightningModule):\n\n def __init__(self, num_sequences):\n super().__init__()\n\n self.num_sequences = num_sequences\n self.conv = nn.ModuleList(\n [nn.Sequential(\n MSDSC(), MSDSC(), MSDSC(), MSDSC(), MSDSC(), nn.Flatten()\n ) for i in range(num_sequences)]\n )\n self.linear = nn.ModuleList([nn.Linear(64, 1) for i in range(num_sequences)])\n self.fusion = nn.Linear(64 * num_sequences, 1)\n\n self.lr = wandb.config.lr\n self.accuracy = lambda x, y: ((x > 0.5).type_as(y) == y).float().mean()\n self.auroc = pl.metrics.functional.classification.auroc\n\n def forward(self, x):\n conv = [conv(x[:, i].unsqueeze(1).repeat(1, 16, 1, 1)) for i, conv in enumerate(self.conv)]\n conv_x = torch.cat([c.unsqueeze(1) for c in conv], 1)\n linear = [linear(conv_x[:, i]) for i, linear in enumerate(self.linear)]\n linear_x = torch.cat([l.unsqueeze(1) for l in linear], 1)\n fusion_x = self.fusion(torch.cat(conv, 1)).unsqueeze(1)\n x = torch.cat((linear_x, fusion_x), 1)\n x = torch.mean(linear_x, 1)\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n y = y.type_as(y_hat).unsqueeze(1)\n criterion = nn.BCEWithLogitsLoss()\n loss = criterion(y_hat, y)\n acc = self.accuracy(torch.sigmoid(y_hat), y)\n self.log('train_loss', loss, sync_dist=True)\n self.log('train_acc', acc, prog_bar=True, sync_dist=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n y = y.type_as(y_hat).unsqueeze(1)\n criterion = nn.BCEWithLogitsLoss()\n loss = criterion(y_hat, y)\n acc = self.accuracy(torch.sigmoid(y_hat), y)\n auc = 
self.auroc(torch.sigmoid(y_hat).squeeze(), y.squeeze())\n self.log('valid_loss', loss, sync_dist=True)\n self.log('valid_acc', acc, prog_bar=True, sync_dist=True)\n self.log('valid_auc', auc, prog_bar=True, sync_dist=True)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n \n def train_dataloader(self):\n dataloader = torch.utils.data.DataLoader(\n datasets['train'], \n batch_size=wandb.config.batch_size, \n num_workers=16, drop_last=True, shuffle=True)\n return dataloader\n\n def val_dataloader(self):\n dataloader = torch.utils.data.DataLoader(\n datasets['valid'], \n batch_size=64, \n num_workers=16, drop_last=False)\n return dataloader\n\n\nmodel = WangClassifier(num_sequences=datasets['train'][0][0].shape[0])\ntrainer = pl.Trainer(\n logger=wandblogger,\n gpus=-1,\n accelerator='ddp',\n max_epochs=wandb.config.epoch)\ntrainer.fit(model)","sub_path":"wang.py","file_name":"wang.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"492319955","text":"\"\"\"\n@author: Churiulin Evgenii\nСкрипт предназначен для запуска алгоритма машинного обучения Random Forest, метода главных компонент и факторного анализа\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import scatter_matrix\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import FactorAnalysis\n\n\n#########################\n# Функция 1 для коррекции пустых значений с учетом предыдущего и следующего значения\n#########################\n\n# colums_df_stat - имя переменной (тип - объект Series), полученной на основе выгрузки данных из исходного массива данных\ndef nan_values_correction(colums_df_stat):\n for tmin2m_st, k in enumerate(colums_df_stat):\n if (tmin2m_st < (len(colums_df_stat)-1)): \n if np.isnan(colums_df_stat[tmin2m_st]) and colums_df_stat[tmin2m_st-1] != 0 and colums_df_stat[tmin2m_st+1] != 0:\n colums_df_stat[tmin2m_st] = (colums_df_stat[tmin2m_st - 1] + colums_df_stat[tmin2m_st + 1])/2 \n elif np.isnan(colums_df_stat[tmin2m_st]) and colums_df_stat[tmin2m_st-1] != 0 and colums_df_stat[tmin2m_st+2] != 0:\n colums_df_stat[tmin2m_st] = (colums_df_stat[tmin2m_st - 1] + colums_df_stat[tmin2m_st + 2])/2\n elif np.isnan(colums_df_stat[tmin2m_st]) and colums_df_stat[tmin2m_st-1] != 0 and colums_df_stat[tmin2m_st+3] != 0:\n colums_df_stat[tmin2m_st] = (colums_df_stat[tmin2m_st - 1] + colums_df_stat[tmin2m_st + 3])/2 \n if tmin2m_st == (len(colums_df_stat)-1): \n if np.isnan(colums_df_stat[tmin2m_st]):\n colums_df_stat[tmin2m_st] = (colums_df_stat[tmin2m_st - 1])\n return (colums_df_stat) \n\n#########################\n# Функция 2 для коррекции пустых значений с учетом предыдущего, следующего, 2 следующих, 3 следующих значения\n#########################\n\n# colums_df_stat2 - имя переменной (тип - объект Series), полученной на основе выгрузки данных из исходного массива данных \ndef nan_values_correction_2(colums_df_stat_2):\n for tt, kkk in enumerate(colums_df_stat_2):\n if (tt < (len(colums_df_stat_2)-1)):\n if np.isnan(colums_df_stat_2[tt]) and not np.isnan(colums_df_stat_2[tt-1]) and not np.isnan(colums_df_stat_2[tt+1]):\n colums_df_stat_2[tt] = (colums_df_stat_2[tt-1] + colums_df_stat_2[tt+1])/2 \n elif np.isnan(colums_df_stat_2[tt]) and not np.isnan(colums_df_stat_2[tt-1]) and not np.isnan(colums_df_stat_2[tt+2]):\n colums_df_stat_2[tt] = (colums_df_stat_2[tt-1] + colums_df_stat_2[tt+2])/2 \n elif np.isnan(colums_df_stat_2[tt]) and not np.isnan(colums_df_stat_2[tt-1]) and not np.isnan(colums_df_stat_2[tt+3]):\n colums_df_stat_2[tt] = (colums_df_stat_2[tt-1] + colums_df_stat_2[tt+3])/2\n if tt == (len(colums_df_stat_2)-1):\n if np.isnan(colums_df_stat_2[tt]):\n colums_df_stat_2[tt] = colums_df_stat_2[tt-1]\n return (colums_df_stat_2) \n\n# Примечание функции 1 и 2 можно заменить одной, но требуется доработка + дополнительно следует учесть когда пропуск стоит на 1 месте\n\n\n#########################\n# Функция 3 для коррекции пустых значения за период с мая по октябрь (не включая). 
Использовал для заполнения \n#########################\n\n# пустых значений снежного покрова\n# colums_snow - имя переменной (тип - объект Series), полученной на основе выгрузки данных из исходного массива данных \ndef snow_values_correction(colums_snow):\n for h_st, kk in enumerate(colums_snow):\n month = colums_snow.index[h_st].month\n if month >= 5 and month <= 9:\n colums_snow[h_st] = 0\n return (colums_snow)\n\n#########################\n# Функция 4 для загрузки исходных метеорологических данных\n#########################\n \n# iPath - путь к данным \ndef initial_data(iPath):\n # Считываем данные \n df = pd.read_csv(iPath, skiprows = 0, sep=';', dayfirst = True, parse_dates = True)\n #print ('Columns:', df.columns)\n #Удаляем дубликаты и столбцы, которые не представляют интереса для данного исследования \n df = df.drop_duplicates(keep = False)\n df = df.drop(['lat','lon','h_station','id_st','t2m_negative','hsnow'], axis=1) \n #Создаем серии для заполнения пропусков в даннных\n index_date = pd.to_datetime(df['Date']) # time step\n ps = pd.Series(df['ps'].values, index = index_date, dtype = 'float') # air pressure at meteostation\n pmsl = pd.Series(df['pmsl'].values, index = index_date, dtype = 'float') # air pressure at meteostation in according to see level\n t2m = pd.Series(df['t2m'].values, index = index_date, dtype = 'float') # 2m air temperature\n tmin2m = pd.Series(df['tmin2m'].values, index = index_date, dtype = 'float') # 2m min air temperature\n tmax2m = pd.Series(df['tmax2m'].values, index = index_date, dtype = 'float') # 2m max air temperature \n tming = pd.Series(df['tming'].values, index = index_date, dtype = 'float') # min soil temperature for night \n td2m = pd.Series(df['td2m'].values, index = index_date, dtype = 'float') # 2m dew point\n t_g = pd.Series(df['t_g'].values, index = index_date, dtype = 'float') # temperatura of soil\n dd10m = pd.Series(df['dd10m'].values, index = index_date, dtype = 'float') # direction of wind\n ff10mean = pd.Series(df['ff10meam'].values, index = index_date, dtype = 'float') # speed of wind\n ff10max = pd.Series(df['ff10max'].values, index = index_date, dtype = 'float') # speed of wind\n hsnow = pd.Series(df['hsnow_snowe'].values, index = index_date, dtype = 'float') # Depth of snow\n swe = pd.Series(df['swe'].values, index = index_date, dtype = 'float') # SWE of snow\n rho = pd.Series(df['rho'].values, index = index_date, dtype = 'float') # RHO of snow \n # Запускаем функции коррекции данных\n ps = nan_values_correction(ps)\n pmsl = nan_values_correction(pmsl)\n t2m = nan_values_correction(t2m)\n tmin2m = nan_values_correction(tmin2m)\n tmax2m = nan_values_correction(tmax2m)\n tming = nan_values_correction(tming)\n tming = nan_values_correction_2(tming)\n td2m = nan_values_correction(td2m)\n t_g = nan_values_correction(t_g)\n t_g = nan_values_correction_2(t_g)\n for t_g_s, kk in enumerate(t_g):\n if (t_g_s < (len(t_g)-1)):\n if np.isnan(t_g[t_g_s]) and not np.isnan(tming[t_g_s]):\n t_g[t_g_s] = tming[t_g_s] \n if t_g_s == (len(t_g)-1):\n if np.isnan(t_g[t_g_s]):\n t_g[t_g_s] = tming[t_g_s]\n dd10m = nan_values_correction(dd10m)\n ff10mean = nan_values_correction(ff10mean)\n ff10max = nan_values_correction(ff10max)\n hsnow = snow_values_correction(hsnow)\n swe = snow_values_correction(swe)\n rho = snow_values_correction(rho) \n # Отбрасываем строки с пустыми значениям, где заполненных параметров меньше 15\n df = df.dropna(axis='rows', thresh=15) \n # Выполняем переиндексирование по дате\n df['Date'] = pd.to_datetime(df['Date'], 
format='%Y-%m-%d') \n # Устанавливаем новый индекс для массива данных\n df = df.set_index('Date') \n # Заполняем оставшиеся пустые значения в столбцах средним значением по столбцу \n #df = df.fillna(method='ffill')\n #df = df.fillna(df.mean())\n #df = df.fillna(0) \n return (df) \n\n#########################\n# Функция для выборки только зимних значений метеопараметров\n#########################\n\n# data_maket - пустой массив данных, куда будут записываться данные\n# df_data - массив с метеоданными\n# name - текстовый параметр с именем столбца из df_data\n# time_1 - переменная начала периода\n# time_2 - переменная конца периода \ndef winter_data(data_maket, df_data, name, time_1, time_2):\n if len(data_maket)>0: \n data_maket = pd.concat([data_maket, df_data[name][time_1:time_2]])\n else:\n data_maket = df_data[name][time_1:time_2]\n return (data_maket)\n\n#########################\n# Функция выполняющая простейшее машинное обучение на основе метода RandomForest\n#########################\n\n# df_main_year или df_main_winter = df - массив по которому выполняем описание\n# Data_path_exit - путь, где будет храниться результат работы \ndef simple_machine_learning(df_data, Data_path_exit):\n # Выбрасываем интересующий нас столбец из подготовленного массива данных и создаем 2 новых переменных\n X = df_data.drop('Discharge', axis=1)\n y = df_data['Discharge']\n\n # Разделяем наши данные на тестовый набор данных и независимый набор данных для провеки\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.50, random_state=2020)\n\n # Стандартизируем данные\n ss = StandardScaler()\n X_train_scaled = ss.fit_transform(X_train)\n X_test_scaled = ss.transform(X_test)\n\n # Переводим из объекта Series в объект numpy\n y_train = np.array(y_train)\n # Переводим из float 64 в int 32, нужно для корректной работы методов дальше\n y_train = y_train.astype(np.int32)\n\n # Подключаем алгоритм машинного обучения и выполняем обучение базовой модели \n rfc = RandomForestClassifier()\n rfc.fit(X_train_scaled, y_train)\n #display(rfc.score(X_train_scaled, y_train))\n\n # Отображаем самые важные признаки для обучения \n feats = {}\n # Требуется указать какие данные используются\n for feature, importance in zip(df_data.columns, rfc.feature_importances_): \n feats[feature] = importance\n importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Gini-Importance'})\n importances = importances.sort_values(by='Gini-Importance', ascending=False)\n importances = importances.reset_index()\n importances = importances.rename(columns={'index': 'Features'})\n sns.set(font_scale = 5)\n sns.set(style=\"whitegrid\", color_codes=True, font_scale = 1.7)\n fig, ax = plt.subplots()\n fig.set_size_inches(30,15)\n sns.barplot(x=importances['Gini-Importance'], y=importances['Features'], data=importances, color='skyblue')\n plt.xlabel('Значимость переменной', fontsize=25, weight = 'bold')\n plt.ylabel('Переменные', fontsize=25, weight = 'bold')\n #plt.title('Feature Importance', fontsize=25, weight = 'bold')\n plt.savefig(Data_path_exit + 'Importance factor' + '.png', format='png', dpi = 300) \n # Отображаем графически важность признаков\n display(plt.show())\n # Делаем визуализацию важности признаков\n display(importances)\n \n ###### Запускаем метод главных компонент \n pca_test = PCA(n_components=13)\n pca_test.fit(X_train_scaled)\n sns.set(style='whitegrid')\n plt.plot(np.cumsum(pca_test.explained_variance_ratio_))\n plt.xlabel('Число компонент')\n plt.ylabel('Суммарно объясненная дисперсия')\n 
plt.axvline(linewidth=4, color='r', linestyle = '--', x=10, ymin=0, ymax=1)\n plt.savefig(Data_path_exit + 'PCA' + '.png', format='png', dpi = 300) \n display(plt.show())\n evr = pca_test.explained_variance_ratio_\n cvr = np.cumsum(pca_test.explained_variance_ratio_)\n pca_df = pd.DataFrame()\n pca_df['Cumulative Variance Ratio'] = cvr\n pca_df['Explained Variance Ratio'] = evr\n #display(pca_df.head(10)) \n # Понижаем размерность исходного массива данных до n-компонент описывающих 95% дисперсии\n pca = PCA(n_components=10)\n pca.fit(X_train_scaled)\n X_train_scaled_pca = pca.transform(X_train_scaled)\n X_test_scaled_pca = pca.transform(X_test_scaled)\n \n # Создаем новый массив данных с распределением дисперсии по компонентам\n pca_dims = []\n for x in range(0, len(pca_df)):\n pca_dims.append('PCA Component {}'.format(x))\n pca_test_df = pd.DataFrame(pca_test.components_, columns=X.columns, index=pca_dims)\n pca_test_df.to_csv(Data_path_exit + 'PCA_result.csv', sep=';', float_format='%.3f') # массив данных с распределением дисперсии по компонентам\n #print (pca_test_df.head(10).T)\n\n#########################\n# Функция для стандартизации переменных\n#########################\n \n# df - исходный массив с данными\ndef scale_features(df):\n scaled = preprocessing.StandardScaler().fit_transform(df)\n scaled = pd.DataFrame(scaled, columns=df.columns)\n return scaled\n\n#########################\n# Функция для выполнения факторного анализа\n#########################\n \n# df_main_year или df_main_winter = df - массив по которому выполняем описание\n# Data_path_exit - путь к папкам, где будут храниться данные\n# df2_index - массив с исходными индексами по дате (может быть 2 варианта либо df_main (4 сезона) или df_main1 (холодный сезон))\ndef factor_analysis(df, Data_path_exit, df2_index):\n # Выполняем расчет описательной статистики и корреляционной матрицы\n df_des_stat = df.describe()\n df_cor_stat = df.corr()\n # Делаем вывод описательной статистики и корреляционной матрицы\n df_des_stat.to_csv(Data_path_exit + 'des_stat.csv', sep=';', float_format='%.3f')\n df_cor_stat.to_csv(Data_path_exit + 'cor_stat.csv', sep=';', float_format='%.3f')\n # Строим диаграммы рассеивания и гистограммы\n matrix = scatter_matrix(df, figsize=[20,20], alpha=0.2)\n # Импортируем данные\n plt.savefig(Data_path_exit + 'Scatter_matrix' +'.png', format='png', dpi = 300)\n\n df_scaled = preprocessing.scale(df) # массив со стандартизированными данными\n # Проецируем с метода главных компонент переменнные на плоскость. Выделяем 4 главных фактора (можно больше)\n pca = PCA(n_components=4)\n pca1 = pca.fit(df_scaled)\n print('Доля разброса, которую объясняют факторы: ', pca.explained_variance_ratio_)\n \n # Рассчитываем значения основных факторов\n zzz = pca.transform(df_scaled)\n values_factors = pd.DataFrame(zzz)\n values_factors.to_csv(Data_path_exit + 'factor_values.csv', sep=';', float_format='%.3f') \n #print (zzz)\n\n # Факторный анализ\n fa = FactorAnalysis(n_components=4) # Количество факторов\n fac_1 = fa.fit(df_scaled)\n df_fa = pd.DataFrame(fa.components_, columns=df.columns) \n df_fa.to_csv(Data_path_exit + 'factor_result.csv', sep=';', float_format='%.3f') # Координаты факторов в пространстве исходных значений\n # Уникальность значений в смысле дисперсии, объяснённой факторами (чем больше, тем хуже объясняется факторами) содержится в атрибуте\n fac_2 = pd.Series(fa.noise_variance_, df.columns)\n fac_2.to_csv(Data_path_exit + 'Unic_values.csv', sep=';', float_format='%.3f') # Координаты факторов. 
Основной результат \n print ('Уникальность значений:\\n', fac_2)\n scores = pd.DataFrame(fa.transform(df_scaled), columns=['factor1', 'factor2','factor3', 'factor4']) \n scores = scores.set_index(df2_index.index)\n scores.to_csv(Data_path_exit + 'factor_vectors.csv', sep=';', float_format='%.3f') # Координаты факторов. Основной результат\n\n\n###### Этап 1. Подготовка начальных данных для проведения дальнейших вычислений\n\n# Создание массивов метеоданных для водосборов\n\n# Путь к папке, где хранится проект с рекой Дон\npath_main = 'D:/Don/'\n# Путь к папкам, куда записываются результирующие данные\niPath_result = path_main +'Main_data/' #Результирующие массивы с метео данными\niPath_exit = path_main +'PCA/' #Результаты машинного обучения, PCA и факторного анализа\n\n###### Река Сосна - г.п. Елец\niPath_stat_exit1 = iPath_exit + 'Sosna_river/annual_data/' #Результаты для всех сезонов\niPath_stat_exit2 = iPath_exit + 'Sosna_river/cold_data/' #Результаты для холодного сезона\n\n###### Река Битюг - г.п. Бобров\niPath_stat_exit3 = iPath_exit + 'Bitug_river/annual_data/' #Результаты для всех сезонов\niPath_stat_exit4 = iPath_exit + 'Bitug_river/cold_data/' #Результаты для холодного сезона\n\n###### Река Тихая Сосна - г.п. Алексеевка\niPath_stat_exit5 = iPath_exit + 'M_Sosna_river/annual_data/' #Результаты для всех сезонов\niPath_stat_exit6 = iPath_exit + 'M_Sosna_river/cold_data/' #Результаты для холодного сезона\n\n###### Река Медведица - г.п. Лысые горы\niPath_stat_exit7 = iPath_exit + 'Medveditsa_river/annual_data/' #Результаты для всех сезонов\niPath_stat_exit8 = iPath_exit + 'Medveditsa_river/cold_data/' #Результаты для холодного сезона\n\n\n######\n#Версия для р. Сосна - г.п. Елец (метеостанции 27928, 27915, 34013, 34112)\n######\n\"\"\"\nfileName_1 = '27928.csv'\nfileName_2 = '27915.csv'\nfileName_3 = '34013.csv'\nfileName_4 = '34112.csv'\n\niPath_1 = path_main + 'meteo_2000_2020/{}'.format(fileName_1)\niPath_2 = path_main + 'meteo_2000_2020/{}'.format(fileName_2)\niPath_3 = path_main + 'meteo_2000_2020/{}'.format(fileName_3)\niPath_4 = path_main + 'meteo_2000_2020/{}'.format(fileName_4)\n\n# Загружаем массивы с метеоданными\ndf_27928 = initial_data(iPath_1)\ndf_27915 = initial_data(iPath_2)\ndf_34013 = initial_data(iPath_3)\ndf_34112 = initial_data(iPath_4)\n\n# Создаем общий массив и усредняем значения метеопараметров\ndf_data = pd.concat((df_27928, df_27915,df_34013,df_34112)).groupby(level=0).mean()\n\n# Подгружаем данные гидрологических наблюдений\nfileName_hydro = 'Rivers_discharges.xlsx'\niPath_hydro = path_main + 'hydro_data/{}'.format(fileName_hydro)\n\ndf_hydro = pd.read_excel(iPath_hydro, skiprows = 0, sep=';', dayfirst = True, parse_dates = True, index_col = [0], \n skipinitialspace = True, na_values= ['9990','********'])\nprint ('Columns:', df_hydro.columns)\ndata_rivers = df_hydro['Sosna']\n\"\"\"\n######\n#Версия для р. Битюг - г.п. 
Бобров (метеостанции 34036, 34238)\n######\n\"\"\"\nfileName_1 = '34036.csv'\nfileName_2 = '34238.csv'\n\niPath_1 = path_main + 'meteo_2000_2020/{}'.format(fileName_1)\niPath_2 = path_main + 'meteo_2000_2020/{}'.format(fileName_2)\n\n# Загружаем массивы с метеоданными\ndf_34036 = initial_data(iPath_1)\ndf_34238 = initial_data(iPath_2)\n\n# Создаем общий массив и усредняем значения метеопараметров\ndf_data = pd.concat((df_34036, df_34238)).groupby(level=0).mean()\n\n# Подгружаем данные гидрологических наблюдений\nfileName_hydro = 'Rivers_discharges.xlsx'\niPath_hydro = path_main + 'hydro_data/{}'.format(fileName_hydro)\n\ndf_hydro = pd.read_excel(iPath_hydro, skiprows = 0, sep=';', dayfirst = True, parse_dates = True, index_col = [0], \n skipinitialspace = True, na_values= ['9990','********'])\nprint ('Columns:', df_hydro.columns)\ndata_rivers = df_hydro['Bitug']\n\"\"\"\n######\n#Версия для р. Тихая Сосна - г.п. Алексеевка (метеостанции 34213, 34321)\n######\n\"\"\"\nfileName_1 = '34213.csv'\nfileName_2 = '34321.csv'\n\niPath_1 = path_main + 'meteo_2000_2020/{}'.format(fileName_1)\niPath_2 = path_main + 'meteo_2000_2020/{}'.format(fileName_2)\n\n# Загружаем массивы с метеоданными\ndf_34213 = initial_data(iPath_1)\ndf_34321 = initial_data(iPath_2)\n\n# Создаем общий массив и усредняем значения метеопараметров\ndf_data = pd.concat((df_34213, df_34321)).groupby(level=0).mean()\n\n# Подгружаем данные гидрологических наблюдений\nfileName_hydro = 'Rivers_discharges.xlsx'\niPath_hydro = path_main + 'hydro_data/{}'.format(fileName_hydro)\n\ndf_hydro = pd.read_excel(iPath_hydro, skiprows = 0, sep=';', dayfirst = True, parse_dates = True, index_col = [0], \n skipinitialspace = True, na_values= ['9990','********'])\nprint ('Columns:', df_hydro.columns)\ndata_rivers = df_hydro['Tixay Sosna']\n\"\"\"\n######\n#Версия для р. Медведица - г.п. Лысые Горы (метеостанции 34063, 34069, 34163)\n######\n\nfileName_1 = '34063.csv'\nfileName_2 = '34069.csv'\nfileName_3 = '34163.csv'\n\niPath_1 = path_main + 'meteo_2000_2020/{}'.format(fileName_1)\niPath_2 = path_main + 'meteo_2000_2020/{}'.format(fileName_2)\niPath_3 = path_main + 'meteo_2000_2020/{}'.format(fileName_3)\n\n# Загружаем массивы с метеоданными\ndf_34063 = initial_data(iPath_1)\ndf_34069 = initial_data(iPath_2)\ndf_34163 = initial_data(iPath_3)\n\n# Создаем общий массив и усредняем значения метеопараметров\ndf_data = pd.concat((df_34063, df_34069, df_34163)).groupby(level=0).mean()\n\n# Подгружаем данные гидрологических наблюдений\nfileName_hydro = 'Rivers_discharges.xlsx'\niPath_hydro = path_main + 'hydro_data/{}'.format(fileName_hydro)\n\ndf_hydro = pd.read_excel(iPath_hydro, skiprows = 0, sep=';', dayfirst = True, parse_dates = True, index_col = [0], \n skipinitialspace = True, na_values= ['9990','********'])\nprint ('Columns:', df_hydro.columns)\ndata_rivers = df_hydro['Medveditsa']\n\n\n\n\n\n# Объединяем массив с метеоданными с данными о расходах воды\ndf_main = pd.concat((df_data, data_rivers), axis = 1)\n\n# Отбрасываем строки с пустыми значениям, где заполненных параметров меньше 15. 
Для того чтобы отфльтровать лишние расходы воды\ndf_main = df_main.dropna(axis='rows', thresh=15) \n\n# Отбрасываем \"ненужные\" или дублирующие столбцы данных \ndf_main = df_main.drop(['tming','pmsl','dd10m','R12','R24'], axis=1) # Основной массив с метеоданными\n\n# Заполняем оставшиеся пустые значения в столбцах средним значением по столбцу\n# Формируем датафрейм с информацией о всех метеоданных за весь год (с января по декабрь)\ndf_main = df_main.fillna(df_main.mean())\n\n# Нужно правильно указывать столбец из которого были взяты расход воды\n#df_main = df_main.rename(columns={'Sosna': 'Discharge'})\n#df_main = df_main.rename(columns={'Bitug': 'Discharge'})\n#df_main = df_main.rename(columns={'Tixay Sosna': 'Discharge'})\ndf_main = df_main.rename(columns={'Medveditsa': 'Discharge'})\n\n\n\n###### Этап 2. Готовим данные для всех сезонов\n# Делаем переиндексацию и отбрасываем дату\ncount = [] \ncount_numbers = 0\nfor jj in range(len(df_main)):\n count_numbers += 1 \n count.append(count_numbers)\nt = pd.Series(count, index = df_main.index) \ndf_main_year = df_main.set_index(t) # Итоговый массив данных для все 4 сезонов\n\n\n\n###### Этап 3. Готовим данные для зимнего сезона\n# Создаем специальный массив с данными только за зимний период годы, чтобы посмотреть влияние снега на весеннее половодье\n# Создаем пустые списки для переменных\nps_winter = ''\nt2m_winter = ''\ntmin2m_winter = ''\ntmax2m_winter = ''\nt_g_winter = ''\ntd2m_winter = ''\nff10meam_winter = ''\nff10max_winter = ''\nhsnow_snowe_winter = ''\nrho_winter = ''\nswe_winter = ''\nR12_liquid_winter = ''\nR12_solid_winter = ''\nDischarge_winter = ''\n\n# Задаем количество периодов = количеству зимних сезонов\nw = 20 \n \nperiods_winter = [['2000-10-01','2001-04-30'],\n ['2001-10-01','2002-04-30'],\n ['2002-10-01','2003-04-30'],\n ['2003-10-01','2004-04-30'],\n ['2004-10-01','2005-04-30'],\n ['2005-10-01','2006-04-30'],\n ['2006-10-01','2007-04-30'],\n ['2007-10-01','2008-04-30'],\n ['2008-10-01','2009-04-30'],\n ['2009-10-01','2010-04-30'],\n ['2010-10-01','2011-04-30'],\n ['2011-10-01','2012-04-30'],\n ['2012-10-01','2013-04-30'],\n ['2013-10-01','2014-04-30'],\n ['2014-10-01','2015-04-30'],\n ['2015-10-01','2016-04-30'],\n ['2016-10-01','2017-04-30'],\n ['2017-10-01','2018-04-30'],\n ['2018-10-01','2019-04-30'],\n ['2019-10-01','2020-04-30']]\n\nperiods_winter = np.array(periods_winter)\nfor tr in range(w):\n try:\n y_w_1 = periods_winter[tr][0]\n y_w_2 = periods_winter[tr][1]\n \n ps_winter = winter_data(ps_winter, df_main, 'ps', y_w_1, y_w_2) \n t2m_winter = winter_data(t2m_winter, df_main, 't2m', y_w_1, y_w_2) \n tmin2m_winter = winter_data(tmin2m_winter, df_main, 'tmin2m', y_w_1, y_w_2) \n tmax2m_winter = winter_data(tmax2m_winter, df_main, 'tmax2m', y_w_1, y_w_2) \n t_g_winter = winter_data(t_g_winter, df_main, 't_g', y_w_1, y_w_2) \n td2m_winter = winter_data(td2m_winter, df_main, 'td2m', y_w_1, y_w_2)\n ff10meam_winter = winter_data(ff10meam_winter, df_main, 'ff10meam', y_w_1, y_w_2)\n ff10max_winter = winter_data(ff10max_winter, df_main, 'ff10max', y_w_1, y_w_2) \n hsnow_snowe_winter = winter_data(hsnow_snowe_winter, df_main, 'hsnow_snowe', y_w_1, y_w_2)\n rho_winter = winter_data(rho_winter, df_main, 'rho', y_w_1, y_w_2)\n swe_winter = winter_data(swe_winter, df_main, 'swe', y_w_1, y_w_2)\n R12_liquid_winter = winter_data(R12_liquid_winter, df_main, 'R12_liquid', y_w_1, y_w_2)\n R12_solid_winter = winter_data(R12_solid_winter, df_main, 'R12_solid', y_w_1, y_w_2)\n Discharge_winter = winter_data(Discharge_winter, 
df_main, 'Discharge', y_w_1, y_w_2) \n except:\n print ('No data')\n\ndf_main_winter = pd.concat([ps_winter, t2m_winter, tmin2m_winter, tmax2m_winter, t_g_winter,\n td2m_winter, ff10meam_winter, ff10max_winter, hsnow_snowe_winter,\n rho_winter, swe_winter, R12_liquid_winter, R12_solid_winter,\n Discharge_winter], axis = 1)\n\ndf_main1 = df_main_winter # Создаем специальный массив для сохранения индекса с датой для факторного анализа\n# Делаем переиндексацию и отбрасываем дату\ncount2 = [] \ncount_numbers_2 = 0\nfor jjj in range(len(df_main_winter)):\n count_numbers_2 += 1 \n count2.append(count_numbers_2)\n\nt2 = pd.Series(count2, index = df_main_winter.index) \ndf_main_winter = df_main_winter.set_index(t2) # Итоговый массив данных для зимних сезонов\n\n\n###### Река Сосна - г.п. Елец\n\"\"\"\n###### Этап 4. Машинное обучение и Факторный анализ\nprint ('4 сезона')\nannual_data_m = simple_machine_learning(df_main_year, iPath_stat_exit1)\nannual_data_f = factor_analysis(df_main_year, iPath_stat_exit1, df_main)\nprint ('Холодный сезон')\ncold_data_m = simple_machine_learning(df_main_winter, iPath_stat_exit2)\ncold_data_f = factor_analysis(df_main_winter, iPath_stat_exit2, df_main1)\n\"\"\"\n\n###### Река Битюг - г.п. Бобров\n\"\"\"\n###### Этап 4. Машинное обучение и Факторный анализ\nprint ('4 сезона')\nannual_data_m = simple_machine_learning(df_main_year, iPath_stat_exit3)\nannual_data_f = factor_analysis(df_main_year, iPath_stat_exit3, df_main)\nprint ('Холодный сезон')\ncold_data_m = simple_machine_learning(df_main_winter, iPath_stat_exit4)\ncold_data_f = factor_analysis(df_main_winter, iPath_stat_exit4, df_main1)\n\"\"\"\n\n\n###### Река Тихая Сосна - г.п. Алексеевка\n\"\"\"\n###### Этап 4. Машинное обучение и Факторный анализ\nprint ('4 сезона')\nannual_data_m = simple_machine_learning(df_main_year, iPath_stat_exit5)\nannual_data_f = factor_analysis(df_main_year, iPath_stat_exit5, df_main)\nprint ('Холодный сезон')\ncold_data_m = simple_machine_learning(df_main_winter, iPath_stat_exit6)\ncold_data_f = factor_analysis(df_main_winter, iPath_stat_exit6, df_main1)\n\"\"\"\n\n###### Река Медведица - г.п. Лысые Горы\n\n###### Этап 4. Машинное обучение и Факторный анализ\nprint ('4 сезона')\nannual_data_m = simple_machine_learning(df_main_year, iPath_stat_exit7)\nannual_data_f = factor_analysis(df_main_year, iPath_stat_exit7, df_main)\nprint ('Холодный сезон')\ncold_data_m = simple_machine_learning(df_main_winter, iPath_stat_exit8)\ncold_data_f = factor_analysis(df_main_winter, iPath_stat_exit8, df_main1)\n\n\n\n\n\n\n\n","sub_path":"PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":33215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"267625799","text":"import torch\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\nclass LovaszLoss(nn.Module):\n\n def __init__(self):\n super().__init__() \n return None\n\n def __gradient__(self, gt_sorted):\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1. - intersection / union\n if p > 1:\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n \n def __eval__(self, logits, labels):\n signs = 2. * labels.float() - 1.\n errors = (1. - logits * Variable(signs))\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = self.__gradient__(gt_sorted)\n loss = torch.dot(F.relu(errors_sorted), Variable(grad))\n return loss\n \n def mean(self, l, empty=0):\n l = iter(l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n\n\n def forward(self, logits, labels):\n loss = []\n for logit, label in zip(logits, labels):\n logit = logit.unsqueeze(0).view(-1)\n label = label.unsqueeze(0).view(-1)\n loss.append(self.__eval__(logit, label))\n return self.mean(loss)\n\n\n","sub_path":"competitions/salt/metrics/lovasz.py","file_name":"lovasz.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"470102763","text":"# gluoncv에 있는 코드 참고\n\n\"\"\"Decoder functions.\nDecoders are used during testing/validation, which convert predictions back to\nnormal boxes, etc.\n\"\"\"\nimport mxnet as mx\nfrom mxnet.gluon import HybridBlock\n\n\nclass BoxDecoder(HybridBlock):\n\n def __init__(self, stds=(0.1, 0.1, 0.2, 0.2), means=(0., 0., 0., 0.)):\n super(BoxDecoder, self).__init__()\n self._stds = stds\n self._means = means\n\n def hybrid_forward(self, F, box_preds, anchors):\n anchor_x, anchor_y, anchor_width, anchor_height = anchors.split(axis=-1, num_outputs=4)\n norm_x, norm_y, norm_width, norm_height = F.split(box_preds, axis=-1, num_outputs=4)\n\n pre_box_x = F.broadcast_add(F.broadcast_mul(norm_x * self._stds[0] + self._means[0], anchor_width), anchor_x)\n pre_box_y = F.broadcast_add(F.broadcast_mul(norm_y * self._stds[1] + self._means[1], anchor_height), anchor_y)\n pre_box_w = F.broadcast_mul(F.exp(norm_width * self._stds[2] + self._means[2]), anchor_width)\n pre_box_h = F.broadcast_mul(F.exp(norm_height * self._stds[3] + self._means[3]), anchor_height)\n\n # center to corner\n half_w = pre_box_w / 2\n half_h = pre_box_h / 2\n xmin = pre_box_x - half_w\n ymin = pre_box_y - half_h\n xmax = pre_box_x + half_w\n ymax = pre_box_y + half_h\n return F.concat(xmin, ymin, xmax, ymax, dim=-1)\n\n\n# multiclass decoder\nclass ClassMDecoder(HybridBlock):\n\n def __init__(self, num_classes=None, thresh=0.01, from_sigmoid=False):\n super(ClassMDecoder, self).__init__()\n self._num_classes = num_classes\n self._thresh = thresh\n self._from_sigmoid = from_sigmoid\n\n def hybrid_forward(self, F, cls_preds):\n if not self._from_sigmoid:\n cls_preds = F.sigmoid(cls_preds, axis=-1)\n class_ids = F.argmax(cls_preds, axis=-1, keepdims=True)\n cls_preds = F.pick(cls_preds, class_ids, axis=-1, keepdims=True)\n\n # ex) thresh=0.01 이상인것만 뽑기\n mask = cls_preds > self._thresh\n class_ids = F.where(mask, class_ids, F.ones_like(class_ids) * -1)\n scores = F.where(mask, cls_preds, F.zeros_like(cls_preds))\n return class_ids, scores\n\n\n# multiclass per decoder\nclass ClassMPDecoder(HybridBlock):\n\n def __init__(self, num_classes=None, thresh=0.05, from_sigmoid=False):\n super(ClassMPDecoder, self).__init__()\n self._num_classes = num_classes\n self._thresh = thresh\n self._from_sigmoid = from_sigmoid\n\n def hybrid_forward(self, F, cls_preds):\n\n if not self._from_sigmoid:\n cls_preds = F.sigmoid(cls_preds)\n # batch x all feature number x foreground class(N) -> batch x all feature number x 1 - 클래스별로 쪼개기\n template = F.zeros_like(cls_preds.slice_axis(axis=-1, begin=0, end=1)) # batch x all feature number x 1\n class_ids = []\n # batch x all feature number x 1 당 번호 0부터 부여하기\n for i in range(self._num_classes):\n class_ids.append(template + i) # batch x all feature number x 1\n\n # batch x all feature number x foreground class 형태로 만들기\n class_ids = F.concat(*class_ids, dim=-1)\n\n # ex) thresh=0.05 이상인것만 뽑기\n mask = cls_preds > self._thresh\n class_ids = F.where(mask, class_ids, F.ones_like(class_ids) * -1)\n scores = F.where(mask, cls_preds, F.zeros_like(cls_preds))\n return class_ids, scores\n\n\n''' \n RetinaNet 논문을 읽고 구현해 봄\n 모든 박스를 decoding 할 필요는 없다. 
\n'''\n\n\nclass BoxDecodeLimit(HybridBlock):\n '''\n Parameters\n ----------\n decode_number : int / -1 : all\n '''\n\n def __init__(self, decode_number=1000):\n super(BoxDecodeLimit, self).__init__()\n self._decode_number = decode_number\n\n def hybrid_forward(self, F, box_preds, anchors, class_ids, class_scores):\n\n if self._decode_number > 0:\n cls_scores_argmax = F.argmax(class_scores, axis=-1) # (batch, all feature number)\n cls_scores_argsort = F.argsort(cls_scores_argmax, axis=1, is_ascend=False)\n cls_scores_argsort = F.slice_axis(cls_scores_argsort, axis=-1, begin=0,\n end=self._decode_number) # (batch, self._decode_number)\n class_ids = F.take(class_ids, cls_scores_argsort, axis=1)[0]\n class_scores = F.take(class_scores, cls_scores_argsort, axis=1)[0]\n box_preds = F.take(box_preds, cls_scores_argsort, axis=1)[0]\n anchors = F.take(anchors, cls_scores_argsort, axis=1)[0]\n return class_ids, class_scores, box_preds, anchors\n else:\n return class_ids, class_scores, box_preds, anchors\n\n\n# test\nif __name__ == \"__main__\":\n from core import RetinaNet, RetinaTrainTransform, DetectionDataset\n import os\n\n input_size = (512, 512)\n root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))\n transform = RetinaTrainTransform(input_size[0], input_size[1], make_target=False)\n dataset = DetectionDataset(path=os.path.join(root, 'Dataset', 'train'), transform=transform)\n num_classes = dataset.num_class\n\n image, label, _, _, _ = dataset[0]\n label = mx.nd.array(label)\n\n net = RetinaNet(version=18,\n input_size=input_size,\n anchor_sizes=[32, 64, 128, 256, 512],\n anchor_size_ratios=[1, pow(2, 1 / 3), pow(2, 2 / 3)],\n anchor_aspect_ratios=[0.5, 1, 2],\n num_classes=num_classes, # foreground만\n pretrained=False,\n pretrained_path=os.path.join(root, \"modelparam\"),\n anchor_box_offset=(0.5, 0.5),\n anchor_box_clip=True,\n ctx=mx.cpu())\n\n net.hybridize(active=True, static_alloc=True, static_shape=True)\n\n # batch 형태로 만들기\n image = image.expand_dims(axis=0)\n cls_preds, box_preds, anchors = net(image)\n\n boxdecoder = BoxDecoder(stds=(0.1, 0.1, 0.2, 0.2), means=(0., 0., 0., 0.))\n # classdecoder = ClassMDecoder(num_classes=num_classes, thresh=0.01, from_sigmoid=False)\n classdecoder = ClassMPDecoder(num_classes=num_classes, thresh=0.05, from_sigmoid=False)\n box_predictions = boxdecoder(box_preds, anchors)\n class_ids, class_scores = classdecoder(cls_preds)\n\n print(f\"class id shape : {class_ids.shape}\")\n print(f\"class scores shape : {class_scores.shape}\")\n print(f\"box predictions shape : {box_predictions.shape}\")\n '''\n class id shape : (1, 49104, 5)\n class scores shape : (1, 49104, 5)\n box predictions shape : (1, 49104, 4)\n '''\n","sub_path":"RETINA/core/utils/dataprocessing/predictFunction/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"231194712","text":"#!/usr/bin/env python\n\nimport CGIHTTPServer\nimport BaseHTTPServer\n\n# - - - for local testing - - -\n\nif __name__ == \"__main__\":\n server = BaseHTTPServer.HTTPServer\n handler = CGIHTTPServer.CGIHTTPRequestHandler\n server_address = (\"\", 8000)\n handler.cgi_directories = [\"/\"]\n httpd = server(server_address, handler)\n httpd.serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"628789789","text":"#Funckije v pomoč:\n\ndef unikati(s):\n prazen_seznam = []\n\n for i in s: # for i in s pomeni, da se i premika po vrednostih seznama s, NE PO INDEKSIH!\n if i not in prazen_seznam:\n prazen_seznam.append(i)\n\n return prazen_seznam\n\ndef izloci_besedo(beseda):\n for i in beseda:\n if not beseda[0].isalnum():\n beseda = beseda[1:]\n else:\n break\n\n od_zadaj = len(beseda) - 1 #od_zadaj je stevec iz desne proti levi ki se premika po besedi, -1 ker se indeksiranje zacne z 0\n # in konca z eno manj kot je dejansko dolg niz\n while od_zadaj >= 0:\n if not beseda[od_zadaj].isalnum():\n beseda = beseda[:od_zadaj]\n else:\n break\n od_zadaj-=1\n\n return beseda\n\ndef se_zacne_z(tvit, c):\n prazen_seznam = []\n rezultat = []\n prazen_seznam = tvit.split(\" \")\n for s in prazen_seznam:\n if s[0] == c:\n rezultat.append(izloci_besedo(s))\n\n return rezultat\n\ndef zberi_se_zacne_z(tviti, c):\n rez = []\n for i in tviti:\n rez.extend(se_zacne_z(i,c)) # extend zdruzi dva seznama, ne uporabljaj \"append\"!!!\n\n return unikati(rez)\n\ndef vse_afne(tviti):\n return zberi_se_zacne_z(tviti, \"@\")\n\ndef vsi_hashtagi(tviti):\n return zberi_se_zacne_z(tviti, \"#\")\n\n############################\n\ndef get_tviti(tviti):\n seznam_tvitov = []\n for tvit in tviti:\n seznam_tvitov.append(tvit)\n\n return seznam_tvitov\n\n############################\n\ndef besedilo(tvit):\n razlom = tvit.split(\": \", 1)\n return razlom[1]\n\ndef avtor(tvit):\n razlom = tvit.split(\": \", 1)\n return razlom[0]\n\ndef zadnji_tvit(tviti):\n prazen_slovar = collections.defaultdict(str)\n\n for tvit in tviti:\n besedilo_tvita = besedilo(tvit)\n avtor_tvita = avtor(tvit)\n prazen_slovar[avtor_tvita] = besedilo_tvita\n\n return prazen_slovar\n\ndef prvi_tvit(tviti):\n prazen_slovar = collections.defaultdict(str)\n\n for tvit in tviti:\n besedilo_tvita = besedilo(tvit)\n avtor_tvita = avtor(tvit)\n\n if prazen_slovar[avtor_tvita] == \"\":\n prazen_slovar[avtor_tvita] = besedilo_tvita\n\n return prazen_slovar\n\ndef prestej_tvite(tviti):\n prazen_slovar = collections.defaultdict(int)\n\n for tvit in tviti:\n besedilo_tvita = besedilo(tvit)\n avtor_tvita = avtor(tvit)\n prazen_slovar[avtor_tvita] += 1\n\n return prazen_slovar\n\ndef omembe(tviti):\n prazen_slovar = collections.defaultdict(list)\n\n for tvit in tviti:\n avtor_tvita = avtor(tvit)\n prazen_slovar[avtor_tvita].extend(se_zacne_z(tvit, \"@\"))\n\n return prazen_slovar\n\ndef neomembe(ime, omembe):\n prazen_slovar = []\n for oseba, vrednost in omembe.items():\n if oseba not in omembe[ime] and oseba != ime:\n prazen_slovar.append(oseba)\n\n return prazen_slovar\n\ndef se_poznata(ime1, ime2, omembe):\n for oseba, vrednost in omembe.items():\n if oseba == ime1 and ime2 in vrednost:\n return True\n elif oseba == ime2 and ime1 in vrednost:\n return True\n\n return False\n\ndef hashtagi(tviti):\n prazen_slovar = collections.defaultdict(list)\n seznam_hashtagov = vsi_hashtagi(tviti)\n for hash in seznam_hashtagov:\n for tvit in tviti:\n for split in se_zacne_z(tvit, \"#\"):\n if split == hash:\n prazen_slovar[hash].append(avtor(tvit))\n\n for kljuc, vrednost in prazen_slovar.items():\n prazen_slovar[kljuc] = sorted(vrednost)\n\n return prazen_slovar\n\n\n","sub_path":"code/batch-1/vse-naloge-brez-testov/DN6-M-84.py","file_name":"DN6-M-84.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"510063930","text":"import sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\n\nengine = create_engine(\"mysql+pymysql://brucy:brucy_getGoodJob@localhost/db_playground\", encoding='utf-8') # echo=True will print out detailed process\n\nBase = declarative_base() # generate orm base class\n\nclass Students(Base):\n __tablename__ = 'student'\n id = Column(Integer, primary_key = True)\n name = Column(String(32), nullable = False)\n register_date = Column(Date, nullable = False)\n\n def __repr__(self):\n return \"<%s name: %s>\" % (self.id, self.name)\n\n\nclass StudyRecord(Base):\n __tablename__ = 'study_record'\n id = Column(Integer, primary_key = True)\n day = Column(Integer, nullable = False)\n status = Column(String(32))\n stu_id = Column(Integer, ForeignKey('student.id'))\n\n student = relationship(\"Students\", backref = \"my_classes\")\n def __repr__(self):\n return \"<%s Day: %s Status: %s>\" % (self.student.name, self.day, self.status)\n\nBase.metadata.create_all(engine)\n\n###################### Insert Data #####################\nSession_class = sessionmaker(bind=engine) # create database dialog: session class, the return type of sessionmaker is class not instance\n\nSession = Session_class() # generate session instance, similar as cursor in pymysql\n\n# s1 = Students(name = \"Brucy\", register_date = '2017-05-26')\n# s2 = Students(name = \"Bella\", register_date = '2017-04-26')\n# s3 = Students(name = \"Bacon\", register_date = '2017-12-26')\n# s4 = Students(name = \"Cheese\", register_date = '2017-02-26')\n\n# study_obj1 = StudyRecord(day = 1, status = 'Yes', stu_id = 1)\n# study_obj2 = StudyRecord(day = 2, status = 'No', stu_id = 1)\n# study_obj3 = StudyRecord(day = 3, status = 'Yes', stu_id = 1)\n# Session.add_all([s1, s2, s3, s4, study_obj1, study_obj2, study_obj3])\n\nstu_obj = Session.query(Students).filter(Students.name == 'Brucy').first()\nprint(stu_obj.my_classes)\n\n# Session.commit()","sub_path":"PY_DB/sqlAlchemy_foreignKey.py","file_name":"sqlAlchemy_foreignKey.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"476900029","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport random\r\nfrom linebot.models import *\r\n\r\ndef spotify_random():\r\n\turl = 'https://spotifycharts.com/regional/'\r\n\twebContent = requests.get(url)\r\n\twebContent.encoding = 'UTF-8'\r\n\r\n\tsoup = BeautifulSoup(webContent.text, 'html.parser')\r\n\tresult = []\r\n\r\n\tsongList = soup.select('table.chart-table tbody tr')\r\n\trandom.shuffle(songList)\r\n\tfor song in songList[:10]:\r\n\t\tsongLink = song.select('td a')[0]['href']\r\n\t\twebContent = requests.get(songLink)\r\n\r\n\t\ttemp = song.select('td')[3]\r\n\t\tsongName = temp.select('strong')[0].text\r\n\t\tartist = temp.select('span')[0].text[3:]\r\n\t\talbumArtLink = song.select('td')[0].select('img')[0]['src'].replace('ab67616d00004851','ab67616d00001e02')\r\n\t\tresult.append(CarouselColumn(\r\n thumbnail_image_url=albumArtLink,\r\n title=artist,\r\n text=songName,\r\n actions=[\r\n \tURIAction(\r\n\t\t\t\t\t\tlabel='Open on Spotify',\r\n\t\t\t\t\t\turi=songLink\r\n\t\t\t\t\t),\r\n\t\t\t\t\tMessageAction(\r\n\t\t\t\t\t\tlabel='顯示歌手與歌名',\r\n\t\t\t\t\t\ttext='{} by {}'.format(songName,artist)\r\n\t\t\t\t\t)\r\n ]\r\n \t)\r\n )\r\n\r\n\treturn result\r\n\r\nif __name__ == '__main__':\r\n\tprint(spotify_random())","sub_path":"function/spotify_top_200.py","file_name":"spotify_top_200.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"406646011","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.preprocessing import sequence\nfrom keras.models import load_model\nimport numpy as np\nimport gensim\nimport pickle\n\nmodel = gensim.models.Word2Vec.load(\"model\")\nlstm_model = load_model('lstm.h5')\n\nresearcher_sentences = []\nsupporter_sentences = []\npurchaser_sentences = []\n\nsentences = []\n\nf = open('Test.txt', 'r')\nlines = f.readlines()\nlines_count = len(lines)\nright_count = 0\nwrong_count = 0\nX = []\nY = []\nY_test = []\nfor line in lines:\n line = line[:-1]\n sentence = line.split(\"\\t\")\n\n if sentence[-1] == \"0\":\n researcher_sentences.append(sentence)\n elif sentence[-1] == \"1\":\n supporter_sentences.append(sentence)\n elif sentence[-1] == \"2\":\n purchaser_sentences.append(sentence)\n\n sentences.append(sentence)\n\n vectors = []\n urls = sentence[:-1]\n url_count = len(urls)\n # print(\"url_count:%s\" % url_count)\n if url_count == 0:\n continue\n for url in urls:\n vectors.append(model[url])\n X.append(vectors)\n\n label = -1\n if sentence[-1] == \"0\":\n label = 0\n elif sentence[-1] == \"1\":\n label = 1\n elif sentence[-1] == \"2\":\n label = 2\n else:\n print(\"wrong\")\n Y.append(label)\n\n label1 = [0,0,0]\n if sentence[-1] == \"0\":\n label1 = [1,0,0]\n elif sentence[-1] == \"1\":\n label1 = [0,1,0]\n elif sentence[-1] == \"2\":\n label1 = [0,0,1]\n else:\n print(\"wrong\")\n Y_test.append(label1)\n # print(\"sum:%s\" % vector)\n # print(\"divide:%s\" % vector)\n # prediction = clf.predict(vector)\n # # print(prediction[0])\n # # print(sentence[-1])\n # if prediction[0] == sentence[-1]:\n # right_count += 1\n\nY_test.append(Y_test[0])\nY_test.append(Y_test[0])\nY_test.append(Y_test[0])\nY_test.append(Y_test[0])\nX.append(X[0])\nX.append(X[0])\nX.append(X[0])\nX.append(X[0])\nX_test = sequence.pad_sequences(X, maxlen=552)\nPx = lstm_model.predict(X_test)\nPx = Px[:-4]\n\nfor i, p in enumerate(Px):\n p_index = np.argmax(p)\n label = Y[i]\n if p_index == label:\n right_count += 1\n else:\n wrong_count += 1\n print(\"wrong:%s,p:%s,l:%s\" % (wrong_count,p_index,label))\n\nrecall_purchaser_count = 0\nprecision_purchaser_count = 0\nright_precision_purchaser_count = 0\nrecall_supporter_count = 0\nprecision_supporter_count = 0\nright_precision_supporter_count = 0\nrecall_researcher_count = 0\nprecision_researcher_count = 0\nright_precision_researcher_count = 0\nfor i, y in enumerate(Y[:-4]):\n # print(prediction[0])\n # print(sentence[-1])\n\n p_index = np.argmax(Px[i])\n if p_index == 0:\n precision_researcher_count += 1\n if y == 0:\n right_precision_researcher_count += 1\n\n if p_index == 1:\n precision_supporter_count += 1\n if y == 1:\n right_precision_supporter_count += 1\n\n if p_index == 2:\n precision_purchaser_count += 1\n if y == 2:\n right_precision_purchaser_count += 1\n \nscore = lstm_model.evaluate(X_test, Y_test, batch_size=32)\n\nprint(\"score:%s\" % score)\n\nprint(\"general:%s/%s\" % (right_count,len(Px)))\n\nprint(\"purchaser######\")\nprint(\"recall:%s/%s\" % (right_precision_purchaser_count,len(purchaser_sentences)))\nprint(\"precision:%s/%s\" % (right_precision_purchaser_count,precision_purchaser_count))\n\nprint(\"supporter######\")\nprint(\"recall:%s/%s\" % (right_precision_supporter_count,len(supporter_sentences)))\nprint(\"precision:%s/%s\" % 
(right_precision_supporter_count,precision_supporter_count))\n\nprint(\"researcher######\")\nprint(\"recall:%s/%s\" % (right_precision_researcher_count,len(researcher_sentences)))\nprint(\"precision:%s/%s\" % (right_precision_researcher_count,precision_researcher_count))\n\n\n# general:891/1256=73.7261146%\n# purchaser######\n# recall:73/228=31.578947368%\n# precision:73/179=51.428571429%\n# supporter######\n# recall:577/657=89.193302892%\n# precision:577/659=88.922610015%\n# researcher######\n# recall:241/371=74.393530997%\n# precision:241/406=62.02247191%","sub_path":"4.model-testing/lstmtest.py","file_name":"lstmtest.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"101290065","text":"import pandas as pd\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nimport numpy as np\nfrom datetime import datetime\n\n\ndef conv_perc(x):\n xv = x\n if \"%\" in str(xv):\n xv = int(re.sub(\"[^\\d+]+\", \"\", xv))/100\n val = xv\n elif str(xv) == \"?\":\n val = np.nan\n else:\n val = x\n return val\n\n\ndef date_col(x, i):\n try:\n datestring = re.findall(\"(\\d+)\", str(x))\n mlist = re.findall(\"([a-zA-z]+)\", x)\n if len(mlist) > 1:\n if i == 1:\n month = mlist[0]\n if i == 2:\n month = mlist[-1]\n if len(mlist) == 1:\n month = mlist[0]\n if i == 1:\n date = datestring[0]\n if i == 2 and len(datestring) == 1:\n date = datestring[0]\n if i == 2 and len(datestring) > 1:\n date = datestring[1]\n ds = str(date) + \"_\" + month + \"_2021\"\n ds_f = datetime.strptime(ds, \"%d_%b_%Y\")\n return(f\"{ds_f.day}/{ds_f.month}/{ds_f.year}\")\n except:\n return np.nan\n\n\ndef cycle_table(table):\n #Initiate table\n constructeddf = pd.read_html(table.get_attribute(\"outerHTML\"), header=0, skiprows=[1])[0]\n\n #Create Columns\n constructeddf[\"Crosstab\"] = np.nan\n constructeddf[\"url\"] = np.nan\n\n #Compile Regex\n ftype = re.compile(\"((\\.pdf)|(\\.xlsx))$\", re.MULTILINE)\n perc_re = re.compile(\"(?<=%)(.*?)(?=%)\", re.DOTALL)\n split = re.compile(\"(.+(?=on))|(\\d+)\")\n\n #Find Rows\n rows = table.find_elements_by_xpath(\".//tr\")\n\n #Other\n rowindex = 0\n\n #Cycler\n for row in rows:\n cells = row.find_elements_by_xpath(\".//td\")\n if len(cells) == 0:\n continue\n i = 1\n for cell in cells:\n if (i == 1):\n inner_text = cell.get_attribute(\"innerHTML\")\n soup = BeautifulSoup(inner_text, features=\"lxml\")\n if soup.get_text() == \"\":\n break\n url = cell.find_element_by_xpath(\".//a\").get_attribute(\"href\")\n constructeddf.loc[rowindex, [\"url\"]] = str(url)\n crosstab = re.search(ftype, url)\n if crosstab != None:\n constructeddf.loc[rowindex, [\"Crosstab\"]] = \"Crosstab\"\n\n if i == 11: #Expand table\n text = cell.get_attribute(\"innerHTML\")\n soup = BeautifulSoup(text, features=\"lxml\")\n finds = re.findall(perc_re, soup.get_text())\n if len(finds) > 0:\n for find in finds:\n vals = re.findall(split, find)\n party = vals[0][0].strip()\n perc = vals[1][1].strip()\n if party in constructeddf.columns:\n constructeddf.loc[rowindex, [party]] = int(perc)/100\n else:\n constructeddf[party] = np.nan\n constructeddf.loc[rowindex, [party]] = int(perc)/100\n i += 1\n rowindex += 1\n return constructeddf\n\n\nif __name__ == \"__main__\":\n\n print(\"Scraping polls...\")\n #Set Chrome Options\n opts = Options()\n opts.add_argument(\"--disable-extensions\")\n opts.add_argument(\"--disable-gpu\")\n opts.add_argument(\"--headless\")\n\n #Initiate Chrome\n driver = webdriver.Chrome(\"/mnt/sda1/chromedriver\", options=opts)\n print(\"Chrome initiated, scraping table\")\n # Once Chrome is started, ensure closure on error\n try:\n # Get table\n driver.get(\"https://en.wikipedia.org/wiki/Opinion_polling_for_the_next_United_Kingdom_general_election\")\n table = driver.find_element_by_xpath(\"//h3[contains(.,'2021')]/..//table//th[contains(.,'Pollster')]/../../..\")\n\n #Cycle\n constructeddf = cycle_table(table)\n driver.quit()\n\n except Exception as e:\n print(e)\n driver.quit()\n\n print(\"Table retrieved\")\n #Final Wrangles\n\n #Get rid of non-rows\n constructeddf = constructeddf[constructeddf['url'].notna()]\n\n #Convert dateranges\n constructeddf[\"StartDate\"] = 
constructeddf.apply(lambda x: date_col(x[\"Datesconducted\"], 1), axis=1)\n constructeddf[\"EndDate\"] = constructeddf.apply(lambda x: date_col(x[\"Datesconducted\"], 2), axis=1)\n\n #Convert percs to ints\n constructeddf = constructeddf.applymap(conv_perc)\n\n #Cosmetic stuff\n constructeddf = constructeddf.drop([\"Datesconducted\"], axis=1)\n constructeddf.rename(columns={'Others': 'Minor'}, inplace=True)\n constructeddf = constructeddf[['StartDate', 'EndDate'] + [c for c in constructeddf if c not in ['StartDate', 'EndDate']]]\n\n\n #Push polls\n constructeddf.to_csv(\"polls.csv\", index=False)\n print(\"Polls Built\")","sub_path":"Poll_tracker_model/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"628147198","text":"# Sertis Face Regconigtion Croping Coding Date 10/11/2020 By Panupong Suksuwan\n\"\"\"\nนำเข้า Lib ที่ใช้งาน \n- Numpy ไว้จัดการ Array จากการภาพเช่น Copy Image Array \n- cv2(OpenCV) Pre-trained Model สำหรับตวรจจับใบน้าโดยจะใช้ส่วนของ haarcascade_frontalcatface.xml \n- mathplotlib สำหรับการ plot กรอบใบหน้าบนภาพ\n- อาจจะมีเพิ่มเติมเช่ม os Path File หรือ Flask\n\"\"\"\nfrom flask import Flask, request\nfrom flask_cors import CORS,cross_origin\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')\n\n# นำเข้ารูปสำหรับการทำ Face Regconigtion Croping\nimage = cv2.imread('download.jpg')\n\n# สำรองภาพสีไว้สำหรับผลลัพธ์\nraw = np.copy(image)\n\n# แปลงภาพให้เป็นโทนสี RGB\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# สำรองภาพที่เป็นโทนสี RGB\nimage_copy = np.copy(image)\n\n# แปลงภาพให้เป็นโทนสี RGB เป็น Grayscale\ngray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n# ใช้ Pre-trained Model ในการ Dectect หน้า\nfaces = face_cascade.detectMultiScale(raw, 1.25, 6)\n\n# แสดงค่าจำนวนหน้าที่ Pre-trained Model พบ บนหน้า Console\nprint('Number of faces detected:', len(faces))\n\n# สร้างกรอบบนหน้าที่ Dectect เจอ\nface_crop = []\nfor f in faces:\n x, y, w, h = [ v for v in f ]\n cv2.rectangle(image_copy, (x,y), (x+w, y+h), (255,0,0), 3)\n\n # Crop ตามกรอบที่ Plot\n face_crop.append(raw[y:y+h, x:x+w])\n\nfor face in face_crop:\n cv2.imshow('face',face)\n cv2.waitKey(0)\n\n# Display the face crops With Gui\nfig = plt.figure(figsize = (9,9))\naxl = fig.add_subplot(111)\naxl.set_xticks([])\naxl.set_yticks([])\naxl.set_title(\"Largest Face Immage Cropped\")\n\n# จุด Return File\nfilename = 'Result.jpg'\ncv2.imwrite(filename, face)\n","sub_path":"Largest_Face_Crop.py","file_name":"Largest_Face_Crop.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"643362079","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys\nimport geograpy\nimport collections\nfrom geograpy import extraction\nfrom geotext import GeoText\nimport re\nfrom time import strptime\nfrom llr import utils as llrutils \n\n#Added from https://gist.github.com/onyxfish/322906#gistcomment-1701799\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\ndef flatten(l):\n for el in l:\n if isinstance(el, collections.Iterable) and not isinstance(el, basestring):\n for sub in flatten(el):\n yield sub\n else:\n yield el\n \ndef getPlaceET_fromText_NLTK(text):\n result = list()\n if not text:\n return filter(None, result) \n\n # You can now access all of the places found by the Extractor\n places = geograpy.get_place_context(text=text) \n for place in (places.countries + places.other): \n c = llrutils.getISO3166_1code(place)\n if not c:\n c = llrutils.getUNM49code(place)\n result.append(c)\n return filter(None, flatten(result))\n \ndef getPlaceET_fromText_GeoText(text):\n result = list()\n if not text:\n return filter(None, result) \n\n places = GeoText(text)\n \n for place in (places.countries):\n c = llrutils.getISO3166_1code(place)\n if not c:\n c = llrutils.getUNM49code(place)\n result.append(c)\n return filter(None, flatten(result))\n\ndef getLPOAC(label):\n return {\n \"Economic Growth\" : None,\n \"Governance, Conflict, and Humanitarian Assistance\" : None,\n \"Climate Change and Natural Resource Management\" : \"Land Use, Management & Investment\",\n \"Food Security\" : None,\n \"Gender Equality and Women's Empowerment\" : \"Access to Land & Tenure Security\",\n \"Responsible Land Based Investment\" : \"Land Use, Management & Investment\",\n \"Customary and Community Tenure\" : \"Access to Land & Tenure Security\",\n \"Marine Tenure and Coastal Resource Management\" : \"Land Use, Management & Investment\",\n \"Sustainable Urbanization\" : None\n }.get(label, None)\n\ndef getLPTheme(label):\n return {\n \"Economic Growth\" : \"Socio-Economic & Institutional Context\",\n \"Governance, Conflict, and Humanitarian Assistance\" : \"Land Conflicts\",\n \"Climate Change and Natural Resource Management\" : \"Land, Climate change & Environment\",\n \"Food Security\" : \"Land & Food Security\",\n \"Gender Equality and Women's Empowerment\" : \"Land & Gender\",\n \"Responsible Land Based Investment\" : \"Land & Investments\",\n \"Customary and Community Tenure\" : \"Indigenous & Community Land Rights\",\n \"Marine Tenure and Coastal Resource Management\" : None,\n \"Sustainable Urbanization\" : \"Urban Tenure\"\n }.get(label, None)\n\ndef getLPConcepts(label):\n return {\n \"Economic Growth\" : [\"development\", \"sustainable development\"],\n \"Governance, Conflict, and Humanitarian Assistance\" : [\"land conflicts\", \"land governance\"],\n \"Climate Change and Natural Resource Management\" : [\"climate change\", \"environment\", \"natural resources management\", \"sustainable land management\"],\n \"Food Security\" : [\"food security\"], \n \"Gender Equality and Women's Empowerment\" : [\"gender equity in access to land\", \"women\"], \n \"Responsible Land Based Investment\" : [\"land investments\"],\n \"Customary and Community Tenure\" : [\"customary tenure\", \"customary land rights\", \"local community\", \"community land rights\"],\n \"Marine Tenure and Coastal Resource Management\" : [\"coastal area\", \"land management\", \"sustainable land management\", \"land tenure systems\"],\n \"Sustainable Urbanization\" : [\"urban areas\", \"land development 
(urbanization)\", \"urbanization\", \"sustainable development\"]\n }.get(label, None)\n\ndef getPublisher(label):\n label_lower= label.lower()\n if \"flacso\" in label_lower and (\"quito\" in label_lower or \"ecuador\" in label_lower):\n label = \"Facultad Latinoamericana de Ciencias Sociales Ecuador\"\n\n if \"flacso\" in label_lower and (\"chile\" in label_lower):\n label = \"Facultad Latinoamericana de Ciencias Sociales Chile\"\n \n if \"flacso\" in label_lower and (\"argentina\" in label_lower):\n label = \"Facultad Latinoamericana de Ciencias Sociales Argentina\"\n \n if \"flacso\" in label_lower and (u\"méxico\" in label_lower):\n label = u\"Facultad Latinoamericana de Ciencias Sociales México\"\n\n if \"Colegio de Postgraduados\" in label:\n label = \"Colegio de Postgraduados\"\n\n if (\"Universidad de Buenos Aires\" in label) or (u\"Facultad de Ciencias Económicas, UBA\" in label):\n label = \"Universidad de Buenos Aires\"\n \n if \"Universidad Austral de Chile\" in label:\n label = \"Universidad Austral de Chile\"\n \n if u\"Universidad Andina Simón Bolívar\" in label:\n label = u\"Universidad Andina Simón Bolívar\"\n \n if u\"Pontificia Universidad Católica de Chile\" in label:\n label = u\"Pontificia Universidad Católica de Chile\"\n \n if (u\"UASLP\" in label) or (u\"Universidad Autónoma de San Luis Potosí\" in label):\n label = u\"Universidad Autónoma de San Luis Potosí\"\n \n if u\"Universidad Católica del Norte\" in label:\n label = u\"Universidad Católica del Norte\"\n \n if u\"Universidad del Pacífico\" in label:\n label = u\"Universidad del Pacífico\"\n \n if u\"Universidad Nacional de Cuyo\" in label:\n label = u\"Universidad Nacional de Cuyo\"\n \n if u\"Universidad del Rosario\" in label:\n label = u\"Universidad del Rosario\"\n \n if u\"CAAP\" in label:\n label = u\"Centro Andino de Acción Popular\"\n\n if u\"Universidad Nacional de Quilmes\" in label:\n label = u\"Universidad Nacional de Quilmes\"\n \n if u\"EAFIT\" in label:\n label = u\"Universidad EAFIT\"\n \n if u\"Universidad del Rosario\" in label:\n label = u\"Universidad del Rosario\"\n \n if (u\"Universidad Nacional de la Amazonia Peruana\" in label) or (u\"Universidad Nacional de la Amazonía Peruana\" in label) or (u\"Universidad de la Amazonía Peruana\" in label):\n label = u\"Universidad Nacional de la Amazonía Peruana\"\n \n if u\"Universidad de Lima\" in label:\n label = u\"Universidad de Lima\"\n \n if u\"UAEMEX\" in label:\n label = u\"Universidad Autónoma del Estado de México\"\n\n if (u\"Pontificia Universidad Javeriana\" in label) or (u\"Facultad de Estudios Ambientales y Rurales\" in label):\n label = u\"Pontificia Universidad Javeriana\"\n \n if u\"Universidad Nacional (Costa Rica)\" in label:\n label = u\"Universidad Nacional de Costa Rica\"\n\n if u\"Universidad Sergio Arboleda\" in label:\n label = u\"Universidad Sergio Arboleda\"\n \n\n#CLEAN \n if u\"Maestría en\" in label:\n label = None\n elif \"Facultad\" in label and (u\"Facultad Latinoamericana de Ciencias Sociales\" not in label):\n label = None\n elif \"Escuela de\" in label:\n label = None\n \n return {\n \"University of Costa Rica\" : \"Universidad de Costa Rica\",\n \"Costa Rica University\" : \"Universidad de Costa Rica\",\n \"Agenda Ambiental\": None,\n u\"Perú\": None,\n \"Humanidades y Ciencias Sociales\": None\n\n }.get(label, label)\n\n\n\n# Target: 2011-12-13 (YYYY-MM-DD) (Drupal)\n\ndef clean_date (date):\n \n dateArray = date.strip().lower().split(\"-\")\n \n if len(dateArray) == 1: #YYYY\n pattern_YYYY = 
re.compile(\"^[0-9]{4}$\") #YYYY\n if re.search(pattern_YYYY, date):\n date = date+\"-12-31\"\n\n if len(dateArray) == 2:\n year = dateArray[0]\n month = dateArray[1]\n if month in [\"01\",\"03\",\"05\", \"07\", \"08\", \"10\", \"12\"]:\n date = year+\"-\"+month+\"-31\"\n elif month in [\"02\"]:\n date = year+\"-\"+month+\"-28\"\n else:\n date = year+\"-\"+month+\"-30\"\n \n if len(dateArray) == 3:\n year = dateArray[0]\n month = dateArray[1]\n day = dateArray[2].split(\"-\")[0] \n date = year+\"-\"+month+\"-\"+day\n \n return date\n\ndef getLLR_type(label):\n return {\n \"article\": \"Journal Articles & Books\",\n \"report\": \"Reports & Research\",\n \"masterThesis\": \"Reports & Research\", \n \"doctoralThesis\": \"Reports & Research\"\n }[label]","sub_path":"landlibrary/importers/LAReferencia/python-scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"355254221","text":"\"\"\"Heuristics for Best First Search.\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import List\nimport numpy as np\nimport networkx as nx\nfrom utils import println\nimport copy\n\n\ndef manha_dist(a, b):\n \"\"\"Measure Manhattan distance.\"\"\"\n (x1, y1) = a\n (x2, y2) = b\n return abs(x1 - x2) + abs(y1 - y2)\n\n\nclass Heuristics(ABC):\n \"\"\"Class for defining heuristics.\"\"\"\n\n @abstractmethod\n def __call__(self, states: List):\n \"\"\"Call method, compute `heuristics` of List `states`.\"\"\"\n return\n\n\nclass EasyRule(Heuristics):\n \"\"\"Simple heuristics.\n\n Computes Manhattan distance for:\n * Boxes to goals.\n * Agents to boxes.\n \"\"\"\n\n def __call__(self, states: List):\n \"\"\"Calculate heuristic for states in place.\"\"\"\n if type(states) is not list:\n states = [states]\n if len(states) == 0:\n return None\n\n for state in states:\n box_goal_cost = 0\n agt_box_cost = 0\n agt_box_costs = []\n for key in state.getGoalKeys():\n goal_params = state.getGoalsByKey(key)\n box_params = state.getBoxesByKey(key)\n # maybe add some temporary costs here for each key\n\n # find every position of goals and boxes with the given key\n for goal_pos, goal_color in goal_params:\n box_goal_costs = []\n for box_pos, _ in box_params:\n # only take agents with the same color as goalColor\n if goal_color in state.agentColor:\n agent_keys = state.getAgentsByColor(goal_color)\n\n if manha_dist(goal_pos, box_pos) == 0:\n continue\n\n for agent_key in agent_keys:\n agentPos = state.getAgentsByKey(agent_key)[0][0]\n agt_box_costs.append(manha_dist(agentPos, box_pos))\n\n box_goal_costs.append(manha_dist(box_pos, goal_pos))\n\n if len(box_goal_costs) > 0:\n box_goal_cost += min(box_goal_costs)\n if len(agt_box_costs) > 0:\n agt_box_cost += sum(agt_box_costs)\n\n state.h = box_goal_cost + agt_box_cost\n state.f = state.h * 5 + state.g\n\n\nclass WeightedRule(Heuristics):\n \"\"\"Weighted heuristics.\n\n The distance from a box to a box is weigthed more (used for communication).\n Computes Manhattan distance for:\n * Boxes to goals.\n * Agents to boxes.\n \"\"\"\n\n def __init__(self, weight: str):\n \"\"\"Initialize object with state and `string` of box to weight more.\"\"\"\n self.weight = weight\n\n def __call__(self, states: List):\n \"\"\"Calculate heuristic for states in place.\"\"\"\n if type(states) is not list:\n states = [states]\n if len(states) == 0:\n return None\n\n for state in states:\n box_goal_cost = 0\n agt_box_cost = 0\n agt_box_costs = []\n for key in state.getGoalKeys():\n goal_params = state.getGoalsByKey(key)\n box_params = state.getBoxesByKey(key)\n # maybe add some temporary costs here for each key\n\n # find every position of goals and boxes with the given key\n for goal_pos, goal_color in goal_params:\n box_goal_costs = []\n for box_pos, _ in box_params:\n # only take agents with the same color as goalColor\n agent_keys = state.getAgentsByColor(goal_color)\n\n if manha_dist(goal_pos, box_pos) == 0:\n continue\n\n for agent_key in agent_keys:\n agentPos = state.getAgentsByKey(agent_key)[0][0]\n agt_box_costs.append(manha_dist(agentPos, box_pos))\n\n box_goal_costs.append(manha_dist(box_pos, goal_pos))\n\n if len(box_goal_costs) > 0:\n box_cost = min(box_goal_costs)\n if key.lower() == self.weight:\n box_cost *= 10\n box_goal_cost += box_cost\n if len(agt_box_costs) > 0:\n agt_box_cost += sum(agt_box_costs)\n\n state.h = box_goal_cost + agt_box_cost\n state.f = state.h * 5 + state.g\n\n\nclass GoAway(Heuristics):\n 
\"\"\"GoAway heuristics.\n\n The distance from a box to a box is weigthed more (used for communication).\n Computes Manhattan distance for:\n * Boxes to goals.\n * Agents to boxes.\n \"\"\"\n\n def __call__(self, states: List):\n \"\"\"Calculate heuristic for states in place.\"\"\"\n if type(states) is not list:\n states = [states]\n if len(states) == 0:\n return None\n\n for state in states:\n box_goal_cost = 0\n agt_box_cost = 0\n agt_box_costs = []\n for key in state.getGoalKeys():\n goal_params = state.getGoalsByKey(key)\n box_params = state.getBoxesByKey(key)\n # maybe add some temporary costs here for each key\n\n # find every position of goals and boxes with the given key\n for goal_pos, goal_color in goal_params:\n box_goal_costs = []\n for box_pos, _ in box_params:\n # only take agents with the same color as goalColor\n agent_keys = state.agents.keys()\n\n if manha_dist(goal_pos, box_pos) == 0:\n continue\n\n for agent_key in agent_keys:\n agentPos = state.getAgentsByKey(agent_key)[0][0]\n agt_box_costs.append(-10 * manha_dist(agentPos, box_pos))\n\n box_goal_costs.append(manha_dist(box_pos, goal_pos))\n\n box_goal_cost += min(box_goal_costs)\n if len(agt_box_costs) > 0:\n agt_box_cost += sum(agt_box_costs)\n\n state.h = box_goal_cost + agt_box_cost\n state.f = state.h * 25\n\n\nclass dGraph(Heuristics):\n def __init__(self, state: np.array):\n \"\"\"Initialize object by building the VIS(V,E) graph.\"\"\"\n self.dirs = [\n np.array([0, 1]),\n np.array([1, 0]),\n np.array([0, -1]),\n np.array([-1, 0]),\n np.array([0, 1]),\n np.array([1, 0]),\n np.array([0, -1]),\n np.array([-1, 0]),\n ]\n # self.weight = weight\n self.cornerSet = []\n self.map = state.map\n self.uniqueCorners = set()\n self.poses = []\n self.graph = self.build_graph(state.map)\n self.boxes = {}\n\n # self.dir = {\"N\": (-1, 0), \"E\": (0, 1), \"S\": (1, 0), \"W\": (0, -1)}\n\n def build_graph(self, map: np.array) -> List:\n explored = set()\n\n # add boundry wall\n rows, cols = map.shape\n for col in range(0, cols):\n explored.add(tuple([0, col]))\n explored.add(tuple([rows - 1, col]))\n for row in range(0, rows):\n explored.add(tuple([row, 0]))\n # explored.add(tuple(np.array([row, cols - 1])))\n\n # find contours\n self.cornerSets = []\n println(explored)\n for col in range(1, cols):\n for row in range(1, rows):\n pos = np.array([row, col])\n if map[row, col] == \"+\":\n freePos = np.array([row, col - 1])\n # println(freePos, tuple(freePos) in explored)\n if map[row, col - 1] != \"+\" and tuple(pos) not in explored:\n # println(\"first spot\", freePos)\n corners = self.findEdges(freePos, map, explored)\n if corners:\n self.cornerSets.append(corners)\n\n G = self.generateGraph(copy.deepcopy(map))\n return G\n\n def draw(self, G):\n import matplotlib.pyplot as plt\n\n elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d[\"weight\"] > 0.5]\n esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d[\"weight\"] <= 0.5]\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, node_size=700)\n nx.draw_networkx_edges(G, pos, edgelist=elarge, width=6)\n nx.draw_networkx_edges(\n G, pos, edgelist=esmall, width=6, alpha=0.5, edge_color=\"b\", style=\"dashed\"\n )\n\n nx.draw_networkx_labels(G, pos, font_size=20, font_family=\"sans-serif\")\n\n plt.show()\n\n def generateGraph(self, map):\n cornerSets = self.cornerSets\n for corners in cornerSets:\n println(\"corner set\", corners)\n if type(corners) != list:\n corners = [corners]\n for corner in corners:\n self.uniqueCorners.add(corner)\n map[corner] = \"O\"\n 
println(map)\n\n self.uniqueCorners = list(self.uniqueCorners)\n\n # TODO fix order of corners\n # cornerSets[0] = cornerSets[0][-1::] + cornerSets[0][:-1:]\n\n G = nx.DiGraph()\n\n println(cornerSets)\n # for corner in self.uniqueCorners:\n # G.add_node(tuple(corner), pos=corner)\n\n for corners in cornerSets:\n # First position is always at the end\n for i in range(len(corners)):\n # println(corners[i - 1], corners[i])\n if not np.array_equal(corners[i - 1], corners[i]):\n if self.getValidKeypoint(map, corners[i - 1], corners[i], []) is True:\n\n corner1 = corners[i - 1]\n corner2 = corners[i]\n dist = manha_dist(\n (corner1[0], corner1[1]), (corner2[0], corner2[1])\n )\n println(corner1, corner2, dist)\n G.add_edge(corner1, corner2, weight=dist)\n G.add_edge(corner2, corner1, weight=dist)\n\n for corner1 in self.uniqueCorners:\n #G.add_node(tuple(corner), pos=corner)\n closestCorners = self.connectCornerSets(corner1, 4, 0.5)\n #println(corner1, closestCorners)\n for corner2 in closestCorners:\n dist = manha_dist((corner1[0], corner1[1]), (corner2[0], corner2[1]))\n G.add_edge(corner1, corner2, weight=dist)\n\n # if len(cornerSets) > 1:\n # raise Exception(\"####### No edge fusion implemented yet\")\n\n # println(cornerSets)\n # self.draw(G)\n\n # import sys; sys.exit()\n\n # for corners in cornerSets:\n # for i in range(len(corners) - 1):\n # if not np.array_equal(corners[i], corners[i + 1]):\n # corner1 = corners[i]\n # corner2 = corners[i + 1]\n # dist = manha_dist(\n # (corner1[0], corner1[1]), (corner2[0], corner2[1])\n # )\n # println(corner1, corner2, dist)\n # G.add_edge(corner1, corner2, weight=dist)\n # G.add_edge(corner2, corner1, weight=dist)\n # pass\n del map\n return G\n\n def checkAndAddCorner(self, map, corners, cornerPos):\n if map[tuple(cornerPos)] == \"+\":\n return False\n corners.append(tuple(cornerPos))\n return True\n\n def addCorner(self, map, newPos, pos, dir, prevDir, explored, corners):\n if map[tuple(newPos)] != \"+\" and tuple(newPos) not in explored:\n # tempExplored.add(tuple(newPos))\n cornerType = dir - prevDir\n if cornerType == -1:\n # println(pos, cornerType)\n self.checkAndAddCorner(map, corners, pos)\n\n # println(\"moving here:\", (newPos, prevDir, dir))\n return True\n elif map[tuple(newPos)] == \"+\":\n # println(\"wall added\", tuple(newPos))\n explored.add(tuple(newPos))\n return False\n\n def connectCornerSets(self, pos, points, percent=1):\n map = self.map\n corners = np.asarray(self.uniqueCorners)\n sortedKp = sorted(corners, key=lambda kp: np.linalg.norm(pos - kp, 1))\n validKps = []\n for i, kp in enumerate(sortedKp):\n if kp[0] != pos[0] or kp[1] != pos[1]:\n self.getValidKeypoint(map, pos, kp, validKps)\n if len(validKps) >= points:\n break\n if i > percent * len(sortedKp):\n break\n return list(validKps) # sort by age\n\n # return sorted(corners, key=lambda p: p)\n\n\n def findEdges(self, initPos, map, explored):\n dir = -1\n prevDir = dir + 1\n pos = initPos\n corners = []\n initDir = -999\n newPos = None\n isDone = False\n # TODO add a new corner here probably\n while not isDone:\n for j in range(0, 4):\n dir = dir + 1\n newPos = pos + self.dirs[dir]\n if self.addCorner(map, newPos, pos, dir, prevDir, explored, corners):\n prevDir = dir % 4 # 4 directions\n if np.array_equal(initPos, pos) and prevDir == initDir:\n isDone = True\n pos = newPos\n if initDir == -999:\n initDir = prevDir\n break\n dir = prevDir - 2\n\n return corners\n\n def getValidKeypoint(self, map, pos, kp, validKps):\n\n tempPos = np.array(pos)\n # 
println(\"keypoint:\", kp, pos)\n diff = np.array(tempPos) - kp\n dir = [0, 0]\n if diff[0] < 0:\n dir[0] = 1\n else:\n dir[0] = -1\n\n while tempPos[0] != kp[0]:\n tempPos[0] += dir[0]\n # print(tempPos)\n if map[tuple(tempPos)] == \"+\":\n return None\n elif tuple(tempPos) in validKps:\n return tempPos\n\n\n if diff[1] < 0:\n dir[1] = 1\n else:\n dir[1] = -1\n\n while tempPos[1] != kp[1]:\n tempPos[1] += dir[1]\n # println(tempPos)\n if map[tuple(tempPos)] == \"+\" or tuple(tempPos) in validKps:\n return None\n elif tuple(tempPos) in validKps:\n return tempPos\n\n # TODO, if it passes a corner point skip to next\n\n # println(\"best keypoint for pos\", pos,\"is:\", kp)\n validKps.append(tuple(kp))\n return True\n # return kp\n\n def findBestKeyPoint(self, pos, pos2):\n # TODO optimize this!\n # By nature of how the corners are generated the nearst point, if reachable\n # will always be reachable from all directions minimizing the distance\n # between the pos and keypoint\n map = self.map\n corners = np.asarray(self.uniqueCorners)\n # println(list([np.linalg.norm(pos - kp, 1), kp] for kp in corners))\n sortedKp = sorted(corners, key=lambda kp: np.linalg.norm(pos - kp, 1))\n # println(pos)\n validKps = []\n for kp in sortedKp:\n\n self.getValidKeypoint(map, pos, kp, validKps)\n if len(validKps) >= 4:\n break\n if not validKps:\n return [pos2]\n # println(validKps)\n if np.linalg.norm(np.asarray(validKps[0]) - np.asarray(pos)) >= np.linalg.norm(\n np.asarray(pos) - np.asarray(pos2)\n ):\n self.getValidKeypoint(map, pos, pos2, validKps)\n # println(validKps)\n\n return list(validKps) # sort by age\n\n # return sorted(corners, key=lambda p: p)\n\n def findPathPart(self, state, pathId, combId):\n # (State, pathIndex)\n # TODO when calculating new dijkstras maybe just look at the changing parts\n # TODO make it work for more boxes and goals\n # TODO test performace difference between deepcopy and copy\n\n # TODO TODO TODO only recalculate new parts of the shorest path. e.g.\n # calculate the distance from pos to Kp, and then simply calculate the distance from\n # Kp to Kp and add them together\n # maybe precalculate every keypoint?\n\n GTemp = copy.deepcopy(self.graph)\n # println(self.poses, self.poses[combId][pathId])\n startPos, endPos = self.poses[combId][pathId]\n currentPath = state.currentPath[combId][pathId]\n prevKeypoints = state.prevKeypoints[combId][pathId]\n # println(\" start\", startPos,endPos, state.currentPath, startId, endId)\n if (\n currentPath is not None\n ): # and #TODO find if inbetween two points! 
#G.has_node(state.currentPath[0]):\n # TODO don't re calculate the path\n # TODO do the same at the endPoint\n startKps, endKps = prevKeypoints\n # println(endKps, startKps)\n # println(state.currentPath)\n # println(startKp, boxKp, goalKp)\n # println(state.currentPath.index(startKp), boxKp, goalKp)\n\n if len(currentPath) > 2 and currentPath[1] == startPos:\n startKps = [currentPath[1], currentPath[2]]\n if len(currentPath) == 2:\n # println(endPos, startKps)\n startKps.append(endPos)\n\n # dist = manha_dist(startPos, state.currentPath[0])\n # GTemp.add_edge(startPos, state.currentPath[0], weight=dist)\n # dist = manha_dist(startPos, state.currentPath[1])\n # GTemp.add_edge(startPos, state.currentPath[1], weight=dist)\n\n for kp in startKps:\n dist = manha_dist(startPos, kp)\n GTemp.add_edge(startPos, kp, weight=dist)\n # GTemp.add_edge(startKp, startPos, weight=dist)\n # println(startKp, startPos)\n\n for kp in endKps:\n dist = manha_dist(endPos, kp)\n GTemp.add_edge(kp, endPos, weight=dist)\n\n # TODO do some magic for endPos\n # println(\"is neighbor\", startPos, startKp, endPos, boxKp, goalPos, goalKp)\n println(\"if\", startPos, startKps, endPos, endKps)\n self.draw(GTemp)\n length, newPath = nx.bidirectional_dijkstra(GTemp, startPos, endPos)\n\n # println(lengthBox, pathBox[1::], lengthGoal, pathGoal[2::])\n # println(startKp, startPos, boxKp, endPos, goalKp, goalPos)\n prevKeypoints = [startKps, endKps]\n\n # println(lengthBox,lengthGoal, pathBox, pathGoal)\n else:\n\n # println(startPos, endPos)\n startKps = self.findBestKeyPoint(startPos, endPos)\n endKps = self.findBestKeyPoint(endPos, startPos)\n # println(startKps, endKps)\n prevKeypoints = [startKps, endKps]\n # println(state.prevKeypoints, endKps)\n\n for kp in startKps:\n dist = manha_dist(startPos, kp)\n GTemp.add_edge(startPos, kp, weight=dist)\n\n for kp in endKps:\n dist = manha_dist(kp, endPos)\n GTemp.add_edge(kp, endPos, weight=dist)\n # GTemp.add_edge(endPos, endKp, weight=dist)\n\n # println(\"else\", startPos, startKps, endPos, endKps)\n # self.draw(GTemp)\n if nx.has_path(GTemp, startPos, endPos):\n length, newPath = nx.bidirectional_dijkstra(GTemp, startPos, endPos)\n else:\n return None\n del GTemp\n currentPath = newPath\n return length # path[1:-1]\n\n def initializeGraphAttributes(self, state, subGoal, i):\n self.poses[i] = subGoal\n if state.currentPath[i] is None:\n state.currentPath[i] = [None] * len(subGoal)\n state.prevKeypoints[i] = [None] * len(subGoal)\n\n def initializeGraphSizes(self, state, size):\n self.poses = [None] * size\n state.currentPath = [None] * size\n state.prevKeypoints = [None] * size\n\n def __call__(self, states: List):\n \"\"\"Calculate heuristic for states in place.\"\"\"\n if type(states) is not list:\n states = [states]\n if len(states) == 0:\n return None\n\n length = None\n for state in states:\n # Subproblems just have one agent\n agt_pos, color = list(state.agents.values())[0][0]\n length_boxes = 0\n length_goals = 0\n for goal, vals in state.goals.items():\n for v in vals:\n goal_pos, goal_color = v\n\n if color != goal_color:\n continue\n\n box_poses = [\n p_c[0][0]\n for box, p_c in state.boxes.items()\n if box.lower() == goal.lower()\n ]\n if any(goal_pos == box_pos for box_pos in box_poses):\n continue\n\n # (State, partsToSolve)\n this_boxes = []\n this_goals = []\n\n self.initializeGraphSizes(state, len(box_poses))\n\n for i, box_pos in enumerate(box_poses):\n if (goal, box_pos, agt_pos) in self.boxes:\n this_goal = self.boxes[(goal, box_pos, agt_pos)]\n h_box = 
this_goal[0]\n h_goal = this_goal[1]\n else:\n self.initializeGraphAttributes(\n state, [[agt_pos, box_pos], [box_pos, goal_pos]], i\n )\n h_box = self.findPathPart(state, 0, i)\n h_goal = self.findPathPart(state, 1, i)\n if h_box and h_goal:\n self.boxes[(goal, box_pos, agt_pos)] = h_box, h_goal\n\n # (State, partIndex)\n if h_box and h_goal:\n this_boxes.append(h_box)\n this_goals.append(h_goal)\n if this_boxes and this_goals:\n length_boxes += min(this_boxes)\n length_goals += sum(this_goals)\n\n length = length_boxes + length_goals * 2\n state.h = length\n state.f = state.h * 2 + state.g\n # println(state, state.h, state.g, state.f)\n","sub_path":"multi_sokoban/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":22563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"434160608","text":"import re\n# Работа со списками\n# Написать код для функций ниже\n# Проверка производится в функции main()\n\n# 00. Пример. Дан список (list) строк. Вернуть число - количество строк, у которых\n# 1. длина строки 2 и больше\n# 2. первый и последний символ одинаковые\n\ndef func00(words):\n count = 0\n for w in words:\n if len(w)>=2 and w[0]==w[-1]:\n count += 1\n return count\n\n\n# 01. Из списка строк вернуть список в отсортированном по алфавиту порядке, но строки \n# начинающиеся с числа (0-9) должны идти после строк, начинающихся с букв\n# Подсказка: можно создать два списка, отсортировать их по отдельности перед объединением\n\ndef func01(words):\n # здесь код и не забыть вернуть хоть что-то\n number_list = []\n word_list = []\n for word in words:\n if re.match(r'(\\d+\\w*)', word) is not None:\n number_list.append(word)\n else:\n word_list.append(word)\n return sorted(word_list) + sorted(number_list)\n\n\n# 02. Отсортировать по последнему\n# Дан список не пустых tuples, вернуть список, отсортированный по возрастанию\n# последнего элемента tuple\ndef func02(tuples):\n # здесь код и не забыть вернуть хоть что-то\n return sorted(tuples, key = lambda tup: tup[1])\n\n\n# используется для проверки, \ndef test(got, expected):\n if got == expected:\n prefix = ' OK '\n else:\n prefix = ' X '\n print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))\n\n\n# Запускает проверку\ndef main():\n print('func00')\n test(func00(['abba', 'xyz01', 'nn', 'y', '444']), 3)\n test(func00(['', 'a', 'ab', 'cvc', 'jj']), 2)\n test(func00(['rrr', 'db', 'pro', 'hello']), 1)\n\n print('func01')\n test(func01(['1aa', '2bb', 'axx', 'xzz', 'xaa']),\n ['axx', 'xaa', 'xzz', '1aa', '2bb'])\n test(func01(['ccc', 'bbb', '9aa', 'xcc', 'xaa']),\n ['bbb', 'ccc', 'xaa', 'xcc', '9aa'])\n test(func01(['mix', 'xyz', '6apple', 'xanadu', 'aardvark']),\n ['aardvark', 'mix', 'xanadu', 'xyz', '6apple'])\n\n print('func02')\n test(func02([(1, 3), (3, 2), (2, 1)]),\n [(2, 1), (3, 2), (1, 3)])\n test(func02([(2, 3), (1, 2), (3, 1)]),\n [(3, 1), (1, 2), (2, 3)])\n test(func02([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),\n [(2, 2), (1, 3), (3, 4, 5), (1, 7)])\n\nif __name__ == '__main__':\n main()\n","sub_path":"2018/01/01_Moldobaev.py","file_name":"01_Moldobaev.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"602030537","text":"visited = list()\n\n\ndef check_around(x, y, rows, columns, land):\n global visited\n my_size = 1\n\n # Upper left corner\n if x > 0 and y > 0 and not visited[x-1][y-1] and land[x-1][y-1] == 1:\n visited[x-1][y-1] = True\n my_size += check_around(x - 1, y - 1, rows, columns, land)\n\n # Upper same column\n if x > 0 and not visited[x-1][y] and land[x-1][y] == 1:\n visited[x - 1][y] = True\n my_size += check_around(x - 1, y, rows, columns, land)\n\n # Upper right corner\n if x > 0 and y < (columns-1) and not visited[x-1][y+1] and land[x-1][\n y+1] == 1:\n visited[x - 1][y + 1] = True\n my_size += check_around(x - 1, y + 1, rows, columns, land)\n\n # Same level left column\n if y > 0 and not visited[x][y-1] and land[x][y-1] == 1:\n visited[x][y - 1] = True\n my_size += check_around(x, y - 1, rows, columns, land)\n\n # Same level right column\n if y < (columns-1) and not visited[x][y+1] and land[x][y+1] == 1:\n visited[x][y + 1] = True\n my_size += check_around(x, y + 1, rows, columns, land)\n\n # Down left column\n if x < (rows-1) and y > 0 and not visited[x+1][y-1] and land[x+1][y-1] \\\n == 1:\n visited[x + 1][y - 1] = True\n my_size += check_around(x + 1, y - 1, rows, columns, land)\n\n # Down same column\n if x < (rows-1) and not visited[x+1][y] and land[x+1][y] == 1:\n visited[x + 1][y] = True\n my_size += check_around(x + 1, y, rows, columns, land)\n\n # Down right column\n if x < (rows-1) and y < (columns-1) and not visited[x+1][y+1] and land[\n x+1][y+1] == 1:\n visited[x + 1][y + 1] = True\n my_size += check_around(x + 1, y + 1, rows, columns, land)\n return my_size\n\n\ndef count_continents(land):\n global visited\n rows = len(land)\n if rows == 0:\n return 0\n columns = len(land[0])\n visited = [[False for j in range(0, columns)] for i in range(0, rows)]\n continent_sizes = list()\n for i in range(0, rows):\n for j in range(0, columns):\n if not visited[i][j] and land[i][j] == 1:\n visited[i][j] = True\n size = check_around(i, j, rows, columns, land)\n continent_sizes.append(size)\n return len(continent_sizes), continent_sizes\n\n\nif __name__ == '__main__':\n land = [\n [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0],\n [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]\n print(count_continents(land))\n","sub_path":"summer-of-code/week-01/wk1-hackathon-submissions/soc01h-cc-varsha-prabhu.py","file_name":"soc01h-cc-varsha-prabhu.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"559666053","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n\"\"\"\nAuthor: Dylan Payton taken from FeedbackLCA code\nPad data with ones for visualization\nOutputs:\n padded version of input\nArgs:\n data: np.ndarray\n\"\"\"\n\ndef pad_data(data):\n n = int(np.ceil(np.sqrt(data.shape[0])))\n padding = (((0, n ** 2 - data.shape[0]),\n (1, 1), (1, 1)) # add some space between filters\n + ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one)\n padded_data = np.pad(data, padding, mode=\"constant\", constant_values=1)\n # tile the filters into an image\n padded_data = padded_data.reshape((n, n) + padded_data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, padded_data.ndim + 1)))\n padded_data = padded_data.reshape((n * padded_data.shape[1], n * padded_data.shape[3]) + padded_data.shape[4:])\n return padded_data\n\n\ndef normalize_data(data):\n norm_data = data.squeeze()\n if np.max(np.abs(data)) > 0:\n norm_data = (data / np.max(np.abs(data))).squeeze()\n return norm_data\n\n\n\"\"\"\nAuthor: Dylan Payton taken from FeedbackLCA code\nDisplay input data as an image with reshaping\nOutputs:\n fig: index for figure call\n sub_axis: index for subplot call\n axis_image: index for imshow call\nInpus:\n data: np.ndarray of shape (height, width) or (n, height, width)\n normalize: [bool] indicating whether the data should be streched (normalized)\n This is recommended for dictionary plotting.\n title: string for title of figure\n\"\"\"\ndef display_data_tiled(data, normalize=False, title=\"\", prev_fig=None):\n \n #calculate mean of each picture of weights\n mean_list =[]\n for x in data:\n mean_list.append(np.linalg.norm(np.reshape(x,-1),ord=2))\n #mean_list.append(np.linalg.norm(np.reshape(x,-1)))\n \n #Rescale data \n mean_data = np.mean(data)\n min_data = np.amin(data)\n max_data = np.amax(data)\n data = (((data-min_data)/(max_data-min_data))*2)-1\n \n if normalize:\n data = normalize_data(data)\n if len(data.shape) >= 3:\n data = pad_data(data)\n \n fig = plt.figure(figsize=(10,10))\n \n sub_axis = fig.add_subplot(2,1,1) \n axis_image = sub_axis.imshow(data, \n cmap=\"Greys_r\",\n interpolation=\"none\")\n axis_image.set_clim(vmin=-1.0, vmax=1.0)\n # Turn off tick labels\n sub_axis.set_yticklabels([])\n sub_axis.set_xticklabels([])\n cbar = fig.colorbar(axis_image)\n sub_axis.tick_params(\n axis=\"both\",\n bottom=\"off\",\n top=\"off\",\n left=\"off\",\n right=\"off\") \n \n bar_chart = fig.add_subplot(2,1,2)\n bar_chart.bar(range(0, len(mean_list)), mean_list, edgecolor = 'black', color = 'black')\n\n #fig.subtitle(title, y=1.05)\n fig.canvas.draw()\n #plt.show()\n \n return (fig, sub_axis, axis_image)\n\n\n\"\"\"\nAuthor: Vasha DuTell\nPlot to visualize the tiling of the center RF of on and off cells separately.\nOutputs:\n Figure object with two tiling plots, one with on, and the other with off cells.\nArgs:\n data: np.ndarray or list of weights, each an individiaul neuron RF\n\"\"\"\ndef plotonoff(allws):\n\n #extract on center\n onws = np.mean(allws,axis=0)>0\n onws = allws[:,onws]\n #extract off center\n offws = np.mean(allws,axis=0)<0\n offws = allws[:,offws]\n #keep track of the circles\n oncircs = []\n offcircs = []\n\n for ws in allws:\n circ = (ws>(0.9*np.sign(np.mean(ws))))\n if(np.mean(ws)>0):\n oncircs.append(circ)\n else:\n offcircs.append(False==circ)\n\n #plot\n fig = plt.figure(figsize=(6,3.5))\n plt.subplot(1,2,1,title='On') \n oncolors = iter(plt.cm.jet(np.linspace(0,1,len(oncircs)))) \n for onc in oncircs: 
\n plt.contour(onc,[0.7],linewidths = 3,colors=[next(oncolors)])\n plt.xticks([])\n plt.yticks([])\n \n plt.subplot(1,2,2,title='Off')\n offcolors = iter(plt.cm.jet(np.linspace(0,1,len(offcircs)))) \n for ofc in offcircs:\n plt.contour(ofc,[0.7], linewidths = 3, colors=[next(offcolors)])\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n \n return(fig)\n\n\n\n\ndef save_plots(aec,\n activations,\n cost_evolution,\n wmean_evolution,\n inweights_evolution,\n outweights_evolution,\n images,\n recons,\n final_inweights,\n final_outweights,\n inbias_evolution,\n activation_evolution,\n gamma_evolution,\n gamma_assign_evolution):\n \n savefolder = aec.params['savefolder']\n\n #Save our final weights\n inweights_evolution_r = np.rollaxis(np.reshape(inweights_evolution,\n (len(inweights_evolution),\n aec.params['imxlen'],\n aec.params['imylen'],\n aec.params['nneurons'])),3,1)\n (f,sa,ai) = display_data_tiled(inweights_evolution_r[-1], normalize=True, title=\"final_in_weights\", prev_fig=None);\n f.savefig(savefolder+'inweights_final.png')\n plt.close() \n \n outweights_evolution_r = np.reshape(outweights_evolution,\n (len(outweights_evolution),\n aec.params['nneurons'],\n aec.params['imxlen'],\n aec.params['imylen'])) #no rollaxis needed b/c shape is already nnuerons in pos 1.\n \n (f,sa,ai) = display_data_tiled(outweights_evolution_r[-1], normalize=True, title=\"final_out_weights\", prev_fig=None);\n f.savefig(savefolder+'outweights_final.png')\n plt.close()\n\n #save evolving weights\n for i in range(len(inweights_evolution_r)):\n (f,sa,ai) = display_data_tiled(inweights_evolution_r[i], normalize=True,title=\"inweights_evolving\", prev_fig=None);\n f.savefig(savefolder+'/inweights_evolution_'+str(i)+'.png')\n plt.close()\n \n (f,sa,ai) = display_data_tiled(outweights_evolution_r[i], normalize=True,title=\"outweights_evolving\", prev_fig=None);\n f.savefig(savefolder+'/outweights_evolution_'+str(i)+'.png')\n plt.close()\n \n #save plot of activations\n f8 = plt.figure(figsize=(6,6))\n plt.plot(activations)\n plt.title('Activations')\n f8.savefig(savefolder+'/activations.png') \n plt.close()\n \n #save weights and cost evolution\n f2 = plt.figure(figsize=(6,6))\n plt.subplot(2,1,1,title='Weights_Mean')\n plt.plot(wmean_evolution)\n plt.subplot(2,1,2,title='Cost')\n plt.plot(cost_evolution)\n #plt.plot(cost_evolution/2)\n plt.tight_layout()\n f2.savefig(savefolder+'/cost_weights.png') \n plt.close()\n \n #show an example image and reconstruction from the last iteration of learning\n patchnum = 3\n plots = 4\n f3 = plt.figure()\n for i in range(plots):\n plt.subplot(plots,2,2*i+1)#,title='Patch')\n plt.imshow(images[-1][patchnum+i],cmap='gray',interpolation='none')\n plt.colorbar()\n plt.axis('off')\n\n plt.subplot(plots,2,2*i+2)#,title='Recon')\n plt.imshow(recons[-1][patchnum+i],cmap='gray',interpolation='none')\n plt.colorbar()\n plt.axis('off')\n\n plt.tight_layout()\n f3.savefig(savefolder+'/reconstruction.png') \n plt.close() \n \n #save plots of on and off tiling\n f4 = plotonoff(inweights_evolution_r[-1]);\n f4.savefig(savefolder+'/final_in_on_off_RFs.png') \n plt.close()\n \n #save plots of on and off tiling\n f5 = plotonoff(outweights_evolution_r[-1]);\n f5.savefig(savefolder+'/final_out_on_off_RFs.png') \n plt.close()\n \n \n #save plots of activation and gamma\n for i in range(len(activation_evolution)):\n f6 = plt.figure()\n plt.bar(range(0, len(activation_evolution[i])), activation_evolution[i], edgecolor = 'black', color = 'black')\n 
f6.savefig(savefolder+'/activation_'+str(i)+'.png')\n plt.close()\n for i in range(len(gamma_evolution)):\n f7 = plt.figure()\n plt.bar(range(0, len(gamma_evolution[i])), gamma_evolution[i], edgecolor = 'black', color = 'black')\n f7.savefig(savefolder+'/gamma_'+str(i)+'.png')\n plt.close()\n for i in range(len(gamma_assign_evolution)):\n f8 = plt.figure()\n plt.bar(range(0, len(gamma_assign_evolution[i])), gamma_assign_evolution[i], edgecolor = 'black', color = 'black')\n f8.savefig(savefolder+'/gamma_assign'+str(i)+'.png')\n plt.close()\n for i in range(len(inbias_evolution)):\n f9 = plt.figure()\n plt.bar(range(0, len(inbias_evolution[i])), inbias_evolution[i], edgecolor = 'black', color = 'black')\n f9.savefig(savefolder+'/inbias_'+str(i)+'.png')\n plt.close()\n \n ","sub_path":"utils/plotutils.py","file_name":"plotutils.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"399274903","text":"import tweepy\nimport sys\nimport sqlite3\nfrom sqlite3 import Error\nimport time\n\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\n\n# http://www.sqlitetutorial.net/sqlite-python/insert/\n\ndef create_connection(db_file):\n \"\"\"Create a database connection to the SQLite database\n param db_file: database file for database\n return: connection object or None\"\"\"\n\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\ndef write_articles_by_screen_name(conn, t_screen_name):\n try:\n t_sql = '''INSERT INTO main_articles(article_id, author, title , source_url) VALUES(?, ?, ? ,?)'''\n\n cur = conn.cursor()\n cur.execute(t_sql, t_screen_name)\n except Error as e:\n print(e)\n return cur.lastrowid\n\ndef link_comments_to_articles(conn):\n cursor = conn.execute(\"UPDATE main_comments SET article_id=(SELECT article_id FROM main_articles WHERE author = main_comments.screen_name);\")\n conn.commit()\n\n\ndef build_articles_by_screen_name(conn):\n cursor = conn.execute(\"select distinct user_screen_name, user_name from main_tweets\")\n i = 1\n for row in cursor:\n user_url = 'https://twitter.com/'+row[0]\n article_payload = (i, row[0], row[0], user_url)\n recId = write_articles_by_screen_name(conn, article_payload)\n i +=1\n conn.commit()\n return True\n\n\ndef write_comments_by_screen_name(conn, t_tweet):\n #print(t_tweet)\n t_sql = '''INSERT INTO main_comments(screen_name, comment_raw) VALUES(?, ?)'''\n try:\n cur = conn.cursor()\n cur.execute(t_sql, t_tweet)\n except Error as e:\n print(e)\n return cur.lastrowid\n\n\ndef build_comments_by_screen_name(conn):\n cursor = conn.execute(\"select distinct id_str, tweet_id, tweet_text, user_screen_name, topic \"\n \"from main_tweets order by user_screen_name\")\n master_comments = {}\n current_screen_name = 'NULL'\n work_string = ''\n i = 0\n for row in cursor:\n i += 1\n if current_screen_name == row[3]:\n work_string = work_string + ' |||||t' + row[2].replace('\\n', '').replace('\\r', '')\n else:\n master_comments[current_screen_name] = work_string\n work_string = ''\n work_string = row[2].replace('\\n', '').replace('\\r', '') + '\\t'\n current_screen_name = row[3]\n return master_comments\n\ndef clean_comments(conn):\n cursor = conn.execute(\"update main_comments set comment_clean = replace(replace(comment_raw,'#',''),'@','')\")\n conn.commit()\n\ndef db_create_tweet(conn, t_tweet):\n print(t_tweet)\n t_sql = '''INSERT INTO main_tweets(id_str, tweet_text, user_screen_name, created_at, topic) VALUES(?, ?, ? 
,?, ?)'''\n cur = conn.cursor()\n cur.execute(t_sql, t_tweet)\n return cur.lastrowid\n\ndef hashtag_list():\n hashtags = {'#democrat': 100, \"#danger\": 100, '#republican': 100, '#breaking': 100, '#china': 100, '#russia': 100,\n '#healthinsurance': 100, '#fraud': 100, '#surveillance': 100, '#measles': 100, '#felony': 100,\n '#drug': 100, '#mueller': 100, '#arrest': 100, '#criminal': 100, '#police': 100,\n '@CNN': 100, '#Foxnews': 100, '#shooting': 1000, '@realdonaldtrump': 1500, '#terrorism': 1500,\n '#fishing': 1500, '#python': 1500, '#UIUC': 1500, \"#buildawall\": 1000, \"@OCSheriff\": 750,\n \"#illegal\": 100, '#hate': 1000, '@elonmusk': 100, '#mexico': 100, '#buildthewall': 100,\n '#teens': 100, '#coursera': 100, '@speakerpelosi': 100, '#victim': 100, '#FBI': 100,\n '#disneyland': 100, '#stocks': 100, '#CIA': 100, '#FBI': 100, '#CBP': 100,\n '#MAGA': 100, '#OCREgister': 100, '#school': 100, '#california': 100, '#mexico': 100,\n '#healthcare': 100, '#april': 100, '#endgame': 100, '#student': 100, '#theif': 100,\n '#SB54': 100, \"#losangeles\": 100, \"#world\": 100, \"#UN\": 100, \"#newyork\": 100,\n '#hacker': 100, '#may': 100, '#zombie': 100, '#today': 100, '#GOT': 100,\n '@wapo': 100, \"#nytimes\": 100, '#HBPD': 100, \"#google\": 100, \"#rally\": 100,\n '#opioids': 100, '#HIV': 100, '#religion': 100, '#trending': 100, '@WSJ': 100, '@cnnpolitics': 100,\n '#fakenews': 100, '#media': 100}\n clear_hashtags_from_db(conn)\n for hashtag, last_id in hashtags.items():\n hashtag_string = (hashtag, last_id)\n write_hashtag_to_db(conn, hashtag_string)\n\n return hashtags\n\ndef clear_hashtags_from_db(conn):\n print('Deleting existing hashtags and resetting')\n cur = conn.execute('Delete from main_hashtags')\n\ndef write_hashtag_to_db(conn, topic):\n cur = conn.execute\n print('writing hashtag to database', topic)\n t_sql = '''INSERT INTO main_hashtags(hashtag_string, hashtag_lastvalue) VALUES(?, ?)'''\n try:\n cur = conn.cursor()\n cur.execute(t_sql, topic)\n except Error as e:\n print(e)\n print('success')\n conn.commit()\n return cur.lastrowid\n\n\n\n\ndef main(topics, conn):\n\n f_tweet = {}\n replies = []\n count = 1\n for topic, start in topics.items():\n print(topic)\n print(\"running for topic: \", topic, \" starting at tweet:\")\n with conn:\n non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)\n for full_tweets in tweepy.Cursor(api.search, q=topic + \" -filter:retweets\", count=500, lang=\"en\", since=\"2019-04-01\").items(500):\n art_payload = full_tweets.id_str+\";\"+full_tweets.user.screen_name+\";\" + \\\n full_tweets.text.translate(non_bmp_map).replace('\\n', ' ').replace('\\r', '').replace(';', ':')\n comment = \"\"\n ft_sql_pay_load = (full_tweets.id_str, full_tweets.text.translate(non_bmp_map),\n full_tweets.user.screen_name, full_tweets.created_at, topic);\n ft_id = db_create_tweet(conn, ft_sql_pay_load)\n count +=1\n\n f_tweet.update({full_tweets.id_str: art_payload})\n print(\"starting wait\")\n sleep_time = 10\n for i in range(6):\n print('sleeping for ',sleep_time,' seconds, step:', 6-i)\n time.sleep(sleep_time)\n\n #build_articles_by_screen_name(conn)\n #master_comments = build_comments_by_screen_name(conn)\n #for key, value in master_comments.items():\n # ft_sql_pay_load = (key, value);\n # ft_id = write_comments_by_screen_name(conn, ft_sql_pay_load)\n # print(ft_id)\n\n\n\n#setup the base database connection\ndb_name = '..\\db.sqlite3'\nconn = create_connection(db_name)\n#get our list of hashtags (TODO: move this to db later)\n\ntopics_new 
=hashtag_list()\n\n#run our main twitter extract, TODO: split this up into smaller functions\n#main(topics_new, conn)\n\nbuild_articles_by_screen_name(conn)\nmaster_comments = build_comments_by_screen_name(conn)\n\nfor key, value in master_comments.items():\n ft_sql_pay_load = (key, value);\n ft_id = write_comments_by_screen_name(conn, ft_sql_pay_load)\n\nlink_comments_to_articles(conn)\n\n#Because the comments come in with # and @, let's remove those so they do not interfere with the rank function\nclean_comments(conn)\n#make sure everything gets pushed to the DB\nconn.commit()\nconn.close()\n\n","sub_path":"main/import_tweets.py","file_name":"import_tweets.py","file_ext":"py","file_size_in_byte":7234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"123843004","text":"#!/usr/bin/python3\n\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport datetime\nimport copy\nimport traceback\nimport mts_util\nimport subprocess\n\nTradableKeys = [\"symbol\", \"exch_symbol\", \"venue\", \"tick_size\", \\\n \"point_value\", \"px_multiplier\", \"type\", \\\n \"mts_contract\", \"contract_month\", \"mts_symbol\", \\\n \"N\", \"expiration_days\", \"tt_security_id\", \\\n \"tt_venue\", \"currency\", \"expiration_date\",\"bbg_id\",\\\n \"bbg_px_multiplier\", \"tickdata_id\", \\\n \"tickdata_px_multiplier\", \"tickdata_timezone\", \\\n \"lotspermin\", \"start_time\", \"end_time\"]\n\nTradableKeysSpread = [\"S\"]\n\ndef gen_asset_root(xml_path, assets_xml) :\n xml_cfg = xml_path + \"/\" + assets_xml\n root = ET.parse(xml_cfg).getroot()\n return root\n\ndef get_symbol_spread_set(cfg = '/home/mts/run/config/main.cfg'):\n mts_cfg = mts_util.MTS_Config(cfg)\n spread_dict = {}\n try:\n keys = mts_cfg.listSubKeys('Spreads')\n for k in keys:\n kl = int(mts_cfg.arrayLen('Spreads.'+k))\n sa = []\n for i in np.arange(kl).astype(int):\n sa.append(tuple(mts_cfg.getArr('Spreads.%s[%d]'%(k,i), int)))\n spread_dict[k] = copy.deepcopy(sa)\n except:\n #traceback.print_exc()\n print('no symbol spread definitions found')\n return spread_dict\n\ndef get_max_N(cfg = '/home/mts/run/config/main.cfg'):\n mts_cfg = mts_util.MTS_Config(cfg)\n return mts_cfg.get('MaxN',int)\n\ndef update_symbol_map(today_yyyy_mm_dd, xml_path, max_N = 2, assets_xml = \"assets.xml\", mts_symbols=None, root = None, symbol_spread_dict={}, add_prev_day_symbols=False):\n \"\"\"\n Input: \n today_yyyy_mm_dd: today's date as yyyy-mm-dd\n xml_path: the path to the mappings xml, i.e. config/symbol\n max_N: number of contracts to include for each future symbol\n assets_xml: the xml file, ie. assets.xml\n mts_symbols: optional list of mts_symbols (taken without _N, otherwise ignores _N) \n to generate mappings for. If None then generates all.\n root: optional xml root if None then generates from xml_path\n spread_list: optional dictionary of per-symbol list of N1-N2 pairs to be added.\n i.e. {'WTI':[(1,6), (1,12)]}, to add spreads N1-N6 and N1-N12 (in addition to defaults)\n Note, if not specified, it adds [(0,1),(0,2),(1,2)] by default\n add_prev_day_symbols: in case today's holiday (and half day) for some symbols, they won't be \n in today's symbol_map and therefore the bpmain/tpmain won't \n subscribe them. 
This could lead to issues such as next day's open referring\n to the previous day's close, making the overnight return bigger than reality.\n Set this true to include previous day's symbols if they are not in today's map.\n Symbols are 'WTI','Brent', etc.\n\n return :\n it reads assets.xml and roll_schedules/* \n and parse into a list of tradable and a dict of venues for current day:\n tradable = [ \n {\n tradable = ESH1\n symbol = SPX\n exch_symbol = ES\n venue = CME\n currency = USD\n tick_size = 0.25\n point_value = 50\n px_multiplier = 0.01\n type = FUT\n\n mts_contract = SPX_202103\n contract_month = 202103\n mts_symbol = SPX_N1\n N = 1\n expiration_days = 4\n tt_security_id = 0123455\n tt_venue = CME\n subscribed = 0\n bbg_id = ESH1 INDEX\n bbg_px_multiplier = 1.0\n tickdata_id = ESH21\n tickdata_px_multiplier = 0.01\n lotspermin = 40\n\n start_time = 20210214-18:00:00\n end_time = 20210214-17:00:00\n }\n ...\n ]\n\n venue = {\n CME = {\n hours = [ -6, 0, 17, 0 ]\n }\n }\n \"\"\"\n\n if mts_symbols is not None:\n # remove \"_N?\" from mts_symbols if exists\n symbols0 = set()\n for symbol in mts_symbols :\n symbols0.add(symbol.split('_')[0])\n mts_symbols = symbols0\n\n today = today_yyyy_mm_dd\n if '-' not in today_yyyy_mm_dd and len(today_yyyy_mm_dd) == 8 :\n today = today_yyyy_mm_dd[:4] + '-' + today_yyyy_mm_dd[4:6] + '-' + today_yyyy_mm_dd[6:]\n #today += \" 00:00:00\"\n\n if root is None :\n root = gen_asset_root(xml_path, assets_xml)\n\n tradable = []\n venues = {}\n\n max_Nsp = max_N\n default_spread_list = [(0,1),(0,2),(1,2)]\n\n for assets in root :\n sym = {}\n max_Nsp = max_N\n spread_set = set(default_spread_list)\n for asset in assets :\n if asset.tag == \"symbol\" :\n if mts_symbols is not None and len(mts_symbols)>0 and asset.text not in mts_symbols :\n break\n sym[\"symbol\"] = asset.text\n if asset.text in symbol_spread_dict.keys():\n spread_set=set(default_spread_list+symbol_spread_dict[asset.text])\n for (n1,n2) in spread_set:\n max_Nsp=max(max_Nsp,n2)\n elif asset.tag == \"exchange_symbol\":\n sym[\"exch_symbol\"] = asset.text\n elif asset.tag == \"currency\" :\n sym[\"currency\"] = asset.text\n elif asset.tag == \"exchange\" :\n sym[\"venue\"] = asset.text\n elif asset.tag == \"ticksize\" :\n sym[\"tick_size\"] = asset.text\n elif asset.tag == \"spread_ticksize\" :\n sym[\"spread_tick_size\"] = asset.text\n elif asset.tag == \"currency\" :\n sym[\"currency\"] = asset.text\n elif asset.tag == \"calendar\" :\n cal_file = xml_path + \"/calendars/\" + asset.text\n\n sym[\"cal\"] = {\"file\": cal_file}\n # getting the calendar for first max_N contract of today,\n # include N0, N1, etc\n c = np.genfromtxt(cal_file, delimiter = \",\", dtype = \"|S32\")\n try :\n ct = c[np.nonzero(c[:,0] == str.encode(today))[0][0]]\n s = ct[2].decode()\n e = ct[3].decode()\n if s == \"\" or e == \"\" :\n # not a trading day\n raise RuntimeError(today + \" is not a trading day from calendar\")\n sym[\"cal\"][\"s\"] = s\n sym[\"cal\"][\"e\"] = e\n sym[\"expiration_days\"] = int(ct[6])\n # read upto max_N contracts into \"contracts\"\n # for N = 0, 1, .., max_Nsp\n sym[\"contracts\"] = []\n for n in np.arange(max_Nsp+1).astype(int) :\n sym[\"contracts\"].append(ct[7+n].decode())\n sym[\"start_time\"] = s.split(' ')[1]\n sym[\"end_time\"] = e.split(' ')[1]\n\n except :\n #traceback.print_exc()\n #raise TradingDayError( today + \" is not a trading day from \" + cal_file)\n #print ( today + \" is not a trading day from \" + cal_file)\n break\n\n # set the venue\n if \"venue\" in sym.keys() :\n 
v = sym[\"venue\"]\n if v in venues.keys() :\n if venues[v][\"s\"] != s or venues[v][\"e\"] != e :\n #print (\"venue hour update for \" + v)\n if venues[v][\"s\"] > s :\n venues[v][\"s\"] = s\n if venues[v][\"e\"] < e :\n venues[v][\"e\"] = e\n else :\n venues[v] = {\"s\": s, \"e\": e}\n\n elif asset.tag == \"providers\" :\n # getting the tt's px mul\n # and tickdata's px_multiplier and timezone\n for p in asset :\n if p.tag == \"tt\" :\n for p0 in p :\n if p0.tag == \"px_multiplier\" :\n sym[\"px_multiplier\"] = p0.text\n if p0.tag == \"exchange\":\n sym[\"tt_venue\"] = p0.text\n elif p.tag == \"tickdata\" :\n for p0 in p :\n if p0.tag == \"px_multiplier\" :\n sym[\"tickdata_px_multiplier\"] = p0.text\n elif p0.tag == \"timezone\" :\n sym[\"tickdata_timezone\"] = p0.text\n elif p.tag == \"bbg\" :\n for p0 in p :\n if p0.tag == \"px_multiplier\" :\n sym[\"bbg_px_multiplier\"] = p0.text\n\n elif asset.tag == \"execution\" :\n # getting the twap lotspermin\n for p in asset :\n if p.tag == \"twap\" :\n for p0 in p :\n if p0.tag == \"lotspermin\":\n sym[\"lotspermin\"] = p0.text\n\n elif asset.tag == \"contracts\" :\n # getting the tradable name\n for c in asset :\n con0 = {'type':'FUT'}\n for f in c :\n if f.tag == \"pointvalue\" :\n con0[\"point_value\"] = f.text\n elif f.tag == \"expiry\" :\n expiry = f.text\n if expiry not in sym[\"contracts\"] :\n con0 = {}\n break\n n = np.nonzero(np.array(sym[\"contracts\"])==expiry)[0][0]\n con0[\"contract_month\"] = expiry\n con0[\"N\"] = n\n elif f.tag == \"tt_exchange_id\" :\n con0[\"tradable\"] = f.text\n elif f.tag == \"tt_security_id\" :\n con0[\"tt_security_id\"] = f.text\n elif f.tag == \"expiration_date\" :\n con0[\"expiration_date\"] = f.text\n elif f.tag == \"bbg_id\" :\n con0[\"bbg_id\"] = f.text\n elif f.tag == \"tickdata_id\" :\n con0[\"tickdata_id\"] = f.text\n elif f.tag == \"symbol\" :\n con0[\"mts_contract\"] = f.text\n\n if len(con0.keys()) >= 4 :\n sym[n] = copy.deepcopy(con0)\n\n elif asset.tag == \"spreads\":\n # getting the tradable name\n for c in asset :\n con0 = {'type':'MLEG'}\n for f in c :\n if f.tag == \"pointvalue\" :\n con0[\"point_value\"] = f.text\n elif f.tag == \"expiry\" :\n expiry1,expiry2 = f.text.split('-')\n if expiry1 not in sym[\"contracts\"] or expiry2 not in sym[\"contracts\"]:\n con0 = {}\n break\n n1 = np.nonzero(np.array(sym[\"contracts\"])==expiry1)[0][0]\n n2 = np.nonzero(np.array(sym[\"contracts\"])==expiry2)[0][0]\n if (n1,n2) not in spread_set:\n break\n con0[\"contract_month\"] = \"%s-%s\"%(expiry1,expiry2)\n con0[\"N\"] = n1\n con0[\"S\"] = n2\n\n elif f.tag == \"tt_exchange_id\" :\n con0[\"tradable\"] = f.text\n elif f.tag == \"tt_security_id\" :\n con0[\"tt_security_id\"] = f.text\n elif f.tag == \"bbg_id\" :\n con0[\"bbg_id\"] = f.text\n elif f.tag == \"tickdata_id\" :\n con0[\"tickdata_id\"] = f.text\n elif f.tag == \"symbol\" :\n con0[\"mts_contract\"] = f.text\n\n if len(con0.keys()) >= 4 and 'S' in con0.keys():\n if n1 not in sym.keys():\n raise RuntimeError(\"spread contract defined before underlying\")\n if 'spreads' not in sym[n1].keys():\n sym[n1]['spreads'] = []\n sym[n1]['spreads'].append(copy.deepcopy(con0))\n\n # finish parsing this asset into sym\n # write maxN tradable into trd \n # \n for n in np.arange(max_Nsp+1).astype(int) :\n if n not in sym.keys() :\n continue\n\n sym0 = copy.deepcopy(sym)\n sym0.update(sym[n])\n\n # for underlying types (FUT)\n if n <= max_N :\n sym0[\"mts_symbol\"] = sym[\"symbol\"] + \"_N\"+str(n)\n trd0 = {}\n for k in TradableKeys :\n # write to the 
files\n try :\n trd0[k] = sym0[k]\n except :\n if 'tickdata' not in k:\n print (k + \" is not defined \" + str(sym0[\"symbol\"]))\n trd0[k] = 'None'\n\n trd0[\"tradable\"] = sym0[\"tradable\"]\n if trd0['expiration_date'] <= today :\n # only add contracts expires later than today\n continue\n tradable.append(copy.deepcopy(trd0))\n\n # for the spread with n1=n\n if 'spreads' not in sym0.keys():\n continue\n for spd_con in sym0['spreads']:\n # we don't need this check, all spd_con should be in the set\n #if (n, spd_con['S']) not in spread_set:\n # continue\n trd0 = {}\n sym1=copy.deepcopy(sym0)\n sym1.update(spd_con)\n for k in TradableKeys + TradableKeysSpread:\n # write to the files\n try :\n trd0[k] = sym1[k]\n except :\n if 'tickdata' not in k :\n print (k + \" is not defined \" + str(sym0[\"symbol\"]))\n trd0[k] = 'None'\n\n trd0[\"mts_symbol\"] = sym1[\"symbol\"] + \"_N\"+str(n)+'-'+sym1['symbol']+'_N'+str(spd_con['S'])\n trd0[\"tradable\"] = sym1[\"tradable\"]\n trd0['tick_size'] = sym1['spread_tick_size']\n tradable.append(copy.deepcopy(trd0))\n\n\n if add_prev_day_symbols:\n # called from launch to add previous day's symbol in the symbol map for subscription/reference purpose for MTS engine\n tdi = mts_util.TradingDayIterator(today_yyyy_mm_dd)\n tdi.begin()\n prev_day=tdi.prev()\n tp, vp = update_symbol_map(prev_day, xml_path, max_N = max_N, assets_xml = assets_xml, mts_symbols=mts_symbols, root = root, symbol_spread_dict=symbol_spread_dict, add_prev_day_symbols=False)\n # merge symbols that are in tp into tradable\n cur_symbols = []\n for td in tradable:\n cur_symbols.append(td['symbol'])\n for td in tp:\n if td['symbol'] not in cur_symbols:\n tradable.append(td)\n # merge venue\n for vk in vp.keys():\n if vk not in venues.keys():\n venues[vk] = vp[vk]\n\n return tradable, venues\n\n\ndef writeToConfigure(tradable, venues, cfg_path, map_file = \"symbol_map.cfg\") :\n \"\"\"\n persist the two dictionaries to cfg_path/map_file\n \"\"\"\n\n with open(cfg_path + \"/\" + map_file, \"w\") as f :\n f.write(\"tradable = {\\n\")\n for trd in tradable :\n f.write(\" \" + trd[\"tradable\"] + \" = {\\n\")\n for key in TradableKeys+TradableKeysSpread:\n if key in trd.keys():\n f.write(\" \" + key + \" = \" + str(trd[key])+ \"\\n\")\n f.write(\" \" + \"}\\n\")\n\n f.write(\"}\\n\")\n f.write(\"venue = {\\n\")\n for key in venues.keys() :\n f.write(\" \" + key + \" = {\\n\")\n s = venues[key][\"s\"]\n e = venues[key][\"e\"]\n\n sh = int(s.strip().split(\" \")[1].split(\":\")[0])\n sm = int(s.strip().split(\" \")[1].split(\":\")[1])\n eh = int(e.strip().split(\" \")[1].split(\":\")[0])\n em = int(e.strip().split(\" \")[1].split(\":\")[1])\n\n if sh > eh or (sh == eh and sm >= em) :\n sh = sh - 24\n\n f.write(\" hours = [ \" + str(sh) + \", \" + str(sm) + \", \" + str(eh) + \", \" + str(em) + \" ]\\n\")\n f.write(\" }\\n\")\n f.write(\"}\\n\")\n\nclass SymbolMap :\n def __init__(self, xml_path = '/home/mts/run/config/symbol', max_N = 2, assets_xml = \"assets.xml\", default_symbol_spread_dict={}, main_cfg_fn = None):\n self.xml_path = xml_path\n assert max_N <= 6, \"max_N more than 6 - calendar file limit 6, run twice to get 12\"\n self.max_N = max_N\n self.assets_xml = assets_xml\n self.assets_root = None\n self.default_symbol_spread_dict = default_symbol_spread_dict \n if main_cfg_fn is not None:\n self.default_symbol_spread_dict = get_symbol_spread_set(main_cfg_fn)\n self.max_N = get_max_N(main_cfg_fn)\n\n def update_config(self, today_yyyy_mm_dd, cfg_path = 'config', map_file = 'symbol_map.cfg', 
mts_symbols = None, symbol_spread_dict={}):\n if len(symbol_spread_dict) == 0:\n symbol_spread_dict = self.default_symbol_spread_dict\n self._get_assets_root()\n t,v = update_symbol_map(today_yyyy_mm_dd, self.xml_path, self.max_N, self.assets_xml, mts_symbols = mts_symbols, root = self.assets_root, symbol_spread_dict=symbol_spread_dict)\n writeToConfigure(t, v, cfg_path, map_file)\n\n def get_tradable_map(self, today_yyyy_mm_dd, mts_key = False, mts_symbols = None, symbol_spread_dict={}, add_prev_day=False, optional_key_name=None) :\n \"\"\"\n if mts_key is True, map have key on mts_symbol, otherwise, key is tradable\n return :\n dict with key being either tradable or mts_symbol, value being the tradable dict\n \"\"\"\n if len(symbol_spread_dict) == 0:\n symbol_spread_dict = self.default_symbol_spread_dict\n self._get_assets_root()\n t,v = update_symbol_map(today_yyyy_mm_dd, self.xml_path, self.max_N, self.assets_xml, mts_symbols=mts_symbols, root=self.assets_root, symbol_spread_dict=symbol_spread_dict)\n smap = {} # a map from mts_symbol to a tradable\n key_name = 'mts_symbol'\n if not mts_key:\n if optional_key_name is not None:\n key_name = optional_key_name\n else:\n key_name = 'tradable'\n for t0 in t :\n #k = t0[\"mts_symbol\"] if mts_key else t0[\"tradable\"]\n k = t0[key_name]\n smap[k] = copy.deepcopy(t0)\n\n if add_prev_day:\n # add symbols that were tradable on previous day but not on today\n # to the smap\n tdi = mts_util.TradingDayIterator(today_yyyy_mm_dd)\n tdi.begin()\n prev_day = tdi.prev()\n smap_prev = self.get_tradable_map(prev_day, mts_key= mts_key, mts_symbols=mts_symbols, \\\n symbol_spread_dict=symbol_spread_dict, add_prev_day = False, optional_key_name=optional_key_name)\n for k in smap_prev.keys():\n if k not in smap.keys():\n smap[k] = smap_prev[k]\n return smap\n\n @staticmethod\n def is_mts_symbol(mts_or_tradable):\n \"\"\"look for pattern that either has '_N[012...9]' or '_yyyymm'\n \"\"\"\n tf = mts_or_tradable.split('_')\n if len(tf)>1:\n tf0 = tf[-1]\n if tf0[0]=='N':\n try:\n n_contract = int(tf0[1:])\n return True\n except:\n pass\n elif len(tf0)==6:\n try:\n contract_month = int(tf0)\n return True\n except:\n pass\n return False\n\n def get_tinfo(self, mts_or_tradable, yyyymmdd = None, is_mts_symbol = False, symbol_spread_dict={}, add_prev_day=False, optional_key_name=None) :\n if len(symbol_spread_dict) == 0:\n symbol_spread_dict = self.default_symbol_spread_dict\n if yyyymmdd is None :\n yyyymmdd = datetime.datetime.now().strftime('%Y%m%d')\n\n is_mts_symbol0 = SymbolMap.is_mts_symbol(mts_or_tradable)\n if is_mts_symbol is None:\n is_mts_symbol=is_mts_symbol0\n elif is_mts_symbol0 != is_mts_symbol:\n print('got %s, which %s a MTS symbol, but was given otherwise, please fix'%(mts_or_tradable, 'is' if is_mts_symbol0 else 'is NOT'))\n try :\n # try it as mts symbol, \n mts_symbols = [mts_or_tradable] if is_mts_symbol else None\n\n # this uses the mts_symbol as key, i.e. WTI_N1\n smap = self.get_tradable_map(yyyymmdd, mts_key = True, mts_symbols = mts_symbols, symbol_spread_dict=symbol_spread_dict, add_prev_day=add_prev_day, optional_key_name=optional_key_name)\n return smap[mts_or_tradable]\n except :\n try :\n if is_mts_symbol0 and optional_key_name is None:\n # this uses the mts_contract as key, i.e. WTI_202209\n smap = self.get_tradable_map(yyyymmdd, mts_key = False, mts_symbols = mts_symbols, symbol_spread_dict=symbol_spread_dict,add_prev_day=add_prev_day, optional_key_name='mts_contract')\n else:\n # this uses the 'exchange symbol as key, i.e. 
CLU2\n smap = self.get_tradable_map(yyyymmdd, mts_key = False, mts_symbols = mts_symbols, symbol_spread_dict=symbol_spread_dict,add_prev_day=add_prev_day, optional_key_name=optional_key_name)\n return smap[mts_or_tradable]\n except :\n raise KeyError(\"failed to get tinfo \" + mts_or_tradable)\n \n def list_symbol(self, today_yyyymmdd = None, mts_symbol_list=None, add_prev_day=False) :\n \"\"\"\n return a list of symbol, such as WTI, SPX, from mts_symbol, such as WTI_N1. \n if None then list all defined in assets.xml for today_yyyymmdd\n \"\"\"\n if today_yyyymmdd is None :\n today_yyyymmdd = datetime.datetime.now().strftime('%Y%m%d')\n tdi = mts_util.TradingDayIterator(today_yyyymmdd)\n today_yyyymmdd=tdi.begin()\n smap = self.get_tradable_map(today_yyyymmdd, mts_key = True, mts_symbols=mts_symbol_list, add_prev_day=add_prev_day)\n if mts_symbol_list is None:\n mts_symbol_list = smap.keys()\n\n symbols = []\n for k in mts_symbol_list:\n sym = smap[k]['symbol']\n if sym not in symbols: \n symbols.append(sym)\n return symbols\n\n def get_tradable_from_mts_symbol(self, mts_symbol_arr, trade_day) :\n \"\"\"\n get an array of tradables given mts_symbols and a trading day\n Note the returned array has same length with mts_symbol_arr. If not a trading day\n for the corresponding mts_symbol, a 'None' is put in place\n \"\"\"\n tm = self.get_tradable_map(trade_day, mts_key=True, mts_symbols = mts_symbol_arr)\n ret = []\n for ms in mts_symbol_arr:\n if ms not in tm.keys() :\n ret.append(None)\n else :\n ret.append(tm[ms][\"tradable\"])\n return ret\n\n def get_contract_from_symbol(self, mts_symbol_no_N, trade_day, add_prev_day=False, include_spread=True, extra_N=[]):\n \"\"\"\n mts_symbol_no_N: mts_symbol that doesn't have '_N', i.e. WTI\n trade_day: a trading day in YYYYMMDD\n include_spread: if True, includes all spreads within the limit of maxn, \n i.e. 
if max=2, N0-N1, N0-N2, N1-N2\n if False, no spread is included\n extra_N: list of N beyond the max_N, i.e [6,9,12] for max_N=2 (cannot include spread)\n return: a list of all contracts that matches with the symbol on\n the day, upto max_N defined in self.max_N\n NOTE - since the calendar file has only contracts upto N_6, in order to extend to N_12\n it runs again at half year earlier\n \"\"\"\n assert (not include_spread) or (len(extra_N) == 0)\n # handle the extra_N here\n max_N = self.max_N\n max_NE = max_N if len(extra_N)==0 else max(max_N, np.max(extra_N))\n if len(extra_N) > 1:\n en = np.array(extra_N)\n assert np.min(en[1:]-en[:-1]) > 0\n\n if max_NE > 6:\n assert max_NE <= 12\n self.max_N = 6\n else:\n self.max_N = max_NE\n\n tm = self.get_tradable_map(trade_day, mts_key=True, mts_symbols = [ mts_symbol_no_N ], add_prev_day=add_prev_day)\n assert len(tm.keys()) > 0, 'no contracts found on ' + trade_day + ', a holiday?'\n Ns = []\n Contracts = []\n for ms in tm.keys():\n if not include_spread and ('S' in tm[ms].keys()):\n continue\n symbol = tm[ms]['symbol']\n if symbol == mts_symbol_no_N :\n Ns.append(tm[ms]['N'])\n Contracts.append(tm[ms]['contract_month'])\n nix = np.argsort(Ns)\n Ns = list(np.array(Ns)[nix])\n Contracts = list(np.array(Contracts)[nix])\n\n if max_NE > 6:\n # get a day of 6 months ago and get a list of 6 contracts to append to\n day = trade_day\n year = int(day[:4])\n month = int(day[4:6])\n if month < 7:\n year -= 1\n month += 6\n else:\n month -= 6\n day = '%04d%02d'%(year,month)+day[-2:]\n tdi = mts_util.TradingDayIterator(day)\n day = tdi.begin()\n\n # This is quite a hack - \n # if the current day has just rolled, i.e. N6\n # is a new contract, make sure the dates half \n # year earlier starts from 'N7'\n if np.min(Ns) == 0:\n day = tdi.next(10)\n\n Ns2 = []\n Contracts2 = []\n tm2 = self.get_tradable_map(day, mts_key=True, mts_symbols = [ mts_symbol_no_N ], add_prev_day=True)\n # generate all the contracts\n for ms in tm2.keys() :\n if not include_spread and ('S' in tm2[ms].keys()):\n continue\n symbol = tm2[ms][\"symbol\"]\n if symbol == mts_symbol_no_N :\n Ns2.append(tm2[ms]['N'])\n Contracts2.append(tm2[ms]['contract_month'])\n nix = np.argsort(Ns2)\n Ns2 = list(np.array(Ns2)[nix])\n Contracts2 = list(np.array(Contracts2)[nix])\n \n # populate the Ns\n for con in Contracts2 :\n year = int(con[:4])+1\n con = '%04d'%(year)+con[-2:]\n if con > Contracts[-1]:\n Ns.append(Ns[-1]+1)\n Contracts.append(con)\n\n ix = np.searchsorted(np.array(Ns), max_N)\n assert Ns[ix] == max_N\n ret = Contracts[:ix+1]\n\n # make Ns (the contract in the future) to Ms (the months in the future)\n Ms = np.array(Contracts).astype(int)//100*12+(np.array(Contracts).astype(int)%100)\n Ms = Ms - Ms[0] + Ns[0]\n for en in extra_N:\n if en <= max_N:\n continue\n ix = np.clip(np.searchsorted(np.array(Ms), en),0,len(Ns)-1)\n if Ns[ix] == en:\n ret.append(Contracts[ix])\n\n self.max_N=max_N\n return ret\n\n def get_symbol_contract_from_tradable(self, tradable, trade_day = None, is_mts_symbol=False, add_prev_day=False) :\n \"\"\"\n return mts_symbol, symbol and contract_month\n \"\"\"\n\n max_N0 = self.max_N\n max_N = max_N0\n # for MTS non-spread symbols, N can be entertained\n if is_mts_symbol and len(tradable.split('_')) == 2:\n sym = tradable.split('_')[0]\n N = tradable.split('_')[1]\n assert N[0] == 'N', 'unknown format of MTS symbol: ' + tradable\n n = int(N[1:])\n assert n >= 0 and n <= 12, 'N out of range: ' + tradable\n if n > self.max_N:\n if n <= 6:\n max_N = 6\n else:\n if 
trade_day is None :\n trade_day = datetime.datetime.now().strftime('%Y%m%d')\n con = self.get_contract_from_symbol(sym, trade_day, add_prev_day=add_prev_day, include_spread=False, extra_N=[n])\n return tradable, sym, con[-1]\n\n self.max_N = max_N\n tinfo = self.get_tinfo(tradable, trade_day, is_mts_symbol=is_mts_symbol,add_prev_day=add_prev_day)\n self.max_N = max_N0\n return tinfo['mts_symbol'], tinfo['symbol'], tinfo['contract_month']\n\n def get_mts_symbol_from_tt_venue(self, tt_venue_arr, trade_day, tt_venue_map_cfg='config/venue_map.cfg', N=1) :\n ttvm = self._load_tt_venue_map(tt_venue_map_cfg)\n tm = self.get_tradable_map(trade_day, mts_key=True)\n ret = []\n for ms in tm.keys() :\n ttv = ttvm[tm[ms][\"venue\"]]\n if tt_venue_arr is None or ttv in tt_venue_arr :\n if ms[-1] != str(N) :\n continue\n ret.append([tm[ms][\"symbol\"], ttv])\n return ret\n\n def get_mts_symbol_from_mts_venue(self, mts_venue_arr, trade_day) :\n tm = self.get_tradable_map(trade_day, mts_key=True)\n ret = []\n for ms in tm.keys() :\n venue = tm[ms][\"venue\"]\n if venue in mts_venue_arr :\n ret.append(ms)\n return ret\n\n def get_mts_symbol_from_field(self, trade_day, field_values, field_name) :\n tm = self.get_tradable_map(trade_day, mts_key=True)\n ret = []\n for ms in tm.keys() :\n if field_name not in tm[ms].keys() :\n continue\n val = tm[ms][field_name]\n if val in field_values: \n ret.append(ms)\n return ret\n\n def getSubscriptionList(self, mts_cfg, trade_day) :\n # read \"MTSVenue\" and \"MTSSymbol\" from config and return a list of mts_symbol\n # as subscription list\n sub_venue0 = mts_cfg.listSubKeys('MTSVenue')\n sub_venue = []\n for v in sub_venue0 :\n if len(mts_cfg.getArr('MTSVenue.'+v)) > 0 :\n sub_venue.append(v)\n\n sub_symbol0 = mts_cfg.listSubKeys('MTSSymbol')\n sub_symbol = []\n for s in sub_symbol0 :\n if len(mts_cfg.getArr('MTSSymbol.'+s)) > 0 :\n sub_symbol.append(s)\n\n sub_symbol= set(self.get_mts_symbol_from_field(trade_day, sub_symbol, 'symbol'))\n sub_symbol.update(self.get_mts_symbol_from_mts_venue(sub_venue, trade_day))\n return sub_symbol\n\n def _load_tt_venue_map(self, tt_venue_map_cfg) :\n vm = {}\n try :\n with open(tt_venue_map_cfg, \"r\") as f :\n while True :\n l = f.readline()\n if len(l) ==0 :\n break\n l = l.strip()\n if l[0] == '#' :\n continue\n la = l.split('=')\n if len(la) == 2 :\n vm[la[0].strip()] = la[1].strip()\n except :\n traceback.print_exc()\n print (\"failed to load tt venue map!\")\n return vm\n\n def _get_assets_root(self) :\n if self.assets_root is None :\n try :\n self.assets_root = gen_asset_root(self.xml_path, self.assets_xml)\n except :\n traceback.print_exc()\n raise RuntimeError(\"Cannot get symbol map from \" + self.xml_path + \", \" + self.assets_xml)\n\n\nclass FXMap :\n def __init__(self, fx_filename='/home/mts/run/config/symbol/fx_1700.txt'):\n \"\"\"\n days, utcs: array of yyyymmdd (utc) of the days of rates\n syms: array of symbols of columns of rates\n rates: shape [ndays,nfx] rates x, where x is number of USD needed to for 1 FX\n \"\"\"\n self.fn = fx_filename\n self.syms, self.days, self.utcs, self.rates = self._load()\n\n def _load(self):\n \"\"\"load historical FX from file into fxdict, i.e. 
fx_1500.txt\n File format:\n Header:\n trade_date,AUD,BRL,CAD,CHF,CLP,CNH,CNY,COP,CZK,DKK,EUR,GBP,HKD,HUF,IDR,ILS,INR,ISK,JPY,KRW,MXN,MYR,NOK,NZD,PHP,PLN,RUB,SAR,SEK,SGD,THB,TRY,TWD,ZAR\n Body:\n 2007-02-28,0.7879,,,,,,,,,,1.3231,1.9636,,,,,,,,,,,,0.7016,,,,,,,,,,\n \"\"\"\n syms = subprocess.check_output(['head','-1',self.fn]).decode().replace('\\r','').replace('\\n','').split(',')[1:]\n rates = np.genfromtxt(self.fn, delimiter=',',skip_header=1,dtype='str')\n days = []\n utcs = []\n for d in rates[:,0]:\n dt = datetime.datetime.strptime(d, '%Y-%m-%d')\n days.append(dt.strftime('%Y%m%d'))\n utcs.append(int(dt.strftime('%s'))+17*3600) # utc at 17:00 of the day\n\n rates[np.nonzero(rates=='')]='1'\n rates=rates[:,1:].astype(float)\n return np.array(syms), np.array(days), np.array(utcs), rates\n\n def get(self, sym, day, use_prev_day=True, default=None):\n \"\"\"\n sym: str, i.e. 'EUR', 'GBP'\n day: yyyymmdd\n use_prev_day: use previous day if day is not found\n default: if sym not found, throw if None, otherwise use the default\n \"\"\"\n six = np.nonzero(self.syms==sym)[0]\n if len(six) == 0:\n if default is not None:\n return default\n raise RuntimeError('unknown FX: %s'%(sym))\n six=six[0]\n\n dix = max(np.searchsorted(self.days, str(int(day)+1))-1,0)\n if self.days[dix] != day:\n if not use_prev_day:\n raise RuntimeError('%s not found on %s'%(sym,day))\n return self.rates[dix,six]\n\n def get_by_utc(self, sym, utc, use_prev_day=True, default=None):\n six = np.nonzero(self.syms==sym)[0]\n if len(six) == 0:\n if default is not None:\n return default\n raise RuntimeError('unknown FX: %s'%(sym))\n six=six[0]\n\n dix = max(np.searchsorted(self.utcs, utc+1)-1,0)\n if not use_prev_day:\n utc0=self.utcs[dix]\n ymd0 = datetime.datetime.fromtimestamp(utc0).strftime('%Y%m%d')\n ymd = datetime.datetime.fromtimestamp(utc).strftime('%Y%m%d')\n if ymd != ymd0:\n raise RuntimeError('%s not found on %s'%(sym,ymd))\n return self.rates[dix,six]\n\n\n\n","sub_path":"mts/python/symbol_map.py","file_name":"symbol_map.py","file_ext":"py","file_size_in_byte":35213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"344080463","text":"from random import randint\nfrom time import sleep\n\n\nclass Jogo:\n def __init__(self):\n self.numero = randint(0, 10)\n self.entrada = int(input())\n\n @staticmethod\n def easy():\n print('\\nVocê escolheu o modo fácil! Vamos jogar.')\n sleep(1)\n print('Pensando no número que quero que você adivinhe...')\n sleep(3)\n print('Escolhi um número!')\n sleep(1)\n numero = randint(0, 10)\n while True:\n try:\n entrada = int(input('De 0 a 10, que número estou pensando?\\n'))\n except ValueError:\n print('\\nApenas números de 0 a 10! Tente de novo!')\n sleep(1)\n else:\n if entrada < 0 or entrada > 10:\n print('\\nO número tem que ser entre 0 e 10. Escolha outra vez!\\n')\n sleep(1)\n continue\n else:\n if entrada == numero:\n print('Parabéns, você venceu!')\n break\n elif entrada < numero:\n print('\\nErrado, o número é maior.\\n')\n sleep(1)\n continue\n else:\n print('\\nErrado, o número é menor\\n')\n sleep(1)\n continue\n\n @staticmethod\n def medium():\n print('\\nVocê escolheu o módo médio. Vamos lá!')\n sleep(1)\n print('Pensando no número que quero que você adivinhe...')\n sleep(3)\n print('Escolhi um número.')\n sleep(1)\n numero = randint(0, 10)\n while True:\n try:\n entrada = int(input('\\nDe 0 a 10, que numero estou pensando?\\n'))\n except ValueError:\n print('Apenas números entre 0 e 10! Tente de novo!')\n else:\n if entrada < 0 or entrada > 10:\n print('O número tem que ser entre 0 e 10! Escolha de novo!')\n sleep(1)\n continue\n else:\n if entrada == numero:\n print('Parabéns, você venceu sem nenhuma dica!')\n break\n else:\n print('\\nNúmero errado. Tente de novo!\\n')\n sleep(1)\n continue\n\n @staticmethod\n def hard():\n print('\\nVocê escolheu o modo difícil. Boa sorte!')\n sleep(1)\n print('Pensando no número que quero que você adivinhe...')\n sleep(3)\n while True:\n print('Escolhi um número!')\n sleep(1)\n try:\n entrada = int(input('\\nDe 0 a 10, que número estou pensando?\\n'))\n numero = randint(0, 10)\n except ValueError:\n print('Apenas números entre 0 e 10! Tente de novo!\\n')\n else:\n if entrada < 0 or entrada > 10:\n print('O número tem que ser entre 0 e 10. Escolha outra vez!\\n')\n sleep(1)\n continue\n else:\n if entrada == numero:\n print('Foi difícil, mas você ganhou! Parabéns!')\n break\n else:\n print('\\nNumero errado. Dessa vez não vai ser fácil!\\n')\n sleep(1)\n continue\n","sub_path":"jogoClasses.py","file_name":"jogoClasses.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"606219446","text":"import time\nimport json\nfrom urlparse import urlparse\n\nfrom google.appengine.api import urlfetch\n\nimport config\n\n# pull down the base info for the repo\ndef repo_base(url):\n\ttry:\n\t\t# parse the url and build a normalized github URL\n\t\tparts = urlparse(url)\n\t\turl = \"https://%s/%s\" % (config.github_url.strip('/'), parts[2].strip('/'))\n\t\t\n\t\t# use the path to make an API GET for the repo JSON\n\t\trepo = json.loads(\n\t\t\turlfetch.fetch(\"https://%s/%s/%s\" % (\n\t\t\t\tconfig.github_api_url.strip('/'),\n\t\t\t\t'repos',\n\t\t\t\tparts[2].strip('/')\n\t\t\t), deadline=5).content\n\t\t)\n\n\t\tif 'name' not in repo:\n\t\t\traise Exception(\"A valid Github repository was not found using that URL.\")\n\n\t\t# build and return the response\n\t\tresponse = {'response': 'success', 'result': {'repo': repo}}\n\t\treturn response\n\n\texcept Exception as ex:\n\t\t# build and return the failure\n\t\tresponse = {'response': 'fail', 'result': {'message': \"Project was not added. %s\" % ex} }\n\t\treturn response\n\ndef repo_sync_contents(project):\n\ttry:\n\t\t# parse the url and build a normalized github URL\n\t\tparts = urlparse(project.url)\n\t\turl = \"https://%s/%s\" % (config.github_url.strip('/'), parts[2].strip('/'))\n\t\t\n\t\t# use the path to make an API GET for the repo JSON\n\t\tcontents = json.loads(\n\t\t\turlfetch.fetch(\"https://%s/%s/%s/%s/%s\" % (\n\t\t\t\tconfig.github_api_url.strip('/'),\n\t\t\t\t'repos',\n\t\t\t\tparts[2].strip('/'), # repo name\n\t\t\t\t'contents',\n\t\t\t\t'utterio'\n\t\t\t), deadline=5).content\n\t\t)\n\n\t\t# check for required files\n\t\tcheck = {'README.md': 0, 'install.sh': 0, 'icon.png': 0}\n\t\tfor file in contents:\n\t\t\tif file['name'] == \"README.md\":\n\t\t\t\tproject.readme_url = file['download_url']\n\t\t\t\tproject.readme_link = file['html_url']\n\t\t\t\tcheck['README.md'] = 1\n\t\t\tif file['name'] == \"install.sh\":\n\t\t\t\tproject.install_url = file['download_url']\n\t\t\t\tproject.install_link = file['html_url']\n\t\t\t\tcheck['install.sh'] = 1\n\t\t\tif file['name'] == \"icon.png\":\n\t\t\t\tproject.icon_url = file['download_url']\n\t\t\t\tproject.icon_link = file['html_url']\n\t\t\t\tcheck['icon.png'] = 1\n\n\t\t# do a pass to build missing files string\n\t\tmissing = \"\"\n\t\tfor key,value in check.items():\n\t\t\tif not value:\n\t\t\t\tmissing = \"%s%s, \" % (missing, key)\n\t\tmissing = missing.strip(', ')\n\n\t\t# update the repo\n\t\tproject.put()\n\n\t\t# build the response object\n\t\tresponse = {'response': \"success\", 'result': {'message': ''}}\n\n\t\t# build the appropriate message\t\t\t\n\t\tif missing == \"\":\n\t\t\t# build and return the response\n\t\t\tresponse['result']['message'] = \"A complete Utter.io configuration was found!\"\n\t\telse:\n\t\t\tresponse['response'] = \"fail\"\n\t\t\tresponse['result']['message'] = \"The repository needs the following files added to the utterio directory: %s.\" % missing\n\n\t\treturn response\n\n\texcept Exception as ex:\n\t\t# build and return the failure\n\t\tresponse = {'response': \"fail\", 'result': {'message': \"This repository needs an utterio directory added to it.\"} }\n\t\treturn response\n\n\n","sub_path":"lib/github/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"91698984","text":"import numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import stats\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\nfrom statistics import mode\r\n\r\n\r\ndef case_submission_year_range(df):\r\n df['CASE_SUBMITTED_YEAR_RANGE'] = np.nan\r\n for i in range(len(df['CASE_SUBMITTED_YEAR'])):\r\n if int(df['CASE_SUBMITTED_YEAR'][i]) <= 2012:\r\n df.loc[i, 'CASE_SUBMITTED_YEAR_RANGE'] = 'BEFORE 2012'\r\n if int(df['CASE_SUBMITTED_YEAR'][i]) > 2012:\r\n df.loc[i, 'CASE_SUBMITTED_YEAR_RANGE'] = 'AFTER 2012'\r\n\t\t\t\r\ndef prevailing_wage(df):\r\n df['PREVAILING_WAGE_RANGE'] = np.nan\r\n for i in range(len(df['PREVAILING_WAGE'])):\r\n if int(df['PREVAILING_WAGE'][i]) <= 20000:\r\n df.loc[i, 'PREVAILING_WAGE_RANGE'] = '0 - 20000'\r\n if int(df['PREVAILING_WAGE'][i]) > 20000 and int(df['PREVAILING_WAGE'][i]) <= 50000:\r\n df.loc[i, 'PREVAILING_WAGE_RANGE'] = '20000 - 50000'\r\n if int(df['PREVAILING_WAGE'][i]) > 50000 and int(df['PREVAILING_WAGE'][i]) <= 120000:\r\n df.loc[i, 'PREVAILING_WAGE_RANGE'] = '50000 - 120000'\r\n if int(df['PREVAILING_WAGE'][i]) > 120000 and int(df['PREVAILING_WAGE'][i]) <= 250000:\r\n df.loc[i, 'PREVAILING_WAGE_RANGE'] = '120000 - 250000'\r\n if int(df['PREVAILING_WAGE'][i]) > 250000:\r\n df.loc[i, 'PREVAILING_WAGE_RANGE'] ='>250000'\r\n\r\ndef merge_labels(df):\r\n '''Merges labels like CERTIFIEDWITHDRAWN in the dataset into CERTIFIED class, and REJECTED, INVALIDATED with DENIED class\r\n Here we are transforming multiclass dataset into binary classification data with labels CERTIFIED and DENIED'''\r\n df['CASE_STATUS'] = df['CASE_STATUS'].replace(['CERTIFIEDWITHDRAWN'], ['CERTIFIED'])\r\n df['CASE_STATUS'] = df['CASE_STATUS'].replace(['REJECTED'], ['DENIED'])\r\n df['CASE_STATUS'] = df['CASE_STATUS'].replace(['INVALIDATED'], ['DENIED'])\r\n labels = df['CASE_STATUS'].unique()\r\n #print(labels)\r\n\r\n\r\ndef fill_nan_values(df):\r\n '''The dataset consists of many nan values. These are replaced by the mode for various columns like EMPLOYER_NAME,\r\n EMPLOYER_STATE, FULL_TIME_POSITION ,PW_UNIT_OF_PAY ,PW_SOURCE, PW_SOURCE_YEAR, H-1B_DEPENDENT, WILLFUL_VIOLATOR. For the column\r\n PREVAILING_WAGE we replace the nan columns with the mean value of the wage data. Also, if the SOC_NAME () is not available,\r\n we replace it with hardcoded value Others'''\r\n\r\n df['EMPLOYER_NAME'] = df['EMPLOYER_NAME'].fillna(df['EMPLOYER_NAME'].mode()[0])\r\n df['EMPLOYER_STATE'] = df['EMPLOYER_STATE'].fillna(df['EMPLOYER_STATE'].mode()[0])\r\n df['FULL_TIME_POSITION'] = df['FULL_TIME_POSITION'].fillna(df['FULL_TIME_POSITION'].mode()[0])\r\n df['PW_UNIT_OF_PAY'] = df['PW_UNIT_OF_PAY'].fillna(df['PW_UNIT_OF_PAY'].mode()[0])\r\n df['PW_SOURCE'] = df['PW_SOURCE'].fillna(df['PW_SOURCE'].mode()[0])\r\n df['PW_SOURCE_YEAR'] = df['PW_SOURCE_YEAR'].fillna(df['PW_SOURCE_YEAR'].mode()[0])\r\n df['H-1B_DEPENDENT'] = df['H-1B_DEPENDENT'].fillna(df['H-1B_DEPENDENT'].mode()[0])\r\n df['WILLFUL_VIOLATOR'] = df['WILLFUL_VIOLATOR'].fillna(df['WILLFUL_VIOLATOR'].mode()[0])\r\n\r\n\r\n df['SOC_NAME'] = df.SOC_NAME.replace(np.nan, 'Others', regex=True)\r\n\r\n df.PREVAILING_WAGE.fillna(df.PREVAILING_WAGE.mean(), inplace=True)\r\n\r\n\r\ndef classify_employer(df):\r\n # Check if the employer name is a 'university'. 
Since employers with university in their name have more chances of visa approval\r\n df['UNIV_EMPLOYER'] = np.nan\r\n df['EMPLOYER_NAME'] = df['EMPLOYER_NAME'].str.lower()\r\n df.loc[df['EMPLOYER_NAME'].str.contains('university'), 'UNIV_EMPLOYER'] = 'university'\r\n df['UNIV_EMPLOYER'] = df.UNIV_EMPLOYER.replace(np.nan, 'non university', regex=True)\r\n\r\n # Broadly classifying the occupations for people filing the visa petition\r\n df['OCCUPATION'] = np.nan\r\n df['SOC_NAME'] = df['SOC_NAME'].str.lower()\r\n\r\n df.loc[df['SOC_NAME'].str.contains(\r\n 'computer|graphic|web|developer|programmer|software|it|database|analyst'), 'OCCUPATION'] = 'IT Industry'\r\n df.loc[df['SOC_NAME'].str.contains(\r\n 'business|managers|planners|management|public relation|executives|supervisor|curator|human resources'), 'OCCUPATION'] = 'Management'\r\n df.loc[df['SOC_NAME'].str.contains('math|statistic|stats'), 'OCCUPATION'] = 'Maths Department'\r\n df.loc[df['SOC_NAME'].str.contains('promotion|market|advertis'), 'OCCUPATION'] = 'Marketing Department'\r\n df.loc[df['SOC_NAME'].str.contains('accountant|finance|acc'), 'OCCUPATION'] = 'Finance Department'\r\n df.loc[df['SOC_NAME'].str.contains(\r\n 'education|prof|teacher|linguist|teach|counsel|coach'), 'OCCUPATION'] = 'Education Department'\r\n df.loc[df['SOC_NAME'].str.contains(\r\n 'scientist|science|psychia|doctor|surgeon|biolog|clinical reasearch|physician|dentist|health'), 'OCCUPATION'] = 'Advance Sciences'\r\n df.loc[\r\n df['SOC_NAME'].str.contains(\r\n 'engineer|technician|surveyor|architec'), 'OCCUPATION'] = 'Engineering and Architecture'\r\n df['OCCUPATION'] = df.OCCUPATION.replace(np.nan, 'Others', regex=True)\r\n\r\ndef preprocessingTrainingdata(user_input):\r\n print(\"Pre Processing data for training set\")\r\n if(user_input=='1' or user_input=='3' or user_input=='2'):\r\n df_train = pd.read_csv('File 1 - H1B Dataset.csv',encoding=\"ISO-8859-1\")\r\n\r\n merge_labels(df_train)\r\n\r\n #clean data by filling the NAN data with appropriate values\r\n fill_nan_values(df_train)\r\n prevailing_wage(df_train)\r\n case_submission_year_range(df_train)\r\n \r\n #Create new column OCCUPATION to broadly classify the occupations for H1B petition filers.\r\n #Also creating column UNIV_EMPLOYER for checking if the emplyer name is a university\r\n classify_employer(df_train)\r\n\r\n class_mapping = {'CERTIFIED': 0, 'DENIED': 1}\r\n df_train[\"CASE_STATUS\"] = df_train[\"CASE_STATUS\"].map(class_mapping)\r\n\r\n#Creating a copy of dataset for prediction\r\n df1_train_set = df_train[\r\n ['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']].copy()\r\n\r\n df1_train_set[['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']] = df1_train_set[\r\n ['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']].apply(lambda x: x.astype('category'))\r\n\r\n #print(df1_train_set.head())\r\n\r\n X = df1_train_set.loc[:, 'FULL_TIME_POSITION':'WORKSITE_STATE']\r\n Y = df1_train_set.CASE_STATUS\r\n\r\n seed = 5\r\n X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.3, random_state=seed)\r\n #print(X_train.isnull().sum())\r\n\r\n X_train_encode = pd.get_dummies(X_train)\r\n X_val_encode = pd.get_dummies(X_validation)\r\n\r\n train_X = X_train_encode.values\r\n train_y = 
Y_train.values\r\n\r\n val_x = X_val_encode.values\r\n val_y = Y_validation.values\r\n print(\"Pre Processing is completed for training set\")\r\n return train_X,train_y,val_x,val_y\r\n\r\ndef preprocessingTestingdata(user_input):\r\n print(\"Pre Processing data for test set\")\r\n if(user_input=='1' or user_input=='3' or user_input=='2'):\r\n df_test = pd.read_csv('File 2 - H1B Dataset.csv',encoding=\"ISO-8859-1\")\r\n \r\n merge_labels(df_test)\r\n fill_nan_values(df_test)\r\n prevailing_wage(df_test)\r\n case_submission_year_range(df_test)\r\n classify_employer(df_test)\r\n df1_test_set = df_test[\r\n ['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']].copy()\r\n\r\n df1_test_set[['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']] = df1_test_set[\r\n ['FULL_TIME_POSITION', 'PREVAILING_WAGE_RANGE', 'CASE_SUBMITTED_YEAR_RANGE', 'UNIV_EMPLOYER', 'OCCUPATION', 'WORKSITE_STATE',\r\n 'CASE_STATUS']].apply(lambda x: x.astype('category'))\r\n class_mapping = {'CERTIFIED': 0, 'DENIED': 1}\r\n df1_test_set[\"CASE_STATUS\"] = df1_test_set[\"CASE_STATUS\"].map(class_mapping)\r\n\r\n X_test = df1_test_set.loc[:, 'FULL_TIME_POSITION':'WORKSITE_STATE']\r\n Y_test = df1_test_set.CASE_STATUS\r\n\r\n X_test_encode = pd.get_dummies(X_test)\r\n testX = X_test_encode.values\r\n\t\r\n testY = Y_test.values\r\n print(\"Pre Processing is completed for test set\")\r\n return testX, testY","sub_path":"DataPreProcessing.py","file_name":"DataPreProcessing.py","file_ext":"py","file_size_in_byte":8679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"102658962","text":"\"\"\"\n Use the same techniques such as (but not limited to):\n 1) Sockets\n 2) File I/O\n 3) raw_input()\n\n from the OSINT HW to complete this assignment. Good luck!\n\"\"\"\n\nimport socket\n\nhost = \"cornerstoneairlines.co\" # IP address here\nport = 45 # Port here\n\n# opens connection and issues command, expects command to start with \";\"\ndef execute_cmd(cmd):\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n s.send(cmd + \"\\n\")\n data = s.recv(1024)\n return data\n\n# shell logic\ndef shell():\n path = \"/\"\n while(True):\n cmd = raw_input(path.strip() + \"> \").strip()\n if (cmd == \"exit\"):\n break\n elif (cmd.startswith(\"cd\")):\n pwd = execute_cmd(\";cd \" + path + \";\" + cmd + \";pwd\").strip()\n if (pwd != \"\"):\n path = pwd\n else:\n print(execute_cmd(\";cd \" + path + \";\" + cmd).strip())\n\n# reads in remote file using cat command and then writes it to the local path\ndef pull(remote, local):\n file = execute_cmd(\";cat \" + remote)\n open(local, \"w+\").write(file)\n\n# prints commands that are available\ndef help():\n print(\n \"\"\"1) shell: Drop into an interactive shell and allow users to gracefully `exit`\n 2) pull : Download files\n 3) help: Shows this help menu\n 4) quit: Quit the shell\"\"\")\n\nif __name__ == '__main__':\n while(True):\n cmd = raw_input(\"> \").strip()\n if (cmd == \"shell\"):\n shell()\n elif (cmd.startswith(\"pull\")):\n args = cmd.split()\n if (len(args) == 3):\n pull(args[1], args[2])\n else:\n print(\"Pull takes two arguments\")\n elif (cmd == \"quit\"):\n break\n else:\n help()","sub_path":"week/4/writeup/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"349079128","text":"import requests\n\n\ndef main():\n\n with requests.Session() as s:\n\n r = s.post(\"http://127.0.0.1:5000/login\", data={\n \"username\": \"test\",\n \"password\": \"test\"\n }\n )\n\n print(r.text)\n\n for x in range(20, 100):\n\n r = s.post(\"http://127.0.0.1:5000/create_puzzle\", data={\n \"title\": \"The best puzzle \" + str(x),\n \"hint_1\": \"one\",\n \"answer_1\": \"two\",\n \"hint_2\": \"three\",\n \"answer_2\": \"four\",\n \"hint_3\": \"five\",\n \"answer_3\": \"six\",\n \"hint_4\": \"seven\",\n \"answer_4\": \"eight\",\n \"hint_5\": \"nine\",\n \"answer_5\": \"ten\",\n \"hint_6\": \"eleven\",\n \"answer_6\": \"twelve\",\n \"hint_7\": \"thirteen\",\n \"answer_7\": \"fourteen\",\n \"hint_8\": \"fifteen\",\n \"answer_8\": \"sixteen\",\n \"hint_9\": \"seventeen\",\n \"answer_9\": \"eighteen\",\n \"hint_10\": \"nineteen\",\n \"answer_10\": \"twenty\"\n })\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"util/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"393498689","text":"import numpy as np \nnp.random.seed(1337)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport matplotlib.pyplot as plt\n\n# create some data\nX = np.linspace(-1, 1, 200)\nnp.random.shuffle(X) # randomize the data\nY = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, ))\n# plot data\n# plt.scatter(X, Y)\n# plt.show()\n\nX_train, Y_train = X[:160], Y[:160] # train 前 160 data points\nX_test, Y_test = X[160:], Y[160:] # test 后 40 data points\n\n#build model\nmodel= Sequential()\nmodel.add(Dense(output_dim=1,input_dim=1))\n#choose loss function\nmodel.compile(loss='mse',optimizer='sgd')\n\n#train\nprint(\"training__________________________________\")\nfor step in range(301):\n cost = model.train_on_batch(X_train,Y_train)\n if step %100==0:\n #maybe str()\n print('traincost',cost)\n\n\n#test\nprint(\"test__________________________________\")\ncost =model.evaluate(X_test,Y_test,batch_size=40)\nW,b = model.layers[0].get_weights()\nprint('Weights=', W, '\\nbiases=', b)\n\n# plotting the prediction\nY_pred = model.predict(X_test)\nplt.scatter(X_test, Y_test)\nplt.plot(X_test, Y_pred)\nplt.show()\n","sub_path":"HW2-LSTM/keras_practice/keras_regressor_practice.py","file_name":"keras_regressor_practice.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"612846896","text":"# Copyright (C) 2009, Hyves (Startphone Ltd.)\n#\n# This module is part of the Concurrence Framework and is released under\n# the New BSD License: http://www.opensource.org/licenses/bsd-license.php\n\n#TODO write timeout\n\nfrom __future__ import with_statement\n\nimport logging\nimport urlparse\nimport httplib\nimport traceback\nimport rfc822\n\nfrom concurrence import Tasklet, Message, Channel, TimeoutError, __version__\nfrom concurrence.io import Socket, Buffer, BufferedStream\nfrom concurrence.containers import ReorderQueue\nfrom concurrence.timer import Timeout\nfrom concurrence.http import HTTPError, HTTPParser\n\nSERVER_ID = \"Concurrence-Http/%s\" % __version__\n\nclass WSGIErrorStream(object):\n def write(self, s):\n logging.error(s)\n\n def writelines(self, s):\n assert False, 'TODO'\n\n def flush(self):\n assert False, 'TODO'\n\nclass WSGIRequest(object):\n log = logging.getLogger('WSGIRequest')\n\n def __init__(self, environ):\n self.environ = environ\n\n self.environ['SCRIPT_NAME'] = '' #TODO\n\n #add wsgi stuff\n self.environ['wsgi.url_scheme'] = 'http'\n self.environ['wsgi.multiprocess'] = False\n self.environ['wsgi.multithread'] = True\n self.environ['wsgi.run_once'] = False\n self.environ['wsgi.version'] = (1, 0)\n\n #wsgi complience\n if 'HTTP_CONTENT_LENGTH' in self.environ:\n self.environ['CONTENT_LENGTH'] = self.environ['HTTP_CONTENT_LENGTH']\n\n if 'HTTP_CONTENT_TYPE' in self.environ:\n self.environ['CONTENT_TYPE'] = self.environ['HTTP_CONTENT_TYPE']\n\n #setup required wsgi streams\n #self.environ['wsgi.input'] = WSGIInputStream(self, reader)\n self.environ['wsgi.errors'] = WSGIErrorStream()\n\n if not 'HTTP_HOST' in self.environ:\n if self.environ['HTTP_VERSION'] == 'HTTP/1.0':\n #ok in version 1.0, TODO what should host in wsgi environ be?\n host = 'localhost'\n else:\n raise HTTPError('Host header field is required in HTTP version > 1.0')\n else:\n host = self.environ['HTTP_HOST']\n\n if ':' in host:\n host, port = host.split(':')\n else:\n host, port = host, 80\n\n self.environ['SERVER_NAME'] = host\n self.environ['SERVER_PORT'] = port\n self.environ['SERVER_PROTOCOL'] = self.environ['HTTP_VERSION']\n\n self.response_headers = []\n self.response_status = httplib.OK\n self.response_exc_info = None\n\n #print self.environ\n\n def start_response(self, status, response_headers, exc_info = None):\n self.response_status = status\n self.response_headers = response_headers\n self.response_exc_info = exc_info\n\n @property\n def uri(self):\n return self.environ['REQUEST_URI']\n\n @property\n def version(self):\n return self.environ['HTTP_VERSION']\n\nclass HTTPConnection(object):\n\n def __init__(self, server, client_socket):\n self._server = server\n self._stream = BufferedStream(client_socket)\n #print 'new con'\n\n def _write_response(self, version, status, headers, response):\n\n if version == 'HTTP/1.0':\n chunked = False\n else:\n chunked = True\n\n headers.append(('Date', rfc822.formatdate()))\n headers.append(('Server', SERVER_ID))\n\n if chunked:\n headers.append(('Transfer-Encoding', 'chunked'))\n else:\n headers.append(('Content-length', str(len(response))))\n response = ''.join(response)\n\n with self._stream.get_writer() as writer:\n writer.clear()\n writer.write_bytes(\"%s %s\\r\\n\" % (version, status))\n writer.write_bytes('\\r\\n'.join([\"%s: %s\" % (k, v) for k, v in headers]))\n writer.write_bytes(\"\\r\\n\\r\\n\")\n\n if chunked:\n for chunk in response:\n writer.write_bytes(\"%x;\\r\\n\" % len(chunk))\n 
writer.write_bytes(chunk)\n writer.write_bytes(\"\\r\\n\")\n writer.write_bytes(\"0\\r\\n\\r\\n\")\n else:\n writer.write_bytes(response)\n\n writer.flush()\n\n def _read_request(self):\n\n with self._stream.get_reader() as reader:\n reader.fill() #initial fill\n parser = HTTPParser(reader.buffer)\n while True:\n #parse the buffer\n if parser.parse():\n break #ok\n else:\n #need more data from socket, could not parse request with data currently in buffer\n reader.append() \n return WSGIRequest(parser.environ)\n\n def _handle_request(self):\n request = self._read_request()\n response = self._server.handle_request(request)\n self._write_response(request.version, request.response_status, request.response_headers, response) \n if request.version == 'HTTP/1.0':\n self._close()\n else:\n self._stream._stream.readable.notify(self.handle, 10)\n\n def _close(self):\n self._stream.close()\n\n def handle(self, has_timedout = False):\n if has_timedout:\n self._stream.close()\n else:\n Tasklet.defer(self._handle_request)\n\nclass WSGIServer(object):\n \"\"\"A HTTP/1.1 Web server with WSGI application interface.\n\n Usage::\n\n def hello_world(environ, start_response):\n start_response(\"200 OK\", [])\n return [\"Hello, world!\"]\n\n server = WSGIServer(hello_world)\n server.serve(('localhost', 8080))\n \"\"\"\n log = logging.getLogger('WSGIServer')\n\n def __init__(self, application, request_log_level = logging.DEBUG):\n \"\"\"Create a new WSGIServer serving the given *application*. Optionally\n the *request_log_level* can be given. This loglevel is used for logging the requests.\"\"\"\n self._application = application\n self._request_log_level = request_log_level\n\n def internal_server_error(self, environ, start_response):\n \"\"\"Default WSGI application for creating a default `500 Internal Server Error` response on any\n unhandled exception.\n The default response will render a traceback with a text/plain content-type.\n Can be overridden to provide a custom response.\"\"\"\n start_response('500 Internal Server Error', [('Content-type', 'text/plain')])\n return [traceback.format_exc(20)]\n\n def handle_request(self, request):\n \"\"\"All HTTP requests pass trough this method.\n This method provides a hook for logging, statistics and or further processing w.r.t. the *request*.\"\"\"\n try:\n response = self._application(request.environ, request.start_response)\n self.log.log(self._request_log_level, \"%s %s\", request.response_status, request.uri) \n except TaskletExit:\n raise\n except:\n self.log.exception(\"unhandled exception while handling request\")\n response = self.internal_server_error(request.environ, request.start_response)\n return response\n\n def handle_connection(self, client_socket):\n \"\"\"All HTTP connections pass trough this method.\n This method provides a hook for logging, statistics and or further processing w.r.t. the connection.\"\"\"\n HTTPConnection(self, client_socket).handle()\n\n def serve(self, endpoint):\n \"\"\"Serves the application at the given *endpoint*. The *endpoint* must be a tuple (, ).\"\"\"\n server_socket = Socket.server(endpoint)\n for client_socket in server_socket.accept_iter():\n self.handle_connection(client_socket)\n\n\n","sub_path":"lib/concurrence/http/server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"502396290","text":"# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pathlib import PurePath\nfrom textwrap import dedent\nfrom typing import List, Optional\n\nfrom pants.backend.python.lint.pylint.rules import PylintFieldSet, PylintFieldSets\nfrom pants.backend.python.lint.pylint.rules import rules as pylint_rules\nfrom pants.backend.python.target_types import PythonInterpreterCompatibility, PythonLibrary\nfrom pants.backend.python.targets.python_library import PythonLibrary as PythonLibraryV1\nfrom pants.base.specs import FilesystemLiteralSpec, OriginSpec, SingleAddress\nfrom pants.build_graph.build_file_aliases import BuildFileAliases\nfrom pants.core.goals.lint import LintResult\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import FileContent\nfrom pants.engine.legacy.graph import HydratedTargets\nfrom pants.engine.rules import RootRule\nfrom pants.engine.selectors import Params\nfrom pants.engine.target import Dependencies, Sources, TargetWithOrigin\nfrom pants.testutil.external_tool_test_base import ExternalToolTestBase\nfrom pants.testutil.option.util import create_options_bootstrapper\n\n\nclass PylintIntegrationTest(ExternalToolTestBase):\n # See http://pylint.pycqa.org/en/latest/user_guide/run.html#exit-codes for exit codes.\n source_root = \"src/python\"\n good_source = FileContent(\n path=f\"{source_root}/good.py\", content=b\"'''docstring'''\\nUPPERCASE_CONSTANT = ''\\n\",\n )\n bad_source = FileContent(\n path=f\"{source_root}/bad.py\", content=b\"'''docstring'''\\nlowercase_constant = ''\\n\",\n )\n\n @classmethod\n def alias_groups(cls) -> BuildFileAliases:\n return BuildFileAliases(targets={\"python_library\": PythonLibraryV1})\n\n @classmethod\n def target_types(cls):\n return [PythonLibrary]\n\n @classmethod\n def rules(cls):\n return (\n *super().rules(),\n *pylint_rules(),\n RootRule(PylintFieldSets),\n RootRule(HydratedTargets),\n )\n\n def make_target_with_origin(\n self,\n source_files: List[FileContent],\n *,\n name: str = \"target\",\n interpreter_constraints: Optional[str] = None,\n origin: Optional[OriginSpec] = None,\n dependencies: Optional[List[Address]] = None,\n ) -> TargetWithOrigin:\n for source_file in source_files:\n self.create_file(source_file.path, source_file.content.decode())\n source_globs = [PurePath(source_file.path).name for source_file in source_files]\n self.create_library(\n path=self.source_root, target_type=PythonLibrary.alias, name=name, sources=source_globs\n )\n # We must re-write the files because `create_library` will have over-written the content.\n for source_file in source_files:\n self.create_file(source_file.path, source_file.content.decode())\n target = PythonLibrary(\n {\n Sources.alias: source_globs,\n Dependencies.alias: dependencies,\n PythonInterpreterCompatibility.alias: interpreter_constraints,\n },\n address=Address(self.source_root, name),\n )\n if origin is None:\n origin = SingleAddress(directory=self.source_root, name=name)\n return TargetWithOrigin(target, origin)\n\n def run_pylint(\n self,\n targets: List[TargetWithOrigin],\n *,\n config: Optional[str] = None,\n passthrough_args: Optional[str] = None,\n skip: bool = False,\n ) -> LintResult:\n args = [\"--backend-packages2=pants.backend.python.lint.pylint\"]\n if config:\n self.create_file(relpath=\"pylintrc\", contents=config)\n args.append(\"--pylint-config=pylintrc\")\n if passthrough_args:\n 
args.append(f\"--pylint-args='{passthrough_args}'\")\n if skip:\n args.append(f\"--pylint-skip\")\n return self.request_single_product(\n LintResult,\n Params(\n PylintFieldSets(PylintFieldSet.create(tgt) for tgt in targets),\n create_options_bootstrapper(args=args),\n ),\n )\n\n def test_passing_source(self) -> None:\n target = self.make_target_with_origin([self.good_source])\n result = self.run_pylint([target])\n assert result.exit_code == 0\n assert \"Your code has been rated at 10.00/10\" in result.stdout.strip()\n\n def test_failing_source(self) -> None:\n target = self.make_target_with_origin([self.bad_source])\n result = self.run_pylint([target])\n assert result.exit_code == 16 # convention message issued\n assert \"bad.py:2:0: C0103\" in result.stdout\n\n def test_mixed_sources(self) -> None:\n target = self.make_target_with_origin([self.good_source, self.bad_source])\n result = self.run_pylint([target])\n assert result.exit_code == 16 # convention message issued\n assert \"good.py\" not in result.stdout\n assert \"bad.py:2:0: C0103\" in result.stdout\n\n def test_multiple_targets(self) -> None:\n targets = [\n self.make_target_with_origin([self.good_source], name=\"t1\"),\n self.make_target_with_origin([self.bad_source], name=\"t2\"),\n ]\n result = self.run_pylint(targets)\n assert result.exit_code == 16 # convention message issued\n assert \"good.py\" not in result.stdout\n assert \"bad.py:2:0: C0103\" in result.stdout\n\n def test_precise_file_args(self) -> None:\n target = self.make_target_with_origin(\n [self.good_source, self.bad_source], origin=FilesystemLiteralSpec(self.good_source.path)\n )\n result = self.run_pylint([target])\n assert result.exit_code == 0\n assert \"Your code has been rated at 10.00/10\" in result.stdout.strip()\n\n def test_respects_config_file(self) -> None:\n target = self.make_target_with_origin([self.bad_source])\n result = self.run_pylint([target], config=\"[pylint]\\ndisable = C0103\\n\")\n assert result.exit_code == 0\n assert \"Your code has been rated at 10.00/10\" in result.stdout.strip()\n\n def test_respects_passthrough_args(self) -> None:\n target = self.make_target_with_origin([self.bad_source])\n result = self.run_pylint([target], passthrough_args=\"--disable=C0103\")\n assert result.exit_code == 0\n assert \"Your code has been rated at 10.00/10\" in result.stdout.strip()\n\n def test_includes_direct_dependencies(self) -> None:\n self.make_target_with_origin(source_files=[], name=\"transitive_dependency\")\n\n direct_dependency_content = dedent(\n \"\"\"\\\n # No docstring because Pylint doesn't lint dependencies\n\n from transitive_dep import doesnt_matter_if_variable_exists\n\n THIS_VARIABLE_EXISTS = ''\n \"\"\"\n )\n self.make_target_with_origin(\n source_files=[\n FileContent(\n f\"{self.source_root}/direct_dependency.py\", direct_dependency_content.encode()\n )\n ],\n name=\"direct_dependency\",\n dependencies=[Address(self.source_root, \"transitive_dependency\")],\n )\n\n source_content = dedent(\n \"\"\"\\\n '''Code is not executed, but Pylint will check that variables exist and are used'''\n from direct_dependency import THIS_VARIABLE_EXISTS\n\n print(THIS_VARIABLE_EXISTS)\n \"\"\"\n )\n target = self.make_target_with_origin(\n source_files=[FileContent(f\"{self.source_root}/target.py\", source_content.encode())],\n dependencies=[Address(self.source_root, \"direct_dependency\")],\n )\n\n result = self.run_pylint([target])\n assert result.exit_code == 0\n assert \"Your code has been rated at 10.00/10\" in result.stdout.strip()\n\n 
def test_skip(self) -> None:\n target = self.make_target_with_origin([self.bad_source])\n result = self.run_pylint([target], skip=True)\n assert result == LintResult.noop()\n","sub_path":"src/python/pants/backend/python/lint/pylint/rules_integration_test.py","file_name":"rules_integration_test.py","file_ext":"py","file_size_in_byte":8105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"394175329","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Routine import Routine\nfrom PurchaseOrder import PurchaseOrder\nfrom PurchaseOrder import PurchaseOrderItemRow\nfrom Item import Item\nfrom Stock import Stock\n\n\"\"\"\n from StockDepo import StockDepo\n query = Query()\n query.sql = \"SELECT si.{MinQty}, si.{SupUnit},si.{NormalQty},s.{Qty},s.{ArtCode},i.{MinLevel} \"\n query.sql += \"FROM [SupplierItem] si \"\n query.sql += \"INNER JOIN [Stock] s ON si.{ArtCode} = s.{ArtCode} \"\n query.sql += \"INNER JOIN [Item] i ON i.{Code} = si.{ArtCode} \"\n query.sql += \"WHERE?AND (s.{Qty} < i.{MinLevel} OR s.{Qty} IS NULL) \" # Null in case item was never used yet\n query.sql += \"WHERE?AND si.{SupCode} = s|%s| \" % self.SupCode\n query.sql += \"GROUP BY s.{ArtCode} \"\n if query.open():\n for rec in query:\n stockqty = 0\n if rec.Qty: stockqty = rec.Qty\n # Depends on the stock policy \n if (rec.MinLevel):\n orderqty = rec.MinLevel - stockqty\n elif rec.NormalQty: \n orderqty = rec.NormalQty\n else:\n orderqty = 0\n poir = PurchaseOrderItemRow()\n poir.ArtCode = rec.ArtCode\n poir.Qty = orderqty\n poir.pasteArtCode(self)\n poir.pasteQty(self)\n poir.sumUp(self)\n self.Items.append(poir)\n self.sumUp()\n\"\"\"\n\nclass GenPurchaseOrderRoutine(Routine):\n def getSupplierItems(self, supcode):\n res = {}\n query = Query()\n query.sql = \"SELECT [SupplierItem].{SupItemCode}, [SupplierItem].{Discount1}, [SupplierItem].{Discount2},[SupplierItem].{Discount3},[SupplierItem].{Discount4},[SupplierItem].{Discount5},[SupplierItem].{ArtCode}, [Item].{MinLevel}, [Item].{MaxLevel}, [SupplierItem].{MinQty} FROM [SupplierItem]\"\n query.sql += \" INNER JOIN [Item] ON [Item].{Code} = [SupplierItem].{ArtCode}\"\n query.sql += \" WHERE?AND [SupplierItem].{SupCode} = s|%s| \" % supcode\n query.sql += \" WHERE?AND [SupplierItem].{Default} = i|1| \"\n query.sql += \" WHERE?AND ([Item].{Closed} = i|0| OR [Item].{Closed} IS NULL) \"\n if query.open():\n for rec in query:\n res[rec.ArtCode] = {\"MaxLevel\": rec.MaxLevel, \"MinLevel\": rec.MinLevel, \"MinQty\": rec.MinQty, \"MinQty\": rec.MinQty, \"DeliveryPendingQty\": Item.getDeliveryPendingQty(rec.ArtCode), \"PurchaseOrderPendingQty\": Item.getPurchaseOrderPendingQty(rec.ArtCode), \"Stock\": Stock.getQty(rec.ArtCode), \"Discount1\": rec.Discount1, \"Discount1\": rec.Discount1, \"Discount2\": rec.Discount2, \"Discount3\": rec.Discount3, \"Discount4\": rec.Discount4, \"Discount5\": rec.Discount5, \"SupArtCode\": rec.SupItemCode}\n else:\n message(\"No hay Articulos-Proveedor para generar\")\n return res\n\n def genPurchaseOrder(self, supcode):\n po = PurchaseOrder()\n items = self.getSupplierItems(supcode)\n #alert(items['3'])\n if len(items):\n po.defaults()\n po.SupCode = supcode\n po.pasteSupCode()\n artcodes = items.keys()\n artcodes.sort()\n for artcode in artcodes:\n item = items[artcode]\n #if artcode == \"180-301\": alert(item)\n if item[\"Stock\"] - item[\"DeliveryPendingQty\"] + item[\"PurchaseOrderPendingQty\"] < item[\"MinLevel\"]:\n porow = PurchaseOrderItemRow()\n #if artcode == \"180-301\": alert(\"pegado\")\n porow.ArtCode = artcode\n porow.pasteArtCode(po)\n porow.Qty = item[\"MaxLevel\"] - (item[\"Stock\"] - item[\"DeliveryPendingQty\"] + item[\"PurchaseOrderPendingQty\"])\n if porow.Qty < item[\"MinQty\"]:\n porow.Qty = item[\"MinQty\"]\n porow.pasteQty(po)\n for d in (\"Discount1\",\"Discount2\",\"Discount3\",\"Discount4\",\"Discount5\"):\n porow.fields(d).setValue(item[d])\n porow.pasteAccDiscounts(po)\n porow.SupArtCode 
= item[\"SupArtCode\"]\n porow.sumUp(po)\n po.Items.append(porow)\n po.sumUp()\n return po\n return None\n \n def run(self):\n spec = self.getRecord()\n if spec.SupCode:\n po = self.genPurchaseOrder(spec.SupCode)\n if po:\n if po.Items.count():\n from PurchaseOrderWindow import PurchaseOrderWindow\n w = PurchaseOrderWindow()\n w.setRecord(po)\n w.open()\n else:\n message(\"No hay faltantes para este proveedor\")\n else:\n query = Query()\n query.sql += \"SELECT {Code} FROM [Supplier]\"\n if query.open():\n for rec in query:\n po = self.genPurchaseOrder(rec.Code)\n if po and po.Items.count():\n if not po.save():\n message(tr(\"Routine terminated without saving results\"))\n rollback()\n return\n \n","sub_path":"standard/routines/GenPurchaseOrderRoutine.py","file_name":"GenPurchaseOrderRoutine.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"159227607","text":"import numpy as np\nimport scipy as np\nfrom operator import le\nimport pso\nimport pso_adv\n\nclass PSO_ANN(object):\n\n def __init__(self, hidden_layers = None, lmbd = 1.0,\n p_size = 20, w = 0.8, c1 = 2, c2 = 2, maxiter = 2000, vmax = 1):\n\n if hidden_layers is None:\n self.hidden_layers = [100]\n else:\n self.hidden_layers = hidden_layers\n\n self.L = 2 + len(self.hidden_layers)\n self.A = [None] * self.L\n self.theta = [None] * (self.L - 1)\n self.delta = [None] * self.L\n self.grad = [None] * self.L\n self.lmbd = lmbd\n\n self.p_size = p_size\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.maxiter = maxiter\n self.vmax = vmax\n\n @staticmethod\n def _sigma(x):\n return 1.0 / (1 + np.exp(-x))\n\n def _f(self, Theta):\n self.A[0] = self.X\n\n theta_idx = 0\n\n for i in range(self.L - 1):\n theta_shape = (self.layers[i] + 1, self.layers[i + 1])\n theta_len = theta_shape[0] * theta_shape[1]\n self.theta[i] = (Theta[theta_idx : theta_idx + theta_len].\n reshape(theta_shape))\n theta_idx += theta_len\n\n z = np.hstack((\n np.ones((self.A[i].shape[0], 1)),\n self.A[i])).dot(self.theta[i])\n self.A[i + 1] = self._sigma(z)\n\n mJ = (- (self.target * np.log(self.A[-1]) +\n (1 - self.target) * np.log(1 - self.A[-1])).sum()\n + self.lmbd * 0.5 * (Theta ** 2).sum())\n #Theta**2?\n\n# self.delta[-1] = self.A[-1] - self.target\n# for i in range(self.L - 2, 0, -1):\n# self.delta[i] = (self.delta[i + 1].dot(self.theta[i].T[:, 1:]) *\n# self.A[i] * (1 - self.A[i]))\n\n# for i in range(self.L - 1):\n# self.grad[i] = np.vstack((\n# np.ones((1, self.A[i].shape[0])),\n# self.A[i].T)).dot(self.delta[i + 1])\n\n# Grad = np.concatenate(map(lambda x: x.flatten(), self.grad[:-1]))\n# Grad += self.lmbd * Theta\n\n return mJ\n\n def init_theta(self):\n init_thetas = [None] * (self.L - 1)\n for i in range(self.L - 1):\n epsilon = np.sqrt(6.0 / (self.layers[i] + self.layers[i + 1]))\n init_thetas[i] = np.random.mtrand.rand(self.layers[i] + 1,\n self.layers[i + 1]) * 2.0 * epsilon - epsilon\n return np.concatenate(map(lambda x: x.flatten(), init_thetas)) \n\n def fit(self, X, y): \n self.X = X \n target_labels = sorted(list(set(y)))\n #cut the redundant\n labels_count = len(target_labels)\n self.labels_map = dict(zip(target_labels, range(labels_count)))\n self.labels_index_map = dict(zip(range(labels_count), target_labels))\n\n self.target = np.zeros((X.shape[0], labels_count))\n for i, label in enumerate(y):\n self.target[i, self.labels_map[label]] = 1\n\n self.layers = [X.shape[1]]\n self.layers.extend(self.hidden_layers)\n self.layers.append(labels_count)\n \n# self.result = optimize.minimize(self._f, x0 = init_theta,\n# method = self.optimization_method, jac = True,\n# options = self.method_specific_options)\n self.result = pso_adv.PSO(self._f, le, self.init_theta, \n self.p_size, lambda x: all(x<3) and all(x>-3), self.vmax,\n self.w, self.c1 , self.c2, self.maxiter,\n nor_perceptron = 10, nor_r = 0.2).get_ans()\n\n self.optimized_theta = []\n optimized_theta = self.result[0]\n theta_idx = 0\n for i in range(self.L - 1):\n theta_shape = (self.layers[i] + 1, self.layers[i + 1])\n theta_len = theta_shape[0] * theta_shape[1]\n self.optimized_theta.append(\n optimized_theta[theta_idx : theta_idx + theta_len]\n .reshape(theta_shape))\n theta_idx += theta_len\n\n def predict(self, X):\n labels_idx = self.predict_proba(X).argmax(axis = 1)\n return map(lambda x: self.labels_index_map[x], labels_idx)\n\n def predict_proba(self, X):\n self.A[0] = X\n m = X.shape[0]\n\n for i in 
range(self.L - 1):\n _X = np.hstack((np.ones((m, 1)), self.A[i]))\n self.A[i + 1] = self._sigma(_X.dot(self.optimized_theta[i]))\n\n return self.A[-1]\n\n","sub_path":"ANN_model/pso_ann.py","file_name":"pso_ann.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"616144551","text":"# Adventure 8: banana.py\n#\n# From the book: \"Adventures in Minecraft\", 2nd Edition\n# written by David Whale and Martin O'Hanlon, Wiley, 2017\n# http://eu.wiley.com/WileyCDA/WileyTitle/productCd-1119439582.html\n#\n# This program senses when you touch a banana connected to your micro:bit\n\nimport microbit\nimport time\n\nBANANA = microbit.Image(\"00090:00090:00990:09900:99000\")\n\nwhile True:\n time.sleep(0.25)\n if microbit.pin0.is_touched():\n microbit.display.show(BANANA)\n else:\n microbit.display.show('?')\n\n# END\n\n","sub_path":"Adventure8/banana.py","file_name":"banana.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"146266018","text":"from . import actionTable\nimport depot\nfrom . import experiment\nimport gui\nimport util\n\nimport decimal\nimport math\nimport wx\n\n## Provided so the UI knows what to call this experiment.\nEXPERIMENT_NAME = 'RotatorSweep'\n\n\n## This class handles classic Z-stack experiments.\nclass RotatorSweepExperiment(experiment.Experiment):\n def __init__(self, polarizerHandler=None, settlingTime=0.1,\n startV=0.0, maxV=10., vSteps=100, *args, **kwargs):\n experiment.Experiment.__init__(self, *args, **kwargs)\n self.polarizerHandler = polarizerHandler\n self.settlingTime = settlingTime\n # Look up the rotator analogue line handler.\n self.lineHandler = polarizerHandler.getLineHandler()\n self.vRange = (startV, maxV, vSteps)\n vDelta = float(maxV - startV) / vSteps\n # Add voltage parameters to the metadata.\n self.metadata = 'Rotator start and delta: [%f, %f]' % (startV, vDelta)\n\n\n ## Create the ActionTable needed to run the experiment.\n def generateActions(self):\n table = actionTable.ActionTable()\n curTime = 0\n vStart, vLessThan, vSteps = self.vRange\n dv = float(vLessThan - vStart) / float(vSteps)\n dt = decimal.Decimal(self.settlingTime)\n\n for step in range(vSteps):\n # Move to next polarization rotator voltage.\n vTarget = vStart + step * dv\n table.addAction(curTime, self.lineHandler, vTarget)\n curTime += dt\n # Image the sample.\n for cameras, lightTimePairs in self.exposureSettings:\n curTime = self.expose(curTime, cameras, lightTimePairs, table)\n # Advance the time very slightly so that all exposures\n # are strictly ordered.\n curTime += decimal.Decimal('.001')\n # Hold the rotator angle constant during the exposure.\n table.addAction(curTime, self.lineHandler, vTarget)\n # Advance time slightly so all actions are sorted (e.g. 
we\n # don't try to change angle and phase in the same timestep).\n curTime += dt\n\n return table\n\n\n## A consistent name to use to refer to the class itself.\nEXPERIMENT_CLASS = RotatorSweepExperiment\n\n\n## Generate the UI for special parameters used by this experiment.\nclass ExperimentUI(wx.Panel):\n def __init__(self, parent, configKey):\n wx.Panel.__init__(self, parent = parent)\n self.configKey = configKey\n sizer = wx.GridSizer(2, 4, 1)\n ## Maps strings to TextCtrls describing how to configure\n # response curve experiments.\n self.settings = self.loadSettings()\n self.settlingTimeControl = gui.guiUtils.addLabeledInput(\n self, sizer, label='settling time',\n defaultValue=self.settings['settlingTime'],)\n sizer.Add(self.settlingTimeControl)\n self.vStepsControl = gui.guiUtils.addLabeledInput(\n self, sizer, label='V steps',\n defaultValue=self.settings['vSteps'],)\n sizer.Add(self.vStepsControl)\n self.startVControl = gui.guiUtils.addLabeledInput(\n self, sizer, label='V start',\n defaultValue=self.settings['startV'],)\n sizer.Add(self.startVControl)\n self.maxVControl = gui.guiUtils.addLabeledInput(\n self, sizer, label='V max',\n defaultValue=self.settings['maxV'],)\n sizer.Add(self.maxVControl)\n self.SetSizerAndFit(sizer)\n\n\n ## Given a parameters dict (parameter name to value) to hand to the\n # experiment instance, augment them with our special parameters.\n def augmentParams(self, params):\n self.saveSettings()\n params['settlingTime'] = gui.guiUtils.tryParseNum(self.settlingTimeControl, float)\n params['startV'] = gui.guiUtils.tryParseNum(self.startVControl, float)\n params['maxV'] = gui.guiUtils.tryParseNum(self.maxVControl, float)\n params['vSteps'] = gui.guiUtils.tryParseNum(self.vStepsControl)\n params['polarizerHandler'] = depot.getHandlerWithName('SI polarizer')\n return params\n\n\n ## Load the saved experiment settings, if any.\n def loadSettings(self):\n return util.userConfig.getValue(\n self.configKey + 'RotatorSweepExperimentSettings',\n default = {\n 'settlingTime': '0.1',\n 'startV' : '0.0',\n 'maxV': '10.0',\n 'vSteps': '100',\n }\n )\n\n\n ## Generate a dict of our settings.\n def getSettingsDict(self):\n return {\n 'settlingTime': self.settlingTimeControl.GetValue(),\n 'startV': self.startVControl.GetValue(),\n 'maxV': self.maxVControl.GetValue(),\n 'vSteps': self.vStepsControl.GetValue(),}\n\n\n ## Save the current experiment settings to config.\n def saveSettings(self, settings = None):\n if settings is None:\n settings = self.getSettingsDict()\n util.userConfig.setValue(\n self.configKey + 'RotatorSweepExperimentSettings',\n settings)\n","sub_path":"experiment/rotatorSweep.py","file_name":"rotatorSweep.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"129948540","text":"# ccle_pre_mut.py\r\n# Prepare easy to read mutation file: ccle_pat2mut.txt, ccle_setmut.txt\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# process data part.\r\nidx2mut = {}\r\nf = open('rawdata/CCLE_MUT.csv','r')\r\nfor line in f:\r\n line = line.strip().split(',')\r\n idx2mut = {idx:mut[1:-1].lower() for idx,mut in enumerate(line[1:])}\r\n break\r\n\r\nf1 = open('inputdata/ccle_pat2mut.txt','w')\r\nfor line in f:\r\n line = line.strip().split(',')\r\n can = line[0][1:-1].lower()\r\n Idx = np.where(np.asarray(line[1:],dtype=int)==1)[0]\r\n Mut = [idx2mut[idx] for idx in Idx]\r\n # note: 106 cell lines do not have any mutation information: \r\n # ignore them.\r\n if len(Mut) == 0: continue\r\n print >> f1, can+'\\t'+'\\t'.join(Mut)\r\nf1.close()\r\nf.close()\r\n# stat: 937 pat\r\n\r\n\r\nsetmut = set()\r\nnummut = []\r\nf = open('inputdata/ccle_pat2mut.txt','r')\r\nfor line in f:\r\n line = line.strip().split('\\t')\r\n setmut = setmut | set(line[1:])\r\n nummut.append(len(line[1:]))\r\nsetmut = sorted(list(setmut))\r\nprint >> open('inputdata/ccle_setmut.txt','w'), '\\n'.join(setmut)\r\n# stat: 1639 mut\r\n\r\nplt.plot(nummut,'k.')\r\n# stat: mean=68.3, media=49\r\n\r\n#EOF.","sub_path":"charge/ccle/ccle_pre_mut.py","file_name":"ccle_pre_mut.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"251667361","text":"# https://youtu.be/6Ilb270zYpU\n# https://repl.it/student/assignments/499618/model_solution?fromSubmissionId=1891345\n\n\"\"\"Define a class with the `class` keyword\"\"\"\nclass Node:\n \"\"\"The `__init__` method on a class in Python is analogous\n with JavaScript's `constructor` method; it specifies how a\n class should be initialized give some parameters. You'll\n also notice the `self` keyword, which is passed in to\n every class method as the first argument. It's very much\n analogous to JavaScript's `this` keyword.\"\"\"\n def __init__(self, data=None, next_node=None):\n self.data = data\n self.next_node = next_node\n\n \"\"\"Returns the data stored at the current node\"\"\"\n def get_data(self):\n return self.data\n\n \"\"\"Returns the next node this node points to\"\"\"\n def get_next(self):\n return self.next_node\n\n \"\"\"Sets this node's `next_node` pointer\"\"\"\n def set_next(self, new_next):\n self.next_node = new_next\n\n\n\"\"\"Now that we've defined our `Node`, we can define our Linked List\nclass, which will utilize our `Node` class\"\"\"\nclass LinkedList:\n def __init__(self, head=None):\n self.head = head\n\n \"\"\"Wraps the input item in a Node and adds it as the\n current node's next node\"\"\"\n def insert(self, item):\n new_node = Node(item)\n new_node.set_next(self.head)\n self.head = new_node\n\n \"\"\"Returns the number of nodes in the linked list\"\"\"\n def size(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.get_next()\n return count\n\n \"\"\"Returns the target item if it is in the linked list,\n and None otherwise\"\"\"\n def search(self, target):\n current = self.head\n found = False\n while current and found is False:\n if current.get_data() == target:\n found = True\n else:\n current = current.get_next()\n return current\n\n \"\"\"Deletes the target item from the linked list if it is\n in the list. Raises a ValueError exception otherwise if\n the target item is not in the list\"\"\"\n def delete(self, target):\n current = self.head\n previous = None\n found = False\n while current and found is False:\n if current.get_data() == target:\n found = True\n else:\n previous = current\n current = current.get_next()\n if current is None:\n raise ValueError('Data not in list')\n if previous is None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n \n","sub_path":"cc73linkedList/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"112419111","text":"import pygame\n\nWIDTH = 1000\nHEIGHT = int(WIDTH * 2 / 3)\n\ndef render(surface, n: dict):\n for key, value in n.items():\n surface.blit(value, key)\n\n return surface\n\n\nclass BackgroundChanger:\n\n def __init__(self):\n self.background_id = 0\n\n def background(self, id:int = None):\n if id != self.background_id:\n self.background_id = id\n background = pygame.image.load(f\"./assets/backgrounds/background_{self.background_id}.jpg\")\n background = pygame.transform.scale(background, (WIDTH, HEIGHT))\n return background\n","sub_path":"include/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"68464267","text":"import asyncio\nimport uuid \nfrom zipfile import ZipFile\nfrom os import listdir, path\nfrom json import loads, dumps\nfrom datetime import datetime\n\nfrom ..db import conn\nfrom ..servernet import get_server_file\n\nasync def store_stats_in_database():\n\tcursor = conn.cursor()\n\tprint(\"Preparing to update player stats...\")\n\tbackups_folder = get_server_file()\n\n\tall_zips = [f for f in listdir(backups_folder) if f.endswith('.zip')]\n\tall_zips = sorted(all_zips)\n\t\n\t# Store the final data to be stored in DB\n\tdata_by_date: dict = {}\n\tfor fname in all_zips:\n\t\t# Grab fnames that have actual backup data\n\t\tprint(f\"Processing {fname} for stats\")\n\t\ttry:\n\t\t\t# The date to associate this particular data point with\n\t\t\t# The file is of the format Backup--world--DATE\n\t\t\t# So just ignore irrelevant strings including zip extension\n\t\t\traw_date = fname[:-4]\n\t\t\tparsed_date = datetime.strptime(raw_date, \"%Y-%m-%d-%H-%M-%S\")\n\n\t\t\twith ZipFile(path.join(backups_folder, fname), 'r') as zf:\n\t\t\t\tuser_datas = []\n\t\t\t\t# Could potentially break if some other stats folder comes into play.\n\t\t\t\tstats_fnames = [f for f in zf.namelist() if f.startswith(f\"world{path.sep}stats{path.sep}\") and f.endswith(\".json\")]\n\n\t\t\t\tfor stat_fname in stats_fnames:\n\t\t\t\t\t# Get the UUID of the user by parsing file name\n\t\t\t\t\t# Separate by folder delimitter and then remove json extension\n\t\t\t\t\tuniq_id = stat_fname.split(path.sep)[-1][:-5]\n\n\t\t\t\t\tf = zf.read(stat_fname)\n\t\t\t\t\t\n\t\t\t\t\t# Process the loaded data\n\t\t\t\t\tparsed_data = (str(uuid.uuid4()), parsed_date.strftime('%Y-%m-%dT%H:%M:%S.000Z'), uniq_id, f)\n\t\t\t\t\tuser_datas.append(parsed_data)\n\n\t\t\t\tcursor.executemany(\"INSERT OR IGNORE INTO PlayerStats (id, date, userId, stats) VALUES (?, ?, ?, ?)\", user_datas)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\tconn.commit()\n\tprint(\"Done updating player stats\")\n\t\t\t\t\t\t\t\t\t\ndef clean_stats_json(loaded_json: dict) -> dict:\n\t\"\"\"JSON loads makes each element in the stats file\n\tinto its own key, and doesn't recursively create\n\ta dict. Fix that by doing exactly that.\"\"\"\n\tcleaned_json = {}\n\n\tfor key, value in loaded_json.items():\n\t\t# We need the last element too because some stats\n\t\t# hold accumulations of all its children.\n\t\tpath_list = key.split(\".\")\n\n\t\tdrill = cleaned_json\n\t\tfor stat_key in path_list:\n\t\t\tdrill = drill.setdefault(stat_key, {})\n\n\t\t# To remove ambiguity and type checking, store the\n\t\t# actual stat value in a unique key.\n\t\tdrill[\"_stat\"] = value\t\n\n\treturn cleaned_json","sub_path":"mcdiscord/schedule/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"81180591","text":"import re\nfrom typing import Callable, List\n\nfrom langdetect import detect_langs\nfrom nltk import word_tokenize, WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nfrom voikko_lemmatizer import VoikkoLemmatizer\n\nSTOPWORDS = stopwords.words('english') + stopwords.words('finnish')\n\n\ndef tokenize_and_clean_text(text: str,\n tokenization_method: Callable,\n lemmatization_method: Callable = None,\n strip_character_list: List[str] = None,\n stopwords: List[str] = None):\n text = text.lower()\n if strip_character_list:\n text = remove_characters(text, strip_character_list)\n\n tokens = tokenization_method(text)\n if stopwords:\n tokens = remove_stopwods(stopwords, tokens)\n\n if lemmatization_method:\n tokens = lemmatization_method(tokens)\n\n tokens = remove_non_alphabetic_characters(tokens)\n return tokens\n\n\ndef remove_non_alphabetic_characters(tokens):\n tokens = [token for token in tokens if token.isalpha()]\n return tokens\n\n\ndef remove_stopwods(stopwords, tokens):\n tokens = [token for token in tokens if token not in stopwords]\n return tokens\n\n\ndef lemmatize_english_tokens(tokens):\n lemmatization_method = WordNetLemmatizer().lemmatize\n lemmas = [lemmatization_method(token) for token in tokens]\n return lemmas\n\n\ndef lemmatize_finnish_tokens(tokens):\n return VoikkoLemmatizer().lemmatize(tokens)\n\n\ndef remove_characters(text, characters):\n for c in characters:\n text = text.replace(c, '')\n return text\n\n\ndef tokenize_and_clean_finnish_text(text: str):\n tokenization_method = re.compile(r'\\S+').findall\n strip_character_list = ['.', ',', '-']\n lemmatization_method = lemmatize_finnish_tokens\n tokens = tokenize_and_clean_text(text,\n tokenization_method=tokenization_method,\n strip_character_list=strip_character_list,\n stopwords=STOPWORDS,\n lemmatization_method=lemmatization_method)\n return tokens\n\n\ndef tokenize_and_clean_english_text(text: str):\n tokenization_method = word_tokenize\n strip_character_list = ['.', ',', '-']\n lemmatization_method = lemmatize_english_tokens\n tokens = tokenize_and_clean_text(text,\n tokenization_method=tokenization_method,\n strip_character_list=strip_character_list,\n stopwords=STOPWORDS,\n lemmatization_method=lemmatization_method)\n return tokens\n\n\ndef detect_language(text):\n results = detect_langs(text)\n results_dict = [result.__dict__ for result in results]\n language = results_dict[0].get('lang', None)\n probability = results_dict[0].get('prob', None)\n return language, probability\n\n\ndef process_text(text: str):\n language, probability = detect_language(text)\n processing_methods_by_language = {\n 'en': tokenize_and_clean_english_text,\n 'fi': tokenize_and_clean_finnish_text,\n }\n processing_method = processing_methods_by_language.get(language, re.compile(r'\\S+').findall)\n tokens = processing_method(text)\n return tokens\n\n\ndef process_documents(documents: List[str]):\n return [process_text(document) for document in documents]\n","sub_path":"text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"480694044","text":"import sys\nimport os\nfrom argparse import ArgumentParser\n\nimport requests\nimport arrow\n\nURL = 'http://localhost:7000'\n\ndef main():\n # create new data collection in Uploads collection\n upload_collection = requests.get(os.path.join(URL, 'api/services/datacollection/get_upload_collection')).json()\n upload_collection_guid = upload_collection['guid']\n\n # create new data collection for upload data\n current_date = arrow.utcnow().format('YYYY-MM-DD_HH:mm:ss')\n\n data = {\n 'name': 'upload {}'.format(current_date),\n 'parentguid': upload_collection_guid\n }\n\n new_databucketcollection = requests.post(os.path.join(URL, 'api/rasterdatabucketcollection/'), json=data).json()\n new_databucketcollection_guid = new_databucketcollection['guid']\n\n # create new databucket for upload data\n data = {\n 'name': 'new_upload',\n 'collectionguid': new_databucketcollection_guid\n }\n\n new_databucket = requests.post(os.path.join(URL, 'api/rasterdatabucket/'), json=data).json()\n\n target_bucketid = new_databucket['guid']\n\n # upload data into new databucket\n print('target bucket: {}'.format(target_bucketid))\n upload_file = '../tiles/server/data/strm/input/s22_e026_1arc_v3.zip'\n\n payload = {\n 'bucketid': target_bucketid\n }\n\n files = {\n 'file': open(upload_file, 'rb')\n }\n upload_url = 'http://localhost:7000/api/rasterdatabucket/{}/upload'.format(target_bucketid)\n upload = requests.post(upload_url, data=payload, files=files)\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"archive/webclient-databucket-system/tools/load_srtm_data.py","file_name":"load_srtm_data.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"504137688","text":"from os import close\nimport sqlite3\nfrom tkinter import messagebox as mb\n\nclass Pokemons:\n\n\t# Aqui es donde se administra toda la informacion en la base de datos\n\t# usando estos metodos.\n\t# se crea un atributo llamado lugar para poder interar en \n\t# las filas de la base de datos, mas abajo se va a explicar en detalles.\n\t# En caso de que la base de datos no exista se va a crear junto a su tabla.\n\tdef __init__(self):\n\t\tself.lugar = -1\n\t\ttry:\n\t\t\tconexion = sqlite3.connect(\"Pokemones.db\")\n\t\t\tcursor = conexion.cursor()\n\t\t\tsql = \"\"\"create table pokemones (\n\t\t\t\t\t\tnumero integer primary key,\n\t\t\t\t\t\tpokemon text,\n\t\t\t\t\t\tnombre text,\n\t\t\t\t\t\taltura real,\n\t\t\t\t\t\tpeso real,\n\t\t\t\t\t\ttipos text,\n\t\t\t\t\t\tcomida text,\n\t\t\t\t\t\timagenPath text,\n\t\t\t\t\t\tdescripcion text\n\t\t\t\t\t)\"\"\"\n\t\t\tcursor.execute(sql)\n\t\texcept sqlite3.OperationalError:\n\t\t\tpass\n\t\n\t# Abre la conexion con la base de datos\n\tdef abrir(self):\n\t\tconexion = sqlite3.connect(\"Pokemones.db\")\n\t\treturn conexion\n\t\n\t# Aqui es donde se administra la informacion mandada por el metodo\n\t# guardar de modulo Formulario, esta es insertada ala base de datos apenas\n\t# es recibida.\n\tdef subir(self,datos):\n\t\ttry:\n\t\t\tcone = self.abrir()\n\t\t\tcursor = cone.cursor()\n\t\t\tsql = \"\"\"insert into pokemones(\n\t\t\t\t\t\tnumero,\n\t\t\t\t\t\tpokemon,\n\t\t\t\t\t\tnombre,\n\t\t\t\t\t\taltura,\n\t\t\t\t\t\tpeso,\n\t\t\t\t\t\ttipos,\n\t\t\t\t\t\tcomida,\n\t\t\t\t\t\timagenPath,\n\t\t\t\t\t\tdescripcion\n\t\t\t\t\t) values (?,?,?,?,?,?,?,?,?)\"\"\"\n\t\t\tcursor.execute(sql, datos)\n\t\t\treturn cursor.rowcount\n\t\texcept sqlite3.IntegrityError:\n\t\t\tmb.showinfo(\"Error!\", \"Ya existe un pokemon con ese numero de identificacion intenta otro numero\")\n\t\tfinally:\n\t\t\tcone.commit()\n\t\t\tcone.close()\n\n\n\n\t# Iterar es un metodo llamado por el metodo guardar del modulo\n\t# PokedexGUI, ambos metodos reciben un parametro llamado direccion, \n\t# el proposito de este parametro es el de ser sumado al atributo lugar,\n\t# este atributo tiene como proposito ser un idicie ala hora de iterar en\n\t# las filas de la base de datos, debido a que este metodo se construyo de manera\n\t# de que toda la informacion de la base de datos sea recibida en forma de lista,\n\t# siendo asi el atributo lugar en indice de ella que indicia que fila retornar ala \n\t# PokedexGUI para que esta mustre su inforamcion, al summar 1 al atriubuto lugar\n\t# se avanza en las filas de la base de datos y viceversa cuando se suma -1\n\tdef iterar(self, direccion):\n\t\ttry:\n\t\t\tself.lugar += direccion\n\t\t\tcone = self.abrir()\n\t\t\tcursor = cone.cursor()\n\t\t\tsql = \"select * from pokemones\"\n\t\t\tif self.lugar < 0:\n\t\t\t\tmb.showinfo(\"Informacion\", \"Estas en el principio de los registros de la Pokedex, intenta avanzar\")\n\t\t\t\tself.lugar = 0\n\t\t\tcursor.execute(sql)\n\t\t\tinformacion = cursor.fetchall()\n\t\t\treturn informacion[self.lugar]\n\t\texcept IndexError:\n\t\t\tmb.showinfo(\"Informacion\", \"Llegaste al final de la Pokedex, intenta atrapar mas Pokemones!\")\n\t\t\tself.lugar -= 1\n\t\tfinally:\n\t\t\tcone.close()\n\n\tdef editar(self, datos):\n\t\ttry:\n\t\t\tcone = self.abrir()\n\t\t\tcursor = cone.cursor()\n\t\t\tsql = \"\"\"update pokemones set\n\t\t\tpokemon=?, \n\t\t\tnombre=?,\n\t\t\taltura=?,\n\t\t\tpeso=?,\n\t\t\ttipos=?,\n\t\t\tcomida=?,\n\t\t\timagenPath=?,\n\t\t\tdescripcion=? 
where numero=? \"\"\"\n\t\t\tcursor.execute(sql, datos)\n\t\t\treturn cursor.rowcount\n\t\tfinally:\n\t\t\tcone.commit()\n\t\t\tcone.close()\n\t\n\tdef borrar(self, dato):\n\t\ttry:\n\t\t\tcone = self.abrir()\n\t\t\tcursor = cone.cursor()\n\t\t\tsql = \"delete from pokemones where numero=?\"\n\t\t\tcursor.execute(sql, dato)\n\t\t\treturn cursor.rowcount\n\t\tfinally:\n\t\t\tcone.commit()\n\t\t\tcone.close()\n\n\tdef buscar(self, dato):\n\t\ttry:\n\t\t\tcone = self.abrir()\n\t\t\tcursor = cone.cursor()\n\t\t\tsql = \"\"\"select pokemon, \n\t\t\t\tnombre, \n\t\t\t\taltura,\n\t\t\t\tpeso,\n\t\t\t\ttipos,\n\t\t\t\tcomida,\n\t\t\t\timagenPath,\n\t\t\t\tdescripcion from pokemones where numero=?\"\"\"\n\t\t\tcursor.execute(sql, dato)\n\t\t\treturn cursor.fetchone()\n\t\tfinally:\n\t\t\tcone.close()\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"137741991","text":"from django.template import Library\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom site_core.models import Parameter\nimport json\n\nregister = Library()\n\n\n@register.assignment_tag\ndef html_from_parameters(name):\n try:\n parameter = Parameter.objects.get(name=name)\n if parameter.enable:\n return parameter.value\n else:\n return ''\n except ObjectDoesNotExist:\n return ''\n\n\n","sub_path":"site_core/templatetags/site_core_extras.py","file_name":"site_core_extras.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"214856353","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nheader = {\n 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'\n}\n\nurl = 'http://jandan.net/ooxx/page-2063'\nfileName = 'D:\\\\Code\\\\Python日常\\\\jiandan_ooxx'\n\ndef download_url(url):\n try:\n request = urllib.request.Request(url,None,header)\n response = urllib.request.urlopen(request)\n content = response.read().decode('UTF-8')\n return content\n except urllib.request.URLError as e:\n print(\"[访问出错]\" + e.reason)\n return None\ndef download_image(imageUrl):\n try:\n print(\"正在下载......\" + str(imageUrl))\n urllib.request.urlretrieve(imageUrl,fileName)\n except urllib.request.URLError as e:\n print(\"[下载出错]\\n\" + e.reason)\n return None\ndef get_url(content):\n soup = BeautifulSoup(content,\"html.parser\")\n contents = soup.find_all('a',attrs={'class':'view_img_link'})\n for i in contents:\n return i.get('href')\nif __name__ == \"__main__\":\n download_image(get_url(download_url(url)))\n\n","sub_path":"Python/jiandan_ooxx.py","file_name":"jiandan_ooxx.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"572099385","text":"from django.db.models.signals import post_save, post_delete\nfrom django.db.utils import OperationalError\nfrom django.dispatch import receiver\nfrom django.contrib.admin.models import LogEntry\n\nfrom apps.hello.models import DBAction\n\n\nIGNORED_SENDERS = (DBAction, LogEntry)\n\n\ndef create_DBAction(sender, action):\n try:\n DBAction.objects.create(\n model=sender.__name__,\n action=action\n )\n except OperationalError:\n pass\n\n\n@receiver(post_save) # noqa\ndef object_created_or_updated(sender, created, **kwargs):\n if sender in IGNORED_SENDERS:\n return\n\n create_DBAction(sender, action=('created' if created else 'updated'))\n\n\n@receiver(post_delete) # noqa\ndef object_deleted(sender, **kwargs):\n if sender in IGNORED_SENDERS:\n return\n\n create_DBAction(sender, 'deleted')\n","sub_path":"apps/hello/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"168698306","text":"from doubly_linked_list import DoublyLinkedList\n\nclass LRUCache:\n \"\"\"\n Our LRUCache class keeps track of the max number of nodes it\n can hold, the current number of nodes it is holding, a doubly-\n linked list that holds the key-value entries in the correct\n order, as well as a storage dict that provides fast access\n to every node stored in the cache.\n \"\"\"\n def __init__(self, limit=10):\n #Plan\n #limits the amt of nodes cache can hold to 10\n #show current number of nodes it has\n #variable used to set the value being set by Doublylinked list, a list that holds the key-value entries\n #dictionary for access (hash table) s well as a storage dict that provides fast access to every node stored in the cache.\n self.limit = limit\n self.size = 0\n self.order = DoublyLinkedList()\n self.storage = {}\n\n \"\"\"\n Retrieves the value associated with the given key. Also\n needs to move the key-value pair to the end of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache.\n \"\"\"\n def get(self, key):\n # plan if there is a key in storage, move to the front, then refresh the order and return the value which is a truple, else return none\n if key in self.storage:\n node = self.storage[key]\n self.order.move_to_front(node)\n return node.value[1]\n else:\n return None\n\n \n \n\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. Additionally, in the\n case that the key already exists in the cache, we simply\n want to overwrite the old value associated with the key with\n the newly-specified value.\n \"\"\"\n def set(self, key, value):\n if key in self.storage:\n node = self.storage[key]\n node.value = (key, value)\n self.order.move_to_front(node)\n return\n\n if self.size is self.limit:\n del self.storage[self.order.tail.value[0]]\n self.order.remove_from_tail()\n self.size -= 1\n self.order.add_to_head((key, value))\n self.storage[key] = self.order.head\n self.size += 1\n\n ","sub_path":"lru_cache/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"86990657","text":"\"\"\"This function crosses information of the several channels.\n\nAfter loading the segmented spots and nuclei, it calculates nucleus per nucleus\nthe matrix of the distances between all the spots of the three chennels; we can have\na maximum of 2 dots per channel, and all the six possible distances are calculated\nin order the minimu possible distance for un association, not for the sum of the distances.\nEx: c1, c2, e1, e2 are goi,ng to be associated like c1-e1, c2-e2 if d(c1-e1) (or d(c2-e2)) is\nthe shortest possbile distance, we don't care if at the end\nd(c1-e1) + d(c2-e2) > d(c1-e2) + d(c2-e1)\n\"\"\"\n\nimport numpy as np\nfrom skimage.measure import label, regionprops_table\nimport xlsxwriter\n# import pyqtgraph as pg\nfrom PyQt5 import QtWidgets\n\n\ndef dist_choose(x1, x2, pix_sizeZ, pix_sizeX):\n \"\"\"Calculates the distances between to 3D points: if one is (0,0,0), gives 1000000000\"\"\"\n if x1.sum() == 0 or x2.sum() == 0:\n dist = 1000000000\n else:\n dist = np.sqrt(((x1[0] - x2[0]) * pix_sizeZ) ** 2 + ((x1[1] - x2[1]) * pix_sizeX) ** 2 + ((x1[2] - x2[2]) * pix_sizeX) ** 2)\n return dist\n\n\nclass CrossInfo:\n \"\"\"Only class, does all the job\"\"\"\n def __init__(self, analysis_folder, spts_chs_modif):\n\n spts_ch1 = spts_chs_modif[:, :, :, 0] # load analysis results\n spts_ch2 = spts_chs_modif[:, :, :, 1]\n spts_ch3 = spts_chs_modif[:, :, :, 2]\n nucs_dapi = np.load(analysis_folder + '/nucs_dapi.npy')\n pix_sizeZ, pix_sizeX = np.load(analysis_folder + '/pix_sizes.npy')\n try:\n spts_clusters_flag = np.load(analysis_folder + '/spts_clstrs_flag.npy')\n except FileNotFoundError:\n spts_clusters_flag = 1\n\n spts_ch1_lbls = label(spts_ch1, connectivity=1) # spots labeling\n spts_ch2_lbls = label(spts_ch2, connectivity=1)\n spts_ch3_lbls = label(spts_ch3, connectivity=1)\n\n rgp_ch1 = regionprops_table(spts_ch1_lbls, properties=[\"label\", \"coords\", \"area\"]) # dictionary with useful info of spots\n rgp_ch2 = regionprops_table(spts_ch2_lbls, properties=[\"label\", \"coords\", \"area\"])\n rgp_ch3 = regionprops_table(spts_ch3_lbls, properties=[\"label\", \"coords\", \"area\"])\n\n idxs2rm_ch1 = np.where(rgp_ch1[\"area\"] < 4)[0] # check id of spots smaller than 4 pixels to delete them\n idxs2rm_ch2 = np.where(rgp_ch2[\"area\"] < 4)[0]\n idxs2rm_ch3 = np.where(rgp_ch3[\"area\"] < 4)[0]\n\n for kk in idxs2rm_ch1:\n spts_ch1[rgp_ch1[\"coords\"][kk][:, 0], rgp_ch1[\"coords\"][kk][:, 1], rgp_ch1[\"coords\"][kk][:, 2]] = 0 # remove small spots using the coordinates of the pixels of the spots itself\n\n for kk in idxs2rm_ch2:\n spts_ch2[rgp_ch2[\"coords\"][kk][:, 0], rgp_ch2[\"coords\"][kk][:, 1], rgp_ch2[\"coords\"][kk][:, 2]] = 0\n\n for kk in idxs2rm_ch3:\n spts_ch3[rgp_ch3[\"coords\"][kk][:, 0], rgp_ch3[\"coords\"][kk][:, 1], rgp_ch3[\"coords\"][kk][:, 2]] = 0\n\n spts_ch1_lbls = label(spts_ch1, connectivity=1) # relabel spots after removal of small objects\n spts_ch2_lbls = label(spts_ch2, connectivity=1)\n spts_ch3_lbls = label(spts_ch3, connectivity=1)\n\n rgp_ch1 = regionprops_table(spts_ch1_lbls, properties=[\"label\", \"centroid\", \"area\", \"coords\"]) # new dictionary of the selected spots\n rgp_ch2 = regionprops_table(spts_ch2_lbls, properties=[\"label\", \"centroid\", \"area\", \"coords\"])\n rgp_ch3 = regionprops_table(spts_ch3_lbls, properties=[\"label\", \"centroid\", \"area\", \"coords\"])\n\n idxs_nucs = np.unique(nucs_dapi)[1:] # tags of the nuclei\n dists_mtx = np.zeros((6, idxs_nucs.size)) # matrix of the distances: for each 
nucleus you have at maximum 6 distances (c1-e1, c2-e2, c1-s1, c2-s2, e1-, s1, e2-s2)\n\n spts_ch1_fin = np.zeros(spts_ch1.shape, np.uint8)\n spts_ch2_fin = np.zeros(spts_ch2.shape, np.uint8)\n spts_ch3_fin = np.zeros(spts_ch3.shape, np.uint8)\n\n pbar = ProgressBar(total1=idxs_nucs.size)\n pbar.show()\n pbar.update_progressbar1(0)\n\n # print(counts)\n for counts, jj in enumerate(idxs_nucs):\n pbar.update_progressbar1(counts)\n sing_nuc = (nucs_dapi == jj) # for each nucleus\n # CH1\n spts_ch1_sing_tags = spts_ch1_lbls * sing_nuc # find the tag of the ch1 spots inside the single nucleus\n spts_ch1_sing_tags = spts_ch1_sing_tags[spts_ch1_sing_tags != 0]\n spts_ch1_sing_tags = np.unique(spts_ch1_sing_tags)\n spts_ch1_sing_idxs = list()\n for gg in spts_ch1_sing_tags:\n spts_ch1_sing_idxs.append(np.where(rgp_ch1[\"label\"] == gg)[0][0])\n\n ctrs_ch1 = np.zeros((2, 3))\n\n if len(spts_ch1_sing_idxs) >= 2:\n area_idxs = np.zeros((2, len(spts_ch1_sing_idxs)))\n area_idxs[0, :] = np.take(rgp_ch1[\"area\"], spts_ch1_sing_idxs)\n area_idxs[1, :] = np.asarray(spts_ch1_sing_idxs)\n area_idxs = (area_idxs[:, area_idxs[0].argsort()][1, -2:]).astype(np.int64)\n ctrs_ch1[0, :] = np.array([rgp_ch1[\"centroid-0\"][area_idxs[0]], rgp_ch1[\"centroid-1\"][area_idxs[0]], rgp_ch1[\"centroid-2\"][area_idxs[0]]])\n ctrs_ch1[1, :] = np.array([rgp_ch1[\"centroid-0\"][area_idxs[1]], rgp_ch1[\"centroid-1\"][area_idxs[1]], rgp_ch1[\"centroid-2\"][area_idxs[1]]])\n spts_ch1_fin[rgp_ch1[\"coords\"][area_idxs[0]][:, 0], rgp_ch1[\"coords\"][area_idxs[0]][:, 1], rgp_ch1[\"coords\"][area_idxs[0]][:, 2]] = 1\n spts_ch1_fin[rgp_ch1[\"coords\"][area_idxs[1]][:, 0], rgp_ch1[\"coords\"][area_idxs[1]][:, 1], rgp_ch1[\"coords\"][area_idxs[1]][:, 2]] = 1\n\n if len(spts_ch1_sing_idxs) == 1:\n ctrs_ch1[0, :] = np.array([rgp_ch1[\"centroid-0\"][spts_ch1_sing_idxs[0]], rgp_ch1[\"centroid-1\"][spts_ch1_sing_idxs[0]], rgp_ch1[\"centroid-2\"][spts_ch1_sing_idxs[0]]])\n spts_ch1_fin[rgp_ch1[\"coords\"][spts_ch1_sing_idxs[0]][:, 0], rgp_ch1[\"coords\"][spts_ch1_sing_idxs[0]][:, 1], rgp_ch1[\"coords\"][spts_ch1_sing_idxs[0]][:, 2]] = 1\n\n # CH2\n spts_ch2_sing_tags = spts_ch2_lbls * sing_nuc # find the tag of the ch1 spots inside the single nucleus\n spts_ch2_sing_tags = spts_ch2_sing_tags[spts_ch2_sing_tags != 0]\n spts_ch2_sing_tags = np.unique(spts_ch2_sing_tags)\n spts_ch2_sing_idxs = list()\n for gg in spts_ch2_sing_tags:\n spts_ch2_sing_idxs.append(np.where(rgp_ch2[\"label\"] == gg)[0][0])\n\n ctrs_ch2 = np.zeros((2, 3))\n\n if len(spts_ch2_sing_idxs) >= 2:\n area_idxs = np.zeros((2, len(spts_ch2_sing_idxs)))\n area_idxs[0, :] = np.take(rgp_ch2[\"area\"], spts_ch2_sing_idxs)\n area_idxs[1, :] = np.asarray(spts_ch2_sing_idxs)\n area_idxs = (area_idxs[:, area_idxs[0].argsort()][1, -2:]).astype(np.int64)\n ctrs_ch2[0, :] = np.array([rgp_ch2[\"centroid-0\"][area_idxs[0]], rgp_ch2[\"centroid-1\"][area_idxs[0]], rgp_ch2[\"centroid-2\"][area_idxs[0]]])\n ctrs_ch2[1, :] = np.array([rgp_ch2[\"centroid-0\"][area_idxs[1]], rgp_ch2[\"centroid-1\"][area_idxs[1]], rgp_ch2[\"centroid-2\"][area_idxs[1]]])\n spts_ch2_fin[rgp_ch2[\"coords\"][area_idxs[0]][:, 0], rgp_ch2[\"coords\"][area_idxs[0]][:, 1], rgp_ch2[\"coords\"][area_idxs[0]][:, 2]] = 1\n spts_ch2_fin[rgp_ch2[\"coords\"][area_idxs[1]][:, 0], rgp_ch2[\"coords\"][area_idxs[1]][:, 1], rgp_ch2[\"coords\"][area_idxs[1]][:, 2]] = 1\n\n if len(spts_ch2_sing_idxs) == 1:\n ctrs_ch2[0, :] = np.array([rgp_ch2[\"centroid-0\"][spts_ch2_sing_idxs[0]], rgp_ch2[\"centroid-1\"][spts_ch2_sing_idxs[0]], 
rgp_ch2[\"centroid-2\"][spts_ch2_sing_idxs[0]]])\n spts_ch2_fin[rgp_ch2[\"coords\"][spts_ch2_sing_idxs[0]][:, 0], rgp_ch2[\"coords\"][spts_ch2_sing_idxs[0]][:, 1], rgp_ch2[\"coords\"][spts_ch2_sing_idxs[0]][:, 2]] = 1\n\n # CH3\n spts_ch3_sing_tags = spts_ch3_lbls * sing_nuc # find the tag of the ch1 spots inside the single nucleus: mask the nucleus on the labeled spots\n spts_ch3_sing_tags = spts_ch3_sing_tags[spts_ch3_sing_tags != 0]\n spts_ch3_sing_tags = np.unique(spts_ch3_sing_tags) # find the surviving tags\n spts_ch3_sing_idxs = list()\n for gg in spts_ch3_sing_tags:\n spts_ch3_sing_idxs.append(np.where(rgp_ch3[\"label\"] == gg)[0][0]) # find the indexes in the dictionary of the surviving tags\n\n ctrs_ch3 = np.zeros((2, 3)) # initialize the matrix of centroids\n\n if len(spts_ch3_sing_idxs) >= 2: # if we have 2 spots or more in the mask, search the biggest 2 (it's redundant in case of only 2 spots, but works anyway reducing code complexity)\n area_idxs = np.zeros((2, len(spts_ch3_sing_idxs))) # initialize with volume and relative indexes\n area_idxs[0, :] = np.take(rgp_ch3[\"area\"], spts_ch3_sing_idxs)\n area_idxs[1, :] = np.asarray(spts_ch3_sing_idxs)\n area_idxs = (area_idxs[:, area_idxs[0].argsort()][1, -2:]).astype(np.int64) # sort the matrix with respect to the volume\n ctrs_ch3[0, :] = np.array([rgp_ch3[\"centroid-0\"][area_idxs[0]], rgp_ch3[\"centroid-1\"][area_idxs[0]], rgp_ch3[\"centroid-2\"][area_idxs[0]]]) # record the coordinates of the centroids of the biggest 2\n ctrs_ch3[1, :] = np.array([rgp_ch3[\"centroid-0\"][area_idxs[1]], rgp_ch3[\"centroid-1\"][area_idxs[1]], rgp_ch3[\"centroid-2\"][area_idxs[1]]])\n spts_ch3_fin[rgp_ch3[\"coords\"][area_idxs[0]][:, 0], rgp_ch3[\"coords\"][area_idxs[0]][:, 1], rgp_ch3[\"coords\"][area_idxs[0]][:, 2]] = 1\n spts_ch3_fin[rgp_ch3[\"coords\"][area_idxs[1]][:, 0], rgp_ch3[\"coords\"][area_idxs[1]][:, 1], rgp_ch3[\"coords\"][area_idxs[1]][:, 2]] = 1\n\n if len(spts_ch3_sing_idxs) == 1: # if there is only 1 spot, no sorting is needed\n ctrs_ch3[0, :] = np.array([rgp_ch3[\"centroid-0\"][spts_ch3_sing_idxs[0]], rgp_ch3[\"centroid-1\"][spts_ch3_sing_idxs[0]], rgp_ch3[\"centroid-2\"][spts_ch3_sing_idxs[0]]])\n spts_ch3_fin[rgp_ch3[\"coords\"][spts_ch3_sing_idxs[0]][:, 0], rgp_ch3[\"coords\"][spts_ch3_sing_idxs[0]][:, 1], rgp_ch3[\"coords\"][spts_ch3_sing_idxs[0]][:, 2]] = 1\n\n # FILLING DISTANCE MATRIX\n\n all_d_ce = [dist_choose(ctrs_ch1[0, :], ctrs_ch2[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[0, :], ctrs_ch2[1, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[1, :], ctrs_ch2[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[1, :], ctrs_ch2[1, :], pix_sizeZ, pix_sizeX)]\n\n if np.argmin(all_d_ce) == 1 or np.argmin(all_d_ce) == 2:\n dists_mtx[:2, counts] = all_d_ce[1], all_d_ce[2]\n else:\n dists_mtx[:2, counts] = all_d_ce[0], all_d_ce[3]\n\n all_d_cs = [dist_choose(ctrs_ch1[0, :], ctrs_ch3[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[0, :], ctrs_ch3[1, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[1, :], ctrs_ch3[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch1[1, :], ctrs_ch3[1, :], pix_sizeZ, pix_sizeX)]\n\n if np.argmin(all_d_cs) == 1 or np.argmin(all_d_cs) == 2:\n dists_mtx[2:4, counts] = all_d_cs[1], all_d_cs[2]\n else:\n dists_mtx[2:4, counts] = all_d_cs[0], all_d_cs[3]\n\n all_d_es = [dist_choose(ctrs_ch2[0, :], ctrs_ch3[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch2[0, :], ctrs_ch3[1, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch2[1, :], ctrs_ch3[0, :], pix_sizeZ, pix_sizeX), dist_choose(ctrs_ch2[1, :], 
ctrs_ch3[1, :], pix_sizeZ, pix_sizeX)]\n\n if np.argmin(all_d_es) == 1 or np.argmin(all_d_es) == 2:\n dists_mtx[4:, counts] = all_d_es[1], all_d_es[2]\n else:\n dists_mtx[4:, counts] = all_d_es[0], all_d_es[3]\n\n pbar.close()\n\n workbook = xlsxwriter.Workbook(analysis_folder + \"/distances_modified.xlsx\")\n sheet1 = workbook.add_worksheet(\"\")\n\n sheet1.write(0, 0, \"Nucs Id\")\n sheet1.write(0, 1, \"Dist ch1_1-ch2_1\")\n sheet1.write(0, 2, \"Dist ch1_2-ch2_2\")\n sheet1.write(0, 3, \"Dist ch1_1-ch3_1\")\n sheet1.write(0, 4, \"Dist ch1_2-ch3_2\")\n sheet1.write(0, 5, \"Dist ch2_1-ch3_1\")\n sheet1.write(0, 6, \"Dist ch2_2-ch3_2\")\n\n for ll in range(dists_mtx.shape[1]):\n sheet1.write(ll + 1, 0, idxs_nucs[ll])\n if dists_mtx[0, ll] < 1000000000:\n sheet1.write(ll + 1, 1, dists_mtx[0, ll])\n else:\n sheet1.write(ll + 1, 1, \"----\")\n\n if dists_mtx[1, ll] < 1000000000:\n sheet1.write(ll + 1, 2, dists_mtx[1, ll])\n else:\n sheet1.write(ll + 1, 2, \"----\")\n if dists_mtx[2, ll] < 1000000000:\n sheet1.write(ll + 1, 3, dists_mtx[2, ll])\n else:\n sheet1.write(ll + 1, 3, \"----\")\n if dists_mtx[3, ll] < 1000000000:\n sheet1.write(ll + 1, 4, dists_mtx[3, ll])\n else:\n sheet1.write(ll + 1, 4, \"----\")\n if dists_mtx[4, ll] < 1000000000:\n sheet1.write(ll + 1, 5, dists_mtx[4, ll])\n else:\n sheet1.write(ll + 1, 5, \"----\")\n if dists_mtx[5, ll] < 1000000000:\n sheet1.write(ll + 1, 6, dists_mtx[5, ll])\n else:\n sheet1.write(ll + 1, 6, \"----\")\n\n if spts_clusters_flag[0] == 2:\n sheet2 = workbook._add_sheet(\"Overlap\")\n sheet2.write(0, 0, \"CH1_Id\")\n sheet2.write(0, 1, \"Volume\")\n sheet2.write(0, 2, \"z centroid\")\n sheet2.write(0, 3, \"x centroid\")\n sheet2.write(0, 4, \"y centroid\")\n sheet2.write(0, 5, \"CH2_Id\")\n sheet2.write(0, 6, \"Volume\")\n sheet2.write(0, 7, \"z centroid\")\n sheet2.write(0, 8, \"x centroid\")\n sheet2.write(0, 9, \"y centroid\")\n sheet2.write(0, 10, \"CH3_Id\")\n sheet2.write(0, 11, \"Volume\")\n sheet2.write(0, 12, \"z centroid\")\n sheet2.write(0, 13, \"x centroid\")\n sheet2.write(0, 14, \"y centroid\")\n\n sheet2.write(1, 16, \"CH2 on Clstrs\")\n sheet2.write(2, 16, \"CH2 not on Clstrs\")\n sheet2.write(3, 16, \"CH3 on Clstrs\")\n sheet2.write(4, 16, \"CH3 not on Clstrs\")\n\n sheet2.write(0, 17, \"Numb\")\n sheet2.write(0, 18, \"%\")\n\n numb_spts_ch2 = np.unique(spts_ch2_lbls[spts_ch2_lbls != 0]).size\n numb_spts_ch3 = np.unique(spts_ch3_lbls[spts_ch3_lbls != 0]).size\n ch2_on_clstr = spts_ch2_lbls * np.sign(spts_ch1_lbls)\n ch2_on_clstr = np.unique(ch2_on_clstr[ch2_on_clstr != 0]).size\n ch2_noton_clstr = numb_spts_ch2 - ch2_on_clstr\n ch3_on_clstr = spts_ch3_lbls * np.sign(spts_ch1_lbls)\n ch3_on_clstr = np.unique(ch3_on_clstr[ch3_on_clstr != 0]).size\n ch3_noton_clstr = numb_spts_ch3 - ch3_on_clstr\n\n sheet2.write(1, 17, np.int64(ch2_on_clstr))\n sheet2.write(2, 17, np.int64(ch2_noton_clstr))\n sheet2.write(3, 17, np.int64(ch3_on_clstr))\n sheet2.write(4, 17, np.int64(ch3_noton_clstr))\n sheet2.write(1, 18, np.float64(100 * ch2_on_clstr / numb_spts_ch2))\n sheet2.write(2, 18, np.float64(100 * ch2_noton_clstr / numb_spts_ch2))\n sheet2.write(3, 18, np.float64(100 * ch3_on_clstr / numb_spts_ch3))\n sheet2.write(4, 18, np.float64(100 * ch3_noton_clstr / numb_spts_ch3))\n\n row_idx = 0\n for bb in range(len(rgp_ch1[\"label\"])):\n sheet2.write(1 + row_idx, 0, \"Clst_\" + str(rgp_ch1[\"label\"][bb]))\n sheet2.write(1 + row_idx, 1, rgp_ch1[\"area\"][bb])\n sheet2.write(1 + row_idx, 2, rgp_ch1[\"centroid-0\"][bb])\n sheet2.write(1 + row_idx, 
3, rgp_ch1[\"centroid-1\"][bb])\n sheet2.write(1 + row_idx, 4, rgp_ch1[\"centroid-2\"][bb])\n\n bff_ch2 = spts_ch2_lbls[rgp_ch1[\"coords\"][bb][:, 0], rgp_ch1[\"coords\"][bb][:, 1], rgp_ch1[\"coords\"][bb][:, 2]]\n bff_ch2 = np.unique(bff_ch2[bff_ch2 != 0])\n for dd in range(bff_ch2.size):\n iidd = np.where(rgp_ch2[\"label\"] == bff_ch2[dd])[0][0]\n sheet2.write(1 + row_idx + dd, 5, \"Spts_\" + str(rgp_ch2[\"label\"][iidd]))\n sheet2.write(1 + row_idx + dd, 6, rgp_ch2[\"area\"][iidd])\n sheet2.write(1 + row_idx + dd, 7, rgp_ch2[\"centroid-0\"][iidd])\n sheet2.write(1 + row_idx + dd, 8, rgp_ch2[\"centroid-1\"][iidd])\n sheet2.write(1 + row_idx + dd, 9, rgp_ch2[\"centroid-2\"][iidd])\n\n bff_ch3 = spts_ch3_lbls[rgp_ch1[\"coords\"][bb][:, 0], rgp_ch1[\"coords\"][bb][:, 1], rgp_ch1[\"coords\"][bb][:, 2]]\n bff_ch3 = np.unique(bff_ch3[bff_ch3 != 0])\n for pp in range(bff_ch3.size):\n ddii = np.where(rgp_ch3[\"label\"] == bff_ch3[pp])[0][0]\n sheet2.write(1 + row_idx + pp, 10, \"Spts_\" + str(rgp_ch3[\"label\"][ddii]))\n sheet2.write(1 + row_idx + pp, 11, rgp_ch3[\"area\"][ddii])\n sheet2.write(1 + row_idx + pp, 12, rgp_ch3[\"centroid-0\"][ddii])\n sheet2.write(1 + row_idx + pp, 13, rgp_ch3[\"centroid-1\"][ddii])\n sheet2.write(1 + row_idx + pp, 14, rgp_ch3[\"centroid-2\"][ddii])\n\n row_idx += np.max([bff_ch2.size, bff_ch3.size])\n\n workbook.close()\n\n mtx2show = np.zeros(spts_ch2.shape + (3,), dtype=np.uint8)\n mtx2show[:, :, :, 0] = 125 * np.sign(nucs_dapi) * (1 - spts_ch1_fin) * (1 - spts_ch2_fin) * (1 - spts_ch3_fin)\n mtx2show[:, :, :, 1] = 125 * np.sign(nucs_dapi) * (1 - spts_ch1_fin) * (1 - spts_ch2_fin) * (1 - spts_ch3_fin)\n mtx2show[:, :, :, 2] = 125 * np.sign(nucs_dapi) * (1 - spts_ch1_fin) * (1 - spts_ch2_fin) * (1 - spts_ch3_fin)\n mtx2show[:, :, :, 1] += 255 * spts_ch1_fin\n mtx2show[:, :, :, 0] += 255 * spts_ch2_fin\n mtx2show[:, :, :, 2] += 255 * spts_ch3_fin\n # pg.image(mtx2show)\n\n self.mtx2show = mtx2show\n self.nucs_dapi = nucs_dapi\n\n\nclass ProgressBar(QtWidgets.QWidget):\n \"\"\"Simple progress bar widget\"\"\"\n def __init__(self, parent=None, total1=20):\n super(ProgressBar, self).__init__(parent)\n self.name_line1 = QtWidgets.QLineEdit()\n\n self.progressbar1 = QtWidgets.QProgressBar()\n self.progressbar1.setMinimum(1)\n self.progressbar1.setMaximum(total1)\n\n main_layout = QtWidgets.QGridLayout()\n main_layout.addWidget(self.progressbar1, 0, 0)\n\n self.setLayout(main_layout)\n self.setWindowTitle(\"Progress\")\n self.setGeometry(500, 300, 300, 50)\n\n def update_progressbar1(self, val1):\n \"\"\"First progress bar updater\"\"\"\n self.progressbar1.setValue(val1)\n QtWidgets.qApp.processEvents()\n\n\n\n\n","sub_path":"ModifCrossInfo.py","file_name":"ModifCrossInfo.py","file_ext":"py","file_size_in_byte":20126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"244026773","text":"from rest_framework import relations, renderers, serializers, status\nfrom rest_framework.settings import api_settings\nfrom rest_framework_json_api import encoders\nfrom rest_framework_json_api.utils import (\n get_related_field, is_related_many,\n model_from_obj, model_to_resource_type\n)\nfrom django.core import urlresolvers\nfrom django.core.exceptions import NON_FIELD_ERRORS\nfrom django.utils import encoding, six\nfrom django.utils.six.moves.urllib.parse import urlparse, urlunparse\n\n\nclass WrapperNotApplicable(ValueError):\n\n def __init__(self, *args, **kwargs):\n self.data = kwargs.pop('data', None)\n self.renderer_context = kwargs.pop('renderer_context', None)\n\n return super(WrapperNotApplicable, self).__init__(*args, **kwargs)\n\n\nclass JsonApiMixin(object):\n convert_by_name = {\n 'id': 'convert_to_text',\n api_settings.URL_FIELD_NAME: 'rename_to_href',\n }\n\n convert_by_type = {\n relations.PrimaryKeyRelatedField: 'handle_related_field',\n relations.HyperlinkedRelatedField: 'handle_url_field',\n serializers.ModelSerializer: 'handle_nested_serializer',\n }\n dict_class = dict\n encoder_class = encoders.JSONEncoder\n media_type = 'application/vnd.api+json'\n wrappers = [\n 'wrap_empty_response',\n 'wrap_parser_error',\n 'wrap_field_error',\n 'wrap_generic_error',\n 'wrap_options',\n 'wrap_paginated',\n 'wrap_default'\n ]\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n \"\"\"Convert native data to JSON API\n\n Tries each of the methods in `wrappers`, using the first successful\n one, or raises `WrapperNotApplicable`.\n \"\"\"\n\n wrapper = None\n success = False\n\n for wrapper_name in self.wrappers:\n wrapper_method = getattr(self, wrapper_name)\n try:\n wrapper = wrapper_method(data, renderer_context)\n except WrapperNotApplicable:\n pass\n else:\n success = True\n break\n\n if not success:\n raise WrapperNotApplicable(\n 'No acceptable wrappers found for response.',\n data=data, renderer_context=renderer_context)\n\n renderer_context[\"indent\"] = 4\n\n return super(JsonApiMixin, self).render(\n data=wrapper,\n accepted_media_type=accepted_media_type,\n renderer_context=renderer_context)\n\n def wrap_empty_response(self, data, renderer_context):\n \"\"\"\n Pass-through empty responses\n\n 204 No Content includes an empty response\n \"\"\"\n\n if data is not None:\n raise WrapperNotApplicable('Data must be empty.')\n\n return data\n\n def wrap_parser_error(self, data, renderer_context):\n \"\"\"\n Convert parser errors to the JSON API Error format\n\n Parser errors have a status code of 400, like field errors, but have\n the same native format as generic errors. 
Also, the detail message is\n often specific to the input, so the error is listed as a 'detail'\n rather than a 'title'.\n \"\"\"\n\n response = renderer_context.get(\"response\", None)\n status_code = response and response.status_code\n\n if status_code != 400:\n raise WrapperNotApplicable('Status code must be 400.')\n\n if list(data.keys()) != ['detail']:\n raise WrapperNotApplicable('Data must only have \"detail\" key.')\n\n # Probably a parser error, unless `detail` is a valid field\n view = renderer_context.get(\"view\", None)\n model = self.model_from_obj(view)\n\n if 'detail' in model._meta.get_all_field_names():\n raise WrapperNotApplicable()\n\n return self.wrap_error(\n data, renderer_context, keys_are_fields=False,\n issue_is_title=False)\n\n def wrap_field_error(self, data, renderer_context):\n \"\"\"\n Convert field error native data to the JSON API Error format\n\n See the note about the JSON API Error format on `wrap_error`.\n\n The native format for field errors is a dictionary where the keys are\n field names (or 'non_field_errors' for additional errors) and the\n values are a list of error strings:\n\n {\n \"min\": [\n \"min must be greater than 0.\",\n \"min must be an even number.\"\n ],\n \"max\": [\"max must be a positive number.\"],\n \"non_field_errors\": [\n \"Select either a range or an enumeration, not both.\"]\n }\n\n It is rendered into this JSON API error format:\n\n {\n \"errors\": [{\n \"status\": \"400\",\n \"path\": \"/min\",\n \"detail\": \"min must be greater than 0.\"\n },{\n \"status\": \"400\",\n \"path\": \"/min\",\n \"detail\": \"min must be an even number.\"\n },{\n \"status\": \"400\",\n \"path\": \"/max\",\n \"detail\": \"max must be a positive number.\"\n },{\n \"status\": \"400\",\n \"path\": \"/-\",\n \"detail\": \"Select either a range or an enumeration, not both.\"\n }]\n }\n \"\"\"\n response = renderer_context.get(\"response\", None)\n status_code = response and response.status_code\n if status_code != 400:\n raise WrapperNotApplicable('Status code must be 400.')\n\n return self.wrap_error(\n data, renderer_context, keys_are_fields=True, issue_is_title=False)\n\n def wrap_generic_error(self, data, renderer_context):\n \"\"\"\n Convert generic error native data using the JSON API Error format\n\n See the note about the JSON API Error format on `wrap_error`.\n\n The native format for errors that are not bad requests, such as\n authentication issues or missing content, is a dictionary with a\n 'detail' key and a string value:\n\n {\n \"detail\": \"Authentication credentials were not provided.\"\n }\n\n This is rendered into this JSON API error format:\n\n {\n \"errors\": [{\n \"status\": \"403\",\n \"title\": \"Authentication credentials were not provided\"\n }]\n }\n \"\"\"\n response = renderer_context.get(\"response\", None)\n status_code = response and response.status_code\n is_error = (\n status.is_client_error(status_code) or\n status.is_server_error(status_code)\n )\n if not is_error:\n raise WrapperNotApplicable(\"Status code must be 4xx or 5xx.\")\n\n return self.wrap_error(\n data, renderer_context, keys_are_fields=False, issue_is_title=True)\n\n def wrap_error(\n self, data, renderer_context, keys_are_fields, issue_is_title):\n \"\"\"Convert error native data to the JSON API Error format\n\n JSON API has a different format for errors, but Django REST Framework\n doesn't have a separate rendering path for errors. 
This results in\n some guesswork to determine if data is an error, what kind, and how\n to handle it.\n\n As of August 2014, there is not a consensus about the error format in\n JSON API. The format documentation defines an \"errors\" collection, and\n some possible fields for that collection, but without examples for\n common cases. If and when consensus is reached, this format will\n probably change.\n \"\"\"\n\n response = renderer_context.get(\"response\", None)\n status_code = str(response and response.status_code)\n\n errors = []\n for field, issues in data.items():\n if isinstance(issues, six.string_types):\n issues = [issues]\n for issue in issues:\n error = self.dict_class()\n error[\"status\"] = status_code\n\n if issue_is_title:\n error[\"title\"] = issue\n else:\n error[\"detail\"] = issue\n\n if keys_are_fields:\n if field in ('non_field_errors', NON_FIELD_ERRORS):\n error[\"path\"] = '/-'\n else:\n error[\"path\"] = '/' + field\n\n errors.append(error)\n wrapper = self.dict_class()\n wrapper[\"errors\"] = errors\n return wrapper\n\n def wrap_options(self, data, renderer_context):\n '''Wrap OPTIONS data as JSON API meta value'''\n request = renderer_context.get(\"request\", None)\n method = request and getattr(request, 'method')\n if method != 'OPTIONS':\n raise WrapperNotApplicable(\"Request method must be OPTIONS\")\n\n wrapper = self.dict_class()\n wrapper[\"meta\"] = data\n return wrapper\n\n def wrap_paginated(self, data, renderer_context):\n \"\"\"Convert paginated data to JSON API with meta\"\"\"\n\n pagination_keys = ['count', 'next', 'previous', 'results']\n for key in pagination_keys:\n if not (data and key in data):\n raise WrapperNotApplicable('Not paginated results')\n\n view = renderer_context.get(\"view\", None)\n model = self.model_from_obj(view)\n resource_type = self.model_to_resource_type(model)\n\n try:\n from rest_framework.utils.serializer_helpers import ReturnList\n\n results = ReturnList(\n data[\"results\"],\n serializer=data.serializer.fields[\"results\"],\n )\n except ImportError:\n results = data[\"results\"]\n\n # Use default wrapper for results\n wrapper = self.wrap_default(results, renderer_context)\n\n # Add pagination metadata\n pagination = self.dict_class()\n\n pagination['previous'] = data['previous']\n pagination['next'] = data['next']\n pagination['count'] = data['count']\n\n wrapper.setdefault('meta', self.dict_class())\n\n wrapper['meta'].setdefault('pagination', self.dict_class())\n wrapper['meta']['pagination'].setdefault(\n resource_type, self.dict_class()).update(pagination)\n\n return wrapper\n\n def wrap_default(self, data, renderer_context):\n \"\"\"Convert native data to a JSON API resource collection\n\n This wrapper expects a standard DRF data object (a dict-like\n object with a `fields` dict-like attribute), or a list of\n such data objects.\n \"\"\"\n\n wrapper = self.dict_class()\n view = renderer_context.get(\"view\", None)\n request = renderer_context.get(\"request\", None)\n\n model = self.model_from_obj(view)\n resource_type = self.model_to_resource_type(model)\n\n if isinstance(data, list):\n many = True\n resources = data\n else:\n many = False\n resources = [data]\n\n items = []\n links = self.dict_class()\n linked = self.dict_class()\n meta = self.dict_class()\n\n for resource in resources:\n converted = self.convert_resource(resource, data, request)\n item = converted.get('data', {})\n linked_ids = converted.get('linked_ids', {})\n if linked_ids:\n item[\"links\"] = linked_ids\n items.append(item)\n\n 
links.update(converted.get('links', {}))\n linked.update(converted.get('linked', {}))\n meta.update(converted.get('meta', {}))\n\n if many:\n wrapper[resource_type] = items\n else:\n wrapper[resource_type] = items[0]\n\n if links:\n links = self.prepend_links_with_name(links, resource_type)\n wrapper[\"links\"] = links\n\n if linked:\n wrapper[\"linked\"] = linked\n\n if meta:\n wrapper[\"meta\"] = meta\n\n return wrapper\n\n def convert_resource(self, resource, data, request):\n fields = self.fields_from_resource(resource, data)\n\n if not fields:\n raise WrapperNotApplicable('Items must have a fields attribute.')\n\n data = self.dict_class()\n linked_ids = self.dict_class()\n links = self.dict_class()\n linked = self.dict_class()\n meta = self.dict_class()\n\n for field_name, field in six.iteritems(fields):\n converted = None\n\n if field_name in self.convert_by_name:\n converter_name = self.convert_by_name[field_name]\n converter = getattr(self, converter_name)\n converted = converter(resource, field, field_name, request)\n else:\n related_field = get_related_field(field)\n\n for field_type, converter_name in \\\n six.iteritems(self.convert_by_type):\n if isinstance(related_field, field_type):\n converter = getattr(self, converter_name)\n converted = converter(\n resource, field, field_name, request)\n break\n\n if converted:\n data.update(converted.pop(\"data\", {}))\n linked_ids.update(converted.pop(\"linked_ids\", {}))\n links.update(converted.get(\"links\", {}))\n linked.update(converted.get(\"linked\", {}))\n meta.update(converted.get(\"meta\", {}))\n else:\n data[field_name] = resource[field_name]\n\n return {\n 'data': data,\n 'linked_ids': linked_ids,\n 'links': links,\n 'linked': linked,\n 'meta': meta,\n }\n\n def convert_to_text(self, resource, field, field_name, request):\n data = self.dict_class()\n data[field_name] = encoding.force_text(resource[field_name])\n return {\"data\": data}\n\n def rename_to_href(self, resource, field, field_name, request):\n data = self.dict_class()\n data['href'] = resource[field_name]\n return {\"data\": data}\n\n def prepend_links_with_name(self, links, name):\n changed_links = links.copy()\n\n for link_name, link_obj in six.iteritems(links):\n prepended_name = \"%s.%s\" % (name, link_name)\n link_template = \"{%s}\" % link_name\n prepended_template = \"{%s}\" % prepended_name\n\n updated_obj = changed_links[link_name]\n\n if \"href\" in link_obj:\n updated_obj[\"href\"] = link_obj[\"href\"].replace(\n link_template, prepended_template)\n\n changed_links[prepended_name] = changed_links[link_name]\n del changed_links[link_name]\n\n return changed_links\n\n def handle_nested_serializer(self, resource, field, field_name, request):\n serializer_field = get_related_field(field)\n\n if hasattr(serializer_field, \"opts\"):\n model = serializer_field.opts.model\n else:\n model = serializer_field.Meta.model\n\n resource_type = self.model_to_resource_type(model)\n\n linked_ids = self.dict_class()\n links = self.dict_class()\n linked = self.dict_class()\n linked[resource_type] = []\n\n if is_related_many(field):\n items = resource[field_name]\n else:\n items = [resource[field_name]]\n\n obj_ids = []\n\n resource.serializer = serializer_field\n\n for item in items:\n converted = self.convert_resource(item, resource, request)\n linked_obj = converted[\"data\"]\n linked_ids = converted.pop(\"linked_ids\", {})\n\n if linked_ids:\n linked_obj[\"links\"] = linked_ids\n\n obj_ids.append(converted[\"data\"][\"id\"])\n\n field_links = 
self.prepend_links_with_name(\n converted.get(\"links\", {}), resource_type)\n\n field_links[field_name] = {\n \"type\": resource_type,\n }\n\n if \"href\" in converted[\"data\"]:\n url_field_name = api_settings.URL_FIELD_NAME\n url_field = serializer_field.fields[url_field_name]\n\n field_links[field_name][\"href\"] = self.url_to_template(\n url_field.view_name, request, field_name,\n )\n\n links.update(field_links)\n\n linked[resource_type].append(linked_obj)\n\n if is_related_many(field):\n linked_ids[field_name] = obj_ids\n else:\n linked_ids[field_name] = obj_ids[0]\n\n return {\"linked_ids\": linked_ids, \"links\": links, \"linked\": linked}\n\n def handle_related_field(self, resource, field, field_name, request):\n links = self.dict_class()\n linked_ids = self.dict_class()\n\n related_field = get_related_field(field)\n\n model = self.model_from_obj(related_field)\n resource_type = self.model_to_resource_type(model)\n\n if field_name in resource:\n links[field_name] = {\n \"type\": resource_type,\n }\n\n if is_related_many(field):\n link_data = [\n encoding.force_text(pk) for pk in resource[field_name]]\n elif resource[field_name]:\n link_data = encoding.force_text(resource[field_name])\n else:\n link_data = None\n\n linked_ids[field_name] = link_data\n\n return {\"linked_ids\": linked_ids, \"links\": links}\n\n def handle_url_field(self, resource, field, field_name, request):\n links = self.dict_class()\n linked_ids = self.dict_class()\n\n related_field = get_related_field(field)\n\n model = self.model_from_obj(related_field)\n resource_type = self.model_to_resource_type(model)\n\n links[field_name] = {\n \"href\": self.url_to_template(related_field.view_name,\n request,\n field_name),\n \"type\": resource_type,\n }\n\n if field_name in resource:\n linked_ids[field_name] = self.url_to_pk(\n resource[field_name], field)\n\n return {\"linked_ids\": linked_ids, \"links\": links}\n\n def url_to_pk(self, url_data, field):\n if is_related_many(field):\n try:\n obj_list = field.to_internal_value(url_data)\n except AttributeError:\n obj_list = [field.from_native(url) for url in url_data]\n\n return [encoding.force_text(obj.pk) for obj in obj_list]\n\n if url_data:\n try:\n obj = field.to_internal_value(url_data)\n except AttributeError:\n obj = field.from_native(url_data)\n\n return encoding.force_text(obj.pk)\n else:\n return None\n\n def url_to_template(self, view_name, request, template_name):\n resolver = urlresolvers.get_resolver(None)\n info = resolver.reverse_dict[view_name]\n\n path_template = info[0][0][0]\n # FIXME: what happens when URL has more than one dynamic values?\n # e.g. 
nested relations: manufacturer/%(id)s/cars/%(card_id)s\n path = path_template % {info[0][0][1][0]: '{%s}' % template_name}\n\n parsed_url = urlparse(request.build_absolute_uri())\n\n return urlunparse(\n [parsed_url.scheme, parsed_url.netloc, path, '', '', '']\n )\n\n def fields_from_resource(self, resource, data):\n if hasattr(data, \"serializer\"):\n resource = data.serializer\n\n if hasattr(resource, \"child\"):\n resource = resource.child\n\n return getattr(resource, \"fields\", None)\n\n def model_to_resource_type(self, model):\n return model_to_resource_type(model)\n\n def model_from_obj(self, obj):\n return model_from_obj(obj)\n\n\nclass JsonApiRenderer(JsonApiMixin, renderers.JSONRenderer):\n pass\n","sub_path":"rest_framework_json_api/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":19869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"151000874","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\nfrom torch.optim import *\nimport torchvision\nimport torchvision.transforms as transforms\nimport time\nimport matplotlib.pyplot as plt\n\n\n\ndef train(model, iterator, optimizer, criterion, device):\n total = 0\n correct = 0\n epoch_loss = 0\n epoch_acc = 0\n predicted_list = []\n model.train()\n \n for batch, labels in iterator:\n \n #Move tensors to the configured device\n batch = batch.to(device)\n labels = labels.to(device)\n \n \n #Forward pass\n outputs = model(batch.float())\n outputs = outputs.to(device)\n \n loss = criterion(outputs, labels).to(device)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n #check accuracy\n predictions = model(batch.float())\n _, predicted = torch.max(predictions.data, 1) #returns max value, indices\n total += labels.size(0) #keep track of total\n correct += (predicted == labels).sum().item() #.item() give the raw number\n acc = 100 * (correct / total)\n \n epoch_loss += loss.item()\n epoch_acc = acc\n predicted_list.append(predicted)\n \n return epoch_loss / len(iterator), epoch_acc, predicted_list\n\n#======================================================================\ndef evaluate(model, iterator, criterion , device):\n \n total = 0\n correct = 0\n epoch_loss = 0\n epoch_acc = 0\n predicted_list = []\n labels_list = []\n \n model.eval()\n \n with torch.no_grad():\n \n for batch, labels in iterator:\n \n #Move tensors to the configured device\n batch = batch.to(device)\n labels = labels.to(device)\n \n #print(labels)\n \n\n predictions = model(batch.float())\n loss = criterion(predictions, labels)\n \n \n _, predicted = torch.max(predictions.data, 1) #returns max value, indices\n #print(predicted)\n \n total += labels.size(0) #keep track of total\n correct += (predicted == labels).sum().item() #.item() give the raw number\n acc = 100 * (correct / total)\n \n epoch_loss += loss.item()\n epoch_acc += acc\n \n labels_list.append(labels)\n predicted_list.append(predicted)\n \n\n return epoch_loss / len(iterator), epoch_acc / len(iterator) ,predicted_list, labels_list\n\n\n#======================================================================\n\n# define a time function useful for calculating time\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\n\ndef do_plot(train_losses, valid_losses):\n plt.figure(figsize=(25,5))\n# clear_output(wait=True)\n plt.plot(train_losses, label='train loss')\n plt.plot(valid_losses, label='valid_loss')\n plt.title('Classification based Encoder loss')\n plt.legend()\n plt.show()\n\ndef save_loss_graph(_train_losses, _valid_losses, _path, _file):\n import matplotlib as mpl\n mpl.use('agg')\n _fig = plt.figure()\n plt.figure(figsize=(25,5))\n _ax = plt.plot(_train_losses, label='train loss')\n plt.plot(_valid_losses, label='valid_loss')\n plt.title('Classification based Encoder loss')\n plt.legend()\n plt.savefig(f'{_path}{_file}-loss.png')\n plt.close(_fig)\n\ndef save_acc_graph(_train_acc, _valid_acc, _path, _file):\n import matplotlib as mpl\n mpl.use('agg')\n _fig = plt.figure()\n plt.figure(figsize=(25,5))\n plt.plot(_train_acc, 'r' , label='train acc')\n plt.plot(_valid_acc, 'g' , label='valid acc')\n plt.title('Accuracy graph')\n plt.legend()\n 
plt.savefig(f'{_path}{_file}-acc.png')\n plt.close(_fig)\n","sub_path":"Reconstruction/experiment/Nice/libs/train_utilities.py","file_name":"train_utilities.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"593448974","text":"try:\n import cStringIO as StringIO\nexcept ImportError:\n import StringIO\n\nimport csv\n\nfrom django.http import HttpResponse\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.xmlutils import SimplerXMLGenerator\n\nfrom piston.emitters import Emitter, XMLEmitter\nfrom django.utils.encoding import smart_str\nfrom unidecode import unidecode\n\n\nclass CustomXMLEmitter(XMLEmitter):\n \"\"\"\n Overides the default xml generator\n Expects a dict object,\n the key of the object will be used as the opening tag\n \"\"\"\n\n def _to_xml(self, xml, data):\n if isinstance(data, (list, tuple)):\n for item in data:\n self._to_xml(xml, item)\n elif isinstance(data, dict):\n for key, value in data.iteritems():\n xml.startElement(key, {})\n self._to_xml(xml, value)\n xml.endElement(key)\n else:\n xml.characters(smart_unicode(data))\n\n def render(self, request):\n stream = StringIO.StringIO()\n\n xml = SimplerXMLGenerator(stream, \"utf-8\")\n xml.startDocument()\n self._to_xml(xml, self.construct())\n xml.endDocument()\n\n return stream.getvalue()\n\n\nclass CSVEmitter(Emitter):\n \"\"\"\n Emitter for exporting to CSV (excel dialect).\n \"\"\"\n def get_keys(self, input_dict):\n keys = []\n for item in input_dict.items():\n if isinstance(item[1], dict):\n keys.extend(self.get_keys(item[1]))\n else:\n keys.append(item[0])\n return keys\n\n def get_values(self, input_dict):\n for item in input_dict.items():\n if isinstance(item[1], dict):\n input_dict.update(self.get_values(input_dict.pop(item[0])))\n else:\n input_dict[item[0]] = unidecode(smart_str(item[1]))\n return input_dict\n\n def render(self, request):\n #response = StringIO.StringIO()\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=download.csv'\n\n content = self.construct()\n\n if isinstance(content, dict):\n content = content.values()[0]\n\n if content:\n keys = self.get_keys(content[0])\n\n if keys:\n writer = csv.DictWriter(response, keys, dialect='excel')\n headers = dict((n, n) for n in keys)\n writer.writerow(headers)\n for row in content:\n writer.writerow(self.get_values(row))\n\n return response\n\nEmitter.register('xml', CustomXMLEmitter, 'text/xml; charset=utf-8')\nEmitter.register('csv', CSVEmitter, 'text/csv; charset=utf-8')\n","sub_path":"api/emitters.py","file_name":"emitters.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"160668765","text":"\"\"\"\nThe main client class, handles all client functionality\n\"\"\"\n\n\nfrom abc import ABCMeta, abstractmethod\nimport socket\nimport logging\n\nfrom submarines_client import messages, constants, exceptions, protocol_utils\nfrom submarines_client.messages_codec import BaseMessagesCodec, MessagesCodec\nfrom submarines_client.messages import SubmarineMessageType\n\n\nclass BaseSubmarinesClient(metaclass=ABCMeta):\n \"\"\"\n The main client class, handles all client functionality\n \"\"\"\n\n @classmethod\n @abstractmethod\n def listen(cls, listening_port: int):\n \"\"\"\n Start listen to incoming tcp connections\n\n :param listening_port: The listening port to use\n :return: A client instance (on listen mode)\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def wait_for_game(self):\n \"\"\"\n Wait for a game request, and accept it\n Note: this is a blocking method, it will exit only\n when a game connection is established\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def invite_player(self, player_host: str, player_port: int) -> bool:\n \"\"\"\n Invite a player for a game\n Note: this is a blocking method, it will exit only\n when a response is received or an error is raised\n\n :param player_host: The player's host\n :param player_port: The player's port\n :return: whether the player accepted the game invite\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def send_message(self, message: messages.BaseSubmarinesMessage):\n \"\"\"\n send a message to the connected player\n\n :param message: The message you wish to send\n :raise NotConnectedError: No player is connected to the client\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def receive_message(self, expected_type: SubmarineMessageType) -> messages.BaseSubmarinesMessage:\n \"\"\"\n Receive a message from the connected player\n\n :param expected_type: optional, an expected message type\n :return: The decoded message\n :raise NotConnectedError: No player is connected to the client\n :raise ProtocolException: if the message is not expected type\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def __enter__(self):\n \"\"\"\n The client's entering point\n\n :return: The client\n \"\"\"\n\n raise NotImplementedError()\n\n @abstractmethod\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n The client's exit point (used for cleanup)\n\n :return: Should the exception be suppressed\n \"\"\"\n\n raise NotImplementedError()\n\n\nclass TCPSubmarinesClient(BaseSubmarinesClient):\n \"\"\"\n The main client class, handles all client functionality,\n using tcp connection\n \"\"\"\n\n def __init__(self,\n messages_codec: BaseMessagesCodec,\n listening_socket: socket.socket,\n game_socket: socket.socket = None):\n \"\"\"\n Initializing a client\n\n :param messages_codec: The messages codec of the client\n :param listening_socket: The socket in which you listen to incoming requests\n :param game_socket: A game socket, this socket has to be in a game session,\n means a game request and response was passed on this socket\n \"\"\"\n\n self._messages_codec = messages_codec\n self._listening_socket = listening_socket\n self._game_socket = game_socket\n self._logger = logging.getLogger(constants.LOGGER_NAME)\n\n @classmethod\n def listen(cls,\n listening_port: int = constants.Network.DEFAULT_PORT,\n messages_codec: BaseMessagesCodec = MessagesCodec()):\n \"\"\"\n Start listen to incoming tcp connections\n\n :param listening_port: The listening 
port to use\n :param messages_codec: The messages codec for the client\n :return: A client instance (on listen mode)\n \"\"\"\n\n try:\n listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listening_socket.bind((constants.Network.PUBLIC_IP, listening_port))\n listening_socket.listen(1)\n\n return cls(messages_codec=messages_codec, listening_socket=listening_socket)\n except socket.error:\n raise\n\n def wait_for_game(self):\n \"\"\"\n Wait for a game request, and accept it\n Note: this is a blocking method, it will exit only\n when a game connection is established\n \"\"\"\n\n while not self._game_socket:\n try:\n # accept connection\n self._game_socket, address = self._listening_socket.accept()\n\n # receive game request\n self.receive_message(SubmarineMessageType.GAME_REQUEST)\n self._logger.info('Incoming game request: ', f'from {address}')\n\n # send game reply\n self.send_message(messages.GameReplyMessage())\n self._logger.info('Game reply sent: ', 'game starts')\n except exceptions.ProtocolException as pe:\n self._logger.warning('Protocol error: ', pe)\n self._game_socket = None\n except socket.error as se:\n self._logger.warning('Network error: ', se)\n self._game_socket = None\n\n def invite_player(self, player_host: str, player_port: int = constants.Network.DEFAULT_PORT) -> bool:\n \"\"\"\n Invite a player for a game\n Note: this is a blocking method, it will exit only\n when a response is received or an error is raised\n\n :param player_host: The player's host\n :param player_port: The player's port\n :return: whether the player accepted the game invite\n \"\"\"\n\n try:\n # Connect to player\n self._game_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._game_socket.connect((player_host, player_port))\n\n # send game request\n self.send_message(messages.GameRequestMessage())\n\n # receive game reply\n game_reply: messages.GameReplyMessage = self.receive_message(SubmarineMessageType.GAME_REPLY)\n return game_reply.response\n except exceptions.ProtocolException:\n raise\n except socket.error:\n raise\n\n def send_message(self, message: messages.BaseSubmarinesMessage):\n \"\"\"\n send a message to the connected player\n\n :param message: The message you wish to send\n :raise NotConnectedError: No player is connected to the client\n \"\"\"\n\n encoded_message = self._messages_codec.encode_message(message)\n self._game_socket.send(encoded_message)\n\n def receive_message(self, expected_type: SubmarineMessageType = None) -> messages.BaseSubmarinesMessage:\n \"\"\"\n Receive a message from the connected player\n\n :param expected_type: optional, an expected message type\n :return: The decoded message\n :raise NotConnectedError: No player is connected to the client\n :raise ProtocolException: if the message is not expected type\n \"\"\"\n\n encoded_message = bytes()\n\n try:\n new_data = self._game_socket.recv(constants.Network.BUFFER_SIZE)\n\n while new_data:\n encoded_message += new_data\n\n if len(new_data) < constants.Network.BUFFER_SIZE:\n break\n\n new_data = self._game_socket.recv(constants.Network.BUFFER_SIZE)\n\n message = self._messages_codec.decode_message(encoded_message)\n\n if message.get_message_type() == SubmarineMessageType.ERROR:\n raise message.exception\n\n if expected_type:\n protocol_utils.insure_message_type(message, expected_type)\n\n return message\n\n except exceptions.ProtocolException:\n raise\n except socket.error:\n raise\n\n def __enter__(self):\n 
\"\"\"\n The client's entering point\n\n :return: The client\n \"\"\"\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n The client's exit point (used for cleanup)\n\n :return: Should the exception be suppressed\n \"\"\"\n\n if self._game_socket:\n self._game_socket.close()\n\n if self._listening_socket:\n self._listening_socket.close()\n\n return False\n","sub_path":"submarines_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"606144548","text":"from __future__ import annotations\n\nimport math\nimport random\nimport time\nfrom threading import Condition\n\nfrom ibapi.client import EClient\nfrom ibapi.commission_report import CommissionReport\nfrom ibapi.common import BarData, ListOfHistoricalTickLast, OrderId, TickerId, TickAttribLast, TickAttrib, \\\n HistoricalTickLast\nfrom ibapi.contract import Contract, ContractDetails\nfrom ibapi.ticktype import TickType\nfrom ibapi.wrapper import EWrapper\nfrom pandas import DataFrame\n\nfrom se2.domain.account import *\nfrom se2.domain.time_series import HistoryDataQueryCommand, TSData, Column, Asset, RTTimeSeriesType, \\\n BarHistoryTimeSeriesType, TSTypeRegistry\n\nclient: IBClient = None\n\n\ndef initialize(host: str, port: int, client_id: int):\n \"\"\"\n 初始化ib\n :param host:\n :param port:\n :param client_id:\n :return:\n \"\"\"\n global client\n if client:\n raise RuntimeError(\"client已经被初始化了\")\n client = IBClient(host, port, client_id)\n # 注册时序类型\n TSTypeRegistry.register(IBMinBar())\n TSTypeRegistry.register(IBCurrentPrice())\n TSTypeRegistry.register(IBAdjustedDailyBar())\n\n\nclass IBAccount(AbstractAccount):\n def match(self, data):\n raise NotImplementedError\n\n def do_place_order(self, order: Order):\n pass\n\n def do_cancel_order(self, order: Order):\n pass\n\n def do_update_order_price(self, order, new_price):\n pass\n\n def valid_scope(self, codes):\n pass\n\n\nclass Request(object):\n id_to_request = {}\n\n def __init__(self):\n self.condition: Condition = Condition()\n self.req_id = self._random_id()\n self.resp = None\n Request.id_to_request[self.req_id] = self\n\n def _random_id(self):\n while True:\n k = random.randint(0, 100000000)\n if k not in Request.id_to_request:\n return k\n\n @classmethod\n def new_request(cls):\n return Request()\n\n @classmethod\n def clear(cls, req_id):\n return Request.id_to_request.pop(req_id)\n\n @classmethod\n def find(cls, reqId):\n return Request.id_to_request[reqId]\n\n\nclass ClientStatusCallback(metaclass=ABCMeta):\n @abstractmethod\n def re_connect(self):\n pass\n\n\nclass IBClient(EWrapper):\n clients_map: Mapping[str, IBClient] = {}\n\n def tickPrice(self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib):\n super().tickPrice(reqId, tickType, price, attrib)\n if self.market_data_subscriber:\n self.market_data_subscriber.tickPrice(reqId, tickType, price, attrib)\n\n def tickSize(self, reqId: TickerId, tickType: TickType, size: int):\n super().tickSize(reqId, tickType, size)\n if self.market_data_subscriber:\n self.market_data_subscriber.tickSize(reqId, tickType, size)\n\n def tickString(self, reqId: TickerId, tickType: TickType, value: str):\n super().tickString(reqId, tickType, value)\n if self.market_data_subscriber:\n self.market_data_subscriber.tickString(reqId, tickType, value)\n\n def error(self, reqId: TickerId, errorCode: int, errorString: str):\n super().error(reqId, errorCode, errorString)\n\n def tickByTickAllLast(self, reqId: int, tickType: int, time: int, price: float, size: int,\n tickAttribLast: TickAttribLast, exchange: str, specialConditions: str):\n try:\n super().tickByTickAllLast(reqId, tickType, time, price, size, tickAttribLast, exchange, specialConditions)\n if self.tick_subscriber:\n self.tick_subscriber.tickByTickAllLast(reqId, tickType, time, price, size, tickAttribLast,\n exchange, specialConditions)\n except:\n import traceback\n logging.error(\"{}\".format(traceback.format_exc()))\n\n def execDetails(self, reqId: int, contract: Contract, execution: 
Execution):\n try:\n super().execDetails(reqId, contract, execution)\n if self.account_subscriber:\n self.account_subscriber.execDetails(reqId, contract, execution)\n except:\n import traceback\n logging.error(\"{}\".format(traceback.format_exc()))\n\n def commissionReport(self, commissionReport: CommissionReport):\n try:\n super().commissionReport(commissionReport)\n if self.account_subscriber:\n self.account_subscriber.commissionReport(commissionReport)\n except:\n import traceback\n logging.error(\"{}\".format(traceback.format_exc()))\n\n def orderStatus(self, orderId: OrderId, status: str, filled: float, remaining: float, avgFillPrice: float,\n permId: int, parentId: int, lastFillPrice: float, clientId: int, whyHeld: str, mktCapPrice: float):\n try:\n super().orderStatus(orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice,\n clientId,\n whyHeld, mktCapPrice)\n if self.account_subscriber:\n self.account_subscriber.orderStatus(orderId, status, filled, remaining, avgFillPrice, permId, parentId,\n lastFillPrice, clientId, whyHeld, mktCapPrice)\n except:\n import traceback\n logging.error(\"{}\".format(traceback.format_exc()))\n\n def historicalData(self, reqId: int, bar: BarData):\n super().historicalData(reqId, bar)\n req = Request.find(reqId)\n if not req.resp:\n req.resp = [bar]\n else:\n req.resp.append(bar)\n\n def historicalDataEnd(self, reqId: int, start: str, end: str):\n super().historicalDataEnd(reqId, start, end)\n req = Request.find(reqId)\n if req.condition.acquire():\n req.condition.notifyAll()\n req.condition.release()\n\n def historicalTicksLast(self, reqId: int, ticks: ListOfHistoricalTickLast, done: bool):\n super().historicalTicksLast(reqId, ticks, done)\n req = Request.find(reqId)\n if req.resp:\n req.resp.extend(ticks)\n else:\n req.resp = ticks\n if done:\n if req.condition.acquire():\n req.condition.notifyAll()\n req.condition.release()\n\n def contractDetails(self, reqId: int, contractDetails: ContractDetails):\n super().contractDetails(reqId, contractDetails)\n req = Request.find(reqId)\n if not req.resp:\n req.resp = [contractDetails.contract]\n else:\n req.resp.append(contractDetails.contract)\n\n def contractDetailsEnd(self, reqId: int):\n super().contractDetailsEnd(reqId)\n req = Request.find(reqId)\n if req.condition.acquire():\n req.condition.notifyAll()\n req.condition.release()\n\n def nextValidId(self, orderId: int):\n super().nextValidId(orderId)\n self._next_valid_id = orderId\n\n @alarm(target=\"尝试连接\", freq=Timedelta(minutes=10))\n def try_connect(self):\n # 先清理掉无效的连接\n if self.cli.connState == EClient.CONNECTED:\n self.cli.disconnect()\n self.cli.connect(self.host, self.port, self.client_id)\n if self.cli.connState == EClient.CONNECTED and self.cli.reader.is_alive():\n threading.Thread(name=\"ib_msg_consumer\", target=self.cli.run).start()\n # 等待客户端初始化成功\n time.sleep(3)\n # 重新订阅\n for callback in self.client_status_callbacks:\n callback.re_connect()\n else:\n raise RuntimeError(\"重新连接失败\")\n\n def __init__(self, host, port, client_id):\n super().__init__()\n cli = EClient(self)\n self.cli = cli\n self.host = host\n self.port = port\n self.client_id = client_id\n self.account_subscriber = None\n self.tick_subscriber = None\n self.market_data_subscriber: EWrapper = None\n self._next_valid_id = None\n self.code_contract_map = {}\n self.client_status_callbacks: List[ClientStatusCallback] = []\n self.try_connect()\n\n # 启动ping线程,如果与服务器的连接丢失,则会尝试重新连接\n def ping():\n # retry_count = 0\n while True:\n try:\n if cli.connState != 
EClient.CONNECTED or not cli.reader.is_alive():\n logging.info(\"尝试重新连接\")\n self.try_connect()\n except:\n import traceback\n logging.error(\"{}\".format(traceback.format_exc()))\n\n time.sleep(10)\n\n threading.Thread(name=\"ib_ping\", target=ping).start()\n\n def req_history_data(self, code: str, end_date_time: Timestamp, duration_str, bar_size, what_to_show,\n use_rth: int, format_date: int, keep_up_to_date, char_options) -> List[BarData]:\n req = Request.new_request()\n contract = self.code_to_contract(code)\n self.cli.reqHistoricalData(req.req_id, contract,\n end_date_time.strftime(\"%Y%m%d %H:%M:%S\") if end_date_time else \"\",\n duration_str, bar_size,\n what_to_show, use_rth, format_date, keep_up_to_date, char_options)\n if req.condition.acquire():\n req.condition.wait(20)\n if not req.resp:\n self.cli.cancelHistoricalData(req.req_id)\n raise RuntimeError(\"获取数据超时或者没有获取到数据\")\n resp = req.resp\n # 清理数据\n Request.clear(req.req_id)\n # 返回排好序的数据\n return sorted(resp, key=lambda bar: bar.date)\n\n def _req_history_ticks(self, code: str, start: Timestamp, end: Timestamp, nums: int, what_to_show: str,\n use_rth: int,\n ignore_size: bool, misc_options) -> List[HistoricalTickLast]:\n req = Request.new_request()\n contract = self.code_to_contract(code)\n self.cli.reqHistoricalTicks(req.req_id, contract,\n start.strftime(\"%Y%m%d %H:%M:%S\") if start is not None else \"\",\n end.strftime(\"%Y%m%d %H:%M:%S\"), nums, what_to_show,\n use_rth, ignore_size, misc_options)\n if req.condition.acquire():\n req.condition.wait(10)\n if not req.resp:\n raise RuntimeError(\"获取数据超时或者没有获取到数据\")\n resp = req.resp\n Request.clear(req.req_id)\n return resp\n\n def req_min_bar(self, command: HistoryDataQueryCommand) -> Mapping[str, List[BarData]]:\n code_to_bars = {}\n for code in command.codes:\n bars: List[BarData] = []\n batch_end = command.end\n while True:\n batch_bars = self.req_history_data(code, end_date_time=batch_end, duration_str=\"86400 S\",\n bar_size='1 min',\n what_to_show='TRADES', use_rth=1, format_date=1,\n keep_up_to_date=False,\n char_options=None)\n bars.extend(batch_bars[::-1])\n if command.start and command.end:\n # 检查start时间\n if Timestamp(bars[-1].date, tz='Asia/Shanghai') <= command.start:\n break\n else:\n # 检查window\n if len(bars) >= command.window:\n break\n batch_end = Timestamp(bars[-1].date, tz='Asia/Shanghai')\n code_to_bars[code] = bars\n return code_to_bars\n\n def req_tick(self, command: HistoryDataQueryCommand) -> Mapping[str, List[HistoricalTickLast]]:\n\n ticks_map = {}\n for code in command.codes:\n ticks: List[HistoricalTickLast] = []\n batch_end = command.end\n while True:\n # 如果指定了开始时间,则每次获取1000条,否则使用command里面定义的window\n window = 1000 if command.start else command.window\n batch_ticks = self._req_history_ticks(code, None, batch_end, nums=window,\n what_to_show='TRADES',\n use_rth=1, ignore_size=False, misc_options=None)\n ticks.extend(batch_ticks)\n if command.start and command.end:\n if Timestamp(batch_ticks[0].time, unit='s', tz='Asia/Shanghai') <= command.start:\n break\n else:\n if len(ticks) >= command.window:\n break\n batch_end = Timestamp(batch_ticks[0].time, unit='s', tz='Asia/Shanghai')\n ticks_map[code] = ticks\n\n return ticks_map\n\n def code_to_contract(self, code) -> Contract:\n if code in self.code_contract_map:\n return self.code_contract_map[code]\n contract = Contract()\n ss = code.split(\"_\")\n contract.symbol = ss[0]\n contract.secType = ss[1]\n contract.currency = ss[2]\n contract.exchange = ss[3]\n if len(ss) > 4:\n 
contract.lastTradeDateOrContractMonth = ss[4]\n contracts: List[Contract] = self.query_contract(contract)\n if len(contracts) != 1:\n raise RuntimeError(\"code不能唯一确定一个合约\")\n self.code_contract_map[code] = contracts[0]\n return contracts[0]\n\n def contract_to_code(self, contract: Contract):\n return \"_\".join([contract.symbol, contract.secType, contract.currency, contract.exchange])\n\n def query_contract(self, contract):\n req = Request.new_request()\n self.cli.reqContractDetails(req.req_id, contract)\n\n if req.condition.acquire():\n req.condition.wait(20)\n if not req.resp:\n raise RuntimeError(\"没有获取到数据\")\n resp = req.resp\n # 清理数据\n Request.clear(req.req_id)\n return resp\n\n def placeOrder(self, ib_order_id, contract, order):\n self.cli.placeOrder(ib_order_id, contract, order)\n\n def next_valid_id(self):\n if not self._next_valid_id:\n raise RuntimeError(\"no next_valid_id\")\n self._next_valid_id += 1\n return self._next_valid_id\n\n @classmethod\n def find_client(cls, host, port, client_id):\n key = \"{}_{}_{}\".format(host, port, client_id)\n if key in cls.clients_map:\n return cls.clients_map[key]\n return None\n\n @classmethod\n def registry(cls, host, port, client_id, cli: IBClient):\n key = \"{}_{}_{}\".format(host, port, client_id)\n cls.clients_map[key] = cli\n\n def sub(self, account_subscriber: EWrapper = None, tick_subscriber: EWrapper = None,\n market_data_subscriber: EWrapper = None):\n if account_subscriber:\n self.account_subscriber = account_subscriber\n if tick_subscriber:\n self.tick_subscriber = tick_subscriber\n if market_data_subscriber:\n self.market_data_subscriber = market_data_subscriber\n\n def register_client_status_callback(self, callback: ClientStatusCallback):\n if callback not in self.client_status_callbacks:\n self.client_status_callbacks.append(callback)\n\n\nclass IBCurrentPrice(RTTimeSeriesType, EWrapper, ClientStatusCallback):\n \"\"\"\n IB实时数据\n \"\"\"\n\n def re_connect(self):\n if len(self.sub_codes) > 0:\n logging.info(\"重新订阅实时数据,codes:{}\".format(self.sub_codes))\n self.do_sub(self.sub_codes)\n\n def tickPrice(self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib):\n req = Request.find(reqId)\n code = req.code\n if tickType == 1:\n # bid price\n if code in self.current_price_map:\n self.current_price_map[code] = self.current_price_map[code].with_new_bid_price(price)\n else:\n cp: CurrentPrice = self._default_cp(code)\n cp = cp.with_new_bid_price(price)\n self.current_price_map[code] = cp\n elif tickType == 2:\n # ask price\n if code in self.current_price_map:\n self.current_price_map[code] = self.current_price_map[code].with_new_ask_price(price)\n else:\n self.current_price_map[code] = self._default_cp(code).with_new_ask_price(price)\n\n for sub in self.sub_map[code]:\n sub.on_data(self.current_price_map[code])\n\n def _default_cp(self, code):\n now = Timestamp.now(tz='Asia/Shanghai')\n return CurrentPrice(self.name(), now, code,\n {\"price\": None, 'ask_price': None, 'ask_size': None, 'bid_price': None, 'bid_size': None})\n\n def tickSize(self, reqId: TickerId, tickType: TickType, size: int):\n code = Request.find(reqId).code\n values = {\n 'tick_type': tickType,\n 'value': size,\n }\n if tickType == 0:\n # bid size\n if code in self.current_price_map:\n self.current_price_map[code] = self.current_price_map[code].with_new_bid_size(size)\n else:\n self.current_price_map[code] = self._default_cp(code).with_new_bid_size(size)\n elif tickType == 3:\n # ask size\n if code in self.current_price_map:\n 
self.current_price_map[code] = self.current_price_map[code].with_new_ask_size(size)\n else:\n self.current_price_map[code] = self._default_cp(code).with_new_ask_size(size)\n for sub in self.sub_map[code]:\n sub.on_data(self.current_price_map[code])\n\n def tickString(self, reqId: TickerId, tickType: TickType, value: str):\n code = Request.find(reqId).code\n if tickType == 48:\n # 45表示RTVolume\n values = value.split(';')\n if len(values[0]) > 0:\n # 这个时间我们会丢弃,使用接收到数据的时间作为当前价格的时间戳\n # 因为收取到买卖价格的时候是没有时间戳的\n price_time = Timestamp(int(values[2]), unit='ms', tz='Asia/Shanghai')\n now = Timestamp.now(tz='Asia/Shanghai')\n try:\n self._time_check(price_time, now)\n except:\n pass\n new_price = float(values[0])\n if code in self.current_price_map:\n self.current_price_map[code] = self.current_price_map[code].with_new_price(new_price)\n else:\n self.current_price_map[code] = self._default_cp(code).with_new_price(new_price)\n\n for sub in self.sub_map[code]:\n sub.on_data(self.current_price_map[code])\n\n @alarm(level=AlarmLevel.ERROR, target=\"数据延迟检查\", freq=Timedelta(minutes=1),\n escape_params=[EscapeParam(index=0, key='self')])\n def _time_check(self, server_time: Timestamp, receive_time: Timestamp):\n if (receive_time - server_time) > Timedelta(seconds=5):\n raise RuntimeError(\"接收的数据延迟过高\")\n\n def name(self) -> str:\n return 'ibCurrentPrice'\n\n def current_price(self, codes) -> Mapping[str, CurrentPrice]:\n ret = {}\n for code in codes:\n if code in self.current_price_map:\n ret[code] = self.current_price_map[code]\n return ret\n\n def do_sub(self, codes: List[str]):\n for code in codes:\n contract: Contract = self.client.code_to_contract(code)\n req = Request.new_request()\n self.client.cli.reqMktData(req.req_id, contract, '233', False, False, None)\n self.code_to_req[code] = req\n req.code = code\n\n def do_unsub(self, codes):\n for code in codes:\n req_id = self.code_to_req[code].req_id\n self.client.cli.cancelMktData(req_id)\n Request.clear(req_id)\n\n def __init__(self):\n super().__init__()\n\n # cli = IBClient.find_client(host, port, client_id)\n # if not cli:\n # cli = IBClient(host, port, client_id)\n # IBClient.registry(host, port, client_id, cli)\n global client\n client.sub(market_data_subscriber=self)\n client.register_client_status_callback(self)\n self.client = client\n self.code_to_req: Mapping[str, Request] = {}\n self.current_price_map: Mapping[str, CurrentPrice] = {}\n\n\nclass IBAdjustedDailyBar(BarHistoryTimeSeriesType):\n\n def name(self) -> str:\n return \"ibAdjustedDailyBar\"\n\n def should_cache(self):\n return False\n\n def load_history_data(self, command: HistoryDataQueryCommand) -> List[TSData]:\n if not command.calendar:\n raise RuntimeError(\"need calendar\")\n start = command.start\n if not start:\n weeks = math.ceil(command.window / 5)\n start = command.end - Timedelta(weeks=weeks)\n\n ys = math.ceil((Timestamp.now(tz='Asia/Shanghai') - start).days / 365)\n total_ts_data: List[TSData] = []\n\n for code in command.codes:\n # 返回的bar的日期,是收盘时刻对应的UTC标准时间的日期部分\n bars: List[BarData] = self.client.req_history_data(code, None, \"{} Y\".format(ys), \"1 day\",\n \"ADJUSTED_LAST\", 1, 1, False, None)\n for bar in bars:\n dt = Timestamp(bar.date, tz='UTC')\n visible_time = command.calendar.next_close(dt).tz_convert('Asia/Shanghai')\n start_time = (command.calendar.previous_open(visible_time) - Timedelta(minutes=1)).tz_convert(\n 'Asia/Shanghai')\n provider_data = {\"start_time\": start_time, \"open\": bar.open, \"high\": bar.high, \"low\": bar.low,\n \"close\": bar.close, 
\"volume\": bar.volume}\n ts_data = TSData(self.name(), visible_time, code, self.parse(provider_data))\n total_ts_data.append(ts_data)\n\n return total_ts_data\n\n def load_assets(self) -> List[Asset]:\n raise RuntimeError(\"not supported\")\n\n def columns(self) -> List[Column]:\n columns = [Column(\"start_time\", Timestamp, None, None, None), Column(\"open\", float, None, None, None),\n Column(\"high\", float, None, None, None), Column(\"low\", float, None, None, None),\n Column(\"close\", float, None, None, None), Column(\"volume\", int, None, None, None)]\n return columns\n\n def __init__(self):\n super().__init__(current_price_change_start_offset=Timedelta(days=1),\n current_price_change_end_offset=Timedelta(days=365 * 2))\n\n # cli = IBClient.find_client(host, port, client_id)\n # if not cli:\n # cli = IBClient(host, port, client_id)\n # IBClient.registry(host, port, client_id, cli)\n global client\n self.client = client\n self.cp_mem_cache: Mapping[str, DataFrame] = {}\n\n\nclass IBMinBar(BarHistoryTimeSeriesType):\n\n def __init__(self):\n super().__init__(current_price_change_start_offset=Timedelta(minutes=5),\n current_price_change_end_offset=Timedelta(minutes=1440 * 10))\n # cli = IBClient.find_client(host, port, client_id)\n # if not cli:\n # cli = IBClient(host, port, client_id)\n # IBClient.registry(host, port, client_id, cli)\n global client\n self.client = client\n\n def name(self) -> str:\n return \"ibMinBar\"\n\n def load_history_data(self, command: HistoryDataQueryCommand) -> List[TSData]:\n code_to_bars: Mapping[str, List[BarData]] = self.client.req_min_bar(command)\n all_ts_datas = []\n one_minute = Timedelta(minutes=1)\n for code in code_to_bars.keys():\n\n for bar in code_to_bars.get(code):\n dt = Timestamp(bar.date, tz='Asia/Shanghai')\n visible_time = dt + one_minute\n provider_data = {\"date\": dt, \"open\": bar.open, \"high\": bar.high, \"low\": bar.low, \"close\": bar.close,\n \"volume\": bar.volume}\n ts_data = TSData(self.name(), visible_time, code, self.parse(provider_data))\n all_ts_datas.append(ts_data)\n\n return all_ts_datas\n\n def load_assets(self) -> List[Asset]:\n pass\n\n def columns(self) -> List[Column]:\n columns = [Column(\"date\", Timestamp, None, None, None), Column(\"open\", float, None, None, None),\n Column(\"high\", float, None, None, None), Column(\"low\", float, None, None, None),\n Column(\"close\", float, None, None, None), Column(\"volume\", int, None, None, None)]\n return columns\n","sub_path":"se2/infras/ib.py","file_name":"ib.py","file_ext":"py","file_size_in_byte":25129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"653438145","text":"from app import app\nfrom flask import render_template, flash, redirect, url_for\nfrom app.forms import LoginForm\nfrom flask_login import current_user, login_user, login_required\nfrom app.models import User, Result\nfrom flask_login import logout_user\nfrom flask import request\nfrom werkzeug.urls import url_parse\n\nfrom app import db\nfrom app.forms import RegistrationForm, AnswerForm\n\n@app.route('/')\ndef welcome():\n return render_template(\"welcome.html\")\n\n@app.route('/home')\ndef home():\n return render_template(\"Home.html\")\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password, please try again')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('home')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form,)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n \n@app.route('/learn')\ndef learn():\n return render_template(\"learn.html\")\n\n@app.route('/assessment_home')\ndef assessment_home():\n return render_template(\"assessment_home.html\")\n\n@app.route('/assessment', methods=['GET', 'POST'])\ndef assessment():\n form = AnswerForm()\n if form.validate_on_submit():\n score,correct = getmark(form)\n result = Result(user_id=current_user.get_id(),mark=score)\n db.session.add(result)\n db.session.commit()\n return redirect(url_for('feedback', score = score, correct = correct))\n return render_template(\"assessment.html\", form=form)\n\n@app.route('/table')\n@login_required\ndef table():\n results = Result.query.filter_by(user_id=current_user.get_id()).all()\n return render_template(\"table.html\", results=results)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n@app.route('/feedback?')\ndef feedback(score,correct):\n return render_template(\"feedback.html\", score = score, correct = correct)\n\ndef getmark(form):\n score = 0\n correct = []\n if form.q1.data == 'nu':\n score += 10\n correct.append(1)\n if form.q2.data == 'nume':\n score += 10\n correct.append(2)\n if form.q3.data == 'ネ':\n score += 10\n correct.append(3)\n if form.q4.data == 'を':\n score += 10\n correct.append(4)\n if form.q5.data == 'ga ka':\n score += 10\n correct.append(5)\n if form.q6.data == 'ノ':\n score += 10\n correct.append(6)\n if form.q7.data == 'へ':\n score += 10\n correct.append(7)\n if form.q8.data == 'た':\n score += 10\n correct.append(8)\n if form.q9.data == 'ぽ':\n score += 10\n correct.append(9)\n if form.q10.data == 'ん':\n score += 10\n correct.append(10)\n return score, 
correct\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"443912945","text":"#!/usr/bin/python\n\nimport math\n\ndef format_label(value, tick):\n\tif tick < 0.00001:\n\t\tfmtstr = \"%02d:%02d:%09.6f\"\n\telif tick < 0.0001:\n\t\tfmtstr = \"%02d:%02d:%08.5f\"\n\telif tick < 0.001:\n\t\tfmtstr = \"%02d:%02d:%07.4f\"\n\telif tick < 0.01:\n\t\tfmtstr = \"%02d:%02d:%06.3f\"\n\telif tick < 0.1:\n\t\tfmtstr = \"%02d:%02d:%05.2f\"\n\telif tick < 1:\n\t\tfmtstr = \"%02d:%02d:%04.1f\"\n\telse:\n\t\tfmtstr = \"%02d:%02d:%02.0f\"\n\thour = int(value) / 3600\n\tmin = int(value - hour * 3600) / 60\n\tsec = value - hour * 3600 - min * 60\n\treturn fmtstr % (hour, min, sec)\n# end of format_label\n\nTIME_SCALE_OPTIONS = [\n\t(5, 5, 0.000001), \n\t(10, 5, 0.000002), \n\t(5, 5, 0.00001), \n\t(10, 5, 0.0001), \n\t(10, 5, 0.0002), \n\t(5, 5, 0.0001), \n\t(10, 5, 0.0001), \n\t(10, 5, 0.002), \n\t(5, 5, 0.001), \n\t(10, 5, 0.001), \n\t(10, 5, 0.002), \n\t(5, 5, 0.01), \n\t(10, 5, 0.01), \n\t(10, 5, 0.02), \n\t(5, 5, 0.1), \n\t(10, 5, 0.1), \n\t(10, 5, 0.2), \n\t(5, 5, 1), \n\t(10, 5, 1), \n\t(10, 5, 2), \n\t(3, 3, 10), \n\t(10, 5, 6), \n\t(10, 5, 12), \n\t(5, 5, 60), \n\t(10, 5, 60), \n\t(10, 5, 120), \n\t(3, 3, 600), \n\t(6, 3, 600), \n\t(12, 6, 600), \n\t(4, 4, 1800), \n\t(8, 4, 1800), \n\t(8, 4, 3600), \n\t(12, 6, 3600), \n]\n\nclass Metrics:\n\tdef __init__(self, layout, width=200, height=74, rate=8000, \n\t\t\t\tchannels=1, draw_in=0, draw_out=8000*60):\n\t\tself.layout = layout\n\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.draw_in = draw_in\n\t\tself.draw_out = draw_out\n\n\t\tself.rate = rate\n\t\tself.channels = channels\n\n\t\tself.ch_stride = -1\n\t\tself.ticks = {}\n\t\tself.grids = []\n\t\tself.labels = []\n\t\tself.x_array = []\n\t\tself.timetick = []\n\t\tself.timetag = []\n\n\t\tself.update_volume_grid()\n\t\tself.update_time_grid()\n\n\tdef update(self, width, height, rate, channels, draw_in, draw_out):\n\t\tif height != self.height or channels != self.channels:\n\t\t\tself.height = height\n\t\t\tself.channels = channels\n\t\t\tself.update_volume_grid()\n\n\t\tif (width != self.width or rate != self.rate or\n\t\t\t\tdraw_in != self.draw_in or draw_out != self.draw_out):\n\t\t\tself.width = width\n\t\t\tself.rate = rate\n\t\t\tself.draw_in = draw_in\n\t\t\tself.draw_out = draw_out\n\t\t\tself.update_time_grid()\n\n\tdef update_volume_grid(self):\n\t\tself.ch_stride = (self.height + 2.0) / self.channels\n\t\th = round(self.ch_stride - 2) - 1\n\t\tstart = 0\n\t\tif h >= 400:\n\t\t\ttick_stride, grid_stride, label_stride = 1, 5, 10\n\t\telif h >= 200:\n\t\t\ttick_stride, grid_stride, label_stride = 2, 10, 20\n\t\telif h >= 80:\n\t\t\ttick_stride, grid_stride, label_stride = 10, 50, 50\n\t\telif h >= 40:\n\t\t\ttick_stride, grid_stride, label_stride = 10, 50, 100\n\t\telse:\n\t\t\ttick_stride, grid_stride, label_stride = 20, 100, 100\n\t\t\tstart = 100\n\t\tself.ticks = dict([(x, int(round(h * x / 200))) \n\t\t\t\tfor x in range(0, 201, tick_stride)])\n\t\tself.grids = [x for x in range(0, 201, grid_stride)]\n\t\tself.labels = [x for x in range(start, 201, label_stride)]\n\n\tdef update_time_grid(self):\n\t\tlayout = self.layout\n\t\tpixel_per_sec = (self.width - 1.) 
* self.rate / \\\n\t\t\t\t(self.draw_out - self.draw_in)\n\t\ttime_scale = {}\n\t\ti = 0\n\t\twhile not time_scale:\n\t\t\tif i < len(TIME_SCALE_OPTIONS) - 1:\n\t\t\t\tmajor, minor, tick = TIME_SCALE_OPTIONS[i]\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tmajor = 12\n\t\t\t\tminor = 6\n\t\t\t\ttick *= 2\n\t\t\tpixel_per_grid = pixel_per_sec * minor * tick\n\t\t\tif pixel_per_grid < 25:\n\t\t\t\tcontinue\n\t\t\tpixel_per_label = pixel_per_sec * major * tick\n\t\t\tmax_label = format_label(self.draw_out * self.rate, tick)\n\t\t\tlayout.set_text(max_label)\n\t\t\tlabel_width, label_height = layout.get_pixel_size()\n\t\t\tif label_width + 10 > pixel_per_label:\n\t\t\t\tcontinue\n\t\t\ttime_scale = {\"major\": major, \"minor\": minor, \"tick\": tick}\n\t\t\tbreak\n\t\tsec_per_grid = time_scale[\"tick\"] * time_scale[\"minor\"]\n\t\tsec_draw_in = self.draw_in * 1.0 / self.rate\n\t\tsec_draw_out = self.draw_out * 1.0 / self.rate\n\t\tsec_start = math.floor(sec_draw_in / sec_per_grid) * sec_per_grid\n\t\tsec_curr = sec_start\n\t\tcounter = 0\n\t\tself.x_array = []\n\t\tself.timetick = []\n\t\tself.timetag = []\n\t\twhile sec_curr <= sec_draw_out:\n\t\t\tx = int(round((sec_curr - sec_draw_in) * pixel_per_sec))\n\t\t\tif counter % time_scale[\"major\"] == 0:\n\t\t\t\tself.x_array.append(x)\n\t\t\t\tself.timetick.append((x, 5))\n\t\t\t\tself.timetag.append((x, format_label(sec_curr, time_scale[\"tick\"])))\n\t\t\telif counter % time_scale[\"minor\"] == 0:\n\t\t\t\tself.x_array.append(x)\n\t\t\t\tself.timetick.append((x, 3))\n\t\t\telse:\n\t\t\t\tself.timetick.append((x, 1))\n\t\t\tcounter += 1\n\t\t\t#sec_curr = sec_start + sec_per_grid * counter\n\t\t\tsec_curr = sec_start + time_scale[\"tick\"] * counter\n\n\tdef get_x_array(self):\n\t\treturn self.x_array\n\n\tdef get_timetick(self):\n\t\treturn self.timetick\n\n\tdef get_timetag(self):\n\t\treturn self.timetag\n\n\tdef get_channel_base_y(self, ch):\n\t\treturn int(self.ch_stride * ch)\n\n\tdef get_channel_height(self):\n\t\treturn int(round(self.ch_stride - 2))\n\n\tdef get_ticks(self):\n\t\tresult = []\n\t\tfor i in sorted(self.ticks):\n\t\t\ty = self.ticks[i]\n\t\t\tif i in self.labels:\n\t\t\t\tresult.append((y, 7))\n\t\t\telif i in self.grids:\n\t\t\t\tresult.append((y, 3))\n\t\t\telse:\n\t\t\t\tresult.append((y, 1))\n\t\treturn result\n\n\tdef get_grid_lines(self):\n\t\treturn [self.ticks[i] for i in self.grids]\n\n\tdef get_labels(self):\n\t\tresult = []\n\t\tfor i in self.labels[:-1]:\n\t\t\tresult.append((self.ticks[i], \"%.1f\" % (1-i/100.)))\n\t\treturn result\n\n\tdef get_center(self):\n\t\treturn self.ticks[100]\n\n# end of class Metrics\n\n# end of $URL$\n\n","sub_path":"Metrics.py","file_name":"Metrics.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"4616029","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def bstToGst(self, root: TreeNode) -> TreeNode:\n \n \n if not root:\n return None\n self.helper(root, 0)\n return root\n \n \n \n def helper(self, root:TreeNode, total: int)-> int:\n \n if not root.right and not root.left:\n root.val += total\n return root.val\n \n if root.right:\n total = self.helper(root.right, total)\n \n root.val += total\n total = root.val\n \n if root.left:\n total = self.helper(root.left, total)\n \n return total","sub_path":"binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py","file_name":"binary-search-tree-to-greater-sum-tree.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"158890349","text":"from .cost import Cost\n\nimport numpy as np\nimport json\n\n\nclass ParticleVectorNLocalConvolution4Network(object):\n\n def __init__(self, particle_input=None, cost=\"mse\", regularizer=None):\n self.layers = []\n self.cost_name = cost\n self.cost_function = Cost.get(cost)\n self.cost_d_function = Cost.get_d(cost)\n self.lock_built = False\n self.regularizer = regularizer\n self.particle_input = particle_input\n\n def append(self, layer):\n \"\"\"\n Appends a layer to the network\n\n :param layer:\n :return:\n \"\"\"\n self.layers.append(layer)\n\n def build(self):\n \"\"\"\n Handle networks layer dimensions checks, other possible initializations\n\n Release build lock\n\n :return:\n \"\"\"\n # TODO\n\n self.lock_built = True\n\n def predict_single(self, data_X):\n \"\"\"\n Same as predict, but only one sample\n \"\"\"\n return self.predict(data_X.reshape((1, len(data_X))))\n\n def predict(self, data_X):\n \"\"\"\n Pass given input through network to compute the output prediction\n\n :param data_X:\n :return:\n \"\"\"\n a, r = self.particle_input.feed_forward(data_X)\n for layer in self.layers:\n a, r = layer.feed_forward(a, r)\n return a\n\n def feed_to_layer(self, data_X, end_layer=0):\n \"\"\"\n Feed data forward until given end layer. Return the resulting activation\n\n :param data_X: input data\n :param end_layer: the index of the ending layer\n :return: resulting activation at end layer\n \"\"\"\n if len(self.layers) <= end_layer < 0:\n return None\n\n a, r = self.particle_input.feed_forward(data_X)\n for l, layer in enumerate(self.layers):\n a, r = layer.feed_forward(a, r)\n if l == end_layer:\n return a\n\n return None\n\n def cost(self, data_X, data_Y):\n \"\"\"\n Compute the cost for all input data corresponding to expected output\n\n :param data_X:\n :param data_Y:\n :return:\n \"\"\"\n c = self.cost_function(data_Y, self.predict(data_X))\n\n if self.regularizer is not None:\n c += self.regularizer.cost(self.particle_input, self.layers)\n\n return c\n\n def cost_gradient_thread(self, data_XYt):\n \"\"\"\n Wrapper for multithreaded call\n :param data_XY:\n :return:\n \"\"\"\n return self.cost_gradient(data_XYt[0], data_XYt[1], thread_scale=data_XYt[2])\n\n def cost_gradient(self, data_X, data_Y, thread_scale=1):\n \"\"\"\n Computes the gradient of the cost with respect to each weight and bias in the network\n\n :param data_X:\n :param data_Y:\n :return:\n \"\"\"\n\n # Output gradients\n dc_db = []\n dc_dr = [[np.zeros(self.particle_input.output_size) for _ in range(self.particle_input.nr)]]\n dc_dn = [[np.zeros(self.particle_input.output_size) for _ in range(self.particle_input.nv)]]\n dc_dm = [[np.zeros(self.particle_input.output_size) for _ in range(self.particle_input.nw)]]\n # Initialize\n for l, layer in enumerate(self.layers):\n dc_db.append(np.zeros(layer.b.shape))\n dc_dr.append([np.zeros(layer.output_size) for _ in range(self.particle_input.nr)])\n dc_dn.append([np.zeros(layer.output_size) for _ in range(self.particle_input.nv)])\n dc_dm.append([np.zeros(layer.output_size) for _ in range(self.particle_input.nw)])\n\n sigma_Z = []\n A_scaled, _ = self.particle_input.feed_forward(data_X)\n A = [A_scaled] # Note: A has one more element than sigma_Z\n prev_layer_rr = self.particle_input.get_rxyz()\n for l, layer in enumerate(self.layers):\n z = layer.compute_z(A[l], prev_layer_rr, apply_input_noise=(l == 0))\n a = layer.compute_a(z, apply_dropout=True)\n A.append(a)\n sigma_Z.append(layer.compute_da(z, apply_dropout=True))\n prev_layer_rr = 
layer.get_rxyz()\n\n delta_L = self.cost_d_function(data_Y, A[-1], sigma_Z[-1])\n\n # IMPORTANT:\n # For threaded calls, we need to divide the cost gradient by the number threads to account for the mean being\n # taken in the cost function. When data is split, the mean is off by a factor of the number of threads.\n if thread_scale > 1:\n delta_L /= thread_scale\n\n # For each piece of data\n for di, data in enumerate(data_X):\n dc_db[-1] += delta_L[di]\n\n # Reshape\n for r in range(self.particle_input.nr):\n self.particle_input.positions[r] = self.particle_input.positions[r].reshape((self.particle_input.output_size, 1))\n for v in range(self.particle_input.nv):\n self.particle_input.nvectors[v] = self.particle_input.nvectors[v].reshape((self.particle_input.output_size, 1))\n for w in range(self.particle_input.nw):\n self.particle_input.nwectors[w] = self.particle_input.nwectors[w].reshape((self.particle_input.output_size, 1))\n for layer in self.layers:\n for r in range(layer.nr):\n layer.positions[r] = layer.positions[r].reshape((layer.output_size, 1))\n for v in range(layer.nv):\n layer.nvectors[v] = layer.nvectors[v].reshape((layer.output_size, 1))\n for w in range(layer.nw):\n layer.nwectors[w] = layer.nwectors[w].reshape((layer.output_size, 1))\n\n if layer.apply_convolution:\n layer.positions_cache = layer.positions_cache.reshape((layer.nr, len(layer.positions_cache[0]), 1))\n layer.nvectors_cache = layer.nvectors_cache.reshape((layer.nv, len(layer.nvectors_cache[0]), 1))\n layer.nwectors_cache = layer.nwectors_cache.reshape((layer.nw, len(layer.nwectors_cache[0]), 1))\n\n l = -1\n layer = self.layers[l]\n prev_layer = self.particle_input if -(l-1) > len(self.layers) else self.layers[l-1]\n\n Al = A[l-1]\n Al_trans = Al.transpose()\n trans_delta_L = delta_L.transpose()\n trans_sigma_Z = []\n for sz in sigma_Z:\n trans_sigma_Z.append(np.asarray(sz).transpose())\n\n next_delta = np.zeros((len(prev_layer.positions[0]), len(data_X)))\n\n # Position gradient\n for j in range(layer.output_size):\n trans_delta_L_j = trans_delta_L[j]\n trans_sigma_Z_l = trans_sigma_Z[l-1] if -(l-1) <= len(self.layers) else np.ones((prev_layer.output_size, len(data_X)))\n\n d2 = None\n lpos = None\n if layer.apply_convolution:\n d2 = np.zeros((len(prev_layer.positions[0]), len(data_X))) # this is amazing stuff! 
numpy is the best!\n lpos = layer.positions_cache\n\n else:\n d2 = np.zeros_like(prev_layer.positions[0])\n lpos = layer.positions\n\n dr = []\n for r in range(layer.nr):\n dtmp = prev_layer.positions[r] - lpos[r][j]\n d2 += dtmp ** 2\n dr.append(dtmp)\n\n d = np.sqrt(d2)\n dot = 0.0\n for v in range(layer.nv):\n dot += prev_layer.nvectors[v] * layer.nwectors[v][j]\n exp_dij = layer.potential(d, layer.zeta) * dot\n\n # Next delta\n next_delta += trans_delta_L_j * exp_dij * trans_sigma_Z_l\n atj = Al_trans * trans_delta_L_j\n dq = exp_dij * atj\n\n # Position gradient\n tmp = -dot * atj * layer.d_potential(d, layer.zeta) / d\n for r in range(layer.nr):\n tr = dr[r] * tmp\n dc_dr[l][r][j] += np.sum(tr)\n dc_dr[l - 1][r] -= np.sum(tr, axis=1)\n\n # Vector gradient\n tmp = dq / dot\n for v in range(layer.nv):\n tv = tmp * prev_layer.nvectors[v]\n dc_dm[l][v][j] += np.sum(tv)\n tv = tmp * layer.nwectors[v][j]\n dc_dn[l - 1][v] += np.sum(tv, axis=1)\n\n l = -1\n while -l < len(self.layers):\n l -= 1\n # Gradient computation\n layer = self.layers[l]\n prev_layer = self.particle_input if -(l-1) > len(self.layers) else self.layers[l-1]\n\n Al = A[l-1]\n Al_trans = Al.transpose()\n\n this_delta = next_delta\n if layer.apply_convolution:\n this_delta = this_delta.reshape((layer.output_size, layer.n_convolution, -1))\n # Bias gradient\n trans_delta = np.sum(this_delta, axis=1).transpose()\n for di, data in enumerate(data_X):\n dc_db[l] += trans_delta[di]\n else:\n # Bias gradient\n trans_delta = this_delta.transpose()\n for di, data in enumerate(data_X):\n dc_db[l] += trans_delta[di]\n\n if prev_layer.apply_convolution:\n next_delta = np.zeros((len(prev_layer.positions_cache[0]), len(data_X)))\n trans_sigma_Z_l = trans_sigma_Z[l - 1] if -(l - 1) <= len(self.layers) else np.ones((len(prev_layer.positions_cache[0]), len(data_X)))\n\n else:\n next_delta = np.zeros((prev_layer.output_size, len(data_X)))\n trans_sigma_Z_l = trans_sigma_Z[l-1] if -(l-1) <= len(self.layers) else np.ones((prev_layer.output_size, len(data_X)))\n\n prev_n_out = prev_layer.output_size * prev_layer.n_convolution\n lpos = None\n lvec = None\n if layer.apply_convolution:\n # use the non-flattened caches\n lpos = layer.positions_cache2.reshape((layer.nr, layer.output_size, layer.n_convolution, len(data_X), 1)) # use the max pool contributor position\n lvec = layer.nwectors_cache.reshape((layer.nw, layer.output_size, layer.n_convolution, 1))\n else:\n lpos = layer.positions\n lvec = layer.nwectors\n\n # Position gradient\n for j in range(layer.output_size):\n this_delta_j = this_delta[j]\n d2 = None\n dot = 0.0\n dr = []\n\n if layer.apply_convolution:\n if prev_layer.apply_convolution:\n d2 = np.zeros((layer.n_convolution, len(data_X), prev_n_out, 1))\n dr = np.zeros((layer.n_convolution, layer.nr, len(data_X), prev_n_out, 1))\n for c in range(layer.n_convolution):\n for a in range(len(data_X)):\n for r in range(layer.nr):\n dtmp = prev_layer.positions_cache[r] - lpos[r][j][c][a]\n d2[c][a] += dtmp ** 2\n dr[c][r][a] += dtmp\n for w in range(layer.nw):\n dot += prev_layer.nvectors_cache[w] * lvec[w][j][c]\n d = np.sqrt(d2)\n\n else:\n d2 = np.zeros((layer.n_convolution, len(data_X), prev_layer.output_size, 1))\n dr = np.zeros((layer.n_convolution, layer.nr, len(data_X), prev_layer.output_size, 1))\n for c in range(layer.n_convolution):\n for a in range(len(data_X)):\n for r in range(layer.nr):\n dtmp = prev_layer.positions[r] - lpos[r][j][c][a]\n d2[c][a] += dtmp ** 2\n dr[c][r][a] += dtmp\n for w in range(layer.nw):\n dot += 
prev_layer.nvectors[w] * lvec[w][j][c]\n d = np.sqrt(d2)\n\n else:\n if prev_layer.apply_convolution:\n d2 = np.zeros_like(prev_layer.positions_cache[0])\n for r in range(layer.nr):\n dtmp = prev_layer.positions_cache[r] - lpos[r][j]\n d2 += dtmp ** 2\n dr.append(dtmp)\n d = np.sqrt(d2)\n for w in range(layer.nw):\n dot += prev_layer.nvectors_cache[w] * lvec[w][j]\n else:\n d2 = np.zeros_like(prev_layer.positions[0])\n for r in range(layer.nr):\n dtmp = prev_layer.positions[r] - lpos[r][j]\n d2 += dtmp ** 2\n dr.append(dtmp)\n d = np.sqrt(d2)\n for w in range(layer.nw):\n dot += prev_layer.nvectors[w] * lvec[w][j]\n\n exp_dij = layer.potential(d, layer.zeta) * dot\n\n # Next delta\n if layer.apply_convolution:\n # Loop through each j-th convolution\n next_delta = next_delta.transpose()\n sigma_Z_l = trans_sigma_Z_l.transpose()\n\n ld_pot = (layer.d_potential(d, layer.zeta) / d).reshape((layer.n_convolution, len(data_X), prev_n_out))\n exp_dij = exp_dij.reshape((layer.n_convolution, len(data_X), prev_n_out))\n this_delta_j = this_delta_j.reshape((layer.n_convolution, len(data_X), 1))\n dr = dr.reshape((layer.n_convolution, layer.nr, len(data_X), prev_n_out))\n\n if prev_layer.apply_convolution:\n for c in range(layer.n_convolution):\n # todo: still not sure why the average works here... max pooling dependent input??\n next_delta += this_delta_j[c] * exp_dij[c] * sigma_Z_l / (layer.n_convolution * len(data_X))\n\n atj = Al * this_delta_j[c]\n dq = (exp_dij[c] * atj).reshape((len(data_X), prev_n_out, 1))\n v_tmp = (dq / dot).reshape((len(data_X), prev_n_out))\n\n jcdot = 0.0\n for w in range(layer.nw):\n jcdot += prev_layer.nvectors_cache[w] * lvec[w][j][c]\n p_tmp = -jcdot.flatten() * atj * ld_pot[c] # only the dot for this j-th conv?\n\n for r in range(layer.nr):\n tr = dr[c][r] * p_tmp\n dc_dr[l][r][j] += np.sum(tr)\n dc_dr[l - 1][r] -= np.sum(np.sum(tr, axis=0).reshape((len(prev_layer.positions[0]), -1)), axis=1)\n\n for w in range(layer.nw):\n tv = v_tmp * prev_layer.nvectors_cache[w].flatten()\n dc_dm[l][w][j] += np.sum(tv)\n tv = v_tmp * layer.nwectors[w][j]\n dc_dn[l - 1][w] += np.sum(np.sum(tv, axis=0).reshape((len(prev_layer.nvectors[0]), -1)), axis=1)\n\n else:\n for c in range(layer.n_convolution):\n next_delta += this_delta_j[c] * exp_dij[c] * sigma_Z_l\n atj = Al * this_delta_j[c]\n dq = (exp_dij[c] * atj).reshape((len(data_X), prev_n_out, 1))\n v_tmp = (dq / dot).reshape((len(data_X), prev_n_out))\n\n jcdot = 0.0\n for w in range(layer.nw):\n jcdot += prev_layer.nvectors[w] * lvec[w][j][c]\n p_tmp = -jcdot.flatten() * atj * ld_pot[c] # only the dot for this j-th conv?\n\n for r in range(layer.nr):\n tr = dr[c][r] * p_tmp\n dc_dr[l][r][j] += np.sum(tr)\n dc_dr[l - 1][r] -= np.sum(tr, axis=0)\n\n for w in range(layer.nw):\n tv = v_tmp * prev_layer.nvectors[w].flatten()\n dc_dm[l][w][j] += np.sum(tv)\n tv = v_tmp * layer.nwectors[w][j]\n dc_dn[l - 1][w] += np.sum(tv, axis=0)\n\n next_delta = next_delta.transpose()\n\n else:\n exp_dij = exp_dij.reshape((-1, 1))\n\n next_delta += this_delta_j * exp_dij * trans_sigma_Z_l\n atj = Al_trans * this_delta_j\n dq = exp_dij * atj\n\n p_tmp = -dot * atj * layer.d_potential(d, layer.zeta) / d\n v_tmp = dq / dot\n\n if prev_layer.apply_convolution:\n for r in range(layer.nr):\n tr = dr[r] * p_tmp\n dc_dr[l][r][j] += np.sum(tr)\n dc_dr[l - 1][r] -= np.sum(np.sum(tr, axis=1).reshape((len(prev_layer.positions[0]), -1)), axis=1)\n\n for w in range(layer.nw):\n tv = v_tmp * prev_layer.nvectors_cache[w]\n dc_dm[l][w][j] += np.sum(tv)\n tv = v_tmp * 
layer.nwectors[w][j]\n dc_dn[l - 1][w] += np.sum(np.sum(tv, axis=1).reshape((len(prev_layer.nvectors[0]), -1)), axis=1)\n else:\n for r in range(layer.nr):\n tr = dr[r] * p_tmp\n dc_dr[l][r][j] += np.sum(tr)\n dc_dr[l - 1][r] -= np.sum(tr, axis=1)\n\n for w in range(layer.nw):\n tv = v_tmp * prev_layer.nvectors[w]\n dc_dm[l][w][j] += np.sum(tv)\n tv = v_tmp * layer.nwectors[w][j]\n dc_dn[l - 1][w] += np.sum(tv, axis=1)\n\n # Restore shapes\n for r in range(self.particle_input.nr):\n self.particle_input.positions[r] = self.particle_input.positions[r].reshape((self.particle_input.output_size, ))\n for v in range(self.particle_input.nv):\n self.particle_input.nvectors[v] = self.particle_input.nvectors[v].reshape((self.particle_input.output_size, ))\n for w in range(self.particle_input.nw):\n self.particle_input.nwectors[w] = self.particle_input.nwectors[w].reshape((self.particle_input.output_size, ))\n for layer in self.layers:\n for r in range(layer.nr):\n layer.positions[r] = layer.positions[r].reshape((layer.output_size, ))\n for v in range(layer.nv):\n layer.nvectors[v] = layer.nvectors[v].reshape((layer.output_size, ))\n for w in range(layer.nw):\n layer.nwectors[w] = layer.nwectors[w].reshape((layer.output_size, ))\n\n if layer.apply_convolution:\n layer.positions_cache = layer.positions_cache.reshape((layer.nr, len(layer.positions_cache[0]), ))\n layer.nvectors_cache = layer.nvectors_cache.reshape((layer.nv, len(layer.nvectors_cache[0]), ))\n layer.nwectors_cache = layer.nwectors_cache.reshape((layer.nw, len(layer.nwectors_cache[0]), ))\n\n # Regularizer\n if self.regularizer is not None:\n # dc_dr = self.regularizer.cost_gradient(self.particle_input, self.layers, dc_dr)\n dc_dn, dc_db = self.regularizer.cost_gradient(self.particle_input, self.layers, dc_dn, dc_db)\n\n return dc_db, dc_dr, dc_dn, dc_dm\n\n def fit(self, data_X, data_Y, optimizer):\n \"\"\"\n Run the optimizer for specified number of epochs\n\n :param data_X:\n :param data_Y:\n :return:\n \"\"\"\n\n return optimizer.optimize(self, data_X, data_Y)\n\n def write_to_json(self, file=None):\n \"\"\"\n Write network data to file in JSON format\n :param file: a file open for writing\n :return:\n \"\"\"\n pass\n","sub_path":"src/calrissian/particle_vector_n_network_local_conv4.py","file_name":"particle_vector_n_network_local_conv4.py","file_ext":"py","file_size_in_byte":20170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"89247426","text":"from songs import Songs\nfrom playlists import Playlists\nfrom wordpool import WordPool\nfrom settings import Settings\nimport functions as func\n\nimport os\n\n\ndef search_and_save():\n pls = Playlists()\n # 获得前50项歌单\n pls.get_playlists(st)\n\n if not os.path.exists('res/' + st.csv_fname + '.csv'):\n # 递归下载歌单\n pls.recur_playlists(st)\n\n\ndef single_playlist():\n if not os.path.exists('res/' + st.csv_fname + '.csv'):\n # 新建一个歌单类\n s = Songs()\n s.get_plist(st.playlist_url, st)\n s.get_lyric()\n func.songs_to_csv(s.songs, st)\n\n\nif __name__ == \"__main__\":\n st = Settings()\n # 新建一个词池\n w = WordPool()\n\n if st.toggle == True:\n search_and_save()\n else:\n single_playlist()\n w.get_wordpool(st)\n if st.word_rank:\n w.word_freq(st)\n w.generate_wordcloud()\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"8773119","text":"#!/usr/bin/env python\n\"\"\"Run the fastqc pipeline.\n\nThis runs FastQC on a number of files.\n\nIt requires a YAML configuration file with parameters for FastQC (output directory, etc.)\nIt also requires a samples file that has at least a column named 'sample' and 'filename'.\n\n\"\"\"\n\nimport csv\nimport sys\nimport os\nimport argparse\nimport subprocess\n\nfrom ruffus import *\nimport yaml\n\nfrom ccrngspy.tasks import FastQC\nfrom ccrngspy import utils\n\nfrom ccrngspy.pipeline import fastqc_helpers\n\nlogger = utils.make_local_logger(\"FastQC logging\", level=\"debug\", color=\"green\")\n\nparser = argparse.ArgumentParser(description=\"Run fastqc on files.\")\n\nparser.add_argument(\"--print_only\", dest=\"print_only\", action=\"store_true\", default=False,\n help=\"Don't run the pipeline, just print what will be run.\")\n\nparser.add_argument('--config_file', dest=\"config_file\", type=str,\n help=\"A YAML configuration file for pipeline.\")\n\nparser.add_argument('--sample_file', dest=\"sample_file\", type=str,\n help=\"A YAML configuration file for pipeline.\")\n\n# add options for the fastqc task\nparser = FastQC.FastQC().argparse(parser)\n\n# Parse the options\nopts = parser.parse_args()\n\n# Load the bootstrap config file\nwith open(opts.config_file, 'r') as configfile:\n config = yaml.load(configfile)\n\n# Load the samples tab-separated file\nwith open(opts.sample_file, 'r') as samplefile:\n reader = csv.DictReader(samplefile, delimiter=\"\\t\")\n samples = list(reader)\n\n\ntest_task_params = make_fastqc_param_list(samples=samples, config=config)\n\n#----------------------------------------------\n# begin tasks here\n#----------------------------------------------\n\n@files(test_task_params)\ndef run_fastqc(input, output, params=None):\n \"\"\"Set up and run the fastqc program.\n \n \"\"\"\n\n fastqc_task = FastQC.FastQC(input_files=[input], output_directory=config['fastqc_params']['output_dir'])\n fastqc_task.run_fastqc()\n\n # post task, touch output file!\n of = file(output, mode=\"w\")\n of.close()\n\nif opts.print_only:\n pipeline_printout(sys.stdout, [run_fastqc])\nelse:\n pipeline_run([run_fastqc], multiprocess=5)\n\n","sub_path":"scripts/fastqc.py","file_name":"fastqc.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"549918924","text":"from time import sleep\n\n\n\n\n#TEST\nclass s_IA():\n\tdef __init__(self):\n\t\tself.gyro_z_offset = 0\n\t\tself.gyro_pos = 0 # 0N 90E 180S 270O\n\t\tself.flag = 0# <180 0 >180 1 \n\t\tself.tour = 5\n\n\t\tself.coef = 1.5\n\n\tdef go(self, robot):\n\t\tself.gyro_z_offset = robot.input_sensor.gyro(2, robot)\n\t\tself.gyro_pos += self.tour*360 + self.gyro_z_offset + 180\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\trobot.input_sensor.scan_moy(4, robot)\n\t\t\t\tif robot.input_sensor.ultra_son_moy[1] > 30:\n\t\t\t\t\tself.aller_droite(robot)\n\t\t\t\t\tself.aller_tout_droit(robot, 1/(self.coef) + 0.5, 1)\n\t\t\t\telif robot.input_sensor.ultra_son_moy[0] > 30:\n\t\t\t\t\tself.aller_tout_droit(robot, 0.1, 0)\n\t\t\t\telif robot.input_sensor.ultra_son_moy[3] > 30:\n\t\t\t\t\tself.aller_gauche(robot)\n\t\t\t\t\tself.aller_tout_droit(robot, 1/(self.coef) + 0.5, 1)\n\t\t\t\telse:\n\t\t\t\t\tself.aller_gauche(robot)\n\t\t\t\t\tself.aller_gauche(robot)\n\n\t\t\texcept:\n\t\t\t\tprint(\"ERREUR ! go IA\")\n\t\t\t\trobot.output.traitement_input(robot, [1, 0, 0, 0, 0])\n\n\tdef aller_gauche(self, robot):\n\t\tprint(\"IA : GAUCHE \")\n\t\trobot.output.traitement_input(robot, [1, 10*(self.coef), -20*(self.coef), 1, 0])\n\t\twhile self.pi360(robot) < self.gyro_pos + 90:\n\t\t\tsleep(0.03)\n\t\t\tprint(\"gauche cur:%d obj:%d gyro_pos:%d\" %(self.pi360(robot), self.gyro_pos + 90, self.gyro_pos))\n\t\tself.gyro_pos += 90\n\n\tdef aller_droite(self, robot):\n\t\tprint(\"IA : DROITE \")\n\t\trobot.output.traitement_input(robot, [1, 10*(self.coef), 20*(self.coef), 1, 0])\n\t\twhile self.pi360(robot) > self.gyro_pos - 90:\n\t\t\tsleep(0.03)\n\t\t\tprint(\"droite cur:%d obj:%d gyro_pos:%d\" %(self.pi360(robot), self.gyro_pos - 90, self.gyro_pos))\n\t\tself.gyro_pos -= 90\n\n\tdef aller_tout_droit(self, robot, temps, assmur):#assmur 0 oui, 1 non\n\t\tprint(\"IA : T.D. 
\")\n\t\ttemps_int = int(temps / 0.1)\n\t\tfor i in range(temps_int):\n\n\t\t\trobot.input_sensor.scan_moy(1, robot)\n\t\t\trobot.output.traitement_input(robot, [1, 30*(self.coef), self.pi360(robot)-self.gyro_pos, assmur, 0])\n\t\t\tsleep(0.1)\n\t\t\n\t\t#robot.output.traitement_input(robot, [1, 0, 0, 0, 0])\n\n\tdef pi360(self, robot):\n\t\tgy = robot.input_sensor.gyro(3, robot) + 180 - self.gyro_z_offset\n\t\tgy_mod = (gy+self.tour*360)%360\n\t\tif(gy_mod > 90 and gy_mod < 270):\n\t\t\tif(gy_mod > 180 and self.flag == 0):\n\t\t\t\tself.flag = 1\n\t\t\telif(gy_mod < 180 and self.flag == 1):\n\t\t\t\tself.flag = 0\n\n\t\tif(gy_mod < 90 and self.flag == 1):\n\t\t\tself.flag = 0\n\t\t\tself.tour += 1\n\t\telif(gy_mod > 270 and self.flag == 0):\n\t\t\tself.flag = 1\n\t\t\tself.tour -= 1\n\n\t\treturn robot.input_sensor.gyro(3, robot) + 180 - self.gyro_z_offset + self.tour*360\n\n\n\n\n\n\n\n\n\n\n\n\n#CLASS\n#class s_Carte():\n#\tdef __init__(self):\n#\t\tself.intersection = [s_Intersection()]\n#\t\tself.intersection[0].dir_set([2, 0, 3, 0], [None, None, None, None])\n#\t\tself.inter_last\n#\t\tself.eg\n#\n#\tdef new_intersection(self):\n#\t\tself.\n#\n#\n#class s_Intersection():\n#\tdef __init__(self):\n#\t\tself.dir = []\n#\t\tself.pointer = []\n#\t\tself.sens = []\n#\n#\n#\n#\tdef dir_set(self, dir_tab, dir_pointer):\n#\t\t#0 mur, 1 chemain connu, 2 chemin inconnu, 3 sortie\n#\n#\t\tself.dir = [dir_tab[0], dir_tab[1], dir_tab[2], dir_tab[3]]\n#\t\tself.pointer = [dir_pointer[0], dir_pointer[1], dir_pointer[2], dir_pointer[3]]\n#\t\t#0,1,2,3 -> sens connu; -1 inconnu; -2 sortie\n#\t\tself.sens = [-1, -1, -1, -1]\n#\t\treturn 0","sub_path":"nationales/codeBrouillon/PYTHON/IA.py","file_name":"IA.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"566247040","text":"from nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom document import Document\nfrom string import punctuation\nfrom demoji import findall\nimport stemmer\n\n\nclass Parse:\n\n numeric_counter = 0\n\n months = {\"jan\": \"1\", \"january\": \"1\", \"feb\": \"2\", \"february\": \"2\", \"mar\": \"3\", \"march\": \"3\", \"apr\": \"4\",\n \"april\": \"4\", \"may\": \"5\", \"jun\": \"6\", \"june\": \"6\", \"jul\": \"7\", \"july\": \"7\", \"aug\": \"8\", \"august\": \"8\",\n \"sep\": \"9\", \"september\": \"9\", \"oct\": \"10\", \"october\": \"10\", \"nov\": \"11\", \"november\": \"11\", \"dec\": \"12\",\n \"december\": \"12\"}\n\n days = {\"first\": \"1\", \"1st\": \"1\", \"second\": \"2\", \"2nd\": \"2\", \"third\": \"3\", \"fourth\": \"4\", \"4th\": \"4\", \"fifth\": \"5\",\n \"5th\": \"5\", \"sixth\": \"6\", \"6th\": \"6\", \"seventh\": \"7\", \"7th\": \"7\", \"eighth\": \"8\", \"8th\": \"8\", \"ninth\": \"9\",\n \"9th\": \"9\", \"tenth\": \"10\", \"10th\": \"10\", \"eleventh\": \"11\", \"11th\": \"11\", \"twelfth\": \"12\", \"12th\": \"12\",\n \"thirteenth\": \"13\", \"13th\": \"13\", \"fourteenth\": \"14\", \"14th\": \"14\", \"fifteenth\": \"15\", \"15th\": \"15\",\n \"sixteenth\": \"16\", \"16th\": \"16\", \"seventeenth\": \"17\", \"17th\": \"17\", \"eighteenth\": \"18\", \"18th\": \"18\",\n \"nineteenth\": \"19\", \"19th\": \"19\", \"twentieth\": \"20\", \"twenty\": \"20\", \"20th\": \"20\", \"twenty-first\": \"21\",\n \"21tst\": \"21\", \"22nd\": \"22\", \"twenty-second\": \"22\", \"23rd\": \"23\", \"twenty-third\": \"23\", \"24th\": \"24\",\n \"twenty-fourth\": \"24\", \"25th\": \"25\", \"twenty-fifth\": \"25\", \"26th\": \"26\", \"twenty-sixth\": \"26\",\n \"27th\": \"27\", \"twenty-seventh\": \"27\", \"28th\": \"28\", \"twenty-eighth\": \"28\", \"twenty-ninth\": \"29\",\n \"29th\": \"29\", \"30th\": \"30\", \"thirty\": \"30\", \"31st\": \"31\", \"thirty-first\": \"31\"}\n\n def __init__(self):\n self.stop_words = stopwords.words('english')\n self.stop_words.extend([\"rt\", \"n't\", \"'re\", \"gon\", \"na\", \"covid\", \"coronavirus\", \"covid-19\"])\n self.punctuation_to_remove = punctuation.replace('#', '').replace('@', '').replace('%', '').replace('$', '')\n self.symbols = \"<>:\\\"/\\\\|!?*~.'`-_()^,+=;\"\n self.token_stemmer = stemmer.Stemmer()\n\n def get_valid_url(self, url_col):\n \"\"\"\n :param url_col: \"urls\" column or \"retweet_urls\" or \"quote_urls\" columns\n :return: pure valid url or empty string if no valid url was present. 
format - {\"\":\"return_value\"}\n \"\"\"\n\n if url_col != \"{}\":\n trans_table = url_col.maketrans(\"\\\"\", \" \")\n urls = url_col.translate(trans_table)\n urls = urls.split()\n if len(urls) == 5:\n return urls[3]\n return \"\"\n\n def parse_hashtag_underscore(self, text_tokens, i):\n \"\"\"\n this function deals with hashtags of the form #stay_at_home\n :param text_tokens: list of tokens that is changed according to the given rules\n :param i: the index of the \"#\" token\n \"\"\"\n token = text_tokens[i + 1]\n del text_tokens[i + 1]\n joined_hashtag = '#'\n insertion_index = 0\n num_inserted = 0\n splited_tokens = token.split(\"_\")\n for j in range(len(splited_tokens)):\n if splited_tokens[j] != \"\":\n text_tokens.insert(i + 1 + insertion_index, splited_tokens[j].lower())\n insertion_index += 1\n num_inserted += 1\n joined_hashtag += splited_tokens[j]\n text_tokens[i] = joined_hashtag\n\n def parse_hashtag_camel_case(self, text_tokens, i):\n \"\"\"\n this function parses hashtags of the the type #StayAtHome #stayAtHome\n :param text_tokens: list of tokens that is changed\n :param i: \"#\" index\n \"\"\"\n token = text_tokens[i + 1]\n del text_tokens[i + 1]\n j = 0\n joined_hashtag = '#'\n from_index = 0\n insertion_index = 0\n while j < len(token):\n if token[j].isupper() and j != 0:\n text_tokens.insert(i + 1 + insertion_index, token[from_index:j].lower())\n joined_hashtag += token[from_index:j].lower()\n from_index = j\n insertion_index += 1\n j += 1\n if token[from_index:len(token)] != '':\n joined_hashtag += token[from_index:len(token)].lower()\n text_tokens.insert(i + 1 + insertion_index, token[from_index:len(token)].lower())\n text_tokens[i] = joined_hashtag\n\n def parse_hashtag_upper_case(self, text_tokens, i):\n\n \"\"\"\n this function parses hashtags of the the type #COVID19 #NJ\n :param text_tokens: list of tokens that is changed\n :param i: \"#\" index\n \"\"\"\n joined_hashtag = '#'\n joined_hashtag += text_tokens[i+1].lower()\n text_tokens[i] = joined_hashtag # \"#covid19\"\n\n def parse_hashtag(self, text_tokens, i):\n \"\"\"\n this function calls to parse underscore or parse camel case respectively\n :param - i the index of\n :return - return False if the hashtag contained not ascii values else return True\n \"\"\"\n\n if len(text_tokens) > i + 1 and not text_tokens[i+1].isascii():\n del text_tokens[i] # deleting ashtag\n del text_tokens[i] # deleting not ascii symbol\n return False\n\n # parsing snake case\n if len(text_tokens) > i + 1 and text_tokens[i + 1].count('_') > 0:\n self.parse_hashtag_underscore(text_tokens, i)\n\n elif len(text_tokens) > i+1 and text_tokens[i+1].isupper():\n self.parse_hashtag_upper_case(text_tokens, i)\n\n # parsing pascal and camel cases\n elif len(text_tokens) > i + 1:\n self.parse_hashtag_camel_case(text_tokens, i)\n return True\n\n def parse_tagging(self, text_tokens, i):\n \"\"\"\n this function appends @ and name that our tokenizer separates\n :param text_tokens: list of tokens\n :param i: index of '@'\n :return:\n \"\"\"\n if len(text_tokens) > i + 1:\n text_tokens[i] += text_tokens[i + 1]\n del text_tokens[i + 1]\n\n def parse_url(self, text_tokens, i):\n \"\"\"\n this function parses url according to the rules.\n :param text_tokens: list of tokens\n :param i: index of \"https\"\n \"\"\"\n del text_tokens[i] # removing 'https or http'\n if len(text_tokens) > i and text_tokens[i] == \":\":\n if text_tokens[i] == ':':\n del text_tokens[i] # removing ':'\n\n link_token = text_tokens[i]\n\n tokens_in_url = 
link_token.split(\"/\")\n del text_tokens[i]\n\n token_index = 0\n while token_index < len(tokens_in_url):\n if tokens_in_url[token_index] == \"t.co\":\n break\n if tokens_in_url[token_index] != \"twitter.com\" and tokens_in_url[token_index] != \"\":\n text_tokens.insert(i + token_index, tokens_in_url[token_index].lstrip(\"w.\"))\n token_index += 1\n\n def is_float(self, number):\n\n \"\"\"\n Verify if a string can be converted to float\n :param number - string to be converted\n :return Boolean - can be converted or not\n \"\"\"\n\n try:\n float(number.replace(\",\", \"\"))\n if number.lower() != \"infinity\":\n return True\n except ValueError:\n return False\n\n def parse_numeric_values(self, text_tokens, index):\n\n \"\"\"\n Parse numeric tokens according to specified rules.\n Any number in the thousands, millions and billions will be abbreviated to #K, #M and #B respectively\n Any number signifying percentage will be shown as #%\n Fractions of the format #/# will stay the same\n :param text_tokens: list of tokens to be parsed\n :param index: index of currently parsed token\n \"\"\"\n\n self.numeric_counter += 1\n token = text_tokens[index]\n numeric_token = float(token.replace(\",\", \"\"))\n\n # format large numbers\n # any number in the thousands, millions and billions will be abbreviated to #K, #M and #B respectively\n if 1000 <= numeric_token < 1000000:\n formatted_token = \"{num:.3f}\".format(num=(numeric_token / 1000)).rstrip(\"0\").rstrip(\".\") + \"K\"\n text_tokens[index] = formatted_token\n elif len(text_tokens) > index + 1 and text_tokens[index + 1].lower() == \"thousand\":\n formatted_token = str(numeric_token).rstrip(\"0\").rstrip(\".\") + \"K\"\n text_tokens[index] = formatted_token\n del text_tokens[index + 1]\n elif 1000000 <= numeric_token < 1000000000:\n formatted_token = \"{num:.3f}\".format(num=numeric_token / 1000000).rstrip(\"0\").rstrip(\".\") + \"M\"\n text_tokens[index] = formatted_token\n elif len(text_tokens) > index + 1 and text_tokens[index + 1].lower() == \"million\":\n formatted_token = str(numeric_token).rstrip(\"0\").rstrip(\".\") + \"M\"\n text_tokens[index] = formatted_token\n del text_tokens[index + 1]\n elif 1000000000 <= numeric_token:\n formatted_token = \"{num:.3f}\".format(num=numeric_token / 1000000000).rstrip(\"0\").rstrip(\".\") + \"B\"\n text_tokens[index] = formatted_token\n elif len(text_tokens) > index + 1 and text_tokens[index + 1].lower() == \"billion\":\n formatted_token = str(numeric_token).rstrip(\"0\").rstrip(\".\") + \"B\"\n text_tokens[index] = formatted_token\n del text_tokens[index + 1]\n\n # parse percentage\n # any number signifying percentage will be shown as #%\n if len(text_tokens) > index + 1:\n lower_case_next_token = text_tokens[index + 1].lower()\n if lower_case_next_token == \"%\" or lower_case_next_token == \"percent\" \\\n or lower_case_next_token == \"percentage\":\n formatted_token = str(numeric_token).rstrip(\"0\").rstrip(\".\") + \"%\"\n text_tokens[index] = formatted_token\n del text_tokens[index + 1]\n\n def parse_date(self, text_tokens, index):\n \"\"\"\n this function calls the appropriate function to parse a date.\n :param text_tokens: list of tokens\n :param index: the index of the month or the index of the 'MM/DD/YY' token\n :return: - reduction of the index as a result of deletion of previous tokens\n in some cases such as '15th of July' we want to delete '15' and 'of' and insert '7~15'\n in this cases we should bring the index back\n \"\"\"\n\n if text_tokens[index].lower() in self.months:\n return 
self.parse_date_according_to_month(text_tokens, index)\n\n if text_tokens[index].count(\"/\") == 2:\n self.parse_date_slash(text_tokens, index)\n return 0\n\n def parse_date_according_to_month(self, text_tokens, index):\n \"\"\"\n parsing date of format '15 of Jun' or '15th of June' etc. to 'MM~DD' format\n :return - reduction of the index as a result of deletion of previous tokens\n in some cases such as '15th of July' we want to delete '15' and 'of' and insert '7~15'\n in this cases we should bring the index back\n \"\"\"\n\n if len(text_tokens) > index + 1 and text_tokens[index].lower() in self.months:\n if text_tokens[index + 1] in self.days: # July 15th\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + self.days.get(text_tokens[index + 1])\n del text_tokens[index + 1]\n elif text_tokens[index + 1].isnumeric(): # July 15\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + str(int(text_tokens[index + 1]))\n del text_tokens[index + 1]\n elif index - 1 >= 0:\n if text_tokens[index - 1] in self.days: # 15th July\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + self.days.get(text_tokens[index - 1])\n del text_tokens[index - 1]\n return 1\n elif text_tokens[index - 1].isnumeric(): # 15 July\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + str(int(text_tokens[index - 1]))\n del text_tokens[index - 1]\n return 1\n elif text_tokens[index - 1] == \"of\" and index - 2 >= 0 \\\n and text_tokens[index - 2] in self.days: # 15th of July\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + self.days.get(text_tokens[index - 2])\n del text_tokens[index - 1] # delete for\n del text_tokens[index - 1] # delete 15th\n return 2\n\n elif text_tokens[index - 1] == \"of\" and text_tokens[index - 2].isnumeric(): # 15 of july\n text_tokens[index] = self.months.get(text_tokens[index].lower()) + \\\n \"~\" + str(int(text_tokens[index - 2]))\n del text_tokens[index - 1] # delete for\n del text_tokens[index - 1] # delete 15\n return 2\n return 0\n\n def parse_date_slash(self, text_tokens, index):\n \"\"\"\n parse date with slash 'MM/DD/YY'\n to ['MM~DD', 'YY']\n @:param - index of the 'MM/DD/YY' token\n \"\"\"\n\n splitted_date = text_tokens[index].split(\"/\")\n if len(splitted_date) == 3 and splitted_date[0].isnumeric() and splitted_date[1].isnumeric() \\\n and splitted_date[2].isnumeric():\n if int(splitted_date[0]) in range(0, 13) and int(splitted_date[1]) in range(0, 32):\n text_tokens[index] = str(int(splitted_date[0])) + \"~\" + str(int(splitted_date[1]))\n text_tokens.insert(index + 1, splitted_date[2])\n\n def parse_fraction(self, text_tokens, index):\n \"\"\"\n this function parses fraction according to given rules ['35', '3/4'] - > ['35 3/4']\n :param text_tokens:\n :param index:\n :return:\n \"\"\"\n\n splited_fruction = text_tokens[index].split(\"/\")\n if index - 1 > 0 and text_tokens[index - 1].isnumeric() and \\\n splited_fruction[0].isnumeric and splited_fruction[1].isnumeric():\n text_tokens[index - 1] = text_tokens[index - 1] + \" \" + text_tokens[index]\n del text_tokens[index]\n return True\n else:\n return False\n\n def parse_entities(self, text_tokens, index, entities):\n\n \"\"\"\n Identify possible entities in the document.\n A possible entity is any sequence of tokens starting with a capital letter\n :param text_tokens: list of tokens to be parsed\n :param index: index of current parsed token\n :param entities: dictionary of possible 
entities\n \"\"\"\n current_token = text_tokens[index]\n entity = \"\"\n\n # find a sequence of terms with capital letters\n while index + 1 < len(text_tokens) and current_token[0].isupper():\n entity += current_token + \" \"\n index += 1\n current_token = text_tokens[index]\n entity.rstrip(\" \")\n\n # add new possible entity to dictionary\n if entity != \"\":\n if entity not in entities:\n entities[entity] = 1\n else:\n entities[entity] += 1\n\n def parse_capital_letters(self, tokenized_text, term_dict):\n\n \"\"\"\n Parses token according to capital letters rule.\n Ensures a uniform appearance of tokens - if a token only appears in capital form - record as upper case\n Else, record in lower case\n :param tokenized_text - list, list of parsed tokens\n :param term_dict - dictionary, record uniform token appearance according to rule in currently parsed document\n \"\"\"\n\n index = 0\n while index < len(tokenized_text):\n\n token = tokenized_text[index]\n\n if token != '':\n\n # save token as upper case\n # save token as lower and upper case\n formatted_token_lower = token.lower()\n formatted_token_upper = token.upper()\n\n # Add token to term dictionary\n # In the dictionary keep the term_frequency\n # term_frequency - how many times the term appeared in the document\n # key indicates if term is capital or lower case\n\n # Check if first letter is a capital letter\n if token[0].isupper():\n # check in which form the token appears in dictionary and update it accordingly\n if formatted_token_upper not in term_dict and formatted_token_lower not in term_dict:\n term_dict[formatted_token_upper] = 1\n elif formatted_token_upper in term_dict:\n term_dict[formatted_token_upper] += 1\n else: # formatted_token_lower in capitals\n term_dict[formatted_token_lower] += 1\n\n # If current term is lower case change key to lower case\n else:\n # check in which form the token appears in dictionary and update it accordingly\n if formatted_token_upper not in term_dict and formatted_token_lower not in term_dict:\n term_dict[formatted_token_lower] = 1\n elif formatted_token_upper in term_dict: # replace format of token from upper case to lower case\n term_dict[formatted_token_lower] = term_dict[formatted_token_upper] + 1\n term_dict.pop(formatted_token_upper, None) # remove upper case form from the dictionary\n else: # formatted_token_lower in capitals\n term_dict[formatted_token_lower] += 1\n\n index += 1\n\n def parse_sentence(self, text, entities=None, stemming=False):\n\n \"\"\"\n This function tokenize, remove stop words and apply lower case for every word within the text\n :param text: string - text to be parsed\n :param entities: dictionary - record possible entities in currently parsed document\n :param stemming: boolean variable True - with stemming, False - without stemming\n :return: list of parsed tokens\n \"\"\"\n\n text_tokens = word_tokenize(text)\n\n index = 0\n while index < len(text_tokens):\n\n if text_tokens[index].lower() not in self.stop_words\\\n and text_tokens[index] not in self.punctuation_to_remove\\\n and text_tokens[index].isascii():\n\n # removing unnecessary symbols\n text_tokens[index] = text_tokens[index].rstrip(self.symbols).lstrip(self.symbols)\n if text_tokens[index] == \"\":\n del text_tokens[index]\n continue\n\n if text_tokens[index] == '#':\n if not self.parse_hashtag(text_tokens, index):\n continue\n elif text_tokens[index] == '@':\n self.parse_tagging(text_tokens, index)\n elif text_tokens[index] == 'https' or text_tokens[index] == 'http':\n self.parse_url(text_tokens, 
index)\n continue\n\n # parse numeric values\n elif self.is_float(text_tokens[index]):\n self.parse_numeric_values(text_tokens, index)\n\n # parse dates\n elif text_tokens[index].lower() in self.months or \\\n text_tokens[index].count(\"/\") == 2:\n index -= self.parse_date(text_tokens, index)\n\n # parse fractions\n elif text_tokens[index].count(\"/\") == 1:\n if self.parse_fraction(text_tokens, index):\n continue\n\n # parse entities\n # entity is every sequence of tokens starting with a capital letter \\\n # and appearing at least twice in the entire corpus\n if index + 1 < len(text_tokens) and text_tokens[index][0].isupper() \\\n and text_tokens[index + 1][0].isupper():\n self.parse_entities(text_tokens, index, entities)\n\n # apply stemmer if stemming is True\n if stemming and len(text_tokens[index]) > 0 and text_tokens[index][0] not in \"@#\":\n after_stemming = self.token_stemmer.stem_term(text_tokens[index])\n if after_stemming != '':\n text_tokens[index] = after_stemming\n\n if len(text_tokens[index]) == 1:\n del text_tokens[index]\n continue\n\n index += 1\n else:\n if not text_tokens[index].isascii():\n # token is not ascii\n valid_token = ''\n for char in text_tokens[index]:\n if char.isascii():\n valid_token += char # separate valid token from the ascii symbol appended to him\n else:\n # parsing emoji\n emoji = [*findall(char).values()] # unpack single emoji token and put in list\n if len(emoji) > 0 and emoji[0] not in text_tokens:\n text_tokens.append(emoji[0])\n if len(emoji[0].split()) > 1:\n # add to text tokens emojis such as: 'smiling face', 'smiling', 'face'\n for emoji_token in emoji[0].split():\n text_tokens.append(emoji_token)\n\n if valid_token != '': # append the valid toke\n text_tokens[index] = valid_token\n\n # apply stemmer if stemming is True\n if stemming and valid_token[0] not in \"@#\":\n after_stemming = self.token_stemmer.stem_term(valid_token)\n if after_stemming != '':\n text_tokens[index] = after_stemming\n\n else:\n del text_tokens[index] # not ascii symbols that we want to delete\n else:\n del text_tokens[index] # RT or punctuation that is in ascii\n\n if index > 0 and text_tokens[index - 1] == '':\n del text_tokens[index]\n\n return text_tokens\n\n def prep_url(self, url):\n \"\"\"\n remove unnecessary signs from urls and not meaningful digits and letters\n \"\"\"\n trans_table = url.maketrans(\"\\\\/|=<>.?%-:_\", \" \")\n parsed_url = url.translate(trans_table)\n parsed_url_tokens = parsed_url.split()\n token_index = 0\n while token_index < len(parsed_url_tokens):\n if parsed_url_tokens[token_index].isdigit() \\\n or len(parsed_url_tokens[token_index]) == 1 \\\n or parsed_url_tokens[token_index] in [\"www\", \"co\", \"com\", \"twitter\", \"status\", \"web\", \"https\",\n \"http\"]:\n parsed_url_tokens.remove(parsed_url_tokens[token_index])\n continue\n token_index += 1\n\n return \" \".join(parsed_url_tokens)\n\n def remove_shortened_urls(self, full_text):\n try:\n full_text.index(\"https\")\n return \" \".join(filter(lambda splitted: splitted[:5] != 'https', full_text.split()))\n except ValueError:\n return full_text\n\n def parse_doc(self, doc_as_list, stemming=False):\n \"\"\"\n This function takes a tweet document as list and break it into different fields\n :param stemming: Whether to performe stemming or not\n :type stemming: bool\n :param doc_as_list: list re-presenting the tweet.\n :return: Document object with corresponding fields.\n \"\"\"\n\n url = \"\"\n retweet_url = \"\"\n quote_url = \"\"\n tweet_id = doc_as_list[0]\n tweet_date = 
doc_as_list[1]\n full_text = doc_as_list[2]\n full_text = self.remove_shortened_urls(full_text)\n if doc_as_list[3] and doc_as_list[3] != \"\":\n url = self.get_valid_url(doc_as_list[3])\n url = self.prep_url(url)\n retweet_text = doc_as_list[5]\n if doc_as_list[6] and doc_as_list[6] != \"\":\n retweet_url = self.get_valid_url(doc_as_list[6])\n retweet_url = self.prep_url(retweet_url)\n quote_text = doc_as_list[8]\n if quote_text:\n quote_text = self.remove_shortened_urls(quote_text)\n else:\n quote_text = \"\"\n if doc_as_list[9] and doc_as_list[9] != \"\":\n quote_url = self.get_valid_url(doc_as_list[9])\n quote_url = self.prep_url(quote_url)\n term_dict = {}\n\n # dictionary for holding possible entities\n entities = dict()\n\n pre_processed_text = full_text + \" \" + quote_text + \" \" + url + \" \" + retweet_url + \" \" + quote_url\n tokenized_text = self.parse_sentence(pre_processed_text, entities, stemming)\n\n doc_length = len(tokenized_text) # after text operations.\n\n # parse token by lower or upper case rule\n # parsing will build the term dictionary in a uniform upper/lower form and calculate the term frequency\n self.parse_capital_letters(tokenized_text, term_dict)\n\n max_tf = 0\n for tf in term_dict.values():\n if tf > max_tf:\n max_tf = tf\n\n unique_term_number = len(term_dict.keys())\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length, tweet_date, unique_term_number, entities, max_tf)\n return document\n","sub_path":"parser_module.py","file_name":"parser_module.py","file_ext":"py","file_size_in_byte":26232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"604226413","text":"# =============================================================================\n# Authors: PAR Government\n# Organization: DARPA\n#\n# Copyright (c) 2016 PAR Government\n# All rights reserved.\n#==============================================================================\nfrom math import log\nimport numpy as np\n\n\ndef packImgBits(img, bits_to_use=16):\n \"\"\"\n :param img:\n :param bits:\n :return:\n @type img: numpy.ndarray\n @type bits: int\n \"\"\"\n shift_bits = img.dtype.itemsize * 8 - bits_to_use\n max_image = np.max(img)\n hist, bin_edges = np.histogram(img, bins=range(max_image + 2), )\n # shift the image histogram to the left to remove unused bins\n # find the second histogram bin that has more than 10 values\n # and subtract it from every pixel value\n adjustment_amount = np.argwhere(hist > 10)[1][0]\n img = np.clip(img.astype('int64') - adjustment_amount, 0, max_image).astype(img.dtype)\n # drop the number of LSBs\n img = np.right_shift(img, shift_bits)\n return img\n\ndef get_gauss_kernel(size=3,sigma=1):\n center=(int)(size/2)\n kernel=np.zeros((size,size))\n for i in range(size):\n for j in range(size):\n diff=np.sqrt((i-center)**2+(j-center)**2)\n kernel[i,j]=np.exp(-(diff**2)/(2*sigma**2))\n return kernel/np.sum(kernel)\n\ndef packImgBitsFFT(img, max_bits=5):\n \"\"\"\n :param img:\n :param bits:\n :return:\n @type img: numpy.ndarray\n @type bits: int\n \"\"\"\n from scipy import fftpack, signal\n amount = (1<<16) if img.dtype == np.uint16 else (1<<8)\n img_output = img.copy()\n kernel = get_gauss_kernel(21,1)\n #freq_kernel = fftpack.fft2(fftpack.ifftshift(kernel))\n for c in range( img.shape[2]):\n im_fft = np.fft.fft2(img[:,:,c])\n convolved = signal.convolve2d(im_fft.real, kernel)\n im_blur = fftpack.ifft2(convolved).real\n diff_x = (im_blur.shape[0] - img_output.shape[0])/2\n diff_y = (im_blur.shape[1] - img_output.shape[1])/2\n img_output[:,:,c] = (amount * im_blur / np.max(im_blur))[diff_x:-diff_x,diff_y:-diff_y]\n return img_output\n\n\ndef packImgBitsS(img, max_bits=5):\n \"\"\"\n :param img:\n :param bits:\n :return:\n @type img: numpy.ndarray\n @type bits: int\n \"\"\"\n from scipy import fftpack, signal\n amount = (1<<16) if img.dtype == np.uint16 else (1<<8)\n\n t = np.linspace(-10, 10, 30)\n bump = np.exp(-0.1 * t ** 2)\n bump /= np.trapz(bump) # normalize the integral to 1\n # make a 2-D kernel out of it\n kernel = bump[:, np.newaxis] * bump[np.newaxis, :]\n kernel_ft = fftpack.fft2(kernel, shape=img.shape[:2], axes=(0, 1))\n # convolve\n img_ft = fftpack.fft2(img, axes=(0, 1))\n # the 'newaxis' is to match to color direction\n img2_ft = kernel_ft[:, :, np.newaxis] * img_ft\n img2 = fftpack.ifft2(img2_ft, axes=(0, 1)).real\n img_output = img2.copy()\n return img_output","sub_path":"maskgen/algorithms/histogram_changes.py","file_name":"histogram_changes.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"488229118","text":"import numpy as np\n\ninputFile = '../resources/input03.txt'\n\n\n#### PART 1\n\ndef readFile(f):\n\tarray = []\n\twith open(inputFile, 'r') as f:\n\t\tfirst = 0\n\t\tfor line in f:\n\t\t\tif first == 0:\n\t\t\t\tline1 = [x for x in line.split(',')]\n\t\t\t\tfirst = 1\n\t\t\telse:\n\t\t\t\tline2 = [x for x in line.split(',')]\n\n\treturn (line1, line2)\n\n\ndef initiateBoard(boardSize):\n\tboard = np.zeros([boardSize, boardSize])\n\tstationPos = [int(boardSize / 2), int(boardSize / 2)]\n\n\tboard[stationPos[0], stationPos[1]] = 1\n\n\treturn (stationPos, board)\n\n\ndef calcNew(currPos, dir, steps):\n\tnewPos = currPos.copy()\n\tif dir == 'R':\n\t\tnewPos[0] = currPos[0] + steps\n\t\tcurrPos[0] = currPos[0] + 1\n\telif dir == 'L':\n\t\tnewPos[0] = currPos[0] - steps\n\t\tcurrPos[0] = currPos[0] - 1\n\telif dir == 'U':\n\t\tnewPos[1] = currPos[1] - steps\n\t\tcurrPos[1] = currPos[1] - 1\n\telse:\n\t\tnewPos[1] = currPos[1] + steps\n\t\tcurrPos[1] = currPos[1] + 1\n\n\treturn (currPos, newPos)\n\n\ndef drawBoard(dir, c, n, firstWire):\n\tif dir in ['R', 'D']:\n\t\tif firstWire:\n\t\t\tboard[c[1]:(n[1] + 1), c[0]:(n[0] + 1)] = 1\n\t\telse:\n\t\t\taux = board[c[1]:(n[1] + 1), c[0]:(n[0] + 1)]\n\t\t\tboard[c[1]:(n[1] + 1), c[0]:(n[0] + 1)] = np.where(aux == 1, 3, 2)\n\telse:\n\n\t\tif firstWire:\n\t\t\tboard[n[1]:(c[1] + 1), n[0]:(c[0] + 1)] = 1\n\t\telse:\n\t\t\taux = board[n[1]:(c[1] + 1), n[0]:(c[0] + 1)]\n\t\t\tboard[n[1]:(c[1] + 1), n[0]:(c[0] + 1)] = np.where(aux == 1, 3, 2)\n\n\ndef drawWire(wire, initialPos, firstWire):\n\tglobal board\n\tcurrPos = initialPos.copy()\n\n\tfor instr in wire:\n\t\tdir = instr[0]\n\t\tsteps = int(instr[1:])\n\t\tcurrPost, newPos = calcNew(currPos, dir, steps)\n\t\t# print(str(currPos) + ' -> ' + str(newPos) + ' for dir: ' + str(dir) + ', steps:' + str(steps))\n\n\t\tdrawBoard(dir, currPos, newPos, firstWire)\n\t\tcurrPos = newPos.copy()\n\n\ndef calcMinDist(arr1, arr2, board, stationPos, minDist):\n\t# draw both wires\n\tdrawWire(arr1, stationPos, True)\n\tdrawWire(arr2, stationPos, False)\n\n\ta, b = np.where(board == 3)\n\n\tfor i in range(0, len(a)):\n\t\tcurrD = abs(a[i] - stationPos[0]) + abs(b[i] - stationPos[1])\n\t\tif currD < minDist:\n\t\t\tminDist = currD\n\n\treturn (minDist)\n\n\n# arr1, arr2 = readFile(inputFile)\n# print(arr1)\n# print(arr2)\n# arr1 = ['R75','D30','R83','U83','L12','D49','R71','U7','L72']\n# arr2 = ['U62','R66','U55','R34','D71','R55','D58','R83']\n\n# arr1 = ['R98','U47','R26','D63','R33','U87','L62','D20','R33','U53','R51']\n# arr2 = ['U98','R91','D20','R16','D67','R40','U7','R15','U6','R7']\n\n# arr1 = ['R8','U5','L5','D3']\n# arr2 = ['U7','R6','D4','L4']\n\n# size = 20000\n# stationPos, board = initiateBoard(size)\n# d = calcMinDist(arr1, arr2, board, stationPos, size)\n# print('The minimum distance is ' + str(d))\n# 462 is too low\n# 2193\n\n#### PART 2\n\ndef calcNew2(currPos, dir, steps):\n\tnewPos = currPos.copy()\n\tif dir == 'R':\n\t\tnewPos[0] = currPos[0] + steps\n\telif dir == 'L':\n\t\tnewPos[0] = currPos[0] - steps\n\telif dir == 'U':\n\t\tnewPos[1] = currPos[1] - steps\n\telse:\n\t\tnewPos[1] = currPos[1] + steps\n\n\treturn (newPos)\n\ndef isBetween(c, n, i):\n\tif (abs(i[0] - c[0]) <= abs(n[0] - c[0])) and (abs(i[1] - c[1]) <= abs(n[1] - c[1])):\n\t\treturn(True)\n\telse:\n\t\treturn(False)\n\ndef countSteps(wire, initialPos, inters):\n\tglobal board\n\n\tcurrPos = initialPos.copy()\n\tsumSteps = 0\n\t#print('inters:' + str(inters))\n\n\tfor instr in wire:\n\t\tdir = 
instr[0]\n\t\tsteps = int(instr[1:])\n\t\tnewPos = calcNew2(currPos, dir, steps)\n\n\t\t#stop as soon as the intersection is found\n\t\t#print('positions: ' + str(currPos) + ' -> ' + str(newPos))\n\t\tif isBetween(currPos, newPos, inters):\n\t\t\taux1 = abs(inters[0] - currPos[0])\n\t\t\taux2 = abs(inters[1] - currPos[1])\n\t\t\tsumSteps = sumSteps + aux1 + aux2\n\t\t\t#print('Sum steps to intersection: ' + str(sumSteps))\n\t\t\treturn(sumSteps)\n\t\telse:\n\t\t\tsumSteps = sumSteps + steps\n\t\t\t#print('increasing: ' + str(sumSteps))\n\n\t\tcurrPos = newPos.copy()\n\narr1, arr2 = readFile(inputFile)\n\n#arr1 = ['R8', 'U5', 'L5', 'D3']\n#arr2 = ['U7', 'R6', 'D4', 'L4']\n\n#arr1 = ['R75','D30','R83','U83','L12','D49','R71','U7','L72']\n#arr2 = ['U62','R66','U55','R34','D71','R55','D58','R83']\n\n#arr1 = ['R98','U47','R26','D63','R33','U87','L62','D20','R33','U53','R51']\n#arr2 = ['U98','R91','D20','R16','D67','R40','U7','R15','U6','R7']\n\nsize = 20000\nstationPos, board = initiateBoard(size)\n\n# draw both wires\ndrawWire(arr1, stationPos, True)\ndrawWire(arr2, stationPos, False)\n\n# check intersection coordinates\na, b = np.where(board == 3)\n\ndistSum = []\nfor i in range(0, len(a)):\n\tdist1 = countSteps(arr1, stationPos, [b[i], a[i]])\n\tdist2 = countSteps(arr2, stationPos, [b[i], a[i]])\n\tdistSum.append(dist1 + dist2)\n\nprint(distSum)\nprint('Minimum step-distance is ' + str(np.array(distSum).min()))\n#63526","sub_path":"03day/calAdvento03.py","file_name":"calAdvento03.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"291358842","text":"class Solution(object):\n def gameOfLife(self, board):\n \"\"\"\n :type board: List[List[int]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n\n x = 0\n y = 0\n\n n = len(board)\n for I in range(n):\n for J in range(n):\n \n count = 0\n for i in range(max(0, I - 1), min(n, I + 2)):\n for j in range(max(0, J - 1), min(n, J + 2)):\n count += board[i][j] & 1\n\n if (count == 4 and board[i][j]) or count == 3:\n board[I][J] |= 2\n \n for i in range(n):\n for j in range(n):\n board[i][j] >>= 1\n\n print(board)\nboard = [[0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]]\n\nresult = Solution().gameOfLife(board)\nprint(result)\n","sub_path":"game-of-life/game-of-life.py","file_name":"game-of-life.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"415198426","text":"#программа, которая вытаскивает с главной страницы сайта все заголовки и записывает их в файл\r\n\r\nimport urllib.request \r\n\r\nurl = 'http://www.znamyatrud.ru/'\r\nuser_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)' \r\nreq = urllib.request.Request('http://www.znamyatrud.ru/', headers={'User-Agent':user_agent}) \r\n\r\nwith urllib.request.urlopen(req) as response:\r\n html = response.read().decode('windows-1251')\r\n\r\nimport re\r\nregPostTitle = re.compile('.*?', flags=re.U | re.DOTALL)\r\ntitles = regPostTitle.findall(html)\r\n\r\nnew_titles = []\r\nregex = '(<.*?>|[0-9])|\\.'\r\n\r\nfor element in titles:\r\n element = re.sub(regex, '', element)\r\n new_titles.append(element)\r\n\r\nf = open('titles.txt', 'w')\r\nfor t in new_titles:\r\n f.write(t + '\\n')\r\n\r\nf.close()\r\n\r\n","sub_path":"ivanova_hw_1.py","file_name":"ivanova_hw_1.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"609983354","text":"import pandas as pd\r\n\r\nimport numpy as np\r\nfrom kmodes.kmodes import KModes\r\n\r\n\r\n\r\ndf = pd.read_excel('chosen.xlsx')\r\n\r\n#izdvojim bitne kolone\r\n\r\nprint(df.columns)\r\nf = ['Q16RPython', 'Q6', 'Q19_Part_1', 'Q19_Part_2', 'Q19_Part_3','Q13_Part_1', 'Q13_Part_2', 'Q13_Part_3']\r\n\r\n\r\ndf.loc[df['Q19_Part_1'].isna(),'Q19_Part_1']=''\r\ndf.loc[df['Q19_Part_2'].isna(),'Q19_Part_2']=''\r\ndf.loc[df['Q19_Part_3'].isna(),'Q19_Part_3']=''\r\n\r\ndf.loc[df['Q13_Part_1'].isna(),'Q13_Part_1']=''\r\ndf.loc[df['Q13_Part_2'].isna(),'Q13_Part_2']=''\r\ndf.loc[df['Q13_Part_3'].isna(),'Q13_Part_3']=''\r\n\r\n\r\ndf.loc[df['Q6'].isna(),'Q6']=''\r\n\r\n\r\ndata = df[f]\r\nprint(data)\r\n\r\n\r\n\"\"\"\r\ncolumns = ['Q4','Q6', 'Q7']\r\n\r\nq16=['Q16_Part_%d'%i for i in range(1,16)]\r\n\r\nprint(q16)\r\nq19=['Q19_Part_%d'%i for i in range(1,19)]\r\nprint(q19)\r\ncolnames = ['gender', 'education', 'current_role', 'emp_status']\r\n\r\nprint(df[q16]);\r\n\r\n\"\"\"\r\n\r\n\r\n\r\nkm = KModes(n_clusters=7, init='Huang', n_init=5, verbose=1)\r\n\r\nclusters = km.fit_predict(data)\r\n\r\n# Print the cluster centroids\r\nprint(km.cluster_centroids_)\r\n\r\n\r\n\r\n","sub_path":"src/cluster3.py","file_name":"cluster3.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"579619434","text":"\nimport configparser\nimport json\nimport logging\nimport log\nimport os\nfrom sdp import SDP\nimport sys\n\nclass Config(object):\n \"\"\"Configuration container\"\"\"\n \n PREFIX = \"/opt/lthn\"\n OPENVPN_BIN = None\n HAPROXY_BIN = None\n SUDO_BIN = None\n OPENVPN_SUDO = None\n LOGLEVEL = logging.WARNING\n AUDITLOG = None\n VERBOSE = None\n CONFIGFILE = None\n SDPFILE = None\n AUTHIDSFILE = None\n MAINSLEEP = 0.1\n T_SAVE = 10 # How often to save authids (sec)\n T_CLEANUP = 30 # How often to cleanup stale authids\n FORCE_REFRESH = None\n FORCE_SAVE = None\n \n # configargparse results\n CAP = None\n \n def __init__(self, action=\"read\", services=None):\n if (os.getenv('LTHN_PREFIX')):\n type(self).PREFIX = os.getenv('LTHN_PREFIX')\n \n type(self).OPENVPN_BIN = \"/usr/sbin/openvpn\"\n type(self).HAPROXY_BIN = \"/usr/sbin/haproxy\"\n type(self).SUDO_BIN = \"/usr/bin/sudo\"\n type(self).OPENVPN_SUDO = True\n type(self).LOGLEVEL = logging.WARNING\n type(self).CONFIGFILE = type(self).PREFIX + \"/etc/dispatcher.ini\"\n type(self).SDPFILE = type(self).PREFIX + \"/etc/sdp.json\"\n type(self).PIDFILE = type(self).PREFIX + \"/var/run/lthnvpnd.pid\"\n type(self).AUTHIDSFILE = type(self).PREFIX + '/var/authids.db'\n \n s = SDP()\n self.load(self.CONFIGFILE)\n if (action == \"init\"):\n # generate SDP configuration file based on user input\n print('Initialising SDP file %s' % self.SDPFILE)\n s.addService(self.CAP)\n s.configFile = self.SDPFILE\n s.save(self.SDPFILE)\n elif (action == \"read\"):\n self.load(self.CONFIGFILE)\n s.load(self.SDPFILE)\n elif (action == \"dummy\"):\n if (os.path.exists(self.SDPFILE)):\n s.load(self.SDPFILE)\n else:\n logging.warning(\"Missing SDP file\" + self.SDPFILE)\n elif (action == \"edit\"):\n # generate SDP configuration file based on user input\n print('Editing SDP file %s' % self.SDPFILE)\n s.editService(self.CAP)\n print('YOUR CHANGES TO THE SDP CONFIG file ARE UNSAVED!')\n choice = input('Save the file? This will overwrite your existing config file! [y/N] ').strip().lower()[:1]\n if (choice == 'y'):\n s.save()\n elif (action == \"add\"):\n # Add service into SDP file based on user input\n print('Editing configuration file %s' % self.SDPFILE)\n s.addService(self.CAP)\n print('YOUR CHANGES TO THE SDP CONFIG file ARE UNSAVED!')\n choice = input('Save the file? This will overwrite your existing config file! [y/N] ').strip().lower()[:1]\n if (choice == 'y'):\n s.save()\n elif (action == \"upload\"):\n s.load(self.SDPFILE)\n s.upload()\n else:\n logger.error(\"Bad option to Config!\")\n sys.exit(2)\n \n def load(self, filename):\n try:\n logging.debug(\"Reading config file %s\" % (filename))\n cfg = configparser.ConfigParser()\n cfg.read(filename)\n self.cfg = cfg\n except IOError:\n logging.error(\"Cannot read %s. Exiting.\" % (filename))\n sys.exit(1)\n \n def getService(self, id):\n section = \"service-\" + id\n for s in self.cfg.sections():\n if s.lower() == section.lower():\n return(self.cfg[s])\n return(None)\n\nCONFIG = None\n","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"436142187","text":"from reports import config, my_auth, my_reports, output_status_message, output_bing_ads_webfault_error, output_webfault_errors, get_date_range\nfrom datetime import date\nfrom sys import argv\n\nif __name__ == '__main__':\n start_date = ''\n end_date = ''\n\n # validate input\n if len(argv) == 1:\n start_date = date.today().strftime('%m-%d-%Y')\n end_date = start_date\n else:\n start_date = argv[1]\n if len(argv) > 2 :\n end_date = argv[2]\n else:\n end_date = start_date\n\n date_range = get_date_range(start_date, end_date)\n \n if date_range and len(date_range) > 0:\n auth = my_auth()\n authorization_data = auth.authenticate()\n \n for report_date in date_range:\n my_reports(report_date, authorization_data).download_report()","sub_path":"bing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"396807705","text":"#!/usr/bin/env python\n\nimport sys\n\nsys.path.append('/home/song/PycharmProjects/SongNCM') # need add path at terminal\n\nimport logging\nfrom mininet.net import Mininet\nfrom mininet.node import RemoteController\nfrom mininet.cli import CLI\nfrom mininet.link import TCLink\nfrom Tools.CreateTopology import SwitchHostLink\n\ndef createTopo():\n logging.debug(\"create 2 switch and 3 host\")\n\n topo = SwitchHostLink(2, 3)\n topo.createTopo()\n topo.createLink(topo.switchlist[0], topo.hostlist[0])\n topo.createLink(topo.switchlist[0], topo.hostlist[1])\n topo.createLink(topo.switchlist[0], topo.switchlist[1])\n topo.createLink(topo.switchlist[1], topo.hostlist[2])\n\n controller_ip = \"127.0.0.1\"\n controller_port = 6633\n net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)\n net.addController('controller', controller=RemoteController,\n ip=controller_ip, port=controller_port)\n net.start()\n\n # topo.open_service(net)\n\n CLI(net)\n net.stop()\n\n\nif __name__ == '__main__':\n createTopo()\n","sub_path":"topology/s2_h3.py","file_name":"s2_h3.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"402362558","text":"#Uses python3\n\nimport sys\nimport time\n\nsys.setrecursionlimit(200000)\n\ncount = 1\n\ndef graphorder(vertex,graph,visited,post):\n global count\n visited[vertex] = True\n count += 1\n for node in graph[vertex]:\n if visited[node] is False:\n graphorder(node,graph,visited,post)\n post[vertex] = count\n count += 1\n\ndef dfs(graph): \n post = [0 for _ in range(n)]\n visited = [False for _ in range(n)]\n for i in range(n):\n if visited[i] is False:\n graphorder(i,graph,visited,post)\n return post\n\ndef explore_scc(vertex,graph,scc,visited,post):\n visited[vertex] = True\n scc.add(post[vertex])\n \n for node in graph[vertex]:\n if visited[node] is False:\n explore_scc(node,graph,scc,visited,post)\n\n\ndef find_all_scc(graph,post):\n visited = [False for _ in range(n)]\n rem_nodes = sorted([(i,j) for i,j in enumerate(post)],key=lambda k:k[1],reverse=True) \n result = 0\n\n while len(rem_nodes) != 0:\n scc = set() \n max_index = rem_nodes[0][0]\n explore_scc(max_index,graph,scc,visited,post)\n result += 1\n rem_nodes = [t for t in rem_nodes if t[1] not in scc] \n return result\n\n\ndef number_of_strongly_connected_components(actual_graph,reverse_graph): \n\n #t0 = time.time()\n # Post Order for Reverse Graph\n graphpostorder = dfs(reverse_graph)\n \n\n # Find All SCC on actual graph based of post order\n components = find_all_scc(actual_graph,graphpostorder) \n \n #t1 = time.time()\n\n #print(t1-t0)\n #print(\"Components :\",components)\n return components\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n actual_graph = [[] for _ in range(n)]\n reverse_graph = [[] for _ in range(n)]\n for (a, b) in edges:\n actual_graph[a - 1].append(b - 1)\n reverse_graph[b-1].append(a-1)\n print(number_of_strongly_connected_components(actual_graph,reverse_graph))\n","sub_path":"graphs/Week-2/strongly_connected/strongly_connected.py","file_name":"strongly_connected.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"345713880","text":"\"\"\"\n Create a contour plot of monthly precip from the climodat data (iemre)\n\"\"\"\nimport sys\nimport datetime\n\nimport psycopg2.extras\nfrom pyiem.network import Table as NetworkTable\nfrom pyiem.plot import MapPlot\nfrom pyiem.util import get_dbconn\n\n\ndef do_month(ts, routes=\"m\"):\n \"\"\"\n Generate the plot for a given month, please\n \"\"\"\n pgconn = get_dbconn(\"coop\", user=\"nobody\")\n ccursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n nt = NetworkTable(\"IACLIMATE\")\n sql = \"\"\"SELECT station, sum(precip) as total, max(day) as lastday\n from alldata_ia WHERE year = %s and month = %s\n and station != 'IA0000' and substr(station,2,1) != 'C'\n GROUP by station\"\"\" % (\n ts.year,\n ts.month,\n )\n\n lats = []\n lons = []\n vals = []\n lastday = None\n ccursor.execute(sql)\n for row in ccursor:\n if row[\"station\"] not in nt.sts:\n continue\n if lastday is None:\n lastday = row[\"lastday\"]\n lats.append(nt.sts[row[\"station\"]][\"lat\"])\n lons.append(nt.sts[row[\"station\"]][\"lon\"])\n vals.append(row[\"total\"])\n\n mp = MapPlot(\n title=\"%s - %s\"\n % (ts.strftime(\"%d %B %Y\"), lastday.strftime(\"%d %B %Y\")),\n subtitle=\"%s Total Precipitation [inch]\" % (ts.strftime(\"%B %Y\"),),\n )\n mp.contourf(\n lons, lats, vals, [0, 0.1, 0.25, 0.5, 0.75, 1, 2, 3, 4, 5, 6, 7]\n )\n mp.plot_values(lons, lats, vals, fmt=\"%.2f\")\n\n pqstr = (\n \"plot %s %s summary/iemre_iowa_total_precip.png \"\n \"%s/summary/iemre_iowa_total_precip.png png\"\n ) % (routes, ts.strftime(\"%Y%m%d%H%M\"), ts.strftime(\"%Y/%m\"))\n mp.postprocess(pqstr=pqstr)\n\n\ndef main(argv):\n \"\"\"Do Something\"\"\"\n if len(argv) == 3:\n now = datetime.datetime(int(argv[1]), int(argv[2]), 1)\n do_month(now, \"m\")\n else:\n now = datetime.datetime.now() - datetime.timedelta(days=1)\n now = now.replace(day=1)\n do_month(now, \"cm\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"scripts/climodat/plot_monthly_precip.py","file_name":"plot_monthly_precip.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"246733533","text":"import sys\nimport tensorflow as tf\nsys.path.append(sys.path[0][:-10])\nfrom model.chatter import Chatter\nimport model.seq2seq as seq2seq\nfrom common.utils import CmdParser\nimport config.get_config as _config\nfrom common.pre_treat import preprocess_raw_lccc_data\n\n\nclass Seq2SeqChatter(Chatter):\n \"\"\"\n Seq2Seq模型的聊天类\n \"\"\"\n\n def __init__(self, model, checkpoint_dir, beam_size, vocab_size):\n \"\"\"\n Seq2Seq聊天器初始化,用于加载模型\n \"\"\"\n super().__init__(model, checkpoint_dir, beam_size)\n self.encoder = seq2seq.Encoder(vocab_size, _config.embedding_dim, _config.units, _config.BATCH_SIZE)\n self.decoder = seq2seq.Decoder(vocab_size, _config.embedding_dim, _config.units, _config.BATCH_SIZE)\n self.optimizer = tf.keras.optimizers.Adam()\n self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)\n\n print('正在检查是否存在检查点...')\n if self.ckpt:\n print('存在检查点,正在从{}中加载检查点'.format(checkpoint_dir))\n self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()\n else:\n print('不存在检查点,请先执行train模式,再进入chat模式')\n if model == 'chat':\n exit(0)\n\n def _train_step(self, inp, tar, weight, step_loss):\n loss = 0\n enc_hidden = self.encoder.initialize_hidden_state()\n\n with tf.GradientTape() as tape:\n enc_output, enc_hidden = self.encoder(inp, enc_hidden)\n dec_hidden = enc_hidden\n # 这里初始化decoder的输入,首个token为start,shape为(128, 1)\n dec_input = tf.expand_dims([2] * _config.BATCH_SIZE, 1)\n # 这里针对每个训练出来的结果进行损失计算\n for t in range(1, tar.shape[1]):\n predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)\n loss += self._loss_function(tar[:, t], predictions, weight)\n # 这一步使用teacher forcing\n dec_input = tf.expand_dims(tar[:, t], 1)\n\n batch_loss = (loss / int(tar.shape[1]))\n variables = self.encoder.trainable_variables + self.decoder.trainable_variables\n gradients = tape.gradient(loss, variables)\n self.optimizer.apply_gradients(zip(gradients, variables))\n\n step_loss[0] += batch_loss\n\n def _create_predictions(self, inputs, dec_input, t):\n hidden = tf.zeros((inputs.shape[0], _config.units))\n enc_out, enc_hidden = self.encoder(inputs, hidden)\n dec_hidden = enc_hidden\n dec_input = tf.expand_dims(dec_input[:, t], 1)\n predictions, _, _ = self.decoder(dec_input, dec_hidden, enc_out)\n return predictions\n\n def _loss_function(self, real, pred, weights):\n \"\"\"\n :param real:\n :param pred:\n :return: loss\n \"\"\"\n # 这里进来的real和pred的shape为(128,)\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = self.loss_object(real, pred, sample_weight=weights)\n # 这里要注意了,因为前面我们对于短的句子进行了填充,所\n # 以对于填充的部分,我们不能用于计算损失,所以要mask\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_mean(loss_)\n\n\ndef get_chatter(model):\n # 初始化要使用的聊天器\n chatter = Seq2SeqChatter(model=model,\n checkpoint_dir=_config.seq2seq_train_data,\n beam_size=_config.beam_size,\n vocab_size=_config.vocab_size,\n dict_fn=_config.seq2seq_dict_fn)\n return chatter\n\n\ndef main():\n parser = CmdParser(version='%seq2seq chatbot V1.0')\n parser.add_option(\"-t\", \"--type\", action=\"store\", type=\"string\",\n dest=\"type\", default=\"pre_treat\",\n help=\"execute type, pre_treat/train/chat\")\n (options, args) = parser.parse_args()\n\n if options.type == 'train':\n chatter = get_chatter(options.type)\n chatter.train(chatter.checkpoint,\n dict_fn=_config.seq2seq_dict_fn,\n 
data_fn=_config.data,\n max_train_data_size=_config.max_train_data_size)\n elif options.type == 'chat':\n chatter = get_chatter(options.type)\n print(\"Agent: 你好!结束聊天请输入ESC。\")\n while True:\n req = input(\"User: \")\n if req == \"ESC\":\n print(\"Agent: 再见!\")\n exit(0)\n response = chatter.respond(req=req)\n print(\"Agent: \", response)\n elif options.type == 'pre_treat':\n preprocess_raw_lccc_data(raw_data=_config.transformer_lccc_data,\n tokenized_data=_config.transformer_lccc_tokenized_data)\n # preprocess_raw_data(raw_data=_config.resource_data, tokenized_data=_config.tokenized_data)\n else:\n parser.error(msg='')\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Seq2Seq入口:指令需要附带运行参数\n cmd:python seq2seq2_chatter.py -t/--type [执行模式]\n 执行类别:pre_treat/train/chat\n\n chat模式下运行时,输入ESC即退出对话\n \"\"\"\n main()\n","sub_path":"hlp/chat/free/seq2seq_chatter.py","file_name":"seq2seq_chatter.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"260988998","text":"import os\n\nfrom keras import optimizers, regularizers\nimport keras\nfrom keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint\nfrom keras.datasets import cifar10\nfrom keras.initializers import he_normal\nfrom keras.layers import Dense, Input, add, Activation, GlobalAveragePooling2D\n\nfrom keras.layers.core import Flatten, Dropout\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.models import Model, load_model, Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy\n\nimport keras.backend as K\nimport numpy as np\n\n# from keras.layers.convolutional import Conv2D\nfrom operations import Convolution2D as Conv2D\n \nfrom utills import load_cifar, load_cifar_100\nfrom keras.layers.merge import Concatenate\n\n\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nK.set_image_dim_ordering('tf')\n\n# from keras.layers import Conv2D\n\n\n\nstack_n = 5 \nnum_classes = 10\nimg_rows, img_cols = 32, 32\nimg_channels = 3\nbatch_size = 128\nepochs = 200\niterations = 50000 // batch_size\nweight_decay = 0.0001\ndropout = 0.5\nmean = [125.307, 122.95, 113.865]\nstd = [62.9932, 62.0887, 66.7048]\n\ndef scheduler(epoch):\n if epoch < 80:\n return 0.1\n if epoch < 150:\n return 0.01 \n return 0.001\n\n# he_normal = truncated_normal\n\n# build model\ninput = Input(shape=[32,32,3])\n\n\n# Block 1\nx = Conv2D(8, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv1', input_shape=[32,32,3])(input)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(8, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv2')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n# Block 2\nx = Conv2D(16, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv1')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(16, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv2')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n# Block 3\nx = Conv2D(32, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv1')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(32, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv2')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(32, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv3')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(32, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block3_conv4')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n# Block 4\nx = Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv1')(x)\nx = 
BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv2')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv3')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block4_conv4')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n# Block 5\nx = Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv1')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv2')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv3')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block5_conv4')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n# model modification for cifar-10\nx = Flatten(name='flatten')(x)\nx = Dense(1024, use_bias = True, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc_cifa10')(x)\nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Dropout(dropout)(x)\nx = Dense(1024, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc2')(x) \nx = BatchNormalization()(x)\nx = Activation('relu')(x)\nx = Dropout(dropout)(x) \nx = Dense(100, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='predictions_cifa10')(x) \nx = BatchNormalization()(x)\noutput = Activation('softmax')(x)\n\nmodel = Model(input,output)\n\n\ndef color_preprocessing(x_train,x_test):\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n for i in range(3):\n x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]\n x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]\n return x_train, x_test\n\nif __name__ == '__main__':\n# # load data\n# # (x_train, y_train), (x_test, y_test) = cifar10.load_data(r'F:\\AAA_workspace\\dataset\\cifar-10-batches-py')\n# # y_train = keras.utils.to_categorical(y_train, num_classes)\n# # y_test = keras.utils.to_categorical(y_test, num_classes)\n# \n\n\n\n\n \n (x_train, y_train), (x_test, y_test) = load_cifar_100()\n# \n# # # color preprocessing\n x_train, x_test = color_preprocessing(x_train, x_test)\n# \n # build network\n \n print(model.summary())\n \n # set optimizer\n sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)\n# sgd = optimizers.Adam()\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n \n # set callback\n cbks = [TensorBoard(log_dir='./resnet_32/', histogram_freq=0),\n LearningRateScheduler(scheduler),\n ModelCheckpoint('./checkpoint-{epoch}.h5', save_best_only=False, mode='auto', 
period=10)]\n \n # set data augmentation\n print('Using real-time data augmentation.')\n datagen = ImageDataGenerator(horizontal_flip=True,\n width_shift_range=0.125,\n height_shift_range=0.125,\n fill_mode='constant',cval=0.)\n datagen.fit(x_train)\n \n # start training\n model.fit_generator(datagen.flow(x_train, y_train,batch_size=batch_size),\n steps_per_epoch=iterations,\n epochs=epochs,\n callbacks=cbks,\n validation_data=(x_test, y_test))\n\n# resnet = load_model('resnet_2.h5')\n# # r = resnet.evaluate(x_test, y_test,1)\n# r = resnet.predict(x_test, 100)\n# r = numpy.argmax(r,1)==numpy.argmax(y_test,1)\n# r = numpy.mean(r.astype('float32'))\n# print(r)\n\n\n \n ","sub_path":"cifar-100/vgg_19_FWD.py","file_name":"vgg_19_FWD.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"5583032","text":"import itertools\n\nS = input()\n\n\ndef solve():\n ans = []\n ss = itertools.groupby(S)\n\n for key, groups in ss:\n groups = list(groups)\n groups_cnt = len(groups)\n nothing_group = [0] * (groups_cnt - 1)\n\n even_cnt = groups_cnt // 2\n odd_cnt = groups_cnt - even_cnt\n\n if key == 'R':\n tmp_ans = nothing_group + [odd_cnt]\n tmp_ans.append(even_cnt)\n elif key == 'L':\n ans[-1] += odd_cnt\n ans[-2] += even_cnt\n tmp_ans = nothing_group\n\n ans += tmp_ans\n\n print(*ans)\n\n\nsolve()\n","sub_path":"contest_abc/136/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}