diff --git "a/6649.jsonl" "b/6649.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6649.jsonl"
@@ -0,0 +1,633 @@
+{"seq_id":"462737042","text":"import telegram\nimport config\nimport parserequest\nfrom telegram.error import NetworkError, Unauthorized\nfrom time import sleep\n\nupdate_id = None\n\n\ndef main():\n global update_id\n bot = telegram.Bot(config.token)\n try:\n update_id = bot.getUpdates()[0].update_id\n except IndexError:\n update_id = None\n\n while True:\n try:\n echo(bot)\n except NetworkError:\n sleep(1)\n except Unauthorized:\n update_id += 1\n\n\ndef echo(bot):\n global update_id\n for update in bot.getUpdates(offset=update_id, timeout=10):\n chat_id = update.message.chat_id\n update_id = update.update_id + 1\n\n if update.message:\n list_price = parserequest.get_list_price()\n for price in [x for x in list_price]:\n update.message.reply_text(price)\n print(chat_id)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"iProTechnoBot/old_bot.py","file_name":"old_bot.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"411188968","text":"# Import notes\n\nfrom notes import NoteRoot, NoteCircle, NoteDiamond\nfrom modes import InputMode, RenderMode\n\n### Instrument classes\n\nclass Harp:\n\n def __init__(self):\n\n self.column_count = 5\n self.row_count = 3\n self.chord_image = {}\n self.highlighted_states_image = []\n self.instrument_type = 'harp'\n self.is_highlighted = False\n\n self.sky_inverse_position_map = {\n (0, 0): 'A1', (0, 1): 'A2', (0, 2): 'A3', (0, 3): 'A4', (0, 4): 'A5',\n (1, 0): 'B1', (1, 1): 'B2', (1, 2): 'B3', (1, 3): 'B4', (1, 4): 'B5',\n (2, 0): 'C1', (2, 1): 'C2', (2, 2): 'C3', (2, 3): 'C4', (2, 4): 'C5'\n }\n\n def get_row_count(self):\n return self.row_count\n\n def get_column_count(self):\n return self.column_count\n\n def get_is_highlighted(self):\n return self.is_highlighted\n\n def set_is_highlighted(self, is_highlighted):\n '''\n Expecting a boolean, to determine whether the harp is empty in this frame\n '''\n self.is_highlighted = is_highlighted\n\n def set_chord_image(self, chord_image):\n '''\n The chord_image is a dictionary. The keys are tuples representing the positions of the buttons. The values are dictionaries, where each key is the frame, and the value is a Boolean indicating whether the button is highlighted in that frame.\n '''\n # Ok, but in this case the dict should have keys for all the positions, and shut down buttons should be set to False\n #TODO: Raise TypeError if chord_image is not a dict\n self.chord_image = chord_image\n\n # def update_chord_image(self, index, new_state):\n def append_highlighted_state(self, row_index, column_index, new_state):\n\n '''\n INCOMPLETE IMPLEMENTATION. new_state is expected to be a Boolean\n '''\n\n chord_image = self.get_chord_image()\n\n row = chord_image[row_index]\n highlighted_states = row[column_index]\n highlighted_states.append(new_state)\n\n chord_image[index] = highlighted_states #index is undefined\n\n self.set_chord_image(chord_image)\n\n\n def get_chord_image(self):\n return self.chord_image\n\n def ascii_from_chord_image(self, chord_image, instrument_index):\n\n ascii_chord = ''\n for k in chord_image:\n for f in chord_image[k]:\n if chord_image[k][f]==True: # Button is highlighted\n ascii_chord += self.sky_inverse_position_map[k]\n #print(str(k) + ' = ' + ascii_chord)\n return ascii_chord\n\n\n\n def render_in_html(self, chord_image, note_width, instrument_index):\n\n harp_is_empty = not(self.get_is_highlighted())\n\n harp_render = ''\n\n if harp_is_empty:\n harp_render += '
'\n else:\n harp_render += ''\n\n for row_index in range(self.get_row_count()):\n\n harp_render += ''\n\n for column_index in range(self.get_column_count()):\n\n harp_render += ''\n\n # Calculate the note's overall index in the harp (0 to 14)\n note_index = (row_index * self.get_column_count()) + column_index\n\n note_position = (row_index, column_index)\n\n if note_index % 7 == 0:\n # Note is a root note\n note = NoteRoot()\n elif (note_index % self.get_column_count() == 0 or note_index % self.get_column_count() == 2) or note_index % self.get_column_count() == 4:\n # Note is in an odd column, so it is a circle\n note = NoteCircle()\n else:\n # Note is in an even column, so it is a diamond\n note = NoteDiamond()\n\n note_render = note.render_in_html(note_width, chord_image, note_position, self.get_instrument_type(), note_index, harp_is_empty)\n harp_render += note_render\n harp_render += ' '\n\n harp_render += ' '\n\n\n harp_render += '
'\n return harp_render\n\n\n def get_instrument_type(self):\n return self.instrument_type\n","sub_path":"python/instrument.py","file_name":"instrument.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"946513","text":"'''\nCreate a function named same_name() that has two parameters named your_name and my_name.\n\nIf our names are identical, return True. Otherwise, return False.'''\n\n\n# Write your same_name function here:\n\n# Uncomment these function calls to test your \ndef same_name(your_name,my_name):\n if your_name==my_name:\n return True\n else: return False\nprint(same_name(\"Colby\", \"Colby\"))\n# should print True\nprint(same_name(\"Tina\", \"Amber\"))\n# should print False\n","sub_path":"03 Control Flows/07 same_name.py","file_name":"07 same_name.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"368341371","text":"# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nimport requests\nimport logging\n\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nGITLAB_TOKEN = os.environ.get(\"GITLAB_TOKEN\")\nGITLAB_BUILD_URL = u'https://gitlab.uaprom/uaprom/uaprom/builds/'\nRUN_DEPLOY = (\n u'curl --request POST --header \"PRIVATE-TOKEN: %s\" '\n u'\"https://gitlab.uaprom/api/v3/projects/114/jobs/{job_id}/{action}\"'\n % GITLAB_TOKEN\n)\n\n\ndef get_last_tag():\n log.info(u'Get last tag')\n tags_json =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/repository/tags',\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n tags = []\n for i in tags_json:\n if len(i['message'].split('.')) == 3:\n if i['message'].startswith('1'):\n tags.append(i['message'])\n return tags[0]\n\n\ndef get_last_pipeline_of_branch(branch, all_pipelines=False):\n log.info(\n u'Get last pipeline of branch \"%s\" (all_pipelines=%s)'\n % (branch, all_pipelines)\n )\n pipelines =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines?per_page=100',\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n if all_pipelines:\n log.info(u'Len pipelines: %s' % len(pipelines))\n needful_pipelines = [\n pipeline for pipeline in pipelines if pipeline['ref'] == branch\n ]\n log.info(u'Len needful_pipelines: %s' % len(needful_pipelines))\n log.info(u'needful_pipelines: %s' % needful_pipelines)\n return needful_pipelines\n else:\n for pipeline in pipelines:\n if pipeline['ref'] == branch:\n log.info(u'pipeline: %s' % pipeline)\n return pipeline\n\n\ndef is_running_job(job_name, branch):\n log.info(u'Is running job %s' % job_name)\n pipelines = get_last_pipeline_of_branch(branch, all_pipelines=True)\n for pipeline in pipelines:\n jobs = requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines/%s/jobs'\n % pipeline['id'],\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n for job in jobs:\n if job['name'] == job_name:\n if job['status'] in ['pending', 'running']:\n log.info(u'Running job %s' % job['id'])\n return job['id']\n\n\ndef get_job(pipline_id, job_name):\n log.info(u'Get job %s in pipeline %s' % (job_name, pipline_id))\n jobs =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines/%s/jobs'\n % pipline_id,\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n for job in jobs:\n if job['name'] == job_name:\n log.info(u'job: %s' % job)\n return job\n\n\ndef run_start_job(job_id):\n log.info(u'Run start job %s' % job_id)\n subprocess.Popen(\n RUN_DEPLOY.format(job_id=job_id, action=u'play'),\n shell=True,\n stdout=subprocess.PIPE\n )\n log.info(u'Started last commit (job_id: %s)' % job_id)\n\n\ndef restart_job(job_id):\n log.info(u'Restart start job %s' % job_id)\n subprocess.Popen(\n RUN_DEPLOY.format(job_id=job_id, action=u'retry'),\n shell=True,\n stdout=subprocess.PIPE\n )\n log.info(u'Restarted last commit (job_id: %s)' % job_id)\n\n\ndef start_deploy(job_name, branch):\n log.info(u'Branch: %s' % branch)\n log.info(u'Job name: %s' % job_name)\n running_job = is_running_job(job_name, branch)\n message = u'Что-то пошло не так :('\n if running_job:\n message = (\n u\"Сборка %s в gitlab уже *была запущена* \"\n u\"или *ожидает запуска!*\"\n u\"\\n%s%s\" % (job_name, GITLAB_BUILD_URL, running_job)\n )\n else:\n pipelines = get_last_pipeline_of_branch(branch, all_pipelines=True)\n if pipelines:\n n = len(pipelines)\n i = 0\n while i 
< n:\n log.info(u'Попытка #%s' % i)\n job = get_job(pipelines[i]['id'], job_name)\n log.info(u'Job: %s' % job)\n if job:\n if job['status'] == 'manual':\n run_start_job(job['id'])\n message = (\n u\"Запустил сборку %s (%s) в gitlab!\"\n u\"\\n%s%s\"\n % (job_name, branch, GITLAB_BUILD_URL, job['id'])\n )\n else:\n restart_job(job['id'])\n message = (\n u\"*Перезапустил* сборку %s (%s) в \"\n u\"gitlab!\\n%s%s\"\n % (job_name, branch, GITLAB_BUILD_URL, job['id'])\n )\n break\n else:\n i += 1\n if not job:\n message = (\n u\"*Не получилось запустить сборку!* Возможно нет \"\n u\"pipline ветки {} в которой есть job'а \"\n u\"{}!\".format(branch, job_name)\n )\n else:\n message = (\n u\"*Не получилось запустить сборку!* Возможно новых \"\n u\"коммитов в ветке {}!\".format(branch)\n )\n return message\n","sub_path":"bot/gitlab.py","file_name":"gitlab.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"513447375","text":"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom auto_scan_test import OPConvertAutoScanTest\nfrom hypothesis import reproduce_failure\nimport hypothesis.strategies as st\nimport numpy as np\nimport unittest\nimport random\n\n\nclass TestNonZeroConcert(OPConvertAutoScanTest):\n \"\"\"\n ONNX op: NonZero\n OPset version: 9~15\n \"\"\"\n\n def sample_convert_config(self, draw):\n input_shape = draw(\n st.lists(\n st.integers(\n min_value=10, max_value=20), min_size=1, max_size=3))\n input_dtype = draw(st.sampled_from([\"float32\", \"int32\"]))\n\n config = {\n \"op_names\": [\"NonZero\", ],\n \"test_data_shapes\": [input_shape],\n \"test_data_types\": [input_dtype],\n \"inputs_shape\": [input_shape],\n \"min_opset_version\": 9,\n \"inputs_name\": [\"x\"],\n \"outputs_name\": [\"y\"],\n \"delta\": 1e-4,\n \"rtol\": 1e-4,\n \"run_dynamic\": True,\n }\n attrs = {}\n return (config, attrs)\n\n def test(self):\n self.run_and_statis(max_examples=50)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/onnx/test_auto_scan_nonzero.py","file_name":"test_auto_scan_nonzero.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"447333255","text":"from django.shortcuts import render, redirect, get_list_or_404\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom orm.models import Bobot\nfrom management.bobot.forms import BobotForm\nfrom library.view import ManagementAccessView\n# Create your views here.\n\n\nclass ListBobotView(ManagementAccessView):\n\tdef get(self, request):\n\n\t\ttemplate = 'bobot/index.html'\n\n\t\tform = BobotForm(request.POST or None)\n\t\tbobot = Bobot.objects.all()\n\t\tdata = {\n 'form_mode' : 'add',\n 'form' : form,\n\t\t'bobot' : bobot,\n\t\t}\n\t\treturn render(request, template, data)\n\nclass EditBobotView(ManagementAccessView):\n template = 'bobot/index.html'\n\n def get(self, request, id):\n bobot = Bobot.objects.filter(id=id)\n if not bobot.exists():\n return redirect('bobot:view')\n bobot = bobot.first()\n initial = {\n\n 'id': bobot.id,\n 'nilai_akademik' : bobot.nilai_akademik,\n 'kelas' : bobot.kelas,\n 'karakter' : bobot.karakter,\n 'plomba'\t: bobot.plomba,\n 'hasil_tes' : bobot.hasil_tes,\n }\n\n form = BobotForm(initial=initial)\n bobot = Bobot.objects.all()\n data = {\n 'id':id,\n 'form': form,\n 'form_mode' : 'edit',\n 'bobot' : bobot,\n }\n return render(request, self.template, data)\n\n\n\nclass UpdateBobotView(ManagementAccessView):\n\n def post(self, request):\n \n template = \"bobot/index.html\"\n form = BobotForm(request.POST or None)\n if form.is_valid():\n id = form.cleaned_data['id']\n bobot = Bobot.objects.get(pk=id)\n bobot.nilai_akademik = form.cleaned_data['nilai_akademik']\n bobot.kelas = form.cleaned_data['kelas']\n bobot.karakter = form.cleaned_data['karakter']\n bobot.plomba = form.cleaned_data['plomba']\n bobot.hasil_tes = form.cleaned_data['hasil_tes']\n messages.add_message(request, messages.INFO, 'Data Berhasil Diupdate') \n bobot.save(force_update=True)\n return redirect('bobot:view')\n else:\n bobot = bobot.objects.all()\n data = {\n 'form_mode':'edit',\n 'form': form,\n 'bobot': bobot,\n }\n messages.add_message(request, messages.INFO, 'Data Gagal Diupdate !!') \n return render(request, template, data)\n\n","sub_path":"PythonMoora/management/bobot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"636807144","text":"# coding=utf-8\n\nimport json\n\nwith open('processed.json') as f:\n records = json.load(f)\n\nfor i in range(19):\n for j in range(7):\n for k in range(7):\n print(i, j, k, records[i][j][k][0])\n print(i, j, k, records[i][j][k][1])\n\nwith open('final.json', 'w') as f:\n json.dump(records, f)\n","sub_path":"1/3/haha.py","file_name":"haha.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"281736895","text":"#加载飞桨和相关类库\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph import nn\nimport paddle.fluid.dygraph as dy\nfrom paddle.fluid import layers\nimport numpy as np\nimport os\nfrom PIL import Image\nprint(paddle.__version__)\n\n\n# 一行代码实现动转静。\n# 动静转换的操作非常简单,仅需添加一个装饰器( @to_static ),框架就会自动将动态图的程序,转换为静态图的program,并使用该program训练、保存为静态图模型以实现推理部署。\n\n# import paddle\n# from paddle.static import InputSpec\n# from paddle.fluid.dygraph import Layer\n# from paddle.jit import to_static\n\n\n# class SimpleNet(Layer):\n# def __init__(self):\n# super(SimpleNet, self).__init__()\n# self.linear = paddle.nn.Linear(10, 3)\n\n# @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')])\n# def forward(self, x, y):\n# out = self.linear(x)\n# out = out + y\n# return out\n\n\n# net = SimpleNet()\n# paddle.jit.save(net, './simple_net') \n\n# 定义mnist数据识别网络结构,同房价预测网络\nclass MNIST(fluid.dygraph.Layer):\n def __init__(self):\n super(MNIST, self).__init__()\n\n self.cnn = dy.Conv2D(num_channels=3, num_filters=1, filter_size=3, stride=1, padding=1, act='relu')\n \n self.cls = dy.Sequential(\n dy.Linear(input_dim=784, output_dim=128),\n dy.Dropout(p=.2),\n dy.Linear(input_dim=128, output_dim=5),\n )\n # self.cls = dy.Linear(input_dim=784, output_dim=5)\n\n # 定义网络结构的前向计算过程\n def forward(self, x):\n x = self.cnn(x)\n\n b = x.shape[0]\n # print(b)\n x = layers.reshape(x, shape=[b,-1,])\n # print(x.shape)\n x = self.cls(x)\n # print(x.shape)\n return layers.softmax(x, axis=1)\n\n\nif __name__ == '__main__':\n\n # 定义预测过程\n with fluid.dygraph.guard():\n model = MNIST()\n \n # 加载模型参数\n # model_dict, _ = fluid.load_dygraph(\"mnist\")\n # model.load_dict(model_dict)\n\n # 灌入数据\n model.eval()\n tensor_img = np.random.rand(1,3,28,28).astype(np.float32)\n result = model(fluid.dygraph.to_variable(tensor_img))\n # 预测输出取整,即为预测的数字,打印结果\n print(\"本次预测的数字是\", result.numpy().astype('int32'))\n\n\nif __name__ == '__main__':\n\n x = np.load('harset/db5_acc.npy')\n y = np.load('harset/db5_lab.npy')\n\n # # 定义飞桨动态图工作环境\n # with fluid.dygraph.guard():\n # # 声明网络结构\n # model = MNIST()\n # # 启动训练模式\n # model.train()\n # # 定义数据读取函数,数据读取batch_size设置为16\n # train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=16)\n # # 定义优化器,使用随机梯度下降SGD优化器,学习率设置为0.001\n # optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())\n\n\n # 通过with语句创建一个dygraph运行的context\n # 动态图下的一些操作需要在guard下进行\n # with fluid.dygraph.guard():\n # model = MNIST()\n # model.train()\n # train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=16)\n # optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())\n \n # EPOCH_NUM = 10\n # for epoch_id in range(EPOCH_NUM):\n # for batch_id, data in enumerate(train_loader()):\n # #准备数据,格式需要转换成符合框架要求的\n # image_data = np.array([x[0] for x in data]).astype('float32')\n # label_data = np.array([x[1] for x in data]).astype('float32').reshape(-1, 1)\n # # 将数据转为飞桨动态图格式\n # image = fluid.dygraph.to_variable(image_data)\n # label = fluid.dygraph.to_variable(label_data)\n \n # #前向计算的过程\n # predict = model(image)\n \n # #计算损失,取一个批次样本损失的平均值\n # loss = fluid.layers.square_error_cost(predict, label)\n # avg_loss = fluid.layers.mean(loss)\n \n # #每训练了1000批次的数据,打印下当前Loss的情况\n # if batch_id !=0 and batch_id % 1000 == 0:\n # print(\"epoch: {}, batch: {}, loss is: {}\".format(epoch_id, batch_id, avg_loss.numpy()))\n \n # #后向传播,更新参数的过程\n # avg_loss.backward()\n # 
optimizer.minimize(avg_loss)\n # model.clear_gradients()\n\n # # 保存模型\n # fluid.save_dygraph(model.state_dict(), 'mnist')\n","sub_path":"har_paddle_v1.8/paddle_mnist.py","file_name":"paddle_mnist.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"602532533","text":"# -*- coding: utf-8 -*-\n# @Author: Puffrora\n# @Date: 2019-08-11 11:11:01\n# @Last Modified by: Puffrora\n# @Last Modified time: 2019-08-11 12:06:09\n\n\nclass Solution(object):\n\tdef findAnagrams(self, s, p):\n\t\twindow, need = {}, {}\n\t\tfor i in p:\n\t\t\tneed[i] = need.get(i, 0) + 1\n\t\tleft, right, match = 0, 0, 0\n\t\tres = []\n\t\twhile right < len(s):\n\t\t\tif s[right] in need:\n\t\t\t\twindow[s[right]] = window.get(s[right], 0) + 1\n\t\t\t\tif window[s[right]] == need[s[right]]:\n\t\t\t\t\tmatch += 1\n\t\t\tright += 1\n\n\t\t\twhile match == len(need):\n\t\t\t\tif right - left == len(p):\n\t\t\t\t\tres.append(left)\n\t\t\t\tif s[left] in need:\n\t\t\t\t\twindow[s[left]] -= 1\n\t\t\t\t\tif window[s[left]] < need[s[left]]:\n\t\t\t\t\t\tmatch -= 1\n\t\t\t\tleft += 1\n\n\t\treturn res","sub_path":"Leetcode/leetcode438 找到字符串中所有字母异位词.py","file_name":"leetcode438 找到字符串中所有字母异位词.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"522787315","text":"#-*- coding: utf-8 -*-\nimport redis\n\nclass RedisProxy:\n\n\tdef __init__ (self, db=None):\n\t\t\"\"\"\n\t\tThis class deals with the storing, creating, deleting, getting object from the proxy_list stored in the\n\t\tredis database.\n\t\tArgs:\n\t\t\t\n\t\t\tFor unhealthy_proxies pass redis_list_name = \"unhealthy_proxies\" as an argument when initiating this class\n\n\t\t\"\"\"\n\t\tif not db:\n\t\t\tself.redis_connection = redis.StrictRedis(host='localhost', port=6379, db=15)\n\t\telse:\n\t\t\tself.redis_connection = redis.StrictRedis(host='localhost', port=6379, db=db)\n\n\t\t\n\tdef store_proxy_list(self, proxy_list, status):\n\t\t\"\"\"\n\t\tproxy_list is the list of the proxies which will be stored in the redis proxies list\n\t\tEach element is in the form of \n\t\t{\"ip\": ip, \"port\": 1080, \"type\": Socks4, \"country\": Brazil, \"latency\": 30, \"reliability\": 90}\n\n\t\tstatus: healthy or unhealhty\n\t\tif status != \"healthy\":\n\t\t\traise StandardError(\"not a valid status for proxy\")\n\t\t\n\t\tif status != \"unhealthy\":\n\t\t\traise StandardError(\"not a valid status for proxy\")\n\t\t\"\"\"\n\t\twith self.redis_connection.pipeline() as pipe:\n\t\t\ttry:\n\t\t\t\tfor proxy in proxy_list:\n\t\t\t\t\tproxy[\"status\"] = status\n\t\t\t\t\tpipe.hmset(proxy.get(\"ip\"), proxy)\n\t\t\t\tpipe.execute()\n\t\t\texcept Exception as e:\n\t\t\t\traise StandardError(e)\n\n\tdef total_proxies(self):\n\t\tproxy_list = self.redis_connection.keys()\n\t\treturn proxy_list\n\n\n\tdef proxy_details(self, proxy):\n\t\t\"\"\"\n\t\tReturn keys and its values for the related proxy\n\t\t\"\"\"\n\t\tproxy_details = self.redis_connection.hgetall(proxy)\n\t\treturn proxy_details\n\n\n\tdef delete_proxy(self, proxy):\n\t\t\"\"\"\n\t\tDelete proxy\n\t\t\"\"\"\n\t\tself.redis_connection.delete(proxy)\n\t\treturn \n\n\n\tdef healthy_proxies(self):\n\t\t\"\"\"\n\t\treturns the list of healthy proxies of the form\n\t\t[{'country': '\\xc2\\xa0Mexico', 'ip': '187.163.164.233', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'healthy', 'type': 'Socks4'},\n\n\t\t{'country': '\\xc2\\xa0Pakistan', 'ip': '221.120.222.69', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'healthy', 'type': 'Socks5'}]\n\n\t\t\"\"\"\n\t\tproxy_list = [self.redis_connection.hgetall(key) for key in self.redis_connection.keys() \n\t\t\t\tif self.redis_connection.hget(key, \"status\") == \"healthy\"]\n\n\n\t\treturn proxy_list\n\t\n\tdef unhealthy_proxies(self):\n\t\t\"\"\"\n\t\treturns the list of unhealthy proxies\n\t\t[{'country': '\\xc2\\xa0Mexico', 'ip': '187.163.164.233', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'unhealthy', 'type': 'Socks4'},\n\n\t\t{'country': '\\xc2\\xa0Pakistan', 'ip': '221.120.222.69', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'unhealthy', 'type': 'Socks5'}]\n\t\t\"\"\"\n\t\tproxy_list = [self.redis_connection.hgetall(key) for key in self.redis_connection.keys() \n\t\t\t\tif self.redis_connection.hget(key, \"status\") == \"unhealthy\"]\n\t\treturn proxy_list\n\n\n\tdef update_status(self, ip, status):\n\t\t\"\"\"\n\t\tThis method updates the status of the proxy present in the database\n\t\t\"\"\"\n\t\tif not status in (\"healthy\", \"unhealthy\"):\n\t\t\traise StandardError(\"Status that has been provided is not a valid one\")\n\n\t\ttry:\n\t\t\tself.redis_connection.hset(ip, \"status\", status)\n\n\t\texcept Exception as e:\n\t\t\traise 
StandardError(e)\n\n\n\n","sub_path":"proxies/redis_storage.py","file_name":"redis_storage.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"565694602","text":"\"\"\"added colums to acquaintances table\n\nRevision ID: 514a39d06e75\nRevises: 54c23ecaee46\nCreate Date: 2015-08-24 15:16:31.227226\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '514a39d06e75'\ndown_revision = '54c23ecaee46'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(u'users_acquaintance', sa.Column('acquaintance_user_first_name', sa.String(length=2000), nullable=True))\n op.add_column(u'users_acquaintance', sa.Column('acquaintance_user_last_name', sa.String(length=2000), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(u'users_acquaintance', 'acquaintance_user_last_name')\n op.drop_column(u'users_acquaintance', 'acquaintance_user_first_name')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/514a39d06e75_added_colums_to_acquaintances_table.py","file_name":"514a39d06e75_added_colums_to_acquaintances_table.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"325896869","text":"# -*- coding:utf-8 -*\nfrom Time import Ming_time\nimport sys\nimport os\nimport os.path\nimport time,datetime\nfrom ExcelProcess import ming_file\nsys.path.append(\"..\")\nfrom ExcelProcess import excel\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\nfile_name = unicode(r'D:\\Python\\PEclub\\pingjiao\\2017~2018学年第二学期.xlsx','utf8')\ndata = excel.read_excel(filename=file_name)\n\n\nteacher_number = {}\nteacher_q_score = {}\nteacher_t_score = {}\n\nfor i in range(2, len(data)):\n teacher_name = data[i][9]\n if teacher_name in teacher_number.keys():\n teacher_number[teacher_name] += 1\n else:\n teacher_number[teacher_name] = 1\n\n q_equal_number = {u'完全同意':5,u'同意':4,u'一般':3,u'不同意':2,u'完全不同意':1}\n for j in range(11,39):\n select = data[i][j]\n if teacher_name in teacher_q_score.keys():\n teacher_q_score[teacher_name] += q_equal_number[select]\n else:\n teacher_q_score[teacher_name] = q_equal_number[select]\n\n if teacher_name in teacher_t_score.keys():\n teacher_t_score[teacher_name] = (float(teacher_t_score[teacher_name]) + float(data[i][40])) / 2\n else:\n teacher_t_score[teacher_name] = data[i][40]\n\n\nteacher_list = teacher_number.keys()\nteacher_number_score = {}\n\ntotal_score = {}\nfor teacher_name in teacher_list:\n teacher_number_score[teacher_name] = teacher_number[teacher_name] * (100/46)\n teacher_q_score[teacher_name] = teacher_q_score[teacher_name] / teacher_number[teacher_name]\n teacher_q_score[teacher_name] = teacher_q_score[teacher_name] * (100.0/145)\n\n total_score[teacher_name] = (float(teacher_number_score[teacher_name]) * 0.2) + (float(teacher_q_score[teacher_name]) * 0.6) + (float(teacher_t_score[teacher_name]) * 0.2)\n\n\nscore = []\nfor name in total_score.keys():\n score.append([name,teacher_number[name],total_score[name]])\n\n\n\ndes_file_name = unicode(r'D:\\Python\\PEclub\\pingjiao\\2017~2018学年第二学期总分.xlsx','utf8')\nexcel.write_excel(des_filename=des_file_name,data=score,sheet_name='success')\n\n\n\n\n\n\n\n","sub_path":"PEclub/pingjiao/FormatScore.py","file_name":"FormatScore.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"70126073","text":"\"\"\" Helper functions for converting JSON to ARL objects\n\n\"\"\"\n\nfrom astropy.units import Unit\n\nfrom data_models.data_model_helpers import *\n\nimport numpy\n\ndef json_to_skycoord(d):\n \"\"\"Convert JSON string to SkyCoord\n \n e.g. \"phasecentre\": {\n \"ra\": {\"value\": 30.0, \"unit\": \"deg\"},\n \"dec\": {\"value\": -60.0, \"unit\": \"deg\"},\n \"frame\": \"icrs\",\n \"equinox\": \"j2000\"}\n\n :param d:\n :return:\n \"\"\"\n return SkyCoord(ra=json_to_quantity(d[\"ra\"]),\n dec=json_to_quantity(d[\"dec\"]),\n equinox=d[\"equinox\"],\n frame=d[\"frame\"])\n\n\ndef json_to_quantity(q):\n \"\"\"Convert JSON string to Quantity\n \n e.g. \"cellsize\": {\"value\": 0.001, \"unit\": \"rad\"}\n\n :param q:\n :return:\n \"\"\"\n value = float(q[\"value\"])\n unit = q[\"unit\"]\n assert isinstance(unit, str), \"unit must be string\"\n unit = Unit(q[\"unit\"])\n return Quantity(value, unit)\n\ndef json_to_linspace(l):\n \"\"\"Convert JSON string to numpy.linspace\n \n e.g. \"frequency\": {\"start\": 0.9e8,\"stop\": 1.1e8,\"steps\": 7}\n \n :param l:\n :return:\n \"\"\"\n nsteps = int(l[\"steps\"])\n assert nsteps >= 0, \"Number of steps cannot be less than zero %s\" % str(l)\n return numpy.linspace(l[\"start\"], l[\"stop\"], nsteps)\n","sub_path":"workflows/arlexecute/processing_component_interface/arl_json/json_helpers.py","file_name":"json_helpers.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"525686501","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\dev\\PyCharm Projects\\NCryptoClient\\NCryptoClient\\UI\\ui_contacts_list.py\n# Compiled at: 2018-04-19 21:08:40\n# Size of source mod 2**32: 4551 bytes\n\"\"\"\nModule for the list of contacts (Widget).\n\"\"\"\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nclass UiContactsList(QListWidget):\n __doc__ = '\\n UI-class which contains a list of buttons, each of which is a user contact.\\n '\n\n def __init__(self, main_window, parent=None):\n \"\"\"\n Constructor. Initializes all GUI and links logic to them.\n @param main_window: reference to the parent window (window itself).\n @param parent: ссылка на родительский класс (window panel).\n \"\"\"\n super().__init__(parent)\n self._main_window = main_window\n self.setResizeMode(QListView.Adjust)\n self.setObjectName('contacts_lb')\n self._last_keyboard_event = None\n self._last_mouse_event = None\n self.add_contact('Log')\n\n def keyPressEvent(self, *args, **kwargs):\n \"\"\"\n Registers keyboard buttons pressing events and writes them in the variable.\n @param args: additional parameters (list).\n @param kwargs: additional parameters (dictionary).\n @return: -\n \"\"\"\n self._last_keyboard_event = args[0]\n\n def mousePressEvent(self, *args, **kwargs):\n \"\"\"\n Registers mouse buttons pressing events and writes them in the variable.\n @param args: additional parameters (list).\n @param kwargs: additional parameters (dictionary).\n @return: -\n \"\"\"\n self._last_mouse_event = args[0]\n\n def add_contact(self, chat_name):\n \"\"\"\n When initializing main window components, this function adds list of user contacts.\n All data is being received from the server.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n index = self.find_contact_widget(chat_name)\n if index:\n return\n item = QListWidgetItem()\n item.setSizeHint(QSize(item.sizeHint().width(), 24))\n button = QPushButton(chat_name)\n button.setContextMenuPolicy(Qt.CustomContextMenu)\n button.customContextMenuRequested.connect(lambda _, local_contact_name=chat_name: self.show_context_menu(local_contact_name))\n button.clicked.connect(lambda _, local_contact_name=chat_name: self._main_window.open_tab(local_contact_name))\n self.addItem(item)\n self.setItemWidget(item, button)\n\n def delete_contact(self, chat_name):\n \"\"\"\n Deletes contact from the list.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n self._main_window.close_tab(chat_name)\n index = self.find_contact_widget(chat_name)\n if index is not None:\n self.takeItem(index)\n\n def find_contact_widget(self, chat_name):\n \"\"\"\n Searches for contact widget in the list of contacts.\n @param chat_name: contact name.\n @return: index of contact widget.\n \"\"\"\n contacts_amount = self.count()\n if contacts_amount == 1:\n if self.itemWidget(self.item(0)).text() == chat_name:\n return 0\n else:\n return\n for i in range(0, contacts_amount):\n widget = self.itemWidget(self.item(i))\n if widget.text() == chat_name:\n return i\n\n def show_context_menu(self, chat_name):\n \"\"\"\n Shows context menu on the mouse left button clicking.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n menu = QMenu(self)\n remove_action = menu.addAction('Remove')\n remove_action.triggered.connect(lambda _, local_chat_name=chat_name: self._main_window.remove_contact_by_login(local_chat_name))\n 
menu.exec_(self.mapToGlobal(QPoint(self._last_mouse_event.x(), self._last_mouse_event.y())))","sub_path":"pycfiles/NCryptoClient-0.5.1-py2.py3-none-any/ui_contacts_list.cpython-36.py","file_name":"ui_contacts_list.cpython-36.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"618145820","text":"\"\"\"\nRepresentation of polynomial using a linked list\n\"\"\"\n\n\nclass PolyNode:\n def __init__(self, coefficient=None, exp=None):\n self.coefficient = coefficient\n self.exp = exp\n self.next = None\n\n\nclass PolyLinkedList:\n def __init__(self):\n self.head = None\n\n def create(self):\n last = None\n num = int(input(\"Enter number of terms: \"))\n print(\"Enter each term with coefficient and exponent\")\n for i in range(num):\n coefficient, exp = map(int, input().split())\n new_node = PolyNode(coefficient, exp)\n if self.head is None:\n self.head = last = new_node\n else:\n last.next = new_node\n last = new_node\n\n def display(self):\n p = self.head\n while p:\n print(\"{}x^{}\".format(p.coefficient, p.exp), end='+')\n p = p.next\n print()\n\n def eval(self, x):\n p = self.head\n val = 0\n while p:\n val += p.coefficient * pow(x, p.exp)\n p = p.next\n return val\n\n\ndef main():\n pl = PolyLinkedList()\n pl.create()\n pl.display()\n x = 4\n val = pl.eval(x)\n print(\"Evaluation of the given function {} is: {}\".format(x, val))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"linked_list/polynomial_ll.py","file_name":"polynomial_ll.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"37806708","text":"#encoding: utf-8\r\nfrom bencode import bencode, bdecode\r\nimport socket\r\n\r\nfrom hashlib import sha1\r\nfrom random import randint\r\nfrom struct import unpack\r\nfrom socket import inet_aton, inet_ntoa\r\n\r\nBOOTSTRAP_NODES = [\r\n (\"router.bittorrent.com\", 6881),\r\n (\"dht.transmissionbt.com\", 6881),\r\n (\"router.utorrent.com\", 6881)\r\n] \r\nTID_LENGTH = 4\r\nDHT_PORT = 6881\r\n\r\ndef entropy(bytes):\r\n s = \"\"\r\n for i in range(bytes):\r\n s += chr(randint(0, 255))\r\n return s\r\n\r\ndef random_id():\r\n hash = sha1()\r\n hash.update( entropy(20) )\r\n return hash.digest()\r\n\r\ndef decode_nodes(nodes):\r\n n = []\r\n length = len(nodes)\r\n if (length % 26) != 0: \r\n return n\r\n for i in range(0, length, 26):\r\n nid = nodes[i:i+20]\r\n ip = inet_ntoa(nodes[i+20:i+24])\r\n port = unpack(\"!H\", nodes[i+24:i+26])[0]\r\n n.append( (nid, ip, port) )\r\n return n\r\n\r\nclass KRPC(object):\r\n def __init__(self):\r\n self.types = {\r\n \"r\": self.response_received,\r\n \"q\": self.query_received\r\n }\r\n self.actions = {\r\n \"get_peers\": self.get_peers_received,\r\n \"announce_peer\": self.announce_peer_received,\r\n }\r\n\r\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.socket.bind((\"0.0.0.0\", DHT_PORT))\r\n\r\n def response_received(self, res, address):\r\n self.find_node_handler(res)\r\n\r\n def query_received(self, res, address):\r\n try:\r\n self.actions[res[\"q\"]](res, address)\r\n except KeyError:\r\n pass\r\n\r\n def send_krpc(self, msg, address):\r\n try:\r\n self.socket.sendto(bencode(msg), address)\r\n except:\r\n pass\r\n\r\nclass Client(KRPC):\r\n def __init__(self):\r\n KRPC.__init__(self)\r\n\r\n def find_node(self, address, nid=None):\r\n nid = random_id()\r\n tid = entropy(TID_LENGTH)\r\n msg = {\r\n \"t\": tid,\r\n \"y\": \"q\",\r\n \"q\": \"find_node\",\r\n \"a\": {\"id\": nid, \"target\": nid}\r\n }\r\n self.send_krpc(msg, address)\r\n\r\n def find_node_handler(self, res):\r\n try:\r\n nodes = decode_nodes(res[\"r\"][\"nodes\"])\r\n for node in nodes:\r\n (nid, ip, port) = node\r\n if len(nid) != 20: continue\r\n self.find_node( (ip, port), nid )\r\n except KeyError:\r\n pass\r\n\r\n def joinDHT(self):\r\n for address in BOOTSTRAP_NODES: self.find_node(address)\r\n\r\n def start(self):\r\n self.joinDHT()\r\n\r\n while True:\r\n try:\r\n (data, address) = self.socket.recvfrom(65536)\r\n res = bdecode(data)\r\n self.types[res[\"y\"]](res, address)\r\n except Exception:\r\n pass\r\n\r\nclass Server(Client):\r\n def __init__(self, master):\r\n Client.__init__(self)\r\n self.master = master\r\n\r\n def get_peers_received(self, res, address):\r\n try:\r\n infohash = res[\"a\"][\"info_hash\"]\r\n self.master.log(infohash)\r\n except KeyError:\r\n pass\r\n\r\n def announce_peer_received(self, res, address):\r\n try:\r\n infohash = res[\"a\"][\"info_hash\"]\r\n self.master.log(infohash)\r\n except KeyError:\r\n pass\r\n\r\n#using example\r\nclass Master(object):\r\n def __init__(self, f):\r\n self.f = f\r\n\r\n def log(self, infohash):\r\n self.f.write(infohash.encode(\"hex\")+\"\\n\")\r\n self.f.flush()\r\ntry:\r\n f = open(\"infohash.log\", \"a\")\r\n m = Master(f)\r\n s = Server(Master(f))\r\n s.start() \r\nexcept KeyboardInterrupt:\r\n s.socket.close()\r\n f.close()","sub_path":"simDHT.py","file_name":"simDHT.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"141062663","text":"from StringIO import StringIO\nimport json\nimport math\nimport random\nimport subprocess\nimport time\nimport urllib\nimport uuid\nimport pycurl\nimport ss2_config\n\nclass shieldsquareRequest:\n\n\t_zpsbd0 = \"false\"\n\t_zpsbd1 = \"\"\n\t_zpsbd2 = \"\"\n\t_zpsbd3 = \"\"\n\t_zpsbd4 = \"\"\n\t_zpsbd5 = \"\"\n\t_zpsbd6 = \"\"\n\t_zpsbd7 = \"\"\n\t_zpsbd8 = \"\"\n\t_zpsbd9 = \"\"\n\t_zpsbda = \"\"\n\t__uzma = \"\"\n\t__uzmb = 0\n\t__uzmc = \"\"\n\t__uzmd = 0\n\nclass shieldsquareCurlResponseCode:\n\n\terror_string = \"\"\n\tresponsecode = 0\n\n\nclass shieldsquareResponse:\n\n\tpid = \"\"\n\tresponsecode= 0\n\turl = \"\"\n\treason =\"\"\n\n\nclass shieldsquareCodes:\n\n\tALLOW = 0\n\tCAPTCHA = 2\n\tBLOCK = 3\n\tFFD = 4\n\tALLOW_EXP = -1\n\ndef shieldsquare_ValidateRequest( shieldsquare_username, shieldsquare_calltype, shieldsquare_pid , request):\n\n\tshieldsquare_low = 10000\n\tshieldsquare_high = 99999\n\tshieldsquare_a = 1\n\tshieldsquare_b = 3\n\tshieldsquare_c = 7\n\tshieldsquare_d = 1\n\tshieldsquare_e = 5\n\tshieldsquare_f = 10\n\tshieldsquare_time = int(time.time())\n\tshieldsquare_request = shieldsquareRequest()\n\tshieldsquare_RETURNCODES = shieldsquareCodes()\n\tshieldsquare_response = shieldsquareResponse()\n\tshieldsquare_response.dynamic_JS = \"var __uzdbm_c = 2+2\"\n\tshieldsquare_curl_response = shieldsquareCurlResponseCode()\n\tshieldsquare_service_url = 'http://' + ss2_config._ss2_domain + '/getRequestData'\n\tcookie_value_dict = dict()\n\tshieldsquare_ex_time = 3600*24*365*10 + 1*1*3*60*60\n\t\n\tif( ss2_config._timeout_value > 1000 ):\n\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\tshieldsquare_response.reason = \"ShieldSquare Timeout cant be greater then 1000 Milli seconds\"\n\t\treturn shieldsquare_response.__dict__,None;\n\n\tif len(shieldsquare_pid) == 0:\n\t\tshieldsquare_pid = shieldsquare_generate_pid(ss2_config._sid,request)\n\n\tif '__uzma' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzma\"] = {\"value\":request.COOKIES.get(\"__uzma\"),\"age\":shieldsquare_ex_time}\t\n\t\tshieldsquare_request.__uzma = request.COOKIES.get(\"__uzma\")\n\telse:\n\t\tshieldsquare_uzma = uuid.uuid1()\n\t\tcookie_value_dict[\"__uzma\"] = {\"value\":str(shieldsquare_uzma),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzma = str(shieldsquare_uzma)\n\n\tif '__uzmc' in request.COOKIES:\n\t\tshieldsquare_uzmc = request.COOKIES.get(\"__uzmc\")\n\t\tshieldsquare_uzmc = shieldsquare_uzmc[shieldsquare_e:]\n\t\tshieldsquare_uzmc = shieldsquare_uzmc[:-shieldsquare_e]\n\t\tshieldsquare_a = (int(shieldsquare_uzmc) - shieldsquare_c) / shieldsquare_b\n\t\tshieldsquare_a += 1\n\t\tshieldsquare_uzmc= str(random.randint(shieldsquare_low, shieldsquare_high)) + str(shieldsquare_c+shieldsquare_a*shieldsquare_b) + str(random.randint(shieldsquare_low, shieldsquare_high))\t\n\t\tcookie_value_dict[\"__uzmc\"] = {\"value\":shieldsquare_uzmc,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmc = shieldsquare_uzmc\n\t\n\telse:\n\t\tshieldsquare_uzmc= str(random.randint(shieldsquare_low, shieldsquare_high)) + str(shieldsquare_c+shieldsquare_a*shieldsquare_b) + str(random.randint(shieldsquare_low, shieldsquare_high))\n\t\tcookie_value_dict[\"__uzmc\"] = {\"value\":shieldsquare_uzmc,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmc = shieldsquare_uzmc\n\t\t\n\tif '__uzmb' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzmb\"] = 
{\"value\":request.COOKIES.get(\"__uzmb\"),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmb = request.COOKIES.get(\"__uzmb\")\n\t\n\telse:\n\t\tcookie_value_dict[\"__uzmb\"] = {\"value\":int(time.time()),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmb = shieldsquare_time\n\t\n\tif '__uzmd' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzmd\"] = {\"value\":shieldsquare_time,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmd = shieldsquare_time\n\t\n\telse:\n\t\tcookie_value_dict[\"__uzmd\"] = {\"value\":int(time.time()),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmd = shieldsquare_time\n\t\t\n\tif(ss2_config._mode == \"Active\"):\n\t\tshieldsquare_request._zpsbd0 = 'true'\n\t\n\telse:\n\t\tshieldsquare_request._zpsbd0 = 'false'\n\t\n\tshieldsquare_request._zpsbd1 = ss2_config._sid\n\tshieldsquare_request._zpsbd2 = shieldsquare_pid\n\tshieldsquare_request._zpsbd3 = ''\n\tshieldsquare_request._zpsbd4 = ''\n\tshieldsquare_request._zpsbd5 = ''\n\tshieldsquare_request._zpsbd6 = ''\n\tshieldsquare_request._zpsbd7 = ''\n\t\n\tshieldsquare_request._zpsbd3 = request.META.get('HTTP_REFERER')\t\n\tshieldsquare_request._zpsbd4 = request.path\n\tshieldsquare_request._zpsbd5 = request.COOKIES.get(ss2_config._sessid) \n\tshieldsquare_request._zpsbd6 = request.META.get(ss2_config._ipaddress)\n\tshieldsquare_request._zpsbd7 = request.META.get('HTTP_USER_AGENT')\n\t\n\tshieldsquare_request._zpsbd8 = shieldsquare_calltype\n\tshieldsquare_request._zpsbd9 = shieldsquare_username\n\tshieldsquare_request._zpsbda = shieldsquare_time\n\tshieldsquare_json_obj = json.dumps(shieldsquare_request.__dict__)\n\tshieldsquare_response.pid =shieldsquare_pid\n\tshieldsquare_response.url =ss2_config._js_url\n\tif(ss2_config._mode == \"Active\"):\n\t\tshieldsquare_curl_response = shieldsquare_post_sync(shieldsquare_service_url, shieldsquare_json_obj, ss2_config._timeout_value)\n\t\tif(str(shieldsquare_curl_response[1]) != '200'):\n\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\tshieldsquare_response.reason = shieldsquare_curl_response[0]\n\t\telse:\n\t\t\tshieldsquare_response_from_ss = json.loads(str(shieldsquare_curl_response[0]))\n\t\t\tshieldsquare_response.dynamic_JS = shieldsquare_response_from_ss['dynamic_JS']\n\t\t\tn=int(shieldsquare_response_from_ss['ssresp'])\n\t\t\tif n == 0:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW\n\t\t\telif n==1:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.MONITOR\n\t\t\telif n==2:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.CAPTCHA\n\t\t\telif n==3:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.BLOCK\n\t\t\telif n==4:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.FFD\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = str(shieldsquare_curl_response[1])\n\telse:\n\t\tif(ss2_config._async_http_post == 'true'):\n\t\t\terror_code=shieldsquare_post_async(shieldsquare_service_url, shieldsquare_json_obj,str(ss2_config._timeout_value))\n\t\t\tif(str(error_code[1])!='None'):\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = \"Request Timed Out/Server Not Reachable\"\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = 
shieldsquare_RETURNCODES.ALLOW\n\t\telse:\n\t\t\tshieldsquare_curl_response=shieldsquare_post_sync(shieldsquare_service_url, shieldsquare_json_obj,ss2_config._timeout_value)\n\n\t\t\tif(str(shieldsquare_curl_response[1])!='200'):\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = str(shieldsquare_curl_response[0])\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW\n\t\t\t\tshieldsquare_response_from_ss = json.loads(str(shieldsquare_curl_response[0]))\n\t\t\t\tshieldsquare_response.dynamic_JS = shieldsquare_response_from_ss['dynamic_JS']\n\t\t\t\t\n\treturn shieldsquare_response.__dict__,cookie_value_dict;\n\ndef shieldsquare_post_async(url, payload, timeout):\n\tdata = urllib.quote(payload)\n\tcmd = 'curl --fail --silent -X POST -H \"Accept: Application/json\" -H \"Content-Type: application/json\" --connect-timeout 1 -m '+ str(1) + ' ' + url + \" -d '\"+ data + \"'\" +\" &\"\n\tp = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n\t(output, err) = p.communicate()\n\tresponse=[output,err]\n\treturn response;\n\ndef shieldsquare_post_sync(url, params, timeout):\n\tdata = urllib.quote(params)\n\tstorage = StringIO()\n\tc = pycurl.Curl()\n\tc.setopt(pycurl.URL, url)\n\tc.setopt(pycurl.TIMEOUT_MS, timeout)\n\tc.setopt(pycurl.NOSIGNAL, 1)\n\tc.setopt(pycurl.VERBOSE, False)\n\tc.setopt(pycurl.WRITEFUNCTION, storage.write)\n\tc.setopt(pycurl.HTTPHEADER, ['Accept: application/json'])\n\tc.setopt(pycurl.POST, 1)\n\tc.setopt(pycurl.POST, 1)\n\tc.setopt(pycurl.POSTFIELDS, data)\n\ttry:\n\t\tresponse=c.perform()\n\t\tcontent = storage.getvalue()\n\t\tresponse=[content,c.getinfo(c.RESPONSE_CODE)]\n\texcept:\n\t\tresponse=[\"Request Timed Out/Server Not Reachable\",\"0\"]\n\tc.close()\n\treturn response;\n\ndef microtime(get_as_float = False):\n\tif get_as_float:\n\t\treturn time.time();\n\telse:\n\t\treturn '%f %d' % math.modf(time.time());\n\ndef shieldsquare_generate_pid(shieldsquare_sid,request):\n\tt=microtime()\n\ttm=t.split(\" \")\n\tp1,p2,p3,p4,p5 = shieldsquare_sid.split(\"-\")\n\tsid_min = num = int(p4,16);\n\trmstr1= \"00000000\" + \"%x\" % int(tm[1])\n\trmstr2= \"0000\" + \"%x\" % int(round(float(tm[0]) * 65536))\n\treturn '%08s-%04x-%04s-%04s-%04x%04x%04x' % (shieldsquare_IP2Hex(request),sid_min,rmstr1[-4:],rmstr2[-4:],\n\t\t\trandom.randint(0,0xffff), random.randint(0,0xffff), random.randint(0,0xffff));\n\n\ndef shieldsquare_IP2Hex(request):\n\thexx=\"\"\n\tip = request.META.get(ss2_config._ipaddress)\n\tpart=ip.split('.')\n\thexx=\"\"\n\tfor i in range(0,len(part)):\n\t\tdt = \"0\" + \"%x\" % int(part[i])\n\t\thexx = hexx + dt[-2:]\n\n\treturn hexx;\n\ndef set_default(obj):\n\tif isinstance(obj, set):\n\t\treturn list(obj)\n\traise TypeError\n\n\ndef set_cookie_in_response(response,cookie_values_dict):\n\t\n\tfor cookie_name in cookie_values_dict:\n\t\tresponse.set_cookie(cookie_name,cookie_values_dict[cookie_name][\"value\"], max_age=cookie_values_dict[cookie_name][\"age\"])\n\treturn response","sub_path":"connector/ss2.py","file_name":"ss2.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"426706404","text":"\"\"\"\r\n\n\nImagine you took all the numbers between 0 and `n` and concatenated them\ntogether into a long string. How many digits are there between 0 and `n`?\nWrite a function that can calculate this.\n\nThere are 0 digits between 0 and 1, there are 9 digits between 0 and 10 and\nthere are 189 digits between 0 and 100.\n\n### Examples\n\n digits(1) ➞ 0\n \n digits(10) ➞ 9\n \n digits(100) ➞ 189\n \n digits(2020) ➞ 6969\n\n### Notes\n\nThe numbers are going to be rather big so creating that string won't be\npractical.\n\n\"\"\"\r\n\ndef digits(num):\n s = 0\n k = 1\n occ = 9\n n = 10\n while num >= n:\n s += k*occ\n k += 1\n occ *= 10\n n *= 10\n return s + (num-n//10)*k\n\n","sub_path":"j9zed4GnykS48W6vh_3.py","file_name":"j9zed4GnykS48W6vh_3.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"424710238","text":"\"\"\"opengenusWeb URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom django.views.generic import TemplateView\r\n\r\nfrom pages.views import InternListView,InternDetailView\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', TemplateView.as_view(template_name=\"home.html\"), name=\"home\"),\r\n path('index', TemplateView.as_view(template_name=\"index.html\")),\r\n path('cosmos', TemplateView.as_view(template_name=\"cosmos.html\"), name=\"cosmos\"),\r\n path('quark', TemplateView.as_view(template_name=\"quark.html\"), name=\"quark\"),\r\n path('search', TemplateView.as_view(template_name=\"search.html\"), name=\"search\"),\r\n path('iq', TemplateView.as_view(template_name=\"iq.html\"), name=\"iq\"),\r\n path('discuss', TemplateView.as_view(template_name=\"discuss.html\"), name=\"discuss\"),\r\n # path('intern/', internDetailView),\r\n path('school/', include('schools.urls', namespace=\"schools\")),\r\n path('intern/', InternListView.as_view()),\r\n path('intern/', InternDetailView.as_view()),\r\n path('intern/search/', InternListView.as_view()),\r\n path('faq', TemplateView.as_view(template_name=\"faq.html\"), name=\"faq\"),\r\n\r\n path('tinymce/', include('tinymce.urls')),\r\n]\r\n\r\nif settings.DEBUG:\r\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n","sub_path":"opengenusWeb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356376318","text":"from kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.button import Button\r\nfrom kivy.clock import Clock\r\nfrom kivy.uix.label import Label\r\nfrom kivy.factory import Factory\r\nfrom kivy.uix.relativelayout import RelativeLayout\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.stacklayout import StackLayout\r\nfrom kivy.properties import StringProperty, DictProperty, ListProperty\r\nimport SimulateOutside\r\n\r\n\"\"\"\r\nThis is a custom class for dynamically creating buttons and closing them.\r\nThis is for tag lists in our program.\r\n\"\"\"\r\n\r\nBuilder.load_string('''\r\n:\r\n size_hint: (None, None)\r\n text: self.ourText\r\n width: self.texture_size[0] + 69\r\n height: 29\r\n pos: (50, 300)\r\n background_normal: ''\r\n ArtistLayout:\r\n size: (root.width, root.height)\r\n pos: self.parent.pos\r\n Label:\r\n size_hint: (None, 1)\r\n width: self.texture_size[0] + 40\r\n pos: (0, 0)\r\n canvas.before:\r\n Color:\r\n rgba: .5, .5, .5, 1\r\n Rectangle:\r\n pos: self.pos\r\n size: self.size\r\n text: root.ourText\r\n background_normal: '..\\pics\\BlankUpTiny.png'\r\n background_down: '..\\pics\\BlankDownTiny.png'\r\n group: 'test'\r\n Button:\r\n size_hint: (None, 1)\r\n width: 29\r\n pos: (root.texture_size[0] + 40, 0)\r\n background_normal: '..\\pics\\closeUpTiny.png'\r\n background_down: '..\\pics\\closeDownTiny.png'\r\n group: 'test'\r\n\r\n:\r\n\tspacing: 5, 5\r\n''')\r\n\r\nclass ReadWriteArtistList(StackLayout):\r\n c_taglist = ['cat', 'funny', 'jump', 'fail', 'animals']\r\n dynamic_ids = DictProperty({}) # declare class attribute, dynamic_ids\r\n\r\n def __init__(self, **kwargs):\r\n super(ReadWriteArtistList, self).__init__(**kwargs)\r\n Clock.schedule_once(lambda dt: self.populateList(), timeout=0.1)\r\n\r\n def getTarget(self, p_arg):\r\n # gets the id of the last custom button. Used to accessing it.\r\n return [x for x in self.children if str(x.__class__.__name__) == p_arg]\r\n\r\n def populateList(self):\r\n #this adds all the tags to our tag list\r\n f_taglist = SimulateOutside.getArtists(SimulateOutside.getActiveFilePath())\r\n for i_tag in f_taglist:\r\n #similar to addNewArtist() but doesn't just includes pre-existing artists in the gui\r\n i_id = \"Tag:\" + i_tag\r\n i_newArtist = DynamicTag(id=i_id,\r\n ourText=i_tag)\r\n self.add_widget(i_newArtist)\r\n self.dynamic_ids[i_id] = i_newArtist\r\n i_newArtist.children[0].children[0].bind(on_release=self.delayedClose)\r\n\r\n def wipeArtistList(self):\r\n #this just cleans the gui of artists. 
It doesn't actually edit any data\r\n for item in self.dynamic_ids:\r\n print(\"wipeArtistList():\", item)\r\n\r\n def addNewArtist(self, p_arg):\r\n f_id = \"Tag:\"+p_arg\r\n f_newArtist = DynamicTag(id=f_id,\r\n ourText=p_arg)\r\n\r\n #this adds the tag to the file before we add it to our gui\r\n #this should theoretically stop the function if we tried adding a duplicate tag\r\n try:\r\n if SimulateOutside.addArtist(SimulateOutside.getActiveFilePath(), p_arg)==False:\r\n print(\"ReadWriteArtistList.addNewArtist(): could not add tag \\\"\", p_arg, \"\\\"\", sep='')\r\n return False\r\n #TODO: remove this part once the outside function can reliably test if we're adding a duplicate tag\r\n if f_id in self.dynamic_ids:\r\n # We don't want duplicate tags\r\n print(\"ReadWriteArtistList.addNewArtist(): We already have this tag\")\r\n return False\r\n except:\r\n print(\"ReadWriteArtistList.addNewArtist(): error adding tag\")\r\n return False\r\n\r\n self.add_widget(f_newArtist)\r\n self.dynamic_ids[f_id] = f_newArtist\r\n f_newArtist.children[0].children[0].bind(on_release=self.delayedClose)\r\n return True\r\n\r\n def closeTarget(self, p_targetID):\r\n #removes a tag from the file and our user interface\r\n try:\r\n # this first tries to remove the tag from the file using out metadata library\r\n if SimulateOutside.removeArtist(SimulateOutside.getActiveFilePath(), p_targetID[4:]):\r\n # if that succeeds, we try removing it from the list of dynamic tags displayed\r\n f_target = self.dynamic_ids[p_targetID]\r\n #print(\"ReadWriteArtistList.closeTarget(): closing\", p_targetID)\r\n if f_target != None:\r\n self.remove_widget(f_target)\r\n del self.dynamic_ids[p_targetID]\r\n except KeyError:\r\n print(\"ReadWriteArtistList.closeTarget(): key not in dictionary. 
Weird\")\r\n print(\"\\tIDs:\", self.dynamic_ids)\r\n print(\"\\ttried:\", p_targetID)\r\n return True\r\n\r\n def delayedClose(self, arg):\r\n #print(\"ReadWriteArtistList.delayedClose() arg:\\t\", arg)\r\n #print(\"ReadWriteArtistList.delayedClose() type:\\t\", type(arg))\r\n #without lambda here, this would pass the timeout arguement to our function.\r\n Clock.schedule_once(lambda dt: self.closeTarget(arg.parent.parent.id), timeout=0.01)\r\n\r\n def getArtistList(self):\r\n f_entrys = []\r\n for entry in self.dynamic_ids:\r\n f_entrys.append(self.dynamic_ids[entry].ourText)\r\n return f_entrys\r\n\r\nclass DynamicTag(Label):\r\n ourText = StringProperty(\"\")\r\n\r\n def __init__(self, **kwargs):\r\n super(DynamicTag, self).__init__(**kwargs)\r\n\r\n def debugSize(self):\r\n f_textBtn= self.children[0].children[1]\r\n f_closeBtn = self.children[0].children[0]\r\n print(\"FrameStartX:\", self.pos[0], end=\"\\t\\t\\t\")\r\n print(\"FrameEndX:\", self.pos[0] + self.width)\r\n print(\"\\tTextBtnStartX:\", f_textBtn.pos[0], end=\"\\t\")\r\n print(\"\\tTextBtnEndX:\", f_textBtn.pos[0]+f_textBtn.width)\r\n print(\"\\tCloseBtnStartX:\", f_closeBtn.pos[0], end=\"\\t\")\r\n print(\"\\tCloseBtnEndX:\", f_closeBtn.pos[0]+f_closeBtn.width)\r\n print()\r\n print(\"TextBtnWidthX:\", f_textBtn.width, end=\"\\t\\t\")\r\n print(\"TextBtnTextureX:\", f_textBtn.texture_size[0], end=\"\\t\")\r\n print(\"TextBtnExtraX:\", f_textBtn.width-f_textBtn.texture_size[0])\r\n print()\r\n print(\"FrameWidth:\", self.width, end=\"\\t\\t\")\r\n print(\"TotalButtonWidth:\", f_textBtn.width + f_closeBtn.width)\r\n print(\"FrameBlackSpace:\", self.width-(f_textBtn.width + f_closeBtn.width))\r\n\r\nclass ArtistLayout(RelativeLayout):\r\n def __init__(self, **kwargs):\r\n super(ArtistLayout, self).__init__(**kwargs)\r\n\r\nFactory.register('ReadWriteArtistList', cls=ReadWriteArtistList)","sub_path":"guiTesting/ReadWriteArtistList.py","file_name":"ReadWriteArtistList.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"323405993","text":"import pandas as pands\r\nimport matplotlib.pyplot as grph_plot\r\n\r\ndata_frame = pands.read_csv('visitors_resources.csv')\r\ndata_frame.head(12)\r\ndata_frame = data_frame.set_index('2019 Year')\r\n\r\n\r\nfig, axes_obj = grph_plot.subplots()\r\ndata_frame.plot(kind='area', ax=axes_obj)\r\ngrph_plot.ylabel('visitors')\r\naxes_obj.grid(color='gray', linestyle='-', alpha=0.3)","sub_path":"visitors_resources.py","file_name":"visitors_resources.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"177550693","text":"# 981. Time Based Key-Value Store\n\n\nclass TimeMap:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n # instance attribute, so separate TimeMap objects do not share state\n self.time_map = {}\n\n def set(self, key: str, value: str, timestamp: int) -> None:\n if key not in self.time_map:\n self.time_map[key] = []\n self.time_map[key].append([value, timestamp])\n\n def get(self, key: str, timestamp: int) -> str:\n if key not in self.time_map:\n return \"\"\n value_arr = self.time_map[key]\n if timestamp >= value_arr[0][1]:\n l = 0\n r = len(value_arr)-1\n while l <= r:\n mid = l + (r - l) // 2\n if value_arr[mid][1] == timestamp:\n return value_arr[mid][0]\n elif value_arr[mid][1] > timestamp:\n r = mid - 1\n else:\n l = mid + 1\n return value_arr[r][0]\n return \"\"\n\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)","sub_path":"answers/TimeMap.py","file_name":"TimeMap.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"341282111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/7/16 16:59\n\n@author: vincent\nGiven a sorted linked list, delete all duplicates such that each element appear only once.\n\nExample 1:\n\nInput: 1->1->2\nOutput: 1->2\nExample 2:\n\nInput: 1->1->2->3->3\nOutput: 1->2->3\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None:\n return head\n head.next = self.deleteDuplicates(head.next)\n return head.next if head.val == head.next.val else head","sub_path":"双指针/83. Remove Duplicates from Sorted List.py","file_name":"83. Remove Duplicates from Sorted List.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"165352209","text":"\n__author__ = \"Timothy Alexander, Joshua Gisi\"\n__copyright__ = \"Copyright 2019, Project Money Tree\"\n__version__ = \"0.0.1\"\n__email__ = \"TJEnterprises2019@gmail.com\"\n__status__ = \"Development\"\n\nimport os\nfrom Support.Companion import sqlCompanion as companion\nfrom Support.treasureMap import Map\nfrom tkinter import *\nimport configparser\n\n\n\nclass backTest:\n\n def __init__(self, M1candles):\n \"\"\"\n Initiate class specific data. Make sure to edit the config file as well.\n :param M1candles: An array of candle objects\n \"\"\"\n self.M1Candles = M1candles\n self.markers = []\n self.purchases = []\n self.entryState = \"No Order\"\n self.riskPrice = -1\n self.sl = -1\n self.money = 100\n self.moneyInTrade = 0\n self.leverage = 20\n self.risk = 0.02\n\n self.configs = configparser.ConfigParser()\n config_file = os.path.join(os.path.dirname(__file__), 'HistoryAnalysisConfig.ini')\n self.configs.read(config_file)\n\n self.openConditionObj = []\n self.closeConditionObj = []\n self.initClasses(list(self.configs.get('conditions', 'openConditions').split(', ')),list(self.configs.get('conditions', 'closeConditions').split(', ')))\n\n self.delayCandles = self.configs.getint('control', 'delayCandles')\n self.run(M1candles)\n\n\n\n\n\n\n\n def run(self, M1candles):\n \"\"\"\n A buying/selling simulator for back testing Forex strategies\n :param M1candles: An array of candle objects\n :return:\n \"\"\"\n candlesUpToCurrent = []\n for curCandle in M1candles:\n candlesUpToCurrent.append(curCandle)\n\n\n checkConditions = self.openConditions(curCandle, candlesUpToCurrent)\n\n if self.entryState == \"No Order\" and (checkConditions == \"long\" or checkConditions == \"short\") and self.delayCandles <= 0:\n self.entryState = \"Order Fulfilled\"\n from Conditions.createOrder import condition as createOrder\n order = createOrder(curCandle, candlesUpToCurrent, checkConditions, backtestRef=self)\n self.sl = order.sl\n self.purchases.append({'openDT': curCandle.datetime, 'openPrice': curCandle.close, 'pos': checkConditions})\n for con in self.closeConditionObj:\n con.setup(curCandle, candlesUpToCurrent, order.position)\n\n elif self.entryState == \"Order Fulfilled\" and self.closeConditions(curCandle, candlesUpToCurrent, order.position):\n self.entryState = \"No Order\"\n\n if self.delayCandles >= 0: self.delayCandles -= 1\n\n self.paintMap()\n\n\n\n\n\n\n\n def initClasses(self, openConditionList, closeConditionList):\n \"\"\"\n Run the open/close condition classes specified in the config.ini to initiates their data\n :param openConditionList: A list of open condition classes that all must be met in order for a new positions to occur\n :param closeConditionList: A list of close condition classes that all must be met in order for a close position to occur\n :return:\n \"\"\"\n for con in openConditionList:\n name = \"condition\"\n package = \"Conditions.\"+con\n obj = getattr(__import__(package,fromlist=[name]), name)\n self.openConditionObj.append(obj(self))\n\n for con in closeConditionList:\n name = \"condition\"\n package = \"Conditions.\" + con\n obj = getattr(__import__(package, fromlist=[name]), name)\n self.closeConditionObj.append(obj(self))\n\n\n\n\n\n\n\n def openConditions(self, curCandle, candlesUpToCurrent):\n \"\"\"\n Check if the open conditions are met\n :param curCandle:\n :param candlesUpToCurrent:\n :return:\n \"\"\"\n votesToBuy = 0\n votesToSell = 0\n for con in self.openConditionObj:\n checkConditions = con.run(curCandle, 
candlesUpToCurrent)\n if checkConditions == 'long':\n votesToBuy += 1\n elif checkConditions == 'short':\n votesToSell += 1\n elif checkConditions == 'NA':\n votesToSell += 0\n votesToBuy += 0\n else:\n votesToBuy = -10000\n votesToSell = -10000\n\n if votesToSell == 0 and votesToBuy == 0:\n return False\n elif votesToSell == 0 and votesToBuy > 0:\n return \"long\"\n elif votesToSell > 0 and votesToBuy == 0:\n return \"short\"\n\n return False\n\n\n\n\n\n\n\n def closeConditions(self, curCandle, candlesUpToCurrent, position):\n \"\"\"\n Check if the close conditions are met\n :param curCandle:\n :param candlesUpToCurrent:\n :return:\n \"\"\"\n for con in self.closeConditionObj:\n if not con.run(curCandle, candlesUpToCurrent, position):\n return False\n return True\n\n\n\n\n\n\n\n def addMarker(self, datetime, name, color, text=None):\n \"\"\"\n Adds a marker to an array which will end up in the Treasure map class to be painted\n :param datetime:\n :param name: type of marker\n :param color:\n :param text:\n :return:\n \"\"\"\n self.markers.append({'datetime':datetime, 'type':name, 'color':color, 'text':text})\n\n\n\n\n\n\n\n def addHorizontalLine(self, datetime, name, pipOffset=0, color=\"RED\", price=0):\n \"\"\"\n Adds a marker to an array which will end up in the Treasure map class to be painted\n :param datetime:\n :param name:\n :param pipOffset:\n :param color:\n :param price:\n :return:\n \"\"\"\n self.markers.append({'datetime':datetime, 'type':name, 'pipOffset': pipOffset, 'color': color, 'price':price})\n\n\n\n\n\n\n\n def paintMap(self):\n \"\"\"\n Create a tKinter canvas and populate it\n :return:\n \"\"\"\n root = Tk()\n Map(root, self.M1Candles, self.purchases, markers=self.markers, indicators=[]) # {'name':'EMA(100)', 'color':'BLUE'}, {'name':'EMA(150)', 'color':'RED'}\n root.mainloop()\n\n\n\n\n\nif __name__ == '__main__':\n Data = companion(\"C:\\\\Users\\\\treeb\\\\OneDrive\\\\Desktop\\\\BackTestData.db\")\n dataSet = Data.getDataByDatetime(\"EUR_USD\", printReturn=False, granularity='M1',\n startDatetime='2017-01-05 02:00:00', endDatetime='2018-04-06 02:05:00', indicators=[])\n\n\n backTest(dataSet)\n Data.closeConnection()","sub_path":"BacktestingSoftware/HistoryAnalysis.py","file_name":"HistoryAnalysis.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461169600","text":"from pathlib import Path\n\ndef list_files(path,valid_extensions,recursive=True):\n \"\"\"\n List all files in a directory given extensions.\n \"\"\"\n if valid_extensions is None:\n # no extension filter supplied, match everything\n exts_pattern=\"*.*\"\n else:\n exts_pattern = \"*.*[\"+\"|\".join(valid_extensions)+\"]\"\n if recursive:\n files = list(Path(path).rglob(exts_pattern))\n return files\n else:\n files = list(Path(path).glob(exts_pattern))\n return files\n\n\ndef list_images(path,valid_extensions=[\"jpg\", \"jpeg\", \"png\", \"bmp\", \"tif\", \"tiff\"],recursive=True):\n \"\"\"\n List all images in a directory recursively.\n pass required extensions to valid_extensions parameter to filter the files.\n \"\"\"\n images_list=list_files(path,valid_extensions,recursive=recursive)\n return images_list\n\nif __name__ == \"__main__\":\n import argparse\n myparser = argparse.ArgumentParser(description='List all images in a directory.')\n myparser.add_argument('path',metavar='path',type=str,help='Path to the directory of Images.')\n args=myparser.parse_args()\n print(list_images(args.path))","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"649580909","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n #deal with inputs which have invalid characters in the middle. eg: \" -0012a42\"\n for i,c in enumerate(str.strip()):\n if (not c.isdigit()) and (c not in '+-'):\n str=str.strip()[:i]\n break\n try:\n ret=int(str)\n return ret if -2147483648<=ret<=2147483647 else (-2147483648,2147483647)[ret>0]\n except:\n return 0\n","sub_path":"8_StringtoInteger(atoi)_M.py","file_name":"8_StringtoInteger(atoi)_M.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"3385748","text":"#!/usr/bin/env python3\n\"\"\"Autoencoders\"\"\"\n\nimport tensorflow.keras as keras\n\n\n# reparameterization trick\n# instead of sampling from Q(z|X), sample epsilon = N(0,I)\n# z = z_mean + sqrt(var) * epsilon\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling from an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n\n z_mean, z_log_var = args\n batch = keras.backend.shape(z_mean)[0]\n dim = keras.backend.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = keras.backend.random_normal(shape=(batch, dim))\n return z_mean + keras.backend.exp(0.5 * z_log_var) * epsilon\n\n\ndef autoencoder(input_dims, hidden_layers, latent_dims):\n \"\"\"\n :param input_dims:is an integer containing\n the dimensions of the model input\n :param hidden_layers: is a list containing the number\n of nodes for each hidden layer in the encoder, respectively\n :param latent_dims: is an integer containing the\n dimensions of the latent space representation\n :return:encoder, decoder, auto\n \"\"\"\n input_image = keras.Input(shape=(input_dims,))\n output = keras.layers.Dense(hidden_layers[0],\n activation='relu')(input_image)\n z_mean = keras.layers.Dense(latent_dims)(output)\n z_log_var = keras.layers.Dense(latent_dims)(output)\n z = keras.layers.Lambda(sampling,\n output_shape=(latent_dims, ))([z_mean, z_log_var])\n\n input_decoder = keras.Input(shape=(latent_dims,))\n out_decoder = keras.layers.Dense(hidden_layers[-1],\n activation='relu')(input_decoder)\n\n for layer in range(len(hidden_layers) - 2, -1, -1):\n out_decoder = keras.layers.Dense(hidden_layers[layer],\n activation='relu')(out_decoder)\n decoder_out = keras.layers.Dense(input_dims,\n activation='sigmoid')(out_decoder)\n\n encoder = keras.models.Model(inputs=input_image,\n outputs=[z, z_mean, z_log_var])\n decoder = keras.models.Model(inputs=input_decoder,\n outputs=decoder_out)\n\n full_encoder = encoder(input_image)[0]\n full_decoder = decoder(full_encoder)\n auto = keras.models.Model(inputs=input_image,\n outputs=full_decoder)\n\n def loss(y_in, y_out):\n \"\"\" custom loss function \"\"\"\n reconstruction_loss = keras.backend.binary_crossentropy(y_in, y_out)\n reconstruction_loss = keras.backend.sum(reconstruction_loss, axis=1)\n kl_loss = (1 + z_log_var - keras.backend.square(z_mean)\n - keras.backend.exp(z_log_var))\n kl_loss = -0.5 * keras.backend.sum(kl_loss, axis=1)\n return reconstruction_loss + kl_loss\n\n auto.compile(optimizer='Adam',\n loss=loss)\n\n return encoder, decoder, auto\n","sub_path":"unsupervised_learning/0x04-autoencoders/3-variational.py","file_name":"3-variational.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"645562369","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\nfrom flask import Module, flash, request, g, current_app, abort, redirect, \\\n url_for, session, render_template\nfrom rtiss import app\nimport rtiss.utils.db as db\nimport rtiss.utils.auth as auth\n\n@app.route(\"/message/index\")\n@auth.login_required\ndef message_index():\n page= int(request.args.get('page')) if request.args.get('page') else 1\n username=session['user']['username']\n users=db.select_all(\"SELECT username,name FROM T_User\")\n messages = db.select_all(\"SELECT T_Message.*,T_User.name FROM T_Message,T_User WHERE ( public=TRUE OR %s=ANY(at) \\\n OR author=%s ) AND T_Message.author=T_User.username ORDER BY id DESC LIMIT 10 OFFSET %s;\",\\\n (username,username,10*(page-1)))\n return render_template('message/index.html',messages=messages,users=users,page=page)\n\n@app.route(\"/message/new\", methods=(\"POST\",))\n@auth.login_required\ndef message_new():\n author=session['user']['username']\n if 'public' in request.form:\n public=True\n db.execute(\"INSERT INTO T_Message(author,content,public) VALUES(%s,%s,TRUE);\",\\\n (author,request.form['content']))\n else:\n public=False\n users = '{'+','.join(request.form.getlist('usernames'))+'}'\n db.execute(\"INSERT INTO T_Message(author,content,public,at,call) VALUES(%s,%s,False,%s,%s);\",\\\n (author,request.form['content'],users,users))\n return redirect(url_for('message_index'))\n\n@app.route(\"/message/<int:id>/delete\", methods=(\"POST\",))\n@auth.login_required\ndef message_delete(id):\n db.execute(\"DELETE FROM T_Message WHERE id=%s\",[id])\n return redirect(url_for('message_index'))\n\n@app.route(\"/message/<int:id>/read\", methods=(\"POST\",))\n@auth.login_required\ndef message_read(id):\n username=session['user']['username']\n db.execute(\"UPDATE T_Message SET call=ARRAY_REMOVE(call,%s) WHERE id=%s\",(username,id))\n return redirect(url_for('message_index'))\n\n@app.route(\"/message/manage\")\n@auth.superadmin_required\ndef message_manage():\n messages = db.select_all(\"SELECT T_Message.*,T_User.name FROM T_Message,T_User WHERE T_Message.author=T_User.username\")\n return render_template('message/manage.html',messages=messages)\n","sub_path":"rtiss/views/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"484733291","text":"import util\nimport numpy as np\nimport math\n\ndef pbc_fix(coordinate_placeholder, cellparameter):\n if abs(coordinate_placeholder) > (cellparameter/2.0):\n if coordinate_placeholder >= 0:\n new_coordinate = coordinate_placeholder - cellparameter\n else:\n new_coordinate = coordinate_placeholder + cellparameter\n else:\n new_coordinate = coordinate_placeholder\n return new_coordinate\n\ndef move(molecule, cellparameter):\n new_atom_position = []\n move_distance = np.random.random_sample(3)\n sign = np.random.random_integers(1,2)\n if sign == 2:\n move_distance = -1 * move_distance\n move_distance = cellparameter * move_distance\n for atom in range(0, len(molecule)):\n position_placeholder = []\n position_placeholder.append(molecule[atom][0])\n for coordinate in range(1, len(molecule[atom])):\n coordinate_placeholder = float(molecule[atom][coordinate]) + move_distance[coordinate-1]\n new_coordinate = pbc_fix(coordinate_placeholder, cellparameter)\n position_placeholder.append(new_coordinate)\n new_atom_position.append(position_placeholder)\n log_text = 'The molecule has undergone a random move of ' + str(move_distance[0]) + ' angstrom in the x direction, ' + str(move_distance[1]) + ' angstrom in the y direction, and ' + str(move_distance[2]) + ' angstrom in the z direction. \\n'\n return new_atom_position, log_text\n\ndef angle(molecule, cellparameter):\n axis_choose = np.random.random_integers(1,3)\n angle_choose = np.random.uniform(0.0, np.pi)\n sign = np.random.random_integers(1,2)\n if sign == 2:\n angle_choose = -1 * angle_choose\n new_atom_position = []\n normalised_for_head_atoms = []\n head_atoms = molecule[0][1:4]\n for temp in range(0, len(molecule)):\n normalised_for_head_atoms_build = []\n normalised_for_head_atoms_build.append(molecule[temp][0])\n normalised_for_head_x = molecule[temp][1] - head_atoms[0]\n normalised_for_head_y = molecule[temp][2] - head_atoms[1]\n normalised_for_head_z = molecule[temp][3] - head_atoms[2]\n normalised_for_head_x = pbc_fix(normalised_for_head_x, cellparameter)\n normalised_for_head_y = pbc_fix(normalised_for_head_y, cellparameter)\n normalised_for_head_z = pbc_fix(normalised_for_head_z, cellparameter)\n normalised_for_head_atoms_build.append(normalised_for_head_x)\n normalised_for_head_atoms_build.append(normalised_for_head_y)\n normalised_for_head_atoms_build.append(normalised_for_head_z)\n normalised_for_head_atoms.append(normalised_for_head_atoms_build)\n centre_of_mass = util.cen_of_mas(normalised_for_head_atoms)\n normalised_atoms = []\n for temp in range(0, len(molecule)):\n normalised_atoms_build = []\n normalised_atoms_build.append(molecule[temp][0])\n normalised_x = normalised_for_head_atoms[temp][1] - centre_of_mass[0]\n normalised_y = normalised_for_head_atoms[temp][2] - centre_of_mass[1]\n normalised_z = normalised_for_head_atoms[temp][3] - centre_of_mass[2]\n normalised_atoms_build.append(normalised_x)\n normalised_atoms_build.append(normalised_y)\n normalised_atoms_build.append(normalised_z)\n normalised_atoms.append(normalised_atoms_build)\n if axis_choose == 1:\n axis_label = 'X'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (normalised_atoms[atom][1]) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (float(normalised_atoms[atom][2]) * np.cos(angle_choose) - float(normalised_atoms[atom][3]) * np.sin(angle_choose)) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = 
(float(normalised_atoms[atom][2]) * np.sin(angle_choose) + float(normalised_atoms[atom][3]) * np.cos(angle_choose)) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n new_atom_position.append(new_atom_position_build)\n elif axis_choose == 2:\n axis_label = 'Y'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (float(normalised_atoms[atom][3]) * np.sin(angle_choose) + float(normalised_atoms[atom][1]) * np.cos(angle_choose)) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (normalised_atoms[atom][2]) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = (float(normalised_atoms[atom][3]) * np.cos(angle_choose) - float(normalised_atoms[atom][1]) * np.sin(angle_choose)) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n new_atom_position.append(new_atom_position_build)\n elif axis_choose == 3:\n axis_label = 'Z'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (float(normalised_atoms[atom][1]) * np.cos(angle_choose) - float(normalised_atoms[atom][2]) * np.sin(angle_choose)) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (float(normalised_atoms[atom][1]) * np.sin(angle_choose) + float(normalised_atoms[atom][2]) * np.cos(angle_choose)) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = (normalised_atoms[atom][3]) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n new_atom_position.append(new_atom_position_build)\n log_text = 'The molecule has undergone a random rotation of ' + str(angle_choose) + ' radians in the ' + axis_label + ' axis. \\n'\n return new_atom_position, log_text\n\n\ndef randomise(atoms, num, cellparameter):\n new_atom_position = []\n burn_atoms = atoms\n for duplicate in range(0, int(num)):\n moved_atoms, log_text = move(burn_atoms, cellparameter)\n angle_atoms, log_text = angle(moved_atoms, cellparameter)\n new_atom_position.append(angle_atoms)\n return new_atom_position\n","sub_path":"modules/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"623005348","text":"'''\n14. Longest Common Prefix\nEasy\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\nNote:\n\nAll given inputs are in lowercase letters a-z.\n'''\n\nstrs=[\"flower\",\"flow\",\"flight\"]\nres= ''\ntemp={}\ni=0\nwhile True:\n try:\n temp = [s[i] for s in strs]\n if len(set(temp))==1:\n res= res + temp[0]\n i+=1\n else:\n break\n except:\n break\nprint (res)\n\n\n'''\nstrs=[\"flower\",\"flow\",\"flight\"]\nresult = '' # initialize\ni = 0 # index of the character position currently being compared\nwhile True:\n try:\n temp = [s[i] for s in strs] # collect the i-th character of every string into a list\n if len(set(temp)) == 1: # if they are all the same\n result = result+temp[0] # append the character to the result\n i+=1 # advance the position\n else:\n break # otherwise exit the loop\n except:\n break # on any error exit the loop, e.g. for input [\"\"], s[i] in temp = [s[i] for s in strs] raises because s is empty and has no s[0]\nprint(result)\n\n'''\n","sub_path":"2.14. Longest Common Prefix.py","file_name":"2.14. Longest Common Prefix.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"193341287","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0017_auto_20150614_1342'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='category',\n name='parent_category',\n ),\n migrations.RemoveField(\n model_name='certificate',\n name='name',\n ),\n migrations.RemoveField(\n model_name='certificate',\n name='symbol',\n ),\n migrations.AddField(\n model_name='certificate',\n name='type',\n field=models.CharField(max_length=4, default='NI', choices=[('CC', 'Certificate of Completion'), ('CA', 'Certificate of Accomplishment'), ('HCC', 'Honor Code Certificate'), ('VC$', 'Verified Certificate'), ('VCA$', 'Verified Certificate of Accomplishment'), ('SA', 'Statement of Accomplishment'), ('SP$', 'Statement of Participation'), ('CM', 'Certificate of Mastery'), ('NI', 'No Information About Certificate Available'), ('NC', 'No Certificate')]),\n ),\n migrations.AddField(\n model_name='mooc',\n name='platform_key',\n field=models.CharField(blank=True, max_length=150, null=True),\n ),\n migrations.AlterField(\n model_name='mooc',\n name='certificates',\n field=models.ManyToManyField(to='courses.Certificate'),\n ),\n migrations.DeleteModel(\n name='Category',\n ),\n ]\n","sub_path":"courses/migrations/0018_auto_20150614_1452.py","file_name":"0018_auto_20150614_1452.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"260036418","text":"from pwn import *\nfrom subprocess import Popen, PIPE\n\nimport random\nimport re\n\ncontext(arch = 'i386', os = 'linux')\n\n\nr = remote(\"pwn.sunshinectf.org\", 20001)\np = Popen(['./a.out'], stdin=PIPE, stdout=PIPE, stderr=PIPE)\noutput, err = p.communicate(b\"\")\n\n# communicate() returns bytes under Python 3, so split and compare with bytes literals\nfor x in output.split(b\" \"):\n r.sendline(x)\n result = r.recvuntil(b\".\")\n if b\"How\" in result:\n break\nr.interactive()","sub_path":"bePrepared/prepared.py","file_name":"prepared.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"19995340","text":"RESOURCE_COLLECTION = 'competences_types'\n\n_schema = {\n\n 'attributes': {'type': 'list'},\n 'categories': {'type': 'list'},\n 'checked_by': {'type': 'string'}, # String?\n 'children': {'type': 'list'},\n 'code': {'type': 'string'},\n 'colorcode': {'type': 'string'},\n 'id': {'type': 'integer',\n 'required': True},\n 'meta_type': {'type': 'string'},\n 'type_id': {'type': 'integer'},\n 'type_sa_id': {'type': 'integer'},\n 'description': {'type': 'string'},\n 'duration': {'type': 'string'},\n 'durations': {'type': 'list'},\n 'files': {'type': 'list'},\n 'instructors': {'type': 'list'},\n 'languages_available': {'type': 'list'},\n 'locale': {'type': 'string'},\n 'max_age': {'type': 'string'},\n 'min_age': {'type': 'string'},\n 'modified': {'type': 'string'},\n 'organisations': {'type': 'list'},\n 'pre_requisites': {'type': 'list'},\n 'short_description': {'type': 'string'},\n 'sports': {'type': 'list'},\n 'title': {'type': 'string'},\n 'valid_for': {'type': 'string'},\n 'weight': {'type': 'integer'},\n\n}\n\ndefinition = {\n 'url': 'competences/types',\n 'item_title': 'Competences Types',\n 'datasource': {'source': RESOURCE_COLLECTION,\n },\n 'additional_lookup': {\n 'url': 'regex(\"[\\d{1,9}]+\")',\n 'field': 'id',\n },\n 'extra_response_fields': ['id'],\n 'versioning': False,\n 'resource_methods': ['GET', 'POST'],\n 'item_methods': ['GET', 'PATCH', 'PUT'],\n 'mongo_indexes': {'type_id': ([('id', 1)], {'background': True}),\n 'title': ([('title', 'text')], {'background': True})\n },\n 'schema': _schema\n}\n","sub_path":"domain/competences_types.py","file_name":"competences_types.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"389789954","text":"import geetest\nfrom selenium import webdriver\n\nWEBDRIVER = \"Chrome\"\n# WEBDRIVER = \"PhantomJS\"\n\nif __name__ == \"__main__\":\n if WEBDRIVER == \"PhantomJS\":\n webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'\n driver = webdriver.PhantomJS(\"./webdriver/PhantomJS\")\n else:\n driver = webdriver.Chrome(\"./webdriver/chromedriver\")\n\n cracker = geetest.GeetestCrack(driver)\n for _ in range(100):\n try:\n cracker.crack()\n except Exception as e:\n print(e)\n","sub_path":"industry_and_commerce.py","file_name":"industry_and_commerce.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"41128464","text":"import configparser\nimport os\nimport inspect\nimport logging\n\n__config = configparser.ConfigParser()\n\n__env = 'unknown'\n\n\ndef __is_empty(any_structure: object) -> object:\n if any_structure:\n return False\n else:\n return True\n\n\ndef load_config(environment) -> object:\n \"\"\"\n\n :param environment:\n :return:\n \"\"\"\n __logger = logging.getLogger(__name__)\n __logger.info(\"inside load configure\")\n\n try:\n cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n # print(\"cwd: \" + cwd)\n path = cwd + \"/\" + \"config.ini\"\n\n global __config\n __config.read(path, encoding='utf-8')\n\n global __env\n __env = environment\n # print(\"env set as: \" + __env)\n except:\n import traceback\n __logger.error(\"UNABLE TO READ CONFIGURATION!!!!!!!!!!!!\")\n __logger.error(traceback.format_exc())\n\n\ndef get_config(key):\n \"\"\"\n\n :param key:\n :return:\n \"\"\"\n global __env\n global __config\n __logger = logging.getLogger(__name__)\n\n __logger.info(\"inside get_config get \" + key + \" for env \" + __env)\n\n config_value = \"unknown\"\n env = __env\n\n if not __is_empty(__config):\n if __config.has_section(env):\n if key in __config[env]:\n config_value = __config[env][key]\n\n __logger.info(\"config_value: \")\n __logger.info(config_value)\n return config_value\n","sub_path":"common/configure/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"519120543","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport numpy as np\nimport os\nimport argparse\nimport time\nfrom PIL import Image\nfrom tensorboardX import SummaryWriter\nfrom unet import Unet\nimport pickle\nimport evaluate\n\n# parsing the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('optFlow', help=' Path to the video testing folder')\nparser.add_argument('dataset', help=' Name of dataset')\n\nargs = parser.parse_args()\n\ndataset_dir = args.optFlow\ndataset = str(args.dataset)\n\n\nfrom tensorboardX import SummaryWriter\n\n\ndef readFlow(name):\n if name.endswith('.pfm') or name.endswith('.PFM'):\n return readPFM(name)[0][:, :, 0:2]\n\n f = open(name, 'rb')\n\n header = f.read(4)\n if header.decode(\"utf-8\") != 'PIEH':\n raise Exception('Flow file header does not contain PIEH')\n\n width = np.fromfile(f, np.int32, 1).squeeze()\n height = np.fromfile(f, np.int32, 1).squeeze()\n flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2 ))\n\n return flow.astype(np.float32)\n\n\ndef loadFlow(name):\n Flow = readFlow(name)\n Flow = torch.from_numpy(Flow)\n Flow = Flow.permute(2,0,1)\n\n optVar = torch.autograd.Variable(Flow.cuda())\n return optVar\n\ndef normalizedData(inTensor):\n inTensor[inTensor > 20.0 ] = 20.0\n inTensor[ inTensor < -20.0] = -20.0\n inTensor = torch.div(inTensor, 20.0)\n return inTensor\n\n\n\nunet = Unet()\nunet = unet.cuda()\nMSE = torch.nn.MSELoss()\noptimizer = torch.optim.SGD(unet.parameters(), 0.0001)\nepochs = 60\n\nwriter = SummaryWriter('logs/'+ str(args.dataset) +'_trained_manual_unsq_sgd')\n\nvideo_names = sorted(os.listdir(dataset_dir + 'optical_flow/'))\n\nnumHis = 4\nk = 0\nlr = 0.0001\nfor epoch in range(0,epochs):\n\n for vid in range(len(video_names)):\n\n opts = sorted(os.listdir(str(dataset_dir) + 'optical_flow/' + video_names[vid] + '/'))\n\n for i in range(numHis, len(opts)):\n k += 1\n opt_tensor1 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-4])\n opt_tensor2 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-3])\n opt_tensor3 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-2])\n opt_tensor4 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-1])\n\n opt_target = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i])\n opt_target = torch.unsqueeze(opt_target,0)\n opt_target = normalizedData(opt_target)\n opt_target = torch.autograd.Variable(opt_target) # torch.from_numpy(nextFlow)\n opt_target = opt_target.cuda()\n\n mergedTensor1 = torch.cat((opt_tensor1, opt_tensor2), 0)\n mergedTensor2 = torch.cat((opt_tensor3, opt_tensor4), 0)\n\n inputFlow = torch.cat((mergedTensor1, mergedTensor2), 0)\n inputFlow = torch.unsqueeze(inputFlow, 0)\n inputFlow = normalizedData(inputFlow)\n inputFlow = torch.autograd.Variable(inputFlow) # torch.from_numpy(prvFlow)\n inputFlow = inputFlow.cuda()\n\n\n prdctFlow = unet(inputFlow)\n\n loss = MSE(prdctFlow, opt_target)\n\n print('Dataset: {} Loss: {} Epoch: {} Iteration: {} Remaning Epoch: {} Learning Rate: {} '.format(args.dataset, loss.item(), epoch + 1, k, epochs - 1, lr))\n writer.add_scalar('train_loss_' + str(args.dataset), loss.item(), k)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if epoch % 3 == 0:\n torch.save(unet, 'checkpoints/'+ str(args.dataset) + '/trained_manual/NET_batch_'+ 
'_epoch_' + str(epoch) + '_' +str(lr) + 'MSE_trainedManual_SGD'+ '.pt')\n\n\n\n\n\n\n\n","sub_path":"code/unet/trmn.py","file_name":"trmn.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"637027833","text":"import numpy as np\nfrom PySide import QtGui, QtCore\nimport sharppy.sharptab as tab\nfrom sharppy.sharptab.constants import *\nimport datetime\n\n## routine written by Kelton Halbert\n## keltonhalbert@ou.edu\n\n__all__ = ['backgroundText', 'plotText']\n\nclass backgroundText(QtGui.QFrame):\n '''\n Handles drawing the background frame onto a QPixmap.\n Inherits a QtGui.QFrame Object.\n '''\n def __init__(self):\n super(backgroundText, self).__init__()\n self.initUI()\n\n def initUI(self):\n '''\n Initializes frame variables such as padding,\n width, height, etc, as well as the QPixmap\n that contains the frame drawing.\n '''\n ## set the frame stylesheet\n self.setStyleSheet(\"QFrame {\"\n \" background-color: rgb(0, 0, 0);\"\n \" border-width: 1px;\"\n \" border-style: solid;\"\n \" border-color: #3399CC;}\")\n ## set the frame padding\n ## set the height/width variables\n self.lpad = 0; self.rpad = 0\n self.tpad = 5; self.bpad = 0\n self.wid = self.size().width()\n self.hgt = self.size().height()\n self.tlx = self.rpad; self.tly = self.tpad\n self.brx = self.wid; self.bry = self.hgt\n ## do a DPI check to make sure\n ## the text is sized properly!\n fsize = np.floor(.06 * self.hgt)\n self.tpad = np.floor(.03 * self.hgt)\n ## set the font, get the metrics and height of the font\n self.label_font = QtGui.QFont('Helvetica')\n self.label_font.setPixelSize(fsize)\n self.label_metrics = QtGui.QFontMetrics( self.label_font )\n self.label_height = self.label_metrics.xHeight() + self.tpad\n ## the self.ylast variable is used as a running sum for\n ## text placement.\n self.ylast = self.label_height\n ## initialize the QPixmap that will be drawn on.\n self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)\n self.plotBitMap.fill(QtCore.Qt.black)\n ## plot the background frame\n self.plotBackground()\n \n def draw_frame(self, qp):\n '''\n Draws the background frame and the text headers for indices.\n '''\n ## initialize a white pen with thickness 1 and a solid line\n pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.setFont(self.label_font)\n ## set the horizontal grid to be the width of the frame\n ## divided into 8 spaces\n x1 = self.brx / 8\n y1 = 1\n ## draw the header and the indices using a loop.\n ## This loop is a 'horizontal' loop that will plot\n ## the text for a row, keeping the vertical placement constant.\n count = 0\n titles = ['PCL', 'CAPE', 'CINH', 'LCL', 'LI', 'LFC', 'EL']\n for title in titles:\n rect = QtCore.QRect(x1*count, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, title)\n count += 1\n qp.drawLine(0, self.label_height, self.brx, self.label_height)\n \n def resizeEvent(self, e):\n '''\n Handles when the window gets resized.\n '''\n self.initUI()\n\n def plotBackground(self):\n '''\n Handles drawing the text background onto\n the QPixmap.\n '''\n ## initialize a QPainter object\n qp = QtGui.QPainter()\n qp.begin(self.plotBitMap)\n ## draw the frame\n self.draw_frame(qp)\n qp.end()\n\n\nclass plotText(backgroundText):\n '''\n Handles plotting the indices in the frame.\n Inherits a backgroundText Object that contains\n a QPixmap with the frame drawn on it. All drawing\n gets done on this QPixmap, and then the QPixmap\n gets rendered by the paintEvent function.\n '''\n def __init__(self, prof):\n '''\n Initialize the data from a Profile object passed to \n this class. 
It then takes the data it needs from the\n Profile object and converts them into strings that\n can be used to draw the text in the frame.\n \n Parameters\n ----------\n prof: a Profile Object\n \n '''\n ## get the surface based, most unstable, and mixed layer\n ## parcels to use for indices, as well as the sounding\n ## profile itself.\n self.sfcparcel = prof.sfcpcl\n self.mlparcel = prof.mlpcl\n self.fcstpcl = prof.fcstpcl\n self.muparcel = prof.mupcl\n self.prof = prof;\n \n \n ## either get or calculate the indices, round to the nearest int, and\n ## convert them to strings.\n ## K Index\n self.k_idx = tab.utils.INT2STR( prof.k_idx )\n ## precipitable water\n self.pwat = tab.utils.FLOAT2STR( prof.pwat, 2 )\n ## 0-3km agl lapse rate\n self.lapserate_3km = tab.utils.FLOAT2STR( prof.lapserate_3km, 1 )\n ## 3-6km agl lapse rate\n self.lapserate_3_6km = tab.utils.FLOAT2STR( prof.lapserate_3_6km, 1 )\n ## 850-500mb lapse rate\n self.lapserate_850_500 = tab.utils.FLOAT2STR( prof.lapserate_850_500, 1 )\n ## 700-500mb lapse rate\n self.lapserate_700_500 = tab.utils.FLOAT2STR( prof.lapserate_700_500, 1 )\n ## convective temperature\n self.convT = tab.utils.INT2STR( prof.convT )\n ## sounding forecast surface temperature\n self.maxT = tab.utils.INT2STR( prof.maxT )\n #fzl = str(int(self.sfcparcel.hght0c))\n ## 100mb mean mixing ratio\n self.mean_mixr = tab.utils.FLOAT2STR( prof.mean_mixr, 1 )\n ## 150mb mean rh\n self.low_rh = tab.utils.INT2STR( prof.low_rh )\n self.mid_rh = tab.utils.INT2STR( prof.mid_rh )\n ## calculate the totals totals index\n self.totals_totals = tab.utils.INT2STR( prof.totals_totals )\n self.dcape = tab.utils.INT2STR( prof.dcape )\n self.drush = tab.utils.INT2STR( prof.drush )\n self.sigsevere = tab.utils.INT2STR( prof.sig_severe )\n self.mmp = tab.utils.FLOAT2STR( prof.mmp, 2 )\n self.esp = tab.utils.FLOAT2STR( prof.esp, 1 )\n self.wndg = tab.utils.FLOAT2STR( prof.wndg, 1 )\n self.tei = tab.utils.INT2STR( prof.tei )\n \n super(plotText, self).__init__()\n\n def resizeEvent(self, e):\n '''\n Handles when the window is resized.\n \n Parameters\n ---------\n e: an Event Object\n '''\n super(plotText, self).resizeEvent(e)\n self.plotData()\n \n def paintEvent(self, e):\n '''\n Handles when the window gets painted.\n This renders the QPixmap that the backgroundText\n Object contains. 
For the actual drawing of the data,\n see the plotData function.\n \n Parameters\n ---------\n e: an Event Object\n \n '''\n super(plotText, self).paintEvent(e)\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.drawPixmap(1, 1, self.plotBitMap)\n qp.end()\n\n def plotData(self):\n '''\n Handles the drawing of the text onto the QPixmap.\n This is where the actual data gets plotted/drawn.\n '''\n ## initialize a QPainter object\n qp = QtGui.QPainter()\n qp.begin(self.plotBitMap)\n ## draw the indices\n self.drawConvectiveIndices(qp)\n self.drawIndices(qp)\n self.drawSevere(qp)\n qp.end()\n \n def drawSevere(self, qp):\n '''\n This handles the severe indices, such as STP, sig hail, etc.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n ## initialize a pen to draw with.\n pen = QtGui.QPen(QtCore.Qt.yellow, 1, QtCore.Qt.SolidLine)\n qp.setFont(self.label_font)\n color_list = [QtGui.QColor(CYAN), QtGui.QColor(DBROWN), QtGui.QColor(LBROWN), QtGui.QColor(WHITE), QtGui.QColor(YELLOW), QtGui.QColor(RED), QtGui.QColor(MAGENTA)]\n ## needs to be coded.\n x1 = self.brx / 10\n y1 = self.ylast + self.tpad\n ship = tab.utils.FLOAT2STR( self.prof.ship, 1 )\n stp_fixed = tab.utils.FLOAT2STR( self.prof.stp_fixed, 1 )\n stp_cin = tab.utils.FLOAT2STR( self.prof.stp_cin, 1 )\n right_scp = tab.utils.FLOAT2STR( self.prof.right_scp, 1 )\n \n labels = ['Supercell = ', 'STP (cin) = ', 'STP (fix) = ', 'SHIP = ']\n indices = [right_scp, stp_cin, stp_fixed, ship]\n for label, index in zip(labels,indices):\n rect = QtCore.QRect(x1*7, y1, x1*8, self.label_height)\n if label == labels[0]: # STP uses a different color scale\n if float(index) >= 19.95:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 11.95:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1.95:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .45:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n elif float(index) >= -.45:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif float(index) < -.45:\n pen = QtGui.QPen(color_list[0], 1, QtCore.Qt.SolidLine)\n elif label == labels[1]: # STP effective\n if float(index) >= 8:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 4:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n elif float(index) < .5:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif label == labels[2]: # STP fixed\n if float(index) >= 7:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 5:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n else:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif label == labels[3]: # SHIP\n if float(index) >= 5:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = 
QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n else:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, label + index)\n y1 += (self.label_height)\n \n def drawIndices(self, qp):\n '''\n Draws the non-parcel indices.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n qp.setFont(self.label_font)\n ## make the initial x point relative to the width of the frame.\n x1 = self.brx / 10\n rpad = 5\n tpad = 5\n\n ## Now we have all the data we could ever want. Time to start drawing\n ## them on the frame.\n ## This starts with the left column.\n \n if self.prof.pwv_flag == -3:\n color = QtGui.QColor('#FF7F00')\n elif self.prof.pwv_flag == -2:\n color = QtGui.QColor('#EE9A00')\n elif self.prof.pwv_flag == -1:\n color = QtGui.QColor('#FFDAB9')\n elif self.prof.pwv_flag == 0:\n color = QtGui.QColor('#FFFFFF')\n elif self.prof.pwv_flag == 1:\n color = QtGui.QColor('#98FB98')\n elif self.prof.pwv_flag == 2:\n color = QtGui.QColor('#66CD00')\n else:\n color = QtGui.QColor('#00FF00')\n \n ## draw the first column of text using a loop, keeping the horizontal\n ## placement constant.\n y1 = self.ylast + self.tpad\n colors = [color, QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE)]\n texts = ['PW = ', 'MeanW = ', 'LowRH = ', 'MidRH = ', 'DCAPE = ', 'DownT = ']\n indices = [self.pwat + 'in', self.mean_mixr + 'g/kg', self.low_rh + '%', self.mid_rh + '%', self.dcape, self.drush + 'F']\n for text, index, c in zip(texts, indices, colors):\n rect = QtCore.QRect(rpad, y1, x1*4, self.label_height)\n pen = QtGui.QPen(c, 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n ## middle-left column\n y1 = self.ylast + self.tpad\n texts = ['K = ', 'TT = ', 'ConvT = ', 'maxT = ', 'ESP = ', 'MMP = ']\n indices = [self.k_idx, self.totals_totals, self.convT + 'F', self.maxT + 'F', self.esp, self.mmp]\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(x1*3.5, y1, x1*4, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n ## middle-right column\n y1 = self.ylast + self.tpad\n texts = ['WNDG = ', 'TEI = ', '', '', '', 'SigSvr = ']\n indices = [self.wndg, self.tei, '', '', '', self.sigsevere + ' m3/s3']\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(x1*6, y1, x1*4, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n self.ylast = y1\n qp.drawLine(0, y1+2, self.brx, y1+2)\n qp.drawLine(x1*7-5, y1+2, x1*7-5, self.bry )\n \n ## lapserate window\n y1 = self.ylast + self.tpad\n texts = ['Sfc-3km AGL LR = ', '3-6km AGL LR = ', '850-500mb LR = ', '700-500mb LR = ']\n indices = [self.lapserate_3km + ' C/km', self.lapserate_3_6km + ' C/km', self.lapserate_850_500 + ' C/km', self.lapserate_700_500 + ' C/km']\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(rpad, y1, x1*8, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n\n def drawConvectiveIndices(self, qp):\n '''\n This handles the drawing of the parcel indices.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n ## initialize a white pen with thickness 1 and a solid line\n pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)\n 
qp.setPen(pen)\n qp.setFont(self.label_font)\n ## make the initial x pixel coordinate relative to the frame\n ## width.\n x1 = self.brx / 8\n y1 = self.ylast + self.tpad\n ## get the indices rounded to the nearest int, convert to strings\n ## Start with the surface based parcel.\n sfc_bplus = tab.utils.INT2STR( self.sfcparcel.bplus )\n sfc_bminus = tab.utils.INT2STR( self.sfcparcel.bminus )\n sfc_lclhght = tab.utils.INT2STR( self.sfcparcel.lclhght )\n sfc_limax = tab.utils.INT2STR( self.sfcparcel.li5 )\n sfc_lfchght = tab.utils.INT2STR( self.sfcparcel.lfchght )\n sfc_elhght = tab.utils.INT2STR( self.sfcparcel.elhght )\n ## get the forecast surface parcel\n fcst_bplus = tab.utils.INT2STR( self.fcstpcl.bplus )\n fcst_bminus = tab.utils.INT2STR( self.fcstpcl.bminus )\n fcst_lclhght = tab.utils.INT2STR( self.fcstpcl.lclhght )\n fcst_limax = tab.utils.INT2STR( self.fcstpcl.li5 )\n fcst_lfchght = tab.utils.INT2STR( self.fcstpcl.lfchght )\n fcst_elhght = tab.utils.INT2STR( self.fcstpcl.elhght )\n ## Now get the mixed layer parcel indices\n ml_bplus = tab.utils.INT2STR( self.mlparcel.bplus )\n ml_bminus = tab.utils.INT2STR( self.mlparcel.bminus )\n ml_lclhght = tab.utils.INT2STR( self.mlparcel.lclhght )\n ml_limax = tab.utils.INT2STR( self.mlparcel.li5 )\n ## check and see if the lfc is there\n ml_lfchght = tab.utils.INT2STR( self.mlparcel.lfchght )\n ml_elhght = tab.utils.INT2STR( self.mlparcel.elhght )\n ## get the most unstable parcel indices\n mu_bplus = tab.utils.INT2STR( self.muparcel.bplus )\n mu_bminus = tab.utils.INT2STR( self.muparcel.bminus )\n mu_lclhght = tab.utils.INT2STR( self.muparcel.lclhght )\n mu_limax = tab.utils.INT2STR( self.muparcel.li5 )\n ## make sure the lfc is there\n mu_lfchght = tab.utils.INT2STR( self.muparcel.lfchght )\n mu_elhght = tab.utils.INT2STR( self.muparcel.elhght )\n\n ## Now that we have all the data, time to plot the text in their\n ## respective columns.\n \n ## PCL type\n texts = ['SFC', 'FCST', 'ML', 'MU']\n for text in texts:\n rect = QtCore.QRect(0, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## CAPE\n y1 = self.ylast + self.tpad\n texts = [sfc_bplus, fcst_bplus, ml_bplus, mu_bplus]\n for text in texts:\n rect = QtCore.QRect(x1*1, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## CINH\n y1 = self.ylast + self.tpad\n texts = [sfc_bminus, fcst_bminus, ml_bminus, mu_bminus]\n for text in texts:\n rect = QtCore.QRect(x1*2, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LCL\n y1 = self.ylast + self.tpad\n texts = [sfc_lclhght, fcst_lclhght, ml_lclhght, mu_lclhght]\n for text in texts:\n rect = QtCore.QRect(x1*3, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LI\n y1 = self.ylast + self.tpad\n texts = [sfc_limax, fcst_limax, ml_limax, mu_limax]\n for text in texts:\n rect = QtCore.QRect(x1*4, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LFC\n y1 = self.ylast + self.tpad\n texts = [sfc_lfchght, fcst_lfchght, ml_lfchght, mu_lfchght]\n for text in texts:\n rect = QtCore.QRect(x1*5, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## EL\n y1 = 
self.ylast + self.tpad\n texts = [sfc_elhght, fcst_elhght, ml_elhght, mu_elhght]\n for text in texts:\n rect = QtCore.QRect(x1*6, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n self.ylast = y1\n qp.drawLine(0, y1+2, self.brx, y1+2)\n\n\n\n","sub_path":"sharppy/viz/thermo.py","file_name":"thermo.py","file_ext":"py","file_size_in_byte":19945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"169334766","text":"from .opensourceprojects import OpenSourceProjects\nfrom .blogposts import BlogPosts\nfrom .posttweet import PostTweet\nfrom .postlinkedin import PostLinkedIn\n\n\nclass ReviveSocialMedia:\n\n _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out! {url} #reviveposts'\n _BLOG_MESSAGE = 'Blog Post: {name}. Check it out! {url} #reviveposts'\n\n def blog(self):\n random_blog = BlogPosts().get()\n try:\n message = self._BLOG_MESSAGE.format(\n name=random_blog['title'],\n url=random_blog['link']\n )\n PostTweet().post(message)\n PostLinkedIn().post(\n message,\n random_blog['title'],\n random_blog['link']\n )\n except:\n self.blog()\n\n def oss(self):\n random_project = OpenSourceProjects().get()\n try:\n tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url'])\n if 'documentation' in random_project:\n tweet = tweet + ' Docs: {}'.format(random_project['documentation'])\n if 'repository' in random_project:\n tweet = tweet + ' Repo: {}'.format(random_project['repository'])\n if 'type' in random_project:\n tweet = tweet + ' #{}'.format(random_project['type'])\n PostTweet().post(tweet)\n PostLinkedIn().post(\n tweet, \n random_project['name'],\n random_project['url']\n )\n except:\n self.oss()\n","sub_path":"revivesocialmedia/revivesocialmedia.py","file_name":"revivesocialmedia.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"566352502","text":"class Armor:\n\n def __init__(self,name:str,size:int=0,weight:int=0,power:int=0,\n stamina=0,health=0,defense=0):\n self.name = name\n self.size = size\n self.weight = weight\n self.power = power\n self.stamina = stamina\n self.health = health\n self.defense = defense\n self.__armor_type = None\n\n\n def armor_type(self,type):\n if type == 'rare':\n self.power *= 1.1\n elif type == 'legendary':\n self.power *= 1.7\n self.__armor_type = type\n\n\n def __repr__(self):\n return self.name\n","sub_path":"armor.py","file_name":"armor.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"383061552","text":"import sys\nimport timeit\nimport pprint\n\nsys.stdin = open('배열의_분할', 'r')\n\nstart_time = timeit.default_timer()\n\n\ndef find_state():\n global N_size, S_number, index, state\n state = 0\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff > 0:\n state = 1\n index += 1\n break\n elif diff < 0:\n state = -1\n index += 1\n break\n else:\n index += 1\n\n\ndef find_index():\n global answer, N_size, S_number, index, state\n if state > 0:\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff >= 0:\n index += 1\n else:\n answer += 1\n index += 1\n break\n elif state < 0:\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff <= 0:\n index += 1\n else:\n answer += 1\n index += 1\n break\n else:\n index += 1\n\n\nfor testCase in range(int(input())):\n answer = 1\n N_size = int(input())\n S_number = list(map(int, input().split()))\n index, state = 0, 0\n while index < N_size:\n find_state()\n find_index()\n print(\"#{} {}\".format(testCase + 1, answer))\n\nend_time = timeit.default_timer()\n\nprint('running time: {}'.format(end_time - start_time))\n\n# 1 1\n# 2 2\n# 3 5\n","sub_path":"SWEA/____완료____/Code/D5/배열의_분할/배열의_분할.py","file_name":"배열의_분할.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"186126343","text":"from django.shortcuts import render,redirect\nfrom django.views.generic.base import View\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import ListView\nfrom .models import Movie,Category,Actor,Ganre,Rating\nfrom .forms import RewiewForm\nfrom django.db.models import Q\nfrom .RatingForm import ReviewForm,RatingForm\n\n\nclass GanreYear():\n def get_genres(self):\n return Ganre.objects.all()\n \n def get_years(self):\n return Movie.objects.filter(draft=False).values(\"year\")\n\n\nclass MoviesView(GanreYear,ListView):\n model=Movie\n queryset=Movie.objects.filter(draft=False)\n paginate_by = 3\n\n\nclass MovieDetailView(GanreYear,DetailView):\n model=Movie\n slug_field=\"url\"\n\n def get_context_data(self,**kwargs):\n contex=super().get_context_data(**kwargs)\n contex[\"star_form\"]=RatingForm()\n return contex\n\nclass AddReview(View):\n def post(self,request,pk):\n form=RewiewForm(request.POST)\n movie=Movie.objects.get(id=pk)\n if form.is_valid():\n form=form.save(commit=False)\n if request.POST.get(\"parent\",None):\n form.parent_id=int(request.POST.get(\"parent\"))\n form.movie=movie\n # print(pk,type(pk))\n # print(form.name)\n form.save()\n return redirect(movie.absoluteUrl())\n\n\nclass ActorView(GanreYear,DetailView):\n model=Actor\n template_name='movies/actor.html'\n slug_field=\"name\"\n\nclass FilterMoviesView(GanreYear,ListView):\n paginate_by=2\n\n def get_queryset(self):\n queryset=Movie.objects.filter(\n Q(year__in=self.request.GET.getlist('year')) |\n Q(genres__in=self.request.GET.getlist('genre'))\n ).distinct()\n return queryset\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"year\"] = ''.join([f\"year={x}&\" for x in self.request.GET.getlist(\"year\")])\n context[\"genre\"] = ''.join([f\"genre={x}&\" for x in self.request.GET.getlist(\"genre\")])\n return context\n\n# class JsonFilterMoviesView(ListView):\n# def get_queryset(self):\n# queryset = Movie.objects.filter(\n# Q(year__in=self.request.GET.getlist(\"year\")) |\n# Q(genres__in=self.request.GET.getlist(\"genre\"))\n# ).distinct().values(\"title\", \"tagline\", \"url\", \"poster\")\n# return queryset\n\n# def get(self, request, *args, **kwargs):\n# queryset = list(self.get_queryset())\n# return JsonResponse({\"movies\": queryset}, safe=False)\n\nclass AddStarRating(View):\n \"\"\"Adding a rating to a movie\"\"\"\n\n def get_client_ip(self, request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n def post(self, request):\n form = RatingForm(request.POST)\n if form.is_valid():\n Rating.objects.update_or_create(\n ip=self.get_client_ip(request),\n movie_id=int(request.POST.get(\"movie\")),\n defaults={'star_id': int(request.POST.get(\"star\"))}\n )\n return HttpResponse(status=201)\n else:\n return HttpResponse(status=400)\n\n\nclass Search(ListView):\n \"\"\"Movie search\"\"\"\n paginate_by = 3\n\n def get_queryset(self):\n return Movie.objects.filter(title__icontains=self.request.GET.get(\"q\"))\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"q\"] = f'q={self.request.GET.get(\"q\")}&'\n return 
context","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"526702412","text":"import urllib.parse as parse\nimport os.path as path\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef getFileName(url) :\n p = parse.urlparse(url).path\n return path.basename(p)\n\ndef getHostname(url, withProtocol = False):\n p = parse.urlparse(url)\n if withProtocol:\n return \"{}://{}\".format(p.scheme, p.hostname)\n else:\n return p.hostname\n\ndef get_true_url(url):\n # import urllib.parse as parse\n # import os.path as path\n\n # print(\">>>>>>>>>>>>>>>>>>>\", parse.urlparse(url).hostname)\n res = requests.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n\n sel = \"iframe#mainFrame\"\n iframe = soup.select(sel)\n\n print(iframe, len(iframe))\n print(\"---------------------------\")\n host = getHostname(url)\n uri = iframe[0].get(\"src\")\n print(\"origin url : \", host + uri)\n\n origin_url = urljoin(getHostname(url, True), uri)\n print(origin_url)\n\n return host + uri\n\ndef urljoin(url, path):\n return parse.urljoin(url, path)\n \ndef get_iframe_src(url):\n\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'html.parser')\n\n selector = \"iframe[src]\"\n sss = soup.select_one(selector)\n path = sss.get(\"src\") \n host = getHostname(url)\n\n origin_url = \"https://\" + host + \"/\" + path\n print(origin_url)\n return origin_url\n\n# url = \"https://blog.naver.com/baekmg1988/221405485574\"\n# origin_url = get_true_url(url)\n\n\n# if __name__ == '__main__':\n\n# print(getFileName(url))\n# print(getHostname(url))\n# print(getHostname(url, true))","sub_path":"scraping/scraping_url_utils.py","file_name":"scraping_url_utils.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"506477016","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# XPath selector\n#\n# based on Scrapy selectors https://github.com/scrapy/scrapy/tree/master/scrapy/selector\n#\n\nimport re\nfrom lxml import etree\nimport six\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n if hasattr(el, \"__iter__\"):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, basestring):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n return strings\n\nclass XPathSelectorList(list):\n\n def __getslice__(self, i, j):\n return self.__class__(list.__getslice__(self, i, j))\n\n def select(self, xpath):\n return self.__class__(flatten([x.select(xpath) for x in self]))\n\n def re(self, regex):\n return flatten([x.re(regex) for x in self])\n\n def extract(self):\n return [x.extract() for x in self]\n\n def extract_unquoted(self):\n return [x.extract_unquoted() for x in self]\n\nclass XPathSelector(object):\n \n def __init__(self, html_content=None, base_url='', _root=None, _expr=None, namespaces=None):\n ''' init\n '''\n self.namespaces = namespaces\n parser = self._parser(recover=True, encoding='utf-8')\n if html_content is not None:\n _root = etree.fromstring(html_content, parser=parser, base_url=base_url)\n self._root = _root\n self._expr = _expr\n\n def select(self, xpath):\n '''returns a list of new selectors.\n '''\n try:\n xpathev = self._root.xpath\n except AttributeError:\n return XPathSelectorList([])\n\n try:\n result = xpathev(xpath, namespaces=self.namespaces)\n except etree.XPathError:\n raise ValueError(\"Invalid XPath: %s\" % xpath)\n\n if type(result) is not list:\n result = [result]\n\n result = [self.__class__(_root=x, _expr=xpath, namespaces=self.namespaces)\n for x in result]\n return XPathSelectorList(result)\n\n def re(self, regex):\n return extract_regex(regex, self.extract())\n \n def extract(self):\n try:\n return etree.tostring(self._root, method=self._tostring_method, encoding=six.u, with_tail=False)\n except (AttributeError, TypeError):\n if self._root is True:\n return u'1'\n elif self._root is False:\n return u'0'\n else:\n return self._root\n\n def register_namespace(self, prefix, uri):\n if self.namespaces is None:\n self.namespaces = {}\n self.namespaces[prefix] = uri\n\n def __str__(self):\n data = repr(self.extract()[:40])\n return \"<%s xpath=%r data=%s>\" % (type(self).__name__, self._expr, data)\n\n __repr__ = __str__\n\nclass XmlXPathSelector(XPathSelector):\n __slots__ = ()\n _parser = etree.XMLParser\n _tostring_method = 'xml'\n\nclass HtmlXPathSelector(XPathSelector):\n 
__slots__ = ()\n _parser = etree.HTMLParser\n _tostring_method = 'html'\n\n\nif __name__ == '__main__':\n def tests_html():\n ''' HTML tests '''\n \n html_content = '''\n \n \n \n Example website \n \n \n \n \n \n '''\n hxs = HtmlXPathSelector(html_content) \n assert hxs.select('//title/text()').extract() == [u'Example website']\n assert hxs.select('//base/@href').extract() == [u'http://example.com/']\n assert hxs.select('//div/@id').extract() == [u'images']\n assert hxs.select('//a[@href=\"image2.html\"]/img/@src').extract() == [u'image2_thumb.jpg']\n result = [u'image1_thumb.jpg', u'image2_thumb.jpg', u'image3_thumb.jpg', u'image4_thumb.jpg', u'image5_thumb.jpg']\n assert hxs.select('//a').select('img/@src').extract() == result\n \n links = hxs.select('//a[contains(@href, \"image\")]')\n assert links.extract() == [\n u'Name: My image 1 ',\n u'Name: My image 2 ',\n u'Name: My image 3 ',\n u'Name: My image 4 ',\n u'Name: My image 5 ',\n ]\n\n results = [\n ([u'image1.html'], [u'image1_thumb.jpg']),\n ([u'image2.html'], [u'image2_thumb.jpg']),\n ([u'image3.html'], [u'image3_thumb.jpg']),\n ([u'image4.html'], [u'image4_thumb.jpg']),\n ([u'image5.html'], [u'image5_thumb.jpg']),\n ]\n for index, link in enumerate(links):\n args = (link.select('@href').extract(), link.select('img/@src').extract())\n assert args == results[index]\n \n \n def tests_xml():\n ''' XML tests '''\n \n xml_content = '''\n \n \n \n \n 10 \n 20 \n 30 \n 40 \n 50 \n \n \n \n '''\n xxs = XmlXPathSelector(xml_content)\n assert xxs.select('//counter1/text()').extract() == [u'10']\n assert xxs.select('//object/@name').extract() == [u'object_1']\n\n def tests():\n tests_html()\n tests_xml()\n\n tests()\n \n","sub_path":"packages/xpathselectors.py","file_name":"xpathselectors.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"280265279","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nimg0 = cv2.imread('futbol.jpg') # resim eklenir\n\n\ngray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # Gri Skala\n\n\nimg = cv2.GaussianBlur(gray,(3,3),0) # Gürültü kaldırma\n\n\n# laplace sobel islemleri\nlaplacian = cv2.Laplacian(img,cv2.CV_64F)\nsobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) # x\nsobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) # y\n\n\n# tek görselde tablo olarak gösterme işlemi\nplt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')\nplt.title('Laplacian'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')\nplt.title('Sobel X'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')\nplt.title('Sobel Y'), plt.xticks([]), plt.yticks([])\n\nplt.show()","sub_path":"farkli_kenar_bulma/kenar_bulma_1.py","file_name":"kenar_bulma_1.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"315896555","text":"from laspy.file import File\nimport gdal\nimport numpy as np\nimport os\n\n\ndef imageLoad(filename):\n im = gdal.Open(filename, gdal.GA_ReadOnly)\n band = im.GetRasterBand(1)\n img = band.ReadAsArray(0, 0, im.RasterXSize, im.RasterYSize)\n transform = im.GetGeoTransform()\n return img, transform\n\n\ndef getNoDataValue(filename):\n im = gdal.Open(filename, gdal.GA_ReadOnly)\n band = im.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n return nodata\n\n\ndef imageWarp(file_from: str, file_to: str, offset=None, interp_method: int = gdal.gdalconst.GRA_Bilinear):\n image_from = gdal.Open(file_from, gdal.GA_ReadOnly)\n image_to = gdal.Open(file_to, gdal.GA_ReadOnly)\n\n # Apply registration offset\n if offset is not None:\n # Move input to memory to apply registration offset\n mem_drv0 = gdal.GetDriverByName('MEM')\n image_tmp = mem_drv0.Create('', image_from.RasterXSize,\n image_from.RasterYSize, 1, gdal.GDT_Float32)\n image_tmp.SetGeoTransform(image_from.GetGeoTransform())\n image_tmp.SetProjection(image_from.GetProjection())\n image_tmp.GetRasterBand(1).WriteArray(\n image_from.ReadAsArray(0, 0, image_from.RasterXSize,\n image_from.RasterYSize))\n NDV = image_from.GetRasterBand(1).GetNoDataValue()\n if NDV is not None:\n image_tmp.GetRasterBand(1).SetNoDataValue(NDV)\n\n offset = np.asarray(offset)\n transform = image_from.GetGeoTransform()\n transform = np.asarray(transform)\n transform[0] += offset[0]\n transform[3] += offset[1]\n image_tmp.SetGeoTransform(transform)\n else:\n image_tmp = image_from\n\n # Create outout image\n mem_drv = gdal.GetDriverByName('MEM')\n destination = mem_drv.Create('', image_to.RasterXSize, image_to.RasterYSize, 1,\n gdal.GDT_Float32)\n\n destination.SetProjection(image_to.GetProjection())\n destination.SetGeoTransform(image_to.GetGeoTransform())\n\n gdal.ReprojectImage(image_tmp, destination, image_from.GetProjection(),\n image_to.GetProjection(), interp_method)\n\n image_out = destination.GetRasterBand(1).ReadAsArray(0, 0, destination.RasterXSize, destination.RasterYSize)\n\n return image_out\n\n\ndef arrayToGeotiff(image_array, out_file_name, reference_file_name, NODATA_VALUE):\n \"\"\" Used to save rasterized dsm of point cloud \"\"\"\n reference_image = gdal.Open(reference_file_name, gdal.GA_ReadOnly)\n transform = reference_image.GetGeoTransform()\n projection = reference_image.GetProjection()\n\n driver = gdal.GetDriverByName('GTiff')\n out_image = driver.Create(out_file_name + '.tif', image_array.shape[1],\n image_array.shape[0], 1, gdal.GDT_Float32)\n if out_image is None:\n print('Could not create output GeoTIFF')\n\n out_image.SetGeoTransform(transform)\n out_image.SetProjection(projection)\n\n out_band = out_image.GetRasterBand(1)\n out_band.SetNoDataValue(NODATA_VALUE)\n out_band.WriteArray(image_array, 0, 0)\n out_band.FlushCache()\n out_image.FlushCache()\n # Ignore pep warning here, aids in memory management performance\n out_image = None\n\n return\n\n\n# Load LAS file and generate max DSM in memory\ndef lasToRaster(las_filename, transform, shape_out, NODATA):\n # Load LAS file\n test_las = File(las_filename, mode='r')\n\n x = test_las.x\n y = test_las.y\n z = test_las.z\n\n # Project to output image space\n # TODO: call map2pix\n map_to_pix = gdal.InvGeoTransform(transform)\n x0 = np.round(map_to_pix[0] + x * map_to_pix[1] + y * map_to_pix[2])\n y0 = np.round(map_to_pix[3] + x * map_to_pix[4] + y * map_to_pix[5])\n\n x0 = x0.astype(int)\n y0 = y0.astype(int)\n\n # Generate MAX value DSM\n raster = 
np.zeros(shape_out, np.float32) + NODATA\n for ii in range(0, x0.size):\n if (x0[ii] >= 0) & (x0[ii] < raster.shape[1]) & (y0[ii] >= 0) & (\n y0[ii] < raster.shape[0]):\n if z[ii] > raster[y0[ii], x0[ii]]:\n raster[y0[ii], x0[ii]] = z[ii]\n\n return raster\n\n\n# refMat is a GDAL GeoTransform format\ndef map2pix(reference_matrix, points_list):\n x_origin = reference_matrix[0]\n y_origin = reference_matrix[3]\n pixel_width = reference_matrix[1]\n pixel_height = -reference_matrix[5]\n\n xy = np.zeros(shape=(len(points_list), 2))\n\n xy[:, 0] = (np.round((points_list[:, 0] - x_origin) / pixel_width))\n xy[:, 1] = (np.round((y_origin - points_list[:, 1]) / pixel_height))\n\n return xy\n","sub_path":"core3dmetrics/geometrics/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"648601961","text":"from importlib import import_module\r\nfrom collections import OrderedDict\r\nfrom engine.misc import abrir_json\r\nfrom engine.globs import ModData\r\nfrom types import FunctionType\r\nfrom .r_composites import *\r\nfrom .composites import *\r\nfrom .decorators import *\r\nfrom .leaves import Leaf\r\n\r\n\r\nclass BehaviourTree:\r\n # this is a container.\r\n nodes = []\r\n tree_structure = None\r\n to_check = None\r\n node_set = False\r\n shared_context = {}\r\n status = None\r\n entity = None\r\n\r\n _loaded_functions = None\r\n\r\n def __init__(self, entity, tree_data):\r\n if self.tree_structure is not None:\r\n self.tree_structure.clear()\r\n self.nodes = []\r\n self.shared_context = {}\r\n self._loaded_functions = {}\r\n\r\n special = tree_data['head'].pop('special')\r\n self.load_script_information(tree_data['head'].pop('script'))\r\n self.tree_structure = OrderedDict()\r\n self.entity = entity\r\n tree_data = self.analyze_tree(tree_data['body'])\r\n for key in [str(i) for i in range(len(tree_data))]:\r\n node = None\r\n process = None\r\n data = tree_data[key]\r\n idx = int(key)\r\n self.tree_structure[idx] = []\r\n\r\n name = data['name']\r\n if 'children' in data: # composite\r\n self.tree_structure[idx].extend(data['children'])\r\n if name == 'Selector':\r\n node = Selector(self, idx, data['children'])\r\n elif name == 'Sequence':\r\n node = Sequence(self, idx, data['children'])\r\n elif name == 'Parallel':\r\n sucess_value, failure_value = 0, 0\r\n parallel = special.get('Parallel', False)\r\n if parallel and parallel['ID'] == idx:\r\n sucess_value = special['Parallel'].get('Sucess_value', 0)\r\n failure_value = special['Parallel'].get('Failure_value', 0)\r\n node = Parallel(self, idx, data['children'], sucess_value, failure_value)\r\n elif name == 'RSelector':\r\n node = RandomSelector(self, idx, data['children'])\r\n elif name == 'RSequence':\r\n node = RandomSequence(self, idx, data['children'])\r\n\r\n elif 'child' in data: # decorator\r\n self.tree_structure[idx].append(int(data['child']))\r\n if name == 'Repeater':\r\n times = 0\r\n repeater = special.get('Repeater', False)\r\n if repeater and repeater['ID'] == idx:\r\n times = special['Repeater']['times']\r\n node = Repeater(self, idx, data['child'], times=times)\r\n elif name == 'UntilFail':\r\n node = UntilFail(self, idx, data['child'])\r\n elif name == 'Succeeder':\r\n node = Succeeder(self, idx, data['child'])\r\n elif name == 'Inverter':\r\n node = Inverter(self, idx, data['child'])\r\n elif name == 'Failer':\r\n node = Failer(self, idx, data['child'])\r\n elif name == 'UntilSuccess':\r\n node = UntilSuccess(self, idx, data['child'])\r\n\r\n else: # leaf\r\n if name in globals():\r\n process = globals()[name]\r\n\r\n elif name in self._loaded_functions:\r\n process = self._loaded_functions[name]\r\n\r\n if isinstance(process, FunctionType):\r\n node = Leaf(self, idx, name)\r\n node.set_process(process)\r\n\r\n elif issubclass(process, Leaf):\r\n node = process(self, idx, name)\r\n\r\n self.nodes.append(node)\r\n\r\n self.set_parents()\r\n self.set_children()\r\n self.to_check = [self.nodes[0]]\r\n\r\n def __repr__(self):\r\n return 'BehaviourTree'\r\n\r\n def load_script_information(self, head_data):\r\n for script in head_data:\r\n ruta = ModData.pkg_scripts.replace('.', '/') + '/' + script\r\n modulo = import_module('.'.join([ModData.pkg_scripts, script.replace('/', '.')]), ruta)\r\n for name in head_data[script]:\r\n if hasattr(modulo, name):\r\n self._loaded_functions[name] = 
getattr(modulo, name)\r\n\r\n def analyze_tree(self, tree_data):\r\n key = None\r\n new_tree = None\r\n\r\n for key in [str(i) for i in range(len(tree_data))]:\r\n idx = int(key)\r\n name = tree_data[key]['name']\r\n if name == 'ExtenderLeaf':\r\n extension = abrir_json(ModData.mobs + 'behaviours/' + tree_data[key]['tree'] + '.json')\r\n head = extension.pop('head')\r\n body = extension.pop('body')\r\n new_tree = self.extend_tree(body, idx)\r\n self.load_script_information(head)\r\n break\r\n\r\n if new_tree:\r\n del tree_data[key]\r\n tree_data.update(new_tree)\r\n return tree_data\r\n\r\n @staticmethod\r\n def extend_tree(new_body, idx):\r\n new_tree = {}\r\n for kex in new_body:\r\n if 'children' in new_body[kex]:\r\n for i in range(len(new_body[kex]['children'])):\r\n new_body[kex]['children'][i] += idx\r\n elif 'child' in new_body[kex]:\r\n new_body[kex]['child'] += idx\r\n idy = str(int(kex) + idx)\r\n new_tree[idy] = new_body[kex]\r\n return new_tree\r\n\r\n def set_parents(self):\r\n for idx in self.tree_structure.keys():\r\n if len(self.tree_structure[idx]):\r\n node = self.nodes[idx]\r\n for idxn in self.tree_structure[idx]:\r\n self.nodes[idxn].set_parent(node)\r\n\r\n def set_children(self):\r\n for idx in self.tree_structure.keys():\r\n if len(self.tree_structure[idx]):\r\n if hasattr(self.nodes[idx], 'children'):\r\n for idxn in self.nodes[idx].children:\r\n node = self.nodes[idxn]\r\n index = self.nodes[idx].children.index(idxn)\r\n self.nodes[idx].children[index] = node\r\n\r\n elif hasattr(self.nodes[idx], 'child'):\r\n idxn = self.tree_structure[idx][0]\r\n node = self.nodes[idxn]\r\n self.nodes[idx].child = node\r\n\r\n def set_to_check(self, *nodes):\r\n if self.node_set is False:\r\n self.to_check = [*nodes]\r\n self.node_set = True\r\n\r\n def set_context(self, key, value):\r\n self.shared_context[key] = value\r\n\r\n def get_context(self, key, default_value=False):\r\n if key in self.shared_context:\r\n return self.shared_context[key]\r\n else:\r\n return default_value\r\n\r\n def clear_context(self):\r\n self.shared_context.clear()\r\n\r\n def erase_keys(self, *keys):\r\n \"\"\"\r\n This method erases the indicated keys from the shared context. It works as clear_context(), but selectively.\r\n \"\"\"\r\n for key in keys:\r\n if key in self.shared_context:\r\n del self.shared_context[key]\r\n\r\n def preserve_keys(self, *keys):\r\n \"\"\"\r\n This method erases all keys from the shared conext, except those which are preserved.\r\n \"\"\"\r\n preserved = {}\r\n for key in keys:\r\n preserved[key] = self.shared_context[key]\r\n\r\n self.clear_context()\r\n for key in preserved:\r\n self.set_context(key, preserved[key])\r\n\r\n def set_status(self, status):\r\n \"\"\"\"\r\n Sets the status of the entire tree. Otherwise, the status is None.\r\n \"\"\"\r\n self.status = status\r\n\r\n def reset(self):\r\n self.status = None\r\n self.to_check = self.nodes[0]\r\n self.clear_context()\r\n for node in self.nodes:\r\n node.reset()\r\n\r\n def update(self):\r\n if self.status is None:\r\n for node in self.to_check:\r\n node.update()\r\n self.node_set = False\r\n else:\r\n return self.status\r\n","sub_path":"engine/mobs/behaviortrees/behaviour_tree.py","file_name":"behaviour_tree.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"18233515","text":"import pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nimport time\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport datetime\nfrom api_key import api_key\n\n####\n#### Work in progress\n\n\nclass StockData:\n def __init__(self):\n self.api_key = api_key\n self.df = {}\n # self.today = pd.Timestamp(datetime.date.today())\n\n def indicator(self):\n # calculate total pv, total volume and VMAP\n self.df['6. pv total'] = (\n ((self.df['2. high'] + self.df['3. low'] + self.df['4. close']) / 3) * self.df['5. volume']).cumsum()\n self.df['7. volume total'] = self.df['5. volume'].cumsum()\n self.df['8. VWAP'] = self.df['6. pv total'] / \\\n self.df['7. volume total']\n\n def clean_data(self):\n # return yesterday's data\n yesterday = pd.Timestamp(\n datetime.date.today() - datetime.timedelta(days=1))\n print(str(yesterday))\n self.df = self.df[self.df.index > yesterday\n ].sort_index(ascending=False)\n print('data cleaned!')\n\n def TimeSeries(self, plot=False):\n ts = TimeSeries(key=self.api_key, output_format='pandas')\n self.df, meta_data = ts.get_intraday(\n symbol='MSFT', interval='1min', outputsize='full')\n\n print('dataframe created')\n self.clean_data()\n self.indicator()\n self.df = self.df.drop(['2. high', '3. low', '5. volume',\n '6. pv total', '7. volume total'], axis=1)\n print(self.df)\n self.df.plot()\n plt.title('Intraday Time Series (1 min)')\n plt.grid()\n plt.show()\n\n # if plot == True:\n\n\ndf = StockData()\ndf.TimeSeries(plot=True)\n","sub_path":"stock_alpha_vantage.py","file_name":"stock_alpha_vantage.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"355149324","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 15 17:27:16 2016\n\n@author: cornkle\n\"\"\"\n\nimport os\nimport numpy as np\nfrom scipy.ndimage.measurements import label\nfrom utils import u_mann_kendall as mk\n\n\ndef locate(pattern, root_path, exclude=None):\n strg = []\n llist = os.listdir(root_path)\n llist.sort()\n for file in llist:\n if file.endswith(pattern):\n filepath = os.path.join(root_path, file)\n\n try:\n if exclude in filepath:\n continue\n except TypeError:\n pass\n strg.append(os.path.join(root_path, file))\n return strg\n\n\n\ndef distance(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) *(x1 - x2) + (y1 - y2) * (y1 - y2))\n\n\ndef closest_point(point, points):\n dist_2 = np.sum((points - point) * (points - point), axis=1)\n return np.argmin(dist_2)\n\n\n\"\"\"create one unique integer from two positive integers\n Cantor pairing function\"\"\"\ndef unique_of_pair(x,y):\n\n uni = (x + y) * (x + y + 1) / 2 + y\n return uni\n\n\n\"\"\"\nFind all indices within the local circle of radius\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_circle(x, y, radius):\n\n xloc1 = np.arange(x - radius, x + radius + 1)\n yloc1 = np.arange(y - radius, y + radius + 1)\n xloc, yloc = np.meshgrid(xloc1, yloc1)\n distloc = ( ((xloc - x) * (xloc - x)) + ((yloc - y) * (yloc - y)) )**.5\n\n indloc = (distloc <= radius).nonzero()\n ycirc = indloc[0] - radius + y\n xcirc = indloc[1] - radius + x\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices within the local circle of radius\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_ellipse(x, y, short, long):\n\n\n xloc = np.arange(x-np.round(short), x+np.round(short)+1)\n yloc = np.arange(y-np.round(long), y+np.round(long)+1)[:,None]\n #xloc, yloc = np.meshgrid(xloc1, yloc1)\n distloc = ((xloc - x)/short)**2 + ((yloc - y)/long)**2 <=1\n\n pos = np.where(distloc)\n\n ycirc = pos[0]+y\n xcirc = pos[1]+x\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices within the local circle of radius but remove indeces that\nare out of an area box, specified with an 2d array.\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_cut_circle(x, y, radius, array):\n\n ycirc, xcirc = draw_circle(x, y, radius)\n noky = np.where(ycirc >= array.shape[0]) # if the circle is off the edge\n if noky[0].size > 0:\n ycirc = np.delete(ycirc, noky)\n xcirc = np.delete(xcirc, noky)\n\n nokx = np.where(xcirc >= array.shape[1])\n if nokx[0].size > 0:\n ycirc = np.delete(ycirc, nokx)\n xcirc = np.delete(xcirc, nokx)\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices creating the ring of a local circle of radius but remove indeces that\nare out of an area box, specified with an 2d array.\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_ring(x, y, inrad, outrad, array):\n\n in_ycirc, in_xcirc = draw_cut_circle(x, y, inrad, array)\n out_ycirc, out_xcirc = draw_cut_circle(x, y, outrad, array)\n\n in_uni=unique_of_pair(in_xcirc, in_ycirc)\n out_uni = 
unique_of_pair(out_xcirc, out_ycirc)\n\n inter = np.in1d(out_uni, in_uni, assume_unique=True)\n\n if np.sum(inter) != 0:\n nok = np.where(inter)\n out_ycirc = np.delete(out_ycirc, nok)\n out_xcirc = np.delete(out_xcirc, nok)\n\n return (out_ycirc, out_xcirc)\n\n\ndef cut_kernel(array, xpos, ypos, dist_from_point):\n \"\"\"\n This function cuts out a kernel from an existing array and allows the kernel to exceed the edges of the input\n array. The cut-out area is shifted accordingly within the kernel window with NaNs filled in\n :param array: 2darray\n :param xpos: middle x point of kernel\n :param ypos: middle y point of kernel\n :param dist_from_point: distance to kernel edge to each side\n :return: 2d array of the chosen kernel size.\n \"\"\"\n\n if array.ndim != 2:\n raise IndexError('Cut kernel only allows 2D arrays.')\n\n kernel = np.zeros((dist_from_point*2+1, dist_from_point*2+1)) * np.nan\n\n if xpos - dist_from_point >= 0:\n xmin = 0\n xmindist = dist_from_point\n else:\n xmin = (xpos - dist_from_point) * -1\n xmindist = dist_from_point + (xpos - dist_from_point)\n\n if ypos - dist_from_point >= 0:\n ymin = 0\n ymindist = dist_from_point\n else:\n ymin = (ypos - dist_from_point) * -1\n ymindist = dist_from_point + (ypos - dist_from_point)\n\n if xpos + dist_from_point < array.shape[1]:\n xmax = kernel.shape[1]\n xmaxdist = dist_from_point + 1\n else:\n xmax = dist_from_point - (xpos - array.shape[1])\n xmaxdist = dist_from_point - (xpos + dist_from_point - array.shape[1])\n\n if ypos + dist_from_point < array.shape[0]:\n ymax = kernel.shape[0]\n ymaxdist = dist_from_point + 1\n else:\n ymax = dist_from_point - (ypos - array.shape[0])\n ymaxdist = dist_from_point - (ypos + dist_from_point - array.shape[0])\n\n cutk = array[ypos - ymindist: ypos + ymaxdist, xpos - xmindist: xpos + xmaxdist]\n\n\n kernel[ymin: ymax, xmin:xmax] = cutk\n\n return kernel\n\ndef cut_kernel_3d(array, xpos, ypos, dist_from_point):\n \"\"\"\n This function cuts out a kernel from an existing array and allows the kernel to exceed the edges of the input\n array. 
The cut-out area is shifted accordingly within the kernel window with NaNs filled in\n :param array: 2darray\n :param xpos: middle x point of kernel\n :param ypos: middle y point of kernel\n :param dist_from_point: distance to kernel edge to each side\n :return: 2d array of the chosen kernel size.\n \"\"\"\n\n if array.ndim != 3:\n raise IndexError('Cut kernel3d only allows 3D arrays.')\n\n kernel = np.zeros((array.shape[0], dist_from_point*2+1, dist_from_point*2+1)) * np.nan\n\n if xpos - dist_from_point >= 0:\n xmin = 0\n xmindist = dist_from_point\n else:\n xmin = (xpos - dist_from_point) * -1\n xmindist = dist_from_point + (xpos - dist_from_point)\n\n if ypos - dist_from_point >= 0:\n ymin = 0\n ymindist = dist_from_point\n else:\n ymin = (ypos - dist_from_point) * -1\n ymindist = dist_from_point + (ypos - dist_from_point)\n\n if xpos + dist_from_point < array.shape[2]:\n xmax = kernel.shape[2]\n xmaxdist = dist_from_point + 1\n else:\n xmax = dist_from_point - (xpos - array.shape[2])\n xmaxdist = dist_from_point - (xpos + dist_from_point - array.shape[2])\n\n if ypos + dist_from_point < array.shape[1]:\n ymax = kernel.shape[1]\n ymaxdist = dist_from_point + 1\n else:\n ymax = dist_from_point - (ypos - array.shape[1])\n ymaxdist = dist_from_point - (ypos + dist_from_point - array.shape[1])\n\n cutk = array[:, ypos - ymindist: ypos + ymaxdist, xpos - xmindist: xpos + xmaxdist]\n\n\n kernel[:, ymin: ymax, xmin:xmax] = cutk\n\n return kernel\n\n\n\ndef blob_define(array, thresh, min_area=None, max_area=None, minmax_area=None):\n array[array >= thresh] = 0 # T threshold maskout\n array[np.isnan(array)] = 0 # set ocean nans to 0\n\n labels, numL = label(array)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n goodinds = u[u!=0]\n\n if min_area != None:\n goodinds = u[(n>=min_area) & (u!=0)]\n badinds = u[nmax_area]\n\n if minmax_area != None:\n goodinds = u[(n <= minmax_area[1]) & (u != 0) & (n>=minmax_area[0])]\n badinds = u[(n > minmax_area[1]) | (n < minmax_area[0])]\n\n for b in badinds:\n pos = np.where(labels==b)\n labels[pos]=0\n\n return labels, goodinds\n\n\n\ndef linear_trend(x, eps=0.001, alpha=0.01):\n\n #pf = np.polyfit(np.arange(len(x)), x, 1)\n pf, slope, int, p, ind = mk.test(np.arange(len(x)),x.squeeze().values, eps=eps, alpha=alpha, Ha='upordown')\n\n # we need to return a dataarray or else xarray's groupby won't be happy\n\n if ind == 1:\n issig = slope\n else:\n issig = np.nan\n\n return issig\n\n\n\n\n\n\n\n\n","sub_path":"utils/u_arrays.py","file_name":"u_arrays.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"493758866","text":"from datetime import date\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator\n\nfrom bookclub.forms import DiscussionForm\nfrom bookclub.models import Book\n\n\ndef all_books(request):\n upcoming_books = Book.objects.filter(read_by__gte=date.today()).order_by('read_by')[:3]\n previous_books = Book.objects.filter(read_by__lt=date.today()).order_by('-read_by')[:3]\n\n return render(request, 'bookclub/all_books.html', {'upcoming_books': upcoming_books, 'previous_books': previous_books})\n\n\ndef book_detail(request, pk):\n book = get_object_or_404(Book, pk=pk)\n discussion_form = DiscussionForm()\n discussion_open = False\n\n if book.read_by <= date.today():\n discussion_open = True\n\n if request.method == \"POST\":\n form = DiscussionForm(request.POST, request.FILES)\n if form.is_valid():\n opinion = form.save(commit=False)\n opinion.author = request.user\n opinion.book = book\n opinion.save()\n\n return redirect('book_detail', pk=book.pk)\n\n return render(request, 'bookclub/book_detail.html', {'book': book, 'discussion_open': discussion_open, 'discussion_form': discussion_form})\n\n\ndef book_list(request):\n books = Book.objects.all()\n paginator = Paginator(books, 5)\n\n page_number = request.GET.get('page')\n page_books = paginator.get_page(page_number)\n\n return render(request, 'bookclub/book_list.html', {'page_books': page_books})\n\n","sub_path":"bookclub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"330863735","text":"from tkinter import *\r\nimport os\r\n\r\n#Window Declaration\r\nafter_register=Tk()\r\nafter_register.geometry(\"1360x1900\")\r\nafter_register.title(\"Welcome\")\r\n\r\n#setting of the background wallpaper\r\n\r\nbckground_label = Label(height=1000, width=1900)\r\nimage1 = PhotoImage(file=\"bank_home.gif\")\r\nbckground_label.config(image = image1)\r\nbckground_label.image = image1\r\nbckground_label.place(x=0, y=0)\r\nbckground_label.pack()\r\n\r\n#login button for further continuation\r\ndef login_button():\r\n after_register.destroy()\r\n os.system('python login.py')\r\n\r\n#Displaying some text regarding first login\r\nid = Label(text=\"Hello Dear User, Thank You for Trusting us. \\n You are being rewarded 5000 rs for \\n joining with us. Press the Continue button \\n for proceeding. \\n Happy Banking! :)\", font=('Verdana', 20))\r\nid.place(x = 400, y = 100)\r\n\r\n#setting up the login button\r\n\r\nlogin = Button(text = \"Continue\", font=('Verdana', 30), fg = \"BLUE\", command=login_button)\r\nlogin.place(x = 600, y = 600)\r\n\r\n\r\nafter_register.mainloop()\r\n","sub_path":"after_register.py","file_name":"after_register.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"16611068","text":"from ImageRecord import ImageRecord\nimport os\n\n\nclass SiteRecord:\n \"\"\"\n The SiteRecord class represents a record of a site. This record contains details relevant to the site observations.\n\n A 'SiteRecord' is formed from a root site source directory. Within this directory we expect to find image\n observations taken at the given site.\n \"\"\"\n\n def __init__(self, the_dir):\n \"\"\"\n :param the_dir: directory name for site, includes trailing /\n \"\"\"\n self.directory = the_dir\n self.include = True\n self.images = []\n self._load_from_folder()\n self.alias = self._get_site_alias()\n\n def _get_site_alias(self):\n \"\"\"\n A site can have an 'alias' or nickname / shorthand name for the site which may be useful to the reader\n :return: string representing the alias to be used for the site\n \"\"\"\n return os.path.basename(os.path.split(self.directory)[0])\n\n def _load_from_folder(self):\n \"\"\"\n This is how we keep a SiteRecord in sync with the folder.\n First we add any new directories found on the filesystem into the data structure.\n\n Then we check to see if there are any entires in the data structure whose representative files have since been\n removed from the filesystem\n :return: nothing\n \"\"\"\n self._add_new_from_folder()\n self._remove_missing_from_folder()\n\n def _add_new_from_folder(self):\n \"\"\"\n Add new images found in directory which are non existent in the site\n :return: none\n \"\"\"\n count = len(self.images) + 1\n\n for file in [os.path.join(self.directory, x) for x in os.listdir(self.directory)]:\n if file not in [x.image_path for x in self.images]:\n self.images.append(ImageRecord(count, file))\n count += 1\n\n self.images.sort()\n\n def _remove_missing_from_folder(self):\n \"\"\"\n Remove images from the structure which are no longer found in the directory\n :return: none\n \"\"\"\n for image in self.images:\n if not os.path.exists(image.image_path):\n self.images.remove(image)\n\n def __str__(self):\n \"\"\"\n Get a string representation of this site\n :return:\n \"\"\"\n base = \"site: \" + self.alias + \"\\n\"\n\n for x in self.images:\n base += str(x)\n\n return base\n","sub_path":"src/SiteRecord.py","file_name":"SiteRecord.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"214304888","text":"import sys, os, statistics, math\n\ndef main():\n proposition = True\n for m in range(4,1000):\n low = max(4,m-1)\n print(m,low)\n for n in range (low,1000):\n x = generateX(m,n)\n y = generateY(m,n)\n proposition = checkProposition(x,y)\n if not proposition :\n print(\"FAIL: m \",m,\" n \",n)\n return\n\ndef meanAbsDiff(r):\n n=len(r)\n mad=0\n for i in range(n):\n for j in range(n):\n mad = mad + abs(r[i] - r[j])\n return mad/(n^2)\n\ndef avgAbsDev(r):\n n = len(r)\n avg = statistics.mean(r)\n ad=0\n for i in range(n):\n ad = ad + abs(r[i] - avg)\n return ad/n\n\ndef stDev(r):\n n = len(r)\n avg = statistics.mean(r)\n sd = 0\n for i in range(n):\n sd = sd + math.pow(r[i] - avg,2)\n return math.sqrt(sd/n)\n\ndef gini(r):\n n = len(r)\n summ = sum(r)\n g = 0\n for i in range(n):\n for j in range(n):\n g = g + abs(r[i] - r[j])\n return g/(2*n*summ)\n\ndef checkProposition(x,y):\n flag = True\n if (meanAbsDiff(x)>meanAbsDiff(y)):\n print(x,y,\"meanAbsDiff\")\n flag = False\n if (avgAbsDev(x)>avgAbsDev(y)):\n print(x,y,\"avgAbsDev\")\n flag = False\n if (stDev(x)>stDev(y)):\n print(x,y,\"stDev\")\n flag = False\n if (gini(x)>gini(y)):\n print(x,y,\"gini\")\n flag = False\n return flag\n\ndef generateX(m,n):\n r=[]\n r.append(m-3)\n r.append(m-1)\n for i in range(n-2): \n r.append(m-2)\n return r\n\n\ndef generateY(m,n):\n r=[]\n for i in range(m-2): \n r.append(m-2-i)\n for i in range(n-m+2): \n r.append(0)\n return r\n\n# old utility methods \ndef getMofY(rY):\n return rY[0]+2\n\ndef mySumStDev(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n sd = 0\n for i in range(1,m-2+1):\n sd = sd + math.pow(i,2)\n for i in range(1,m-2+1):\n sd = sd - 2*avg*i\n sd = sd + (m-2)*(math.pow(avg,2))\n sd = sd + (n-m+2)*(math.pow(avg,2))\n return sd\n\ndef stComput(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n sd = (m-2)*(m-1)*(2*m-3)/6 - avg*(m-2)*(m-1)+ avg**2*(m-2)+(n-m+2)*avg**2\n sd = (m-2)*(m-1)*(2*m-3)/6 - (m-2)**2*(m-1)**2/(4*n)\n return sd\n\ndef myAvgAbsDevY(r,m):\n n=len(r)\n ad=0\n avg= 0\n for i in range(2,m-1+1):\n avg = avg + (m-i)\n avg = avg/n\n for i in range(2,m-1+1):\n ad = ad + abs((m-i)- avg)\n ad = ad + (n-m+2)*(avg)\n return ad\n\ndef avgAbsDevComputation(r):\n n = len(r)\n m = getMofY(r)\n ad = 0\n avg = statistics.mean(r)\n floor = math.floor(avg)\n for i in range(floor+1,m-2+1):\n ad = ad + (i-avg)\n for i in range(1,floor+1):\n ad = ad - (i-avg)\n ad = ad + (n-m+2)*(avg)\n return ad\n\ndef testMathAd(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n floor = math.floor(avg)\n x = (-((floor-m+2)*(floor+m-1)/2)-(m-2-floor)*avg-((floor*(floor+1))/2) + floor*avg+(n-m+2)*avg)\n x = floor*(-(floor+m-1)/2+(m-2)/2+2*avg-(floor+1)/2)+2*avg*(n-m+2)\n return floor*(2*avg-floor-1)+ 2*avg*(n-m+2)\n\ndef myMeanAbsDiffY(r,m):\n n=len(r)\n sum = 0\n for j in range(m-3+1):\n for i in range(m-3-j+1):\n sum = sum + i\n sum = sum + (m-2-j)*(n-m+2)\n for i in range(j+1):\n sum = sum + i\n temp=0\n for j in range(m-2+1):\n temp = temp + j\n sum = sum + (temp * (n-m+2) )\n return sum/(n^2)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"spreadMeasures.py","file_name":"spreadMeasures.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"320782262","text":"def get_methods(a_class):\n return [method for method in a_class.__dict__.keys() if\n callable(getattr(a_class, method)) and not\n method.startswith('_') and method != 'init']\n\n\nclass Group(object):\n def __init__(self, sim, groups, agent_class=None):\n self.sim = sim\n self.num_managers = sim.processes\n self._processor_groups = sim._processor_groups\n self.groups = groups\n self.do = (self.execute_parallel\n if sim.processes > 1\n else self.execute_serial)\n\n self.agent_class = agent_class\n for method in dir(agent_class):\n if method[0] != '_':\n setattr(self, method,\n eval('lambda self=self, *argc, **kw: self.do(\"%s\", *argc, **kw)' %\n method))\n\n self.panel_serial = 0\n self.last_action = \"Begin_of_Simulation\"\n\n def __add__(self, g):\n return Group(self.sim, self.groups + g.groups, self.agent_class)\n\n def __radd__(self, g):\n if isinstance(g, Group):\n return self.__add__(g)\n else:\n return self\n\n def execute_serial(self, command, *args, **kwargs):\n self.last_action = command\n self.sim.messagess[-1].clear()\n out_messages = self._processor_groups[0].execute(\n self.groups, command, [], args, kwargs)\n self.sim.messagess = out_messages\n return out_messages[-1]\n\n def execute_parallel(self, command, *args, **kwargs):\n self.last_action = command\n self.sim.messagess[-1].clear()\n parameters = ((pg, self.groups, command, self.sim.messagess[pgid], args, kwargs)\n for pgid, pg in enumerate(\n self._processor_groups))\n out = self.sim.pool.map(execute_wrapper, parameters, chunksize=1)\n for pgid in range(self.num_managers):\n self.sim.messagess[pgid].clear()\n for out_messages in out:\n for pgid, messages in enumerate(out_messages):\n self.sim.messagess[pgid].extend(messages)\n return self.sim.messagess[-1]\n\n def panel_log(self, variables=[], possessions=[], func={}, len=[]):\n \"\"\" panel_log(.) writes a panel of variables and possessions\n of a group of agents into the database, so that it is displayed\n in the gui.\n\n Args:\n possessions (list, optional):\n a list of all possessions you want to track as 'strings'\n variables (list, optional):\n a list of all variables you want to track as 'strings'\n func (dict, optional):\n accepts lambda functions that execute functions. e.G.\n :code:`func = lambda self: self.old_money - self.new_money`\n len (list, optional):\n records the length of the list or dictionary with that name.\n\n Example in start.py::\n\n for round in simulation.next_round():\n firms.produce_and_sell()\n firms.panel_log(possessions=['money', 'input'],\n variables=['production_target', 'gross_revenue'])\n households.buying()\n \"\"\"\n self.do('_panel_log', variables, possessions, func, len, self.last_action)\n\n def agg_log(self, variables=[], possessions=[], func={}, len=[]):\n \"\"\" agg_log(.) writes a aggregate data of variables and possessions\n of a group of agents into the database, so that it is displayed\n in the gui.\n\n Args:\n possessions (list, optional):\n a list of all possessions you want to track as 'strings'\n variables (list, optional):\n a list of all variables you want to track as 'strings'\n func (dict, optional):\n accepts lambda functions that execute functions. 
e.G.\n :code:`func = lambda self: self.old_money - self.new_money`\n len (list, optional):\n records the length of the list or dictionary with that name.\n\n Example in start.py::\n\n for round in simulation.next_round():\n firms.produce_and_sell()\n firms.agg_log(possessions=['money', 'input'],\n variables=['production_target', 'gross_revenue'])\n households.buying()\n \"\"\"\n self.do('_agg_log', variables, possessions, func, len)\n\n\ndef execute_wrapper(inp):\n # processor_group.execute(self.groups, command, messages[pgid])\n return inp[0].execute(inp[1], inp[2], inp[3], inp[4], inp[5])\n","sub_path":"abce/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"257110798","text":"\"\"\"2행 N열\n스티커를 떼면 좌우상하 스티커 모두 사용할 수 없게 됨\n점수의 합이 최대가 되도록 스티커를 떼려 함\n\n케이스(1) 1칸 이전 대각선에서 올 경우\n케이스(2) 2칸 이전 대각선에서 올 경우\n\n위 두 케이스 중 존재하는 케이스에 대한 최댓값과 현재 상태값의 합\n\"\"\"\nimport sys\ninput = sys.stdin.readline\n\nt = int(input())\nanswers = []\n\nfor _ in range(t):\n n_cols = int(input())\n arr = [list(map(int, input().strip().split())) for _ in range(2)]\n scores = [[0] * n_cols for _ in range(2)]\n\n for c in range(n_cols):\n for r, diag in zip([0, 1], [1, 0]):\n if c == 0:\n scores[r][c] += arr[r][c]\n\n elif c == 1:\n scores[r][c] += arr[r][c] + scores[diag][c-1]\n \n else:\n scores[r][c] += arr[r][c] + max(scores[diag][c-1], scores[diag][c-2])\n\n answers.append(max(scores[0][n_cols-1], scores[1][n_cols-1]))\n\nfor a in answers:\n print(a)","sub_path":"dp/9465.py","file_name":"9465.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"505907443","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sympy import *\nimport matplotlib.pyplot as pt\n\n\n# In[2]:\n\n\n#Laplace transform\n#It transforms a function of a real variable t (often time) to a function of a complex variable s (complex frequency).\n#the Laplace transform is a useful tool for dealing with linear systems described by ODEs. \n#the Laplace transform is defined for a larger class of functions than the related Fourier transform.\n\n\n# In[3]:\n\n\n#Basic and standard laplase can be found by integrating the function with exp(-st) from 0 to infinity with repect to t\nt,s,n,a,b=symbols('t s n a b')\nf=[1,t,t**n,exp(a*t),exp(-a*t),sin(a*t),cos(a*t),sinh(a*t),cosh(a*t)]\n\n\n# In[4]:\n\n\nintegrate(f[0]*exp(-s*t),(t,0,oo))\n\n\n# In[5]:\n\n\nintegrate(f[1]*exp(-s*t),(t,0,oo))\n\n\n# In[6]:\n\n\nintegrate(f[2]*exp(-s*t),(t,0,oo))\n\n\n# In[7]:\n\n\nintegrate(f[3]*exp(-s*t),(t,0,oo))\n\n\n# In[8]:\n\n\nintegrate(f[4]*exp(-s*t),(t,0,oo))\n\n\n# In[9]:\n\n\nintegrate(f[5]*exp(-s*t),(t,0,oo))\n\n\n# In[10]:\n\n\nintegrate(f[6]*exp(-s*t),(t,0,oo))\n\n\n# In[11]:\n\n\nintegrate(f[7]*exp(-s*t),(t,0,oo))\n\n\n# In[12]:\n\n\nintegrate(f[8]*exp(-s*t),(t,0,oo))\n\n\n# In[13]:\n\n\n#first shifting property is to simply equations with exp(at)\n#convert s----->(s-a)\n\n\n# In[14]:\n\n\nf=exp(a*t)*sin(b*t)\n#find laplace of sin(b*t)\nf1=sin(b*t)\nlaplace=integrate(f1*exp(-s*t),(t,0,oo))\n#s---->s-a\nlaplace.subs(s,s-a)\n\n\n# In[15]:\n\n\nimport numpy as np\nfrom sympy.integrals import laplace_transform\nfrom sympy.abc import t,s,a,b\n\n\n# In[16]:\n\n\nf=cos(t)\nlistu=[]\nfor i in np.arange(0,8*np.pi,0.2):\n listu.append(f.subs(t,i))\npt.plot(np.arange(0,8*np.pi,0.2),listu,label=\"f(t) graph\")\ng=laplace_transform(f,t,s)\ng=g[0]\nlisty=[]#g(s) values\nfor i in np.arange(0,8*np.pi,0.2):\n listy.append(g.subs(s,i))\npt.plot(np.arange(0,8*np.pi,0.2),listy,label=\"f(s) graph\")\npt.legend()\n\n\n# In[17]:\n\n\nf=exp(a*t)*sin(b*t)\n#can solve this easily by first shifting property\n#take f1\nf1=sin(b*t)\ng1=laplace_transform(f1,t,s)\ng1=g1[0]\ng2=g1.subs(s,s-a)\ng=laplace_transform(f,t,s)\ng=g[0]\nif g.equals(g2):\n print('true')\n print(\"laplace transform of exp(a*t)*sin(b*t) is\",g)\n print(\"laplace transform of sin(b*t) and then substituting s with s-a is\",g2)\n\nelse:\n print('false')\n\n\n# In[38]:\n\n\nlisty=[]\nlistu=[]\nlisti=[]\n#let a=1 b=2\ng=g.subs([(a,1),(b,2)])\ng1=g1.subs([(a,1),(b,2)])\ng2=g2.subs([(a,1),(b,2)])\n\nfor i in np.arange(0,8*np.pi,0.2):\n listy.append(g1.subs(s,i))\nfor i in np.arange(0,8*np.pi,0.2):\n listu.append(g2.subs(s,i))\nfor i in np.arange(0,8*np.pi,0.2):\n listi.append(g.subs(s,i))\nfig, (ax1, ax2,ax3) = pt.subplots(1,3)\nfig.suptitle('First shit visuallisation')\nax1.plot(np.arange(0,8*np.pi,0.2),listy)\nax2.plot(np.arange(0,8*np.pi,0.2),listu)\nax3.plot(np.arange(0,8*np.pi,0.2),listi)\nax1.set_title('Laplace before shifting')\nax2.set_title('Laplace after shifting')\nax3.set_title('Laplace done normally')\nprint('See after shifting both graphs become same')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"python_project.py","file_name":"python_project.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"195453863","text":"# Get the input email from user\nemail = input('What is your email address?: ').strip()\n\n# Slice out the username\nusername = email[:email.index('@')]\n\n# Slice out the domain\ndomain = email[email.index('@') + 1:]\n\n# Format the out put\noutput = 'Your username is {} and your domain is {}'\noutput = output.format(username, domain)\n\n# Print the message\nprint(output)\n","sub_path":"slicer.py","file_name":"slicer.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"298176971","text":"'''\nconfig for factory test rig\n'''\n\nfrom math import *\n\nGDB=\"arm-none-eabi-gdb\"\nFMU_BMAGIC_SERIAL=\"B5DEADF0\"\nIO_BMAGIC_SERIAL=\"B5DFADF1\"\n\nFMU_JTAG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if00\" % FMU_BMAGIC_SERIAL\nIO_JTAG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if00\" % IO_BMAGIC_SERIAL\n\nFMU_DEBUG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if02\" % FMU_BMAGIC_SERIAL\n\nFW_IO=\"FW/px4io.elf\"\nBL_IO=\"FW/px4io_bl.elf\"\n\nFW_FMU=\"FW/firmware-test.elf\"\nBL_FMU=\"FW/px4fmuv3_bl.elf\"\n\nCPUID_IO=\"STM32, Medium density\"\nCPUID_FMU=\"STM32F4xx\"\n\nUSB_DEV_TEST=\"/dev/serial/by-id/usb-3D_Robotics_PX4_FMU_v2.x_0-if00\"\nUSB_DEV_REFERENCE=\"/dev/serial/by-id/usb-3D_Robotics_PH_REFERENCE_0-if00\"\n\nFTDI_POWER=\"/dev/serial/by-id/usb-FTDI_TTL232R_FTFX6YMW-if00-port0\"\n\nNUM_ACCELS=3\nNUM_GYROS=3\n\nREMOTE_MONITOR=\"10.26.1.200:16550\"\nREMOTE_MONITOR2=\"10.26.1.200:16551\"\n\nROTATION_LEVEL_TOLERANCE = 3.0\nROTATION_TOLERANCE = 5.0\n\nGYRO_TOLERANCE = radians(0.2)\n\nPRESSURE_TOLERANCE = 10\nTEMPERATURE_TOLERANCE = 20\nVOLTAGE_TOLERANCE = 0.4\n\n# what channels control pitch and yaw in body frame\n\n# yaw in body frame\nYAW_CHANNEL = 5\n# +100 change == -20 degrees\nYAW_SCALE = -22.0 / 100\n\n\n\n\n# pitch in earth frame\nPITCH_CHANNEL = 6\n# +100 change == -34 degrees\nPITCH_SCALE = -34.0/100\n\n# acceptable modes when the test board is idle\nIDLE_MODES = [\"RTL>\",\"CIRCLE>\",\"MANUAL>\",\"STABILIZE>\"]\n\n\nclass Rotation(object):\n def __init__(self, chan1, chan2, roll, pitch):\n self.chan1 = chan1\n self.chan2 = chan2\n self.roll = roll\n self.pitch = pitch\n\n# servo positions for different orientations of boards in the test jig\n# the columns are:\n# servo5 PWM\n# servo6 PWM\n# expected roll\n# expected pitch\nROTATIONS = {\n 'level' : Rotation(1272, 1687, 0, 0),\n 'right' : Rotation(855, 1420, 90, 0),\n 'left' : Rotation(1660, 1420, -90, 0),\n 'up' : Rotation(1260, 1420, None, 90),\n 'down' : Rotation(1274, 1950, None, -90),\n 'back' : Rotation(1255, 1180, 180, 0)\n }\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"15733326","text":"'''\n1 + 2 + ... + n-1 + n + n-1 + ... + 2 + 1\n이 합은 n**2임\n1. 따라서 주어진 길이에서 n**2만큼 빼주고\n2. 남은 값은 n보다 작거나 같은 수로 빼주면 될듯\n'''\n\nn = int(input()) # 반복 횟수\n\nfor _ in range(n):\n x, y = map(int, input().split()) # x좌표, y좌표\n lenth = y - x # 거리 계산\n test = 0 # 장치 작동 횟수\n n = 1 # 위 주석에서 언급한 n\n while n**2 <= lenth:\n n += 1\n n -= 1\n test += 2*n - 1 # n만큼 횟수 추가\n lenth -= n**2 # n**2만큼 뺀 나머지\n \n while True:\n if lenth == 0: # 다 끝난 경우\n print(test)\n break\n elif lenth >= n:\n lenth -= n\n test += 1\n else: # lenth < n\n lenth = 0\n test += 1","sub_path":"1_백준/1_단계별학습/08_기본_수학_1/10_1011.py","file_name":"10_1011.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"307874797","text":"from conans import ConanFile, python_requires, tools, CMake\nimport shutil\nimport os\n\npyreq = python_requires(\"pyreq/1.0.0@tdelame/stable\")\n\nclass openimageio(pyreq.CMakeConanFile):\n description = \"library for reading and writing images, and a bunch of related classes, utilities, and applications.\"\n license = \"BSD 3-Clause License\"\n url = \"https://sites.google.com/site/openimageio/\"\n\n name = \"OpenImageIO\"\n version = \"2.1.10.1\"\n\n settings = \"os\"\n\n def build_requirements(self):\n \"\"\"Define build-time requirements.\"\"\"\n self.requires(\"boost-headers/1.70.0@tdelame/stable\")\n super(openimageio, self).build_requirements()\n\n def requirements(self):\n \"\"\"Define runtime requirements.\"\"\"\n self.requires(\"OpenEXR/2.4.0@tdelame/stable\")\n self.requires(\"tiff/4.1.0@tdelame/stable\")\n self.requires(\"libjpeg/9c@tdelame/stable\")\n self.requires(\"libpng/1.6.37@tdelame/stable\")\n self.requires(\"zlib/1.2.11@tdelame/stable\")\n self.requires(\"boost-filesystem/1.70.0@tdelame/stable\")\n self.requires(\"boost-thread/1.70.0@tdelame/stable\")\n self.requires(\"TBB/2019-U6@tdelame/stable\")\n self.requires(\"bzip2/1.0.8@tdelame/stable\")\n self.requires(\"freetype/2.9.1@tdelame/stable\")\n\n def source(self):\n \"\"\"Retrieve source code.\"\"\"\n url = \"https://github.com/OpenImageIO/oiio/archive/Release-{}.tar.gz\".format(self.version)\n directory = \"oiio-Release-{}\".format(self.version)\n tools.get(url)\n os.rename(directory, self._source_subfolder)\n\n def cmake_definitions(self):\n \"\"\"Setup CMake definitions.\"\"\"\n boost_lib_paths = []\n boost_libs = []\n for component in [\"filesystem\", \"thread\"]:\n dep = self.deps_cpp_info[\"boost-{}\".format(component)]\n boost_lib_paths.extend(dep.lib_paths)\n boost_libs.extend(dep.libs)\n \n definition_dict = {\n \"OIIO_BUILD_TESTS\": False,\n \"OIIO_BUILD_TOOLS\": False,\n \"OIIO_THREAD_ALLOW_DCLP\": True,\n \n \"EMBEDPLUGINS\": True,\n \"INSTALL_DOCS\": False,\n \"BUILD_DOCS\": False,\n \"USE_STD_REGEX\": True,\n\n \"USE_PYTHON\": False,\n \"USE_HDF5\": False,\n \"USE_OpenColorIO\": False,\n \"USE_OpenCV\": False,\n \"USE_DCMTK\": False,\n \"USE_Field3D\": False,\n \"USE_Libheif\": False,\n \"USE_LibRaw\": False,\n \"USE_Webp\": False,\n \"USE_Nuke\": False,\n \"USE_R3DSDK\": False,\n \"USE_OpenGL\": False,\n \"USE_OpenVDB\": False,\n \"USE_PTex\": False,\n \"USE_Qt5\": False,\n \"USE_Libsquish\": False,\n \"USE_OpenJpeg\": False,\n \"USE_FFmpeg\": False,\n \"USE_GIF\": False,\n \"USE_JPEGTurbo\": False,\n\n \"BOOST_CUSTOM\": True,\n \"Boost_VERSION\": \"1.70.0\",\n \"Boost_INCLUDE_DIRS\": self.deps_cpp_info[\"boost-headers\"].include_paths[0],\n \"Boost_LIBRARY_DIRS\": \";\".join([\"{}\".format(path) for path in boost_lib_paths]),\n \"Boost_LIBRARIES\": \";\".join([\"{}\".format(lib) for lib in boost_libs]),\n\n \"ZLIB_ROOT\": self.deps_cpp_info[\"zlib\"].rootpath,\n \n \"PNG_ROOT\": self.deps_cpp_info[\"libpng\"].rootpath,\n\n \"TIFF_ROOT\": self.deps_cpp_info[\"tiff\"].rootpath,\n\n \"Freetype_ROOT\": self.deps_cpp_info[\"freetype\"].rootpath,\n\n \"BZip2_ROOT\": self.deps_cpp_info[\"bzip2\"].rootpath,\n\n\n \"OpenEXR_ROOT\": self.deps_cpp_info[\"OpenEXR\"].rootpath,\n\n \"JPEG_ROOT\": self.deps_cpp_info[\"libjpeg\"].rootpath,\n\n \"CMAKE_CXX_FLAGS\": \"-fPIC -Wno-error=deprecated -m64 -O3\"\n }\n\n\n self.add_default_definitions(definition_dict)\n return definition_dict\n\n def build(self):\n # this project expect is very picky about cmake invocation...\n build_dir = 
os.path.join(self._source_subfolder, \"build\")\n package_dir = os.path.abspath(self.package_folder)\n os.makedirs(build_dir)\n os.makedirs(package_dir)\n\n compile_command = 'cmake ../ -G\"Ninja\" -DCMAKE_INSTALL_PREFIX=\"{package_folder}\" {definitions}'.format(\n package_folder=package_dir,\n definitions=\" \".join(['-D{0}=\"{1}\"'.format(key, value) for key, value in self.cmake_definitions().items()]))\n\n install_command = 'ninja install'\n\n with tools.chdir(build_dir):\n self.run(compile_command)\n self.run(install_command)\n\n def package(self):\n os.rename(\n os.path.join(self.package_folder, \"lib64\"),\n os.path.join(self.package_folder, \"lib\"))\n self.package_licenses()\n self.clean_package() \n \n def package_info(self):\n \"\"\"Edit package info.\"\"\"\n super(openimageio, self).package_info()\n self.cpp_info.libs = [\"OpenImageIO\", \"OpenImageIO_Util\"]\n","sub_path":"OpenImageIO/OpenImageIO-2.1.10.1.py","file_name":"OpenImageIO-2.1.10.1.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"187636885","text":"from webob import Request\r\nrequests=[]\r\nwikipedia = Request.blank(\"wikipedia.org\")\r\nwikipedia.host = 'wikipedia.org'\r\nwikipedia.environ[\"SERVER_NAME\"] = 'wikipedia.org'\r\nwikipedia.accept = \"text/html\"\r\nwikipedia.user_agent = \"User-Agent: Mozilla/5.0 (X11; U; Linux i686; ru; rv:1.9b5) Gecko/2008050509 Firefox/3.0b5\"\r\nrequests.append(wikipedia)\r\n\r\nhttpbin1 = Request.blank(\"ip\")\r\nhttpbin1.host = 'httpbin.org'\r\nhttpbin1.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin1.accept = '*/*'\r\nrequests.append(httpbin1)\r\n\r\nhttpbin2 = Request.blank(\"get?foo=bar&1=2&2/0&error=True\")\r\nhttpbin2.host = 'httpbin.org'\r\nhttpbin2.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin2.accept = '*/*'\r\nrequests.append(httpbin2)\r\n\r\nhttpbin3 = Request.blank(\"post\")\r\nhttpbin3.host = 'httpbin.org'\r\nhttpbin3.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin3.method = 'POST'\r\ncontent = \"foo=bar&1=2&2%2F0=&error=True\".encode('ascii')\r\nhttpbin3.content_type = \"application/x-www-form-urlencoded\"\r\nhttpbin3.body = content\r\nhttpbin3.content_length = len(content)\r\nhttpbin3.headers['Connection'] = 'close'\r\nrequests.append(httpbin3)\r\n\r\nhttpbin4 = Request.blank('cookies/set?country=Ru')\r\nhttpbin4.host = 'httpbin.org'\r\nhttpbin4.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin4.accept = '*/*'\r\nhttpbin4.headers['Connection'] = 'close'\r\nrequests.append(httpbin4)\r\n\r\nhttpbin5 = Request.blank(\"cookies\")\r\nhttpbin5.host = 'httpbin.org'\r\nhttpbin5.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin5.accept = '*/*'\r\nhttpbin5.headers['Connection'] = 'close'\r\nrequests.append(httpbin5)\r\n\r\nhttpbin6 = Request.blank('redirect/4')\r\nhttpbin6.host = 'httpbin.org'\r\nhttpbin6.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin6.accept = '*/*'\r\nhttpbin6.headers['Connection'] = 'close'\r\nrequests.append(httpbin6)\r\n\r\nhttpbin7 = Request.blank(\"post\")\r\nhttpbin7.host = 'httpbin.org'\r\nhttpbin7.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin7.method = 'POST'\r\ncontent = \"firstname=Nikita&lastname=Ragozin&group=fo340001&message=empty_message\".encode('ascii')\r\nhttpbin7.content_length = len(content)\r\nhttpbin7.content_type = \"application/x-www-form-urlencoded\"\r\nhttpbin7.body = content\r\nhttpbin7.headers['Connection'] = 'close'\r\nrequests.append(httpbin7)\r\n\r\nfor request in requests:\r\n\tresponce = request.get_response()\r\n\tresponce.content_type = 'text/plain'\r\n\tresponce.charset = 'utf-8'\r\n\tprint(responce)\r\n\tprint(\"\\n\\n------------\\n\\n\")\r\n\r\n","sub_path":"requestWebob.py","file_name":"requestWebob.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"545829863","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom utils import create_ui,genrate_new_images_on_existing_model, plot_loss, save_checkpoint, load_checkpoint,initialize_weights,create_tensor_board_dirs\nfrom model import Discriminator, Generator\nimport my_config\nfrom datetime import datetime\n\n## ----------settings for models are in config.py file -----------------------\n##-----------change this parameter from \"WGAN\" to \"DC_GAN\" to alternate between models. --------------------\n\n\nconfig = create_ui(my_config)\nMODEL = config.MODEL\nif MODEL['name'] == \"Wgan-GP\":\n    Model = config.WGAN\nelse:\n    Model = config.DC_GAN\n\ndevice_name = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ndevice = torch.device(device_name)\nModel['device'] = device\nprint(\"the current device is: \" + device_name)\n\n\n\n\n############# define model and do pre-processing #####################\ntransforms = transforms.Compose(\n    [\n        transforms.Resize(Model['image_size']),\n        transforms.ToTensor(),\n        transforms.Normalize(\n            (0.5,), (0.5,))\n    ]\n)\ndataset = datasets.FashionMNIST(root=\"dataset/\", train=True, transform=transforms, download=True)\nloader = DataLoader(\n    dataset,\n    batch_size=Model['batch_size'],\n    shuffle=True,\n)\n\nload_flag = Model['General']['load_existing_mode']\nif load_flag:\n    gen, disc, epoch, trained = load_checkpoint(Model['General']['load_dir'], Model['name'])\n    if not trained:\n        my_config.NUM_EPOCHS = epoch\n    else:\n        real = next(iter(loader))[0]\n        date = datetime.now().strftime(\"%m_%d_%H_%M\")\n        for i in range(Model['General']['number_of_real_images']):\n            torchvision.utils.save_image(real[i], f\"real_image_from_model_{Model['name']}_At_{date}_{i}.png\")\n        genrate_new_images_on_existing_model(Model, gen)\n        exit(0)\n# initialize gen and disc, note: discriminator should be called critic (since it no longer outputs between [0, 1])\n# for convenience of alternating between models the name remains disc\nif not load_flag:\n    gen = Generator(Model).to(device)\n    disc = Discriminator(Model).to(device)\ninitialize_weights(gen)\ninitialize_weights(disc)\n\n# initialize optimizers\nopt_gen = optim.Adam(gen.parameters(), lr=Model['lr_gen'], betas=(Model['beta1'], Model['beta2']))\nopt_disc = optim.Adam(disc.parameters(), lr=Model['lr_disc'], betas=(Model['beta1'], Model['beta2']))\n\n\n############# end model definition and pre-processing ##############################\n\n# for tensorboard plotting - demonstrate improvement via loss graphs and the image generation process\nwriter_real, writer_fake, writer_gen_loss, writer_disc_loss = create_tensor_board_dirs(Model)\n\nNUM_EPOCHS = my_config.NUM_EPOCHS\nfixed_noise = torch.randn(Model['batch_size'], Model['z_dim'], 1, 1).to(device)\nnum_of_batches = len(loader) // Model['batch_size']\nstep = 0\nD_loss = []\nG_loss = []\nfor epoch in range(NUM_EPOCHS):\n    # apply train mode to the models\n    gen.train()\n    disc.train()\n    for batch_idx, (real, _) in enumerate(loader):\n        if len(real) < Model['batch_size']:  # skip the last partial batch\n            break\n        real = real.to(device)\n        torch.autograd.set_detect_anomaly(True)\n        for _ in range(Model['disc_iter']):\n            noise = torch.randn(Model['batch_size'], Model['z_dim'], 1, 1).to(device)  # BCHW\n            fake = gen(noise)\n            loss_disc = disc.calculate_disc_loss(disc, real, fake)\n            disc.zero_grad()\n            loss_disc.backward(retain_graph=True)\n            opt_disc.step()\n        loss_gen = 
gen.calculate_gen_loss(disc, fake)\n        gen.zero_grad()\n        loss_gen.backward(retain_graph=True)\n        opt_gen.step()\n\n        # Print losses occasionally and print to tensorboard\n        if batch_idx % 100 == 0 and batch_idx > 0:\n            print(\n                f\"Epoch [{epoch}/{NUM_EPOCHS}] Batch {batch_idx}/{len(loader)} \\\n                  Loss D: {loss_disc:.4f}, loss G: {loss_gen:.4f}\"\n            )\n            with torch.no_grad():\n                gen.eval()\n                disc.eval()\n                fake = gen(fixed_noise)\n                # take out (up to) 32 examples\n                img_grid_real = torchvision.utils.make_grid(real[:32], normalize=True)  # normalize maps the images back to the range [0, 1]\n                img_grid_fake = torchvision.utils.make_grid(fake[:32], normalize=True)\n                D_loss.append(loss_disc.item())\n                G_loss.append(loss_gen.item())\n                writer_real.add_image(\"Real\", img_grid_real, global_step=step)\n                writer_fake.add_image(\"Fake\", img_grid_fake, global_step=step)\n                writer_gen_loss.add_scalar('Generator Loss', G_loss[-1], global_step=step)\n                writer_disc_loss.add_scalar('Discriminator Loss', D_loss[-1], global_step=step)\n\n                # writer_disc_loss.add_graph(gen,fixed_noise)\n                # writer_disc_loss.add_graph(disc,real)\n                writer_real.flush()\n                writer_fake.flush()\n                writer_gen_loss.flush()\n                writer_disc_loss.flush()\n\n\n            step += 1\nwriter_real.close()\nwriter_fake.close()\nwriter_gen_loss.close()\nwriter_disc_loss.close()\nprint(\"Training is finished! Saving the trained models and plotting the loss :)\")\nsave_checkpoint(gen, disc, None, True)\nplot_loss(G_loss, D_loss, gen.model)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"426947913","text":"# Licensed under the Prefect Community License, available at\n# https://www.prefect.io/legal/prefect-community-license\n\n\nimport asyncio\n\nimport pendulum\nimport pytest\n\nfrom prefect_server import api, config, utilities\nfrom prefect_server.database import models\nfrom prefect_server.utilities.exceptions import Unauthorized\n\n\nasync def test_create_logs(flow_run_id):\n\n    where_clause = {\n        \"flow_run_id\": {\"_eq\": flow_run_id},\n    }\n    logs_count = await models.Log.where(where_clause).count()\n\n    dt = pendulum.now()\n    await api.logs.create_logs([dict(flow_run_id=flow_run_id)])\n\n    assert await models.Log.where(where_clause).count() == logs_count + 1\n    log = await models.Log.where(where_clause).first(\n        {\"timestamp\", \"level\", \"task_run_id\"}\n    )\n\n    assert log.timestamp > dt\n    assert log.level == \"INFO\"\n    assert log.task_run_id is None\n\n\nasync def test_create_logs_with_task_run_id(flow_run_id, task_run_id):\n\n    where_clause = {\n        \"flow_run_id\": {\"_eq\": flow_run_id},\n        \"task_run_id\": {\"_eq\": task_run_id},\n    }\n    logs_count = await models.Log.where(where_clause).count()\n\n    await api.logs.create_logs(\n        [dict(flow_run_id=flow_run_id, task_run_id=task_run_id,)]\n    )\n\n    assert await models.Log.where(where_clause).count() == logs_count + 1\n    log = await models.Log.where(where_clause).first({\"task_run_id\"})\n\n    assert log.task_run_id == task_run_id\n\n\nasync def test_create_logs_with_info(flow_run_id):\n\n    where_clause = {\n        \"flow_run_id\": {\"_eq\": flow_run_id},\n    }\n    logs_count = await models.Log.where(where_clause).count()\n\n    timestamp = pendulum.datetime(2018, 1, 1)\n    info = {\"lineno\": 5}\n    level = \"ERROR\"\n    name = \"Test\"\n    message = \"test message\"\n\n    await api.logs.create_logs(\n        [\n            dict(\n                flow_run_id=flow_run_id,\n                timestamp=timestamp,\n                info=info,\n                level=level,\n                name=name,\n                message=message,\n            )\n        ]\n    )\n\n    assert await models.Log.where(where_clause).count() == logs_count + 1\n    log = await models.Log.where(where_clause).first(\n        {\"timestamp\", \"level\", \"name\", \"message\", \"info\"}\n    )\n\n    assert log.timestamp == timestamp\n    assert log.level == level\n    assert log.info == info\n    assert log.message == message\n    assert log.name == name\n\n\nasync def test_create_logs_with_bad_flow_run_ids_still_inserts_good_logs(flow_run_id):\n    where_clause = {\n        \"flow_run_id\": {\"_eq\": flow_run_id},\n    }\n    logs_count = await models.Log.where(where_clause).count()\n\n    dt = pendulum.now()\n    await api.logs.create_logs(\n        [\n            dict(flow_run_id=flow_run_id),\n            dict(flow_run_id=\"\"),\n            dict(flow_run_id=flow_run_id, message=\"foo\"),\n            dict(flow_run_id=None),\n        ]\n    )\n\n    assert await models.Log.where(where_clause).count() == logs_count + 2\n","sub_path":"server/tests/api/test_logs.py","file_name":"test_logs.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"184222856","text":"import aiohttp\nimport asyncio\n\n\nasync def fetch_resp_text(session, url):\n    '''Return the HTTP response body text.'''\n    async with session.get(url) as resp:\n        assert resp.status == 200\n        return await resp.text()\n\n\nasync def main():\n    async with aiohttp.ClientSession() as session:\n        html = await fetch_resp_text(session, 'http://python.org')\n        print(html)\n\n\nasync def webget(url):\n    async with aiohttp.ClientSession() as session:\n        html = await fetch_resp_text(session, url)\n        print(url)\n\n\nurl_list = ['http://python.org', 'http://www.baidu.com', 'http://www.qq.com']\nloop = asyncio.get_event_loop()\n# loop.run_until_complete(main())\ntasks = [asyncio.ensure_future(webget(host)) for host in url_list]\nloop.run_until_complete(asyncio.wait(tasks))\n","sub_path":"Thread/ansy.py","file_name":"ansy.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"23331596","text":"#!/usr/bin/env python\n#####################################################################################################################################\n# imports ###########################################################################################################################\n#####################################################################################################################################\nimport os\nimport logging\nimport re\nimport json\nfrom contextlib import closing\nfrom requests import get \nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\n\n#####################################################################################################################################\n# Helper functions ##################################################################################################################\n#####################################################################################################################################\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at 'url' by making a HTTP GET request. \n If the content-type of response is some kind of HTML/XML, return the \n text content, otherwise return None. \n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during request to {0} : {1}'.format(url,str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise\n \"\"\"\n content_types = (\"html\",\"json\",\"csv\")\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and any(ct in content_type for ct in content_types))\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors\n This function just prints them, but you can \n make it do anything.\n \"\"\"\n print(e)\n\n#####################################################################################################################################\n#####################################################################################################################################\n#####################################################################################################################################\n\n\n# url we want to scrape\nurl = 'https://archives.library.illinois.edu/archon/?p=collections/findingaid&id=4719&q=correspondence&rootcontentid=83972#id83972'\n\n# Get the site content\nresponse = simple_get(url)\n\n# Parse the site content\nsoup = BeautifulSoup(response, 'html.parser')\n\n# Get the description list (e.g. 
Series 2: Amateurism)\ndescription_list = soup.find('div', {\"id\": \"famain\"} ).dl\ndata_tag = description_list.dt\ndata_tag_title = data_tag.text\n\n# All boxes for this series\ndata_boxes = description_list.dd.dl\n\n# A list of all the box titles\nbox_titles = data_boxes.findChildren(['dt'], recursive=False)\n\n# a list (of lists) of all boxes content \nbox_contents = data_boxes.find_all(['dd'], recursive=False)\n\nfile_name = \"data.csv\"\nif os.path.exists(file_name):\n os.remove(file_name)\n\nprint(\"Writing data file...\")\nf = open(file_name, \"w+\")\n# set the CSV seperation character\nf.write(\"sep=|\\n\")\n\n# loop over all the box titles and the box content\nfor box_title, box_content in zip(box_titles, box_contents):\n box_title_text = box_title.text # get the box title text\n box_items = box_content.dl.findChildren(['dt'], recursive = False) # a list of all the top level items\n for box_item in box_items:\n item_text = box_item.text\n line = \"{} | {} | {} \\n\".format(data_tag_title, box_title_text, item_text)\n f.write(line)\n\nf.close()\nprint(\"Done!\")","sub_path":"basic_scraper.py","file_name":"basic_scraper.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"234033467","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport creator\nimport messages\n\ndef main():\n    dispatcher(sys.argv[1:])\n\ndef dispatcher(argv):\n    try:\n        command = argv[0]\n    except IndexError:\n        messages.helpMessage(\"No command specified\")\n        sys.exit()\n    if command in (\"-n\", \"new\"):\n        creator.dispatcher(argv[1:])\n    else:\n        messages.helpMessage(\"The specified command is unknown\")\n        sys.exit()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"latexhelper.py","file_name":"latexhelper.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"162754770","text":"import requests, sys\nfrom bs4 import BeautifulSoup\n\nnextLink = 'http://artedosdados.blogspot.com.br/2013/08/python-manipulando-strings-extraindo_6.html'\ntry:\n    c = requests.get(nextLink)\n    soup = BeautifulSoup(c.text, 'lxml')\n    # print the page title\n    print(soup.title.string)\n    print('-----------------------')\nexcept:\n    print('Error opening the page', sys.exc_info()[0])\n# return the content in bold\ntags = soup.find_all('b')\nfor tag in tags:\n    print(tag.string)","sub_path":"WebScraping /Capturando_elementos_em_negrito.py","file_name":"Capturando_elementos_em_negrito.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"445114143","text":"from optparse import make_option\nfrom django.core.management.base import BaseCommand, CommandError\nfrom avocado.models import DataCategory, DataConcept, DataField\nimport csv\n\n\ndef annotate_datafield(field_name, annotation):\n    '''Presently working with a limited subset of redcap fields.\n    Most fields won't be found.\n\n    '''\n    try:\n        c = DataField.objects.filter(name__iexact=field_name)[0]\n        c.description = annotation\n        c.save()\n        print('--> Annotated field {}'.format(field_name))\n    except Exception as E:\n        pass\n        ## print('Error while annotating {}: {}'.format(field_name, E))\n\n\ndef load_annotation(filename):\n    with open(filename, 'r') as csvfile:\n        reader = csv.DictReader(csvfile, delimiter='\\t')\n        for row in reader:\n            try:\n                field_name = row['field_label']\n                ep = float(row['EP'])\n                ea = float(row['EA'])\n                if (ep + ea) > 0:\n                    prct_prov = round(100*(ep / (ep + ea)), 1)\n                    prct_miss = 100 - prct_prov\n                    prct_prov_str = str(prct_prov)\n                    prct_miss_str = str(prct_miss)\n                else:\n                    prct_prov_str = 'NA'\n                    prct_miss_str = 'NA'\n\n                annotation = 'Data completeness: (Provided | Missing | % Provided | % Missing) = ({} | {} | {}% | {}%)'.format(row['EP'], row['EA'], prct_prov_str, prct_miss_str)\n                annotate_datafield(field_name, annotation)\n            except Exception as Ex:\n                print('Error while building annotation for {}'.format(field_name))\n                print(Ex)\n\n\nclass Command(BaseCommand):\n    \"\"\"Annotate concept based on external file\n\n    \"\"\"\n    help = 'Annotate data concept'\n\n    option_list = BaseCommand.option_list + (\n        make_option(\n            '--stats_file',\n            dest='stats_file',\n            help='File containing stats on fields.',\n        ),\n    )\n\n    def handle(self, *args, **options):\n        print('Start...')\n        filename = options['stats_file']\n        load_annotation(filename)\n        print('Done.')\n","sub_path":"ibemc/management/commands/load_annotation.py","file_name":"load_annotation.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"7750028","text":"'''Old LAS Parser'''\n\nimport re\nimport os\nfrom collections import deque\n\n\ndef getSection(match):\n    '''get section name from first Upper character'''\n    return {\n        'A': 'ascii',\n        'C': 'curve',\n        'V': 'version',\n        'W': 'well',\n        'P': 'parameter',\n        'O': 'other'\n    }[match]\n\n\nparameterRule = re.compile(r'([^\\.]*)\\.([^\\s]*)\\s*([^:]*):([^\\n]*)')\n\n\ndef parseLAS(lines):\n    '''Pass in raw las file lines'''\n    sep = None\n    version = None\n    wrap = None\n    strt = None\n    stop = None\n    step = None\n    null = None\n    curves = []\n    currentSection = None\n    for i, line in enumerate(lines):\n\n        # Check for Section Delimiter Character ~\n        if line.strip().startswith('~'):\n            try:\n                currentSection = getSection(line.strip()[1:2].upper())\n            except KeyError:\n                raise LASParseError(\n                    \"Unknown Section: {} at Line#: {}\".format(line.strip(), i))\n        if line.strip().startswith('#') or currentSection == 'other':\n            yield('comment', line)\n        elif currentSection != 'ascii':\n            match = parameterRule.match(line)\n            if match:\n                # Split common line format into pieces and clean\n                parameter, unit, value, description = map(\n                    str.strip, match.groups())\n                if version is not None and version < 2:\n                    value, description = description, value\n                if currentSection == 'version':\n                    if parameter.upper() == 'WRAP':\n                        wrap = value\n                    elif parameter.upper() == 'VERS':\n                        # Try to float value so we can compare it numerically\n                        try:\n                            version = float(value)\n                        except:\n                            version = value\n                    elif parameter.upper() == 'SEP':\n                        sep = value\n                elif currentSection == 'well':\n                    if parameter.upper() == 'STRT':\n                        strt = value\n                    elif parameter.upper() == 'STOP':\n                        stop = value\n                    elif parameter.upper() == 'STEP':\n                        step = value\n                    elif parameter.upper() == 'NULL':\n                        null = value\n                elif currentSection == 'curve':\n                    # build list so we can use these later\n                    curves.append(parameter.strip())\n                yield(currentSection, (parameter, unit, value, description))\n        else:\n            # handle ascii block\n            firstLine = True\n            for i, line in enumerate(lines, i):\n                if sep is None:\n                    values = line.split()\n                else:\n                    values = line.split(sep)\n                if len(values) != len(curves):\n                    raise LASParseError(\"Mismatch Length of Curves: {} and Values: {} for Line#: {}\".format(\n                        values[0], curves[0], line))\n                else:\n                    if firstLine:\n                        firstLine = False\n                        if float(strt) != float(values[0]):\n                            if float(stop) == float(values[0]):\n                                raise LASParseError(\"Stop Value: {} matches First Value: {} in Reference: {} for Line#: {}\".format(\n                                    stop, values[0], curves[0], line))\n                            else:\n                                raise LASParseError(\"Start Value: {} does not match First Value: {} in Reference: {} for Line#: {}\".format(\n                                    strt, values[0], curves[0], line))\n                    yield (currentSection, values)\n\n            if float(stop) != float(values[0]):\n                raise LASParseError(\"Stop Value: {} does not match Last Value: {} in Reference: {} for Line#: {}\".format(\n                    stop, values[0], curves[0], line))\n\n\nclass LASParseError(Exception):\n    pass\n\n\nif __name__ == '__main__':\n    folderPath = os.path.realpath(os.path.join(\"Development\", \"LAS\",\n                                               \"LAS Files\"))\n\n    for root, dirs, files in os.walk(folderPath, topdown=False):\n        for filename in files:\n            if filename.lower().endswith('.las'):\n                filepath = os.path.join(root, filename)\n                with open(filepath, 'r') as las_file:\n                    try:\n                        deque(parseLAS(las_file))\n                        # print('\\n'.join(map(str, parseLAS(las_file))))\n                    except LASParseError as e:\n                        print(e, filepath)\n                        # exc_type, exc_obj, tb = sys.exc_info()\n                        # f = tb.tb_frame\n                        # lineno = tb.tb_lineno\n                        # python_filename = f.f_code.co_filename\n            
# print 'EXCEPTION {}, FILE {},\n # {}'.format(type(e).__name__,filepath, e)\n","sub_path":"Development/PYTHON/LASParser.py","file_name":"LASParser.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"311827848","text":"#!/usr/bin/env python\n\n\n# Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.\n#\n# Follow up:\n# What if the linked list is extremely large and its length is unknown to you? Could you solve this efficiently without using extra space?\n#\n# Example:\n#\n# // Init a singly linked list [1,2,3].\n# ListNode head = new ListNode(1);\n# head.next = new ListNode(2);\n# head.next.next = new ListNode(3);\n# Solution solution = new Solution(head);\n#\n# // getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.\n# solution.getRandom();\n\n\n### Tag: Reservoir sampling ###\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n\n\n    def __init__(self, head):\n        \"\"\"\n        http://www.geeksforgeeks.org/reservoir-sampling/\n        \"\"\"\n        self.head = head\n\n    def getRandom(self):\n        import random\n\n        selected_item = self.head\n\n        if self.head.next is None:\n            return selected_item.val\n\n        current = self.head.next\n        index = 1\n        k = 1 # only select 1 item\n\n        while current:\n            j = random.randint(0, index)\n\n            if j < k:\n                selected_item = current\n\n            current = current.next\n            index += 1\n\n        return selected_item.val\n\n    # def __init__(self, head):\n    #     \"\"\"\n    #     @param head The linked list's head.\n    #     Note that the head is guaranteed to be not null, so it contains at least one node.\n    #     :type head: ListNode\n    #     \"\"\"\n    #     p = head\n    #     self.items = []\n    #     while p:\n    #         self.items.append(p.val)\n    #         p = p.next\n    #\n    # def getRandom(self):\n    #     \"\"\"\n    #     Returns a random node's value.\n    #     :rtype: int\n    #     \"\"\"\n    #     import random\n    #     return random.choice(self.items)\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(head)\n# param_1 = obj.getRandom()","sub_path":"382_linked_list_random_node.py","file_name":"382_linked_list_random_node.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"338651701","text":"from traceback import format_exception\nfrom discord.ext import commands\nfrom glob import glob\nfrom utils import checks\n\nclass Core(commands.Cog):\n    def __init__(self, potato):\n        self.potato = potato\n\n    @staticmethod\n    def get_traceback(exception, limit=None, chain=True):\n        return ''.join(format_exception(\n            type(exception),\n            exception,\n            exception.__traceback__,\n            limit=limit,\n            chain=chain)\n        )\n\n    def get_modules_list(self):\n        modules = glob(\"modules/**/**.py\", recursive=True)\n        modules = [m.replace(\"/\", \".\").replace(\"modules.\", \"\").replace(\".py\", \"\") for m in modules]\n        return modules\n\n    def get_modules(self):\n        modules = self.get_modules_list()\n        new_modules = []\n        for module in modules:\n            if module in self.potato.settings[\"modules\"]:\n                new_modules.append(\"+ \" + module)\n            else:\n                new_modules.append(\"- \" + module)\n        return new_modules\n\n    def get_full_module_name(self, name):\n        modules = self.get_modules_list()\n        for module in modules:\n            if module.endswith(name):\n                return module\n        return name\n\n    @commands.command()\n    @checks.is_owner()\n    async def reload(self, ctx, module_name):\n        await self.unload(ctx, module_name)\n        await self.load(ctx, module_name)\n\n    @commands.command()\n    @checks.is_owner()\n    async def load(self, ctx, module_name):\n        \"\"\"Load a module.\"\"\"\n        module_name = self.get_full_module_name(module_name)\n        try:\n            self.potato.load_module(module_name)\n            return await ctx.send(\"Module loaded successfully.\")\n        except Exception as e:\n            msg = 'Unable to load; the module caused a `{}`:\\n```py\\n{}\\n```'\\\n                .format(type(e).__name__, self.get_traceback(e))\n            return await ctx.send(msg)\n\n    @commands.command()\n    @checks.is_owner()\n    async def unload(self, ctx, module_name):\n        \"\"\"Unload a module.\"\"\"\n        module_name = self.get_full_module_name(module_name)\n        try:\n            self.potato.unload_module(module_name)\n            return await ctx.send(\"Module unloaded successfully.\")\n        except Exception:\n            return await ctx.send(\"Unable to unload; the module isn't loaded.\")\n\n    @commands.command()\n    @checks.is_owner()\n    async def modules(self, ctx):\n        \"\"\"List modules.\"\"\"\n        modules = sorted(self.get_modules())\n        message = \"```diff\\n\"\n        message += \"\\n\".join(modules)\n        message += \"```\"\n        await ctx.send(message)\n\n\ndef setup(potato):\n    \"\"\"Setup the Core module.\"\"\"\n    potato.setup_module(Core(potato))\n","sub_path":"modules/default/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123049152","text":"# Copyright (c) Meta Platforms, Inc. and affiliates.\n\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nfp16 = dict(loss_scale=512.)\n# model settings\nmodel = dict(\n type='CascadeRCNN',\n pretrained=None,\n backbone=dict(\n type='ConvNeXt',\n in_chans=3,\n depths=[3, 3, 27, 3],\n dims=[128, 256, 512, 1024],\n drop_path_rate=0.6,\n layer_scale_init_value=1.0,\n out_indices=[0, 1, 2, 3],\n ),\n neck=dict(\n type='FPN',\n in_channels=[128, 256, 512, 1024],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[4],\n # ratios=[0.5, 1.0, 2.0],\n ratios=[0.2, 0.5, 1.2, 3.5],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n roi_head=dict(\n type='CascadeRoIHead',\n num_stages=3,\n stage_loss_weights=[1, 0.5, 0.25],\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=[\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.033, 0.033, 0.067, 0.067]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0))\n ]),\n # model training and testing settings\n train_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_pre=2000,\n max_per_img=2000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=[\n dict(\n 
assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.3,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.4,\n neg_iou_thr=0.4,\n min_pos_iou=0.4,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ]),\n test_cfg=dict(\n rpn=dict(\n nms_pre=1000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.1,\n nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100)))\n\n\n# dataset settings\ndataset_type = 'ShipDataset'\ndata_root = 'data/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n # dict(type='LoadImageFromFile'),\n # dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Mosaic', img_scale=(256, 256), pad_val=0.0, prob=0.5),\n dict(type='Resize',\n img_scale=[(256, 256), (512, 512)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.0, direction='horizontal'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n # img_scale=(512, 512),\n img_scale=[(256, 256), (384, 384), (512, 512)],\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\n\ntrain_dataset = dict(\n type='MultiImageMixDataset',\n dataset=dict(\n type=dataset_type,\n ann_file='/data/user_data/annotations/train.json',\n img_prefix='/data/raw_data/training_dataset/A/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True)\n ],\n filter_empty_gt=False,\n ),\n pipeline=train_pipeline)\n\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=2,\n train=train_dataset,\n val=dict(\n type=dataset_type,\n ann_file='/data/user_data/annotations/train.json',\n img_prefix='/data/raw_data/training_dataset/A/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file='/data/user_data/annotations/testA.json',\n img_prefix='/data/raw_data/test_dataset/测试集/',\n pipeline=test_pipeline))\n\nevaluation = dict(interval=32, metric='bbox', iou_thrs=[0.5])\n\n# optimizer\noptimizer = dict(constructor='LearningRateDecayOptimizerConstructor',\n type='AdamW',\n lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05,\n paramwise_cfg={'decay_rate': 0.8,\n 'decay_type': 'layer_wise',\n 'num_layers': 12})\noptimizer_config = dict(grad_clip=None)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[27, 33])\n\nrunner = dict(type='EpochBasedRunner', 
max_epochs=32)\n\ncheckpoint_config = dict(interval=32)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\ncustom_hooks = [dict(type='NumClassCheckHook')]\n\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = '/data/user_data/pretrained/cascade_mask_rcnn_convnext_base_22k_3x.pth'\nresume_from = None\nworkflow = [('train', 1)]\nwork_dir = '/data/user_data/train_work_dirs/cascade_convnext_base_large_scale_only_train_data_36ep_imagenet22k_pretrain_noflip_anchor4_5_3_3_4_5_ratio_mosaic'","sub_path":"configs/ship/cascade_convnext_base_large_scale_onlytraindata_noflip_anchor4_ratio_mosaic.py","file_name":"cascade_convnext_base_large_scale_onlytraindata_noflip_anchor4_ratio_mosaic.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"456330130","text":"import matplotlib.pyplot as plt\r\nimport xlrd\r\nfrom pylab import *\r\nfrom xlrd import open_workbook\r\nx_data = []\r\ny_data = []\r\nx_volte=[]\r\ntemp=[]\r\nwb = open_workbook('my_data.xlsx')\r\n\r\nfor s in wb.sheets():\r\n print('Sheets:', s.name)\r\n for row in range(s.nrows):\r\n print('the row is:', row)\r\n values = []\r\n for col in range(s.ncols):\r\n values.append(s.cell(row,col).value)\r\n print(values)\r\n x_data.append(values[0])\r\n y_data.append(values[1])\r\n\r\nplt.plot(x_data, y_data, 'bo-', label='Phase curve', linewidth=1)\r\nplt.title('TR14 phase detector')\r\nplt.legend(loc='upper left')\r\n\r\nax = gca()\r\nax.spines['right'].set_color('none')\r\nax.spines['top'].set_color('none')\r\nax.xaxis.set_ticks_position('bottom')\r\nax.spines['bottom'].set_position(('data', 0))\r\nax.yaxis.set_ticks_position('left')\r\nax.spines['left'].set_position(('data', 0))\r\n\r\nplt.xlabel('input-deg')\r\nplt.ylabel('output-V')\r\n\r\nplt.show()\r\nprint('over!')","sub_path":"example_xlrd.py","file_name":"example_xlrd.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"444737377","text":"from flask_wtf import FlaskForm\nfrom wtforms import BooleanField, IntegerField, SelectField, SubmitField, validators\n\nclass BaseList(FlaskForm):\n base_field = SelectField('Base')\n submit = SubmitField()\n\nclass CommodList(FlaskForm):\n commo = SelectField('Commodity')\n submit = SubmitField()","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"276892109","text":"# -*- coding: utf-8 -*-\n\nimport psycopg2\nfrom datetime import datetime\nimport requests\nimport json\nimport time\nimport sys\n\nconn = psycopg2.connect(\"dbname=postgres user=postgres password=Ef9iei!!\")\n\n\n\n\n\ndef main():\n    # Determine the stop date\n    cur = conn.cursor()\n\n    cur.execute(\"select \\\"VALUE\\\" from SETTINGS where \\\"PARAMETER\\\" = 'MIN_DATE';\")\n\n    date_str = str(cur.fetchone())[2:12]\n\n    datetime_object = datetime.strptime(date_str, '%Y-%m-%d')\n\n    unix_stop_time = time.mktime(datetime_object.timetuple())\n\n    cur.execute(\"select \\\"match_id\\\" from bot_matches\")\n\n    db_matches_temp = cur.fetchall()\n    db_matches = []\n\n    for db_match in db_matches_temp:\n        db_matches.append(int(str(db_match)[1:-2]))\n\n    while 1 == 1:\n\n        cur.execute(\"select \\\"VALUE\\\" from SETTINGS where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH';\")\n\n        try:\n            LAST_RECORDED_MATCH = cur.fetchone()[0]\n        except TypeError:\n            LAST_RECORDED_MATCH = None\n\n        if LAST_RECORDED_MATCH is None:\n\n            matches_opendota = requests.get('https://api.opendota.com/api/proMatches/')\n\n            mode = 'continue'\n\n        else:\n\n            matches_opendota = requests.get('https://api.opendota.com/api/proMatches?less_than_match_id=' + LAST_RECORDED_MATCH)\n\n        # Decode the JSON response\n        matches_json = json.loads(matches_opendota.text)\n\n        for match in matches_json:\n\n            # If this match is already in the database, skip it\n\n\n\n            if match['match_id'] in db_matches:\n                print(\"Skipped \" + str(match['match_id']))\n\n                cur.execute(\"update settings set \\\"VALUE\\\" = '\" + str(\n                    match['match_id']) + \"' where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n\n                conn.commit()\n\n                continue\n\n            #print(str(match['start_time'])+ \" \" + str(unix_stop_time) )\n\n            if match['start_time'] < unix_stop_time:\n                cur.execute(\"update settings set \\\"VALUE\\\" = NULL where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n                conn.commit()\n                sys.exit(\"Download finished\")\n\n            try:\n                dire_team_id = str(match['dire_team_id'])\n\n                if dire_team_id == \"None\":\n                    dire_team_id = \"NULL\"\n\n            except KeyError:\n                dire_team_id = \"NULL\"\n\n            try:\n                radiant_team_id = str(match['radiant_team_id'])\n\n                if radiant_team_id == \"None\":\n                    radiant_team_id = \"NULL\"\n\n            except KeyError:\n                radiant_team_id = \"NULL\"\n\n\n            try:\n                dire_team_name = str(match['dire_name'])\n\n                dire_team_name = \"'\" + dire_team_name.replace(\"'\", \"''\") + \"'\"\n\n            except KeyError:\n                dire_team_name = \"NULL\"\n\n            try:\n                radiant_team_name = str(match['radiant_name'])\n\n                radiant_team_name = \"'\" + radiant_team_name.replace(\"'\", \"''\") + \"'\"\n\n            except KeyError:\n                radiant_team_name = \"NULL\"\n\n\n\n            cur.execute(\"INSERT INTO public.BOT_MATCHES\"\n                        \"(MATCH_ID, DURATION, RADIANT_SCORE, RADIANT_WIN, DIRE_TEAM_ID, DIRE_TEAM_NAME, RADIANT_TEAM_ID, RADIANT_TEAM_NAME, START_TIME) \"\n                        \"VALUES (\"\n                        + str(match['match_id']) + \",\"\n                        + str(match['duration']) + \",\"\n                        + str(match['radiant_score']) + \",\"\n                        + str(int(match['radiant_win'])) + \",\"\n                        + dire_team_id + \",\"\n                        + dire_team_name + \",\"\n                        + radiant_team_id + \",\"\n                        + radiant_team_name + \",\"\n                        + str(match['start_time']) + \")\"\n\n                        )\n\n            cur.execute(\"update settings set \\\"VALUE\\\" = '\" + str(match['match_id']) + \"' where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n            conn.commit()\n            LAST_RECORDED_MATCH = match['match_id']\n\n            match_opendota = requests.get('https://api.opendota.com/api/matches/' + str(match['match_id']))\n            match_json = json.loads(match_opendota.text)\n\n\n            
#startapp_players\n\n            try:\n                for player in match_json['players']:\n                    cur.execute(\"INSERT INTO public.BOT_PLAYERS\"\n                                \"(MATCH_ID, ACCOUNT_ID, ASSISTS, DEATHS, GOLD_PER_MIN, HERO_ID, KILLS, XP_PER_MIN, WIN, LOSE, NAME, ISRADIANT, HERO_DAMAGE) \"\n                                \"VALUES (\"\n                                + str(match['match_id']) + \",\"\n                                + str(player['account_id']) + \",\"\n                                + str(player['assists']) + \",\"\n                                + str(player['deaths']) + \",\"\n                                + str(player['gold_per_min']) + \",\"\n                                + str(player['hero_id']) + \",\"\n                                + str(player['kills']) + \",\"\n                                + str(player['xp_per_min']) + \",\"\n                                + str(player['win']) + \",\"\n                                + str(player['lose']) + \",\"\n                                + \"'\" + str(player['name']) + \"',\"\n                                + str(int(player['isRadiant'])) + \",\"\n                                + str(player['hero_damage']) + \")\"\n\n                                )\n                    conn.commit()\n\n            except TypeError:\n                continue\n\n\n\n\n            conn.commit()\n\n            print(\"Recorded \" + str(match['match_id']))\n\n\n\n\n    cur.close()\n\n    conn.close()\n\ndef some_function():\n    try:\n        main()\n        return True\n    except (KeyError, TypeError):\n        main()\n        return False\n\n\nwhile True:\n    if some_function():\n        break\n    else:\n        time.sleep(5)\n        continue\n\n\n","sub_path":"0 Get Historic Data Matches.py","file_name":"0 Get Historic Data Matches.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"68676162","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom libs.con import Con\nimport pdb\n\nCATEGORY_PARAM='category'\nPAGE_PARAM='page'\nCHUNK=settings.PRODUCTS_CHUNK\n\nclass Base:\n\n\tdef __init__(self):\n\t\t# Making connection to Odoo server\n\t\tself._con = Con(settings.ODOO_URL, settings.ODOO_DB, settings.ODOO_USERNAME, settings.ODOO_PASSORD)\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tGet product categories\n\t\t@Input:\n\t\t\troot_url:\n\t\t@Output:\n\t\t\tcates = [{'id': ,'name':, 'url':, \n\t\t\t\t\t\t'child': [ {'id': , 'name':, 'url':}]}]\n\t\"\"\"\n\tdef get_product_cates(self, root_url='/'):\n\t\t# if existing 'saleable product category' we will use this category else geting all category\n\t\t# We will get two level\n\t\tcate_ids = []\n\t\tsaleable_cate_id = self._con.execute('product.category', 'search',[[['name', '=', 'Saleable']]])\n\t\tif saleable_cate_id:\n\t\t\tcate_ids = self._con.execute('product.category', 'search',[[['parent_id', '=', saleable_cate_id[0]]]])\n\t\telse:\n\t\t\t# get all categories from Odoo\n\t\t\tcate_ids = self._con.execute('product.category', 'search',[[]])\n\t\tcates = self._con.execute('product.category', 'read',[cate_ids], {'fields': ['name']})\n\t\tfor cate in cates:\n\t\t\turl = root_url + \"%s/%s\" % (CATEGORY_PARAM, str(cate['id']))\n\t\t\tcate['url'] = url\n\t\t\tchild_cate_ids = self._con.execute('product.category', 'search',[[['parent_id', '=', cate['id']]]])\n\t\t\tif child_cate_ids:\n\t\t\t\tchild_cates = self._con.execute('product.category', 'read',[child_cate_ids], {'fields': ['name']})\n\t\t\t\tfor child_cate in child_cates:\n\t\t\t\t\turl = root_url + \"%s/%s\" % (CATEGORY_PARAM, str(cate['id']))\n\t\t\t\t\tchild_cate['url'] = url\n\t\t\t\tcate['child'] = child_cates\n\t\treturn cates\n\n\tdef get_product_ids(self, categ_id=None, offset=None, limit=None, sale_ok=True, order=\"id DESC\"):\n\t\tparams = [['sale_ok', '=', sale_ok]]\n\t\toffset_par = {\"order\":order}\n\t\tif offset:\n\t\t\toffset_par['offset'] = offset\n\t\tif limit:\n\t\t\toffset_par['limit'] = limit \n\t\tif categ_id:\n\t\t\tparams.append(['categ_id', '=', categ_id])\n\t\tproduct_ids = self._con.execute('product.template', 'search', [params], offset_par)\n\t\treturn product_ids\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tget products by id. 
If fields has 'product_multi_images_ids', it will get images of product\n\t\t@Output:\n\t\t\tproducts = [{'id': ,'product_multi_images_ids': }]\n\t\"\"\"\n\tdef get_products(self, product_ids, fields):\n\t\ttry:\n\t\t\tfields.index('product_multi_images_ids')\n\t\t\tneed_images = True\n\t\texcept:\n\t\t\tneed_images = False\n\t\tproducts = self._con.execute('product.template', 'read',[product_ids], {'fields': fields})\n\t\tif need_images:\n\t\t\tfor product in products:\n\t\t\t\tmulti_images = self._con.execute('product.multi.images', 'read',[product['product_multi_images_ids']], {'fields': ['name', 'image']})\n\t\t\t\tproduct['product_multi_images_ids'] = multi_images\n\t\treturn products\n\nclass HomeView(Base):\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tget products and group by category\n\t\t@Input:\n\t\t\tlimit: limit number of products is showed on one line\n\t\t@Output:\n\t\t\tproducts_group = [{'id': , 'name': name of categ, 'url':, 'products': [{ 'id':, ..}]}]\n\t\"\"\"\n\tdef get_products_group(self, categ_num=5, limit=CHUNK):\n\t\tproduct_fields = ['name', 'list_price', 'product_multi_images_ids']\n\t\tcates = self.get_product_cates()\n\t\tactive = True\n\t\tfor cate in cates[0:categ_num-1]:\n\t\t\tproduct_ids = self.get_product_ids(cate['id'], 0, limit)\n\t\t\tcate['products'] = self.get_products(product_ids, product_fields)\n\t\t\tcate['active'] = active\n\t\t\tif active:\n\t\t\t\tactive = False\n\t\treturn cates\n\n\tdef get_recommened_products(self, limit=20, chunk=CHUNK):\n\t\trecommened_products= []\n\t\tproduct_ids = self.get_product_ids(limit=limit)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\t\tchunk_size = len(products)/chunk\n\t\tanchor = 0\n\t\tfor i in range(chunk_size):\n\t\t\tgroup = {'id':i, 'active':False}\n\t\t\tif i == 0:\n\t\t\t\tgroup['active'] = True\n\t\t\tif i == chunk_size -1:\n\t\t\t\tgroup['products'] = products[anchor:]\n\t\t\telse:\n\t\t\t\tgroup['products'] = products[anchor:anchor + 3]\n\t\t\tanchor +=3\n\t\t\trecommened_products.append(group)\n\t\treturn recommened_products\n\n\t# Create your views here.\n\tdef shop(self, request, categ_id=None):\n\t\tif categ_id and int(categ_id) < 0:\n\t\t\tcateg_id = None\n\t\telse:\n\t\t\tcateg_id = int(categ_id)\n\t\tproduct_ids = self.get_product_ids(categ_id, 0, CHUNK)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\n\t\t# get total number of products\n\t\tproducts_num = len(self.get_product_ids(categ_id))\n\t\tcontext = {'products':products,\n\t\t\t\t\t# get categories\n\t\t\t\t\t'cates': self.get_product_cates(),\n\t\t\t\t\t'load_more': {'total_products': products_num, 'categ_id':categ_id, 'chunk': CHUNK},\n\t\t\t\t\t}\n\t\treturn render(request, 'goldtree/shop.html', context)\n\n\t# Create your views here.\n\tdef index(self, request):\n\t\toffset = int(request.GET.get('page',1))\n\t\tcateg_id = request.GET.get('category', -1)\n\t\tif categ_id and int(categ_id) < 0:\n\t\t\tcateg_id = None\n\t\telse:\n\t\t\tcateg_id = int(categ_id)\n\t\tif int(offset) < 1:\n\t\t\toffset = 1\n\t\t#Offset begin with 0 in the coding but interface will show 1\n\t\toffset = offset - 1\n\t\tproduct_ids = self.get_product_ids(categ_id, offset, 6)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\n\t\t#get products which are grouped by category\n\t\tproducts_group = self.get_products_group(5, 4)\n\n\t\t#get recommened products\n\t\trecommened_products = 
self.get_recommened_products(9)\n\n\t\tcontext = {\n\t\t\t\t'products':products,\n\t\t\t\t'cates': self.get_product_cates(),\n\t\t\t\t'products_group': products_group,\n\t\t\t\t'recommened_products': recommened_products,\n\t\t\t\t}\n\t\treturn render(request, 'goldtree/index.html', context)\n","sub_path":"goldtree/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"189131726","text":"import unittest\nimport distance_calculator as calculator\n\n\nclass EuclideanDistanceTestSuite(unittest.TestCase):\n def setUp(self):\n self.calculator = calculator.EuclideanDistance()\n\n def test_should_return_correct_distance(self):\n instance1 = [6, 5]\n instance2 = [1, 1]\n weights = [3, 2]\n correct_result = 10.344\n\n self.assertAlmostEqual(self.calculator.calculate(instance1, instance2, weights), correct_result, delta=0.001)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"euclidean_distance_test.py","file_name":"euclidean_distance_test.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"553507892","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport streamlit as st\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report\n\nst.title(\"Machine learning and medical diagnosis\")\n\ndf = pd.read_csv('data/BRCA.csv')\ndata = df.copy()\nst.write(\n    \"This dataset covers a group of breast cancer patients who had surgery to remove their tumour, and contains the following variables:\")\nst.write(data.head())\n\n# Data and feature engineering\ndf = df.drop([334, 335, 336, 337, 338, 339, 340])\ndata = data.drop([334, 335, 336, 337, 338, 339, 340])\ndf = df.drop('Date_of_Last_Visit', axis=1)\ndf['Patient_Status'] = df['Patient_Status'].replace('Dead', 'Deceased')\n\nohe = pd.get_dummies(data[['Gender', 'Tumour_Stage', 'Histology', 'ER status', 'PR status',\n                           'HER2 status', 'Surgery_type']])\n\ndata = data.drop(['Gender', 'Patient_ID', 'Tumour_Stage', 'Histology', 'ER status', 'PR status',\n                  'HER2 status', 'Surgery_type', 'Date_of_Surgery', 'Date_of_Last_Visit'], axis = 1)\n\nle = LabelEncoder()\ndata['patient_status'] = le.fit_transform(data['Patient_Status'])\ndata = data.drop('Patient_Status', axis = 1)\n\ndata = data.join(ohe)\n\ndata['er_status_positive'], data['pr_status_positive'], data['her2_status_negative'], data['her2_status_positive'] = \\\n    data['ER status_Positive'], data['PR status_Positive'], data['HER2 status_Negative'], data['HER2 status_Positive']\n\ndata = data.drop(['ER status_Positive',\n                  'PR status_Positive', 'HER2 status_Negative', 'HER2 status_Positive'], axis = 1)\ndata = data.drop(['er_status_positive', 'pr_status_positive'], axis = 1)\n\ncut = data[data['patient_status'] <= 1]\ndata = cut\n\n# Missing data\nst.title(\"Reviewing our dataset\")\nst.write(\"Let's explore our data to see if anything is missing. Before any analysis can begin, we need to ensure the data is of sufficient quality, as the strength of our predictions will reflect the quality of our data!\")\n\ndef missingdata():\n    plt.figure(figsize=(10, 10), dpi = 250)\n    g = sns.heatmap(data.isnull(), cmap='RdBu')\n    g.set_xlabel(\"Features\")\n    g.set_ylabel(\"Index\")\n    g.set_title('Missing data by feature')\n    st.pyplot(g.figure)\n\nmissingdata()\n\nst.write(\"Great, no data is missing!\")\n\n\n# Univariate analysis\n\nst.text(\"\")\nst.write(\n    \"From these data we want to predict the patient status (aka the target variable). Let's look at the target variable in detail:\")\n\n\ndef countplot():\n    plt.figure(figsize = (10, 7.5), dpi = 250)\n    p = sns.countplot(df['Patient_Status'], palette = 'Paired')\n    p.set_xlabel(\"Patient status\")\n    p.set_ylabel(\"Count\")\n    st.pyplot(p.figure)\n\n\ncountplot()\n\nst.text(\"\")\nst.title(\"Basic descriptive analysis\")\nst.write(\"Using basic descriptive statistics, we can generate quick insights into our data!\")\nst.write(data.describe())\n\n# Correlation analysis\nst.text(\"\")\nst.write(\n    \"To explore how our data correlates, we can call the Pandas built-in correlation function. 
This function takes three method arguments, so feel free to explore how the correlations change per method!\")\nmethods = ['Spearman', 'Pearson', 'Kendall']\nselection = st.selectbox('Please select correlation method:', methods)\n\nif selection == 'Pearson':\n    i = 'pearson'\nelif selection == 'Spearman':\n    i = 'spearman'\nelse:\n    i = 'kendall'\n\nst.write(data.corr(method=i))\n\n# Model development\nst.write(\n    \"After some basic data and feature engineering (that I'll spare you from!) we can start building a basic ML model to set our baseline performance. First, we must define our training set:\")\n\nX = data.drop('patient_status', axis = 1)\ny = data['patient_status']\n\nst.write(X)\nst.text(\"\")\nst.write(\"And the target variable we are trying to predict:\")\nst.write(y)\n\nst.write(\"Now we have our data defined, we'll use a selection of models and see which performs best out of the box! For this we'll need classification algorithms; let's see how they perform!\")\n\nalgorithms = ['Logistic regression', 'Kneighbours classifier', 'Random Forest Classifier']\nselection = st.selectbox('Please select a classification algorithm:', algorithms)\n\nif selection == 'Logistic regression':\n    i = LogisticRegression\nelif selection == 'Kneighbours classifier':\n    i = KNeighborsClassifier\nelse:\n    i = RandomForestClassifier\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 101)\n\n\ndef modeldev(i, j, k, l, m):\n    model = i()\n    model.fit(j, k)\n    y_pred = model.predict(l)\n    rep = classification_report(m, y_pred, output_dict = True)\n    rep = pd.DataFrame(rep)\n    rep = rep.drop('support', axis=0)\n    st.write(rep)\n\nmodeldev(i, X_train, y_train, X_test, y_test)\n\nst.write(\"Here we are, results from our trained model!\")\n\n\n\n\n# st.write(\n#     \"One parameter we can look to change is K, the value representing the count of nearest neighbours, and its value is vital to developing a model with good classification capability. \")\n\n\n# error_rate = []\n# for i in range(1,40):\n#     knn = KNeighborsClassifier(n_neighbors=i)\n#     knn.fit(X_train,y_train)\n#     pred_i = knn.predict(X_test)\n#     error_rate.append(np.mean(pred_i != y_test))\n#\n# optimalk = pd.DataFrame({\n#     'k': range(1,40),\n#     'error_rate': error_rate\n# })\n# plt.figure(figsize = (10, 10), dpi = 200)\n# plt.title(\"Error rate by value of K\")\n# plt.ylabel(\"Error rate\")\n# plt.xlabel(\"Value of K\")\n# p = sns.lineplot(range(1, 40), optimalk['error_rate'], markers = True)\n# st.text(\"\")\n# st.pyplot(p.figure)\n# st.write(\"As we can see the minimum error is: {} at K = {}\".format(min(error_rate), error_rate.index(min(error_rate))))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"301964992","text":"def solution(A):\n cnt = {}\n for num in A:\n key_ = str(num)\n if not (key_ in cnt):\n cnt[key_] = 1\n else:\n cnt[key_] += 1\n result = list(cnt.items())\n for item in result:\n if item[1] % 2 == 1:\n return int(item[0])","sub_path":"jinkyuhan/codility_lesson2_OddOccurrencesInArray.py","file_name":"codility_lesson2_OddOccurrencesInArray.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"296541180","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@author: lijingxin\n@contact: lijingxin666@gmail.com\n@site: https://github.com/lijingxin666\n@time: Created on 5:28 PM 5/7/20\n\nQuestion: \n\n\"\"\"\nfrom BinarySearchTree import BinarySearchTree\nfrom BinarySearchTree import Node\nfrom F14031IterativeGet import AdvBST1\nfrom F14032IterativeAdd import AdvBST2\n\nclass AdvBST3(AdvBST2):\n    # # Traversal Methods\n    # def print_inorder(self):\n    #     self._print_inorder(self._root)\n    #     print('')\n    #\n    # def _print_inorder(self, node):\n    #     if (node is None):\n    #         return\n    #     self._print_inorder(node._left)\n    #     print('[', node._item, ']', end=\" \")\n    #     self._print_inorder(node._right)\n\n    def printInorderIterative(self):\n        node = self._root\n        stack = []\n\n        while True:\n            while (node is not None):  # when this loop exits we have reached the leftmost node\n                stack.append(node)\n                node = node._left\n            if len(stack) == 0:  # stack is empty, traversal is done\n                return\n            # stack is not empty: pop and print, then move to the right subtree\n            node = stack.pop()\n            print('[', node._item, ']', end=\" \")\n            node = node._right\n\nbst = AdvBST3()\nnumbers = [6, 4, 8, 7, 9, 2, 1, 3, 5, 13, 11, 10, 12]\nfor i in numbers:\n    bst.add(i)\n#bst.print_inorder()\nbst.printInorderIterative()","sub_path":"Algorithm_PY/ch14/F14033IterativeInorderTraversal.py","file_name":"F14033IterativeInorderTraversal.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"272633513","text":"from keypoints import part_names, pose_chain, part_name_to_id_map\nfrom utils import (get_image_coords, clamp, add_vectors, get_offset_point,\n                   within_radius_of_corresponding_point)\n\n\nparent_children_id_tuples = [\n    (part_name_to_id_map[parent_joint_name], part_name_to_id_map[child_joint_name])\n    for parent_joint_name, child_joint_name in pose_chain\n]\nparent_to_child_edges = tuple(t[1] for t in parent_children_id_tuples)\nchild_to_parent_edges = tuple(t[0] for t in parent_children_id_tuples)\n\n\ndef decode_pose(root, heatmap_scores, offsets, output_stride, displacements_fwd, displacements_bwd):\n    num_parts = heatmap_scores.shape[2]\n    num_edges = len(parent_to_child_edges)\n    # TODO: check if this is buggy\n    instance_keypoints = [None] * num_parts\n\n    # Start the new detection instance at the position of root.\n    root_score, root_part = -root[0], root[1]  # The `-` is due to python not having a max heap 🤮\n    root_point = get_image_coords(root_part, output_stride, offsets)\n    instance_keypoints[root_part['keypoint_id']] = {\n        'score': root_score, 'part': part_names[root_part['keypoint_id']], 'position': root_point\n    }\n\n    # Decode the part positions upwards in the tree, following the backward displacements.\n    # TODO: This is absolutely disgusting code, please rewrite.\n    for edge in reversed(range(num_edges)):\n        # TODO: have some doubts about this code\n        source_keypoint_id = parent_to_child_edges[edge]\n        target_keypoint_id = child_to_parent_edges[edge]\n        if instance_keypoints[source_keypoint_id] and not instance_keypoints[target_keypoint_id]:\n            instance_keypoints[target_keypoint_id] = traverse_to_target_keypoint(\n                edge, instance_keypoints[source_keypoint_id], target_keypoint_id, heatmap_scores,\n                offsets, output_stride, displacements_bwd\n            )\n\n    for edge in range(num_edges):\n        source_keypoint_id = child_to_parent_edges[edge]\n        target_keypoint_id = parent_to_child_edges[edge]\n        if instance_keypoints[source_keypoint_id] and not instance_keypoints[target_keypoint_id]:\n            instance_keypoints[target_keypoint_id] = traverse_to_target_keypoint(\n                edge, instance_keypoints[source_keypoint_id], target_keypoint_id, heatmap_scores,\n                offsets, output_stride, displacements_fwd\n            )\n\n    return instance_keypoints\n\n\ndef traverse_to_target_keypoint(edge_id, source_keypoint, target_keypoint_id, heatmap_scores,\n                                offsets, output_stride, displacements):\n\n    height, width, _ = heatmap_scores.shape\n\n    # Nearest neighbor interpolation for the source->target displacements.\n    source_keypoint_indices = get_strided_index_near_point(\n        source_keypoint['position'], output_stride, height, width\n    )\n\n    # Get displacement vector located at our source_keypoint's heatmap x & y coordinates\n    displacement = get_displacement(edge_id, source_keypoint_indices, displacements)\n\n    # Add that vector to our source_keypoint\n    displaced_point = add_vectors(source_keypoint['position'], displacement)\n\n    # Get the heatmap x & y coordinates for the resulting vector\n    displaced_point_indices = get_strided_index_near_point(displaced_point, output_stride,\n                                                           height, width)\n\n    # Find the offset vector of said coordinates\n    # TODO: Shouldn't we be using Hough voting here??\n    offset_point = get_offset_point(displaced_point_indices['y'], displaced_point_indices['x'],\n                                    target_keypoint_id, offsets)\n\n    # Add it to our previously displaced point to find our target_keypoint!\n    target_keypoint = add_vectors(\n        {'x': displaced_point_indices['x'] * output_stride,\n         'y': displaced_point_indices['y'] * output_stride},\n        offset_point  # TODO: I refactored it a bit here, check in case something fails\n    )\n\n    score = heatmap_scores[\n        displaced_point_indices['y'], displaced_point_indices['x'], target_keypoint_id\n    ]\n\n    return {'position': target_keypoint, 'part': part_names[target_keypoint_id], 'score': score}\n\n\ndef get_strided_index_near_point(point, output_stride, height, width):\n    # TODO: Isn't this clamp unnecessary?\n    return {\n        'y': int(clamp(round(point['y'] / output_stride), 0, height - 1)),\n        'x': int(clamp(round(point['x'] / output_stride), 0, width - 1))\n    }\n\n\ndef get_displacement(edge_id, point, displacements):\n    num_edges = displacements.shape[2] // 2  # half the channels are y components, the other half x\n    return {\n        'y': displacements[point['y'], point['x'], edge_id],\n        'x': displacements[point['y'], point['x'], num_edges + edge_id]\n    }\n\n\ndef get_instance_score(existing_poses, squared_nms_radius, instance_keypoints):\n    # TODO is this generated score used at all? (Maybe I added some bugs here).\n    not_overlapped_keypoint_scores = 0.0\n    for keypoint_id, keypoint in enumerate(instance_keypoints):\n        if not within_radius_of_corresponding_point(existing_poses, squared_nms_radius,\n                                                    keypoint['position'], keypoint_id):\n            not_overlapped_keypoint_scores += keypoint['score']\n\n    return not_overlapped_keypoint_scores / len(instance_keypoints)\n","sub_path":"decode_pose.py","file_name":"decode_pose.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"373257486","text":"#Create layers for Finding Point and Line Features in Water Bodies and Water smaller than 1.4 acres\r\n#6/9/2014\r\n\r\nimport arcpy\r\narcpy.env.overwriteOutput = True\r\n\r\ninpolyFC = arcpy.GetParameterAsText(0) #Input Soil Polygons\r\ninsfptsFC = arcpy.GetParameterAsText(1) #Input Special Feature Points\r\ninsflnFC = arcpy.GetParameterAsText(2) #Input Special Feature Lines\r\n#MUSYM = arcpy.GetParameterAsText(3) #Choose MUSYM attribute\r\nAcres = arcpy.GetParameterAsText(3) #Choose Acre Size\r\nworkspace = arcpy.GetParameterAsText(4) #Choose Workspace\r\n\r\n\r\n#Add Field\r\n\r\narcpy.AddField_management(inpolyFC, \"ACRES\", \"DOUBLE\")\r\n\r\n#Calculate Field\r\n\r\narcpy.CalculateField_management(inpolyFC, \"ACRES\", '!Shape.area@ACRES!', \"PYTHON_9.3\")\r\n\r\n\r\n#Select all Water bodies\r\narcpy.SelectLayerByAttribute_management(inpolyFC, \"NEW_SELECTION\", \" MUSYM = 'W' \")\r\n#Make a layer from the feature class\r\narcpy.MakeFeatureLayer_management(inpolyFC, \"soil_w_lyr\")\r\n\r\n#Select points in Water polygons\r\narcpy.SelectLayerByLocation_management(insfptsFC, \"COMPLETELY_WITHIN\", \"soil_w_lyr\", \"\", \"ADD_TO_SELECTION\")\r\n\r\n#Write the selected features to a new featureclass\r\narcpy.CopyFeatures_management(insfptsFC, workspace+'\\\\'+\"SFP_in_W\")\r\n\r\n#Select Lines in Water polygons\r\narcpy.SelectLayerByLocation_management(insflnFC, \"INTERSECT\", \"soil_w_lyr\", \"\", \"ADD_TO_SELECTION\")\r\n\r\n#Export the selected features to a new featureclass\r\narcpy.CopyFeatures_management(insflnFC, workspace+'\\\\'+\"SFL_in_W\")\r\n\r\n#Select water polygons smaller than the chosen acre size\r\narcpy.SelectLayerByAttribute_management(\"soil_w_lyr\", \"NEW_SELECTION\", \"ACRES < \" + Acres)\r\n\r\n#Export the selected features to a new featureclass\r\narcpy.CopyFeatures_management(\"soil_w_lyr\", workspace+'\\\\'+\"Small_W\")\r\n\r\n#Clear Selected Features\r\narcpy.SelectLayerByAttribute_management(inpolyFC, \"CLEAR_SELECTION\")\r\narcpy.SelectLayerByAttribute_management(insfptsFC, \"CLEAR_SELECTION\")\r\narcpy.SelectLayerByAttribute_management(insflnFC, \"CLEAR_SELECTION\")","sub_path":"geo_surreal/create_layers_finding_pt_ln_musym_acres_size_06122014.py","file_name":"create_layers_finding_pt_ln_musym_acres_size_06122014.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"371706523","text":"\nfrom PyQt5 import QtWidgets\n\nfrom Presenter import Presenter\nfrom Model import Model\nfrom View import View\n\nclass Setup(object):\n \n def __init__(self):\n # Creating Model and View\n self.__view = View()\n self.__model = Model()\n\n # Creating Presenter\n self.__presenter = Presenter(self.__model, self.__view)\n \n def show_window(self):\n self.__view.show()\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n\n manager = Setup()\n manager.show_window()\n \n sys.exit(app.exec_())","sub_path":"MVP/Manager.py","file_name":"Manager.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"119942128","text":"from flask import Flask, render_template, url_for, request, session, redirect\r\nfrom flask_pymongo import PyMongo, pymongo\r\nfrom bson.objectid import ObjectId\r\nfrom operator import itemgetter\r\nimport bcrypt\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['MONGO_DBNAME'] = 'SampleProject'\r\napp.config['MONGO_URI'] = 'mongodb://127.0.0.1:27017/SampleProject'\r\n\r\nfrom app import routes\r\n\r\nmongo = PyMongo(app)\r\n\r\n@app.route('/')\r\n@app.route('/Home')\r\ndef Home():\r\n\treturn render_template('Home.html', title='Home')\r\n\r\n@app.route('/Register', methods=['POST', 'GET'])\r\ndef Register():\r\n\tif request.method == 'POST':\r\n\t\tUser = mongo.db.User\r\n\t\texisting_user = User.find_one({'username' : request.form['username']})\r\n\r\n\t\tif not existing_user:\r\n\t\t\thashpass = bcrypt.hashpw(request.form['password'].encode('utf-8'), bcrypt.gensalt())\r\n\t\t\tUser.insert({'username': request.form['username'], 'password': hashpass, 'first_name': request.form['first_name'], 'last_name': request.form['last_name'], 'email': request.form['email']})\r\n\t\t\tlogin_user = User.find_one({'username': request.form['username']})\r\n\t\t\tsession['userid'] = str(login_user['_id'])\r\n\t\t\tsession['username'] = login_user['username']\r\n\t\t\tsession['first_name'] = login_user['first_name']\r\n\t\t\tsession['last_name'] = login_user['last_name']\r\n\t\t\tsession['email'] = login_user['email']\r\n\t\t\treturn redirect(url_for('MembersHome'))\r\n\t\treturn 'That username already exists!'\r\n\treturn render_template('register.html', title='Register')\r\n\r\n@app.route('/Login', methods=['POST', 'GET'])\r\ndef Login():\r\n\tif request.method == 'POST':\r\n\t\tUser = mongo.db.User\r\n\t\tlogin_user = User.find_one({'username' : request.form['username']})\r\n\r\n\t\tif login_user:\r\n\t\t\tif bcrypt.hashpw(request.form['password'].encode('utf-8'), login_user['password'].encode('utf-8')) == login_user['password'].encode('utf-8'):\r\n\t\t\t\tsession['userid'] = str(login_user['_id'])\r\n\t\t\t\tsession['username'] = login_user['username']\r\n\t\t\t\tsession['first_name'] = login_user['first_name']\r\n\t\t\t\tsession['last_name'] = login_user['last_name']\r\n\t\t\t\tsession['email'] = login_user['email']\r\n\t\t\t\treturn redirect(url_for('MembersHome'))\r\n\treturn render_template('Login.html', title='Login')\r\n\r\n@app.route('/MembersHome')\r\ndef MembersHome():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tquery_0 = session['first_name']\r\n\tDivision = mongo.db.Division\r\n\tmultiquery_test_17 = Division.find({}).limit(0)\r\n\tmultiquery_17 = []\r\n\tfor mq in multiquery_test_17:\r\n\t\tmultiquery_17.append(mq)\r\n\treturn render_template('MembersHome.html', title='MembersHome', query_0=query_0, multiquery_17=multiquery_17)\r\n\r\n@app.route('/newDivision', methods=['POST', 'GET'])\r\ndef newDivision():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tif request.method == 'POST':\r\n\t\tDivision = mongo.db.Division\r\n\t\texists = Division.find_one({'Name': request.form['Name']})\r\n\r\n\t\tif not exists:\r\n\t\t\tDivision.insert({'Name': request.form['Name']})\r\n\r\n\treturn render_template('newDivision.html', title='newDivision')\r\n\r\n@app.route('/Newinvoice', methods=['POST', 'GET'])\r\ndef Newinvoice():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tif request.method == 'POST':\r\n\t\tInvoice = mongo.db.Invoice\r\n\t\tAmountNoTax_calculated = 
float(request.form['Amount']) * 0.8\r\n\t\texists = Invoice.find_one({'Description': request.form['Description'], 'Amount': float(request.form['Amount']), 'AmountNoTax': AmountNoTax_calculated, 'Claimant': request.form['Claimant'], 'Division': request.form['Division'], 'Date': request.form['Date']})\r\n\r\n\t\tif not exists:\r\n\t\t\tInvoice.insert({'Description': request.form['Description'], 'Amount': float(request.form['Amount']), 'AmountNoTax': AmountNoTax_calculated, 'Claimant': request.form['Claimant'], 'Division': request.form['Division'], 'Date': request.form['Date']})\r\n\r\n\tUser = mongo.db.User\r\n\tClaimant_options = User.find({})\r\n\tDivision = mongo.db.Division\r\n\tDivision_options = Division.find({})\r\n\treturn render_template('Newinvoice.html', title='Newinvoice', Claimant_options=Claimant_options, Division_options=Division_options)\r\n\r\n@app.route('/Division/<Divisionid>')\r\ndef Division(Divisionid):\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tDivision = mongo.db.Division\r\n\tpage_Division = Division.find_one({'_id': ObjectId(Divisionid)})\r\n\tquery_0 = page_Division['Name']\r\n\tobjectReset = mongo.db.objectReset\r\n\tmultiquery_test_24 = objectReset.find({'Division': str(page_Division['_id'])}).sort('Date', pymongo.DESCENDING).limit(0)\r\n\tmultiquery_24 = []\r\n\tfor mq in multiquery_test_24:\r\n\t\tmultiquery_24.append(mq)\r\n\treturn render_template('Division.html', title='Division', query_0=query_0, multiquery_24=multiquery_24)\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n\tsession.pop('username', None)\r\n\tsession.pop('userid', None)\r\n\tsession.pop('first_name', None)\r\n\tsession.pop('last_name', None)\r\n\tsession.pop('email', None)\r\n\treturn redirect(url_for('Home'))\r\n\r\n# the secret key must be set before the app starts serving requests,\r\n# otherwise sessions will not work\r\napp.secret_key = '\\xc4\\xd1\\xc8@g[\\x04\\xbfpu\\th&,\\x1b\\xb5\\x18\\x0e\\x06\\xbc\\xad\"*\\xa8'\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run()","sub_path":"sample/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"98931367","text":"import numpy as np\nfrom scipy.integrate import quad\nfrom matplotlib import pyplot as plt\n\n#potential function for a charged finite bar, 1/4*pi factor not included!\ndef phi(x1,x2,y,z):\n    return np.log(np.abs( (x2+np.sqrt(x2**2+z**2+y**2))/(x1+np.sqrt(x1**2+z**2+y**2)) ))\n\n# defines a square magnet (x,y,z) are \"center\" coordinates, X,Y,Z are the total size\ndef square_mag(X,Y,Z,x,y,z):\n    int1= quad(lambda t:phi(-X/2+x,X/2+x,t+y,z-Z/2),-Y/2,Y/2)[0]\n    int2= -quad(lambda t:phi(-X/2+x,X/2+x,t+y,z+Z/2),-Y/2,Y/2)[0]\n\n    return int1+int2\n\ndist=2.0\n\n# defines the arrangement of the magnets, and transforms coordinates\n# here: a single centralized cube\ndef total_phi(xx,yy,zz):\n    total= square_mag(10.0,10.0,0.01,-xx,-yy,-zz)\n#    total+=square_mag(10.0,10.0,1.0,-xx,-yy,-dist-zz)\n    return total\n\neps=0.0001\n\n# calculates the gradient of total_phi\ndef grad(x,y,z):\n    delta=np.array([total_phi(x+eps,y,z)-total_phi(x-eps,y,z),\n                    total_phi(x,y+eps,z)-total_phi(x,y-eps,z),\n                    total_phi(x,y,z+eps)-total_phi(x,y,z-eps)])\n    return delta/2.0/eps\n# test: potential is identically zero in the center of a cube magnet:\nprint (\"test: potential in center of a cube magnet:\",square_mag(1.0,1.0,1.0,0,0,0))\nprint (\"test: field in the center of a sheet of magnet is \",np.pi*4)\nprint (grad(0,0,0))\n\n\n# defines the arrangement of the magnets, and transforms coordinates\n# here: unit cubic magnet\ndef total_phi(xx,yy,zz):\n    total= square_mag(1,1,1,-xx,-yy,-zz)\n\n    return total\nref_B=grad(0,0,0.51)[2]\nprint(\"field on the surface of unit magnet\",ref_B)\n\n# defines the arrangement of the magnets, and transforms coordinates\n# two slabs of magnet, equal distance from center\ndef total_phi(xx,yy,zz):\n    total= square_mag(10.0,10.0,1,-xx,-yy,dist-zz)\n    total+=square_mag(10.0,10.0,1.0,-xx,-yy,-dist-zz)\n    return total\n\n\nx=np.linspace(-5,5,21)\n\n\n#plots field for x and z, distance is fixed\ndef calc_plot_Bz(zz):\n    Bz=[]\n    for i in range(0,21):\n        Bz.append(grad(x[i],0,zz)[2])\n    Bz=np.array(Bz)\n    #Bz=Bz/np.max(np.abs(Bz))\n    #Bz=Bz/np.abs(Bz[10])\n    print(np.max(np.abs(Bz)))\n    plt.plot(x,Bz,label=str(dist-zz-0.5))\n\n#dist=0.001+0.05\n#calc_plot_Bz()\n\n# distance (from center!) is fixed to minimum fluctuation value\ndist= 2.75\nzz=0.0\nfor j in range(0,11):\n    calc_plot_Bz(zz)\n    zz+=0.21\n\n#phi=total_phi(np.array([0,0]),np.array([0,0]),np.array([0,0]))\n#phii=square_mag(1.0,1.0,1.0,np.array([0,0]),0,1.0)\n#Bz=-grad(x,0,0)[3]\nplt.legend()\nplt.grid()\nplt.show()\n#print (square_mag(1.0,1.0,1.0,0,0,.001))\n","sub_path":"magnet/magnet2.py","file_name":"magnet2.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"287245654","text":"#!/usr/bin/env python3\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\ndef parse_txs():\n print(\"Get transactions ids...\")\n accounts = {\"0xd079c22e9c63341bf839a8634e8892c430d724cf\",\n \"0xae506bb28ed79b29c6968ab527d1efdc5f399331\"}\n txs = set()\n page = 0\n stop = False\n with open(\"txs.txt\", \"r\") as f:\n old_txs = set(map(lambda x: x.strip(), f.readlines()))\n while 1:\n print(\"page: {}\".format(page))\n r = requests.get('https://etherscan.io/txs?a=0xAE506bb28Ed79b29c6968Ab527d1eFdc5f399331&p={}'.format(page))\n soup = BeautifulSoup(r.text, 'lxml')\n addrs = soup.find_all(\"span\", {\"class\": \"address-tag\"})\n if len(addrs) == 0:\n break\n for addr in addrs:\n if addr.text in accounts:\n continue\n if addr.text in old_txs:\n stop = True\n break\n txs.add(addr.text)\n if stop:\n break\n page += 1\n print(\"Get {} new transactions ids\".format(len(txs)))\n with open(\"txs.txt\", \"a\") as f:\n [f.write(tx + \"\\n\") for tx in txs]\n with open(\"data.txt\", \"a\") as f:\n for i, tx in enumerate(txs):\n print(i)\n tx = tx.strip()\n r = requests.get('https://etherscan.io/tx/{}'.format(tx))\n soup = BeautifulSoup(r.text, 'lxml')\n data = soup.find(\"textarea\", {\"class\": \"form-control\"})\n f.write(data.text + \"\\n\") if data is not None else print(tx)\n print(\"Get transactions data... OK\")\n\n\ndef parse_data():\n print(\"Get variables from data...\")\n with open(\"data.txt\", \"r\") as f:\n s = f.readlines()\n print(len(s))\n with open(\"transactions.txt\", \"w\") as f:\n for tx in s:\n tx = tx.strip()[10:]\n txid = tx[:32]\n am = tx[64:128]\n time = tx[129:]\n f.write(\"{} {} {}\\n\".format(txid, int(\"0x\" + am, 0) / 10 ** 8, datetime.fromtimestamp(int(\"0x\" + time, 0))))\n print(\"Get variables from data... OK\")\n\n\ndef sort_data():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: (x[2], x[3]))\n with open(\"info.txt\", \"w\") as f:\n [f.write(x[0] + 4 * \" \" + x[1] + (14 - len(x[1])) * \" \" + x[2] + (14 - len(x[2])) * \" \" + \\\n x[3] + \"\\n\") for x in s]\n\n\ndef sort_by_amount():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: float(x[1]), reverse=True)\n with open(\"top.txt\", \"w\") as f:\n [f.write(x[0] + 4 * \" \" + x[1] + (14 - len(x[1])) * \" \" + x[2] + (14 - len(x[2])) * \" \" + \\\n x[3] + \"\\n\") for x in s[:10]]\n f.write(str(sum([float(x[1]) for x in s[10:]])))\n\n\ndef group_by_day():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: (x[2], x[3]))\n days_info = []\n cur_date = s[0][2]\n day_tx = []\n for tx in s:\n if tx[2] == cur_date:\n day_tx.append(tx)\n else:\n days_info.append(day_tx)\n cur_date = tx[2]\n day_tx = []\n days_info.append(day_tx)\n with open(\"info_by_day.txt\", \"w\") as f:\n f.write(\"Date\\t\\t\\tAmount\\n\")\n for x in days_info:\n f.write(x[0][2] + \"\\t\\t\" + str(sum(float(el[1]) for el in x)) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n parse_txs()\n parse_data()\n sort_by_amount()\n group_by_day()\n","sub_path":"Statistics/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"394935414","text":"from scipy.stats import mode\nimport numpy as np\nimport plotly.offline as py\nimport plotly.graph_objs as go\nfrom plotly import tools\n\ndef plot_attention_map(x_words, y_words, weights, average_weights=True, show_max_only=False):\n\n if average_weights:\n mean_weights = np.mean(weights, axis=-1)\n weights = [mean_weights]\n\n traces = []\n for weight_count, weight in enumerate(weights):\n if show_max_only:\n weight = (weight == weight.max(axis=-1)[:, None]).astype(int)\n\n color_centroid = np.around(weights, decimals=4)\n color_centroid = mode(color_centroid, axis=None)[0][0]\n print(color_centroid)\n traces.append(go.Heatmap(z=weight,\n zmin= color_centroid - np.var(weight)/100,\n zmax= color_centroid + np.var(weight)/100,\n x=list(x for x in range(len(x_words))),\n y=list(y for y in range(len(y_words))),\n showscale=True,\n colorbar=dict(x=0.45 if len(weights) > 1 and weight_count%2 == 0 else 1.0,\n # y=0.45 if weight_count%2 == 0 else 1, len= 0.45\n )\n ))\n if len(weights) == 1:\n layout = {}\n layout.update({'yaxis': {'ticktext': y_words,\n 'tickvals': list(y for y in range(len(y_words))),\n 'tickmode': 'array', 'autorange': 'reversed'}})\n layout.update({'xaxis': {'ticktext': x_words,\n 'tickvals': list(x for x in range(len(x_words))),\n 'tickmode': 'array',\n 'tickangle': -90}})\n fig = go.Figure(traces, layout=layout)\n else:\n fig = tools.make_subplots(rows=(len(weights)+1)//2, cols=2, shared_yaxes=False, shared_xaxes=False, print_grid=False)\n\n layout = {'height': (18*len(y_words))*(len(weights)//2 + 1)}\n for trace_count, trace in enumerate(traces):\n fig.append_trace(trace, (trace_count//2)+1, (trace_count%2)+1)\n\n layout.update({'yaxis' + str(trace_count+1): {'ticktext': y_words,\n 'tickvals': list(y for y in range(len(y_words))),\n 'tickmode': 'array', 'autorange': 'reversed'}})\n layout.update({'xaxis' + str(trace_count+1): {'ticktext': x_words,\n 'tickvals': list(x for x in range(len(x_words))) ,\n 'tickmode':'array'}})\n\n fig['layout'].update(layout)\n py.plot(fig, image=\"svg\")\n\n# Place the blue cube on top of the green cube","sub_path":"keras_transformer/utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"639438477","text":"#!/usr/bin/env python3\n\"\"\" https://github.com/UWPCEWebPython/flask-mailroom \"\"\"\n\nimport os\n\nfrom flask import Flask, render_template, request, redirect, url_for\n\nfrom model import Donor, Donation\n\napp = Flask(__name__) # pylint: disable=invalid-name\n\n\n@app.route('/', methods=['GET'])\ndef home():\n \"\"\" Redirect to donations page \"\"\"\n return redirect(url_for('donations'))\n\n\n@app.route('/donations', methods=['GET', 'POST'])\ndef donations():\n \"\"\" Prompt for donations and display current donations \"\"\"\n if request.method == 'POST':\n donor_name = request.form['donor_name']\n donation_amount = request.form['donation_amount']\n print(f\"donor_name {donor_name} donation_amount {donation_amount}\")\n try:\n donor = Donor.get(Donor.name == donor_name)\n except Donor.DoesNotExist:\n donor = Donor(name=donor_name)\n donor.save()\n\n donation = Donation(donor=donor, value=donation_amount)\n donation.save()\n\n # README says to redirect to home page, but that just redirects here.\n # So, just fall through to what would be the GET processing.\n\n dons = Donation.select()\n return render_template('donations.jinja2', donations=dons)\n\n\nif __name__ == \"__main__\":\n PORT = int(os.environ.get(\"PORT\", 6738))\n app.run(host='0.0.0.0', port=PORT)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"371577664","text":"import unittest\n\n\n# A child is running up a staircase with n steps, and can hop either 1 step, 2 steps, or 3 steps at a time.\n# Implement as method to count how many possible ways the child can run up the stairs.\n\n\ndef staircase_count(nSteps):\n if nSteps < 0:\n return 0\n if nSteps == 0:\n return 1\n total = 0\n for step in [1, 2, 3]:\n total += staircase_count(nSteps - step)\n return total\n\n\ndef staircase_list(nSteps, li=None, output=None):\n if li is None:\n li = []\n if output is None:\n output = []\n if sum(li) > nSteps:\n return None\n if sum(li) == nSteps:\n output.append(li)\n return output\n for step in [1, 2, 3]:\n staircase_list(nSteps, li=li + [step], output=output)\n return output\n\n\nclass StaircaseTest(unittest.TestCase):\n\n def assertNestedListEquals(self, nested_list, expected):\n for a, b in zip(nested_list, expected):\n self.assertListEqual(a, b)\n\n def test_staircase_count(self):\n self.assertEqual(1, staircase_count(1))\n self.assertEqual(2, staircase_count(2))\n self.assertEqual(4, staircase_count(3))\n self.assertEqual(7, staircase_count(4))\n\n def test_staircase_list(self):\n self.assertNestedListEquals([[1]], staircase_list(1))\n self.assertNestedListEquals([[1, 1], [2]], staircase_list(2))\n self.assertNestedListEquals([[1, 1, 1], [1, 2], [2, 1], [3]], staircase_list(3))\n self.assertNestedListEquals([[1, 1, 1, 1], [1, 1, 2], [1, 2, 1], [1, 3], [2, 1, 1], [2, 2], [3, 1]], staircase_list(4))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"python/d&c/staircase.py","file_name":"staircase.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"156329728","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom teachers.models import Teacher\nfrom django.db.models import Q\nfrom teachers.forms import TeachersAddForm\n\n\ndef generate_teacher(request):\n    teacher = Teacher.generate_teacher()\n    return HttpResponse(f'{teacher.get_info()}')\n\n\ndef teachers(request):\n    queryset = Teacher.objects.all()\n    response = ''\n\n    filtr_param = request.GET.get('filtr_param')\n    if filtr_param:\n        queryset = queryset.filter(\n            Q(first_name__contains=filtr_param) | Q(last_name__contains=filtr_param) | Q(email__contains=filtr_param)\n        )\n        # __contains   --> like '%blabla%'\n        # __endswith   --> like '%blabla'\n        # __startswith --> like 'blabla%'\n        # __istartswith / __iendswith / ... --> case-insensitive search\n\n    for teacher in queryset:\n        response += teacher.get_info() + ' '\n    return render(request,\n                  'teachers_list.html',\n                  context={'teachers_list': response})\n\n\ndef teacher_add(request):\n    if request.method == 'POST':\n        form = TeachersAddForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/teachers/')\n    else:\n        form = TeachersAddForm()\n\n    return render(request,\n                  'teacher_add.html',\n                  context={'form': form})","sub_path":"Src/teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"328440874","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\nprocess = cms.Process(\"OWNPARTICLES\")\noptions = VarParsing.VarParsing('analysis')\noptions.inputFiles = \"file:/home/t3-ku/janguian/CMSSW_10_6_8/src/KUsoftMVA/test/0ACB220F-DB5C-3449-83F5-E04858176001.root\"\noptions.outputFile = \"defaultout.root\"\noptions.maxEvents = 100\noptions.parseArguments()\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.maxEvents = cms.untracked.PSet( input=cms.untracked.int32(options.maxEvents ))\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 5000\n\n\nprocess.source = cms.Source(\"PoolSource\", \n fileNames = cms.untracked.vstring(options.inputFiles)\n)\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string(options.outputFile)\n ,outputCommands = cms.untracked.vstring('drop *',\n \"keep *_offlineSlimmedPrimaryVertices_*_*\",\n \"keep *_slimmedMuons_*_*\",\n \"keep *_slimmedElectrons_*_*\",\n \"keep *_packedPFCandidates_*_*\",\n \"keep *_packedGenParticles_*_*\")\n \n) \nprocess.e = cms.EndPath(process.out)\n\n\n","sub_path":"MuonAnalysis/test/createMiniAODNtuple.py","file_name":"createMiniAODNtuple.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"107819694","text":"#import sys\n#sys.path.append('../')\n#from P32.main import gcd\n\n#def totient_phi(num):\n#    count = 0\n#    for i in range(1,num):\n#        if gcd(i,num) == 1:\n#            count += 1\n#    return count\n\n\n######################## corrected code ##########################\nimport sys\nsys.path.append('../')\nfrom P33.main import is_coprime\n\ndef totient_phi(num):\n    count = 0\n    for i in range(1,num):\n        if is_coprime(i,num):\n            count += 1\n    return count\n","sub_path":"P34/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"443773411","text":"__author__ = 'nick meng'\r\n# Character matching\r\n# Single characters:\r\n# '\\d' matches one digit, '\\w' matches one digit or letter\r\n# '\\s' matches one whitespace character, '.' matches any character\r\n# Repetition:\r\n# '*' means any number of characters (including zero)\r\n# '+' means at least one character\r\n# '?' means zero or one character\r\n# '{n}' means exactly n characters, '{n,m}' means n to m characters\r\n# For precise matching, [] can express a range of characters\r\n# '^' matches the start of a line, '$' matches the end of a line\r\n# The re module\r\n#\r\ns='ABC\\\\-001'  # the corresponding regular expression string becomes 'ABC\\-001'\r\n\r\nimport re\r\nre.match(r'^\\d{3}\\-\\d{3,8}$','010-1234')\r\n# The match method tests whether the string matches: on success it returns a Match object, otherwise None.\r\n# match(pattern, string, flags=0)\r\ntest='a string entered by the user'\r\nif re.match(r'a regex pattern',test):\r\n    print('ok')\r\nelse:\r\n    print('failed')\r\n# Splitting strings\r\n#\r\ns='a b c'\r\ns1=s.split(' ')\r\nprint(s1)\r\ns2=re.split(r'\\s+','a b c')\r\nprint(s2)\r\ns3=re.split(r'[\\s\\,]+','a,b, c d')\r\nprint(s3)\r\ns4=re.split(r'[\\s\\,\\:]+','a,b:: c d')\r\nprint(s4)\r\n\r\n# Grouping\r\n# Regular expressions can extract substrings from a string via groups\r\nm=re.match(r'^(\\d{3})-(\\d{3,8})$','010-12345')\r\nm.group(0)\r\nm.group(1)\r\nm.group(2)\r\n# If the regex defines groups, the group() method of the Match object extracts each substring; the argument is the group index, and 0 means the whole original string\r\n#\r\nt='19:05:30'\r\nm=re.match(r'^(0[0-9]|1[0-9]|2[0-3]|[0-9])\\:(0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])\\:(0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])$', t)\r\nm1=m.groups()\r\nprint(m1)\r\n\r\n# Greedy matching:\r\n# Matching is greedy by default, i.e. it matches as many characters as possible\r\nw=re.match(r'^(\\d+)(0*)$','102300').groups()\r\nprint(w)\r\ns=re.match(r'^(\\d+?)(0*)$','102300').groups()  # '?' switches to non-greedy matching\r\nprint(s)\r\n# Compiling: pre-compiling produces a Regular Expression object\r\nre_telephone=re.compile(r'^(\\d{3})-(\\d{3,8})$')\r\nr1=re_telephone.match('010-12345').groups()\r\nprint(r1)","sub_path":"MyProject/firstprogarm/regexdemo.py","file_name":"regexdemo.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"56928209","text":"#!/usr/bin/env python\n\n__author__ = 'tgiguere'\n\nfrom pyon.public import log\nfrom interface.objects import DatasetDescriptionDataSamplingEnum\nfrom eoi.agent.handler.base_external_data_handler import *\nfrom eoi.agent.utils import ArrayIterator\nimport numpy\n\nclass HfrRadialDataHandler(BaseExternalDataHandler):\n\n _data_array = None\n _number_of_records = 0\n _variables = []\n _attributes = {}\n\n def __init__(self, data_provider=None, data_source=None, ext_dataset=None, *args, **kwargs):\n BaseExternalDataHandler.__init__(self, data_provider, data_source, ext_dataset, *args, **kwargs)\n\n self._variables[:] = []\n self._load_attributes(data_source)\n self._load_values(data_source)\n\n def _load_attributes(self, filename=''):\n #looping through the whole file to get the attributes; not sure if this is such a good idea\n with open(filename, 'r') as f:\n in_table_data = False\n correct_table_type = False\n for line in f:\n if line.startswith('%TableType:'):\n parsed_line = line.partition(': ')\n if parsed_line[2].startswith('LLUV'):\n correct_table_type = True\n else:\n correct_table_type = False\n if line.startswith('%TableStart:'):\n in_table_data = True\n if line.startswith('%TableEnd:') and in_table_data:\n in_table_data = False\n correct_table_type = False\n\n\n if not (in_table_data):\n self._parse_attribute(line, correct_table_type)\n f.close()\n\n def _parse_attribute(self, line='', correct_table_type=False):\n #strip out leading %\n new_line = line.replace('%', '')\n\n parsed_line = new_line.partition(':')\n if parsed_line[0] == 'TableColumnTypes' and correct_table_type:\n cols = parsed_line[2].split(' ')\n for col in cols:\n if not col == '' and not col == '\\n':\n self._variables.append(col)\n elif not parsed_line[0].startswith('Table'):\n if not parsed_line[2] == '':\n self._attributes[parsed_line[0]] = parsed_line[2].replace('\\n', '')\n\n def _load_values(self, filename=''):\n a = numpy.loadtxt(fname=filename, comments='%')\n\n self._data_array = {}\n index = 0\n for column in self._variables:\n self._data_array[column] = a[:,index]\n index += 1\n\n self._number_of_records = a.shape[0]\n\n def acquire_data(self, var_name=None, slice_=()):\n if var_name in self._variables:\n vars = [var_name]\n else:\n vars = self._variables\n\n if not isinstance(slice_, tuple): slice_ = (slice_,)\n\n for vn in vars:\n var = self._data_array[vn]\n\n ndims = len(var.shape)\n # Ensure the slice_ is the appropriate length\n if len(slice_) < ndims:\n slice_ += (slice(None),) * (ndims-len(slice_))\n\n arri = ArrayIterator(var, self._block_size)[slice_]\n for d in arri:\n if d.dtype.char is \"S\":\n # Obviously, we can't get the range of values for a string data type!\n rng = None\n elif isinstance(d, numpy.ma.masked_array):\n # TODO: This is a temporary fix because numpy 'nanmin' and 'nanmax'\n # are currently broken for masked_arrays:\n # http://mail.scipy.org/pipermail/numpy-discussion/2011-July/057806.html\n dc = d.compressed()\n if dc.size == 0:\n rng = None\n else:\n rng = (numpy.nanmin(dc), numpy.nanmax(dc))\n else:\n rng = (numpy.nanmin(d), numpy.nanmax(d))\n yield vn, arri.curr_slice, rng, d\n\n return\n\n def get_attributes(self, scope=None):\n \"\"\"\n Returns a dictionary containing the name/value pairs for all attributes in the given scope.\n @param scope The name of a variable in this dataset. 
If no scope is provided, returns the global_attributes for the dataset\n \"\"\"\n #Since there are no variable attributes in this file, just return the global ones.\n return self._attributes\n\n def get_attribute(self, attr_name=''):\n if attr_name in self._attributes:\n return self._attributes[attr_name]\n else:\n return ''\n\n def get_variables(self):\n return self._variables\n","sub_path":"eoi/agent/handler/hfr_radial_data_handler.py","file_name":"hfr_radial_data_handler.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"317480359","text":"def memoize(func):\n    memo = {}\n\n    def wrapped(n, a):\n        # key on the target string and the word bank together, so cached\n        # results from one word bank are not reused for a different one\n        key = (n, tuple(a))\n        if key not in memo:\n            memo[key] = func(n, a)\n\n        return memo[key]\n\n    return wrapped\n\n\n@memoize\ndef can_construct(n, a):\n    if n == \"\":\n        return True\n\n    for each in a:\n        # don't take out from middle because it would create\n        # a string of characters that do not necessarily appear in the original\n        # string. It is better to take out from prefix because you preserve\n        # the adjacent strings in the original. Also, the candidate strings\n        # have to match the prefix or suffix if they are able to construct the\n        # original string\n        if n.startswith(each):\n            if can_construct(n[len(each) :], a) is True:\n                return True\n\n    return False\n\n\nif __name__ == \"__main__\":\n    print(can_construct(\"abcdef\", [\"ab\", \"abc\", \"cd\", \"def\", \"abcd\"]))\n    print(can_construct(\"enterapotentpot\", [\"a\", \"p\", \"ent\", \"enter\", \"ot\", \"o\", \"t\"]))\n    print(\n        can_construct(\n            \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef\",\n            [\"e\", \"ee\", \"eee\", \"eeee\", \"eeeee\", \"eeeeee\"],\n        )\n    )\n","sub_path":"algorithms/dynamic_programming/can_construct.py","file_name":"can_construct.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"361940259","text":"#!/usr/bin/python3\n\nimport hashlib\n\nSECRET_KEY = \"ckczppom\"\n\n\ndef hash_mine(number, zero_count):\n    # hashlib.md5 needs bytes in Python 3, so encode the key+number string first\n    digest = hashlib.md5((SECRET_KEY + str(number)).encode()).hexdigest()\n    return digest.startswith(\"0\" * zero_count)\n\n\nif __name__ == \"__main__\":\n    count = 0\n    while True:\n        if hash_mine(count, 5):\n            break\n\n        # Protip: Include this line, otherwise the problem takes a really,\n        # really long time to run...\n        count += 1\n\n    print(\"First hash that starts with five zeroes is: {}\".format(count))\n\n    while True:\n        if hash_mine(count, 6):\n            break\n\n        count += 1\n\n    print(\"First hash that starts with six zeroes is: {}\".format(count))\n","sub_path":"day4/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"458208230","text":"#_main_.py - Whack-the-Mouse\nfrom math import *\nimport time\nfrom sys import exit\nimport pygame\n\npygame.init()\npygame.display.set_caption(\"打耗子\")  # \"Whack-the-Mouse\"\n\nscreen = pygame.display.set_mode((640, 480))\nscreen_pic = pygame.image.load(\"grass\")\n#screen_pic = pygame.transform.scale(screen_pic, (640, 480))\n\ndule = pygame.image.load(\"dude\")\ndule = pygame.transform.flip(dule, False, True)\n\ncastle = pygame.image.load(\"castle\")\n\n\n\ndule = pygame.transform.rotate(dule, 0)\n\nscreen.blit(screen_pic, (0, 0))\nscreen.blit(castle, (30, 30))\nscreen.blit(castle, (30, 180))\nscreen.blit(castle, (30, 330))\nscreen.blit(dule, (100, 100))\npygame.display.update()\n\ntime.sleep(3)\n\ndule = pygame.transform.rotate(dule, -30)\n\nscreen.blit(screen_pic, (0, 0))\nscreen.blit(castle, (30, 30))\nscreen.blit(castle, (30, 180))\nscreen.blit(castle, (30, 330))\nscreen.blit(dule, (100, 100))\npygame.display.update()\n\n\ncat_x = 100\ncat_y = 100\nmouse_x = 201\nmouse_y = 302\na = mouse_y - cat_y\nb = mouse_x - cat_x\nangle = atan(a / b)\nangle = angle * 180 / 3.141592654\nangle = 0 - angle\n\nis_start = 1\n\nwhile True:\n\tif is_start == 1:\n\n\t\tcat_x = 100\n\t\tcat_y = 100\n\t\ta = mouse_y - cat_y\n\t\tb = mouse_x - cat_x\n\t\tangle_old = angle\n\t\tangle = atan(a / b)\n\t\tangle = angle * 180 / 3.141592654\n\t\tangle = 0 - angle\n\n\t\t# angle = atan((mouse[1]-123)/(mouse[0]-132))#*180/3.14159265\n\t\tif angle_old == angle:\n\t\t\tdule = pygame.transform.rotate(dule, angle)\n\t\tmouse_x, mouse_y = pygame.mouse.get_pos()\n\t\tscreen.blit(screen_pic, (0, 0))\n\t\tscreen.blit(castle, (30, 30))\n\t\tscreen.blit(castle, (30, 180))\n\t\tscreen.blit(castle, (30, 330))\n\t\tscreen.blit(dule, (100, 100))\n\t\t#time.sleep(1)\n\telif is_start == 2:\n\t\tscreen.blit(screen_pic, (0, 0))\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\texit()\n\tpygame.display.update()\n","sub_path":"angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"37229291","text":"import pandas as pd\nimport numpy as np\n\ndef findpeaks(data, spacing=1, limit=None):  # detects peaks in a signal (points exceeding a threshold given by limit)\n\n    size = data.size\n    x = np.zeros(size + 2 * spacing)\n    data = np.squeeze(data)\n    x[:spacing] = data[0] - 1.e-6\n    x[-spacing:] = data[-1] - 1.e-6\n    x[spacing:spacing + size] = data\n    peak_candidate = np.zeros(size)\n    peak_candidate[:] = True\n    for s in range(spacing):\n        start = spacing - s - 1\n        h_b = x[start: start + size]  # before\n        start = spacing\n        h_c = x[start: start + size]  # central\n        start = spacing + s + 1\n        h_a = x[start: start + size]  # after\n        peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))\n\n    ind = np.argwhere(peak_candidate)\n    ind = ind.reshape(ind.size)\n    if limit is not None:\n        ind = ind[data[ind] > limit]\n    return ind","sub_path":"ECG_TELNET_Pythonanywhere/code/findpeaks.py","file_name":"findpeaks.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"615531604","text":"#\n# Copyright The NOMAD Authors.\n#\n# This file is part of NOMAD. See https://nomad-lab.eu for further info.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\n\nfrom nomad.datamodel import EntryArchive\nfrom atkparser import ATKParser\n\n\ndef approx(value, abs=0, rel=1e-6):\n return pytest.approx(value, abs=abs, rel=rel)\n\n\n@pytest.fixture(scope='module')\ndef parser():\n return ATKParser()\n\n\ndef test_scf(parser):\n archive = EntryArchive()\n parser.parse('tests/data/Si2.nc', archive, None)\n\n sec_run = archive.section_run[0]\n assert sec_run.program_version == 'ATK 2016.0.3'\n\n sec_method = sec_run.section_method[0]\n assert sec_method.smearing_width == 300\n assert sec_method.section_XC_functionals[1].XC_functional_name == 'LDA_C_PZ'\n\n sec_system = sec_run.section_system[0]\n assert sec_system.lattice_vectors[1][0].magnitude == approx(2.7153e-10)\n assert sec_system.atom_positions[1][0].magnitude == approx(1.35765e-10)\n assert sec_system.atom_labels == ['Si', 'Si']\n\n sec_scc = sec_run.section_single_configuration_calculation[0]\n assert sec_scc.energy_total.magnitude == approx(-5.73249938e-17)\n assert sec_scc.energy_XC.magnitude == approx(-3.41975673e-17)\n","sub_path":"tests/test_atkparser.py","file_name":"test_atkparser.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"271814013","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Constants for y0.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Protocol, TypeVar\n\n__all__ = [\n \"NodeType\",\n \"NodeProtocol\",\n]\n\n\nclass NodeProtocol(Protocol):\n \"\"\"Represents what can be a node in a mixed graph.\"\"\"\n\n def __hash__(self) -> int:\n ...\n\n def __lt__(self, other) -> bool:\n ...\n\n\nNodeType = TypeVar(\"NodeType\", bound=NodeProtocol)\n","sub_path":"src/y0/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"110023837","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nfrom absl import app\nimport numpy as np\n\nimport pyspiel\n\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import random_agent\nfrom open_spiel.python.algorithms import tabular_qlearner\nfrom open_spiel.python.project.part_1.cross_learning import cross_learner\n\ndef train_agents(env, agents, nbep):\n prob_array = []\n for ep in range(nbep):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step)\n output1 = agents[1].step(time_step)\n prob_array = []\n prob_array.append(output0.probs)\n prob_array.append(output1.probs)\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n# print(time_step.rewards)\n return prob_array\ndef train_agents_simultaneous_single(env, agents, nbep):\n for ep in range(nbep):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step)\n output1 = agents[1].step(time_step)\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n\n\n\ndef evaluate_agents(env, agents):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step, is_evaluation = True)\n output1 = agents[1].step(time_step)\n action000 = output0.action\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n return action000\n\n\ndef create_game(name):\n if name == \"PD\":\n return pyspiel.create_matrix_game([[3,0],[5,1]], [[3,5],[0,1]])\n elif name == \"BOS\":\n return pyspiel.create_matrix_game(\"battle_of_sexes\", \"The Battle of The Sexes\",\n [\"LW\", \"WL\"], [\"LW\", \"WL\"],\n [[1, 0], [0, 1/2]], [[1/2, 0], [0, 1]])\n elif name == \"MP\":\n return pyspiel.create_matrix_game(\"matching_pennies\", \"Matching Pennies\",\n [\"Heads\", \"Tails\"], [\"Heads\", \"Tails\"],\n [[0, 1], [1, 0]], [[1, 0], [0, 1]])\n elif name == \"RPS\":\n return pyspiel.create_matrix_game(\n [[0.0, -0.25, 0.5], [0.25, 0.0, -0.05], [-0.5, 0.05, 0.0]],\n [[0.0, 0.25, -0.5], [-0.25, 0.0, 0.05], [0.5, -0.05, 0.0]])\ndef create_environment(game):\n return rl_environment.Environment(game)\n\n\n\ndef execute_scenarios_probs(env, nb, start):\n agents = [\n tabular_qlearner.QLearner(player_id=0, num_actions=3, step_size=0.5, epsilon=0.05, discount_factor=1.0 ),\n tabular_qlearner.QLearner(player_id=1, num_actions=3, step_size=0.5, epsilon=0.05, discount_factor=1.0)\n\n ]\n agents[0]._q_values['[0.0]'][0] = start[0]\n agents[1]._q_values['[0.0]'][0] = start[1]\n train_agents_simultaneous_single(env, agents, nb)\n return evaluate_agents(env, agents)\n\n\ndef rewardCounter(totalSum, reward, payOffMatrix):\n\n if reward == payOffMatrix[0][0]:\n totalSum[0] +=1\n elif reward == payOffMatrix[0][1]:\n totalSum[1] +=1\n elif reward == payOffMatrix[1][0]:\n totalSum[2] +=1\n elif reward == payOffMatrix[1][1]:\n totalSum[3] +=1\n\n\ndef rewardCounter2(totalSum, reward, payOffMatrix):\n\n if reward == payOffMatrix[0][0]:\n totalSum[0][0] +=1\n elif reward == payOffMatrix[0][1]:\n totalSum[0][1] +=1\n elif reward == payOffMatrix[0][2]:\n 
totalSum[0][2] +=1\n elif reward == payOffMatrix[1][0]:\n totalSum[1][0] +=1\n elif reward == payOffMatrix[1][1]:\n totalSum[1][1] +=1\n elif reward == payOffMatrix[1][2]:\n totalSum[1][2] +=1\n elif reward == payOffMatrix[2][0]:\n totalSum[2][0] +=1\n elif reward == payOffMatrix[2][1]:\n totalSum[2][1] +=1\n elif reward == payOffMatrix[2][2]:\n totalSum[2][2] += 1\n\ndef rewardCounter3(totalSum, reward, qsdf):\n if reward == 0:\n totalSum[0] +=1\n elif reward == 1:\n totalSum[1] +=1\n elif reward == 2:\n totalSum[2] += 1\n\n\ndef create_payoff(name):\n if name == \"PD\":\n return [[3,3],[0,5]], [[5,0],[1,1]]\n if name == \"BOS\":\n return [[1,1/2],[0,0]], [[0,0],[1/2,1]]\n if name == \"RPS\":\n return [[[0,0],[-0.25,25],[0.5,-0.5]], [[0.25,-0.25],[0,0],[-0.05,0.05]], [[-0.5,0.5],[0.05,-0.05],[0,0]]]\n if name == \"MP\":\n return [[0,1],[1,0]], [[1,0],[0,1]]\n\n\ndef main(_):\n name = \"RPS\"\n game = create_game(name)\n payoff = create_payoff(name)\n env = create_environment(game)\n totalsum = np.zeros(4)\n #sum = [[0,0,0],[0,0,0],[0,0,0]]\n for i in range(1000):\n print(i)\n rewardCounter3(totalsum, execute_scenarios_probs(env, 1000, (0, 0)), payoff)\n print(totalsum)\n\n\nif __name__ == \"__main__\":\n app.run(main)","sub_path":"open_spiel/python/Project/part_1/nash/nash.py","file_name":"nash.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"24682482","text":"from flask import Blueprint, render_template\nfrom flask_login import login_required,current_user\nfrom flask.helpers import url_for\nfrom werkzeug.datastructures import cache_property\nfrom website.models import Category, Product, Receipt\nfrom flask import Blueprint, render_template, request, make_response, jsonify, redirect, url_for\nfrom . import db\nimport sys\nimport json\nfrom sqlalchemy import desc\n\nviews = Blueprint('views', __name__)\n\n@views.route('/main', methods = ['POST', 'GET'])\ndef main():\n try:\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n except:\n print()\n if request.method == 'POST':\n req = request.get_json()\n # print(req, file=sys.stdout)\n if req['command'] == 'get_products':\n sort = req[\"sort\"]\n sort_order= req[\"sort_order\"]\n product_name= req[\"product_name\"]\n product_cats= req[\"product_categories\"]\n lower_bound, upper_bound = req['price_range']\n # print(product_name, file=sys.stdout)\n # print(req, file=sys.stdout)\n \n \n if product_name not in ('', ' ', None, {}):\n products = Product.query.filter_by(name = product_name).all()\n else:\n if sort == 'sold':\n products = Product.query.order_by(desc(Product.sold_number)).all()\n elif sort == 'date':\n products = Product.query.order_by(desc(Product.date)).all()\n else:\n if sort_order == 'desc':\n products = Product.query.order_by(desc(Product.price)).all()\n else:\n products = Product.query.order_by(Product.price).all()\n\n res_products = []\n for product in products:\n if product_cats not in ('', ' ', []) and product.category not in product_cats:\n continue\n if product.price < lower_bound or product.price > upper_bound:\n continue\n res_products.append({\"name\":product.name, \"category\":product.category, \"price\":product.price,\n \"availability_number\":product.availability_number,\n \"sold_number\":product.sold_number, \"image\":product.image})\n \n res = make_response(jsonify({\"message\": res_products}), 200)\n return res\n \n if req['command'] == 'get_categories':\n categories = Category.query.filter_by().all()\n res_categories = []\n for cat in categories:\n res_categories.append({\"name\":cat.name})\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n return render_template(\"main.html\",user=current_user)\n\n\n\n\n\n\n@views.route('/admin', methods=['GET', 'POST'])\n@login_required\ndef admin():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n if request.method == 'POST':\n req = request.get_json()\n if req['command'] == 'get_products':\n products = Product.query.filter_by().all()\n names, categories, prices, available_numbers, sold_numbers, images = [],[],[],[],[],[]\n res_products = []\n for product in products:\n res_products.append({\"name\":product.name, \"category\":product.category, \"price\":product.price,\n \"availability_number\":product.availability_number,\n \"sold_number\":product.sold_number, \"image\":product.image})\n \n res = make_response(jsonify({\"message\": res_products}), 200)\n return res\n \n if req['command'] == 'get_categories':\n categories = Category.query.filter_by().all()\n res_categories = []\n for cat in categories:\n res_categories.append({\"name\":cat.name})\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n \n if req['command'] == 'get_receipts':\n receipts = Receipt.query.order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, 
\"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n if req['command'] == 'get_filtered_receipts':\n if req['rec_id'] not in (' ', ''):\n receipts = Receipt.query.filter_by(id = req['rec_id']).all()\n else:\n receipts = Receipt.query.order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, \"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n \n return render_template(\"admin.html\")\n\n\n@views.route('/user', methods = ['POST', 'GET'])\n@login_required\ndef user():\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n if request.method == 'POST':\n req = request.get_json()\n if req['command'] == 'get_receipts':\n receipts = Receipt.query.filter_by(customer_id = current_user.id).order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, \"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n elif req['command'] == 'get_current_user':\n res = make_response(jsonify({\"message\": [{\"user_id\":current_user.id, \"user_first_name\":current_user.first_name,\n \"user_credit\":current_user.charge}]}), 200)\n return res\n \n \n return render_template(\"user.html\",user=current_user)\n\n@views.route('/signin')\ndef signin():\n return render_template(\"signin.html\",user=current_user)\n\n@views.route('/admin/create_product')\n@login_required\ndef create_product():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n return render_template(\"create_product.html\")\n\n\n@views.route('/admin/edit_product', methods = ['GET', 'POST'])\n@login_required\ndef edit_product():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n global current_product\n if request.method == 'POST':\n req = json.loads(request.get_data())\n current_product = req['product_name']\n render_template(\"edit_product.html\", product_name = create_product)\n return redirect(url_for(\"views.edit_product\"))\n print(request.method, request.get_json())\n return render_template(\"edit_product.html\", product = current_product)\n\n\n@views.route('/user/shop_basket', methods = ['GET'])\n@login_required\ndef get_shop_basket():\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n return render_template(\"shop_basket.html\", user = current_user)\n\n\ncurrent_product = ''\n 
","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"235462306","text":"#!/usr/bin/env python3 -m pytest\nfrom plex_trakt_sync.walker import Walker\nfrom tests.conftest import factory\n\nplex = factory.plex_api()\nmf = factory.media_factory()\n\n\ndef test_walker():\n w = Walker(plex, mf)\n assert type(w) == Walker\n\n w.add_library(\"TV Shows\")\n w.add_library(\"Movies (Tuti)\")\n w.add_show(\"Breaking Bad\")\n w.add_movie(\"Batman Begins\")\n\n episodes = list(w.find_episodes())\n movies = list(w.find_movies())\n\n assert len(episodes) == 0\n assert len(movies) == 0\n","sub_path":"tests/test_walker.py","file_name":"test_walker.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"288082723","text":"\"\"\"\nOriginal Developer: Jonathan Ward\n\"\"\"\n\n# Standard Modules:\nimport numpy as np\nimport scipy.signal\n\n# Custom Modules:\nimport parameters\nimport smoothing_interpolate\nimport util\nimport curvature\n\n\nclass TubePoint(object):\n\n def compute_pylon_cost(self):\n if self.is_underground:\n pylon_cost = 0\n else:\n height_cost = (self.pylon_height *\n parameters.PYLON_COST_PER_METER + 5*self.pylon_height**2)\n base_cost = parameters.PYLON_BASE_COST\n pylon_cost = height_cost + base_cost\n return pylon_cost\n\n def build_pylon(self):\n pylon = {\"height\" : self.pylon_height, \n \"cost\" : self.pylon_cost,\n \"latlng\" : self.latlng.tolist(),\n \"elevation\" : self.land_elevation,\n \"arcLength\" : self.arc_length,\n \"index\" : self.index}\n return pylon\n\n def __init__(self, arc_length, land_elevation, tube_elevation, latlng, index):\n self.arc_length = arc_length\n if land_elevation == None:\n print(\"bad land elevation\")\n print(land_elevation)\n raise ValueError\n self.land_elevation = land_elevation\n self.index = index\n self.tube_elevation = tube_elevation\n self.latlng = latlng \n self.pylon_height = tube_elevation - land_elevation\n self.is_underground = (self.pylon_height < 0)\n self.pylon_cost = self.compute_pylon_cost()\n\n\nclass TubeEdge(object):\n \n def compute_tunnel_cost(self, edge_length, tube_point_a, tube_point_b):\n if tube_point_a.is_underground and tube_point_b.is_underground:\n tunneling_cost = (edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if tube_point_a.is_underground and not tube_point_b.is_underground:\n tunneling_cost = (0.5 * edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if not tube_point_a.is_underground and tube_point_b.is_underground:\n tunneling_cost = (0.5 * edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if not tube_point_a.is_underground and not tube_point_b.is_underground:\n tunneling_cost = 0.0 \n return tunneling_cost\n\n def compute_tube_cost(self, edge_length):\n tube_cost = edge_length * parameters.TUBE_COST_PER_METER\n return tube_cost\n\n def compute_edge_length(self, tube_point_a, tube_point_b):\n tube_coords_a = [tube_point_a.arc_length, tube_point_a.tube_elevation]\n tube_coords_b = [tube_point_b.arc_length, tube_point_b.tube_elevation]\n edge_vector = np.subtract(tube_coords_b, tube_coords_a)\n edge_length = np.linalg.norm(edge_vector)\n return edge_length\n\n def __init__(self, tube_point_a, tube_point_b):\n edge_length = self.compute_edge_length(tube_point_a, tube_point_b)\n self.start_arc_length = tube_point_a.arc_length\n self.start_pylon_cost = tube_point_a.pylon_cost\n self.tube_cost = self.compute_tube_cost(edge_length)\n self.tunnel_cost = self.compute_tunnel_cost(edge_length, tube_point_a, \n tube_point_b)\n\nclass NaiveTubeProfile(object):\n\n def compute_tube_elevations(self, arc_lengths, land_elevations, latlngs,\n peak_resolution=None):\n try: \n np.sum(land_elevations)\n except TypeError: \n print(land_elevations)\n if peak_resolution == None:\n peak_resolution = 1\n num_land_elevations = len(land_elevations)\n if num_land_elevations < 5:\n tube_elevations = [max(land_elevations)] * num_land_elevations\n tube_curvature_array = [0] * num_land_elevations\n return [tube_elevations, tube_curvature_array]\n else:\n interior_land_elevations = land_elevations[peak_resolution:\n -peak_resolution]\n interior_land_elevation_peaks_indices_tuple = \\\n scipy.signal.argrelmax(interior_land_elevations,\n order=peak_resolution)\n land_elevation_peaks_indices = (\n 
interior_land_elevation_peaks_indices_tuple[0] + peak_resolution)\n land_elevation_peaks_indices = land_elevation_peaks_indices.tolist()\n land_elevation_peaks_indices.insert(0,0)\n land_elevation_peaks_indices.append(land_elevations.shape[0] - 1)\n\n tube_elevation_function = scipy.interpolate.PchipInterpolator(\n [arc_lengths[i] for i in land_elevation_peaks_indices], \n [land_elevations[i] for i in land_elevation_peaks_indices])\n tube_elevations = tube_elevation_function(arc_lengths)\n pylon_elevations = np.array(tube_elevations)-np.array(land_elevations)\n\n try: \n np.sum(pylon_elevations)\n except TypeError: \n print(pylon_elevations)\n\n peak_resolution = 200\n num_pylon_elevations = len(pylon_elevations)\n if num_land_elevations < 5:\n tube_elevations = [max(pylon_elevations)] * num_pylon_elevations\n tube_curvature_array = [0] * num_pylon_elevations\n return [tube_elevations, tube_curvature_array]\n\n else:\n interior_pylon_elevations = pylon_elevations[peak_resolution:\n -peak_resolution]\n interior_pylon_elevation_peaks_indices_tuple = \\\n scipy.signal.argrelmax(interior_pylon_elevations,\n order=peak_resolution)\n pylon_elevation_peaks_indices = (\n interior_pylon_elevation_peaks_indices_tuple[0] + peak_resolution)\n pylon_elevation_peaks_indices = pylon_elevation_peaks_indices.tolist()\n pylon_elevation_peaks_indices.insert(0,0)\n pylon_elevation_peaks_indices.append(pylon_elevations.shape[0] - 1)\n adjusted_land_elevations = np.copy(land_elevations)\n maxdiff = (max(land_elevations)-min(land_elevations))/15.0\n n = 1\n for i in sorted(land_elevation_peaks_indices, key=lambda i: land_elevations[i], reverse=True):\n adjusted_land_elevations[i] = land_elevations[i]-(maxdiff*n**(-2))\n n = n + 1\n n = 1\n for i in sorted(pylon_elevation_peaks_indices, key=lambda i: pylon_elevations[i], reverse=True):\n adjusted_land_elevations[i] = land_elevations[i]+(maxdiff*n**(-2))\n n = n + 1\n land_elevation_peaks_indices = sorted(set(land_elevation_peaks_indices + pylon_elevation_peaks_indices))\n tube_elevation_function = scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], \n [adjusted_land_elevations[i] for i in land_elevation_peaks_indices])\n tube_elevations = tube_elevation_function(arc_lengths)\n\n # peaklist_byheight = sorted(land_elevation_peaks_indices, key=lambda i: land_elevations[i], reverse=True)\n\n # def lower(elt, proportion):\n # land_elevations[peaklist_byheight[elt]] = (land_elevations[peaklist_byheight[elt]] - \n # proportion*(land_elevations[peaklist_byheight[elt]] - \n # max(land_elevations[land_elevation_peaks_indices.index(peaklist_byheight[elt])+1],\n # land_elevations[land_elevation_peaks_indices.index(peaklist_byheight[elt])-1])))\n\n # def compute_cost(tube_elevations):\n # tube_points = []\n # for i in range(len(arc_lengths)): \n # tube_point = TubePoint(arc_lengths[i], land_elevations[i],\n # tube_elevations[i], latlngs[i])\n # tube_points.append(tube_point)\n # tube_edges = [TubeEdge(tube_points[i], tube_points[i + 1])\n # for i in range(len(tube_points) - 1)]\n # tube_costs = [tube_edge.tube_cost for tube_edge in tube_edges]\n # tunneling_costs = [tube_edge.tunneling_cost for tube_edge in tube_edges]\n # pylons_costs = [tube_point.pylon_cost for tube_point in tube_points]\n # total_pylon_cost = sum(pylons_costs)\n # tube_cost = sum(tube_costs)\n # tunneling_cost = sum(tunneling_costs)\n # return total_pylon_cost + tube_cost + tunneling_cost\n\n # def lowering_is_good(i):\n # tube_elevation_function = 
scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], [land_elevations[i] for i in land_elevation_peaks_indices])\n # tube_elevations = tube_elevation_function(arc_lengths)\n # old_cost = compute_cost(tube_elevations)\n # lower(i, .333)\n # tube_elevation_function = scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], [land_elevations[i] for i in land_elevation_peaks_indices])\n # tube_elevations = tube_elevation_function(arc_lengths)\n # new_cost = compute_cost(tube_elevations)\n # lower(i, -.5)\n # return new_cost < old_cost\n\n # for i in range(0,min(6,len(peaklist_byheight)-1)):\n # print (\"peak number is:\")\n # print (i)\n # attempts = 0\n # while (lowering_is_good(i) and attempts < 4):\n # lower(i, .333)\n # attempts +=1\n\n # print (\" \")\n\n yprime = tube_elevation_function.derivative(1)\n ydoubleprime = tube_elevation_function.derivative(2)\n tube_curvature_array = np.array([np.absolute(ydoubleprime(s)/(1 + yprime(s)**2)**1.5) for s in arc_lengths])\n return [tube_elevations, tube_curvature_array]\n\n def build_pylons(self):\n pylons = [tube_point.build_pylon() for tube_point in self.tube_points]\n return pylons\n\n def build_cost_segments(self, tube_edges):\n cost_segment_step_size = 1000 #1 Kilometer\n tube_arc_length_counter = cost_segment_step_size \n cost_segments = [0]\n for tube_edge in tube_edges:\n if tube_edge.start_arc_length < tube_arc_length_counter:\n cost_segments[-1] += (tube_edge.tube_cost + \n tube_edge.tunnel_cost + tube_edge.start_pylon_cost)\n else:\n cost_segments.append(0)\n cost_segments[-1] += (tube_edge.tube_cost + \n tube_edge.tunnel_cost + tube_edge.start_pylon_cost)\n tube_arc_length_counter += cost_segment_step_size\n #if len(cost_segments) > 20:\n # print(\"cost_segments\")\n # print(cost_segments)\n return cost_segments\n\n def __init__(self, elevation_profile, peak_resolution=None):\n arc_lengths = elevation_profile.arc_lengths \n land_elevations = elevation_profile.land_elevations\n latlngs = elevation_profile.latlngs\n tube_elevations, tube_curvature_array = self.compute_tube_elevations(\n arc_lengths, land_elevations, latlngs, peak_resolution)\n tube_points = []\n for i in range(len(arc_lengths)): \n index = i + 1\n tube_point = TubePoint(arc_lengths[i], land_elevations[i],\n tube_elevations[i], latlngs[i], index)\n tube_points.append(tube_point)\n tube_edges = [TubeEdge(tube_points[i], tube_points[i + 1])\n for i in range(len(tube_points) - 1)]\n cost_segments = self.build_cost_segments(tube_edges)\n tube_costs = [tube_edge.tube_cost for tube_edge in tube_edges]\n tunnel_costs = [tube_edge.tunnel_cost for tube_edge in tube_edges]\n pylons_costs = [tube_point.pylon_cost for tube_point in tube_points] \n self.arc_lengths = arc_lengths\n self.tube_points = tube_points\n self.cost_segments = cost_segments\n self.land_elevations = land_elevations\n self.tube_elevations = tube_elevations\n self.tube_curvature_array = tube_curvature_array\n self.total_pylon_cost = sum(pylons_costs)\n self.tube_cost = sum(tube_costs)\n self.tunneling_cost = sum(tunnel_costs)\n\n","sub_path":"test/tube_naive.py","file_name":"tube_naive.py","file_ext":"py","file_size_in_byte":12755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"650194025","text":"import pygame\nfrom pygame.locals import *\nfrom Controller import *\nimport MainMenu\n\nclass Menu:\n \n class Option:\n\n hovered = False\n font = None\n \n def __init__(self, text, pos, font, surface):\n self.text = text\n self.pos = pos\n self.font = font\n self.surface = surface\n self.set_rect()\n self.draw()\n \n def draw(self):\n self.set_rend()\n self.surface.blit(self.rend, self.rect)\n \n def set_rend(self):\n self.rend = self.font.render(self.text, True, self.get_color())\n \n def get_color(self):\n if self.hovered:\n return (255, 255, 255)\n else:\n return (100, 100, 100)\n \n def set_rect(self):\n self.set_rend()\n self.rect = self.rend.get_rect()\n self.rect.topleft = self.pos\n\n def pauseMenuStart(self, controller):\n self.c = controller\n global pause\n while True:\n pygame.event.pump()\n pygame.mouse.set_visible(1)\n for option in self.options:\n if option.rect.collidepoint(pygame.mouse.get_pos()):\n option.hovered = True\n else:\n option.hovered = False\n option.draw()\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n return \"Quit\"\n if self.options[0].hovered == True:\n if event.type == MOUSEBUTTONDOWN:\n pygame.mouse.set_visible(False)\n return \"Continue\"\n \n if self.options[1].hovered == True:\n if event.type == MOUSEBUTTONDOWN:\n pygame.mouse.set_visible(True)\n return \"Exit\"\n \n def __init__(self, controller):\n pauseMenuFont = pygame.font.Font(None, 40)\n\n self.c = controller\n self.surface = controller.screen\n\n cont = self.Option(\"CONTINUE\", (178, 330), pauseMenuFont, self.surface)\n quit = self.Option(\"QUIT\", (222, 370), pauseMenuFont, self.surface)\n \n self.options = [cont, quit]\n \n def pause(self, controller):\n return self.pauseMenuStart(controller)\n \n","sub_path":"PauseMenu.py","file_name":"PauseMenu.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"540392571","text":"#!/usr/bin/python3\n\nimport os\nimport time\n\ndebug = 0\nif debug == 0:\n f = os.system\nelse:\n f = print\n\n\nvoterList = []\n\nfor line in open('vo_keys'):\n accName = line[:line.find(',')]\n if accName[0:2] == 'vo':\n voterList.append(accName)\n\n\n#all voter stake\n\nn = 0\nfor voter in voterList:\n amount = str(4500000+n)\n f('claac system delegatebw '+ voter +' ' + voter+''' \"'''+amount + ''' AAC\" \"'''+amount + ''' AAC\"''')\n n+=10000\n print(\"voter: \"+voter+ \" stake token ok!\")\n time.sleep(0.02)\n\n","sub_path":"tests/ck_test/0-vote-test-scripts/8-voter_stake.py","file_name":"8-voter_stake.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"58909406","text":"# Script to pull down all the australian tools in ToosAU into a JSON file\n# readable by ToolsAU.\n# Madison Flannery, 2016.\n\nimport urllib2\nimport json\n\n# The URL for the bio.tools API.\nBIO_TOOLS_URL=\"https://bio.tools/api/tool?page=\"\n\n# Search terms.\n# We search for country, email, and capital cities.\nAUSTRALIA = \"Australia\"\nEMAIL=\"*.edu.au\"\nCAPITAL_CITIES=[\"Brisbane\", \"Sydney\", \"Melbourne\", \"Hobart\", \"Adelaide\", \"Perth\", \"Darwin\"]\n\n# API request params, appended to URL.\nCRITERIA_CONTACT = \"contact=\"\nCRITERIA_EMAIL = \"contactEmail=\"\nCRITERIA_INSTITUTION = \"creditsInstitution=\"\n\n# Build the search URL's.\nqueries = [CRITERIA_INSTITUTION + city for city in CAPITAL_CITIES]\nqueries.append(CRITERIA_EMAIL + EMAIL)\nqueries.append(CRITERIA_CONTACT + AUSTRALIA)\n\nresults = []\n\n# Do a query for each URL.\nfor query in queries:\n page_num = 1\n\n # Make sure we get all pages of the query results.\n # API will return 25 results at a time.\n while True:\n # Query and load JSON response.\n response = urllib2.urlopen(BIO_TOOLS_URL + str(page_num) + \"&\" + query)\n data = json.load(response)\n\n for item in data['list']:\n # Ignore duplicates.\n if item not in results:\n results.append(item)\n # Break if we have no more pages of the query to do.\n if data['next'] == None:\n break\n page_num += 1\n\n# Some output.\nprint('Number of Query Results: ' + str(len(results)))\n\n# If we actually have some results, i.e. things went well.\nif(len(results) > 0):\n # Sort the results alphabetically, ignore case.\n results = sorted(results,key=lambda x:x['name'].lower())\n # Dump to file.\n with open('au_tools.json', 'w') as outfile:\n json.dump(results, outfile)\n","sub_path":"toolsAU.py","file_name":"toolsAU.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"50915085","text":"ipadd1=input(\"IP 1 : \")\nipadd2=input(\"IP 2 : \")\n\n#converting IP address in list\nip1 = ipadd1.split(\".\")\nip2 = ipadd2.split(\".\")\n\n#Converting IP address in binary numbers\nipx1=('{0:08b}.{1:08b}.{2:08b}.{3:08b}'.format(int(ip1[0]),int(ip1[1]),int(ip1[2]),int(ip1[3])))\nipx2=('{0:08b}.{1:08b}.{2:08b}.{3:08b}'.format(int(ip2[0]),int(ip2[1]),int(ip2[2]),int(ip2[3])))\n\n#printing the binary converted IP addresses\nprint(ipx1)\nprint(ipx2)\n\n#Taking netmask as an input\nnetmask=int(input(\"Netmask : \"))\n\n#Using the input netmask and performing associated convertion using the following function:\ndef concatenate_list_data(list):\n result= ''\n for element in list:\n result += str(element)\n for x in range(32-int(len(result))):\n result+=str(0)\n #print(result)\n return(result)\n\nnmf = concatenate_list_data([int(1) for x in range(netmask)])\n#print(nmf)\n\n#Splitting the netmask into a list of 8 bits each\nl=[nmf[i:i+8] for i in range(0,len(nmf),8)]\n#print(l)\n\n#Using join() to concatenate the list seperated by a \".\"\nh=\".\"\nh=h.join(l)\nprint(h)\n\n#Splitting up the IP addrresses and joining them to perforn the required AND operation\ne=\"\"\nf=\"\"\nui = ipx1.split(\".\")\n#print(ui)\ne=e.join(ui)\n\n#e=int(e)\n#print(e)\n\nux =ipx2.split(\".\")\nf=f.join(ux)\n#f=int(f)\n\nnmf=int(nmf)\n#print(nmf)\n\ninputA=int(e,2)\ninputB=int(f,2)\n\n\n#Checking for the given condition using the AND operator\nif(inputA & nmf!=inputB & nmf):\n print(\"Belongs to different subnet\")\nelse:\n print(\"Belongs to same subnet\")\n","sub_path":"a02.py","file_name":"a02.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"210913934","text":"##############################################################################\n# Import some libraries\n##############################################################################\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n##############################################################################\n# Import some extra special libraries from my own repo and do some other stuff\n##############################################################################\nsys.path.insert(0, r\"D:\\Python\\Local Repo\\library\")\nnp.set_printoptions(suppress=True)\nimport prd_plots\nimport prd_file_import\nimport prd_data_proc\nimport prd_maths\ncs = prd_plots.palette()\n\n##############################################################################\n# Do some stuff\n##############################################################################\n# Specify results directory and change working directory to this location\np0 = (r\"D:\\Experimental Data\\F5 L10 Spectrometer\\Spec data 20190516\")\n# p0 = (r\"D:\\Experimental Data\\Internet Thorlabs optics data\"))\nos.chdir(p0)\n# Generate list of relevant data files and sort them chronologically\nroi = 80\n\nλs, ctss, lbs = prd_file_import.load_spec_dir(p0)\nxs0 = λs[1]\nys0 = ctss[1]\n\n# Use gin to get first approximation for peak location\npts = prd_plots.gin(λs[0], ctss[0], 0,\n 'politely click peak locations and smash enter')\n\npk_λs = []\npk_idxs = []\nfit_data = []\n# Loop over data in directory and perform fits on each spec, for each peak\nfor i0, val0 in enumerate(pts):\n pk_λ = str(int(np.round(pts[i0][0])))\n pk_lb = 'peak ' + str(i0) + ' (' + pk_λ + ' nm)'\n λ_pk, idx_pk = prd_maths.find_nearest(xs0, pts[i0, 0])\n pk_λs.append(λ_pk)\n pk_idxs.append(idx_pk)\n # Restrict data set to roi of interest\n x_roi = xs0[int(idx_pk - roi / 2):int(idx_pk + roi / 2)]\n y_roi = ys0[int(idx_pk - roi / 2):int(idx_pk + roi / 2)]\n # Extract first guess values for fitting\n μ = λ_pk\n σ = 0.1\n bkg = np.mean(y_roi)\n # Set up higher resolution x axis for fit\n x_fit = np.linspace(min(x_roi), max(x_roi), 1000)\n # Perform fit\n popt, pcov = curve_fit(prd_maths.Gaussian_1D,\n x_roi, y_roi, p0=[1, μ, σ, bkg])\n\n As, μs, σs, Ps = prd_data_proc.spec_seq_Gauss_fit_20190516(p0,\n popt,\n idx_pk,\n roi,\n pk_lb)\n fit_data.append([As, μs, σs, Ps])\n data_name = pk_lb + '.dat'\n data = np.column_stack((Ps, As, μs, σs))\n header = \"Powers, Gaussian Amplitudes, Gaussian centres, Gaussian widths\"\n np.savetxt(data_name, data, header=header)\nprint(len(fit_data))\nprd_plots.ggplot()\nsize = 4\nfig1 = plt.figure('fig1', figsize=(size * np.sqrt(2), size))\nax1 = fig1.add_subplot(1, 1, 1)\nfig1.patch.set_facecolor(cs['mnk_dgrey'])\nax1.set_xlabel('Wavelength (λ - nm)')\nax1.set_ylabel('Counts')\nax1.set_title('Labelled spectrum')\nax1.plot(xs0, ys0, '-.', markersize=2, lw=0.5,\n alpha=1, color=cs['gglred'], label='')\n\npk_ys = [ys0[i] for i in pk_idxs]\nfor i0, val0 in enumerate(fit_data):\n pk_x = fit_data[i0][1][1]\n pk_y = prd_maths.Gaussian_1D(pk_x,\n fit_data[i0][0][1],\n fit_data[i0][1][1],\n fit_data[i0][2][1])\n ax1.plot(pk_x, pk_y, '.',\n mfc=cs['ggblue'],\n mec=cs['ggblue'],\n label='peak ' + str(i0))\n ax1.text(pk_x, pk_y, ' peak ' + str(i0))\n\nfig1.tight_layout()\nplt.show()\nprd_plots.PPT_save_2d(fig1, ax1, 'peak labels.png')\n","sub_path":"Experimental analysis/Quantum dots/spectral gaussian fits 20190516.py","file_name":"spectral gaussian fits 
20190516.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"426733005","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nimport image_cropping.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('website', '0024_pressclipping'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FenaconMidia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(help_text=b'Titulo da Noticia', max_length=90, verbose_name=b'T\\xc3\\xadtulo')),\n ('subtitle', models.CharField(help_text=b'Para Manchetes', max_length=100, null=True, verbose_name=b'Sub-t\\xc3\\xadtulo', blank=True)),\n ('text', models.TextField(verbose_name=b'Texto')),\n ('slug', models.SlugField(unique=True, max_length=200, blank=True)),\n ('credit', models.CharField(help_text=b'Creditos da foto.', max_length=50, verbose_name=b'Cr\\xc3\\xa9dito', blank=True)),\n ('font', models.CharField(help_text=b'Fonte da not\\xc3\\xadcia', max_length=50, verbose_name=b'Fonte', blank=True)),\n ('author', models.CharField(help_text=b'Autor da not\\xc3\\xadcia', max_length=50, verbose_name=b'Autor', blank=True)),\n ('featured_image', models.ImageField(help_text=b'Dimens\\xc3\\xb5es 600x335px ou maior - JPEG', upload_to=b'uploads/noticias/%Y/%m/', verbose_name=b'Imagem Destaque', blank=True)),\n (b'featured_big', image_cropping.fields.ImageRatioField(b'featured_image', '600x335', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Destaque princial na home.', verbose_name=b'Destaque Grande')),\n (b'featured_medium', image_cropping.fields.ImageRatioField(b'featured_image', '470x180', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Destaque secund\\xc3\\xa1rio home.', verbose_name=b'Destaque M\\xc3\\xa9dio')),\n (b'featured_small', image_cropping.fields.ImageRatioField(b'featured_image', '170x170', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Imagem para listagem de not\\xc3\\xadcias e \\xc3\\xbaltimas no\\xc3\\xadticas.', verbose_name=b'Destaque Pequeno')),\n ('published_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data de Publicacao')),\n ('status', models.BooleanField(default=True, help_text=b'Se esta op\\xc3\\xa7\\xc3\\xa3o estiver desmarcada os usu\\xc3\\xa1rios do portal n\\xc3\\xa3o ir\\xc3\\xa3o mais ver esta not\\xc3\\xadcia', verbose_name=b'Noticia Ativa?')),\n ('featured', models.BooleanField(default=False, help_text=b'Coloca a not\\xc3\\xadcia em destaque. Deve possuir imagem destaque.', verbose_name=b'Destaque secund\\xc3\\xa1rio')),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-published_at',),\n 'get_latest_by': 'published_at',\n 'verbose_name': 'FENACON na M\\xeddia',\n 'verbose_name_plural': 'FENACON na M\\xeddia',\n },\n ),\n ]\n","sub_path":"apps/website/migrations/0025_fenaconmidia.py","file_name":"0025_fenaconmidia.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"140794724","text":"\"\"\" This is magic glue for integrating the frontend and backend.\n\n This is NOT the place for backend customizations. Go to\n api/historic_hebrew_dates_ui/settings.py instead.\n\"\"\"\n\nimport os.path as op\n\nhere = op.dirname(op.abspath(__file__))\n\n# First, import the standard backend settings. This requires some\n# magic because the backend directory itself is not a Python package.\n# Imitated from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n# or\n# https://stackoverflow.com/a/29855240\n# (respectively for Python >= 3.5 and Python 3.4)\n\nimport sys\nfrom importlib import util, machinery\n\nsettings_name = 'settings'\nsettings_path = op.join(here, 'api', 'historic_hebrew_dates_ui', 'settings.py')\n\nif sys.version_info >= (3, 5):\n spec = util.spec_from_file_location(settings_name, settings_path)\n settings = util.module_from_spec(spec)\n spec.loader.exec_module(settings)\nelse:\n settings = machinery.SourceFileLoader(settings_name, settings_path).load_module()\n\nsys.modules[settings_name] = settings\n\nfrom settings import *\n\n# Next, augment the settings to make the backend aware of the frontend.\n\nSTATICFILES_DIRS += [\n op.join(here, 'web-ui', 'dist'),\n op.join(here, 'web-ui', 'node_modules'),\n]\n\n\nPROXY_FRONTEND = \"http://localhost:4200\"\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"481993993","text":"from os import getenv\nfrom subprocess import Popen, PIPE\n\n\n# ---------------------------------------------------------------------------- #\n# API key:\napi_key = getenv(\"CONSUMER_KEY\")\n# API secret key:\napi_secret = getenv(\"CONSUMER_SECRET\")\n# Access token: \naccess_token = getenv(\"API_KEY\")\n# Access token secret: \naccess_token_secret = getenv(\"API_SECRET\")\n\n\n# ---------------------------------------------------------------------------- #\ndef create_auth_json():\n #Create auth.json file for twitter-to-sqlite\n p = Popen(['twitter-to-sqlite', 'auth'], stdin=PIPE)\n p.stdin.write(f\"{api_key}\\n\".encode())\n p.stdin.write(f\"{api_secret}\\n\".encode())\n p.stdin.write(f\"{access_token}\\n\".encode())\n p.stdin.write(f\"{access_token_secret}\\n\".encode())\n p.stdin.flush()\n return\n\n\n# ---------------------------------------------------------------------------- #\nif __name__ == \"__main__\":\n create_auth_json()","sub_path":"bot/create_auth_json.py","file_name":"create_auth_json.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"335749710","text":"variable = [\"I0\",\"I1\",\"I2\",\"I3\",\"1\",\"0\"]\nstack = []\ntree_list = [\"\"]*32\n#max_position_of_or = 0\nstack_or = []\nimport filter\n\ndef Tree(left, right, root, test, q):\n # ******************************preorder********************************\n stack_or = []\n stack_and = []\n RUN_OR = True\n test = obj.check_last(test)\n for i in range(len(test)):\n # check \"(\" and \")\"\n if( test[i] == \"(\" ):\n RUN_OR = False\n stack.append(i)\n elif( test[i] == \")\"):\n position = stack.pop()\n\n # if stack is empty\n if(len(stack) == 0):\n RUN_OR = True\n\n # filter \"!\" sinario !(I0&I1), (!I1&I2) and !(I0)\n if( test[i] == \"!\" and (test[i+1] in variable or test[i+1] == \"(\") ):\n if(RUN_OR == False):\n pass\n elif(RUN_OR == True):\n Is_joke = obj.check_last(test[i+1:])\n #print(Is_joke, test[i], right)\n root = test[i]\n left = Is_joke\n right = right\n\n #print(RUN_OR)\n #print(f\"test:{test} \\nleft {left} \\nroot {root} \\nright {right}\\n-----------------------\")\n \n if(RUN_OR):\n # find the max position of + for split to left root right\n if(test[i] == \"+\"):\n max_position_of_or = i\n root = test[max_position_of_or]\n left = test[:max_position_of_or]\n right = test[max_position_of_or+1:]\n stack_or.append(max_position_of_or)\n\n if(test[i] == \"&\"):\n max_position_of_and = i\n root = test[max_position_of_and]\n left = test[:max_position_of_and]\n right = test[max_position_of_and+1:]\n stack_and.append(max_position_of_and)\n\n if(len(stack_or) > 0):\n root = test[max_position_of_or]\n left = test[:max_position_of_or]\n right = test[max_position_of_or+1:]\n \n if(len(stack_and) > 0):\n root = test[max_position_of_and]\n left = test[:max_position_of_and]\n right = test[max_position_of_and+1:]\n\n pl = (2*q)+1\n pr = (2*q)+2\n tree_list[q] = root\n tree_list[pl] = left\n tree_list[pr] = right\n # *** (2*q)+1 = left index\n # *** (2*q)+2 = right index\n # *** q = root index\n\n # meter checking value recursive value\n #print(f\"test:{test} \\nleft {left} \\nroot {root} \\nright {right}\\n-----------------------\")\n if(len(left) == 1):\n tree_list[pl] = left[0]\n if( len(right) > 1):\n Tree(\"\", \"\", \"\", right, pr)\n else:\n if(right == \"\"):\n tree_list[pr] = right\n else:\n tree_list[pr] = right[0]\n\n return tree_list\n\n Tree(\"\", \"\", \"\", left, pl)\n\n if( len(right) > 1 ):\n Tree(\"\", \"\", \"\", right, pr)\n else:\n #print(pl,q,pr)\n if(right == \"\"):\n tree_list[pr] = right\n else:\n tree_list[pr] = right[0]\n\n return tree_list\n\n#((I2&I1)+(I0&I1))\n# data set for testing my algorithm\ntest1 = [\"!\",\"(\",\"1\",\"+\",\"0\",\")\"] # OK\ntest2 = [\"!\",\"(\",\"!\",\"(\",\"0\",\"+\",\"I0\",\"&\",\"1\",\")\",\")\"] # OK\n\n#(I0+!I1+!(I2))&(!I0+I1+I2)\ntest3 = [\"(\",\"I0\",\"+\",\"!\",\"I1\",\"+\",\"!\",\"(\",\"I2\",\")\",\")\",\"&\",\"(\",\"!\"\n ,\"I0\",\"+\",\"I1\",\"+\",\"I2\",\")\"] # OK\n\n#\"!(I0&I1)+!(I1+I2)\"\ntest4 = [\"!\",\"(\",\"I0\",\"&\",\"I1\",\")\",\"+\",\"!\",\"(\",\"I1\",\"+\",\"I2\",\")\"]\n\n#\"(((I0&I1&!I2)+!I1)+I3)\"\ntest5 = [\"(\",\"(\",\"(\",\"I0\",\"&\",\"I1\",\"&\",\"!\",\"I2\",\")\",\"+\",\"!\",\"I1\",\")\",\"+\",\"I3\",\")\"]\n\n#((I2&I1)+(I0&I1))\ntest6 = [\"(\",\"(\",\"I2\",\"&\",\"I1\",\")\",\"+\",\"(\",\"I0\",\"&\",\"I1\",\")\",\")\"]\n\n#I2&I1+I0&I1\ntest7 = [\"I2\",\"&\",\"I1\",\"+\",\"I0\",\"&\",\"I1\"] # OK\n\nq = 0\nobj = filter.check_bracket()\n\nprint(\"#\",1,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test1, q))\n\nprint(\"#\",2,\"*\"*50)\ntree_list = 
[\"\"]*32\nprint(Tree('','','',test2, q))\n\nprint(\"#\",3,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test3, q))\n\nprint(\"#\",4,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test4, q))\n\nprint(\"#\",5,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test5, q))\n\nprint(\"#\",6,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test6, q))\n\nprint(\"#\",7,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test7, q))","sub_path":"Boolean expression string/Demo_version/Boolean_tree_recursive_left_and_right.py","file_name":"Boolean_tree_recursive_left_and_right.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"270443821","text":"from graph import Graph, Node\nimport struct\nimport re\n\n#Парсер заполняющий Граф.\nclass Reader:\n def read(file_path):\n g = Graph()\n\n with open(file_path, \"r\") as f:\n lines = [line for line in f]\n u, v = map(int, lines[0].strip().split(' '))\n for i in range(1, u + 1):\n #Записываем узел\n g.add_node(Node(i))\n for i in range(1, v + 1):\n (edge1, edge2) = map(int, lines[i].strip().split(' '))\n #Записываем ребро\n g.add_edge(Node(edge1), Node(edge2))\n g.start, g.finish = map(int, lines[len(lines) - 1].strip().split(' '))\n return g\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"572123382","text":"print(\"██╗ ██╗███╗ ██╗ ██████╗\")\nprint(\"██║ ██║████╗ ██║██╔═══██╗\")\nprint(\"██║ ██║██╔██╗ ██║██║ ██║\")\nprint(\"██║ ██║██║╚██╗██║██║ ██║\")\nprint(\"╚██████╔╝██║ ╚████║╚██████╔╝\")\nprint( \"╚═════╝ ╚═╝ ╚═══╝ ╚═════╝ \")\n\nprint(\" \")\nprint(\" Es un juego que se juega de 2 a 10 jugadores\")\nprint(\" \")\nprint(\" Donde cada jugador recibe 7 cartas al empezar\")\nprint(\" \")\nprint(\" Tu objetivo es llegar a los 500 puntos para derrotar a tus compañeros\")\nprint(\" \")\nprint(\" ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄\")\nprint(\"▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌\")\nprint(\"▐░█▀▀▀▀▀▀▀▀▀ ▀▀▀▀█░█▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌ ▀▀▀▀█░█▀▀▀▀ \")\nprint(\"▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\"▐░█▄▄▄▄▄▄▄▄▄ ▐░▌ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌ ▐░▌ \")\nprint(\"▐░░░░░░░░░░░▌ ▐░▌ ▐░░░░░░░░░░░▌▐░░░░░░���░░░░▌ ▐░▌ \")\nprint(\" ▀▀▀▀▀▀▀▀▀█░▌ ▐░▌ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀█░█▀▀ ▐░▌ \")\nprint(\" ▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\" ▄▄▄▄▄▄▄▄▄█░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\"▐░░░░░░░░░░░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\" ▀▀▀▀▀▀▀▀▀▀▀ ▀ ▀ ▀ ▀ ▀ ▀ \")\nprint( \" \")\n\nprint( ) \nprint(\"________ ________ ________ ________ ________ ________ ________ ________ ________\")\nprint('\"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\"')\nprint( ) \nfrom mesa import *\n\n\n\nwhile True:\n for s in range(0,len(jugadores)):\n os.system(\"cls\")\n tablero.accion(jugadores)\n if rondaIniciada:\n carta = tablero.inicial(barajas)\n rondaIniciada = False\n \n else:\n tablero.repCartas(carta)\n \n print(\" \")\n print(\"{} tu mano es :\".format(jugadores[s].nombre))\n print(\" \")\n jugadores[s].mostrarMano()\n print(\" \")\n jugadores[s].mostrarOpciones()\n desicion = input(\"¿Que deseas hacer?: \")\n while desicion not in [\"q\",\"r\",\"w\"]:\n os.system(\"cls\")\n tablero.accion(jugadores)\n if rondaIniciada:\n carta = tablero.inicial(barajas)\n rondaIniciada = False\n \n else:\n tablero.repCartas(carta)\n \n print(\" \")\n print(\"{} tu mano es :\".format(jugadores[s].nombre))\n print(\" \")\n jugadores[s].mostrarMano()\n print(\" \")\n\n jugadores[s].mostrarOpciones()\n desicion = input(\"¿Que deseas hacer?: \")\n\n if desicion == \"q\":\n try:\n opcion = int(input(\" Que carta deseas jugar? : \"))\n jugada = jugadores[s].jugarCarta(opcion)\n if jugada[0] in barajas.valorCartas:\n if tablero.validarCarta:\n if jugada[0] == \"Retorno\":\n jugadores.reverse()\n\n elif jugada[0] == \"Elegir color\":\n especiales.mostrarColores()\n color = input(\"Elige un color: \")\n especiales.cambiarColor(jugada,especiales.opcionColor(color))\n\n\n carta = jugada\n tablero.repCartas(carta)\n \n elif tablero.validarCarta(jugada,carta):\n carta = jugada\n tablero.repCartas(carta)\n \n else:\n jugadores[s].mano.append(jugada)\n print(\"Haz sido penalizado por jugada incorrecta\")\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n except:\n print(\"Mira bien la longitud de tu mano\")\n print(\"Penalizado por no atencionar bien tu mano\")\n time.sleep(2)\n\n elif desicion == \"r\":\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n else:\n try:\n jugadores[s].Uno()\n opcion = int(input(\" Que carta deseas jugar? 
: \"))\n jugada = jugadores[s].jugarCarta(opcion)\n if jugada[0] in barajas.valorCartas:\n if tablero.validarCarta:\n if jugada[0] == \"Retorno\":\n jugadores.reverse()\n\n\n if tablero.validarCarta(jugada,carta):\n carta = jugada\n tablero.repCartas(carta)\n \n else:\n jugadores[s].mano.append(jugada)\n print(\"Haz sido penalizado por jugada incorrecta\")\n barajas.robar(jugadores[s])\n time.sleep(1)\n time.sleep(2)\n\n except:\n print(\"Mira bien la longitud de tu mano\")\n print(\"Penalizado por no atencionar bien tu mano\")\n time.sleep(2)\n\n\n if len(jugadores[s].mano) >= 2 and jugadores[s].estado == \"Uno\":\n jugadores[s].estado = \"\"\n\n if len(jugadores[s].mano) == 1 and jugadores[s].estado == \"\":\n print(\"Haz sido penalizado por no decir 'Uno' \")\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n\n if len(jugadores[s].mano) == 0:\n print(\"{} ha ganado esta ronda\".format(jugadores[s].nombre))\n jugadores[s].sumarPuntos(jugadores,jugadores[s],barajas)\n tablero = Mesa()\n tablero.accion(jugadores)\n barajas.repartir(jugadores)\n rondaIniciada = True\n jugadores[s].reiniciarMano(jugadores)\n jugadores[s].reiniciarEstado(jugadores)\n print(\"Nueva ronda\")\n time.sleep(2)\n os.system(\"cls\")\n\n\n\n if jugadores[s].verificarPuntos(jugadores):\n break\n","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"489744961","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport sys\n\nclass Mlp():\n \n\n def __init__(self, size_layers, act_funct='sigmoid', bias_flag=True,learning_rate=1,mean_weights=1):\n self.learning_rate=learning_rate\n self.size_layers = size_layers\n self.n_layers = len(size_layers)\n self.act_f = act_funct\n self.bias_flag = bias_flag\n self.mean_weights=mean_weights\n self.theta_weights = []\n self.initialize_theta_weights()\n \n \n \n\n def train(self, X, Y, iterations=400):\n for iteration in range(iterations):\n self.gradients = self.backpropagation(X, Y)\n for ii in range( self.n_layers-1):\n self.theta_weights[ii]=self.theta_weights[ii]-((self.learning_rate)*self.gradients[ii])#0.000009 for sum\n \n\n def predict(self, X):\n\n A , Z = self.feedforward(X)\n \n Y_hat = A[-1]\n return Y_hat\n\n def initialize_theta_weights(self):\n size_next_layers = self.size_layers.copy()\n size_next_layers.pop(0)\n i=0\n for i in range(len(size_next_layers)):\n if self.bias_flag:\n theta_tmp = self.mean_weights*(np.random.randn(size_next_layers[i], self.size_layers[i] + 1 ))\n else:\n theta_tmp = self.mean_weights*(np.random.randn(size_next_layers[i], self.size_layers[i])) \n self.theta_weights.append(theta_tmp)\n \n return self.theta_weights\n\n def backpropagation(self, X, Y):\n \n if self.act_f == 'sigmoid':\n g_dz = lambda x: self.sigmoid_derivative(x)\n elif self.act_f == 'relu':\n g_dz = lambda x: self.relu_derivative(x)\n\n n_examples = X.shape[0]\n \n A, Z = self.feedforward(X)\n\n # Backpropagation\n deltas = [None] * self.n_layers\n deltas[-1] = A[-1] - Y\n #deltas[-1]=abs(deltas[-1])\n #deltas[-1] = deltas[-1]* g_dz(Z[-1])\n \n #print(Z[-1])\n \n for ix_layer in np.arange(self.n_layers - 2 , 0 , -1):\n #print(ix_layer)\n theta_tmp = self.theta_weights[ix_layer]\n if self.bias_flag:\n \n theta_tmp = np.delete(theta_tmp, np.s_[0], 1)\n deltas[ix_layer] = (np.matmul(theta_tmp.transpose(), deltas[ix_layer + 1].transpose() ) ).transpose() * g_dz(Z[ix_layer])\n #print(Z) \n #print(deltas[-1])\n \n gradients = [None] * (self.n_layers - 1)\n for ix_layer in range(self.n_layers - 1):\n grads_tmp = np.matmul(deltas[ix_layer + 1].transpose() , A[ix_layer])\n grads_tmp = grads_tmp / n_examples\n \n gradients[ix_layer] = grads_tmp;\n #print(gradients)\n\n #print(gradients) \n return gradients\n\n def feedforward(self, X):\n \n if self.act_f == 'sigmoid':\n g = lambda x: self.sigmoid(x)\n elif self.act_f == 'relu':\n g = lambda x: self.relu(x)\n\n A = [None] * self.n_layers\n Z = [None] * self.n_layers\n input_layer = X\n \n\n for ix_layer in range(self.n_layers - 1):\n n_examples = input_layer.shape[0]\n if self.bias_flag:\n \n input_layer = np.concatenate((np.ones([n_examples ,1]) ,input_layer), axis=1)\n A[ix_layer] = input_layer\n Z[ix_layer + 1] = np.matmul(input_layer, self.theta_weights[ix_layer].transpose() )\n #print(Z[ix_layer+1])\n \n output_layer = g(Z[ix_layer + 1])\n \n input_layer = output_layer\n #print(Z)\n A[self.n_layers - 1] = output_layer\n return A, Z\n\n\n def sigmoid(self, z):\n \n result = 1.0 / (1.0 + np.exp(-z))\n return result\n\n def relu(self, z):\n \n if np.isscalar(z):\n result = np.max((z, 0))\n else:\n zero_aux = np.zeros(z.shape)\n meta_z = np.stack((z , zero_aux), axis = -1)\n result = np.max(meta_z, axis = -1)\n return result\n\n def sigmoid_derivative(self, z):\n \n result = self.sigmoid(z) * (1 - self.sigmoid(z))\n return result\n\n def relu_derivative(self, z):\n \n result = 1 * (z > 0)\n return 
result","sub_path":"homeworkMLP/mlp_kri.py","file_name":"mlp_kri.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"260923031","text":"from pathlib import Path\nfrom pathlib import PurePosixPath\n\n# le nom de notre fichier jouet \nnom = 'fichier-temoin'\n# on crée un objet de la classe Path, associé au nom de fichier\npath = Path(nom)\nprint(path)\n# si j'écris dedans je le crée\nwith open(nom, 'w', encoding='utf-8') as output:\n output.write('0123456789\\n')\nprint(path.stat())\n\nmtime = path.stat().st_mtime\nfrom datetime import datetime\nmtime_datetime = datetime.fromtimestamp(mtime)\nprint(mtime_datetime)\nprint(f\"{mtime_datetime:%H:%M}\")\n\n# ou encore mieux, si je veux détruire \n# seulement dans le cas où il existe je peux aussi faire\ntry: \n path.unlink()\nexcept FileNotFoundError:\n print(\"no need to remove\")\n\nprint(\"*\"*100) \ndirpath = Path('F:\\Python3\\S3')\n# tous les fichiers *.json dans le répertoire data/\nfor json in dirpath.glob(\"*.*\"):\n print(json)","sub_path":"S3/fichiers3.py","file_name":"fichiers3.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"153962698","text":"#!/usr/bin/python\nimport os\nimport subprocess\n\naptPackages = [\"nmap\", \"python-pip\"]\n\nos.system(\"apt install -y \" + \" \".join(aptPackages))\n\nrequirements = open(\"requirements.txt\", \"r\").read().split(\"\\n\")\nfor module in requirements:\n if module != \"\":\n os.system(\"pip install '\" + module + \"'\")\nos.system(\"pip install cryptography==2.4.2\")","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"124527797","text":"from django import forms\nfrom proyectoferreteria.apps.gestionadmin.models import Categoria\n\nclass CategoriaForm(forms.ModelForm):\n\n class Meta:\n model = Categoria\n \n fields = [\n 'Id_Categoria',\n 'Descripcion_Categoria'\n ]\n\n labels = {\n 'Id_Categoria':'Id de la categoría',\n 'Descripcion_Categoria':'Descripción de la categoría'\n }\n\n widgets = {\n 'Id_Categoria':forms.TextInput(attrs={'class':'form-control'}),\n 'Descripcion_Categoria':forms.TextInput(attrs={'class':'form-control'}),\n }","sub_path":"proyectoferreteria/apps/gestionadmin/formularios/categoria_form.py","file_name":"categoria_form.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"168554830","text":"import numpy as np\r\nfrom collections import Counter\r\nfrom itertools import product, chain\r\nimport math\r\nfrom scipy.signal import argrelextrema\r\nimport pickle\r\nimport networkx as nx\r\n\r\n\r\nclass Graph:\r\n def __init__(self, parameter, lyap_exp, transition_graph, pattern_label, clustering_coeff, out_strength, time_apparence):\r\n print('Processing...')\r\n self.parameter = parameter #the parameter of the rossler oscillator\r\n \r\n self.lyap_exp = lyap_exp #the lyapunov exponent of the oscillator\r\n \r\n self.transition_graph = transition_graph #the adjacency matrix of the graph\r\n \r\n self.pattern_label = pattern_label #the labels of the patterns/nodes\r\n \r\n self.clustering_coeff = clustering_coeff #the clustering coefficient of each node\r\n \r\n self.out_strength = out_strength #the out_strength of each node\r\n \r\n self.time_apparence = time_apparence #the matrix of shape = (node, time) with \r\n #entries the tells you if that node \r\n #appears in ceartain time (1) or does not (0)\r\n\r\ndef calc_MI(X,Y,bins):\r\n\r\n c_XY = np.histogram2d(X,Y,bins)[0]\r\n c_X = np.histogram(X,bins)[0]\r\n c_Y = np.histogram(Y,bins)[0]\r\n\r\n H_X = shan_entropy(c_X)\r\n H_Y = shan_entropy(c_Y)\r\n H_XY = shan_entropy(c_XY)\r\n\r\n MI = H_X + H_Y - H_XY\r\n return MI\r\n\r\ndef shan_entropy(c):\r\n c_normalized = c / float(np.sum(c))\r\n c_normalized = c_normalized[np.nonzero(c_normalized)]\r\n H = -sum(c_normalized* np.log2(c_normalized))\r\n return H\r\n\r\ndef find_lag(data, stepSize, tau_max = 200, bins =200, plot = False ):\r\n #find usable time delay via mutual information\r\n mis = []\r\n\r\n N = len(data)\r\n for tau in range(1, tau_max):\r\n M = N - tau\r\n unlagged = data[0:M]\r\n lagged = data[tau:N]\r\n mis.append(calc_MI(unlagged, lagged, bins = bins))\r\n best_tau = 0\r\n # mis.append(mutual_information_2d(unlagged, lagged, normalized=True))\r\n for i in mis:\r\n if i < 1/math.e:\r\n best_tau = mis.index(i)\r\n break\r\n if best_tau != 0:\r\n print('criterio e')\r\n pass\r\n else:\r\n #print('criterio minimo')\r\n mis = np.array(mis)\r\n\r\n minimun = argrelextrema(mis, np.less, order = int(1/stepSize)) \r\n\r\n #print(minimun)\r\n best_tau = minimun[0][0] + 1\r\n\r\n if plot == False:\r\n return best_tau\r\n\r\n elif plot == True:\r\n\r\n tau_points = np.arange(1, tau_max)\r\n #plot time delay embedding\r\n fig = plt.plot(tau_points, mis), plt.xlabel('tau'), plt.ylabel('Mutual Information')\r\n return best_tau, fig\r\n\r\ndef mean_derivative(data, stepSize):\r\n num_points = len(data)\r\n p = []\r\n for point in range(num_points - 1):\r\n p.append((data[point + 1] - data[point])/stepSize)\r\n\r\n return p\r\n\r\ndef M_p(p):\r\n \r\n \"\"\"M_p is the threshold for p\"\"\"\r\n \r\n p = np.array(p)\r\n return np.average(np.abs(p))\r\n\r\ndef symbolize_point(p, M_p):\r\n \r\n \"\"\"It symbolize the time series with the criterion show below \"\"\"\r\n \r\n p = np.array(p)\r\n symb = []\r\n for val in p:\r\n if val >= M_p:\r\n symb.append('R')\r\n elif val > 0 and val < M_p:\r\n symb.append('r')\r\n elif val == 0:\r\n symb.append('e')\r\n elif val < 0 and val > - M_p:\r\n symb.append('d')\r\n elif val <= -M_p:\r\n symb.append('D')\r\n\r\n return symb\r\n\r\ndef delay_embedding(data, emb_dim, delay):\r\n \"\"\"It creats the embbeding phase space using the delay \r\n \r\n and the embbeding dimmesion passed\"\"\"\r\n \r\n N = len(data)\r\n M = N - (emb_dim - 1)*delay\r\n delay_vec = []\r\n for i in range(emb_dim):\r\n for time in range(M):\r\n 
delay_vec[time][i] = data[time + i*delay]\r\n\r\n return delay_vec\r\n\r\n\r\ndef symbolize_vector(symb_points, emb_dim, delay):\r\n N = len(symb_points)\r\n M = N - (emb_dim - 1)*delay\r\n symb_vec = []\r\n time_points = dict()\r\n for time in range(M):\r\n temp_vector = []\r\n for i in range(emb_dim):\r\n temp_vector.append(symb_points[time + i*delay])\r\n symb_vec.append(''.join(temp_vector))\r\n if not symb_vec[time] in time_points:\r\n time_points[symb_vec[time]] = [time]\r\n else:\r\n time_points[symb_vec[time]].append(time)\r\n\r\n return symb_vec, time_points, M\r\n\r\ndef trasitional_graph(data_all, stepSize, emb_dim):\r\n\r\n data = data_all\r\n lag = find_lag(data, stepSize, tau_max = 50, bins = 200, plot = False)\r\n delay = lag\r\n print('delay = ', delay)\r\n\r\n histogram = []\r\n\r\n p = mean_derivative(data, stepSize)\r\n M = M_p(p)\r\n\r\n symb = symbolize_point(p, M)\r\n\r\n\r\n symb_vector, time_points, num_timePoints = symbolize_vector(symb, emb_dim,delay)\r\n\r\n\r\n all_edges = [(symb_vector[i], symb_vector[i + 1] ) for i in range(len(symb_vector)-1)]\r\n\r\n numEdges = Counter(all_edges)\r\n\r\n u, indices = np.unique(np.array(symb_vector),return_index=True)\r\n\r\n num_upatters = len(u)\r\n\r\n\r\n #possible edges with the unique patters\r\n possible_edges = list(product(u ,repeat = 2))\r\n\r\n possible_edges_dict = dict()\r\n for key in possible_edges:\r\n if key in numEdges:\r\n possible_edges_dict[key] = numEdges[key]\r\n else:\r\n possible_edges_dict[key] = 0\r\n\r\n\r\n matrix = np.zeros(shape=(num_upatters, num_upatters),dtype= int)\r\n time_apparence = np.zeros(shape= (num_upatters, num_timePoints ), dtype=int)\r\n\r\n count_coulumn = 0\r\n count_raw = 0\r\n patterns =[]\r\n for key in possible_edges_dict:\r\n if count_coulumn < num_upatters - 1:\r\n matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n count_coulumn += 1\r\n if count_raw == 0:\r\n patterns.append(key[1])\r\n\r\n else:\r\n pass\r\n\r\n elif count_coulumn == num_upatters - 1:\r\n matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n if count_raw == 0:\r\n patterns.append(key[1])\r\n else:\r\n pass\r\n count_coulumn = 0\r\n count_raw += 1\r\n clustering_coeff = []\r\n G = nx.from_numpy_matrix(matrix)\r\n clustering = nx.clustering(G, weight= 'weight')\r\n\r\n for key in clustering:\r\n clustering_coeff.append(clustering[key])\r\n\r\n count_pattern = 0\r\n for pattern in patterns:\r\n time = time_points[pattern]\r\n for t in time:\r\n\r\n time_apparence[count_pattern, t ] = 1\r\n count_pattern += 1\r\n\r\n out_strength = np.sum(matrix, axis = 1, dtype= int)\r\n\r\n return matrix, patterns, clustering_coeff, out_strength, time_apparence\r\n\r\ndef degree_freq(data_all, stepSize, emb_dim):\r\n # delays = []\r\n # for node in range(90):\r\n # data = data_all[:, node]\r\n # lag = find_lag(data, stepSize= downsample, tau_max = 20, bins = 200, plot = False)\r\n # delays.append(lag)\r\n \r\n # delay = int(np.average(np.array(delays)))\r\n delay = 3\r\n #print('delay = ', delay)\r\n \r\n histogram = []\r\n for node in range(90):\r\n data = data_all[:, node]\r\n #lag = find_lag(data, stepSize= downsample, tau_max = 20, bins = 200, plot = False)\r\n \r\n \r\n p = mean_derivative(data, stepSize)\r\n M = M_p(p)\r\n \r\n symb = symbolize_point(p, M)\r\n\r\n \r\n symb_vector = symbolize_vector(symb, emb_dim, delay = delay)\r\n \r\n \r\n #the last element has not an out-edge\r\n symb_vector.remove(symb_vector[-1])\r\n \r\n freq = dict()\r\n \r\n u, indices = 
np.unique(np.array(symb_vector),return_index=True)\r\n for pattern in u:\r\n freq[pattern] = symb_vector.count(pattern)\r\n \r\n \r\n for key in freq:\r\n histogram.append(freq[key])\r\n\r\n # all_edges = [(symb_vector[i], symb_vector[i + 1] ) for i in range(len(symb_vector)-1)]\r\n \r\n # numEdges = Counter(all_edges)\r\n \r\n # u, indices = np.unique(np.array(symb_vector),return_index=True)\r\n \r\n # num_upatters = len(u)\r\n \r\n \r\n # #possible edges with the unique patters\r\n # possible_edges = list(product(u ,repeat = 2))\r\n \r\n # possible_edges_dict = dict()\r\n # for key in possible_edges:\r\n # if key in numEdges:\r\n # possible_edges_dict[key] = numEdges[key]\r\n # else:\r\n # possible_edges_dict[key] = 0\r\n \r\n \r\n # matrix = np.zeros(shape=(num_upatters, num_upatters))\r\n \r\n \r\n # count_coulumn = 0\r\n # count_raw = 0\r\n # for key in possible_edges_dict:\r\n # if count_coulumn < num_upatters - 1:\r\n # matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n # count_coulumn += 1\r\n \r\n # elif count_coulumn == num_upatters - 1:\r\n # matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n # count_coulumn = 0\r\n # count_raw += 1\r\n \r\n # np.fill_diagonal(matrix, 0)\r\n # weight = np.sum(matrix, axis=0)\r\n # weight = weight/np.max(weight)\r\n\r\n # for i in range(len(weight)):\r\n # frequencies.append(weight[i])\r\n \r\n return np.array(histogram)/max(histogram)\r\n\r\ndef positive_values(data):\r\n \"\"\"It transforms the points from the time series in\r\n positive values neccesary to create the visibility graph\"\"\"\r\n\r\n minimun = np.min(data)\r\n data_positive = np.array(data) + abs(minimun) + 1\r\n\r\n return data_positive\r\n\r\ndef horizontal_vg(data):\r\n\r\n numPoints = len(data)\r\n hvg = np.zeros(shape= (numPoints, numPoints))\r\n\r\n data = positive_values(data)\r\n\r\n for i in range(numPoints - 1):\r\n neighbor = []\r\n\r\n hvg[i, i + 1], hvg[i + 1, i] = 1, 1\r\n neighbor.append(data[i + 1 ])\r\n for j in range(i + 2, numPoints):\r\n if data[i] > max(neighbor):\r\n if data[j] > max(neighbor):\r\n hvg[i, j], hvg[j, i] = 1, 1\r\n neighbor.append(data[j])\r\n else:\r\n neighbor.append(data[j])\r\n pass\r\n else:\r\n break\r\n return hvg\r\n\r\n\r\ndef hvg_extended(t_series, time_step):\r\n \r\n downsample = time_step\r\n num_nodes = len(t_series[0])\r\n \r\n histogram = []\r\n for node in range(num_nodes):\r\n data = t_series[:, node]\r\n hvg = horizontal_vg(data)\r\n \r\n weight = np.sum(hvg, axis = 0)\r\n\r\n histogram.append(weight)\r\n \r\n histogram = np.array(list(chain.from_iterable(histogram)), dtype=int)\r\n \r\n \r\n \r\n return histogram/max(histogram)","sub_path":"graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":10830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"367332103","text":"import sys\nsys.path.append(\"/srv/scratch/oursu/3Dgenome/src/kCCA/ChIAPET/pyrcca/\")\nimport rcca\nimport numpy as np\n\ndef main():\n parser=OptionParser()\n parser.add_option('--out',dest='out')\n parser.add_option('--matrices',dest='ms',default='',help='Comma delimited, .npy files, nodes should be aligned')\n opts,args=parser.parse_args()\n\n matrices=opts.ms.split(',')\n m1=np.load(matrices[0])\n m2=np.load(matrices[1])\n\n # Set up Pyrcca\n cca = rcca.CCA(kernelcca=False, numCC=2, reg=0.5)\n # Find canonical components\n training=cca.train([m1,m2])\n\n\nmain()\n","sub_path":"3Dutils/cca_2_matrices.py","file_name":"cca_2_matrices.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"292187226","text":"\n\nfrom xai.brain.wordbase.verbs._guarantee import _GUARANTEE\n\n#calss header\nclass _GUARANTEEING(_GUARANTEE, ):\n\tdef __init__(self,): \n\t\t_GUARANTEE.__init__(self)\n\t\tself.name = \"GUARANTEEING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"guarantee\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_guaranteeing.py","file_name":"_guaranteeing.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"323234078","text":"'''Server module.\n\nHandle and response challenge requests from the frontend server.\n\n'''\n\nimport sys\nimport json\nimport traceback\nfrom collections import deque\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop, PollIOLoop\nfrom tornado.web import Application\nfrom tornado.websocket import WebSocketHandler\nimport PyExt\nimport Privilege\nimport Config\nfrom StdChal import StdChal\n\n\nclass EvIOLoop(PollIOLoop):\n '''Tornado compatible ioloop interface.'''\n\n def initialize(self, **kwargs):\n '''Initialize.'''\n\n super().initialize(impl=PyExt.EvPoll(), **kwargs)\n\n\nclass JudgeHandler(WebSocketHandler):\n '''Judge request handler.\n\n Static attributes:\n chal_running_count (int): Number of current running challenges.\n chal_queue (deque): Pending challenges.\n\n '''\n\n chal_running_count = 0\n chal_queue = deque()\n\n @staticmethod\n @gen.coroutine\n def start_chal(obj, websk):\n '''Start a challenge.\n\n Check the challenge config, issue judge tasks, then report the result.\n\n Args:\n obj (dict): Challenge config.\n websk (WebSocketHandler): Websocket object.\n\n Returns:\n None\n\n '''\n\n # The worst exception, there is no chal_id in the obj.\n chal_id = None\n try:\n chal_id = obj['chal_id']\n code_path = obj['code_path']\n res_path = obj['res_path']\n test_list = obj['test']\n metadata = obj['metadata']\n comp_type = obj['comp_type']\n check_type = obj['check_type']\n\n test_paramlist = list()\n assert comp_type in ['g++', 'clang++', 'makefile', 'python3']\n assert check_type in ['diff', 'ioredir']\n\n for test in test_list:\n test_idx = test['test_idx']\n memlimit = test['memlimit']\n timelimit = test['timelimit']\n data_ids = test['metadata']['data']\n for data_id in data_ids:\n test_paramlist.append({\n 'in': res_path + '/testdata/%d.in'%data_id,\n 'ans': res_path + '/testdata/%d.out'%data_id,\n 'timelimit': timelimit,\n 'memlimit': memlimit,\n })\n\n chal = StdChal(chal_id, code_path, comp_type, check_type, \\\n res_path, test_paramlist, metadata)\n result_list, verdict = yield chal.start()\n\n result = []\n idx = 0\n for test in test_list:\n test_idx = test['test_idx']\n data_ids = test['metadata']['data']\n total_runtime = 0\n total_mem = 0\n total_status = 0\n for data_id in data_ids:\n runtime, peakmem, status = result_list[idx]\n total_runtime += runtime\n total_mem += peakmem\n total_status = max(total_status, status)\n idx += 1\n\n result.append({\n 'test_idx': test_idx,\n 'state': total_status,\n 'runtime': total_runtime,\n 'peakmem': total_mem,\n 'verdict': ''\n })\n\n websk.write_message(json.dumps({\n 'chal_id': chal_id,\n 'verdict': verdict,\n 'result': result,\n }))\n\n except Exception:\n traceback.print_exception(*sys.exc_info())\n websk.write_message(json.dumps({\n 'chal_id': chal_id,\n 'verdict': None,\n 'result': None,\n }))\n\n finally:\n JudgeHandler.chal_running_count -= 1\n JudgeHandler.emit_chal()\n\n @staticmethod\n def emit_chal(obj=None, websk=None):\n '''Emit a challenge to the queue and trigger the start_chal.\n\n Args:\n obj (dict, optional): Challenge config.\n websk (WebSocketHandler): Websocket object.\n\n Returns:\n None\n\n '''\n\n if obj is not None:\n JudgeHandler.chal_queue.append((obj, websk))\n\n while len(JudgeHandler.chal_queue) > 0 \\\n and JudgeHandler.chal_running_count < Config.TASK_MAXCONCURRENT:\n chal = JudgeHandler.chal_queue.popleft()\n JudgeHandler.chal_running_count += 1\n IOLoop.instance().add_callback(JudgeHandler.start_chal, *chal)\n\n def open(self):\n '''Handle open 
event'''\n\n        print('Frontend connected')\n\n    def on_message(self, msg):\n        '''Handle message event'''\n\n        obj = json.loads(msg)\n        JudgeHandler.emit_chal(obj, self)\n\n    def on_close(self):\n        '''Handle close event'''\n\n        print('Frontend disconnected')\n\n\ndef init_websocket_server():\n    '''Initialize websocket server.'''\n\n    app = Application([\n        (r'/judge', JudgeHandler),\n    ])\n    app.listen(2501)\n\n\ndef main():\n    '''Main function.'''\n\n    Privilege.init()\n    PyExt.init()\n    StdChal.init()\n    IOLoop.configure(EvIOLoop)\n\n    init_websocket_server()\n\n    IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"583742562","text":"from django import forms\nfrom .widgets import ChainedSelectWidget\nfrom .models import Child\n\n\nclass SponsorForm(forms.Form):\n child = forms.IntegerField()\n\nclass FilterForm(forms.Form):\n gender = forms.ChoiceField(choices=[(x, x) for x in ('---------', 'MALE', 'FEMALE')])\n age = forms.ChoiceField(choices=[(x, x) for x in range(1, 18)], required=False)\n\n def __init__(self, *args, **kwargs):\n super(FilterForm, self).__init__(*args, **kwargs)\n\n if 0 == len(self.data):\n self.fields['age'].queryset = Child.objects.none()\n\n # assign a widget to second select field\n self.fields['age'].widget = ChainedSelectWidget(\n parent_name='gender', # the name of parent field\n app_name='sponsorship', # the name of model's application\n model_name='child', # the name of a model with the method\n method_name='get_children', # the name of queryset method\n )","sub_path":"sponsorship/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"63717431","text":"import fingerprint\nimport os\nimport librosa\nimport glob\nfrom hashlib import sha1\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom collections import Counter\nimport time\n\nclass Database:\n def __init__(self):\n self.song_table = pd.DataFrame({'song_id':[], 'name':[]}) #song table has two columns\n self.fingerprint_table = pd.DataFrame({'song_id':[], 'hash':[], 'offset':[]}) #fingerprint table has three columbs\n \n def addRow(self, table, row):\n table.loc[len(table)] = row\n \n def parse_file_hash(self, filename):\n '''\n Generate a song_id using \n sha1 hashing algorithm. \n '''\n s = sha1()\n with open(filename , \"rb\") as f:\n while True:\n buf = f.read(2**20)\n if not buf: \n break\n s.update(buf)\n\n return s.hexdigest().upper()\n \n def add(self, f): # f is a wav file\n x, fs = librosa.load(f)\n song_id = self.parse_file_hash(f)\n self.addRow(self.song_table, [song_id, f])\n hashes = set(fingerprint.fingerprint(x, fs))\n for hash_, offset in hashes:\n self.addRow(self.fingerprint_table, [song_id, hash_, int(offset)])\n print(f'{f} is added')\n \n def get_song_by_id(self, song_id):\n return self.song_table[self.song_table['song_id'] == song_id].values[0][1]\n \n def get_song_hashes_count(self, song_id):\n return len(self.fingerprint_table.loc[self.fingerprint_table['song_id'] == song_id])\n \n def train(self, training_dir): # add all the wavs in a directory to the database\n t0 = time.process_time()\n i = 0\n for f in glob.iglob(training_dir+'/*.wav'):\n self.add(f)\n i += 1\n t = time.process_time()\n print(f'training time: {t-t0}; number of files added: {i}')\n \n def save(self, s, f): #s, f are both csv files. \n self.song_table.to_csv(s)\n self.fingerprint_table.to_csv(f)\n \n def load(self, s, f):\n self.song_table = pd.read_csv(s, index_col=0)\n self.fingerprint_table = pd.read_csv(f, index_col=0)\n \n def find_matches(self, f): # f is a wav file\n x, fs = librosa.load(f)\n hashes = set(fingerprint.fingerprint(x, fs))\n return self.return_matches(hashes)\n \n def return_matches(self, hashes): # return all the (id, offset_difference) tuple\n # based on finding the rows that match the hashes the target has\n mapper = {}\n for hash_, offset in hashes:\n mapper[hash_] = offset\n values = mapper.keys() #hashe values\n results = []\n for unique_value in set(values):\n matched = self.fingerprint_table.loc[self.fingerprint_table['hash'] == unique_value]\n if len(matched) == 0:\n continue\n else:\n for id_,offset in matched[['song_id', 'offset']].values:\n results.append((id_, int(offset-mapper[unique_value])))\n return results\n \n def align_matches(self, matches):\n if len(matches) == 0:\n return {}\n cnt = Counter(matches)\n # for match in cnt:\n # cnt[match] /= self.get_song_hashes_count(match[0])\n best_guess = cnt.most_common(1)[0] # find the one that has the most matches with the target audio\n largest_count = best_guess[1]\n song_id = best_guess[0][0]\n offset_difference = best_guess[0][1]\n song_name = self.get_song_by_id(song_id)\n nsec = round(float(largest_count)/44100*2048, 5)\n return {\n \"SONG_ID\" : song_id,\n \"SONG_NAME\" : song_name,\n \"SIMILAR_FEATURES_COUNT\" : largest_count,\n \"OFFSET_DIFFERENCE\" : offset_difference,\n \"OFFSET_DIFFERENCE_IN_SEC\": nsec\n }\n \n def query(self, f, log=True):\n t0 = time.process_time()\n output = self.align_matches(self.find_matches(f))\n t = time.process_time()\n if log:\n print(\"Query time:\", t-t0)\n return 
output","sub_path":"core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"202937403","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A module that maps channels to its respective bitrate and resolutions to its\nrespective height, bitrate and profile.\"\"\"\n\nclass ChannelData():\n\n def __init__(self, aac_bitrate, opus_bitrate):\n self.aac_bitrate = aac_bitrate\n self.opus_bitrate = opus_bitrate\n\nclass ResolutionData():\n\n def __init__(self, width, height, h264_bitrate, vp9_bitrate, h264_profile):\n self.width = width\n self.height = height\n self.h264_bitrate = h264_bitrate\n self.vp9_bitrate = vp9_bitrate\n self.h264_profile = h264_profile\n\n def __eq__(self, other):\n return self.height == other.height\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __ge__(self, other):\n return self.height >= other.height\n\n# A map of channels to ChannelData objects which contains the AAC and Opus\n# bitrate information of a given channel.\nCHANNEL_MAP = {\n 2: ChannelData(128, 64),\n 6: ChannelData(192, 96),\n}\n\n# A map of resolutions to ResolutionData objects which contain\n# the height and H264 bitrate of a given resolution.\nRESOLUTION_MAP = {\n '144p': ResolutionData(256, 144, '108k', '95k', 'baseline'),\n '240p': ResolutionData(426, 240, '242k', '150k', 'main'),\n '360p': ResolutionData(640, 360, '400k', '276k', 'main'),\n '480p': ResolutionData(854, 480, '2M', '750k', 'main'),\n '576p': ResolutionData(1024, 576, '2.5M', '1M', 'main'),\n '720p': ResolutionData(1280, 720, '3M', '2M', 'main'),\n '720p-hfr': ResolutionData(1280, 720, '4M', '4M', 'main'),\n '1080p': ResolutionData(1920, 1080, '5M', '4M', 'high'),\n '1080p-hfr': ResolutionData(1920, 1080, '6M', '6M', 'high'),\n '2k': ResolutionData(2560, 1440, '9M', '6M', 'high'),\n '2k-hfr': ResolutionData(2560, 1440, '14M', '9M', 'high'),\n '4k': ResolutionData(3840, 2160, '17M', '12M', 'uhd'),\n '4k-hfr': ResolutionData(3840, 2160, '25M', '18M', 'uhd'),\n}\n\nclass Metadata():\n\n def __init__(self, pipe, channels = None, res_string = None,\n audio_codec = None, video_codec = None, lang=None,\n hardware=None):\n self.pipe = pipe\n if channels:\n self.channels = channels\n self.audio_codec = audio_codec\n self.channel_data = CHANNEL_MAP[channels]\n self.lang = lang\n if res_string:\n self.res = res_string\n self.video_codec = video_codec\n self.resolution_data = RESOLUTION_MAP[res_string]\n self.hardware = hardware\n","sub_path":"streamer/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"431401350","text":"def possible(lawn):\r\n # Get the maximum heights in each row or column\r\n row_max = [max(row) for row in lawn]\r\n col_max = [max([row[col_num] for row in lawn]) for col_num in range(len(lawn[0]))]\r\n # For each square, check if it could have been reached\r\n for row_num in range(len(lawn)):\r\n for col_num in range(len(lawn[row_num])):\r\n if (lawn[row_num][col_num] < row_max[row_num] and \r\n lawn[row_num][col_num] < col_max[col_num]):\r\n return \"NO\"\r\n # If we got this far then the layout is possible\r\n return \"YES\"\r\n\r\n\r\nfin = open(\"input.txt\", \"r\")\r\nfout = open(\"output.txt\", \"w\")\r\n\r\nt = int(fin.readline())\r\n\r\nfor i in range(t):\r\n dimensions = list(map(int, fin.readline().split(\" \")))\r\n lawn = []\r\n for row in range(dimensions[0]):\r\n lawn += [list(map(int, fin.readline().split(\" \")))]\r\n fout.write(\"Case #\" + str(i + 1) + \": \" + possible(lawn) + \"\\n\")\r\n\r\nfin.close()\r\nfout.close()\r\n","sub_path":"solutions_2449486_0/Python/eliotball/lawnmower.py","file_name":"lawnmower.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"633408612","text":"# Definition for a binary tree node.\n#class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object): #recursive DFS \n def constructMaximumBinaryTree(self, nums):\n R = TreeNode(None)\n self.helper(R, nums)\n return R\n \n \n def helper(self, root, nums):\n if nums == []:\n return None\n M = max(nums)\n mid = nums.index(M)\n root.val = M\n if nums[:mid] != []:\n root.left = TreeNode(None) #create a new leaf node\n self.helper(root.left, nums[:mid])\n if nums[mid+1:] != []:\n root.right = TreeNode(None)\n self.helper(root.right, nums[mid+1:])\n \n \n \n \n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n # Given an integer array with no duplicates. A maximum tree building on this array is defined as follow:\n # The root is the maximum number in the array.\n # The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.\n # The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.\n # Construct the maximum tree by the given array and output the root node of this tree.\n\n","sub_path":"Maximum Binary Tree.py","file_name":"Maximum Binary Tree.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"79133994","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\n\ndef find_rect_of_target_color(image):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)\n h = hsv[:, :, 0]\n s = hsv[:, :, 1]\n mask = np.zeros(h.shape, dtype=np.uint8)\n mask[((h < 20) | (h > 200)) & (s > 128)] = 255\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rects = []\n # rects : [x,y,width,height]\n for contour in contours:\n approx = cv2.convexHull(contour)\n rect = cv2.boundingRect(approx)\n rects.append(np.array(rect))\n return rects\n\ndef camera_system():\n capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n # while cv2.waitKey(30) < 0:\n _, frame = capture.read()\n rects = find_rect_of_target_color(frame)\n capture.release()\n cv2.destroyAllWindows()\n return rects\n\nif __name__ == \"__main__\":\n capture = cv2.VideoCapture(0)\n while cv2.waitKey(30) < 0:\n _, frame = capture.read()\n rects = find_rect_of_target_color(frame)\n if len(rects) > 0:\n rect = max(rects, key=(lambda x: x[2] * x[3]))\n cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)\n circle_point = np.array(rect[0:2] + (rect[2:4])/2, dtype=int)\n circle_point = np.round(circle_point)\n print(circle_point)\n cv2.circle(frame,tuple(circle_point),10,(0,0,255),-1)\n cv2.imshow('red', frame)\n print(rect)\n capture.release()\n cv2.destroyAllWindows()\n","sub_path":"inRaspberryPi/CameraSystem.py","file_name":"CameraSystem.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"130064133","text":"import pymysql\nfrom analog.bin.exception.Exceptions import *\nfrom pymysql.cursors import Cursor\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass db:\n\n def __init__(self, config, controller=None):\n\n self.section_name = 'Database'\n self.database_name = 'WebLog_Analysis'\n self.connect = None\n self.config = config\n self.controller = controller\n\n\n def connect_db(self):\n\n self.connect = pymysql.connect(host=self.config.get(self.section_name, 'host'),\n port=int(self.config.get(self.section_name, 'port')),\n user=self.config.get(self.section_name, 'user'),\n password=self.config.get(self.section_name, 'password'),\n database=self.config.get(self.section_name, 'database'),\n charset=self.config.get(self.section_name, 'charset'))\n return self.connect\n\n\n def close(self):\n if self.connect:\n self.connect.close()\n\n\n def execute_many(self, sql, args) -> Cursor:\n\n try:\n cursor = self.connect.cursor()\n cursor.executemany(sql, args)\n except Exception as e:\n self.connect_db()\n cursor = self.connect.cursor()\n cursor.executemany(sql, args)\n return cursor\n\n\n def execute(self, sql: str, args: object = None) -> Cursor:\n try:\n cursor = self.connect.cursor()\n cursor.execute(sql, args)\n except Exception as e:\n self.connect_db()\n cursor = self.connect.cursor()\n cursor.execute(sql, args)\n return cursor\n\n\n def commit(self):\n self.connect.commit()\n\n\n def update(self, *args):\n cursor = self.connect.cursor()\n try:\n arguments = {\"table_name\": args[0], \"values\": \"\",\n \"conditions\": \"WHERE %s\" % args[2] if len(args) > 2 else \"\"}\n flag = False\n string = \"\"\n if isinstance(args[1], dict):\n for item in args[1].items():\n if flag:\n string += \",\"\n\n string += \"{0} = {1}\".format(item[0], item[1])\n\n if flag is False:\n flag = True\n\n arguments['values'] = string\n cursor.execute(\"UPDATE :table_name SET :values :conditions\", arguments)\n self.connect.commit()\n except Exception:\n return False\n\n\n def create_db(self):\n _connection = None\n try:\n _connection = pymysql.connect(host=self.config.get(self.section_name, 'host'),\n user=self.config.get(self.section_name, 'user'),\n password=self.config.get(self.section_name, 'password'),\n charset=self.config.get(self.section_name, 'charset'))\n cursor = _connection.cursor()\n cursor.execute('create database if not exists {}'.format(self.database_name))\n _connection.commit()\n except Exception as e:\n raise DatabaseException(\"Can't create database, make sure your config are correct!\")\n finally:\n if _connection:\n _connection.close()\n return True\n\n\nif __name__ == \"__main__\":\n a = db()\n","sub_path":"analog/bin/lib/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"233764296","text":"from sklearn.manifold import TSNE\nfrom scipy.cluster.hierarchy import dendrogram\nfrom pytransform3d.rotations import *\nfrom varname import nameof\n\nimport sys\nimport os\n\nsys.path.append(\"..\")\nimport cv2\nfrom os import listdir\nfrom os.path import isfile, join\n\n# matplotlib.use('tkagg')\n# matplotlib.use('WebAgg')\n\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport phenograph\nimport io\n\n\ndef getLastDirectory(inputDir):\n if inputDir.endswith('/'):\n inputDir = inputDir[-1]\n return os.path.split(inputDir)[-1]\n\n\nday3WT = '/alder/home/soobink/rotarod_ML10/output/Day3_WT'\nday3YAC = '/alder/home/soobink/rotarod_ML10/output/Day3_YAC'\nday4WT = '/alder/home/soobink/rotarod_ML10/output/Day4_WT'\nday4YAC = '/alder/home/soobink/rotarod_ML10/output/Day4_YAC'\nday3and4WT = '/alder/home/soobink/rotarod_ML10/output/Day3and4_WT'\nday3and4YAC = '/alder/home/soobink/rotarod_ML10/output/Day3and4_YAC'\n\npaths = [day3WT, day4WT, day3YAC, day4YAC, day3and4WT, day3and4YAC]\nperplexities = [20, 30, 100]\n\nfor perplexity in perplexities:\n for path in paths:\n print('--- Running \\'%s\\' with perplexity = %i. ---' % (path, perplexity))\n data_2d = [f for f in listdir(path) if (isfile(join(path, f)) and (not f.startswith('.')))]\n\n # data_3d = ['LD1_1580415036_3d.csv']\n coords_all_2d = []\n coords_all_3d = []\n dataset_name_2d = []\n dataset_name_3d = []\n\n # for f_2d, f_3d in zip(data_2d, data_3d):\n for f_2d in data_2d:\n coords_file = os.path.join(path, f_2d)\n dataset_name_2d = coords_file\n # coords_2d = pd.read_csv(coords_file, dtype=np.float, header=2, index_col=0)\n coords_2d = pd.read_csv(coords_file, dtype=float, header=0, index_col=0)\n coords_2d.dropna(axis=0, inplace=True)\n coords_2d = coords_2d.iloc[:90]\n coords_2d = coords_2d.values[:, 4:] # exclude first column\n coords_2d = np.delete(coords_2d, list(range(2, coords_2d.shape[1], 3)),\n axis=1) # delete every 3rd column of prediction score\n coords_all_2d.append(coords_2d)\n\n # coords_file = data_root + os.sep + f_3d\n # dataset_name_3d = coords_file.split('/')[-1].split('.')[0]\n # coords_3d = pd.read_csv(coords_file, header=2)\n # coords_3d = coords_3d.values[:, 1:] # exclude the index column\n # coords_3d = np.around(coords_3d.astype('float'), 2) # round to two decimal places\n # coords_3d = gaussian_filter1d(coords_3d, 5, axis=0) # smooth the data, the points were oscillating\n # coords_all_3d.append(coords_3d)\n\n\n coords_all_2d = np.vstack(coords_all_2d)\n # x_3d = coords_all_3d[:, ::3];\n # y_3d = coords_all_3d[:, 1::3];\n # z_3d = coords_all_3d[:, 2::3];\n x_2d = coords_all_2d[:, ::2];\n y_2d = coords_all_2d[:, 1::2];\n z_2d = np.zeros(x_2d.shape);\n coords_all_3d_trans = []\n # for i in np.arange(x_3d.shape[0]):\n\n k = 30 # K for k-means step of phenograph\n communities_2d, graph, Q = phenograph.cluster(coords_all_2d, k=k)\n n_clus_2d = np.unique(communities_2d).shape[0]\n\n # --end of phenograph\n\n # tsne_model = TSNE(n_components=2, random_state=2,perplexity=100,angle=0.1,init='pca',n_jobs= mp.cpu_count()-1)\n tsne_model = TSNE(n_components=2, random_state=2, perplexity=perplexity, angle=0.1, init='pca', n_jobs=-1)\n Y_2d = tsne_model.fit_transform(coords_all_2d)\n cmap = plt.cm.colors.ListedColormap(plt.cm.jet(np.linspace(0, 1, n_clus_2d)))\n plt.figure()\n plt.scatter(Y_2d[:, 0], Y_2d[:, 1],\n c=communities_2d,\n cmap=cmap,\n alpha=1.0)\n plt.colorbar(ticks=np.unique(communities_2d), label='Cluster#')\n plt.xlabel('TSNE1');\n plt.ylabel('TSNE2')\n\n name = 
getLastDirectory(path)\n plt.title(' 2D Body coordinate clusters: total frames %s\\n%s, perplexity = %i' % (\n str(len(communities_2d)), name, perplexity))\n\n plt.savefig(os.path.join('plots', name + 'p' + str(perplexity) + '.png'), format='png')\n plt.text(1, 0, path, ha='right', va='bottom', fontsize=7)\n plt.show()\n","sub_path":"featureImportance/tsne/test_ellenDataPhenograph.py","file_name":"test_ellenDataPhenograph.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"423075372","text":"import webapp2\nimport csv\nimport time\n\nfrom google.appengine.api import logservice\nfrom google.appengine.api import files\n\nimport config\n\nclass Level2Handler(webapp2.RequestHandler):\n def get(self):\n # Create a new Google Storage file\n filename = 'request.csv'\n gs_file = files.gs.create('/gs/%s/%s' % (config.gs_bucket_name, filename),\n mime_type='text/csv')\n with files.open(gs_file, 'a') as f:\n # Create a csv writer that outputs to the Google Storage file\n w = csv.writer(f)\n for r in logservice.fetch(start_time=time.time()-5*60):\n w.writerow([r.start_time,r.method,r.resource,\n r.status,r.latency,r.response_size,\n r.user_agent if r.user_agent else \"NULL\"])\n # Finalize the file\n files.finalize(gs_file)\n\n # Render a HTML link to the file in the reponse body\n link_format = 'https://storage.cloud.google.com/{gs_bucket}/{filename}'\n link = link_format.format(gs_bucket=config.gs_bucket_name, filename=filename)\n self.response.write('{link} '.format(link=link))\n\napp = webapp2.WSGIApplication([('/solution/level2', Level2Handler)], debug=True)\n","sub_path":"solution/level2.py","file_name":"level2.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"349160123","text":"from django.conf import settings\nfrom movie.models import Genre\n\nmovies_df = settings.MOVIE_GENRE_SEP_DF.copy()\n\nweights = {}\n\ndef dot_product(vector_1, vector_2): \n\treturn sum([ i*j for i,j in zip(vector_1, vector_2)])\n\ndef get_movie_score(movie_features, p): \n\treturn dot_product(movie_features, p)\n\t\ndef get_movie_recommendations(genresQ, n_recommendations):\n\tfor i,weight in enumerate([5,3,2]):\n\t\ttry:\n\t\t\tweights[genresQ[i].name] = weight\n\t\texcept IndexError:\n\t\t\tpass\n\n\tmovie_categories = movies_df.columns[1:]\n\tall_weights = []\n\tfor category in movie_categories:\n\t\tgenre = genresQ.filter(name__exact=category)\n\t\tif genre.exists():\n\t\t\tall_weights.append(weights[genre[0].name])\n\t\telse:\n\t\t\tall_weights.append(0)\n\n #we add a column to the movies_df dataset with the calculated score for each movie for the given user\n\tmovies_df['score'] = movies_df[movie_categories].apply(get_movie_score, args=([all_weights]), axis=1)\n\treturn movies_df.sort_values(by=['score'], ascending=False)['movieId'].values[:n_recommendations]\n","sub_path":"recommender/genre_recommender.py","file_name":"genre_recommender.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"26147972","text":"import itertools\ndef fun1(checked,cc):\n for ch in checked:\n q = 0\n for b in ch:\n if b in cc: q = q + 1\n if q == len(ch): return False\n \n return True\n\ndef solution(relation):\n answer,count = 0, 1\n kate = len(relation[0])\n kate_list = [i for i in range(kate)]\n checked = []\n while( count < kate+1 ):\n ccc = list(itertools.combinations(kate_list,count))\n for cc in ccc:\n tmp = []\n if fun1(checked,cc) == False:\n continue\n\n for c in cc:\n semi = []\n for r in relation: semi.append(r[c])\n tmp.append(semi)\n\n final = list(zip(*tmp))\n\n if len(final) == len(set(final)):\n checked.append(cc)\n answer += 1\n\n count = count + 1\n\n return answer\n \n\nprint(solution([[\"100\",\"ryan\",\"music\",\"2\"],[\"200\",\"apeach\",\"math\",\"2\"],[\"300\",\"tube\",\"computer\",\"3\"],[\"400\",\"con\",\"computer\",\"4\"],[\"500\",\"muzi\",\"music\",\"3\"],[\"600\",\"apeach\",\"music\",\"2\"]]) )","sub_path":"Programmers/Programmers_2020 Kakao Blind_후보키.py","file_name":"Programmers_2020 Kakao Blind_후보키.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"487422641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom utils import plot_classification_dataset, plot_2d_decisionboundary\n\n\nif __name__ == \"__main__\":\n # Load data\n data = np.load('data_3_logreg_b.npz')\n X, y = data['X'], data['y']\n print(X.shape)\n print(y.shape)\n \n Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,test_size=0.33)\n\n model = LogisticRegression(multi_class='ovr', solver='liblinear') \n # Fit and evaluate (compute test error) logistic regression on this 1d data set\n model.fit(Xtrain, ytrain) \n print(\"Test-Accuracy: {0}\".format(model.score(Xtest, ytest)))\n print(\"Train-Accuracy: {0}\".format(model.score(Xtrain, ytrain))) \n # DOC: Accuracy is pretty bad (around 0.5) as the 1 dimensional data with points of class 0\n # surrounding the points of class 1 is not possible to split in half! \n \n\n # Inspect the data set\n plot_classification_dataset(X, y)\n\n # Feature transformation (1d -> 2d)\n X2 = np.copy(X)\n X2.resize((X.shape[0], 2))\n print(X2.shape)\n for i in range(X2.shape[0]):\n X2[i][1] = X2[i][0] * X2[i][0]\n\n # split new 2d data set\n X2train, X2test, y2train, y2test = train_test_split(X2,y,test_size=0.33)\n\n # Fit logistic regression to new 2d data set \n # Evaluate the model (compute test error)\n model2 = LogisticRegression(multi_class='ovr', solver='liblinear') \n # Fit and evaluate (compute test error) logistic regression on this 1d data set\n model2.fit(X2train, y2train) \n print(\"Test-Accuracy: {0}\".format(model2.score(X2test, y2test)))\n print(\"Train-Accuracy: {0}\".format(model2.score(X2train, y2train)))\n\n # Visualize the decision boundary of the final model\n plot_2d_decisionboundary(model2, X2, y)\n\n #DOC: Performance for the 2-dim data is significantly better as the data can easier be split\n # by a linear curve as they are positioned in 2-dim space","sub_path":"logistic_regression_b_skeleton.py","file_name":"logistic_regression_b_skeleton.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"533030946","text":"# Note: no changes need to be made to this file...\n\n# IMPORTS:\n\nimport matplotlib.pyplot as plt\nimport warnings, random\nfrom math import pi, cos, sin, sqrt, floor, ceil, atan2\nfrom cmath import phase\n\n\n# FUNCTIONS:\n\ndef scatter(xs, ys, model=None):\n \"\"\"Plots data according to true and modeled outcomes.\n\n Keyword arguments:\n xs -- the values of the attributes\n ys -- the values of the true outcomes\n model -- the classification/regression model (default None)\n\n Return values:\n None\n \"\"\"\n # Wrap all y in lists, if provided as scalars\n scalar_y = type(ys[0]) is not list\n if scalar_y:\n ys = [[yi] for yi in ys]\n # Determine the x-range of the data\n x0s = [xi[0] for xi in xs]\n x1s = [xi[1] for xi in xs]\n range_x = ceil(1.1*max(-min(x0s), max(x0s), -min(x1s), max(x1s)))\n paint_x = [(xi/64.0-1.0)*range_x for xi in range(129)]\n # Generate subplots\n axes = len(ys[0])\n fig, axs = plt.subplots(1, axes, figsize=(6.4*axes, 4.8), squeeze=False)\n for n, ax in enumerate(axs[0]):\n # Determine the y-range of the data\n yns = [yi[n] for yi in ys]\n range_y = max(-min(yns), max(yns))\n # Plot the data\n data = ax.scatter(x0s, x1s, c=yns, edgecolors='w', cmap=plt.cm.RdYlBu, vmin=-range_y, vmax=range_y)\n # Paint background colors denoting the model predictions\n if hasattr(model, 'predict'):\n if scalar_y:\n paint_y = [[model.predict([xi, yi]) for xi in paint_x] for yi in paint_x]\n else:\n paint_y = [[model.predict([xi, yi])[n] for xi in paint_x] for yi in paint_x]\n ax.imshow(paint_y, origin='lower', extent=(-range_x, range_x, -range_x, range_x), vmin=-range_y, vmax=range_y, interpolation='bilinear', cmap=plt.cm.RdYlBu)\n # Draw dashed line at contour zero\n with warnings.catch_warnings(): # Ignore warning that zero-contour is absent\n warnings.simplefilter('ignore')\n ax.contour(paint_x, paint_x, paint_y, levels=[0.0], colors='k', linestyles='--', linewidths=1.0)\n else:\n ax.set_facecolor('#F8F8F8')\n # Finish the layout and display the figure\n ax.set_aspect('equal', 'box')\n ax.axis([-range_x, range_x, -range_x, range_x])\n ax.grid(True, color='k', linestyle=':', linewidth=0.5)\n ax.axhline(y=0, color='k', linestyle='-', linewidth=1.0)\n ax.axvline(x=0, color='k', linestyle='-', linewidth=1.0)\n ax.set_axisbelow(True)\n ax.set_xlabel(r'$x_1$')\n ax.set_ylabel(r'$x_2$')\n cbar = plt.colorbar(data, ax=ax).ax\n cbar.axhline(y=0.5, color='k', linestyle='--', linewidth=1.0)\n cbar.set_title(r'$y$' if axes == 1 else r'$y_{}$'.format(n+1))\n if hasattr(model, 'loss'):\n if scalar_y:\n loss = sum(model.loss(x, y[0]) for x, y in zip(xs, ys))\n else:\n loss = sum(model.loss(x, y) for x, y in zip(xs, ys))\n plt.suptitle('Total loss: {:.3f}'.format(loss))\n plt.show()\n\n\ndef graph(funcs, *args, xmin=-3.0, xmax=3.0):\n \"\"\"Plots the graph of a given function.\n\n Keyword arguments:\n funcs -- one or more functions to be plotted\n *args -- extra arguments that should be passed to the function(s) (optional)\n xmin -- the lowest x-value (default -4.0)\n xmax -- the highest x-value (default +4.0)\n\n Return values:\n None\n \"\"\"\n # Wrap the function in a list, if only one is provided\n if type(funcs) is not list:\n funcs = [funcs]\n # Plot the figures and keep track of their y-range\n xs = [xmin+xi*(xmax-xmin)/256.0 for xi in range(257)]\n ymin = -1.0\n ymax = +1.0\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n plt.subplot(1, 1, 1, facecolor='#F8F8F8')\n for n, func in enumerate(funcs):\n ys = [func(x, *args) for x in xs]\n ymin 
= min(ymin, floor(min(ys)))\n ymax = max(ymax, ceil(max(ys)))\n plt.plot(xs, ys, color=colors[n % len(colors)], linewidth=3.0, label=func.__code__.co_name)\n # Finish the layout and display the figure\n plt.axis([xmin, xmax, ymin, ymax])\n plt.legend()\n plt.grid(True, color='k', linestyle=':', linewidth=0.5)\n plt.axhline(y=0, color='k', linestyle='-', linewidth=1.0)\n plt.axvline(x=0, color='k', linestyle='-', linewidth=1.0)\n plt.xlabel(r'$x$')\n plt.ylabel(r'$f(x)$')\n plt.show()\n\n\ndef generate(nominal, num=64, dim=2, bias=None, weights=None, noise=0.0, seed=None):\n \"\"\"Generate a suitable dataset with attributes and outcomes.\n\n Keyword arguments:\n nominal -- flag indicates nominal classes or continuous values\n num -- number of instances (default 64)\n dim -- dimensionality of the attributes (default 2)\n bias -- bias of the generating model equation (default random)\n weights -- weights of the generating model equation (default random)\n noise -- the amount of noise to add (default 0.0)\n seed -- a seed to initialise the random number generator (default random)\n\n Return values:\n xs -- values of the attributes\n ys -- values of the outcomes\n \"\"\"\n # Seed the random number generator\n random.seed(seed)\n # Generate random bias if none provided\n if bias == None:\n bias = random.gauss(0.0, 4.0)\n # Generate randomly directed weight vector if none provided\n if weights == None:\n weights = [random.gauss(0.0, 1.0) for d in range(dim)]\n length = sqrt(sum(wi**2 for wi in weights))\n weights = [wi/length for wi in weights]\n # Generate attribute data\n xs = [[random.gauss(0.0, 8.0) for d in range(dim)] for n in range(num)]\n # Generate outcomes\n if nominal:\n ys = [-1 if bias+sum(wi*xi for wi, xi in zip(weights, x)) < 0 else 1 for x in xs]\n else:\n ys = [bias+sum(wi*xi for wi, xi in zip(weights, x)) for x in xs]\n # Add noise to the attributes\n xs = [[xs[n][d]+random.gauss(0.0, noise) for d in range(dim)] for n in range(num)]\n # Return values\n return xs, ys\n\n\ndef multinomial(classes, num=512, seed=None):\n \"\"\"Generate a dataset based on Newton's method applied to 1+(-z)^c=0.\n\n Keyword arguments:\n classes -- number of classes to generate\n num -- number of instances (default 512)\n seed -- a seed to initialise the random number generator (default random)\n\n Return values:\n xs -- values of the attributes x1 and x2\n ys -- class labels in one-hot encoding\n \"\"\"\n # Seed the random number generator\n random.seed(seed)\n # Generate attribute data\n rs = [sqrt(0.75*random.random()) for n in range(num)]\n fs = [2.0*pi*random.random() for n in range(num)]\n xs = [[r*cos(f), r*sin(f)] for r, f in zip(rs, fs)]\n # Initialize outcomes\n ys = [[0.0 for c in range(classes)] for n in range(num)]\n # Perform Newton's method\n for n in range(num):\n z_old = -complex(xs[n][0], xs[n][1])\n z_new = (z_old*(classes-1)-z_old**(1-classes))/classes\n while abs(z_new-z_old) > 1e-9:\n z_old = z_new\n z_new = (z_old*(classes-1)-z_old**(1-classes))/classes\n c = int(((phase(-z_new)/pi+1.0)*classes-1.0)/2.0)\n ys[n][c] = 1.0\n # Return values\n return xs, ys\n","sub_path":"les4/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"574794203","text":"t = int(input())\nresult = []\nfor i in range(1, t + 1):\n k, c, s = [int(x) for x in input().split(\" \")] # read a list of integers, 2 in this case\n result = []\n toSum = k**(c-1)\n \n for j in range(s):\n result += [1 + j*toSum]\n \n print(\"Case #{}: {}\".format(i, \" \".join([str(x) for x in result])))\n ","sub_path":"codes/CodeJamCrawler/16_0_4/lidiamcfreitas/problemD.py","file_name":"problemD.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"4359775","text":"from board import Board\nimport numpy as np\n\nCOUNTERS = {'s':'p', 'p':'r', 'r':'s'}\nCOUNTERED = {'p':'s', 'r':'p', 's':'r'}\n\ndef distance(coord1, coord2):\n (r1, c1) = coord1\n (r2, c2) = coord2\n\n dr = r1 - r2\n dc = c1 - c2\n if (dr < 0 and dc < 0) or (dr > 0 and dc > 0):\n return abs(dr + dc)\n else:\n return max(abs(dr), abs(dc))\n\ndef unthrown_diff(board):\n return board.unthrown_uppers - board.unthrown_lowers\n\ndef thrown_diff(board):\n return board.remaining_tokens(\"UPPER\") - board.remaining_tokens(\"LOWER\") - unthrown_diff(board)\n\ndef dominance_diff(board):\n up_rps = (len(board.thrown_uppers[\"r\"]), len(board.thrown_uppers[\"p\"]), len(board.thrown_uppers[\"s\"]))\n low_rps = (len(board.thrown_lowers[\"r\"]), len(board.thrown_lowers[\"p\"]), len(board.thrown_lowers[\"s\"]))\n\n up_r = up_rps[0]/(low_rps[1] + up_rps[0] + 1)\n up_p = up_rps[1]/(low_rps[2] + up_rps[1] + 1)\n up_s = up_rps[2]/(low_rps[0] + up_rps[2] + 1)\n\n low_r = low_rps[0]/(up_rps[1] + low_rps[0] + 1)\n low_p = low_rps[1]/(up_rps[2] + low_rps[1] + 1)\n low_s = low_rps[2]/(up_rps[0] + low_rps[2] + 1)\n return (up_r + up_p + up_s) - (low_r + low_p + low_s)\n\ndef spread(ps):\n if not ps:\n return 0\n mean = np.mean(ps, axis = 0)\n centroid = (round(mean[0]), round(mean[1]))\n return sum([distance(p, centroid) for p in ps])/len(ps)\n\ndef spread_diff(board):\n u_ps = board.chain(board.thrown_uppers)\n l_ps = board.chain(board.thrown_lowers)\n return spread(u_ps) - spread(l_ps)\n\ndef min_circuit(ps, qs):\n # Number of tiles on the board\n min_circuit_dist = 9\n for p in ps:\n dists = [distance(p, q) for q in qs]\n\n if not dists:\n return 0\n\n avg_dist = np.sum(dists)/len(dists)\n if avg_dist < min_circuit_dist:\n min_circuit_dist = avg_dist\n return min_circuit_dist\n\ndef sum_min_dists(player_thrown, opponent_thrown):\n sum_dists = 0\n for key, value in player_thrown.items():\n countered = opponent_thrown[COUNTERS[key]]\n sum_dists += min_circuit(value, countered)\n return sum_dists\n\ndef capture_dist_difference(board):\n return sum_min_dists(board.thrown_lowers, board.thrown_uppers) - sum_min_dists(board.thrown_uppers, board.thrown_lowers)\n\n# evaluates a board state with the option of evaluating after input move from input\ndef evaluate(board):\n if board.is_win(\"UPPER\"):\n return 1\n if board.is_win(\"LOWER\"):\n return -1\n if board.is_draw():\n return 0\n\n # Features: throw_diff, scissor_diff, paper_diff, rock_diff, median row\n ut_diff = unthrown_diff(board)\n t_diff = thrown_diff(board)\n dom_diff = dominance_diff(board)\n s_diff = spread_diff(board)\n c_diff = capture_dist_difference(board)\n #print(unthrown_diff, dom_diff, thrown_diff)\n value = 0.2* ut_diff + 0.2 *t_diff + 0.25*dom_diff + 0.05*s_diff + 0.05*c_diff\n capped_val = min(max(value, -1), 1)\n\n return capped_val\n\ndef evaluate_move(board, move, player):\n return evaluate(apply_move(board, move, player))\n\ndef apply_move(board, move, player):\n player_thrown, player_unthrown = board.player_pieces(player)\n opponent_thrown, opponent_unthrown = board.opponent_pieces(player)\n player_thrown = board.copy_dict(player_thrown)\n opponent_thrown = board.copy_dict(opponent_thrown)\n\n if move[0] == \"THROW\":\n t = board.update_throw(player_thrown, move[1], move[2])\n player_unthrown -= 1\n else:\n t = board.update_slide_swing(player_thrown, move[1], move[2])\n\n counters_t = COUNTERED[t]\n countered_t = COUNTERS[t]\n\n if move[2] in player_thrown[counters_t] or move[2] in opponent_thrown[counters_t]:\n 
player_thrown[t] = [p for p in player_thrown[t] if p != move[2]]\n\n player_thrown[countered_t] = [p for p in player_thrown[countered_t] if p != move[2]]\n opponent_thrown[countered_t] = [p for p in opponent_thrown[countered_t] if p != move[2]]\n\n if player == \"UPPER\":\n return Board(player_thrown, opponent_thrown, player_unthrown, opponent_unthrown, board.turn + 1, None)\n else:\n return Board(opponent_thrown, player_thrown, opponent_unthrown, player_unthrown, board.turn + 1, None)\n","sub_path":"skeleton-code-B/grandMasters/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"89021109","text":"from django.shortcuts import render\r\nfrom django.http import HttpRequest, HttpResponse\r\nfrom django.http import JsonResponse, HttpResponse\r\nfrom django.http import StreamingHttpResponse\r\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom AO_Prj.common.results.ao_results import AOJsonResult\r\nimport json\r\nfrom datetime import datetime,date\r\nfrom django.core import serializers\r\nfrom AO_Prj.models import TEngineAddr\r\n\r\n# Create your views here\r\n\r\n''' 初始化所有机房地址信息 '''\r\n@csrf_exempt\r\ndef get_init_engineaddr(request):\r\n result = TEngineAddr.objects.all()\r\n return render(request, 'app/engineaddr/engineaddr.html', {'addrlist': result})\r\n\r\n''' 查询所有机房地址信息 '''\r\n@csrf_exempt\r\ndef get_engineaddr(request):\r\n if request.method == 'POST':\r\n # 获取请求参数\r\n logobj = json.loads(request.body.decode())\r\n result = TEngineAddr.objects.all()\r\n return AOJsonResult(result)\r\n\r\n''' 添加新机房地址 '''\r\n@csrf_exempt\r\ndef add_engineaddr(request):\r\n if request.method == 'POST':\r\n #获取过滤参数\r\n request_data = json.loads(request.body.decode())\r\n #addTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n #执行新增操作\r\n try:\r\n #engineaddr = TEngineAddr(addrname=addrname, remark=remake, addtime=addTime)\r\n #engineaddr.save()\r\n TEngineAddr.objects.create(**request_data)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n\r\n''' 删除机房地址 '''\r\n@csrf_exempt\r\ndef del_engineaddr(request):\r\n if request.method == 'POST':\r\n #获取请求参数\r\n request_data = json.loads(request.body.decode())\r\n addrid = request_data['addrid']\r\n\r\n #执行删除\r\n try:\r\n TEngineAddr.objects.filter(id=addrid).delete()\r\n except:\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n''' 更新机房地址信息 '''\r\n@csrf_exempt\r\ndef update_engineaddr(request):\r\n if request.method == 'POST':\r\n #获取请求参数\r\n request_data = json.loads(request.body.decode())\r\n addrid = request_data.pop('addrid')\r\n #执行更新\r\n try:\r\n TEngineAddr.objects.filter(id=addrid).update(**request_data)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n\r\n''' 获取所有的机房地址名称和对应的id '''\r\n@csrf_exempt\r\ndef get_all_sites(request):\r\n queryAll = TEngineAddr.objects.all()\r\n result = [{'id':engine.id,'addrname':'%s-%s' % (engine.addrsite,engine.addrname)} for engine in queryAll]\r\n return AOJsonResult(result)\r\n\r\n\r\n","sub_path":"apps/engineaddr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"270905449","text":"# -*- coding: utf-8 -*-\n\n#--------------------------------------------------------------------#\n# #\n# Copyright (C) 2018 HOLOEYE Photonics AG. All rights reserved. #\n# Contact: https://holoeye.com/contact/ #\n# #\n# This file is part of HOLOEYE SLM Display SDK. #\n# #\n# You may use this file under the terms and conditions of the #\n# \"HOLOEYE SLM Display SDK Standard License v1.0\" license agreement. #\n# #\n#--------------------------------------------------------------------#\n\n\n\n\n# Import the SLM Display SDK:\nimport detect_heds_module_path\nfrom holoeye import slmdisplaysdk\n\n\n# Function to print some statistics:\ndef printStat(stat, dataHandles):\n sum = 0.0\n count = 0\n min = 10000\n max = -10000\n\n for handle in dataHandles:\n # get the stat from the handle\n v = getattr(handle, stat)\n\n # check if this action did happen at all\n if v == slmdisplaysdk.Datahandle.NotDone:\n continue\n\n # process value\n sum += float(v)\n count += 1\n\n if v < min:\n min = v\n\n if v > max:\n max = v\n\n # check if any handle did this action\n if count > 0:\n avg = sum / count\n\n print(\"{0:<16} -> min: {1:<3} - avg: {2:<3} - max: {3:<3}\".format(stat, min, avg, max))\n else:\n print(\"{0:<16} -> min: {1} - avg: {1} - max: {1}\".format(stat, \"n/a\"))","sub_path":"SLM/slideshow_preload_print_stats.py","file_name":"slideshow_preload_print_stats.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"492774337","text":"# Notes: The output should collection of a CSV file, with 2000 sentences (or less) for #each file. Output csv file must have three columns: sentenceID as the #original corpus, sentences as original corpus and NER_extracted_Bern (output #of BERN)\nimport pandas as pd\nimport requests\n\ndef bern_ent_extraction(query):\n try:\n ent_info = query_raw(query)\n extracted_ents = extract_ents(ent_info)\n return extracted_ents\n except:\n return None\n\n\ndef query_raw(text, url=\"https://bern.korea.ac.kr/plain\"):\n return requests.post(url, data={'sample_text': text}).json()\n\n\n# define functions to extract ENTs from Bern model\n\n\n# find_ent_index(ents)\ndef extract_ents(ents_info):\n index = find_ent_index(ents_info)\n extracted_ents = find_ent(index)\n return extracted_ents\n\n\ndef find_ent_index(ents_info):\n ent_index = {}\n for k, v in ents_info['logits'].items():\n if v:\n l = [v[0][0][\"start\"], v[0][0][\"end\"]]\n tup = tuple(l)\n ent_index[k] = tup\n return ent_index\n\n\ndef find_ent(ent_index):\n extracted_ent = {}\n for k, v in ent_index.items():\n extracted_ent[k] = query[v[0]:v[1]]\n return extracted_ent\n\n\nwith open (r\"E:\\Helen\\FinalProject_INFO5731\\COVID_19_relatedWorking\\All_COVID_related_body_sentSplited\\COV_RelatedBody_sentSplit_DS1.csv\", 'r', newline='', encoding='utf-8') as file:\n df = pd.read_csv(file)\n#for sent in df.sentence[:1]:'\nquery = \"Autophagy captures intracellular components and delivers them to lysosomes, where they are degraded and recycled to sustain metabolism and to enable survival during starvation1-5\"\nif __name__ == '__main__':\n query_raw(query)\n\nbern_ents = []\nsents_list=[]\nfor sent in df.sentence[-1000:]:\n sents_list.append(sent)\n query = sent\n bern_ents.append(bern_ent_extraction(query))\n\nprint(bern_ents)\nsentID = [id for id in df.sentenceID[-1000:]]\ndf_ent = pd.DataFrame(list(zip(sentID, sents_list,bern_ents)), columns=[\"sentenceID\",\"sentences\", \"BERN_entities\"])\n\nwith open (\"E:\\Helen\\FinalProject_INFO5731\\COVID_19_relatedWorking\\All_COVID_ENTS_extracted\\BERN\\DS1\\Entities_extracted_DS1_1000_-1.csv\", 'w', newline='', encoding='utf-8') as file:\n df_ent.to_csv(file)\n","sub_path":"Deep_Content_Analysis/NER_Extracting/Entities/BERN/DS1/BERN_COVID_DS1_1000.py","file_name":"BERN_COVID_DS1_1000.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"479230137","text":"import os\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\ndef read(fname):\n with open(os.path.join(os.path.dirname(__file__), fname)) as f:\n return f.read()\n\nsetup(\n name='provider',\n version=\"0.0.2\",\n author='schwarzlicht',\n author_email='schwarzlicht@riseup.net',\n description=('Content crawler for Twitter'),\n long_description=read('readme.md'),\n license='mit',\n include_package_data=False,\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'content-provider=provider.script:main',\n ]},\n install_requires=['argparse', 'tweepy', 'python-decouple', 'apscheduler',\n 'pyyml'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"483203890","text":"import base_grid\n\n# Inherit a Grid and give it an update instruction\nclass life_grid(base_grid.grid):\n def __init__(self, block, screen, colour):\n base_grid.grid.__init__(self, block, screen, colour)\n self.mouse_clicks = []\n\n # Propogate mouse_clicks to the cells\n def add_mouse_clicks(self, mouse_click):\n x, y = mouse_click\n for coordinate in self.cells:\n xo, yo = coordinate\n if (xo < x and x <= xo + self.width) and (yo < y and y <= yo + self.height):\n self.cells[coordinate].pop()\n self.cells[coordinate].append(\"white\")\n\n # Propogate the life\n def propogate_life(self):\n if len(self.cells[self.margin, self.margin]) == 1: \n self.add_neighbors()\n return\n if len(self.cells[self.margin, self.margin]) == 9: \n for coordinate in self.cells: \n live_neighbors = 0\n old_state = self.cells[coordinate].pop(0)\n new_state = old_state\n\n while self.cells[coordinate]:\n if not self.cells[coordinate].pop() == \"black\":\n live_neighbors = live_neighbors + 1\n\n if old_state == \"black\":\n if live_neighbors == 3:\n new_state = \"white\"\n elif live_neighbors < 2 or live_neighbors > 3:\n new_state = \"black\"\n elif live_neighbors == 3:\n if old_state == \"green\":\n new_state = \"red\"\n if old_state == \"blue\":\n new_state = \"green\"\n if old_state == \"red\":\n new_state = \"blue\"\n if old_state == \"white\":\n new_state = \"blue\"\n\n self.cells[coordinate].append(new_state)\n self.add_neighbors()\n\n #Get the neighbor cell states\n def add_neighbors(self):\n if len(self.cells[self.margin, self.margin]) == 1:\n for coordinate in self.cells:\n xo, yo = coordinate\n for i in range(3):\n x = xo - (i - 1) * (self.width + self.margin)\n for j in range(3):\n y = yo - (j - 1) * (self.height + self.margin)\n if not (x == xo and y == yo): \n if x < self.margin:\n x = self.xmax\n if x > self.screen_width - self.width:\n x = self.margin\n if y < self.margin: \n y = self.ymax\n if y > self.screen_width - self.height:\n y = self.margin\n self.cells[(xo, yo)].append(self.cells[(x, y)][0])\n","sub_path":"GameOfLife/grid/life_grid.py","file_name":"life_grid.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"513710583","text":"from django import forms\nfrom users.models import Profile\n\n\nclass UserCreationProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['country', 'phone']\n widgets = {'phone': forms.NumberInput(attrs={'aria-label': 'Sizing example input', 'aria-describedby': 'nputGroup-sizing-default',\n 'class': 'form-control'})}\n","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"387303894","text":"estatura = float(input('Digite su estatura\\n'))\nacumuladorestatura = 0\ncontadorpersonas = 0\npromedio = 0\n\nwhile estatura >= 1.0 and estatura <= 3.0:\n\n\testatura = float(input('Digite su estatura\\n'))\n\n\tcontadorpersonas = contadorpersonas + 1\n\tacumuladorestatura = acumuladorestatura + estatura\n\tpromedio = acumuladorestatura / contadorpersonas\n\nelse:\n\tprint('El promedio de las estaturas es:', promedio)","sub_path":"EjerciciosPython2/estatura_promedio3.py","file_name":"estatura_promedio3.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"587996027","text":"s = int(input())\n\nans = [s]\nidx = 1\ntmp = s\nwhile True:\n if tmp % 2 == 0:\n tmp = tmp//2\n else:\n tmp = (3*tmp) + 1\n idx +=1\n\n if tmp in ans:\n print(idx)\n exit()\n else:\n ans.append(tmp)","sub_path":"Python_codes/p03146/s629482993.py","file_name":"s629482993.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"334472017","text":"#\n# Python: 3.9.5\n#\n# Chris Windsor\n#\n# Purpose: The Tech Academy Python Course\n# Demonstrating how to pass variables from function to \n# while producing a functional game. \n#\n\n\n\n\ndef start():\n f_name = \"Sarah\"\n l_name = \"Connor\"\n age = 28\n gender = \"Female\"\n get_info(f_name,l_name,age,gender)\n\n\n\ndef get_info(f_name,l_name,age,gender):\n print(\"My name is {} {}. I am a {} year old {}.\".format(f_name,l_name,age,gender))\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n start()\n","sub_path":"python_tutorial.py","file_name":"python_tutorial.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"410751218","text":"def computador_escolhe_jogada(n, m):\n resto = n % (m + 1)\n if n == m:\n return n\n elif resto == 0 or resto > m:\n return m\n else:\n return resto\n\ndef usuario_escolhe_jogada(n, m):\n pecas = 0\n while pecas < 1 or pecas > m or pecas > n:\n pecas = int(input(\"Digite o número de peças a serem retirados: \"))\n if pecas < 1 or pecas > m:\n print(\"Jogada invalida, tente novamente.\")\n return pecas\n\n\ndef menu ():\n user = 0\n comp = 0\n print(\"Bem-vindo ao jogo do NIM! Escolha:\\n\")\n print(\"1 - para jogar uma partida isolada\\n2 - para jogar um campeonato\")\n escolha = int(input(\"\"))\n if escolha == 1:\n print(\"Você escolheu uma partida isolada\")\n print(\"-=\" * 40)\n print(\"\\t\\t\\t\\t JOGO NIM\")\n print(\"-=\" *40)\n partida()\n elif escolha == 2:\n print(\"Voce escolheu um campeonato!\\n\")\n print(\"\\n**********Rodada 1************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n print(\"\\n**********Rodada 2************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n print(\"\\n**********Rodada 3************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n\ndef partida():\n n = int(input(\"Digite o número de peças do jogo: \"))\n m = int(input(\"Digite o número de peças máximos que é possível retirar em uma rodada: \"))\n nInicial = n\n pecas = 0\n print(\"\\n\")\n if nInicial % (m + 1) == 0:\n print(\"Você começa!\\n\")\n while n > 0:\n pecas1 = usuario_escolhe_jogada(n, m)\n print(\"\\nVocê tirou {} peças\".format(pecas1))\n n -= pecas1\n print(\"Restam {} peças\\n\".format(n))\n if n <= 0:\n print(\"Você ganhou!\")\n return \"user\"\n pecas2 = computador_escolhe_jogada(n, m)\n print(\"\\nO computador tirou tirou {} peças\".format(pecas2))\n n -= pecas2\n print(\"Restam {} peças\".format(n))\n if n <= 0:\n print(\"O computador ganhou!\")\n return \"comp\"\n pecas = pecas1 + pecas2\n else:\n print(\"Computador começa!\\n\")\n while n > 0:\n pecas1 = computador_escolhe_jogada(n, m)\n print(\"\\nO computador tirou {} peças\".format(pecas1))\n n -= pecas1\n print(\"Restam {} peças\".format(n))\n if n <= 0:\n print(\"O computador ganhou!\")\n return \"comp\"\n pecas2 = usuario_escolhe_jogada(n, m)\n print(\"\\nVocê tirou {} peças\".format(pecas2))\n n -= pecas2\n print(\"Restam {} peças\\n\".format(n))\n if n <= 0:\n print(\"Você ganhou!\")\n return \"user\"\n pecas = pecas1 + pecas2\n\nmenu()\n","sub_path":"Curso Coursera - python/jogo_nim.py","file_name":"jogo_nim.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"144716061","text":"import math\n\n\ndef lse():\n \"\"\"\n https://www.cnblogs.com/BlogOfMr-Leo/p/8627311.html\n :return:\n \"\"\"\n import numpy as np\n from scipy import optimize\n\n x = np.array([0.9, 2.5, 3.3, 4.5, 5.7, 6.9])\n y = np.array([1.1, 1.6, 2.6, 3.2, 4.0, 6.0])\n\n def reds(p):\n # 计算以p为参数的直线和数据之间的误差\n k, b = p\n return y - (k * x + b)\n # return math.pow((y - (k * x + b)),2)\n\n # leastsq 使得reds()输出最小,参数的初始值是【1,0】\n r = optimize.leastsq(reds, [1, 0])\n k, b = r[0]\n print(\"k=\", k, \"\\n b=\", b)\n y1 = x * k + b\n a = np.array([y1[0] - y[0], y1[1] - y[1], y1[2] - y[2], y1[3] - y[3], y1[4] - y[4], y1[5] - y[5]])\n print(\"\\n\", y, \"\\n\", y1, a)\n print(\"灵敏度计算\", k)\n\nif __name__ == '__main__':\n print(2^2)\n print(math.pow(2,3))\n lse()","sub_path":"math_basic/optimize_method.py","file_name":"optimize_method.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"73762828","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'posts'\n\nurlpatterns = [\n url(r'^(?P\\d+)/comment/$', views.comment_add, name='comment_add'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/edit/$', views.comment_edit, name='comment_edit'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\n]\n","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"268735704","text":"import argparse\nfrom pprint import pprint\n\nfrom gevent.pywsgi import WSGIServer\nfrom lib.applib_updater import update\n\nclass Application(object):\n\n def __init__(self, applib_repo_dir, website_repo_dir):\n self.__applib_repo_dir = applib_repo_dir\n self.__website_repo_dir = website_repo_dir\n\n def __call__(self, env, start_response):\n if env['REQUEST_METHOD'] == 'POST':\n if env['PATH_INFO'] == '/' and update(self.__applib_repo_dir, self.__website_repo_dir):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [\"Updated \"]\n else:\n start_response('404 Not Found', [('Content-Type', 'text/html')])\n return ['Not Found ']\n \n start_response('405 Method Not Allowed', [('Content-Type', 'text/html')])\n return ['Method Not Allowed Try POST ']\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Webapp for testing the applib_updater.update function')\n parser.add_argument('-a', '--applib-repo-dir', required=True)\n parser.add_argument('-w', '--website-repo-dir', required=True)\n parser.add_argument('-p', '--port', type=int, required=True)\n args = parser.parse_args()\n #pprint(args)\n print('Serving on {0}...'.format(args.port))\n \n WSGIServer(\n ('', args.port),\n Application(\n args.applib_repo_dir,\n args.website_repo_dir\n )\n ).serve_forever()\n \n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"402654051","text":"#Тестовая программа по греческой статье\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Исходные данные\r\n#Модуль сдвига\r\nG = 2039000/(2*(1 + 0.25))\r\n#Коэффициент Пуассона\r\nnu = 0.3\r\n#Константы\r\nl = 0.0\r\na = 5.0\r\n#Массив координат точек пластины\r\npc = np.array([[0, 0],\r\n [1, 0],\r\n [0, 1]])\r\npc2 = np.array([[0, 0],\r\n [0.5, 0],\r\n [1, 0],\r\n [0.5, 0.5],\r\n [0, 1],\r\n [0, 0.5]])\r\n#Массив элементов - в нём номера точек, на которых он строится\r\nelm1 = np.array([0, 1, 2])\r\n#Для обычных пластин\r\n#Нижняя левая, нижняя правая, верхняя, центральная\r\nelm2 = np.array([[0, 1, 5],\r\n [1, 2, 3],\r\n [3, 4, 5],\r\n [1, 3, 5]])\r\n#Вектор узловых нагрузок и узловых моментов\r\n\"\"\"fu = np.array([0, 0, 0, 0, 0, 0])\r\nffi = np.array([1e-12, 0, 0])\"\"\"\r\nP1 = np.array([0, 0, 0, 0, 1000, 0])\r\nP2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1000, 0, 0, 0])\r\n\r\n#Функция получения матрицы жёсткости элемента\r\n#pn - кортеж с номерами точек ��ластины\r\n#te - тип матрицы:\r\n#1 - 6x6\r\n#2 - 12x12\r\ndef matrix_K(pc, pn, te):\r\n #1. Определение длин сторон пластины\r\n #Номер стороны = номер противоположного узла пластины\r\n l1 = math.sqrt((pc[pn[1], 0] - pc[pn[2], 0])**2 + (pc[pn[1], 1] - pc[pn[2], 1])**2)\r\n l2 = math.sqrt((pc[pn[0], 0] - pc[pn[2], 0])**2 + (pc[pn[0], 1] - pc[pn[2], 1])**2)\r\n l3 = math.sqrt((pc[pn[1], 0] - pc[pn[0], 0])**2 + (pc[pn[1], 1] - pc[pn[0], 1])**2)\r\n #2. Определение дополнительных параметров, связанных с размером пластины\r\n l12 = l1**2 + l2**2 - l3**2\r\n l23 = l2**2 + l3**2 - l1**2\r\n l31 = l3**2 + l1**2 - l2**2\r\n #3. Определение метрического тензора (что это?)\r\n gab = np.array([[l2**2, l12/2],\r\n [l12/2, l1**2]])\r\n #4. Вычисление определителя метрического тензора\r\n g = np.linalg.det(gab)\r\n #5. Вычисление площади пластины через определитель метрического тензора\r\n A = math.sqrt(g/4)\r\n #6. Задаём известные матрицы\r\n B = np.array([[1, 0, 0, 0, -1, 0],\r\n [0, 0, 0, 1, 0, -1],\r\n [0, 1, 0, 0, 0, -1],\r\n [0, 0, 1, 0, -1, 0]])\r\n D = G*np.array([[(2*(1 - nu))/(1 - 2*nu), (2*nu)/(1 - 2*nu), 0, 0],\r\n [(2*nu)/(1 - 2*nu), (2*(1 - nu))/(1 - 2*nu), 0, 0],\r\n [0, 0, 1 + a, 1 - a],\r\n [0, 0, 1 - a, 1 + a]])\r\n #7. Задаём константу\r\n C = 2*a\r\n #8. Вычисляем подматрицы\r\n Kuu = A*(B.transpose() @ D @ B)\r\n #9. 
Составляем матрицу жесткости элемента\r\n Ke = Kuu\r\n if te == 2:\r\n E1 = D[0][0] + D[0][3]\r\n E2 = D[0][1] + D[1][3]\r\n E3 = D[0][2] + D[2][3]\r\n E4 = D[0][3] + D[3][3]\r\n \r\n M1 = D[0][1] + D[0][2]\r\n M2 = D[1][1] + D[1][2]\r\n M3 = D[1][2] + D[2][2]\r\n M4 = D[1][3] + D[2][3]\r\n \r\n R1 = D[0][1] + D[2][3]\r\n R2 = D[0][2] + D[1][3]\r\n \r\n T1 = 8*(D[0][0] + D[0][3] + D[3][3])\r\n T2 = 8*(D[1][1] + D[1][2] + D[2][2])\r\n \r\n #Заполняем одну половину, другую отзеркалим\r\n Ke = np.array([[3*D[0][0], 3*D[0][2], -4*D[0][3], -D[0][1], E1, M1, 0, 0, -4*E1, -4*M1, 4*D[0][3], 4*D[0][1]],\r\n [0, 3*D[2][2], -D[2][3], -D[1][2], E3, M3, 0, 0, -4*E3, -4*M3, 4*D[2][3], 4*D[1][2]],\r\n [0, 0, 3*D[3][3], 3*D[1][3], E4, M4, -4*E4, -4*M4, 0, 0, 4*D[0][3], 4*D[2][3]],\r\n [0, 0, 0, 3*D[1][1], E2, M2, -4*E2, -4*M2, 0, 0, 4*D[0][1], 4*D[1][2]],\r\n [0, 0, 0, 0, 3*(E1 + E4), 3*(M1 + M4), -4*E4, -4*E2, -4*E1, -4*E3, 0, 0],\r\n [0, 0, 0, 0, 0, 3*(M2 + M3), -4*M4, -4*M2, -4*M1, -4*M3, 0, 0],\r\n [0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2, 8*D[0][3], 4*R1, -8*E1, -4*(E3 + M1)],\r\n [0, 0, 0, 0, 0, 0, 0, T2, 4*R1, 8*D[1][2], -4*(E3 + M1), -8*M3],\r\n [0, 0, 0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2, -8*E4, -4*(E2 + M4)],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, T2, -4*(E2 + M4), -8*M2],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, T2]])\r\n \r\n #Зеркалим незаполненную половину матрицы\r\n for i in range(12):\r\n for j in range(i):\r\n Ke[i][j] = Ke[j][i]\r\n #Секция вывода данных\r\n \"\"\"print('1:\\nl1 =', l1, '\\nl2 =', l2, '\\nl3 =', l3)\r\n print('2:\\nl12 =', l12, '\\nl23 =', l23, '\\nl31 =', l31)\r\n print('3:\\ngab =\\n', gab)\r\n print('4:\\ng =', g)\r\n print('5:\\nA =', A)\r\n print('6:\\nB =\\n', B, '\\nD =\\n', D, '\\nBcap =\\n', Bcap, '\\nDcap =\\n', Dcap)\r\n print('7:\\nC =', C)\r\n print('8:\\nKuu =\\n', Kuu, '\\nKufi =\\n', Kufi, '\\nKfifi =\\n', Kfifi)\r\n print('9:\\nKe =\\n', Ke.astype(int))\"\"\"\r\n #И возвращаем Ke\r\n return Ke\r\n\r\n#Функция установки граничного условия\r\n#KG - глобальная матрица жёсткости\r\n#n - номер столбца и строки, который мы закрепляем (начинается с 0)\r\ndef set_BC(KG, n):\r\n #Размер матрицы\r\n size = KG.shape[0]\r\n KG[0:size, n:n + 1] = np.zeros((size, 1))\r\n KG[n:n + 1, 0:size] = np.zeros(size).transpose()\r\n KG[n, n] = 1\r\n \r\n return KG\r\n\r\n#Функция добавления матрицы к глобальной матрице жёсткости\r\n#KG - глобальная матрица жёсткости\r\n#Kelm - матрица жёсткости элемента\r\n#n - номер элемента\r\ndef matrix_Add(KG, Kelm, elm, n):\r\n for iy in range(3):\r\n for ix in range(3):\r\n KG[2*elm[n, iy]:2*elm[n, iy] + 2, 2*elm[n, ix]:2*elm[n, ix] + 2] += Kelm[iy*2:iy*2 + 2, ix*2:ix*2 + 2]\r\n \r\n return KG\r\n\r\n#Количество точек\r\npn1 = pc2.shape[0]\r\n#И количество элементов\r\nen1 = elm2.shape[0]\r\n#Получаем глобальную матрицу жёсткости\r\nK1 = np.zeros((pn1*2, pn1*2))\r\n#Заносим в матрицу жёсткости элементы\r\nfor i in range(en1):\r\n K1 = matrix_Add(K1, matrix_K(pc2, elm2[i], 1), elm2, i)\r\n print('KElm of elm #', i, ':', matrix_K(pc2, elm2[i], 1), '\\nK1 step', i, ':\\n', K1)\r\n\r\n#Получаем матрицы жёсткости элементов\r\n#K1 = matrix_K(elm, 1)\r\nK2 = matrix_K(pc, elm1, 2)\r\n#Заносим в матрицу жёсткости элементы\r\n#И присваиваем граничное условие\r\nK1 = set_BC(K1, 0)\r\nK1 = set_BC(K1, 1)\r\n#K1 = set_BC(K1, 2)\r\nK1 = set_BC(K1, 3)\r\n#K1 = set_BC(K1, 4)\r\nK1 = set_BC(K1, 5)\r\n\r\nK2 = set_BC(K2, 0)\r\nK2 = set_BC(K2, 1)\r\n#K2 = set_BC(K2, 2)\r\nK2 = set_BC(K2, 3)\r\n#K2 = set_BC(K2, 4)\r\nK2 = 
set_BC(K2, 5)\r\n#K2 = set_BC(K2, 6)\r\n#print('10:\\nBC =\\n', K.astype(int))\r\n#10. Составляем вектор нагрузок\r\n\"\"\"P = np.zeros(9)\r\nP[0:6] = fu\r\nP[6:9] = ffi\"\"\"\r\n#print('10:\\nP =\\n', P)\r\n#11. Получаем вектор узловых перемещений U, решив систему линейных уравнений\r\nU1 = np.linalg.solve(K1, P2.transpose())\r\nU2 = np.linalg.solve(K2, P2.transpose())\r\nU1 = U1.reshape((int(U1.shape[0]/2), 2)).transpose()\r\nU2 = U2.reshape((int(U2.shape[0]/2), 2)).transpose()\r\n#print('11:\\nU =\\n', U1)\r\nprint('U1:\\n', U1, '\\n\\nU2:\\n', U2)\r\n\r\n#Графический вывод\r\nk = 100\r\npcd = pc.transpose()\r\npc2d = pc2.transpose()\r\nprint('pcd:\\n', pcd, '\\n\\npc2d:\\n', pc2d)\r\n\r\nplt.figure(figsize = (16, 9))\r\n\r\nplt.plot(pc2d[0], pc2d[1], 'bo-')\r\nplt.plot(pc2d[0] + U1[0]*k, pc2d[1] + U1[1]*k, 'ro-')\r\nplt.plot(pc2d[0] + U2[0]*k, pc2d[1] + U2[1]*k, 'go-')\r\n#Вывод исходной схемы\r\n#Проходимся по элементам\r\n\"\"\"for i in range(elm.shape[0]):\r\n #Массив точек треугольника\r\n #Четвёртая точка - чтобы замкнуть\r\n #Сразу транспонирован\r\n tp = np.zeros((2, 4))\r\n #Заполняем по точкам\r\n for j in range(3):\r\n for ii in range(2):\r\n tp[ii, j] = pc[elm[i][j]][ii]\r\n #Дублируем последнюю точку\r\n tp[0:2, 3:4] = tp[0:2, 0:1]\r\n #И рисуем\r\n plt.plot(tp[0], tp[1], 'bo-')\r\n#Вывод схемы с перемещениями\r\n#Проходимся по элементам\r\nfor i in range(elm.shape[0]):\r\n #Массив точек треугольника\r\n #Четвёртая точка - чтобы замкнуть\r\n #Сразу транспонирован\r\n tp = np.zeros((2, 4))\r\n #Заполняем по точкам\r\n for j in range(3):\r\n for ii in range(2):\r\n tp[ii, j] = pc[elm[i][j]][ii] + U[elm[i][j]*3 + ii]\r\n #Дублируем последнюю точку\r\n tp[0:2, 3:4] = tp[0:2, 0:1]\r\n #И рисуем\r\n plt.plot(tp[0], tp[1], 'ro-')\"\"\"\r\nplt.axis('equal')\r\nplt.title('Общий вид системы')\r\nplt.show()","sub_path":"test_n2.py","file_name":"test_n2.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"480000547","text":"\"\"\"\n9. Среди натуральных чисел, которые были введены, найти\nнаибольшее по сумме цифр. Вывести на экран это число и сумму его цифр.\n\"\"\"\n\nmax_n = 0\nfor i in range(10):\n num = input(\"Введите натуральное число: \")\n a = 0\n for j in num:\n a += int(j)\n if a > max_n:\n max_n = a\n max_num = num\nprint(f\"Наибольшая сумма чисел у числа {max_num}\")","sub_path":"Lesson_2/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"644039799","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom dbase_create import Prova, Questao, Assertiva, Oj, Sumula, Enunciado, Lei, LeiAlt\n\nBase = declarative_base()\n \nengine = create_engine('sqlite:///smartlegis.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n \nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n \n# begin inserts\np = Prova(\n ano=2019,\n esfera='Estadual',\n banca='CESPE',\n tipo='CE',\n escolaridade='superior',\n area='direito',\n instituto='PGE',\n instituto_uf='AP',\n instituto_municipio='macapá',\n supercargo='Procurador',\n cargo='Procurador do Estado',\n ninscritos=3000,\n nota_max=70.00,\n corte = 65.00\n )\n\nsession.add(p)\nsession.commit()","sub_path":"dbase_inserts.py","file_name":"dbase_inserts.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"114335222","text":"# COMPONENT CREATOR\nfrom CMGTools.RootTools.samples.ComponentCreator import ComponentCreator\nkreator = ComponentCreator()\n\n# ----------------------------- 2017 pp run ----------------------------------------\n\njson = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/5TeV/ReReco/Cert_306546-306826_5TeV_EOY2017ReReco_Collisions17_JSON.txt'\n\n# ----------------------------- Run2017F 14Dec2018 ----------------------------------------\nSingleMuon_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"SingleMuon_Run2017G\",\"/SingleMuon/jrgonzal-data5TeV_22nov2019_SingleMuon_Run2017G-17Nov2017-v1-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\nDoubleMuon_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"DoubleMuon_Run2017G\",\"/DoubleMuon/jrgonzal-data5TeV_22nov2019_DoubleMuon_Run2017G-17Nov2017-v1-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\nHighEGJet_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"HighEGJet_Run2017G\",\"/HighEGJet/jrgonzal-data5TeV_22nov2019_HighEGJet_Run2017G-17Nov2017-v2-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\n\ndataSamples_Run2017G = [SingleMuon_Run2017G_17Nov2017, DoubleMuon_Run2017G_17Nov2017, HighEGJet_Run2017G_17Nov2017]\n\n\n# ---------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n from CMGTools.RootTools.samples.tools import runMain\n runMain(samples, localobjs=locals())\n","sub_path":"RootTools/python/samples/samples_5TeV_DATA2017_NanoAOD.py","file_name":"samples_5TeV_DATA2017_NanoAOD.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"279224837","text":"import os.path\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\n\nimport db_api\nimport image_api\n\nfrom tornado.options import define, options\ndefine('port', default=80, help='tripod report server runs on the given port', type=int)\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n bizes = db_api.conndb.GetBizes()\n caches = db_api.conndb.GetCaches()\n self.render('index.tpl',biz_items = bizes, machine_items= caches)\n \nclass BizReportHandler(tornado.web.RequestHandler):\n def post(self):\n# try:\n begin = self.get_argument('begin_time_biz')\n end = self.get_argument('end_time_biz') \n biz = self.get_argument('biz_id')\n data = db_api.conndb.ReadData(biz,['get','set','load','remove'], begin ,end)\n image_src = image_api.PlotImage(biz,['get','set','load','remove'], begin,end,data)\n bizes = db_api.conndb.GetBizes()\n self.render('biz_query.tpl', selected_biz_id =biz, biz_items = bizes, begin_time = begin, end_time= end, items = data,pic_path= image_src)\n# except Exception:\n# self.render('none.tpl')\n\nclass CacheReportHandler(tornado.web.RequestHandler):\n def post(self):\n machine = self.get_argument('machine')\n begin = self.get_argument('begin_time_cache')\n end = self.get_argument('end_time_cache') \n data = db_api.conndb.ReadData(machine,['used_memory_rss','total','hit','miss'], begin ,end)\n image_src_memory = image_api.PlotImage(machine ,['used_memory_rss'], begin,end,data)\n image_src_command = image_api.PlotImage(machine ,['total','hit','miss'], begin,end,data)\n caches = db_api.conndb.GetCaches()\n self.render('cache_query.tpl', selected_cc =machine, machine_items= caches, begin_time = begin, end_time= end, items = data, pic_path_memory= image_src_memory, pic_path_command= image_src_command)\n \nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[(r'/', IndexHandler), \n (r'/cache_query', CacheReportHandler),\n (r'/biz_query', BizReportHandler)],\n template_path=os.path.join(os.path.dirname(__file__), './templates'),\n static_path=os.path.join(os.path.dirname(__file__), './static'),\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"main_project/tripod2/py/report/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"437480318","text":"from functools import wraps\n\nfrom flask import Flask, jsonify, make_response, redirect, request\nfrom flask_restful import Api, reqparse, Resource\nfrom pony import orm\nfrom werkzeug.exceptions import BadRequest\n\n# Configuration\nPROVIDER = 'sqlite'\nCREATE_DB = True\nDATABASE = 'database.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# database and models\ndb = orm.Database()\n\n\nclass ServiceRegistry(db.Entity):\n _table_ = 'services'\n\n id = orm.PrimaryKey(int, auto=True)\n service = orm.Required(str)\n version = orm.Optional(str)\n change = orm.Required(str)\n\n\n# helper methods\ndef sortkeypicker(keynames):\n # https://stackoverflow.com/a/1143719/311829\n negate = set()\n for i, k in enumerate(keynames):\n if k[:1] == '-':\n keynames[i] = k[1:]\n negate.add(k[1:])\n\n def getit(adict):\n composite = [adict[k] for k in keynames]\n for i, (k, v) in enumerate(zip(keynames, composite)):\n if k in negate:\n composite[i] = -v\n return composite\n return getit\n\n\ndef errors_handler(view):\n @wraps(view)\n def wrapped(self, *f_args, **f_kwargs):\n try:\n return view(self, *f_args, **f_kwargs)\n except orm.ObjectNotFound as e:\n return make_response(jsonify({\n 'status': False,\n 'message': 'Resource Not Found'\n }), 404)\n except Exception as e:\n return make_response(jsonify({\n 'status': False,\n 'message': 'Internal Server Error: ' + str(e)\n }), 505)\n return wrapped\n\n\n# requires both service and version params\nservice_parser = reqparse.RequestParser(bundle_errors=True)\nservice_parser.add_argument(\n 'service', type=str,\n help='Service name has to be provided',\n required=True\n)\nservice_parser.add_argument(\n 'version', type=str,\n help='Version value has to be provided',\n required=True\n)\n\n\n# rest resources\nclass ServiceResourceList(Resource):\n\n # @errors_handler\n @orm.db_session\n def get(self):\n data = request.values\n if len(list(data.keys())) == 0:\n # no params to query\n query = ServiceRegistry.select()\n elif ('service' in data and data['service']) and ('version' in data and data['version']):\n # filtering by service and version\n query = orm.select(\n c for c in ServiceRegistry\n if c.service == data['service'] and c.version == data['version'])\n elif ('service' in data and data['service']):\n # filtering by only service\n query = orm.select(\n c for c in ServiceRegistry\n if c.service == data['service'])\n return {\n 'status': True,\n 'items': sorted([\n item.to_dict()\n for item in query],\n key=sortkeypicker(('service', 'version', 'change'))\n )}\n\n @errors_handler\n @orm.db_session\n def put(self):\n try:\n values = service_parser.parse_args()\n except BadRequest as e:\n err_response = {'status': False}\n err_response.update(e.data)\n return make_response(jsonify(err_response), 400)\n\n service = ServiceRegistry(\n service=values['service'],\n version=values['version'],\n change='created'\n )\n db.commit()\n return redirect('/services/' + str(service.id))\n\n\nclass ServiceResource(Resource):\n @errors_handler\n @orm.db_session\n def get(self, service_id):\n service = ServiceRegistry[service_id]\n\n if service:\n return make_response(jsonify({\n 'status': True,\n 'message': 'Resource Found',\n 'item': service.to_dict()\n }), 200)\n\n @errors_handler\n @orm.db_session\n def delete(self, service_id):\n service = ServiceRegistry[service_id]\n\n if service:\n service.change = 'removed'\n db.commit()\n return 
make_response(jsonify({\n 'status': True,\n 'message': 'Successfully deleted. ID [{}]'.format(service_id),\n 'item': service.to_dict()\n }), 200)\n\n @errors_handler\n @orm.db_session\n def put(self, service_id):\n if not service_id: # insert\n try:\n values = service_parser.parse_args()\n except BadRequest as e:\n err_response = {'status': False}\n err_response.update(e.data)\n return make_response(jsonify(err_response), 400)\n\n service = ServiceRegistry(\n service=values['service'],\n version=values['version'],\n change='created'\n )\n db.commit()\n return redirect('/services/' + str(service.id))\n else: # update\n service = ServiceRegistry[service_id]\n\n if service:\n if request.form.get('service') is not None:\n service.service = request.form['service']\n if request.form.get('version') is not None:\n service.version = request.form['version']\n service.change = 'changed'\n db.commit()\n return make_response(jsonify({\n 'status': True,\n 'message': 'Resource updated',\n 'item': service.to_dict()\n }), 200)\n\n\napi = Api(app)\napi.add_resource(ServiceResourceList, '/services', strict_slashes=False)\napi.add_resource(ServiceResource, '/services/', strict_slashes=False)\n\n\ndef init_db(app):\n db.bind(\n provider=app.config['PROVIDER'],\n filename=app.config['DATABASE'],\n create_db=app.config['CREATE_DB']\n )\n db.generate_mapping(create_tables=True)\n return db\n\n\nif __name__ == '__main__':\n db = init_db(app)\n app.run(debug=app.config['DEBUG'])\n","sub_path":"service_server.py","file_name":"service_server.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"514552677","text":"import math\nn, k = map(int, input().split())\na = list(map(int, input().split()))\n\nlimit = math.log(n)\n\nif k >= n:\n for i in range(n-1):\n print(n, end=\" \")\n print(n)\n exit()\n\nfor _ in range(k):\n list = [1]*n\n for i, d in enumerate(a):\n if d == 0:\n continue\n for j in range(-d, d+1):\n if j == 0:\n continue\n if i+j >= 0:\n try:\n list[i+j] += 1\n # print(i, j)\n except:\n pass\n if _ > limit:\n break\n a = list.copy()\nfor i in range(n-1):\n print(a[i], end=\" \")\nprint(a[-1])\n","sub_path":"ABC/tokyokaijo2020/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"137525352","text":"### makeTrainTest.py ###\n# \n#\n#\n#\n\nimport sys\n\n\nfirstLine = True\nfor line in sys.stdin:\n if firstLine:\n firstLine = False\n continue\n data = line.split(\",\")\n\n output = \"\"\n for i in range(5, len(data)):\n x = data[i].strip()\n if i == 13:\n x = x.strip(\"\\\"\")\n output += x\n \n if i < len(data) - 1:\n output += \"\\t\"\n\n print(output)\n\n \n","sub_path":"MULTINOM_PREDICT/makeTrainTest.py","file_name":"makeTrainTest.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"42765493","text":"from Node import Node\n\nclass CircularLinkedList():\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n def push(self, val):\n if self.head == None:\n \n self.head = Node(val)\n self.tail = self.head\n self.head.next = self.tail\n self.tail.next = self.head\n\n else:\n new_node = Node(val)\n self.tail.next = new_node\n self.tail = new_node\n self.tail.next = self.head\n\n def print(self):\n curr = self.head\n print(curr.val)\n\n while curr.next != self.head:\n curr = curr.next\n print(curr.val)\n\n def get_size(self):\n curr = self.head\n count = 1\n\n while curr.next != self.head:\n curr = curr.next\n count += 1\n \n return count\n\n def even_odd(self):\n size = self.get_size()\n\n if size % 2 == 0:\n return [size // 2, size // 2]\n else:\n return [(size // 2) + 1, size - (size // 2 + 1)] \n\n def split(self): \n kind = self.even_odd()\n first = CircularLinkedList()\n second = CircularLinkedList()\n curr = self.head\n\n for i in range(kind[0]):\n first.push(curr.val)\n curr = curr.next\n \n for i in range(kind[1]):\n second.push(curr.val)\n curr = curr.next\n \n return [first, second]\n \n\n\n ","sub_path":"CircularLinkedList.py","file_name":"CircularLinkedList.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"301660595","text":"#!/usr/bin/env python3\n\n# This file is Copyright (c) 2019 Antti Lukats \n# This file is Copyright (c) 2015-2019 Florent Kermarrec \n# License: BSD\n\nimport argparse\n\nfrom migen import *\n\nfrom litex_boards.partner.platforms import mega65\n\nfrom litex.soc.interconnect import wishbone\n\nfrom litex.soc.cores.clock import *\nfrom litex.soc.integration.soc_core import *\nfrom litex.soc.integration.builder import *\n\nfrom litex.soc.cores.hyperbus import HyperRAM\n\nfrom liteeth.phy.rmii import LiteEthPHYRMII\nfrom liteeth.mac import LiteEthMAC\n\n\n#from hyper_memory import *\n#self.submodules.hyperram = HyperRAM(platform.request(\"hyperram\"))\n#self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n#self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"], 8*1024*1024)\n\n\n# CRG ----------------------------------------------------------------------------------------------\n\nclass _CRG(Module):\n def __init__(self, platform, sys_clk_freq):\n self.clock_domains.cd_sys = ClockDomain()\n self.clock_domains.cd_eth = ClockDomain()\n\n # # #\n self.cd_sys.clk.attr.add(\"keep\")\n self.submodules.pll = pll = S7PLL(speedgrade=-1)\n self.comb += pll.reset.eq(platform.request(\"cpu_reset\"))\n\n pll.register_clkin(platform.request(\"clk100\"), 100e6)\n pll.create_clkout(self.cd_sys, sys_clk_freq)\n pll.create_clkout(self.cd_eth, 50e6)\n\n\n# BaseSoC ------------------------------------------------------------------------------------------\n\nclass BaseSoC(SoCCore):\n mem_map = {\n# \"spiflash\": 0x20000000,\n \"hyperram\": 0x20000000,\n }\n mem_map.update(SoCCore.mem_map)\n\n def __init__(self, sys_clk_freq=int(100e6), **kwargs):\n platform = mega65.Platform()\n\n SoCCore.__init__(self, platform, clk_freq=sys_clk_freq,\n ident=\"MEGA65\", ident_version=True,\n integrated_rom_size=0x8000,\n integrated_main_ram_size=0x10000,\n **kwargs)\n\n\t# can we just use the clock without PLL ?\n\n self.submodules.crg = _CRG(platform, sys_clk_freq)\n self.counter = counter = Signal(32)\n self.sync += counter.eq(counter + 1)\n \n\t#\n led_red = platform.request(\"user_led\", 0)\n self.comb += led_red.eq(counter[23])\n\n# led_green = platform.request(\"user_led_green\")\n# self.comb += led_green.eq(counter[25])\n\n\n# hyperram_pads = platform.request(\"hyperram\")\n# self.submodules.hyperram = HyperRAM(hyperram_pads)\n# self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n# self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"] | self.shadow_base, 8*1024*1024)\n\n self.submodules.hyperram = HyperRAM(platform.request(\"hyperram\"))\n self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"], 8*1024*1024)\n\n\n\nclass EthernetSoC(BaseSoC):\n mem_map = {\n \"ethmac\": 0x30000000, # (shadow @0xb0000000)\n }\n mem_map.update(BaseSoC.mem_map)\n\n def __init__(self, **kwargs):\n BaseSoC.__init__(self, **kwargs)\n\n self.submodules.ethphy = LiteEthPHYRMII(self.platform.request(\"eth_clocks\"),\n self.platform.request(\"eth\"))\n self.add_csr(\"ethphy\")\n self.submodules.ethmac = LiteEthMAC(phy=self.ethphy, dw=32,\n interface=\"wishbone\", endianness=self.cpu.endianness)\n self.add_wb_slave(self.mem_map[\"ethmac\"], self.ethmac.bus, 0x2000)\n self.add_memory_region(\"ethmac\", self.mem_map[\"ethmac\"] | self.shadow_base, 0x2000)\n self.add_csr(\"ethmac\")\n self.add_interrupt(\"ethmac\")\n\n 
self.ethphy.crg.cd_eth_rx.clk.attr.add(\"keep\")\n self.ethphy.crg.cd_eth_tx.clk.attr.add(\"keep\")\n self.platform.add_period_constraint(self.ethphy.crg.cd_eth_rx.clk, 1e9/12.5e6)\n self.platform.add_period_constraint(self.ethphy.crg.cd_eth_tx.clk, 1e9/12.5e6)\n self.platform.add_false_path_constraints(\n self.crg.cd_sys.clk,\n self.ethphy.crg.cd_eth_rx.clk,\n self.ethphy.crg.cd_eth_tx.clk)\n\n\n# Build --------------------------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"LiteX on MEGA65\")\n builder_args(parser)\n# soc_sdram_args(parser)\n soc_core_args(parser)\n\n parser.add_argument(\"--with-ethernet\", action=\"store_true\",\n help=\"enable Ethernet support\")\n\n args = parser.parse_args()\n\n cls = EthernetSoC if args.with_ethernet else BaseSoC\n soc = cls(**soc_core_argdict(args))\n\n builder = Builder(soc, **builder_argdict(args))\n builder.build()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"litex_boards/partner/targets/mega65.py","file_name":"mega65.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"183807457","text":"import itertools\n\nimport allel\nimport msprime # type: ignore\nimport numpy as np\nimport pytest\nimport xarray as xr\nfrom allel import hudson_fst\n\nfrom sgkit import (\n Fst,\n Garud_H,\n Tajimas_D,\n count_cohort_alleles,\n count_variant_alleles,\n create_genotype_call_dataset,\n divergence,\n diversity,\n pbs,\n simulate_genotype_call_dataset,\n variables,\n)\nfrom sgkit.window import window\n\nfrom .test_aggregation import get_dataset\n\n\ndef ts_to_dataset(ts, chunks=None, samples=None):\n \"\"\"\n Convert the specified tskit tree sequence into an sgkit dataset.\n Note this just generates haploids for now. With msprime 1.0, we'll be\n able to generate diploid/whatever-ploid individuals easily.\n \"\"\"\n if samples is None:\n samples = ts.samples()\n tables = ts.dump_tables()\n alleles = []\n genotypes = []\n for var in ts.variants(samples=samples):\n alleles.append(var.alleles)\n genotypes.append(var.genotypes)\n alleles = np.array(alleles).astype(\"S\")\n genotypes = np.expand_dims(genotypes, axis=2)\n\n ds = create_genotype_call_dataset(\n variant_contig_names=[\"1\"],\n variant_contig=np.zeros(len(tables.sites), dtype=int),\n variant_position=tables.sites.position.astype(int),\n variant_allele=alleles,\n sample_id=np.array([f\"tsk_{u}\" for u in samples]).astype(\"U\"),\n call_genotype=genotypes,\n )\n if chunks is not None:\n ds = ds.chunk(dict(zip([\"variants\", \"samples\"], chunks)))\n return ds\n\n\ndef add_cohorts(ds, ts, n_cohorts=1, cohort_key_names=[\"cohorts_0\", \"cohorts_1\"]):\n subsets = np.array_split(ts.samples(), n_cohorts)\n sample_cohorts = np.concatenate(\n [np.full_like(subset, i) for i, subset in enumerate(subsets)]\n )\n ds[\"sample_cohort\"] = xr.DataArray(sample_cohorts, dims=\"samples\")\n if cohort_key_names is not None:\n cohort_names = [f\"co_{i}\" for i in range(n_cohorts)]\n coords = {k: cohort_names for k in cohort_key_names}\n ds = ds.assign_coords(coords)\n return ds, subsets\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1)])\n@pytest.mark.parametrize(\n \"cohort_allele_count\",\n [None, variables.cohort_allele_count, \"cohort_allele_count_non_default\"],\n)\ndef test_diversity(sample_size, chunks, cohort_allele_count):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, cohort_key_names=[\"cohorts\"]) # type: ignore[no-untyped-call]\n if cohort_allele_count is not None:\n ds = count_cohort_alleles(ds, merge=False).rename(\n {variables.cohort_allele_count: cohort_allele_count}\n )\n ds = ds.assign_coords({\"cohorts\": [\"co_0\"]})\n ds = diversity(ds, cohort_allele_count=cohort_allele_count)\n else:\n ds = ds.assign_coords({\"cohorts\": [\"co_0\"]})\n ds = diversity(ds)\n\n div = ds.stat_diversity.sum(axis=0, skipna=False).sel(cohorts=\"co_0\").values\n ts_div = ts.diversity(span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size\", [10])\ndef test_diversity__windowed(sample_size):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, cohort_key_names=[\"cohorts\"]) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = diversity(ds)\n div = ds[\"stat_diversity\"].sel(cohorts=\"co_0\").compute()\n\n # Calculate diversity using 
tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n ts_div = ts.diversity(windows=windows, span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n # Calculate diversity using scikit-allel moving_statistic\n # (Don't use windowed_diversity, since it treats the last window differently)\n ds = count_variant_alleles(ts_to_dataset(ts)) # type: ignore[no-untyped-call]\n ac = ds[\"variant_allele_count\"].values\n mpd = allel.mean_pairwise_difference(ac, fill=0)\n ska_div = allel.moving_statistic(mpd, np.sum, size=25)\n np.testing.assert_allclose(\n div[:-1], ska_div\n ) # scikit-allel has final window missing\n\n\ndef test_diversity__missing_call_genotype():\n ds = xr.Dataset()\n with pytest.raises(ValueError, match=\"call_genotype not present\"):\n diversity(ds)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1)])\ndef test_divergence(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = divergence(ds)\n div = ds.stat_divergence.sum(axis=0, skipna=False).values\n\n # entries on the diagonal are diversity values\n for i in range(n_cohorts):\n ts_div = ts.diversity([subsets[i]], span_normalise=False)\n np.testing.assert_allclose(div[i, i], ts_div)\n\n # test off-diagonal entries, by replacing diagonal with NaNs\n np.fill_diagonal(div, np.nan)\n ts_div = np.full([n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_div[i, j] = ts.divergence([subsets[i], subsets[j]], span_normalise=False)\n ts_div[j, i] = ts.divergence([subsets[j], subsets[i]], span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size, n_cohorts\", [(10, 2)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef test_divergence__windowed(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = divergence(ds)\n div = ds[\"stat_divergence\"].values\n # test off-diagonal entries, by replacing diagonal with NaNs\n div[:, np.arange(2), np.arange(2)] = np.nan\n\n # Calculate diversity using tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n n_windows = len(windows) - 1\n ts_div = np.full([n_windows, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_div[:, i, j] = ts.divergence(\n [subsets[i], subsets[j]], windows=windows, span_normalise=False\n )\n ts_div[:, j, i] = ts_div[:, i, j]\n np.testing.assert_allclose(div, ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size, n_cohorts\", [(10, 2)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\n@pytest.mark.xfail() # combine with test_divergence__windowed when this is passing\ndef 
test_divergence__windowed_scikit_allel_comparison(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = divergence(ds)\n div = ds[\"stat_divergence\"].values\n # test off-diagonal entries, by replacing diagonal with NaNs\n div[:, np.arange(2), np.arange(2)] = np.nan\n\n # Calculate divergence using scikit-allel moving_statistic\n # (Don't use windowed_divergence, since it treats the last window differently)\n ds1 = count_variant_alleles(ts_to_dataset(ts, samples=ts.samples()[:1])) # type: ignore[no-untyped-call]\n ds2 = count_variant_alleles(ts_to_dataset(ts, samples=ts.samples()[1:])) # type: ignore[no-untyped-call]\n ac1 = ds1[\"variant_allele_count\"].values\n ac2 = ds2[\"variant_allele_count\"].values\n mpd = allel.mean_pairwise_difference_between(ac1, ac2, fill=0)\n ska_div = allel.moving_statistic(mpd, np.sum, size=25) # noqa: F841\n # TODO: investigate why numbers are different\n np.testing.assert_allclose(\n div[:-1], ska_div\n ) # scikit-allel has final window missing\n\n\ndef test_divergence__missing_calls():\n ds = get_dataset(\n [\n [[0, 0], [-1, -1], [-1, -1]], # all of cohort 1 calls are missing\n ]\n )\n ds[\"sample_cohort\"] = xr.DataArray(np.array([0, 1, 1]), dims=\"samples\")\n ds = divergence(ds)\n np.testing.assert_equal(ds[\"stat_divergence\"].values[0, 1], np.nan)\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\ndef test_Fst__Hudson(sample_size):\n # scikit-allel can only calculate Fst for pairs of cohorts (populations)\n n_cohorts = 2\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Fst(ds, estimator=\"Hudson\")\n fst = ds.stat_Fst.sel(cohorts_0=\"co_0\", cohorts_1=\"co_1\").values\n\n # scikit-allel\n ac1 = ds.cohort_allele_count.values[:, 0, :]\n ac2 = ds.cohort_allele_count.values[:, 1, :]\n num, den = hudson_fst(ac1, ac2)\n ska_fst = np.sum(num) / np.sum(den)\n\n np.testing.assert_allclose(fst, ska_fst)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],\n)\ndef test_Fst__Nei(sample_size, n_cohorts):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Fst(ds, estimator=\"Nei\")\n fst = ds.stat_Fst.values\n\n ts_fst = np.full([1, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_fst[0, i, j] = ts.Fst([subsets[i], subsets[j]])\n ts_fst[0, j, i] = ts_fst[0, i, j]\n np.testing.assert_allclose(fst, ts_fst)\n\n\ndef test_Fst__unknown_estimator():\n ts = msprime.simulate(2, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n with pytest.raises(\n ValueError, match=\"Estimator 'Unknown' is not a known estimator\"\n ):\n Fst(ds, estimator=\"Unknown\")\n\n\n@pytest.mark.parametrize(\n \"sample_size, 
n_cohorts\",\n [(10, 2), (10, 3)],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef test_Fst__windowed(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n fst_ds = Fst(ds, estimator=\"Nei\")\n fst = fst_ds[\"stat_Fst\"].values\n\n # Calculate Fst using tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n n_windows = len(windows) - 1\n ts_fst = np.full([n_windows, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_fst[:, i, j] = ts.Fst(\n [subsets[i], subsets[j]], windows=windows, span_normalise=False\n )\n ts_fst[:, j, i] = ts_fst[:, i, j]\n\n np.testing.assert_allclose(fst, ts_fst)\n\n # scikit-allel\n fst_ds = Fst(ds, estimator=\"Hudson\")\n for i, j in itertools.combinations(range(n_cohorts), 2):\n fst = fst_ds[\"stat_Fst\"].sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\").values\n\n ac_i = fst_ds.cohort_allele_count.values[:, i, :]\n ac_j = fst_ds.cohort_allele_count.values[:, j, :]\n ska_fst = allel.moving_hudson_fst(ac_i, ac_j, size=25)\n\n np.testing.assert_allclose(\n fst[:-1], ska_fst\n ) # scikit-allel has final window missing\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\ndef test_Tajimas_D(sample_size):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, cohort_key_names=None) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Tajimas_D(ds)\n d = ds.stat_Tajimas_D.compute()\n ts_d = ts.Tajimas_D()\n np.testing.assert_allclose(d, ts_d)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(10, 3), (20, 4)],\n)\ndef test_pbs(sample_size, n_cohorts):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts, cohort_key_names=[\"cohorts_0\", \"cohorts_1\", \"cohorts_2\"]) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n\n ds = pbs(ds)\n\n # scikit-allel\n for i, j, k in itertools.combinations(range(n_cohorts), 3):\n stat_pbs = (\n ds[\"stat_pbs\"]\n .sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\", cohorts_2=f\"co_{k}\")\n .values\n )\n\n ac_i = ds.cohort_allele_count.values[:, i, :]\n ac_j = ds.cohort_allele_count.values[:, j, :]\n ac_k = ds.cohort_allele_count.values[:, k, :]\n\n ska_pbs_value = allel.pbs(ac_i, ac_j, ac_k, window_size=n_variants)\n\n np.testing.assert_allclose(stat_pbs, ska_pbs_value)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts, cohorts, cohort_indexes\",\n [\n (10, 3, None, None),\n (20, 4, None, None),\n (20, 4, [(0, 1, 2), (3, 1, 2)], [(0, 1, 2), (3, 1, 2)]),\n ],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef test_pbs__windowed(sample_size, n_cohorts, cohorts, cohort_indexes, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, 
subsets = add_cohorts(ds, ts, n_cohorts, cohort_key_names=[\"cohorts_0\", \"cohorts_1\", \"cohorts_2\"]) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n\n ds = pbs(ds, cohorts=cohorts)\n\n # scikit-allel\n for i, j, k in itertools.combinations(range(n_cohorts), 3):\n stat_pbs = (\n ds[\"stat_pbs\"]\n .sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\", cohorts_2=f\"co_{k}\")\n .values\n )\n\n if cohort_indexes is not None and (i, j, k) not in cohort_indexes:\n np.testing.assert_array_equal(stat_pbs, np.full_like(stat_pbs, np.nan))\n else:\n ac_i = ds.cohort_allele_count.values[:, i, :]\n ac_j = ds.cohort_allele_count.values[:, j, :]\n ac_k = ds.cohort_allele_count.values[:, k, :]\n\n ska_pbs_value = allel.pbs(ac_i, ac_j, ac_k, window_size=25)\n\n # scikit-allel has final window missing\n np.testing.assert_allclose(stat_pbs[:-1], ska_pbs_value)\n\n\n@pytest.mark.parametrize(\n \"n_variants, n_samples, n_contigs, n_cohorts, cohorts, cohort_indexes\",\n [\n (9, 5, 1, 1, None, None),\n (9, 5, 1, 2, None, None),\n (9, 5, 1, 2, [1], [1]),\n (9, 5, 1, 2, [\"co_1\"], [1]),\n ],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (5, -1)])\ndef test_Garud_h(\n n_variants, n_samples, n_contigs, n_cohorts, cohorts, cohort_indexes, chunks\n):\n ds = simulate_genotype_call_dataset(\n n_variant=n_variants, n_sample=n_samples, n_contig=n_contigs\n )\n ds = ds.chunk(dict(zip([\"variants\", \"samples\"], chunks)))\n subsets = np.array_split(ds.samples.values, n_cohorts)\n sample_cohorts = np.concatenate(\n [np.full_like(subset, i) for i, subset in enumerate(subsets)]\n )\n ds[\"sample_cohort\"] = xr.DataArray(sample_cohorts, dims=\"samples\")\n cohort_names = [f\"co_{i}\" for i in range(n_cohorts)]\n coords = {k: cohort_names for k in [\"cohorts\"]}\n ds = ds.assign_coords(coords) # type: ignore[no-untyped-call]\n ds = window(ds, size=3)\n\n gh = Garud_H(ds, cohorts=cohorts)\n h1 = gh.stat_Garud_h1.values\n h12 = gh.stat_Garud_h12.values\n h123 = gh.stat_Garud_h123.values\n h2_h1 = gh.stat_Garud_h2_h1.values\n\n # scikit-allel\n for c in range(n_cohorts):\n if cohort_indexes is not None and c not in cohort_indexes:\n # cohorts that were not computed should be nan\n np.testing.assert_array_equal(h1[:, c], np.full_like(h1[:, c], np.nan))\n np.testing.assert_array_equal(h12[:, c], np.full_like(h12[:, c], np.nan))\n np.testing.assert_array_equal(h123[:, c], np.full_like(h123[:, c], np.nan))\n np.testing.assert_array_equal(\n h2_h1[:, c], np.full_like(h2_h1[:, c], np.nan)\n )\n else:\n gt = ds.call_genotype.values[:, sample_cohorts == c, :]\n ska_gt = allel.GenotypeArray(gt)\n ska_ha = ska_gt.to_haplotypes()\n ska_h = allel.moving_garud_h(ska_ha, size=3)\n\n np.testing.assert_allclose(h1[:, c], ska_h[0])\n np.testing.assert_allclose(h12[:, c], ska_h[1])\n np.testing.assert_allclose(h123[:, c], ska_h[2])\n np.testing.assert_allclose(h2_h1[:, c], ska_h[3])\n\n\ndef test_Garud_h__raise_on_non_diploid():\n ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10, n_ploidy=3)\n with pytest.raises(\n NotImplementedError, match=\"Garud H only implemented for diploid genotypes\"\n ):\n Garud_H(ds)\n\n\ndef test_Garud_h__raise_on_no_windows():\n ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10)\n\n with pytest.raises(ValueError, match=\"Dataset must be windowed for Garud_H\"):\n 
Garud_H(ds)\n","sub_path":"sgkit/tests/test_popgen.py","file_name":"test_popgen.py","file_ext":"py","file_size_in_byte":18434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"332573186","text":"import pkg_resources\npkg_resources.require('mock')\npkg_resources.require('fa-archiver')\npkg_resources.require('pml')\n\nimport numpy\nimport unittest\nimport jump_bba\nimport pml\nimport aphla as ap\nimport mock\n\n\nclass SelectDataTest(unittest.TestCase):\n\n def setUp(self):\n self.data = numpy.zeros((2000,2,2))\n self.data[:,0,0] = numpy.arange(2000)\n self.data[:,0,1] = numpy.arange(2000)\n self.data[100,1,0] = 3\n self.data[1100,1,1] = 4\n self.exc_high = mock.MagicMock(count=1000, start_time=100)\n self.exc_low = mock.MagicMock(count=1000, start_time=1100)\n\n def test_select_data_throws_AssertionError_if_exc_high_low_different_counts(self):\n self.exc_low.count = 101\n self.assertRaises(AssertionError, jump_bba.select_data, self.data,\n pml.X, self.exc_high, self.exc_low)\n\n def test_select_data_returns_correct_shape(self):\n high_data, low_data = jump_bba.select_data(self.data, pml.X,\n self.exc_high, self.exc_low)\n expected_shape = (100, 1)\n self.assertEqual(high_data.shape, expected_shape)\n self.assertEqual(low_data.shape, expected_shape)\n\n def test_select_data_selects_first_timestamp(self):\n high_data_x, _ = jump_bba.select_data(self.data, pml.X,\n self.exc_high, self.exc_low)\n _, low_data_y = jump_bba.select_data(self.data, pml.Y,\n self.exc_high, self.exc_low)\n self.assertEqual(high_data_x[0,0], 3)\n self.assertEqual(low_data_y[0,0], 4)\n\n\nclass TestJumpBba(unittest.TestCase):\n\n def setUp(self):\n pml.initialise()\n\n @mock.patch('pml.excite.caput')\n @mock.patch('jump_bba.caget')\n @mock.patch('jump_bba.caput')\n def test_jump_bba_sets_expected_pvs(self, jump_caput, jump_caget, excite_caput):\n jump_caget.return_value = 10\n quad = ap.getElements('QUAD')[0]\n print(quad.pv())\n # one 1Hz cycle\n osc = pml.excite.Oscillation(1, 0, 1, 1)\n jump_bba.jump_bba(quad, 1, osc)\n\n jump_caput.assert_has_calls([mock.call('SR01A-PC-Q1D-01:SETI', 10.5),\n mock.call('SR01A-PC-Q1D-01:SETI', 9.5),\n mock.call('SR01A-PC-Q1D-01:SETI', 10)])\n\n # Note you can assert excite_caput's calls to be [] and it will tell\n # what they actually were.\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_jump_bba.py","file_name":"test_jump_bba.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"192043664","text":"import urllib\ndef read_textProfanity():\n\tquotes = open(\"./example_text.txt\")\n\tcontent_file = quotes.read()\n\tprint(content_file)\n\tquotes.close()\n\tcheck_profanity(content_file)\n\n\ndef check_profanity(text_check):\n\tconnection = urllib.urlopen(\"http://www.wdyl.com/profanity?q=\"+text_check)\n\toutput = connection.read()\n\tprint(output)\n\tconnection.close()\nread_textProfanity()\n","sub_path":"check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"71169823","text":"import os\nimport sys\nimport six\nfrom bayesdend.autoencoder import *\nfrom bayesdend.utils.data_util import load_real_data, read_config, load_results\n\n\"\"\"Only change this\"\"\"\nname_of_exp = 'dend36_soma'\n\n# replicate several important parameters\nconfig = read_config(name_of_exp)\ndata_file = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'data_clean.pkl')\nos.environ['CUDA_VISIBLE_DEVICES'] = str(config['GPU'])\ndata_dict = load_real_data(data_file)\nT_total = data_dict['traces'].shape[0]\n\nif config['model_mode'] == 'soma_only':\n\n data_feed = {'traces': data_dict['traces'][:, :10],\n 'noises': data_dict['noises'][:, :10],\n 'masks': data_dict['masks'][:, :10]}\n\nelse:\n spine_ind = data_dict['n_soma'] + data_dict['n_dend'] + 1\n inf_result_path = '{}/{}/{}'.format(config['save_path'], 'dend36_soma', 'results.pkl')\n inf_result = load_results(inf_result_path)\n inf_spikes = np.expand_dims(inf_result['spikes'], axis=1)\n data_feed = {'traces': data_dict['traces'][:, spine_ind:spine_ind + 1],\n 'noises': data_dict['noises'][:, spine_ind:spine_ind + 1],\n 'masks': data_dict['masks'][:, spine_ind:spine_ind + 1],\n 'soma_spikes': inf_spikes}\n\ndata_feed_batch = {}\nfor key, value in six.iteritems(data_feed):\n data_feed_batch[key] = np.expand_dims(data_feed[key], axis=0)\n\nNc = data_feed['traces'].shape[1]\nupsample = config['upsample']\nfr = config['firing_rate']\nnoise_flag = config['noise_flag']\n\n# rebuild the model\nmodel = AutoEncoder(config['model_mode'], 1, T_total, Nc, fr, noise_flag, upsample, False)\n\n# restore model\nsess = tf.InteractiveSession()\nrestorer = tf.train.Saver()\nmodel_filename = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'model')\nrestorer.restore(sess, model_filename)\nprint(\"Model resotred.\")\n\ntest_fd = {model.data_ph[key]: value\n for key, value in six.iteritems(data_feed_batch)}\n\n# inference and reconstruction\nspike_inf, r_e_d, r_v_d, r_e, r_v, log_noise_est, loss = sess.run([\n model.Q.mean(), model.Erecon_d, model.Vrecon_d, model.Erecon, model.Vrecon,\n model.log_noise, model.obj], test_fd)\n\n# generative filters\nif config['model_mode'] == 'soma_only':\n filters, baseline, scaling = sess.run([model.spike_filters,\n model.b, model.A], test_fd)\nelse:\n filters, baseline, scaling, prop_factor = sess.run([model.spike_filters,\n model.b, model.A, model.prop_factor], test_fd)\n\nspike_inf = np.squeeze(spike_inf)\nr_e_d = np.squeeze(r_e_d)\nr_v_d = np.squeeze(r_v_d)\nr_e = np.squeeze(r_e)\nr_v = np.squeeze(r_v)\nfilters = np.squeeze(filters)\nlog_noise_est = np.squeeze(log_noise_est)\n\nfinal_dict = {'spikes': spike_inf,\n 'rec_mean': r_e,\n 'rec_var': r_v,\n 'rec_mean_d': r_e_d,\n 'rec_var_d': r_v_d,\n 'log_noise_est': log_noise_est,\n 'filters': filters,\n 'baseline': baseline,\n 'scaling': scaling,\n 'loss': loss}\n\nif config['model_mode'] == 'spine_only':\n final_dict['prop_factor'] = prop_factor\n\nto_save = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'results.pkl')\nwith open(to_save, 'wb') as f:\n pickle.dump(final_dict, f)\nprint('Results saved.')\n","sub_path":"bayesdend/data_post.py","file_name":"data_post.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"288257725","text":"'''\n\n intro to TensorFlow variables\n\n'''\n\nimport tensorflow as tf\n\n# another y = Wx + b example\n\nW = tf.Variable([2.50, 4.0], tf.float32, name='var_W')\nx = tf.placeholder(tf.float32, name='x')\nb = tf.Variable([5.0, 10.0], tf.float32, name='var_b')\n\ny = W * x + b\n\n# because we have variables in the mix now, we must initialize them before\n# starting the Session\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n # this is a required line to initialize the global init object\n sess.run(init)\n print('y = {}'.format(\n sess.run(\n y,\n feed_dict={\n x:[10, 100]\n }\n )\n ))\n\n\n# new session\nnumber = tf.Variable(2)\nmultiplier = tf.Variable(1)\n\ninit = tf.global_variables_initializer()\n\n# in this line of code, result is the computation node (tensor) while number\n# will contain the value\nresult = number.assign(tf.multiply(number, multiplier))\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(10):\n # sess.run(result) will execute: number *= multiplier\n print('result = number {} * multiplier {} = {}'.format(\n sess.run(number),\n sess.run(multiplier),\n sess.run(\n result\n )))\n # in this loop the multiplier is incremented\n print('Increment multiplier, new value = {}'.format(\n sess.run(\n multiplier.assign_add(1)\n )))\n","sub_path":"foundations/fundamentals/tf_variables.py","file_name":"tf_variables.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"140613387","text":"import click\nfrom model.db_model import Region\nfrom .spider import get_projects, get_rooms\n\n\n@click.group(chain=True)\n@click.pass_context\ndef cli(ctx):\n if ctx.invoked_subcommand is None:\n click.echo('please use a command!')\n\n\n@cli.command()\n@click.argument('region', nargs=1)\ndef sync_project(region):\n if not Region.region_map.get(region):\n click.echo(f'region({region}) code not found!')\n return\n get_projects(region)\n click.echo(f'sync {Region.region_map.get(region)}({region}) successful!')\n\n\n@cli.command()\n@click.argument('building_id', nargs=1)\ndef sync_rooms(building_id):\n get_rooms(building_id)\n click.echo(f'sync {building_id} successful!')\n\n\nif __name__ == \"__main__\":\n cli() # pylint: disable=no-value-for-parameter\n","sub_path":"task/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"263163248","text":"from utils.LoadStructuralData import load_struct_data\nfrom utils.LoadFunctionalData import load_funct_data\nfrom gat_impl.ExecuteGAT import GATModel\nfrom keras.activations import relu\nimport numpy as np\nimport os\nimport pickle as pkl\nimport itertools\nimport math\nimport random\nimport pprint\n\ncached_data = {}\ngat_result_dir = os.path.join(os.path.dirname(os.path.join(os.path.dirname(__file__))), 'Results', 'GAT_results')\nif not os.path.exists(gat_result_dir):\n os.makedirs(gat_result_dir)\n\n\n# utility class for storing together the hyper-parameters of a GAT model into an object\nclass ConfigGAT(object):\n\n def __init__(self, updated_params=None):\n '''\n Initialize the object embodying the configuration of a GAT model.\n :param updated_params: specific hyper-parameters used by current GAT configuration\n '''\n self.params = {\n # architecture hyper-parameters\n 'name': 'GAT',\n 'hidden_units': [30, 20, 15],\n 'attention_heads': [3, 3, 2],\n 'include_ew': False,\n 'readout_aggregator': GATModel.master_node_aggregator,\n 'use_batch_norm': True,\n 'non_linearity': relu,\n # training hyper.\n 'load_specific_data': load_struct_data,\n 'pers_traits_selection': ['NEO.NEOFAC_A', 'NEO.NEOFAC_C', 'NEO.NEOFAC_E', 'NEO.NEOFAC_N', 'NEO.NEOFAC_O'],\n 'learning_rate': 0.0001,\n 'decay_rate': 0.0005,\n 'attn_drop': 0.6,\n 'batch_size': 32,\n 'functional_dim': 50,\n 'scan_session': 1,\n 'num_epochs': 250,\n 'pq_threshold': np.inf,\n 'train_prog_threshold': 0.1,\n 'k_strip_epochs': 5,\n # nested CV hyper.\n 'nested_CV_level': 'outer',\n 'k_outer': 5,\n 'k_inner': 5,\n 'eval_fold_out': 4,\n 'eval_fold_in': 1}\n\n # update the default hyper-parameters\n self.update(update_hyper=updated_params)\n if type(self.params['pers_traits_selection']) is not list:\n raise ValueError('The trait selection should be list, not %s' % type(self.params['pers_traits_selection']))\n if self.params['nested_CV_level'] not in {'inner', 'outer'}:\n raise ValueError('Possible CV levels: inner, outer')\n if self.params['name'] != 'GAT':\n raise ValueError('Name of the Graph Attention Network is GAT, not %s' % self.params['name'])\n if len(self.params['attention_heads']) != len(self.params['hidden_units']):\n raise ValueError('Attention heads and hidden units are not specified for the same nr. 
of layers')\n # values for the PQ threshold:\n pq_thresholds = {GATModel.master_node_aggregator: 0.05,\n GATModel.concat_feature_aggregator: 0.25,\n GATModel.average_feature_aggregator: 0.5}\n self.params['pq_threshold'] = pq_thresholds[self.params['readout_aggregator']]\n # keep a fixed order on the personality traits so we can decode when predicting them all at once\n self.params['pers_traits_selection'] = sorted(self.params['pers_traits_selection'])\n self.params['target_score_type'] = len(self.params['pers_traits_selection'])\n\n def __str__(self):\n '''\n Produces a unique string identifier of the current GAT model.\n :return: str of the name of the model, including the nested CV parameters\n '''\n str_dataset = 'GAT_%s' % self.params['load_specific_data'].__name__.split('_')[1]\n str_dim_sess = 'DIM_%d_SESS_%d' % (self.params['functional_dim'], self.params['scan_session'])\n str_attn_heads = 'AH_%s' % \",\".join(map(str, self.params['attention_heads']))\n str_hid_units = 'HU_%s' % \",\".join(map(str, self.params['hidden_units']))\n str_traits = 'PT_%s' % self.get_summarized_traits()\n str_aggregator = 'AGR_%s' % self.params['readout_aggregator'].__name__.split('_')[0]\n str_include_ew = 'IW_%r' % self.params['include_ew']\n str_batch_sz = 'BS_%d' % self.params['batch_size']\n str_dropout = 'DROP_%s' % str(self.params['attn_drop'])\n str_learn_rate = 'LR_%s' % str(self.params['learning_rate'])\n str_decay_rate = 'DR_%s' % str(self.params['decay_rate'])\n str_cross_val = 'CV_%d%d%s' % (self.params['eval_fold_in'], self.params['eval_fold_out'], self.params[\n 'nested_CV_level'])\n\n str_params = [str_dataset, str_dim_sess, str_attn_heads, str_hid_units, str_traits, str_aggregator,\n str_include_ew, str_batch_sz, str_dropout, str_learn_rate, str_decay_rate, str_cross_val]\n if self.params['load_specific_data'] is load_struct_data:\n str_params.remove(str_dim_sess)\n return '_'.join(str_params)\n\n def print_model_details(self):\n '''\n Prints the details of the current GAT model as hyper-parameters of architecture, training process and nested CV\n :return: void\n '''\n params = self.params\n print('Name of the current GAT model is %s' % self)\n if params['load_specific_data'] == load_struct_data:\n print('Dataset: structural HCP graphs')\n else:\n print('Dataset: functional HCP graphs')\n print('Dimension of graphs: %d and session: %d' % (params['functional_dim'], params['scan_session']))\n print('----- Opt. hyperparams -----')\n print('batch size: ' + str(params['batch_size']))\n print('number of training epochs: ' + str(params['num_epochs']))\n print('lr: ' + str(params['learning_rate']))\n print('l2_coef: ' + str(params['decay_rate']))\n print('droput rate ' + str(params['attn_drop']))\n print('using batch normalization ' + str(params['use_batch_norm']))\n print('----- Archi. hyperparams -----')\n print('nb. layers: ' + str(len(params['hidden_units'])))\n print('nb. units per layer: ' + str(params['hidden_units']))\n print('nb. attention heads: ' + str(params['attention_heads']))\n print('aggregation strategy: ' + str(params['readout_aggregator']))\n print('including edge weights: ' + str(params['include_ew']))\n print('nonlinearity: ' + str(params['non_linearity']))\n print('----- Cross-Validation params. 
-----')\n print('Nested-CV level: ' + self.params['nested_CV_level'])\n print('Inner split: ' + str(self.params['k_inner']))\n print('Outer split: ' + str(self.params['k_outer']))\n print('Outer evaluation fold id: ' + str(self.params['eval_fold_out']))\n print('Inner evaluation fold id: ' + str(self.params['eval_fold_in']))\n\n def get_name(self):\n '''\n Get the name of the GAT model discarding the hyper-parameters of the Nested Cross Validation.\n :return: str of the base name of the model\n '''\n import re\n return re.compile(re.escape('_CV') + '.*').sub('', re.sub(r\"PT_[A-Z]{1,5}_\", \"\", str(self)))\n\n def get_summarized_traits(self):\n '''\n Summarize the names of the traits targeted at once.\n :return: str of the concatenation of the trait names without the common prefixes\n '''\n return ''.join(self.params['pers_traits_selection']).replace('NEO.NEOFAC_', '')\n\n def update(self, update_hyper):\n '''\n Updates the default hyper-parameters of the GAT configuration object\n :param update_hyper: dict of new hyper-parameters\n :return: void, it's changing the internal state of the object\n '''\n if update_hyper is not None:\n self.params.update(update_hyper)\n\n def load_data(self):\n '''\n Load the entire dataset specified by the load_specific_dataset parameter of the configuration. Keep it\n in main memory in a global variable in case future models are trained/evaluated on it during the same run.\n :return:\n '''\n global cached_data\n loader_data = self.params['load_specific_data']\n trait_choice = self.get_summarized_traits()\n if loader_data in cached_data.keys():\n if trait_choice in cached_data[loader_data].keys():\n return cached_data[loader_data][trait_choice]\n else:\n uncached_data = loader_data(self.params)\n cached_data[loader_data][trait_choice] = uncached_data\n return uncached_data\n else:\n cached_data[loader_data] = {}\n uncached_data = loader_data(self.params)\n cached_data[loader_data][trait_choice] = uncached_data\n return uncached_data\n\n def checkpoint_file(self):\n '''\n Retrieves the path to the checkpoint file where the model (its parameters) is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'checkpoint_' + str(self) + '.h5')\n\n def logs_file(self):\n '''\n Retrieves the path to the logs file where the training history of the model is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'logs_' + str(self) + '.pck')\n\n def results_file(self):\n '''\n Retrieves the path to the results file where the evaluation data: test loss, predictions is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'predictions_' + str(self))\n\n def get_results(self):\n '''\n Retrieve the results of the model.\n :return: dict with evaluation results: losses, metrics, predictions\n '''\n results = None\n if os.path.exists(self.results_file()):\n with open(self.results_file(), 'rb') as result_fp:\n results = pkl.load(result_fp)\n return results\n\n @staticmethod\n def get_sampled_models(max_samples=19200, no_layers=3, **kwargs):\n '''\n Samples a pre-defined number of GAT configurations for the inner CV of the nested CV phase\n :param max_samples: maximum number of sampled models\n :param kwargs: compatibility with the sampling function of baseline models\n :param no_layers: number of layers of the sampled configurations\n :return: dict of hyper-parameters choices to be converted to a Grid Search\n '''\n samples_file = os.path.join(os.path.dirname(os.path.join(os.path.dirname(__file__))), 
'Results',\n 'gat_sampled_models.pck')\n if os.path.exists(samples_file):\n with open(samples_file, 'rb') as handle:\n choices = pkl.load(handle)\n return choices\n choices = {\n 'learning_rate': [0.005, 0.001, 0.0005, 0.0001],\n 'decay_rate': [0.0005],\n 'attn_drop': [0.2, 0.4, 0.6, 0.8],\n 'readout_aggregator': [GATModel.average_feature_aggregator,\n GATModel.master_node_aggregator,\n GATModel.concat_feature_aggregator],\n 'load_specific_data': [load_struct_data, load_funct_data],\n 'include_ew': [True, False],\n 'batch_size': [32]}\n models_so_far = np.prod(np.array([len(choices[x]) for x in choices.keys()])) * 5 * 5\n sampling_left = math.floor(max_samples / models_so_far)\n sample_ah = list(itertools.product(range(3, 7), repeat=no_layers))\n sample_hu = list(itertools.product(range(12, 48), repeat=no_layers))\n\n def check_feat_expansion(ah_hu_choice):\n '''\n Checks if the particular choice of attention heads and units per GAT layer follows an expansion approach\n of the node features' dimensionality\n :param ah_hu_choice: tuple of two lists of the choices of attention heads and hidden units\n :return: bool, the validity of the choice\n '''\n for i in range(1, no_layers - 1):\n if ah_hu_choice[0][i] * ah_hu_choice[1][i] > ah_hu_choice[0][i - 1] * ah_hu_choice[1][i - 1]:\n return False\n # the last GAT layer averages node features (no multiplication with no of attention heads)\n if ah_hu_choice[1][-1] > ah_hu_choice[0][-2] * ah_hu_choice[1][-2]:\n return False\n return True\n\n valid_ah_hu = set(filter(lambda ah_hu_choice: check_feat_expansion(ah_hu_choice),\n list(itertools.product(sample_ah, sample_hu))))\n choices['arch_width'] = list(map(lambda x: [list(x[0]), list(x[1])], random.sample(valid_ah_hu, sampling_left)))\n with open(samples_file, 'wb') as handle:\n pkl.dump(choices, handle)\n\n return choices\n\n\nif __name__ == \"__main__\":\n pprint.pprint(ConfigGAT.get_sampled_models())\n","sub_path":"gat_impl/ConfigGAT.py","file_name":"ConfigGAT.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"126215532","text":"#!/usr/bin/env python\n#\n# radiosonde_auto_rx - Configuration File Reader\n#\n# Copyright (C) 2018 Mark Jessop \n# Released under GNU GPL v3 or later\n#\n\nimport copy\nimport logging\nimport traceback\nimport json\nfrom .utils import rtlsdr_test\n\n# Dummy initial config with some parameters we need to make the web interface happy.\nglobal_config = {'min_freq':400.0,'max_freq':403.0,'snr_threshold':10,'station_lat':0.0,'station_lon':0.0}\n\ntry:\n # Python 2\n from ConfigParser import RawConfigParser\nexcept ImportError:\n # Python 3\n from configparser import RawConfigParser\n\ndef read_auto_rx_config(filename):\n\t\"\"\" Read an Auto-RX v2 Station Configuration File.\n\n\tThis function will attempt to parse a configuration file.\n\tIt will also confirm the accessibility of any SDRs specified in the config file.\n\n\tArgs:\n\t\tfilename (str): Filename of the configuration file to read.\n\n\tReturns:\n\t\tauto_rx_config (dict): The configuration dictionary.\n\t\tsdr_config (dict): A dictionary with SDR parameters.\n\t\"\"\"\n\tglobal global_config\n\t# Configuration Defaults:\n\tauto_rx_config = {\n\t\t# Log Settings\n\t\t'per_sonde_log' : True,\n # Email Settings\n 'email_enabled': False,\n 'email_smtp_server': 'localhost',\n 'email_from': 'sonde@localhost',\n 'email_to': None,\n\t\t# SDR Settings\n\t\t'sdr_fm': 'rtl_fm',\n\t\t'sdr_power': 'rtl_power',\n\t\t'sdr_quantity': 1,\n\t\t# Search Parameters\n\t\t'min_freq'\t\t: 400.4,\n\t\t'max_freq'\t\t: 404.0,\n\t\t'rx_timeout'\t: 120,\n\t\t'whitelist'\t: [],\n\t\t'blacklist'\t: [],\n\t\t'greylist'\t: [],\n\t\t# Location Settings\n\t\t'station_lat'\t: 0.0,\n\t\t'station_lon'\t: 0.0,\n\t\t'station_alt'\t: 0.0,\n\t\t# Position Filter Settings\n\t\t'max_altitude'\t: 50000,\n\t\t'max_radius_km'\t: 1000,\n\t\t# Habitat Settings\n\t\t'habitat_enabled': False,\n\t\t'habitat_upload_rate': 30,\n\t\t'habitat_uploader_callsign': 'SONDE_AUTO_RX',\n\t\t'habitat_uploader_antenna': '1/4-wave',\n\t\t'habitat_upload_listener_position': False,\n\t\t'habitat_payload_callsign': '',\n\t\t# APRS Settings\n\t\t'aprs_enabled'\t: False,\n\t\t'aprs_upload_rate': 30,\n\t\t'aprs_user'\t\t: 'N0CALL',\n\t\t'aprs_pass'\t\t: '00000',\n\t\t'aprs_server'\t: 'rotate.aprs2.net',\n\t\t'aprs_object_id': '',\n\t\t'aprs_custom_comment': 'Radiosonde Auto-RX ',\n\t\t# Web Settings,\n\t\t'web_port'\t\t: 5000,\n\t\t'web_archive_age': 120,\n\t\t# Advanced Parameters\n\t\t'search_step'\t: 800,\n\t\t'snr_threshold'\t\t: 10,\n\t\t'min_distance'\t: 1000,\n\t\t'dwell_time'\t: 10,\n\t\t'max_peaks'\t\t: 10,\n\t\t'quantization'\t: 10000,\n\t\t'synchronous_upload' : False,\n\t\t'scan_dwell_time' : 20,\n\t\t'detect_dwell_time' : 5,\n\t\t'scan_delay' : 10,\n\t\t'payload_id_valid' : 5, \n\t\t# Rotator Settings\n\t\t'enable_rotator': False,\n\t\t'rotator_hostname': '127.0.0.1',\n\t\t'rotator_port'\t: 4533,\n\t\t'rotator_homing_enabled': False,\n\t\t'rotator_home_azimuth': 0,\n\t\t'rotator_home_elevation': 0,\n\t\t# OziExplorer Settings\n\t\t'ozi_enabled'\t: False,\n\t\t'ozi_update_rate': 5,\n\t\t'ozi_port'\t\t: 55681,\n\t\t'payload_summary_enabled': False,\n\t\t'payload_summary_port' : 55672\n\t}\n\n\tsdr_settings = {}#'0':{'ppm':0, 'gain':-1, 'bias': False}}\n\n\ttry:\n\t\tconfig = RawConfigParser(auto_rx_config)\n\t\tconfig.read(filename)\n\n\t\t# Log Settings\n\t\tauto_rx_config['per_sonde_log'] = config.getboolean('logging', 'per_sonde_log')\n\n # Email Settings\n\t\tif config.has_option('email', 
'email_enabled'):\n\t\t\ttry:\n\t\t\t\tauto_rx_config['email_enabled'] = config.getboolean('email', 'email_enabled')\n\t\t\t\tauto_rx_config['email_smtp_server'] = config.get('email', 'smtp_server')\n\t\t\t\tauto_rx_config['email_from'] = config.get('email', 'from')\n\t\t\t\tauto_rx_config['email_to'] = config.get('email', 'to')\n\t\t\texcept:\n\t\t\t\tlogging.error(\"Config - Invalid email settings. Disabling.\")\n\t\t\t\tauto_rx_config['email_enabled'] = False\n\n\t\t# SDR Settings\n\t\tauto_rx_config['sdr_fm'] = config.get('advanced', 'sdr_fm_path')\n\t\tauto_rx_config['sdr_power'] = config.get('advanced', 'sdr_power_path')\n\t\tauto_rx_config['sdr_quantity'] = config.getint('sdr', 'sdr_quantity')\n\n\t\t# Search Parameters\n\t\tauto_rx_config['min_freq'] = config.getfloat('search_params', 'min_freq')\n\t\tauto_rx_config['max_freq'] = config.getfloat('search_params', 'max_freq')\n\t\tauto_rx_config['rx_timeout'] = config.getint('search_params', 'rx_timeout')\n\t\tauto_rx_config['whitelist'] = json.loads(config.get('search_params', 'whitelist'))\n\t\tauto_rx_config['blacklist'] = json.loads(config.get('search_params', 'blacklist'))\n\t\tauto_rx_config['greylist'] = json.loads(config.get('search_params', 'greylist'))\n\n\t\t# Location Settings\n\t\tauto_rx_config['station_lat'] = config.getfloat('location', 'station_lat')\n\t\tauto_rx_config['station_lon'] = config.getfloat('location', 'station_lon')\n\t\tauto_rx_config['station_alt'] = config.getfloat('location', 'station_alt')\n\n\t\t# Position Filtering\n\t\tauto_rx_config['max_altitude'] = config.getint('filtering', 'max_altitude')\n\t\tauto_rx_config['max_radius_km'] = config.getint('filtering', 'max_radius_km')\n\n\t\t# Habitat Settings\n\t\tauto_rx_config['habitat_enabled'] = config.getboolean('habitat', 'habitat_enabled')\n\t\tauto_rx_config['habitat_upload_rate'] = config.getint('habitat', 'upload_rate')\n\t\tauto_rx_config['habitat_payload_callsign'] = config.get('habitat', 'payload_callsign')\n\t\tauto_rx_config['habitat_uploader_callsign'] = config.get('habitat', 'uploader_callsign')\n\t\tauto_rx_config['habitat_upload_listener_position'] = config.getboolean('habitat','upload_listener_position')\n\n\t\t# APRS Settings\n\t\tauto_rx_config['aprs_enabled'] = config.getboolean('aprs', 'aprs_enabled')\n\t\tauto_rx_config['aprs_upload_rate'] = config.getint('aprs', 'upload_rate')\n\t\tauto_rx_config['aprs_user'] = config.get('aprs', 'aprs_user')\n\t\tauto_rx_config['aprs_pass'] = config.get('aprs', 'aprs_pass')\n\t\tauto_rx_config['aprs_server'] = config.get('aprs', 'aprs_server')\n\t\tauto_rx_config['aprs_object_id'] = config.get('aprs', 'aprs_object_id')\n\t\tauto_rx_config['aprs_custom_comment'] = config.get('aprs', 'aprs_custom_comment')\n\n\t\t# OziPlotter Settings\n\t\tauto_rx_config['ozi_enabled'] = config.getboolean('oziplotter', 'ozi_enabled')\n\t\tauto_rx_config['ozi_update_rate'] = config.getint('oziplotter', 'ozi_update_rate')\n\t\tauto_rx_config['ozi_port'] = config.getint('oziplotter', 'ozi_port')\n\t\tauto_rx_config['payload_summary_enabled'] = config.getboolean('oziplotter', 'payload_summary_enabled')\n\t\tauto_rx_config['payload_summary_port'] = config.getint('oziplotter', 'payload_summary_port')\n\n\t\t# Advanced Settings\n\t\tauto_rx_config['search_step'] = config.getfloat('advanced', 'search_step')\n\t\tauto_rx_config['snr_threshold'] = config.getfloat('advanced', 'snr_threshold')\n\t\tauto_rx_config['min_distance'] = config.getfloat('advanced', 'min_distance')\n\t\tauto_rx_config['dwell_time'] = 
config.getint('advanced', 'dwell_time')\n\t\tauto_rx_config['quantization'] = config.getint('advanced', 'quantization')\n\t\tauto_rx_config['max_peaks'] = config.getint('advanced', 'max_peaks')\n\t\tauto_rx_config['scan_dwell_time'] = config.getint('advanced', 'scan_dwell_time')\n\t\tauto_rx_config['detect_dwell_time'] = config.getint('advanced', 'detect_dwell_time')\n\t\tauto_rx_config['scan_delay'] = config.getint('advanced', 'scan_delay')\n\t\tauto_rx_config['payload_id_valid'] = config.getint('advanced', 'payload_id_valid')\n\t\tauto_rx_config['synchronous_upload'] = config.getboolean('advanced', 'synchronous_upload')\n\n\t\t# Rotator Settings (TBC)\n\t\tauto_rx_config['rotator_enabled'] = config.getboolean('rotator','rotator_enabled')\n\t\tauto_rx_config['rotator_update_rate'] = config.getint('rotator', 'update_rate')\n\t\tauto_rx_config['rotator_hostname'] = config.get('rotator', 'rotator_hostname')\n\t\tauto_rx_config['rotator_port'] = config.getint('rotator', 'rotator_port')\n\t\tauto_rx_config['rotator_homing_enabled'] = config.getboolean('rotator', 'rotator_homing_enabled')\n\t\tauto_rx_config['rotator_home_azimuth'] = config.getfloat('rotator', 'rotator_home_azimuth')\n\t\tauto_rx_config['rotator_home_elevation'] = config.getfloat('rotator', 'rotator_home_elevation')\n\n\n\t\t# New setting in this version (20180616). Keep it in a try-catch to avoid bombing out if the new setting isn't present.\n\t\ttry:\n\t\t\tauto_rx_config['habitat_uploader_antenna'] = config.get('habitat', 'uploader_antenna').strip()\n\t\texcept:\n\t\t\tlogging.error(\"Config - Missing uploader_antenna setting. Using default.\")\n\t\t\tauto_rx_config['habitat_uploader_antenna'] = '1/4-wave'\n\n\t\t# New settings added in 20180624.\n\t\ttry:\n\t\t\tauto_rx_config['web_port'] = config.getint('web', 'web_port')\n\t\t\tauto_rx_config['web_archive_age'] = config.getint('web', 'archive_age')\n\t\texcept:\n\t\t\tlogging.error(\"Config - Missing Web Server settings. Using defaults.\")\n\t\t\tauto_rx_config['web_port'] = 5000\n\t\t\tauto_rx_config['web_archive_age'] = 120\n\n\n\n\t\t# Now we attempt to read in the individual SDR parameters.\n\t\tauto_rx_config['sdr_settings'] = {}\n\n\t\tfor _n in range(1,auto_rx_config['sdr_quantity']+1):\n\t\t\t_section = \"sdr_%d\" % _n\n\t\t\ttry:\n\t\t\t\t_device_idx = config.get(_section,'device_idx')\n\t\t\t\t_ppm = config.getint(_section, 'ppm')\n\t\t\t\t_gain = config.getfloat(_section, 'gain')\n\t\t\t\t_bias = config.getboolean(_section, 'bias')\n\n\t\t\t\tif (auto_rx_config['sdr_quantity'] > 1) and (_device_idx == '0'):\n\t\t\t\t\tlogging.critical(\"Config - SDR Device ID of 0 used with a multi-SDR configuration. Go read the warning in the config file!\")\n\t\t\t\t\treturn None\n\n\t\t\t\t# See if the SDR exists.\n\t\t\t\t_sdr_valid = rtlsdr_test(_device_idx)\n\t\t\t\tif _sdr_valid:\n\t\t\t\t\tauto_rx_config['sdr_settings'][_device_idx] = {'ppm':_ppm, 'gain':_gain, 'bias':_bias, 'in_use': False, 'task': None}\n\t\t\t\t\tlogging.info('Config - Tested SDR #%s OK' % _device_idx)\n\t\t\t\telse:\n\t\t\t\t\tlogging.warning(\"Config - SDR #%s invalid.\" % _device_idx)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error(\"Config - Error parsing SDR %d config - %s\" % (_n,str(e)))\n\t\t\t\tcontinue\n\n\t\t# Sanity checks when using more than one SDR\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['habitat_payload_callsign'] != \"\"):\n\t\t\tlogging.critical(\"Fixed Habitat Payload callsign used in a multi-SDR configuration. 
Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['aprs_object_id'] != \"\"):\n\t\t\tlogging.critical(\"Fixed APRS object ID used in a multi-SDR configuration. Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['rotator_enabled']):\n\t\t\tlogging.critical(\"Rotator enabled in a multi-SDR configuration. Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\t# TODO: Revisit this limitation once the OziPlotter output sub-module is complete.\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['ozi_enabled'] or auto_rx_config['payload_summary_enabled']):\n\t\t\tlogging.critical(\"Chase car outputs (OziPlotter/Payload Summary) enabled in a multi-SDR configuration.\")\n\t\t\treturn None\n\n\n\t\tif len(auto_rx_config['sdr_settings'].keys()) == 0:\n\t\t\t# We have no SDRs to use!!\n\t\t\tlogging.error(\"Config - No working SDRs! Cannot run...\")\n\t\t\treturn None\n\t\telse:\n\t\t\t# Create a global copy of the configuration file at this point\n\t\t\tglobal_config = copy.deepcopy(auto_rx_config)\n\t\t\treturn auto_rx_config\n\n\n\texcept:\n\t\ttraceback.print_exc()\n\t\tlogging.error(\"Could not parse config file.\")\n\t\treturn None\n\n\nif __name__ == '__main__':\n\t''' Quick test script to attempt to read in a config file. '''\n\timport sys, pprint\n\tlogging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)\n\n\tconfig = read_auto_rx_config(sys.argv[1])\n\n\tpprint.pprint(global_config)","sub_path":"auto_rx/autorx/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
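For reference, an illustrative (not from the source) minimal configuration skeleton matching the sections and options read above; the values simply mirror the code's defaults:

[logging]
per_sonde_log = True

[sdr]
sdr_quantity = 1

[sdr_1]
device_idx = 0
ppm = 0
gain = -1
bias = False

[search_params]
min_freq = 400.4
max_freq = 404.0
rx_timeout = 120
whitelist = []
blacklist = []
greylist = []

[location]
station_lat = 0.0
station_lon = 0.0
station_alt = 0.0

; the remaining sections ([filtering], [habitat], [aprs], [oziplotter],
; [rotator], [web], [email], [advanced]) follow the option names used in
; the config.get* calls above.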
+{"seq_id":"296153206","text":"import re\nfrom main_modules.settings import PRIORITY, TYPE\n__classificationtype__=TYPE.BackEND_SPECIFIED\n__priority__ = PRIORITY.LOW\n\ndef tamper(payload, **kwargs):\n \"\"\"\n onvert to char()\n\n =char(100,118,119,97)\nchar(0x##)+char(0x##)+... if we can use only one character\n\n >>> tamper(\"selet\")\n 'SeLeT'\n\n CHAR(83, 101, 76, 101, 84) mysql \n CHAR(83) + CHAR(101) + CHAR(76) + CHAR(101) + CHAR(84) mssql\n CHR(115) || CHR(101) || CHR(108) || CHR(101) || CHR(99) || CHR(116) oracle\n \"\"\"\n \n string=re.sub(r\"\\w*\",convert_this,str(payload))\n\n return (string) if payload else payload\n\ndef convert_this(string):\n new_word=[]\n string=string.group()\n new_word=\" || \".join(\"Char(\"+str(\"{:07x}\".format(ord(c))+')') for c in string)\n return new_word","sub_path":"Tampers/sqli/CharacterEncoding_charOracle.py","file_name":"CharacterEncoding_charOracle.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"589026547","text":"import timeit\nfrom random import shuffle, randint\nimport random\n\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef geraLista(tam):\n lista = list(range(1, tam + 1))\n shuffle(lista)\n return lista\n\n\ndef geraListaInvertida(tam):\n lista = list(range(1, tam + 1))\n return lista[::-1]\n\n\ndef desenhaGrafico(x, y, xl=\"Entradas\", yl=\"Saídas\", z='Tempo'):\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(x, y, label=\"Lista aleatória - {} \".format(z))\n ax.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)\n plt.ylabel(yl)\n plt.xlabel(xl)\n plt.show()\n plt.savefig(z + \".png\")\n\n\ndef shellSort(lista):\n\n\n intervalo = len(lista) // 2\n\n while intervalo > 0:\n for index in range(intervalo, len(lista)):\n pivo = lista[index]\n aux_index = index\n while aux_index >= intervalo and lista[aux_index - intervalo] > pivo:\n lista[aux_index] = lista[aux_index - intervalo]\n aux_index = aux_index - intervalo\n lista[aux_index] = pivo\n\n intervalo //= 2\nif __name__ == '__main__':\n z = [100000, 200000, 300000, 400000, 500000, 1000000, 2000000]\n x = []\n for i in z:\n x.append(geraLista(int(i)))\n y = []\n\n\n\n for i in range(len(x)):\n\n print(len(x[i]))\n y.append(\n timeit.timeit(\"shellSort({})\".format(x[i]), setup=\"from __main__ import shellSort\",\n number=4))\n desenhaGrafico(z, y)\n","sub_path":"shellsort/shellsort.py","file_name":"shellsort.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"51045983","text":"import pandas as pd\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport os\n\nurlList = [\n '1', '500', '1000', '1500', '2000', '2500', '3000', '3500', '4000', '4500',\n '5000'\n]\nseries_bool = [True, True, True, True]\nattri = 'onclick'\npattern = r'([0-9]+)'\n\nprint(os.getcwd())\n\nUserIdPath = 'data/UserID.txt'\nTablePath = 'data/table.csv'\nos.remove(UserIdPath)\nos.remove(TablePath)\n\n# 全ページからUserIDのみを取得\nfor urlPage in urlList:\n url = 'url'\n html = urlopen(url).read()\n soup = BeautifulSoup(html, 'html.parser')\n\n UserID = ''\n links = soup.find_all('tr')\n for link in links:\n if attri in link.attrs and link.attrs[attri].find('UserID=') != -1:\n UserID += link.attrs[attri]\n\n # UserIDのみを改行区切り\n match = '\\n'.join(re.findall(pattern, UserID)) + '\\n'\n # テキストにUserIDを書き込み\n with open(UserIdPath, mode='a', encoding='utf-8') as fw:\n fw.write(match)\n\n # table取得\n table = pd.read_html(url, flavor='bs4')[9].ix[:, series_bool].dropna()\n # csvに書き込み\n with open(TablePath, 'a', encoding='utf-8') as f:\n table.to_csv(f)","sub_path":"Python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"337298292","text":"import torch\nimport torch.nn as nn\n\n__all__ = ['mymodel2']\n\n\ndef change_dir(grad):\n return -1 * grad\n\ndef custom(grad, model_output,labels_):\n i = 0\n for col in labels_:\n label = col.data \n max_ouput = torch.max(model_output[i,label,:,:])\n norm_output = model_output[i,label,:,:]/max_ouput\n mask = norm_output > 0.85\n chd = change_dir(grad[i,label,0,0])\n grad.select(0, i).select(0,label).copy_(grad[i,label,:,:].masked_fill_(mask,chd))\n i += 1 \n return grad\n\n\nclass MyModel2(nn.Module):\n def __init__(self):\n super(MyModel2, self).__init__()\n #self.attention = None\n\n def forward(self, input_):\n if not self.training:\n return input_\n else:\n attention = torch.mean(input_, dim=1, keepdim=True)\n importance_map = torch.sigmoid(attention)\n return input_.mul(importance_map)\n\n\n","sub_path":"wsol/method/mymodel2.py","file_name":"mymodel2.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"412034109","text":"#faça um programa que receba 10 números inteiros e armazene numa lista.\n#Calcule e mostre:\n#A quantidade de números pares;\n#A soma dos números ímpares;\n#A quantidade de números entre 10 e 20(inclusive);\n#A média dos números da lista.\n\nfrom random import randint\nn=[]\nfor i in range(10):\n n.append(randint(10,20))\n print(n)\npar=impar=qtd=soma=media=0\nfor i in range(10):\n if n[i]%2==0:\n par+=1\n\n else:\n impar+=n[i]\n\n if n[i]>=10 and n[i]<=20:\n qtd+=1\n\n soma+=n[i]\n\nmedia=soma/len(n) \nprint('Quantidade de números pares: ',par)\nprint('A soma dos números ímpares: ',impar)\nprint('A quantidade de números entre 10 e 20: ',qtd)\nprint('A média dos números da lista: ',media)\n","sub_path":"Pacote para dowloand/Python/ex018(lista e for) Armazenamento em lista.py","file_name":"ex018(lista e for) Armazenamento em lista.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"614507735","text":"\"\"\"\n=================================================\nAuthor : Bulici\nTime : 2020/3/5 18:46 \nEmail : 294666094@qq.com\nMotto : Clumsy birds have to start flying early.\n=================================================\n\"\"\"\nimport unittest\nimport os\nimport random\nimport jsonpath\nfrom library.ddt import ddt,data\nfrom common.handleexcel import Excel\nfrom common.handlepath import DATADIR\nfrom common.handleconfig import conf\nfrom common.handlereplace import ReplaceData\nfrom common.handlerequests import Requests\nfrom common.handlemylog import log\nfrom common.handle_sign import HandleSign\nfrom common.handelmysql import DB\n\n@ddt\nclass TestMainstreaming(unittest.TestCase):\n excel = Excel(os.path.join(DATADIR,\"apicases.xlsx\"),\"mainstreaming\")\n cases = excel.read_data()\n request = Requests()\n db = DB()\n\n @data(*cases)\n def testmainstreaming(self,case):\n \"\"\"\n 接口测试项目主流程测试用例\n :param case: Excel文档中的用例\n :return:\n \"\"\"\n #第一步:准备测试数据\n url = conf.get(\"env\", \"url\") + ReplaceData.replace_data(case[\"url\"])\n method = case[\"method\"]\n expected = eval(case[\"expected\"])\n headers = eval(conf.get(\"env\",\"headers\"))\n title = case[\"title\"]\n row = case[\"case_id\"] + 1\n #判断是注册接口,则生成一个随机手机号,并保存为类属性\n if case[\"interface\"] == \"register\":\n ReplaceData.mobile_phone = self.random_phone()\n case[\"data\"] = ReplaceData.replace_data(case[\"data\"])\n data = eval(case[\"data\"])\n\n #判断不是注册、登录、项目列表接口就在请求头中添加token鉴权信息\n if case[\"interface\"] != \"register\" and case[\"interface\"] != \"login\" :\n headers[\"Authorization\"] = ReplaceData.Authorization\n # 添加时间戳和签名到json请求体\n sign = HandleSign.generate_sign(ReplaceData.token)\n data.update(sign)\n\n #第二步:发送请求,获取结果\n response = self.request.send(url=url,method=method,headers=headers,params=data,json=data)\n res = response.json()\n\n #判断是否是登录接口,提取用户的id,提取鉴权token值,保存为类属性\n if case[\"interface\"] == \"login\":\n ReplaceData.member_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n ReplaceData.token = jsonpath.jsonpath(res,\"$..token\")[0]\n token_type = jsonpath.jsonpath(res,\"$..token_type\")[0]\n ReplaceData.Authorization = token_type + \" \" + ReplaceData.token\n\n # 判断是否是添加项目接口,提取项目的id,保存为类属性\n if case[\"interface\"] == \"add\" and case[\"title\"] == \"管理员添加项目一\":\n ReplaceData.pass_loan_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n elif case[\"interface\"] == \"add\" and case[\"title\"] == \"管理员添加项目二\":\n ReplaceData.file_loan_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n\n #第三步:断言,比对预期结果与实际结果\n try:\n self.assertEqual(expected[\"code\"],res[\"code\"])\n self.assertEqual(expected[\"msg\"],res[\"msg\"])\n except AssertionError as e :\n print(\"预期结果:{}\".format(expected))\n print(\"实际结果:{}\".format(res))\n self.excel.write_data(row=row,column=8,value=\"未通过\")\n log.error(\"用例未通过:{},错误原因:{}\".format(title,e))\n raise e\n else:\n self.excel.write_data(row=row, column=8, value=\"通过\")\n log.debug(\"用例通过:{}\".format(title))\n\n\n\n def random_phone(self):\n \"\"\"\n 随机生成手机号的方法\n :return:\n \"\"\"\n while True:\n phone = \"155\"\n for i in range(0, 8):\n n = random.randint(0, 9)\n phone += str(n)\n sql = \"SELECT * FROM futureloan.member WHERE mobile_phone={}\".format(phone)\n res_phone = self.db.find_count(sql)\n if res_phone == 0:\n break\n\n return phone\n","sub_path":"testcase/test11mainstreaming.py","file_name":"test11mainstreaming.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"496600162","text":"from django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport pandas as pd\n\nfrom categories.models import Pool\n\nimport os\nimport logging\nimport re\n\n\ndef vendor_logger():\n return logging.getLogger('vendor')\n\n\ndef vehicle_info(vehicle):\n field_map = {\n 'oasis': {\n 'field_types': ('core', 'zones')\n },\n 'oasis_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n },\n 'hcats': {\n 'field_types': ('core', 'zones') \n },\n 'hcats_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n },\n 'bmo': {\n 'field_types': ('core', 'zones') \n },\n 'bmo_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n }\n }\n return field_map[vehicle]\n\n\ndef vendor_field_type_core():\n return [\n 'ContractorName',\n 'ContractNumber',\n 'ContractEnd',\n 'DUNS',\n 'POC1',\n 'Phone1',\n 'Email1',\n 'POC2',\n 'Phone2',\n 'Email2'\n ]\n \ndef vendor_field_type_setasides():\n return [\n 'SB',\n '8(a)',\n '8(a)Date',\n 'HubZ',\n 'SDB',\n 'WO',\n 'VO',\n 'SDVOSB',\n 'VIP'\n ]\n \ndef vendor_field_type_zones():\n return [\n 'Zone1',\n 'Zone2',\n 'Zone3',\n 'Zone4',\n 'Zone5',\n 'Zone6'\n ]\n\nclass Command(BaseCommand):\n \n def check_pool(self, vehicle, pool, df):\n variables = globals()\n info = vehicle_info(vehicle)\n logger = vendor_logger()\n columns = list(df.columns)\n vendor_count = 0\n \n print(\" > Data:\")\n for field_group in info['field_types']:\n field_processor = \"vendor_field_type_{}\".format(field_group)\n missing = 0\n \n print(\" - {}:\".format(field_group))\n for column in variables[field_processor]():\n if column not in columns:\n print(\" - Missing: {}\".format(column))\n missing += 1\n \n if missing == 0:\n print(\" - No missing fields\")\n \n for index, record in df.iterrows():\n vendor_count += 1\n \n print(\" > Vendors: {}\".format(vendor_count))\n\n\n def check_vehicle(self, vehicle):\n vehicle_file = os.path.join(settings.BASE_DIR, 'data/pools/{}.xlsx'.format(vehicle))\n wb = pd.ExcelFile(vehicle_file)\n sheets = wb.sheet_names\n \n print(\"\\nVehicle [ {} ]\".format(vehicle))\n \n for name in sheets:\n try:\n pool = re.search(r'\\(\\s*([0-9a-zA-Z]+)\\s*\\)', name, re.IGNORECASE).group(1)\n pool_data = Pool.objects.get(number=pool, vehicle__id__iexact=vehicle)\n \n print(\"\\n > Pool [ {} ]\".format(pool))\n self.check_pool(vehicle, pool, wb.parse(name))\n\n except AttributeError as e:\n pass # Not a pool sheet, skip...\n \n except Pool.DoesNotExist as e:\n logger.debug(\" > Pool {} not found\".format(pool))\n raise(e)\n\n except Pool.MultipleObjectsReturned as e:\n logger.debug(\" > More than one pool matched {}. Integrity error!\".format(pool))\n raise(e)\n \n\n def handle(self, *args, **options):\n for vehicle in settings.VEHICLES:\n self.check_vehicle(vehicle)\n","sub_path":"app/vendors/management/commands/check_vendors.py","file_name":"check_vendors.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"370917694","text":"\"\"\"\r\n\tCopyright (c) 2016 Arttu Ylä-Sahra\r\n\r\n\tPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\r\n\r\n\tThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\n\r\n\tTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\"\"\"\r\n\r\n\"\"\"\r\n\trandom-background.py - A simple generator script using the Pintograph library for generating images. Each time it is a run, an image with a random filename is generated\r\n\"\"\"\r\n\r\nfrom pintograph_engine import Pintograph\r\nfrom PIL import Image, ImageDraw\r\nimport random\r\nimport math\r\n\r\n#How large the picture shall be?\r\nBACKGROUND_SIZE = 4500\r\n\r\n#Base steps: this amount will be ran for autodetection of position, and 10x that for actual rendering\r\nBASE_STEPS = 1000\r\n\r\ndef realign_cords(xy):\r\n\t\"\"\"\r\n\t\tAs the Pintograph library generates coordinates around an arbitrary center point, we need to convert the coordinates to a type suitable for images - aka starting from zero\r\n\t\"\"\"\r\n\tglobal BACKGROUND_SIZE\r\n\r\n\treturn [int(xy[0] + (BACKGROUND_SIZE / 2)), int((BACKGROUND_SIZE-1) - (xy[1] + BACKGROUND_SIZE / 2))]\r\n\r\n#Initialize the image library\r\nbase_img = Image.new(\"RGBA\", (BACKGROUND_SIZE, BACKGROUND_SIZE), (255,255,255,255))\r\nbase_draw = ImageDraw.Draw(base_img)\r\n\r\n#Initialize the variables required for the Pintograph library\r\nrandom.seed()\r\n#How long the rods will be?\r\nrod_length = 50 + random.randint(-20,20)\r\n\r\n#One full phase is 2pi, and we want it to complete a full circle in 10 steps, so.. 
\r\nphases_per_step = (2*math.pi) / 10\r\n\r\n#Starting angle at approximately 173 to 287 degrees for both\r\ninitial_phase = (4.01 - 1) + (random.random() * 2)\r\n\r\n#The radii for the circles\r\nradii = 10\r\n\r\npinto = Pintograph(r1=radii,\r\n\t\t\t\t r1_phase_per_step = phases_per_step,\r\n\t\t\t\t r2=radii,\r\n\t\t\t\t r2_phase_per_step = phases_per_step - 0.02 + (random.random() * 0.04),\r\n\t\t\t\t circle_distance = ((rod_length * 2) * (2.0/7.0)),\r\n\t\t\t\t left_rod_lngth = rod_length,\r\n\t\t\t\t right_rod_lngth = rod_length + int((random.random() * 5)),\r\n\r\n\t\t\t\t lft_radii_minimum_factor = 0.01,\r\n\t\t\t\t lft_radii_degrade_percentage_per_step = 0.0010752 + (random.random() / 10000),\r\n\r\n\t\t\t\t rght_radii_minimum_factor = 0.01,\r\n\t\t\t\t rght_radii_degrade_percentage_per_step = 0.0010751 + (random.random() / 10000),\r\n\t\t\t\t )\r\n\r\n\r\npinto.set_initial_phases(initial_phase - 0.03 + (random.random() * 0.06), initial_phase)\r\npinto.set_x_swing((rod_length*2)/7, 0, phases_per_step, BASE_STEPS * 0.85)\r\n\r\n#Run a rough scan and scale\r\npinto.automatic_scale_and_center(BASE_STEPS, 0.75, BACKGROUND_SIZE, 2)\t\r\n\r\nbase_pos = realign_cords(pinto.calculate_adjusted_step(0))\r\n\r\nfor i in range(BASE_STEPS * 10):\r\n\tnew_pos = realign_cords(pinto.calculate_adjusted_step(i / 10.0))\r\n\t#Simulate opacity; as actually implementing it would be slightly complicated, let's assume the line is darker when farther in the steps\r\n\tc = int(200 - (i / (BASE_STEPS*10.0))*180)\r\n\tbase_draw.line([base_pos[0], base_pos[1], new_pos[0], new_pos[1]], (c,c,c,c), 1)\r\n\tbase_pos = new_pos\r\n\r\nbase_img.save(\"bg-\"+str(random.randint(0,10000000000))+\".png\")\r\n","sub_path":"random-background.py","file_name":"random-background.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
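With BACKGROUND_SIZE = 4500, realign_cords above shifts the pintograph's centred coordinates to image coordinates and flips the y axis; a quick check:

assert realign_cords([0, 0]) == [2250, 2249]   # drawing origin -> image centre
assert realign_cords([-2250, 2249]) == [0, 0]  # top-left corner of the image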
+{"seq_id":"377764332","text":"\n# In[1]:\n\n\n##Implement Some Importatnt functions to be used in the project flow\n\n\n# In[2]:\n\n\n# Important imports\nimport numpy as np\nimport cv2\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom skimage.feature import hog\n\nimport glob\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom scipy.ndimage.measurements import label\n\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\nimport imageio\n#imageio.plugins.ffmpeg.download()\n\n\n# In[3]:\n\n\n# Important notes when reading images in this project\n\n'''\npng, mpimg -> 0 - 1 \npng, cv2 -> 0 - 255\njpg, mpimg -> 0 - 255\njpg, cv2 -> 0 - 255\n'''\n\n\n# ## 1- Draw Boxes Function\n\n# In[4]:\n\n\n# Draw boxes using cv2 library given 2 opposite points\ndef draw_boxes(img, bboxes, color, thick):\n # make a copy from the input image\n draw_img = np.copy(img)\n # draw the bounding box which has the input opposite points in shape of ((x1,y1),(x2,y2))\n for bbox in bboxes:\n # draw the rectangle using cv2.rectangle with the input color of shape (R,G,B)\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n \n return draw_img\n\n\n# In[5]:\n\n\n# Test draw_boxes Function\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\ntest_bboxes = [((800,500),(950,400))]\ntest_result = draw_boxes(test_image,test_bboxes, color=(255,0,0), thick=8)\nplt.imshow(test_result)\nplt.show()\n\n\n# \n# # 2- Features Extraction \n\n# ## 2a) Color Hitograms Features\n\n# In[6]:\n\n\n# Extract features from the color histogram\ndef color_hist_features(img, nbins, bins_range):\n # Calclate the histograms for each channel seperately\n chan1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range) \n chan2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n chan3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Generate bins centers\n bins_edges = chan1_hist[1]\n bins_centers = (bins_edges[1:] + bins_edges[0:len(bins_edges)-1])/2\n # Concatenate all features together\n color_hist_features = np.concatenate((chan1_hist[0], chan2_hist[1], chan3_hist[0]))\n \n # return the histogram features which is the most important one from this function\n # However, the otehr histograms and bins centers will be needed to be visualized in testing this function\n return color_hist_features, chan1_hist, chan2_hist, chan3_hist, bins_centers\n\n\n# In[7]:\n\n\n# Test Color histogram features extraction\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\ntest_features, test_ch1, test_ch2, test_ch3, test_centers = color_hist_features(test_image, \n nbins=32, \n bins_range=(0,256))\n\nfig = plt.figure(figsize=(12,3))\nplt.subplot(131)\nplt.bar(test_centers, test_ch1[0])\nplt.xlim(0, 256)\nplt.title('ch1 Histogram')\nplt.subplot(132)\nplt.bar(test_centers, test_ch2[0])\nplt.xlim(0, 256)\nplt.title('ch2 Histogram')\nplt.subplot(133)\nplt.bar(test_centers, test_ch3[0])\nplt.xlim(0, 256)\nplt.title('ch3 Histogram')\nfig.tight_layout()\nplt.show()\n\n\n# ## 2b) Color Spatial Bining Features\n\n# In[8]:\n\n\n# Extract features from the Color spatial Bining\ndef bin_spatial(img, color_space, size):\n # convert the image into the color space sent into the function\n if color_space != \"RGB\":\n if color_space == \"HSV\":\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == \"HLS\":\n feature_img = 
cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == \"LUV\":\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == \"YUV\":\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == \"YCrCb\":\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n elif color_space == \"GRAY\":\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n else:\n feature_img = np.copy(img)\n \n # flatten the features extarcted from the image after resizing\n bin_spatial_features = cv2.resize(feature_img, size).ravel()\n \n # return these features\n return bin_spatial_features, feature_img\n\n\n# In[9]:\n\n\n# Test Color spatial Bining \ntest_image = mpimg.imread(\"test_images/test1.jpg\")\nprint(test_image.shape)\n\ntest_features, test_img_converted = bin_spatial(test_image, color_space=\"YCrCb\", size=(8,8))\nplt.plot(test_features)\nplt.show()\n\n\n# ## 2c) Oriented Gradient Histogram features (Hog)\n\n# In[10]:\n\n\n# Extract features of the Histogram Oriented Gradient\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag):\n # Note that the img here should be 2D (grayscale)\n # check the visualization flag if it is true or not to plot the output of hog functionality \n if vis_flag == True:\n # apply hog with visualizing the output of hog functionality\n hog_features, hog_image = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell,pix_per_cell),\n cells_per_block=(cell_per_block,cell_per_block),\n transform_sqrt=transform_sqrt_flag, \n visualise=vis_flag, feature_vector=feature_vector_flag)\n \n return hog_features, hog_image\n \n if vis_flag == False:\n # apply hog without visualizing the output of hog functionality\n hog_features = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell,pix_per_cell),\n cells_per_block=(cell_per_block,cell_per_block),\n transform_sqrt=transform_sqrt_flag, \n visualise=vis_flag, feature_vector=feature_vector_flag)\n \n return hog_features\n \n\n\n# In[11]:\n\n\n# Test extraction of hog features and visulaize\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\n# Note that the image should be 2D (grayscale) \ntest_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)\ntest_features, test_result_img = get_hog_features(test_gray, orient=12, pix_per_cell=4, \n cell_per_block=2, transform_sqrt_flag=True, \n vis_flag=True, feature_vector_flag=True)\n\nfig = plt.figure(figsize=(12,3))\nplt.subplot(121)\nplt.imshow(test_image, cmap='gray')\nplt.title('Example')\nplt.subplot(122)\nplt.imshow(test_result_img, cmap='gray')\nplt.title('HOG Visualization')\nplt.show()\n\n\n# # 3- Combine Features (Color_hist, bin_spatial) with (hog)\n\n# In[55]:\n\n\n# Extract all of the previous features from list of images \ndef extract_features(imgs, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag, cv2read=False):\n # Create empty list for appending the extracted features\n features = []\n # make an iteration to apply the extraction over img by img\n\n for img in imgs:\n # create local features for every image to preserve them after finishing all images\n image_features = []\n # read the img\n if cv2read == True:\n image_read = cv2.imread(img)\n else:\n image_read = img\n \n #converted_image = image_read\n \n # Apply bin spatial features extraction\n bin_features, converted_image = bin_spatial(image_read, 
color_space=cspace, size=spatial_size)\n \n # Apply color hist features extraction\n col_features,_,_,_,_ = color_hist_features(converted_image, nbins=hist_nbins, bins_range=hist_range)\n \n # Apply hog features extraction\n if hog_channel == \"ALL\":\n hog_features = []\n # Apply hog features extraction over each channel in the image\n for channel in range(converted_image.shape[2]):\n hog_features.append(get_hog_features(img=converted_image[:,:,channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag))\n hog_features = np.ravel(hog_features)\n else:\n # Apply hog features extraction over the given channel in the image\n hog_features = get_hog_features(img=converted_image[:,:,hog_channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag)\n \n # Append all of the features in one list\n if extract_spatial_flag == True:\n image_features.append(bin_features)\n if extract_color_hist_flag == True:\n image_features.append(col_features)\n if extract_hog_flag == True:\n image_features.append(hog_features)\n \n #print(image_features)\n \n # Appned all of the features in (features) list after concatenate all of the previous features\n features.append(np.concatenate(image_features))\n \n # return all of these features in a feature vector\n return features\n\n\n# In[56]:\n\n\ndef extract_features_One_image(img, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag, cv2read=False):\n \n # Create empty list for appending the extracted features\n features = []\n # create local features for every image to preserve them after finishing all images\n image_features = []\n # read the img\n if cv2read == True:\n image_read = cv2.imread(img)\n else:\n image_read = img\n \n #converted_image = image_read\n\n # Apply bin spatial features extraction\n bin_features, converted_image = bin_spatial(image_read, color_space=cspace, size=spatial_size)\n\n # Apply color hist features extraction\n col_features,_,_,_,_ = color_hist_features(converted_image, nbins=hist_nbins, bins_range=hist_range)\n\n # Apply hog features extraction\n if hog_channel == \"ALL\":\n hog_features = []\n # Apply hog features extraction over each channel in the image\n for channel in range(converted_image.shape[2]):\n hog_features.append(get_hog_features(img=converted_image[:,:,channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag))\n hog_features = np.ravel(hog_features)\n else:\n # Apply hog features extraction over the given channel in the image\n hog_features = get_hog_features(img=converted_image[:,:,hog_channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag)\n\n # Append all of the features in one list\n if extract_spatial_flag == True:\n image_features.append(bin_features)\n if extract_color_hist_flag == True:\n image_features.append(col_features)\n if extract_hog_flag == True:\n image_features.append(hog_features)\n\n # Appned all of the features 
in (features) list after concatenate all of the previous features\n features.append(np.concatenate(image_features))\n\n # return all of these features in a feature vector\n return features\n\n\n# # 4- HeatMap, apply threhold, draw labeled bboxes Functions\n\n# ## 4a) HeatMap Function\n\n# In[58]:\n\n\n# add heatmap using the bounding boxes list given as an input to the function\ndef add_heat(heatmap, bbox_list):\n # note that heamap input here is zeros of the shape of the image or one channel only in the image\n # iterate through the bboxlist\n for bbox in bbox_list:\n heatmap[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]] += 1\n \n # return the heatmap\n return heatmap\n\n\n# ## 4b) Apply threshold on the heatmap created\n\n# In[59]:\n\n\n# apply threshold value over the heatmap created\ndef apply_threshold(heatmap, threshold):\n # values below the given threshold will be equal to 0\n heatmap[heatmap <= threshold] = 0\n \n return heatmap\n\n\n# ## 4c) draw labeled bboxes \n\n# In[60]:\n\n\n# draw the bounding box rectangle on the image given the labels \ndef draw_labels_bboxes(img, labels):\n # note that labels will be come from scipy.ndimage.measurements\n #iterate through the whole detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n # Return the image\n return img\n\n\n# # 5- Build Classifier (Normalize, Train, Test, Accuracy calculation)\n\n# ## 5a) Extract features of both Cars, NotCars data\n\n# In[17]:\n\n\n# Read all images paths for cars and notcars\nCars_images = glob.glob(\"Training_Data/vehicles/*/*.png\")\nnoCars_images = glob.glob(\"Training_Data/non-vehicles/*/*.png\") \n\n# save images in these lists\ncars = []\nnotcars = []\n\n\nfor car_image in Cars_images:\n cars.append(car_image)\n \nfor notcar_image in noCars_images:\n notcars.append(notcar_image)\n\n# sample_size = 1000\n# cars = cars[0:sample_size]\n# notcars = notcars[0:sample_size]\n\n# parameters need tweak \ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 8 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nspatial_range = (0, 200) # Spatial range\nhist_bins = 32 # Number of histogram bins\nspatial_transform = True # Spatial Transform sqrt\nspatial_feat = True # Spatial features on or off\nvisualize = False # Visualization flag\nfeature_vector = True # Feature Vector flag\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [400, 680] # Min and max in y to search in slide_window()\n\ncar_features = extract_features(imgs=cars, cspace=color_space, spatial_size=spatial_size, hist_nbins=hist_bins, \n hist_range=spatial_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=spatial_transform, \n vis_flag=visualize, feature_vector_flag=feature_vector, hog_channel=hog_channel, \n extract_spatial_flag=spatial_feat, extract_color_hist_flag=hist_feat, \n extract_hog_flag=hog_feat, cv2read=True)\n\nNotcar_features = 
extract_features(imgs=notcars, cspace=color_space, spatial_size=spatial_size, hist_nbins=hist_bins, \n hist_range=spatial_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=spatial_transform, \n vis_flag=visualize, feature_vector_flag=feature_vector, hog_channel=hog_channel, \n extract_spatial_flag=spatial_feat, extract_color_hist_flag=hist_feat, \n extract_hog_flag=hog_feat, cv2read=True)\n\n\n# ## 5b) Normalize, Labels, SVC-Classifier, Train, Accuracy Calculation\n\n# In[18]:\n\n\n# Combine features of cars and notcars together\nX = np.vstack((car_features, Notcar_features)).astype(np.float64)\n\n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(Notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)\n\nprint('Using:', orient, 'orientations', pix_per_cell, 'pixels per cell and', cell_per_block, 'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n\n# Use a linear SVC\nsvc = LinearSVC() # C=5.0, gamma='auto', kernel='rbf'\n#svc = SVC(C=5.0,kernel='rbf')\n\n# Check the training time for the SVC\nt = time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2 - t, 2), 'Seconds to train SVC...')\n\n\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n# Check the prediction time for a single sample\nt = time.time()\n\n\n# ## 5c) Save the parameters needed after that in a pickle file\n\n# In[19]:\n\n#print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\ny_pred = svc.predict(X_test)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm,'\\n')\nAcc = ((cm[0][0]+cm[1][1])/(cm[0][0]+cm[0][1]+cm[1][0]+cm[1][1]))\nprint('Acc is = ',Acc)\n\n# # 6- Sliding Windows method \n\n# In[48]:\n\n'''\n\n''' \n\n'''\n# Save the parameters in a pickle in order to be easily accessed\npickle_file = \"Classifier.p\"\nprint(\"Saving the data in a pickle file.....\")\n\nwith open(pickle_file, \"wb\") as p_file:\n pickle.dump({\"X_Scaler\": X_scaler,\n \"svc\":svc,\n \"cspace\": color_space,\n \"orient\": orient,\n \"pix_per_cell\": pix_per_cell,\n \"cell_per_block\": cell_per_block,\n \"hog_channel\":hog_channel,\n \"spatial_size\": spatial_size,\n \"hist_bins\":hist_bins,\n \"spatial_range\": spatial_range,\n \"spatial_transform\":spatial_transform,\n \"visualize\":visualize,\n \"feature_vector\": feature_vector,\n \"spatial_feat\": spatial_feat,\n \"hist_feat\": hist_feat,\n \"hog_feat\": hog_feat,\n \"y_start_stop\": y_start_stop }, p_file, pickle.HIGHEST_PROTOCOL)\n\n'''\n\n# Sliding window search to get the windows \ndef slide_window(img, x_start_stop, y_start_stop, xy_window, xy_overlap):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n \n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n \n # Compute the number of pixels per step in 
x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n \n # Compute the number of windows in x/y\n nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))\n ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))\n nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step) \n ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step) \n \n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs * nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n \n starty = ys * ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n \n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n\n# In[49]:\n\n\n# test the Sliding window function\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\nwindows = slide_window(test_image, x_start_stop=[0, 1300], y_start_stop=[400, 680], xy_window=(96, 96), xy_overlap=(0.5, 0.5))\ntest_result = draw_boxes(test_image, windows, (255,0,0), 8)\nplt.imshow(test_result)\nplt.show()\n\n\n# # 7- Search Windows\n\n# ## 7a) Search Window and prediction for the input image\n\n# In[53]:\n\n\n# This function will take an image as an input and list of windows to search in them \ndef search_windows(img, windows, classifier, scaler, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag):\n \n \n # 1) Create an empty list to receive positive detection windows\n on_windows = []\n \n # 2) Iterate over all windows in the list\n for window in windows:\n # 3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))\n \n # 4) Extract features for that window using single_img_features()\n features = extract_features_One_image(img=test_img, cspace=cspace, spatial_size=spatial_size, hist_nbins=hist_nbins, \n hist_range=hist_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag, hog_channel=hog_channel, \n extract_spatial_flag=extract_spatial_flag, \n extract_color_hist_flag=extract_color_hist_flag, extract_hog_flag=extract_hog_flag)\n \n #print(features.min())\n #print(features.max())\n # 5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n \n # 6) Predict using your classifier\n prediction = classifier.predict(test_features)\n #print(\"pred: \", prediction)\n \n # 7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n # 8) Return windows for positive detections\n return on_windows\n\n\n# ## 7b) Load the pickle that conatins the needed parameters\n\n# In[23]:\n\n\n# read the data saved previously in the pickle file\npickle_file_name = \"Classifier.p\"\n\nwith open(pickle_file_name, \"rb\") as f:\n pickle_data = pickle.load(f)\n \n# # X_scaler\n# param_X_scaler = pickle_data[\"X_Scaler\"]\n# param_svc = pickle_data[\"svc\"]\n# param_color_space = pickle_data[\"cspace\"]\n# param_orient = pickle_data[\"orient\"]\n# param_pix_per_cell = pickle_data[\"pix_per_cell\"]\n# 
param_cell_per_block = pickle_data[\"cell_per_block\"]\n# param_hog_channel = pickle_data[\"hog_channel\"]\n# param_spatial_size = pickle_data[\"spatial_size\"]\n# param_hist_bins = pickle_data[\"hist_bins\"]\n# param_spatial_transform = pickle_data[\"spatial_transform\"]\n# param_visualize = pickle_data[\"visualize\"]\n# param_feature_vector = pickle_data[\"feature_vector\"]\n# param_hist_feat = pickle_data[\"hist_feat\"]\n# param_hog_feat = pickle_data[\"hog_feat\"]\n# param_y_start_stop = pickle_data[\"y_start_stop\"]\n \n\nprint(\"Saved parameters is loaded..\")\n \n\n\n# # 8) Apply the full pipeline\n\n# ## 8a) Find Cars in the image based on the loaded data\n\n# In[62]:\n\n\n# This function car find cars in an image based on the saved data in the pickle file\ndef find_cars(image):\n \n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.show()\n # Extract parameters from the pickle file\n pickle_file_name = \"Classifier.p\"\n with open(pickle_file_name, \"rb\") as f:\n parameters = pickle.load(f)\n \n # copy from the image passed to the function\n draw_image = np.copy(image)\n y_start_stop = parameters[\"y_start_stop\"]\n\n # get windows from slide_window function with multiscale sliding window\n windows = slide_window(image, x_start_stop=[None, 1300], y_start_stop=[400,600],\n xy_window=(64, 64), xy_overlap=(0.9, 0.9))\n \n windows += slide_window(image, x_start_stop=[None, 1300], y_start_stop=y_start_stop,\n xy_window=(96, 96), xy_overlap=(0.9, 0.9))\n\n windows += slide_window(image, x_start_stop=[None, 1300], y_start_stop=y_start_stop,\n xy_window=(128, 128), xy_overlap=(0.9, 0.9))\n \n\n # search in the windows we have to select the best windows \n hot_windows = search_windows(img=image, windows=windows, classifier=parameters[\"svc\"],\n scaler=parameters[\"X_Scaler\"], cspace=parameters[\"cspace\"], \n spatial_size=parameters[\"spatial_size\"], hist_nbins=parameters[\"hist_bins\"],\n hist_range=parameters[\"spatial_range\"],orient=parameters[\"orient\"],\n pix_per_cell=parameters[\"pix_per_cell\"],\n cell_per_block=parameters[\"cell_per_block\"],\n transform_sqrt_flag=parameters[\"spatial_transform\"],\n vis_flag=parameters[\"visualize\"],\n feature_vector_flag=parameters[\"feature_vector\"], \n hog_channel=parameters[\"hog_channel\"],\n extract_spatial_flag=parameters[\"spatial_feat\"], \n extract_color_hist_flag=parameters[\"hist_feat\"],\n extract_hog_flag=parameters[\"hog_feat\"])\n \n \n # draw boxes over the given image\n window_image = draw_boxes(draw_image, hot_windows, color=(255, 0, 0), thick=8)\n \n plt.imshow(window_image)\n plt.show()\n \n # Create a zeros_like the image given in order to be passed over the function of heatmap\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n\n # Add heat to each box in box list\n heat = add_heat(heat, hot_windows)\n \n #print(heat.max())\n\n # Apply threshold to help remove false positives\n heat = apply_threshold(heat, 15)\n\n # Visualize the heatmap when displaying\n heatmap = np.clip(heat, 0, 255)\n \n plt.imshow(heatmap)\n plt.show()\n\n # Find final boxes from heatmap using label function\n labels = label(heatmap)\n draw_image = draw_labels_bboxes(np.copy(draw_image), labels)\n\n # plt.close(\"all\")\n #\n# fig = plt.figure()\n# plt.figure(figsize=(20,10))\n# #\n# plt.subplot(133)\n# plt.imshow(draw_image)\n# plt.title('Car Positions')\n# plt.subplot(132)\n# plt.imshow(heatmap, cmap='hot')\n# plt.title('Heat Map')\n# plt.subplot(131)\n# plt.imshow(window_image)\n# plt.title('Windows')\n # # 
fig.tight_layout()\n # # mng = plt.get_current_fig_manager()\n #\n # # mng.full_screen_toggle()\n # # plt.pause(0.05)\n #\n # # plt.imshow(window_img)\n #plt.show()\n \n draw_image = cv2.cvtColor(draw_image, cv2.COLOR_RGB2BGR)\n \n #plt.imshow(draw_image)\n #plt.show()\n \n return draw_image\n\n# ## 8b) Test the pieline using the test images in the folder we have\n\n# In[25]:\n\n\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n'''\n\n#doing all the relevant imports\n#import matplotlib.pyplot as plt\n#import matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n\ncap = cv2.VideoCapture('project_video.mp4')\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n# Very important output video size should equal input video size (960,540)\nout = cv2.VideoWriter('output.mp4',fourcc, 20.0, (1280,720))\n\nwhile(cap.isOpened()):\n ret ,frame = cap.read()\n if ret == True:\n \n test_result_image_from_pipeline = find_cars(frame,parameters)\n result=cv2.cvtColor(test_result_image_from_pipeline,cv2.COLOR_BGR2RGB)\n\n k = cv2.waitKey(60) & 0xff\n if k == 27:\n break\n else:\n # cv2.imshow('result',result)\n #cv2.imshow('frame',frame)\n out.write(result)\n else:\n print('Error Vedio')\n break\ncv2.destroyAllWindows()\ncap.release()\nout.release()\n'''\n\n#group_of_images = glob.glob(\"test_images/*.jpg\")\n\n#for image in group_of_images:\n# read_test_image_to_pipeline = cv2.imread(image)\n# print(pickle_data)\n# test_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\n# plt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\n# plt.show()\n\nread_test_image_to_pipeline = cv2.imread(\"test_images/test14.jpg\")\nprint(pickle_data)\ntest_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\nplt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\nplt.show()\n\n\n# # 9) Apply the Pipeline on the Project Video\n\n# In[26]:\n\n\n# # Extract frames from the test video\n\n# project_output_video = \"test_output_video.mp4\"\n# clip = VideoFileClip(\"test_video.mp4\")\n# output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n# get_ipython().magic('time output_video.write_videofile(project_output_video, audio=False)')\n\n\n# In[27]:\n\n\n# group_of_images = glob.glob(\"test_images/*.jpg\")\n\n# for image in group_of_images:\n# read_test_image_to_pipeline = cv2.imread(image)\n\n# print(pickle_data)\n# read_test_image_to_pipeline=cv2.cvtColor(read_test_image_to_pipeline, cv2.COLOR_RGB2BGR)\n# test_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\n\n# #plt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\n# plt.imshow(test_result_image_from_pipeline)\n# plt.show()\n\n\n# In[28]:\n\n\n# Extract frames from the Project video\n\n# project_output_video = \"project_output_video.mp4\"\n# clip = VideoFileClip(\"project_video.mp4\")\n# output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n# get_ipython().magic('time output_video.write_videofile(project_output_video, audio=False)')\n\n\n# In[29]:\n'''\ndef close_clip(vidya_clip):\n # noinspection PyBroadException\n try:\n vidya_clip.reader.close()\n del vidya_clip.reader\n if vidya_clip.audio is not None:\n 
vidya_clip.audio.reader.close_proc()\n del vidya_clip.audio\n del vidya_clip\n except Exception:\n # sys.exc_clear()\n pass\n \nproject_output_video = \"test_video_out.mp4\"\nclip = VideoFileClip(\"test_video.mp4\")\noutput_video = clip.fl_image(find_cars).subclip(0,5) # NOTE: this function expects color images!!\nclip.write_videofile(project_output_video)\nclip.write_videofile(project_output_video, audio=False)\nclip.close()\n'''\n\n# In[30]:\n\n'''\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\nimport imageio\nimageio.plugins.ffmpeg.download()\n'''\n\n# In[31]:\n\n# project_output_video = \"test.mp4\"\n#project_output_video = \"project_video_out.mp4\"\n#clip = VideoFileClip(\"project_video.mp4\")\n#output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n#clip.write_videofile(project_output_video)\n#clip.write_videofile(project_output_video, audio=False)\n#clip.close()\n\n# In[32]:\n\n'''\nproject_output_video = \"part1.mp4\"\nclip1 = VideoFileClip(\"project_video.mp4\")\noutput_video = clip1.fl_image(find_cars).subclip(0,5) # NOTE: this function expects color images!!\noutput_video.write_videofile(project_output_video, audio=False)\noutput_video.write_videofile(project_output_video, audio=False)\nclip1.close()\n'''\n# In[33]:\n'''\nproject_output_video_2 = \"Output.mp4\"\nclip2 = VideoFileClip(\"project_video.mp4\")\noutput_video_2 = clip2.fl_image(find_cars) # NOTE: this function expects color images!!\n#get_ipython().magic('time output_video_2.write_videofile(project_output_video_2, audio=False)')\n\n'''\n\n# # Extract images for the Report\n\n# In[46]:\n\n# Test extraction of hog features and visulaize\n#test_image = mpimg.imread(\"test_images/test1.jpg\")\n#test_image = cv2.imread(\"non-vehicles/Extras/extra2532.png\")\n\n# Note that the image should be 2D (grayscale) \n#test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)\n\n#pickle_file_name = \"Classifier.p\"\n\n#with open(pickle_file_name, \"rb\") as f:\n# parameters = pickle.load(f)\n\n#test_features, test_result_img = get_hog_features(test_gray, orient=parameters[\"orient\"], pix_per_cell=parameters[\"pix_per_cell\"], \n# cell_per_block=parameters['cell_per_block'], transform_sqrt_flag=True, \n# vis_flag=True, feature_vector_flag=True)\n\n#fig = plt.figure(figsize=(12,3))\n#plt.subplot(121)\n#plt.imshow(test_image, cmap='gray')\n#plt.title('Example')\n#plt.subplot(122)\n#plt.imshow(test_result_img, cmap='gray')\n#plt.title('HOG Visualization')\n#plt.show()\n\n\n# In[63]:\n\n'''\nextract_images = \"extract_images.mp4\"\nclip_extract_images = VideoFileClip(\"project_video.mp4\").reader.close()\n\nextract_sequence_images = clip_extract_images.fl_image(find_cars).subclip(30,33) # NOTE: this function expects color images!!\nget_ipython().magic('time extract_sequence_images.write_videofile(extract_images, audio=False)')\n\n#/////////////////////////////////////////////////////////////////////////////////////\n'''\n'''\nproject_video_output = './project_video_output.mp4'\nclip1 = VideoFileClip(\"./project_video.mp4\")\nlane_clip = clip1.fl_image(find_cars) #NOTE: this function expects color images!!\nlane_clip.write_videofile(project_video_output, audio=False)\n'''\nproject_video_output = './test_video_output.mp4'\nclip1 = VideoFileClip(\"./test_video.mp4\")\nlane_clip = clip1.fl_image(find_cars) #NOTE: this function expects color images!!\nlane_clip.write_videofile(project_video_output, audio=False)\n","sub_path":"Object_Detection 
Project.py","file_name":"Object_Detection Project.py","file_ext":"py","file_size_in_byte":35147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"119261001","text":"import pandas as pd\nimport numpy as np\nfrom coastlib.coreutils.design_tools import runup, overtopping, hudson\n\n\n# Input design parameters\nHs = 4 # Significant wave height [ft]\nTp = 8 # Wave period [s]\nrunup_limit = 2 # Runup limit [ft]\novertopping_limit = 0.1\n\n\n# Set revetment parameter ranges\nslopes = np.arange(start=0.5, stop=0.25, step=-0.01)\ncrest_elevations = np.arange(start=10, stop=20, step=0.1)\ntoe_elevations = np.arange(start=-10, stop=0, step=0.1)\n\n\n# Convert parameters to metric units\ncrest_elevations *= 0.3048\ntoe_elevations *= 0.3048\n\n\n# Find optimal combination\nsummary_m = pd.DataFrame(\n data=[\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.inf\n ],\n index=[\n 'Slope',\n 'Crest Elevation [m]',\n 'Toe elevation [m]',\n 'D50 [m]',\n 'Volume [m^3]'\n ],\n columns=['Value']\n)\nfor slp in slopes:\n for crel in crest_elevations:\n for toel in toe_elevations:\n D = hudson(Hs=Hs, alfa=slp, rock_density=1)\n if (\n runup(Hm0=Hs, Tp=Tp, slp=slp) < runup_limit and\n overtopping(Hm0=Hs, Rc=1) < overtopping_limit and\n volume(slp, crel, toel, D) < summary_m[0]['volume']\n ):\n summary_m['Value']['Slope'] = slp\n summary_m['Value']['D50 [m]'] = D\n summary_m['Value']['Volume [m^3'] = volume(slp, crel, toel)\n\n\n# Convert summary to customary units\nsummary = pd.DataFrame(\n data=[\n summary_m['Values']['Slope'],\n summary_m['Values']['Crest elevation [m]'] / 0.3048,\n summary_m['Values']['Toe elevation [m]'] / 0.3048,\n summary_m['Values']['D50 [m]'] / 0.3048,\n summary_m['Values']['Volume [m^3]'] / ((0.3048 * 3) ** 3),\n ],\n index=[\n 'Slope',\n 'Crest Elevation [ft]',\n 'Toe elevation [ft]',\n 'D50 [ft]',\n 'Volume [yd^3]'\n ],\n columns='Value'\n)\n\n\n# Return optimal revetment parameters\nif np.isinf(summary['Value']['Volume [m^3]']):\n print('No solution exists for the input provided')\nelse:\n print(summary)\n # plot optimal revetment over input profile\n","sub_path":"Hempstead/revetment_design.py","file_name":"revetment_design.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"566203147","text":"import paddle.fluid as fluid\n\n\ndef cnn_net(dict_dim=100,\n max_len=10,\n cnn_dim=32,\n cnn_filter_size=128,\n emb_dim=8,\n hid_dim=128,\n class_dim=2,\n is_prediction=False):\n \"\"\"\n Conv net\n \"\"\"\n data = fluid.data(name=\"input\", shape=[None, max_len], dtype='int64')\n label = fluid.data(name=\"label\", shape=[None, 1], dtype='int64')\n seq_len = fluid.data(name=\"seq_len\", shape=[None], dtype='int64')\n # embedding layer\n emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])\n emb = fluid.layers.sequence_unpad(emb, length=seq_len)\n # convolution layer\n conv = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=cnn_dim,\n filter_size=cnn_filter_size,\n act=\"tanh\",\n pool_type=\"max\")\n\n # full connect layer\n fc_1 = fluid.layers.fc(input=[conv], size=hid_dim)\n # softmax layer\n prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act=\"softmax\")\n #if is_prediction:\n # return prediction\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost\n","sub_path":"PaddleRec/text_classification/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"233771355","text":"import tensorflow as tf\ntf.set_random_seed(66)\n\nx_train =[1,2,3]\ny_train = [3,5,7]\n\nW=tf.Variable(tf.random_normal([1]),name='weight') #정규분포에따른 랜덤값을 하나 넣겠다는 뜻\nb=tf.Variable(tf.random_normal([1]),name='bias')\n\nhypothesis = x_train * W +b\n\ncost = tf.reduce_mean(tf.square(hypothesis - y_train)) #예측값에서 실제값을 뺸것을 제곱을하여 평균을 낸것 -> 비용(손실) => loss=mse 와 같다 !\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) #AdamOptimizer 성능 최고\n\ntrain = optimizer.minimize(cost)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nprint(sess.run(W),sess.run(b)) #[0.06524777] [1.4264158]\n\nfor step in range(3): #step -> epoch\n sess.run(train)\n if step % 1 ==0:\n print(\"step: \",step,'sess.run(cost): ',sess.run(cost),'sess.run(W): ',sess.run(W),'sess.run(b): ',sess.run(b)) #weight 2, bias 1 로 수렴\n # sess.run(train)\n # if step % 2 ==0:\n # print(\"step: \",step,'sess.run(cost): ',sess.run(cost),'sess.run(W): ',sess.run(W),'sess.run(b): ',sess.run(b)) #weight 2, bias 1 로 수렴\n\n\n#경사하강법에있는 최적의 optimizer (minimize)해준 지점을 찾음\n#loss가 최소인것을 찾음\n#1. x*w + b =>와 y_train에서 mse를 산출\n#2. 계산된 cost를 minimized해준것이 optimizer\n#3. optimizer를 train 1,2,3 한번 돈것이 1 epoch ","sub_path":"tf114/tf05_linear.py","file_name":"tf05_linear.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"497208801","text":"\"\"\"\nSupport for www service installation and management.\n\"\"\"\n\nimport os\n\nfrom fabric.api import run, settings, env, put, sudo, local\n\nfrom os import path\nfrom twisted.python.util import sibpath\n\nfrom braid import authbind, archive\nfrom braid.twisted import service\nfrom braid.debian import equivs\nfrom braid.tasks import addTasks\nfrom braid.utils import confirm\n\nfrom braid import config\n__all__ = ['config']\n\n\nclass TwistedWeb(service.Service):\n def task_install(self):\n \"\"\"\n Install t-web, a Twisted Web based server.\n \"\"\"\n # Bootstrap a new service environment\n self.bootstrap()\n\n # Add to www-data group. Mailman depends on this.\n sudo('/usr/sbin/usermod -a -g www-data -G t-web {}'.format(self.serviceUser))\n\n # Setup authbind\n authbind.allow(self.serviceUser, 80)\n authbind.allow(self.serviceUser, 443)\n\n # Install httpd equiv, so apt doesn't try to install apache ever\n equivs.installEquiv(self.serviceName, 'httpd')\n\n with settings(user=self.serviceUser):\n run('/bin/ln -nsf {}/start {}/start'.format(self.configDir, self.binDir))\n run('/bin/ln -nsf {}/start-maintenance {}/start-maintenance'.format(self.configDir, self.binDir))\n self.update()\n # cron.install(self.serviceUser, '{}/crontab'.format(self.configDir))\n\n run('/bin/mkdir -p ~/data')\n if env.get('installPrivateData'):\n self.task_installTLSKeys()\n self.task_makeProductionServer()\n else:\n self.task_makeStagingServer()\n\n\n def task_makeProductionServer(self):\n \"\"\"\n Make the target server a production server.\n \"\"\"\n run('/usr/bin/touch {}/production'.format(self.configDir))\n\n\n def task_makeStagingServer(self):\n \"\"\"\n Make the target server a staging server.\n \"\"\"\n run('/bin/rm -f {}/production'.format(self.configDir))\n\n\n def task_makeTestTLSKeys(self):\n \"\"\"\n Make some test TLS certs.\n \"\"\"\n local(\"\"\"\n openssl req -config {config} -batch -x509 -sha256 -nodes -days 365 -newkey rsa:2048 \\\\\n -keyout {key} -out {cert}\n \"\"\".strip().format(\n key=sibpath(__file__, 'TEST.key'),\n cert=sibpath(__file__, 'twistedmatrix.com.crt'),\n config=sibpath(__file__, 'openssl.cnf')))\n local(\"cat {key} {cert} > {pem}\".format(\n key=sibpath(__file__, 'TEST.key'),\n cert=sibpath(__file__, 'twistedmatrix.com.crt'),\n pem=sibpath(__file__, 'www.twistedmatrix.com.pem')))\n\n\n def task_installTLSKeys(self):\n \"\"\"\n Install TLS keys.\n \"\"\"\n with settings(user=self.serviceUser):\n run('mkdir -p ~/ssl')\n for cert in ['www.twistedmatrix.com.pem',\n 'buildbot.twistedmatrix.com.pem']:\n fullpath = sibpath(__file__, cert)\n if path.exists(fullpath):\n put(fullpath, '~/ssl/' + cert, mode=0o600)\n run('ln -s ~/ssl/www.twistedmatrix.com.pem '\n '~/ssl/twistedmatrix.com.pem')\n run('ln -s ~/ssl/www.twistedmatrix.com.pem ~/ssl/DEFAULT.pem')\n\n\n def task_updateSoftware(self):\n \"\"\"\n Update just the PyPy and Twisted versions.\n \"\"\"\n self.task_stop()\n self.bootstrap()\n self.venv.install_twisted()\n self.task_start()\n\n\n def update(self):\n \"\"\"\n Update config.\n \"\"\"\n self.venv.install_twisted()\n\n with settings(user=self.serviceUser):\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)\n\n\n def task_update(self):\n \"\"\"\n Update config and restart.\n \"\"\"\n self.update()\n self.task_restart()\n\n\n def task_updateData(self):\n \"\"\"\n Update config.\n \"\"\"\n self.update()\n\n\n def task_dump(self, dump):\n \"\"\"\n Dump non-versioned 
resources.\n        \"\"\"\n        with settings(user=self.serviceUser):\n            archive.dump({\n                'data': 'data',\n            }, dump)\n\n\n    def task_restore(self, dump):\n        \"\"\"\n        Restore non-versioned resources.\n        \"\"\"\n        msg = 'All non-versioned web resources will be replaced with the backup.'\n        if confirm(msg):\n            with settings(user=self.serviceUser):\n                archive.restore({\n                    'data': 'data',\n                }, dump)\n\n    def task_startMaintenanceSite(self):\n        \"\"\"\n        Start maintenance site.\n        \"\"\"\n        with settings(user=self.serviceUser):\n            run('{}/start-maintenance'.format(self.binDir))\n\n\n    def task_uploadRelease(self, release, releasesTarball):\n        \"\"\"\n        Upload a release.\n\n        It expects a tarball containing the following files:\n        - Twisted-<release>.tar.bz2\n        - Twisted-<release>.<ext> for all source/windows installers\n        - twisted-<release>-<hash>.txt for md5 and sha512\n        - doc - for narrative documentation\n        - api - for api documents\n\n        @param release: Release version.\n        @param releasesTarball: Tarball with release tarballs and documentation\n        \"\"\"\n        apiVersion = '.'.join(release.split('.')[:2])\n        distPaths = {}\n        for ext in ['.tar.bz2', '-cp27-cp27m-win_amd64.whl']:\n            tarball = 'Twisted-{}{}'.format(release, ext)\n            distPaths[tarball] = 'data/releases/Twisted/{}/{}'.format(apiVersion, tarball)\n\n        distPaths['doc'] = 'data/documentation/{}'.format(release)\n        distPaths['api'] = 'data/documentation/{}/api'.format(release)\n        for hash in ['md5sums', 'shasums']:\n            hashFile = 'twisted-{}-{}.txt'.format(release,hash)\n            distPaths[hashFile] = 'data/releases/{}'.format(hashFile)\n\n        directories = [path.dirname(file) for file in distPaths.values()]\n\n        with settings(user=self.serviceUser):\n            run('/bin/mkdir -p {}'.format(' '.join(set(directories))))\n            archive.restore(distPaths, releasesTarball)\n\n\n    def task_updateCurrentDocumentation(self, release):\n        \"\"\"\n        Update the current link for documentation\n        \"\"\"\n        with settings(user=self.serviceUser):\n            run('/bin/ln -nsf {} data/documentation/current'.format(release))\n\n\n\naddTasks(globals(), TwistedWeb('t-web').getTasks())\n","sub_path":"services/t-web/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"592511199","text":"r\"\"\"\nThis module implements differential operators on spherical grids \n\n.. autosummary::\n :nosignatures:\n\n make_laplace\n make_gradient\n make_divergence\n make_vector_gradient\n make_tensor_divergence\n \n \n.. codeauthor:: David Zwicker \n\"\"\"\n\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom ...tools.docstrings import fill_in_docstring\nfrom ...tools.numba import jit\nfrom ...tools.typing import OperatorType\nfrom ..boundaries import Boundaries\nfrom ..spherical import SphericalSymGrid\nfrom .common import make_general_poisson_solver\n\n\n@SphericalSymGrid.register_operator(\"laplace\", rank_in=0, rank_out=0)\n@fill_in_docstring\ndef make_laplace(grid: SphericalSymGrid, conservative: bool = True) -> OperatorType:\n \"\"\"make a discretized laplace operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n conservative (bool):\n Flag indicating whether the laplace operator should be conservative (which\n results in slightly slower computations).\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n rs = grid.axes_coords[0]\n r_min, r_max = grid.axes_bounds[0]\n\n if conservative:\n # create a conservative spherical laplace operator\n rl = rs - dr / 2 # inner radii of spherical shells\n rh = rs + dr / 2 # outer radii\n assert np.isclose(rl[0], r_min) and np.isclose(rh[-1], r_max)\n volumes = (rh ** 3 - rl ** 3) / 3 # volume of the spherical shells\n factor_l = (rs - 0.5 * dr) ** 2 / (dr * volumes)\n factor_h = (rs + 0.5 * dr) ** 2 / (dr * volumes)\n\n @jit\n def laplace(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = factor_h[i - 1] * (arr[i + 1] - arr[i])\n out[i - 1] -= factor_l[i - 1] * (arr[i] - arr[i - 1])\n\n else: # create an operator that is not conservative\n dr2 = 1 / dr ** 2\n\n @jit\n def laplace(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = (arr[i + 1] - 2 * arr[i] + arr[i - 1]) * dr2\n out[i - 1] += (arr[i + 1] - arr[i - 1]) / (rs[i - 1] * dr)\n\n return laplace # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"gradient\", rank_in=0, rank_out=1)\n@fill_in_docstring\ndef make_gradient(grid: SphericalSymGrid) -> OperatorType:\n \"\"\"make a discretized gradient operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n\n scale_r = 1 / (2 * dr)\n\n @jit\n def gradient(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[0, i - 1] = (arr[i + 1] - arr[i - 1]) * scale_r\n out[1, i - 1] = out[2, i - 1] = 0 # no angular dependence by definition\n\n return gradient # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"gradient_squared\", rank_in=0, 
rank_out=0)\n@fill_in_docstring\ndef make_gradient_squared(grid: SphericalSymGrid, central: bool = True) -> OperatorType:\n \"\"\"make a discretized gradient squared operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n central (bool):\n Whether a central difference approximation is used for the gradient\n operator. If this is False, the squared gradient is calculated as\n the mean of the squared values of the forward and backward\n derivatives.\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n\n if central:\n # use central differences\n scale = 0.25 / dr ** 2\n\n @jit\n def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply squared gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = (arr[i + 1] - arr[i - 1]) ** 2 * scale\n\n else:\n # use forward and backward differences\n scale = 0.5 / dr ** 2\n\n @jit\n def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply squared gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n term = (arr[i + 1] - arr[i]) ** 2 + (arr[i] - arr[i - 1]) ** 2\n out[i - 1] = term * scale\n\n return gradient_squared # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"divergence\", rank_in=1, rank_out=0)\n@fill_in_docstring\ndef make_divergence(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized divergence operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Warning:\n This operator ignores the θ-component of the field when calculating the\n divergence. This is because the resulting scalar field could not be expressed\n on a :class:`~pde.grids.spherical_sym.SphericalSymGrid`.\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n rs = grid.axes_coords[0]\n\n scale_r = 1 / (2 * dr)\n fs = 2 / rs # factors that need to be multiplied below\n\n @jit\n def divergence(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply divergence operator to array `arr`\"\"\"\n if safe:\n assert np.all(arr[1, 1:-1] == 0)\n arr_r = arr[0, :]\n for i in range(1, dim_r + 1): # iterate radial points\n out[i - 1] = (arr_r[i + 1] - arr_r[i - 1]) * scale_r + fs[i - 1] * arr_r[i]\n\n return divergence # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"vector_gradient\", rank_in=1, rank_out=2)\n@fill_in_docstring\ndef make_vector_gradient(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized vector gradient operator for a spherical grid\n\n Warning:\n This operator ignores the two angular components of the field when calculating\n the gradient. 
This is because the resulting field could not be expressed on a\n :class:`~pde.grids.spherical_sym.SphericalSymGrid`.\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n rs = grid.axes_coords[0]\n dr = grid.discretization[0]\n scale_r = 1 / (2 * dr)\n\n @jit\n def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply vector gradient operator to array `arr`\"\"\"\n if safe:\n assert np.all(arr[1:, 1:-1] == 0)\n\n # assign aliases\n arr_r = arr[0, :]\n out_rr, out_rθ, out_rφ = out[0, 0, :], out[0, 1, :], out[0, 2, :]\n out_θr, out_θθ, out_θφ = out[1, 0, :], out[1, 1, :], out[1, 2, :]\n out_φr, out_φθ, out_φφ = out[2, 0, :], out[2, 1, :], out[2, 2, :]\n\n # set all components to zero that are not affected\n out_rθ[:] = 0\n out_rφ[:] = 0\n out_θr[:] = 0\n out_θφ[:] = 0\n out_φr[:] = 0\n out_φθ[:] = 0\n\n # inner radial boundary condition\n for i in range(1, dim_r + 1): # iterate radial points\n out_rr[i - 1] = (arr_r[i + 1] - arr_r[i - 1]) * scale_r\n out_θθ[i - 1] = arr_r[i] / rs[i - 1]\n out_φφ[i - 1] = arr_r[i] / rs[i - 1]\n\n return vector_gradient # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"tensor_divergence\", rank_in=2, rank_out=1)\n@fill_in_docstring\ndef make_tensor_divergence(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized tensor divergence operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n rs = grid.axes_coords[0]\n dr = grid.discretization[0]\n scale_r = 1 / (2 * dr)\n\n @jit\n def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply tensor divergence operator to array `arr`\"\"\"\n # assign aliases\n arr_rr, arr_rθ, arr_rφ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :]\n arr_θr, arr_θθ, arr_θφ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :]\n arr_φr, arr_φθ, arr_φφ = arr[2, 0, :], arr[2, 1, :], arr[2, 2, :]\n out_r, out_θ, out_φ = out[0, :], out[1, :], out[2, :]\n\n # check inputs\n if safe:\n assert np.all(arr_rθ[1:-1] == 0)\n assert np.all(arr_θθ[1:-1] == 0)\n assert np.all(arr_φφ[1:-1] == 0)\n assert np.all(arr_φθ[1:-1] == 0)\n assert np.all(arr_θφ[1:-1] == 0)\n\n # iterate over inner points\n for i in range(1, dim_r + 1):\n deriv_r = (arr_rr[i + 1] - arr_rr[i - 1]) * scale_r\n out_r[i - 1] = deriv_r + 2 * arr_rr[i] / rs[i - 1]\n\n deriv_r = (arr_θr[i + 1] - arr_θr[i - 1]) * scale_r\n out_θ[i - 1] = deriv_r + 2 * arr_θr[i] / rs[i - 1]\n\n deriv_r = (arr_φr[i + 1] - arr_φr[i - 1]) * scale_r\n out_φ[i - 1] = deriv_r + (2 * arr_φr[i] + arr_rφ[i]) / rs[i - 1]\n\n return tensor_divergence # type: ignore\n\n\n@fill_in_docstring\ndef _get_laplace_matrix(bcs: Boundaries) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"get sparse matrix for laplace operator on a polar grid\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n {ARG_BOUNDARIES_INSTANCE}\n\n Returns:\n tuple: A 
sparse matrix and a sparse vector that can be used to evaluate\n the discretized laplacian\n \"\"\"\n from scipy import sparse\n\n assert isinstance(bcs.grid, SphericalSymGrid)\n bcs.check_value_rank(0)\n\n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n dr = bcs.grid.discretization[0]\n rs = bcs.grid.axes_coords[0]\n r_min, r_max = bcs.grid.axes_bounds[0]\n\n # create a conservative spherical laplace operator\n rl = r_min + dr * np.arange(dim_r) # inner radii of spherical shells\n rh = rl + dr # outer radii\n assert np.isclose(rh[-1], r_max)\n volumes = (rh ** 3 - rl ** 3) / 3 # volume of the spherical shells\n\n factor_l = (rs - 0.5 * dr) ** 2 / (dr * volumes)\n factor_h = (rs + 0.5 * dr) ** 2 / (dr * volumes)\n\n matrix = sparse.dok_matrix((dim_r, dim_r))\n vector = sparse.dok_matrix((dim_r, 1))\n\n for i in range(dim_r):\n matrix[i, i] += -factor_l[i] - factor_h[i]\n\n if i == 0:\n if r_min == 0:\n matrix[i, i + 1] = factor_l[i]\n else:\n const, entries = bcs[0].get_data((-1,))\n vector[i] += const * factor_l[i]\n for k, v in entries.items():\n matrix[i, k] += v * factor_l[i]\n\n else:\n matrix[i, i - 1] = factor_l[i]\n\n if i == dim_r - 1:\n const, entries = bcs[0].get_data((dim_r,))\n vector[i] += const * factor_h[i]\n for k, v in entries.items():\n matrix[i, k] += v * factor_h[i]\n\n else:\n matrix[i, i + 1] = factor_h[i]\n\n return matrix, vector\n\n\n@SphericalSymGrid.register_operator(\"poisson_solver\", rank_in=0, rank_out=0)\n@fill_in_docstring\ndef make_poisson_solver(bcs: Boundaries, method: str = \"auto\") -> OperatorType:\n \"\"\"make a operator that solves Poisson's equation\n\n {DESCR_POLAR_GRID}\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n {ARG_BOUNDARIES_INSTANCE}\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n matrix, vector = _get_laplace_matrix(bcs)\n return make_general_poisson_solver(matrix, vector, method)\n","sub_path":"pde/grids/operators/spherical_sym.py","file_name":"spherical_sym.py","file_ext":"py","file_size_in_byte":13575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
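A quick numerical sanity check (mine, not part of the library) of the conservative Laplacian stencil that spherical_sym.py defines above, re-implemented in plain NumPy with one ghost cell per side: the stencil should return ~0 for a constant field and, by construction of the shell volumes, exactly 6 for f(r) = r^2, the analytic spherically symmetric Laplacian of r^2. The grid parameters below are illustrative:

import numpy as np

dim_r, dr = 8, 0.1
rs = (np.arange(dim_r) + 0.5) * dr  # cell centres, r_min = 0
rl, rh = rs - dr / 2, rs + dr / 2   # inner/outer shell radii
volumes = (rh ** 3 - rl ** 3) / 3   # shell volumes, as in make_laplace
factor_l = (rs - 0.5 * dr) ** 2 / (dr * volumes)
factor_h = (rs + 0.5 * dr) ** 2 / (dr * volumes)

def laplace(arr):
    # arr carries one ghost cell on each side, like the jitted kernels above
    out = np.empty(dim_r)
    for i in range(1, dim_r + 1):
        out[i - 1] = factor_h[i - 1] * (arr[i + 1] - arr[i]) - factor_l[i - 1] * (arr[i] - arr[i - 1])
    return out

r_ghost = np.concatenate(([rs[0] - dr], rs, [rs[-1] + dr]))
print(laplace(np.ones(dim_r + 2)))  # zeros: a constant field has no flux
print(laplace(r_ghost ** 2))        # 6.0 in every cell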
+{"seq_id":"133233967","text":"from secml.array import CArray\nfrom secml.figure import CFigure\n\nX = CArray.linspace(-3.14, 3.14, 256, endpoint=True)\nC, S = X.cos(), X.sin()\n\nfig = CFigure(fontsize=14)\n\nfig.sp.plot(X, C, color='red', alpha=0.5, linewidth=1.0, linestyle='-')\nfig.sp.plot(X, S)\n\nfig.sp.xlim(-3, 3)\n\nfig.show()\n","sub_path":"docs/source/pyplots/xlim.py","file_name":"xlim.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"354867316","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, render\nfrom django.template import RequestContext\n\nfrom eventus_backend.forms import RegistrationForm, LoginForm\nfrom .models import EventusUser\n\n\n# Main render for the login page\ndef eventus_login(request):\n if request.user.is_authenticated():\n ''' If facebook user, check that he is linked to an eventus profile.'''\n return HttpResponseRedirect('/home/')\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n eventus_user = authenticate(username=username, password=password)\n if eventus_user is not None:\n login(request, eventus_user)\n return HttpResponseRedirect('/home/')\n else:\n return render_to_response('eventus/login.html', {'form': login_form},\n context_instance=RequestContext(request))\n else:\n print('form not valid')\n return render_to_response('eventus/login.html', {'form': login_form},\n context_instance=RequestContext(request))\n else:\n ''' The user is not submitting any login form.'''\n login_form = LoginForm(request.POST)\n context = {'form': login_form}\n return render_to_response('eventus/login.html', context, context_instance=RequestContext(request))\n\n\n# Method managing login through facebook\ndef eventus_login_fb(request):\n print(\"BMR - running the view facebook login method\")\n if request.user.is_authenticated():\n ''' If facebook user, check that he is linked to an eventus profile.'''\n print(\"Loggging with facebook\")\n print(request.__dict__)\n return HttpResponseRedirect('/home/')\n else:\n return HttpResponseRedirect('/login/')\n\n\n# Main render for the registration page\ndef eventus_register(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/home/')\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n user = User.objects.create_user(username=form.cleaned_data['username'], email=form.cleaned_data['email'],\n password=form.cleaned_data['password'])\n user.save()\n eventus_user = EventusUser(user=user, name=form.cleaned_data['name'],\n birthday=form.cleaned_data['birthday'])\n eventus_user.save()\n return HttpResponseRedirect('/home/')\n else:\n context = {'form': form}\n return render_to_response('eventus/register.html', context,\n context_instance=RequestContext(request))\n else:\n ''' user is not submitting the form, show them a blank registration form '''\n form = RegistrationForm()\n context = {'form': form}\n return render_to_response('eventus/register.html', context, context_instance=RequestContext(request))\n\n# Main renderer for the home page\ndef eventus_home(request):\n if request.user.is_authenticated():\n return render(request, 'eventus/home.html', {})\n else:\n return HttpResponseRedirect('/')\n\n\ndef eventus_logout(request):\n logout(request)\n return HttpResponseRedirect('/')\n","sub_path":"eventus_backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"247730855","text":"import pandas as pd\r\n\r\n#open data\r\ndf = pd.read_csv('Absenteeism_at_work.csv',\r\n header=0, sep=',',encoding='utf-8')\r\n\r\ndf.dropna(how=\"all\", inplace=True) # drops the empty line at file-end\r\n\r\n#### make a new date variable\r\ndef label_month_year(row):\r\n return(str(row['Month of absence'])+str(row['Year']))\r\ndf['month-year']=df.apply(lambda row: label_month_year (row),axis=1)\r\n\r\n#and sum all of the values for each month\r\ndf2=df.groupby('month-year').sum()[['Absenteeism time in hours']]\r\ndf2['Date']=pd.to_datetime(df2.ix[:,0].keys(), format='%m%Y')\r\ndf2=df2.sort_values(by='Date')\r\n\r\n# plots\r\nfrom matplotlib import pyplot\r\nfrom statsmodels.graphics.tsaplots import plot_acf\r\ntime1=pd.Series(df2['Absenteeism time in hours'].values, index=df2['Date'])\r\n\r\n# simple line plot\r\nplot=time1.plot() \r\n\r\n\r\n#autocorrelation plot - comment out the first plot and run the line below to get autocorrelation plot\r\n#plot2=plot_acf(time1) \r\n\r\npyplot.show()\r\n","sub_path":"autocorrelations/autocor.py","file_name":"autocor.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"626505207","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import IncrementalPCA\nfrom beat_extraction_fcns import *\n\nclass The_Autoencoder:\n\n def __init__(self, chunks_list=None, group_list=None, encoded_dim=20,\n encoder_filename=None):\n self._encoded_dim = encoded_dim\n if chunks_list is not None:\n self._chunk_arr = np.array(chunks_list)\n self._all_groups = np.array(group_list)\n self.break_data()\n self.build_autoencoder()\n self.train_autoencoder()\n if encoder_filename is not None:\n with open(encoder_filename, 'rb') as handle:\n self._encoder = pickle.load(handle)\n handle.close()\n \n \n \n \n def expand_chunks(self, chunk_arr):\n break_list = []\n for chunk in chunk_arr:\n for i in range(chunk.shape[1]):\n this_piece = chunk[:,i]\n this_piece = (this_piece - np.min(this_piece))/((np.max(this_piece) - np.min(this_piece)) + 0.000001)\n break_list.append(this_piece)\n break_list = np.array(break_list)\n return break_list\n \n \n \n def break_data(self):\n self._X_train = self.expand_chunks(self._chunk_arr)\n self._X_train = extract_beats_from_many(self._X_train)\n \n \n def build_autoencoder(self):\n self._encoder = IncrementalPCA(n_components=self._encoded_dim, \n whiten=True)\n \n \n def train_autoencoder(self):\n self._encoder = self._encoder.fit(self._X_train)\n \n \n \n def encode(self, chunk_arr, num_beats_to_encode=10):\n chunk_encode_rows = []\n for chunk in chunk_arr:\n for col in range(chunk.shape[1]):\n peaks, beat_sigs = detect_peaks(chunk[:,col])\n if len(beat_sigs) > 0:\n beat_pca = self._encoder.transform(np.array(beat_sigs))\n flat_sig = np.zeros(num_beats_to_encode*self._encoded_dim)\n idx = 0\n count = 0\n if len(beat_sigs) > 0:\n for row in beat_pca:\n flat_sig[idx:idx+beat_pca.shape[1]] = row\n idx += beat_pca.shape[1]\n count += 1 \n if count == num_beats_to_encode:\n break\n if col == 0:\n flat_chunk = flat_sig\n else:\n flat_chunk = np.concatenate((flat_chunk, flat_sig))\n chunk_encode_rows.append(flat_chunk)\n chunk_encode_rows = np.array(chunk_encode_rows)\n return chunk_encode_rows\n \n \n def save(self, filename):\n with open(filename, 'wb') as handle:\n pickle.dump(self._encoder, handle)\n handle.close()\n \n ","sub_path":"Train_Autoencoder.py","file_name":"Train_Autoencoder.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"147254870","text":"from Data.DataAPI import DataAPI as DB\n\n\nclass Search_Manager:\n '''Searches database by term, catagory and file'''\n def __init__(self):\n self.DB = DB() #DataAPI\n self.current_data = None\n self.search_string = None\n self.search_field = None\n self.result = []\n\n\n def search(self, search_string, search_field, search_catagory):\n '''Recieves search specifications and delegates search, returning result after search'''\n self.search_field = search_field.lower()\n self.current_data = self.fetch(search_catagory)\n self.search_string = str(search_string.lower())\n self._search()\n \n results = self.result\n self.clear()\n\n return results\n\n\n def fetch(self, cat):\n '''returns database entries by catagory '''\n if cat == 'contract': return self.DB.read_all_contracts()\n if cat == 'customer': return self.DB.read_all_customers()\n if cat == 'destination': return self.DB.read_all_destinations()\n if cat == 'employee': return self.DB.read_all_employees()\n if cat == 'vehicle': return self.DB.read_all_vehicles()\n if cat == 'vehicle_type': return self.DB.read_all_vehicle_types()\n\n\n def _search(self):\n '''Searches loaded document for a match to loaded search string'''\n for el in self.current_data:\n obj = vars(el)\n if obj[self.search_field].lower() == self.search_string:\n self.result.append(el)\n \n\n def clear(self):\n '''Clears result'''\n self.result = []\n\n\n\n\n\n\n ","sub_path":"src/Logic/Search_Manager.py","file_name":"Search_Manager.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"7969968","text":"# Copyright (c) 2017 The Khronos Group Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division, print_function\n\nfrom collections import OrderedDict\n\nimport nnef\n\nfrom . import dog\nfrom . import utils\nfrom .nnef_dog_types import NnefDN, NnefOp, NnefGraph\n\n\ndef nnefgraph_to_nnefdog(nnefgraph, variables_dir=None):\n if variables_dir:\n variables_dir = utils.without_slash(variables_dir)\n\n properties, nnefops = nnefgraph\n dtype_by_nnefdn_name = properties[\"dtypes\"]\n shape_by_nnefdn_name = properties[\"shapes\"]\n graph_name = properties[\"graph\"].name\n graph_inputs = list(properties[\"graph\"].params.keys())\n graph_outputs = list(properties[\"graph\"].results.keys())\n\n ops = []\n dn_by_name = {}\n\n def transform_arg(arg, op):\n if isinstance(arg, nnef.Identifier):\n dn = dn_by_name.get(str(arg))\n if dn is None:\n utils.print_error(\"DataNode {} not defined before use\".format(str(arg)))\n return utils.REMOVE\n if op not in dn.consumers: # can be multiple times, eg: matmul(a, a)\n dn.consumers.append(op)\n return dn\n else:\n return arg\n\n def transform_result(result, op):\n\n if isinstance(result, nnef.Identifier):\n dn = NnefDN(str(result))\n dn.shape = list(shape_by_nnefdn_name[str(result)])\n dn.dtype = str(dtype_by_nnefdn_name.get(str(result)))\n dn.producer = op\n if dn.name in dn_by_name:\n utils.print_error(\"DataNode {} defined multiple times\".format(dn.name))\n return utils.REMOVE\n dn_by_name[dn.name] = dn\n return dn\n else:\n return result\n\n def transform_tensor_to_dn(tensor):\n dn = dn_by_name.get(str(tensor))\n if dn is None:\n utils.print_error(\"DataNode {} not defined before use\".format(str(tensor)))\n return utils.REMOVE\n return dn\n\n for prototype, values in nnefops:\n op = NnefOp(prototype.name)\n\n args = OrderedDict([(name, values[name]) for name in prototype.params.keys()])\n results = OrderedDict([(name, values[name]) for name in prototype.results.keys()])\n\n op.args = utils.recursive_transform(args, lambda arg: transform_arg(arg, op))\n op.results = utils.recursive_transform(results, lambda result: transform_result(result, op))\n ops.append(op)\n\n if variables_dir and op.name == \"variable\":\n op.result_node.extra[dog.EXTRA_WEIGHTS] = utils.read_nnef_tensor(\n \"{}/{}.dat\".format(variables_dir, op.args[\"label\"]))\n\n input_dn_names = [dn.name for dn in utils.recursive_transform(graph_inputs, transform_tensor_to_dn)]\n output_dn_names = [dn.name for dn in utils.recursive_transform(graph_outputs, transform_tensor_to_dn)]\n\n return NnefGraph(graph_name, ops, dn_by_name, input_dn_names, output_dn_names)\n","sub_path":"converter/nnef_converters/common/nnef_to_dog.py","file_name":"nnef_to_dog.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"376471928","text":"#!/usr/bin/python3\n# -*- coding: utf=8 -*-\nimport sys\nimport cv2\nimport numpy as np\nimport platform\nprint(platform.python_version())\nfrom cv2 import aruco\nfrom cv_bridge import CvBridge, CvBridgeError\n\nprint(cv2.__version__)\n\ndef getid(image_src):\n aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)\n parameters = aruco.DetectorParameters_create()\n corners, ids, rejectedImgPoints = aruco.detectMarkers(image_src, aruco_dict, parameters=parameters)\n if(ids is None):\n print(\"None!!!!\")\n result = []\n result.append(0)\n else:\n print(\"cdcd\")\n frame_markers = aruco.drawDetectedMarkers(image_src.copy(), corners, ids)\n # cv2.imshow('frame_result', frame_markers), cv2.waitKey(1)\n print(len(ids))\n print(ids)\n\n result = []\n for i in range(len(ids)):\n print(ids[i])\n result.append(ids[i][0])\n print(result)\n return result\n","sub_path":"src/aruco_detector.py","file_name":"aruco_detector.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"129077401","text":"import binascii\nimport struct\nimport math\nimport collections\n\ndef dayTime(seconds):\n \"\"\"\n 天内秒转时间\n 单位:秒\n :param seconds:\n :return:\n \"\"\"\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n if h<10:\n return (\"%02d:%02d:%02d\" % (h, m, s))\n else:\n return (\"%d:%02d:%02d\" % (h, m, s))\n\n\ndef signeddata(val,nbits):\n result = 0x0\n result = (val << (32-nbits)) & 2147483647\n result = result >> (32-nbits)\n return result\n\n\nf = open(\"G:\\\\ts03\\\\bds_cd03\", \"rb\")\nf2 = open(\"G:\\\\ts03\\\\bds_data3.txt\",\"w+\")\nf2.write(\"datetime\".rjust(19, \" \") +\" \"+ \"prn\".rjust(5, \" \") + \"time\".rjust(10, \" \") +\n \"type\".rjust(15, \" \") + \"A0_utc\".rjust(28, \" \") + \"A1_utc\".rjust(28, \" \") +\n \"dT_ls\".rjust(10, \" \") + \"dT_lsf\".rjust(10, \" \") + \"DN_lsf\".rjust(10, \" \") +\n \"WN_lsf\".rjust(10, \" \") + \"\\n\")\n\ndate1 = \"\"\ndatestr = \"\"\n\nwhile True:\n data1 = f.read(1)\n datad1 = binascii.b2a_hex(data1)\n if not data1:\n break\n elif data1 == b'\\x0A':\n data2 = f.read(2)\n datad2 = binascii.b2a_hex(data2)\n if data2 == b'RD':\n data3 = f.read(3)\n datad3 = binascii.b2a_hex(data3)\n if data3 == b'\\x30\\x30\\x36':\n y = struct.unpack('H', f.read(2))[0] # 年\n m = struct.unpack('B', f.read(1))[0] # 月\n d = struct.unpack('B', f.read(1))[0] # 日\n b = struct.unpack('B', f.read(1))[0]\n if y == 65535 or m == 255 or d == 255:\n print(\"rd无效值\")\n break\n date1 = str(y) + \"-\" + str(m) + \"-\" + str(d)\n elif data2 == b'~~':\n data4 = f.read(3)\n datad4 = binascii.b2a_hex(data4)\n if data4 == b'\\x30\\x30\\x35':\n data5 = f.read(4)\n datad5 = binascii.b2a_hex(data5)\n rt = struct.unpack('I', data5)[0] # 天内秒\n if rt == 4294967295:\n continue # 无效值\n time1 = dayTime(rt / 1000)\n if date1 == \"\":\n date1 = \" \"\n datestr = date1 +\" \"+ time1\n elif data2 == b'cd':\n data5 = f.read(3)\n datad5 = binascii.b2a_hex(data5)\n if data5 == b'\\x30\\x33\\x30':\n bin_data = \"\"\n prn_u = struct.unpack('B', f.read(1))[0]\n if prn_u > 5:\n if prn_u<10:\n prn_u = \"C0\" + str(prn_u)\n else:\n prn_u = \"C\" + str(prn_u)\n s1 = f.read(4)\n datas1 = binascii.b2a_hex(s1)\n time_u = struct.unpack('I', s1)[0]\n s2 = f.read(1)\n datas2 = binascii.b2a_hex(s2)\n type_u = struct.unpack('B', s2)[0]\n if type_u == 0:\n type_u = \"B1\"\n elif type_u == 1:\n type_u = \"B2\"\n elif type_u == 2:\n type_u = \"B3\"\n elif type_u == 3:\n type_u = \"B1 from CEO\"\n elif type_u == 4:\n type_u = \"B2 from CEO\"\n elif type_u == 5:\n type_u = \"B2 from CEO\"\n elif type_u == 6:\n type_u = \"B1C\"\n elif type_u == 7:\n type_u = \"B1-2\"\n\n s3 = f.read(1)\n datas3 = binascii.b2a_hex(s3)\n len_u = struct.unpack('B', s3)[0]\n\n pinput = []\n # pinput2 = []\n poutput = []\n for i in range(len_u):\n data6 = f.read(4)\n datad6 = binascii.b2a_hex(data6)\n data_u = struct.unpack('I', data6)[0]\n # pinput2.append(data6)\n pinput.append(data_u)\n poutput.append((pinput[0] >> 2) & 0x3FFFFFFF)\n poutput.append(((pinput[0] & 0x000003) << 28) | ((pinput[1] >> 4) & 0x0FFFFFFF))\n poutput.append(((pinput[1] & 0x00000F) << 26) | ((pinput[2] >> 6) & 0x03FFFFFF))\n poutput.append(((pinput[2] & 0x00003F) << 24) | ((pinput[3] >> 8) & 0x00FFFFFF))\n poutput.append(((pinput[3] & 0x0000FF) << 22) | ((pinput[4] >> 10) & 0x003FFFFF))\n poutput.append(((pinput[4] & 0x0003FF) << 20) | ((pinput[5] >> 12) & 0x000FFFFF))\n poutput.append(((pinput[5] & 0x000FFF) << 18) | ((pinput[6] >> 14) & 0x0003FFFF))\n poutput.append(((pinput[6] & 0x003FFF) << 16) | ((pinput[7] >> 16) & 
0x0000FFFF))\n poutput.append(((pinput[7] & 0x00FFFF) << 14) | ((pinput[8] >> 18) & 0x00003FFF))\n poutput.append(((pinput[8] & 0x03FFFF) << 12) | ((pinput[9] >> 16) & 0x00000FFF))\n\n for y in range(len_u):\n poutput[y] <<= 2\n\n FraID = (poutput[0] & 0x0001C000) >> 14\n if FraID == 5:\n Pnum = (poutput[1] & 0x0007F000) >> 12\n if Pnum == 10:\n utcmap = ((poutput[1] >> 10) & 0x03 << 6) | (poutput[2] >> 26) & 0x3F\n tls = utcmap\n tlsf = signeddata(poutput[2] >> 18, 8)\n WNlsf = (poutput[2] >> 10) & 0xFF\n utc1 = poutput[3] & 0xFFFFFC00\n utch = hex(utc1)[2:]\n fmt = \"\"\n bts = bytearray()\n for u1 in range(0, len(utch), 2):\n bts.append(int(utch[u1:u1+2], 16))\n if len(bts) == 4:\n fmt = \"!i\"\n elif len(bts) == 1:\n fmt = \"!b\"\n utc1 = struct.unpack(fmt, bts)[0]\n utc2 = (poutput[4] >> 22) & 0x03FF\n A0utc = (utc1 | utc2) * 9.31322574615479e-10\n utcmap = ((poutput[4] & 0x003FFC00) << 2) | ((poutput[5] >> 20) & 0x0FFF)\n if utcmap & 0x800000:\n utcmap |=0xFF000000\n utcmapu = hex(utcmap)[2:]\n if len(utcmapu)==1:\n utcmapu = \"0\"+utcmapu\n bts2 = bytearray()\n for u2 in range(0, len(utcmapu) ,2):\n bts2.append(int(utcmapu[u2:u2+2], 16))\n if len(bts2) == 4:\n fmt = \"!i\"\n elif len(bts2) == 1:\n fmt = \"!b\"\n utcmap2 = struct.unpack(fmt, bts2)[0]\n A1utc = utcmap2 * 8.881784197e-16\n DN = (poutput[5] >> 12) & 0xFF\n print(\"datetime:\" + datestr + \"\\n\" + \"prn:\" + str(prn_u) + \"\\n\" + \"time:\" + str(time_u) + \"\\n\" +\n \"type:\" + str(type_u) + \"\\n\" + \"A0_utc:\" + str(A0utc) + \"\\n\" + \"A1_utc:\" + str(A1utc) + \"\\n\" +\n \"dT_ls:\" + str(tls) + \"\\n\" + \"dT_lsf:\" + str(tlsf) + \"\\n\" + \"DN_lsf:\" + str(DN) + \"\\n\" +\n \"WN_lsf:\" + str(WNlsf) + \"\\n\" + \"------------------------------\")\n f2.write(datestr+ \" \" + str(prn_u).rjust(5, \" \") + str(time_u).rjust(10, \" \") +\n str(type_u).rjust(15, \" \") + str(A0utc).rjust(28, \" \") + str(A1utc).rjust(28, \" \") +\n str(tls).rjust(10, \" \") + str(tlsf).rjust(10, \" \") + str(DN).rjust(10, \" \") +\n str(WNlsf).rjust(10, \" \") + \"\\n\")\nf.close()\nf2.close()\n\n\n\n\n","sub_path":"test/read_BDS_File.py","file_name":"read_BDS_File.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
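One caveat worth recording next to the reader above: its signeddata helper masks with 2147483647 (0x7FFFFFFF) after the left shift, which zeroes the sign bit, so the arithmetic right shift can never yield a negative value even for fields that encode negative numbers (dT_lsf is decoded through it). A conventional two's-complement sign extension, shown as a hedged alternative rather than a drop-in patch:

def sign_extend(value, nbits):
    # Interpret the low nbits of value as a two's-complement integer
    sign_bit = 1 << (nbits - 1)
    return (value & (sign_bit - 1)) - (value & sign_bit)

print(sign_extend(0xFF, 8))  # -1
print(sign_extend(0x7F, 8))  # 127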
+{"seq_id":"207193315","text":"# coding: utf-8\nimport unittest\n\nclass TestMyParser(unittest.TestCase):\n\n def setUp(self):\n self.parser = MyParser()\n fixture_path = 'tests/fixtures/sample.html'\n with open(fixture_path, 'r') as f:\n self.markup = f.read()\n\n def test_processing(self):\n data = self.parser.process(markup=self.markup)\n # Check that every value exists and is not empty\n for k,v in data:\n self.assertTrue(v)\n","sub_path":"tests/test_parsers.py","file_name":"test_parsers.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"143918989","text":"from abc import ABC, abstractmethod\nfrom typing import Dict, Union, List\n\nfrom ansible.cli import CLI\nfrom ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleError\nfrom ansible.parsing.yaml.objects import AnsibleUnicode\nfrom ansible.playbook import Playbook, Play\nfrom ansible.playbook.block import Block\nfrom ansible.playbook.helpers import load_list_of_blocks\nfrom ansible.playbook.role_include import IncludeRole\nfrom ansible.playbook.task import Task\nfrom ansible.playbook.task_include import TaskInclude\nfrom ansible.template import Templar\nfrom ansible.utils.display import Display\n\nfrom ansibleplaybookgrapher.graph import EdgeNode, TaskNode, PlaybookNode, RoleNode, PlayNode, CompositeNode, BlockNode\nfrom ansibleplaybookgrapher.utils import clean_name, handle_include_path, has_role_parent, generate_id, \\\n convert_when_to_str\n\n\nclass BaseParser(ABC):\n \"\"\"\n Base Parser of a playbook\n \"\"\"\n\n def __init__(self, tags: List[str] = None, skip_tags: List[str] = None, display: Display = None):\n \"\"\"\n\n :param tags: Only add plays and tasks tagged with these values\n :param skip_tags: Only add plays and tasks whose tags do not match these values\n :param display: Ansible display used to print some messages in the console\n \"\"\"\n loader, inventory, variable_manager = CLI._play_prereqs()\n self.data_loader = loader\n self.inventory_manager = inventory\n self.variable_manager = variable_manager\n\n self.tags = tags or [\"all\"]\n self.skip_tags = skip_tags or []\n self.display = display or Display()\n\n @abstractmethod\n def generate_graph(self, *args, **kwargs) -> PlaybookNode:\n pass\n\n def template(self, data: Union[str, AnsibleUnicode], variables: Dict,\n fail_on_undefined=False) -> Union[str, AnsibleUnicode]:\n \"\"\"\n Template the data using Jinja. Return data if an error occurs during the templating\n :param data:\n :param fail_on_undefined:\n :param variables:\n :return:\n \"\"\"\n try:\n templar = Templar(loader=self.data_loader, variables=variables)\n return templar.template(data, fail_on_undefined=fail_on_undefined)\n except AnsibleError as ansible_error:\n # Sometime we need to export\n if fail_on_undefined:\n raise\n self.display.warning(ansible_error)\n return data\n\n def _add_task(self, task: Task, task_vars: Dict, node_type: str, parent_node: CompositeNode) -> bool:\n \"\"\"\n Include the task in the graph.\n :return: True if the task has been included, false otherwise\n \"\"\"\n\n if not task.evaluate_tags(only_tags=self.tags, skip_tags=self.skip_tags, all_vars=task_vars):\n self.display.vv(f\"The task '{task.get_name()}' is skipped due to the tags.\")\n return False\n\n self.display.vv(f\"Adding {node_type} '{task.get_name()}' to the graph\")\n\n task_name = clean_name(f\"[{node_type}] \" + self.template(task.get_name(), task_vars))\n edge_label = convert_when_to_str(task.when)\n\n edge_node = EdgeNode(parent_node, TaskNode(task_name, generate_id(f\"{node_type}_\")), edge_label)\n parent_node.add_node(target_composition=f\"{node_type}s\", node=edge_node)\n\n return True\n\n\nclass PlaybookParser(BaseParser):\n \"\"\"\n The playbook parser. 
This is the main entrypoint responsible to parser the playbook into a graph structure\n \"\"\"\n\n def __init__(self, playbook_filename: str, include_role_tasks=False, tags: List[str] = None,\n skip_tags: List[str] = None, display: Display = None):\n \"\"\"\n :param playbook_filename: The filename of the playbook to parse\n :param display: Ansible display used to print some messages in the console\n :param include_role_tasks: If true, the tasks of the role will be included in the graph\n :param tags: Only add plays and tasks tagged with these values\n :param skip_tags: Only add plays and tasks whose tags do not match these values\n \"\"\"\n\n super().__init__(tags=tags, skip_tags=skip_tags, display=display)\n\n self.include_role_tasks = include_role_tasks\n self.playbook_filename = playbook_filename\n self.playbook = Playbook.load(playbook_filename, loader=self.data_loader,\n variable_manager=self.variable_manager)\n # the root node\n self.playbook_root_node = PlaybookNode(self.playbook_filename)\n\n def generate_graph(self, *args, **kwargs) -> PlaybookNode:\n \"\"\"\n Loop through the playbook and generate the graph.\n\n The graph is drawn following this order (https://docs.ansible.com/ansible/2.4/playbooks_reuse_roles.html#using-roles)\n for each play:\n add pre_tasks\n add roles\n if include_role_tasks\n add role_tasks\n add tasks\n add post_tasks\n :return:\n \"\"\"\n\n # loop through the plays\n for play in self.playbook.get_plays():\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(f\"Loader basedir set to {self.data_loader.get_basedir()}\")\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play: {} ({})\".format(clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Parsing \" + play_name)\n\n play_node = PlayNode(play_name, hosts=play_hosts)\n self.playbook_root_node.add_play(play_node, \"\")\n\n # loop through the pre_tasks\n self.display.v(\"Parsing pre_tasks...\")\n for pre_task_block in play.pre_tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=pre_task_block,\n play_vars=play_vars, node_type=\"pre_task\")\n\n # loop through the roles\n self.display.v(\"Parsing roles...\")\n\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. 
So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.tags, skip_tags=self.skip_tags, all_vars=play_vars):\n self.display.vv(f\"The role '{role.get_name()}' is skipped due to the tags.\")\n # Go to the next role\n continue\n\n role_node = RoleNode(clean_name(role.get_name()))\n # edge from play to role\n play_node.add_node(\"roles\", EdgeNode(play_node, role_node))\n\n if self.include_role_tasks:\n # loop through the tasks of the roles\n for block in role.compile(play):\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[role_node], block=block,\n play_vars=play_vars, node_type=\"task\")\n # end of roles loop\n\n # loop through the tasks\n self.display.v(\"Parsing tasks...\")\n for task_block in play.tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=task_block,\n play_vars=play_vars, node_type=\"task\")\n\n # loop through the post_tasks\n self.display.v(\"Parsing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=post_task_block,\n play_vars=play_vars, node_type=\"post_task\")\n # Summary\n self.display.display(\"\") # just an empty line\n self.display.v(f\"{len(play_node.pre_tasks)} pre_task(s) added to the graph.\")\n self.display.v(f\"{len(play_node.roles)} role(s) added to the play\")\n self.display.v(f\"{len(play_node.tasks)} task(s) added to the play\")\n self.display.v(f\"{len(play_node.post_tasks)} post_task(s) added to the play\")\n\n self.display.banner(f\"Done parsing {play_name}\")\n self.display.display(\"\") # just an empty line\n # moving to the next play\n\n return self.playbook_root_node\n\n def _include_tasks_in_blocks(self, current_play: Play, parent_nodes: List[CompositeNode],\n block: Union[Block, TaskInclude], node_type: str, play_vars: Dict = None):\n \"\"\"\n Recursively read all the tasks of the block and add it to the graph\n :param parent_nodes: This a list of parent nodes. Each time, we see an include_role, the corresponding node is\n added to this list\n :param current_play:\n :param block:\n :param play_vars:\n :param node_type:\n :return:\n \"\"\"\n\n if not block._implicit and block._role is None:\n # Here we have an explicit block. Ansible internally converts all normal tasks to Block\n block_node = BlockNode(str(block.name))\n parent_nodes[-1].add_node(f\"{node_type}s\",\n EdgeNode(parent_nodes[-1], block_node, convert_when_to_str(block.when)))\n parent_nodes.append(block_node)\n\n # loop through the tasks\n for task_or_block in block.block:\n if isinstance(task_or_block, Block):\n self._include_tasks_in_blocks(current_play=current_play, parent_nodes=parent_nodes, block=task_or_block,\n node_type=node_type, play_vars=play_vars)\n elif isinstance(task_or_block, TaskInclude): # include, include_tasks, include_role are dynamic\n # So we need to process them explicitly because Ansible does it during the execution of the playbook\n\n task_vars = self.variable_manager.get_vars(play=current_play, task=task_or_block)\n\n if isinstance(task_or_block, IncludeRole):\n # Here we have an 'include_role'. The class IncludeRole is a subclass of TaskInclude.\n # We do this because the management of an 'include_role' is different.\n # See :func:`~ansible.playbook.included_file.IncludedFile.process_include_results` from line 155\n self.display.v(\n f\"An 'include_role' found. 
Including tasks from the role '{task_or_block.args['name']}'\")\n\n role_node = RoleNode(task_or_block.args['name'])\n parent_nodes[-1].add_node(f\"{node_type}s\", EdgeNode(parent_nodes[-1], role_node,\n convert_when_to_str(task_or_block.when)))\n\n if self.include_role_tasks:\n # If we have an include_role and we want to include role tasks, the parent node now becomes\n # the role.\n parent_nodes.append(role_node)\n\n block_list, _ = task_or_block.get_block_list(play=current_play, loader=self.data_loader,\n variable_manager=self.variable_manager)\n else:\n self.display.v(f\"An 'include_tasks' found. Including tasks from '{task_or_block.get_name()}'\")\n\n templar = Templar(loader=self.data_loader, variables=task_vars)\n try:\n include_file = handle_include_path(original_task=task_or_block, loader=self.data_loader,\n templar=templar)\n except AnsibleUndefinedVariable as e:\n # TODO: mark this task with some special shape or color\n self.display.warning(\n f\"Unable to translate the include task '{task_or_block.get_name()}' due to an undefined variable: {str(e)}. \"\n \"Some variables are available only during the execution of the playbook.\")\n self._add_task(task=task_or_block, task_vars=task_vars, node_type=node_type,\n parent_node=parent_nodes[-1])\n continue\n\n data = self.data_loader.load_from_file(include_file)\n if data is None:\n self.display.warning(f\"The file '{include_file}' is empty and has no tasks to include\")\n continue\n elif not isinstance(data, list):\n raise AnsibleParserError(\"Included task files must contain a list of tasks\", obj=data)\n\n # get the blocks from the include_tasks\n block_list = load_list_of_blocks(data, play=current_play, variable_manager=self.variable_manager,\n role=task_or_block._role, loader=self.data_loader,\n parent_block=task_or_block)\n\n for b in block_list: # loop through the blocks inside the included tasks or role\n self._include_tasks_in_blocks(current_play=current_play, parent_nodes=parent_nodes, block=b,\n play_vars=task_vars, node_type=node_type)\n else:\n if len(parent_nodes) > 1 and not has_role_parent(task_or_block) and task_or_block._parent._implicit:\n # We add a new parent node if:\n # - We found an include_role\n # - We found an explicit Block\n # If an include_role is not found and we have a task that is not from an include_role and not from\n # an explicit block => we remove the last CompositeNode we have added.\n parent_nodes.pop()\n\n # check if this task comes from a role, and we don't want to include tasks of the role\n if has_role_parent(task_or_block) and not self.include_role_tasks:\n # skip role's task\n self.display.vv(\n f\"The task '{task_or_block.get_name()}' has a role as parent and include_role_tasks is false. \"\n \"It will be skipped.\")\n # skipping\n continue\n\n self._add_task(task=task_or_block, task_vars=play_vars, node_type=node_type,\n parent_node=parent_nodes[-1])\n","sub_path":"ansibleplaybookgrapher/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":15183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"20584312","text":"import os\nimport pprint as ppr\nimport re\nfrom os.path import join as pj\n\n\ndef detect_xml_files(basedir):\n xml_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith('.xml') is True \\\n and file.startswith('_new') is False:\n xml_files.append(os.path.join(root, file))\n return sorted(xml_files)\n\n\ndef get_uris_from_file(fil, filter=None):\n uris = []\n content = read_xml(fil)\n for line in content:\n s = rxsearch(r'(?<=dc:uri=\").*?(?=\")', line)\n if s is not None:\n if filter is None:\n uris.append(s)\n else:\n if rxmatch(filter, s) is True:\n uris.append(s)\n return uris\n\n\ndef mkdir(folder):\n try:\n os.makedirs(folder)\n except FileExistsError:\n pass\n\n\ndef pprint(obj):\n pp = ppr.PrettyPrinter(indent=4)\n pp.pprint(obj)\n\n\ndef rxmatch(rxscheme, s):\n return bool(re.search(rxscheme, s))\n\n\ndef rxsearch(rxscheme, s, group=0):\n r = None\n m = re.search(rxscheme, s)\n if m is not None:\n r = m.group(group)\n return r\n\n\ndef read_xml(filename):\n print('Read file ' + filename)\n arr = []\n try:\n filecontent = open(filename, 'r')\n except Exception as e:\n print(e)\n else:\n for line in filecontent.read().splitlines():\n arr.append(line)\n return(arr)\n\n\ndef write_xml(data, folder, filename, debug=False):\n mkdir(folder)\n target = pj(folder, filename)\n print('Write file ' + target)\n if debug is False:\n with open(target, 'w') as fp:\n for line in data:\n fp.write(line + '\\n')\n","sub_path":"tools/sanitizer/lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"128199771","text":"import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\nimport datetime\nimport gen_features_torch\nimport torch.utils.data\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n\ndef read_data_files(file_path):\n data = pd.read_csv(file_path + 'data_long.csv')\n return data\n\ndef gen_batch_data(data):\n cols = list(data.columns)\n del_cols_index = [cols.index(col) for col in ['sid', 'pid', 'click_mode']]\n sel_cols_index = list(range(len(cols)))\n for item in del_cols_index:\n sel_cols_index.pop(item)\n\n grouped = data.groupby('sid')\n batch_feas_list = []\n batch_click_mode_list = []\n for i, (group_id, group) in tqdm(enumerate(grouped)):\n grouped_values = group.values\n batch_click_mode = grouped_values[:,del_cols_index[-1]][0]\n batch_feas = torch.tensor(grouped_values[:, sel_cols_index]).type(torch.FloatTensor)\n batch_feas_list.append(batch_feas)\n batch_click_mode_list.append(batch_click_mode)\n batch_click_mode_list = torch.tensor(batch_click_mode_list).type(torch.LongTensor)\n # first paddle\n # check why size does not add up to 10000???\n print('list_len:', len(batch_click_mode_list))\n return batch_feas_list, batch_click_mode_list\n\ndef split_train_test_12_class(file_path):\n # data = read_data_files(file_path)\n data = gen_features_torch.merge_data()\n train_data = data[data['click_mode'] != -1]\n #train-val split\n val_data = train_data.iloc[:int(0.2 * train_data.shape[0]), :]\n train_data = train_data.iloc[int(0.2 * train_data.shape[0]):, :]\n\n train_x, train_y = gen_batch_data(train_data)\n val_x, val_y = gen_batch_data(val_data)\n\n test_data = data.query('click_mode == -1')\n submit = test_data[['sid']].copy()\n\n test_x, _ = gen_batch_data(test_data)\n return train_x, train_y, val_x, val_y, test_x, submit\n\ndef eval_f(y_pred, train_data):\n y_true = train_data.label\n y_pred = y_pred.reshape((12, -1)).T\n y_pred = np.argmax(y_pred, axis=1)\n score = f1_score(y_true, y_pred, average='weighted')\n return 'weighted-f1-score', score, True\n\ndef f1_decomposition(val_y, val_pred):\n precision, recall, F1, support = precision_recall_fscore_support(val_y, val_pred)\n weighted_F1 = precision_recall_fscore_support(val_y, val_pred, average ='weighted')[2]\n df_eval = pd.DataFrame({'precision':precision, 'recall':recall,'F1':F1, 'support':support, 'weighted_F1':weighted_F1})\n return df_eval\n\ndef submit_result_12_class(submit, pred_test, model_name):\n now_time = str(datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\"))\n submit['recommend_mode'] = pred_test\n submit.to_csv(\n '../submit/{}_result_{}.csv'.format(model_name, now_time), index=False)\n\nclass RNNModel(nn.Module):\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\n super(RNNModel, self).__init__()\n # batch_first=True will affect the input shape to self.rnn()\n\n self.rnn = nn.RNN(input_dim, 64, layer_dim, batch_first=True,\n nonlinearity='relu')\n self.fc1 = nn.Linear(64, 32)\n self.fc2 = nn.Linear(32, output_dim)\n self.dropout = nn.Dropout(0.5)\n\n\n def forward(self, x):\n out, hn = self.rnn(x)\n #hn[-1] is of (batch_size, hidden_dim)\n # hn_ = self.dropout(hn[-1])\n out = self.fc1(hn[-1])\n out = self.fc2(out)\n # out = self.dropout(out)\n return out\n\nclass LSTM(nn.Module):\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\n super(LSTM, self).__init__()\n 
self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True, dropout = 0.5)\n self.fc = nn.Linear(hidden_dim, output_dim)\n self.layer_dim = layer_dim\n self.hidden_dim = hidden_dim\n\n def forward(self, x):\n # may want to initialize h0 and c0\n out, (hn,cn) = self.lstm(x)\n out = self.fc(hn[-1])\n return out\n\ndef train_test(train_x, train_y, val_x, val_y, test_x):\n '''train_x is a list of two-dim tensor\n train_y is an one-dim tensor\n '''\n input_dim = train_x[0].size()[1]\n hidden_dim = 10\n output_dim = 12\n layer_dim = 1 #used in RNN\n batch_size = 100\n\n model = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)\n # model = LSTM(input_dim, hidden_dim, layer_dim, output_dim)\n\n # Cross Entropy Loss\n error = nn.CrossEntropyLoss()\n # error = nn.CrossEntropyLoss(weight = torch.tensor(np.array([1,1,6,1,1,1,1,1,1,1,1,1])).type(torch.FloatTensor))\n\n #learning_rate = 1 for LSTM\n learning_rate = 0.000001 # for RNN with num_epochs 500 seems to have 0.29 F1\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n\n\n num_epochs = 50\n pred_test_list = []\n score_list = []\n print('# of iters:',(len(train_x)/batch_size),'\\n')\n for epoch in range(num_epochs):\n for i in range(len(train_x)//batch_size+1):\n if i < len(train_x)//batch_size:\n feas = train_x[i*batch_size:(i+1)*batch_size]\n labels = train_y[i*batch_size:(i+1)*batch_size]\n else:\n feas = train_x[i*batch_size:]\n labels = train_y[i*batch_size:]\n # feas = feas.view(-1, 12, 1) #needed for RNN\n lens = list(map(len, feas))\n padded_feas = pad_sequence(feas, batch_first=True)\n packed_feas = pack_padded_sequence(padded_feas, lens, batch_first=True, enforce_sorted=False)\n optimizer.zero_grad()\n outputs = model(packed_feas)\n loss = error(outputs, labels)\n if i%100==0:\n print('loss at {0} is {1}'.format(i, loss.item()))\n loss.backward()\n optimizer.step()\n\n if i ==0:\n train_outputs = outputs\n else:\n train_outputs = torch.cat((train_outputs, outputs), dim = 0)\n i+=1\n\n with torch.no_grad():\n for i in range(len(val_x)//batch_size+1):\n if i < len(val_x)//batch_size:\n val_feas = val_x[i*batch_size:(i+1)*batch_size]\n val_labels = val_y[i*batch_size:(i+1)*batch_size]\n else:\n val_feas = val_x[i*batch_size:]\n val_labels = val_y[i*batch_size:]\n val_lens = list(map(len, val_feas))\n padded_val_feas = pad_sequence(val_feas, batch_first=True)\n packed_val_feas = pack_padded_sequence(padded_val_feas, val_lens, batch_first=True, enforce_sorted=False)\n val_outputs = model(packed_val_feas)\n predicted = torch.max(val_outputs.data, 1)[1]\n if i ==0:\n val_result = predicted\n #for evaluation purposes\n val_outputs_long = val_outputs\n else:\n val_result = torch.cat((val_result, predicted), dim = 0)\n val_outputs_long = torch.cat((val_outputs_long, val_outputs), dim = 0)\n # pred_test = predict(model, test_loader)\n # pred_test_list.append(pred_test)\n score = precision_recall_fscore_support(val_y, val_result, average ='weighted')[2]\n # print('epoch:', epoch, 'f1-score:', score)\n score_list.append(score)\n max_index = np.argmax(np.array(score_list))\n print('max_index', max_index)\n print(f1_decomposition(val_y, val_result))\n val_outputs_long = pd.DataFrame(val_outputs_long.numpy())\n val_outputs_long['click_mode']=val_y\n val_outputs_long['predicted']=val_result\n val_outputs_long.to_csv('../output/val_outputs_long.csv')\n train_outputs_long = pd.DataFrame(train_outputs.detach().numpy())\n train_outputs_long['predicted']=np.argmax(train_outputs_long.values, axis=1)\n 
train_outputs_long['click_mode']=train_y\n train_outputs_long.to_csv('../output/train_outputs_long.csv')\n # pred_test = pred_test_list[max_index]\n # return pred_test\n\nif __name__ == '__main__':\n train_x, train_y, val_x, val_y, test_x, submit = split_train_test_12_class('../input_torch/')\n train_test(train_x, train_y, val_x, val_y, test_x)\n # submit_result_12_class(submit, pred_test, 'pytorch')\n","sub_path":"code/pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
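+# NOTE: a minimal sketch of the pad/pack batching pattern used by train_test in
+# the record above; the feature width (3) and sequence lengths are illustrative.
+import torch
+from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
+seqs = [torch.ones(4, 3), torch.ones(2, 3)]      # two sessions: 4 rows and 2 rows
+lens = [len(s) for s in seqs]
+padded = pad_sequence(seqs, batch_first=True)    # shape (2, 4, 3), zero-padded
+packed = pack_padded_sequence(padded, lens, batch_first=True, enforce_sorted=False)
+rnn = torch.nn.RNN(input_size=3, hidden_size=8, batch_first=True)
+out, hn = rnn(packed)                            # the RNN skips the padded steps
+print(hn[-1].shape)                              # torch.Size([2, 8])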
+{"seq_id":"640337381","text":"# Author: Alex Gezerlis\n# Numerical Methods in Physics with Python (2nd ed., CUP, 2023)\n\nfrom triang import forsub, backsub, testcreate\nfrom ludec import ludec\nfrom jacobi import termcrit\nfrom power import mag, testeigone\nimport numpy as np\n\n#def invpowershift(A,shift=20,kmax=200,tol=1.e-2):\ndef invpowershift(A,shift=20,kmax=200,tol=1.e-8):\n n = A.shape[0]\n znews = np.ones(n)\n qnews = znews/mag(znews)\n Astar = A - np.identity(n)*shift\n L, U = ludec(Astar)\n\n for k in range(1,kmax):\n qs = np.copy(qnews)\n ys = forsub(L,qs)\n znews = backsub(U,ys)\n qnews = znews/mag(znews)\n\n if qs@qnews<0:\n qnews = -qnews\n\n err = termcrit(qs,qnews)\n #print(k, qnews, err)\n\n if err < tol:\n lam = qnews@A@qnews\n break\n else:\n lam = qnews = None\n\n return lam, qnews\n\nif __name__ == '__main__':\n A, _ = testcreate(4,21)\n testeigone(invpowershift,A)\n","sub_path":"second_edition/codes/invpowershift.py","file_name":"invpowershift.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"391737759","text":"from collections import OrderedDict, namedtuple\n\n\nclass _SearchRecipe(namedtuple(\"SearchRecipe\", \"ref\")):\n with_packages = True\n\n def to_dict(self):\n data = {\"id\": str(self.ref)}\n return data\n\n\nclass _SearchPackage(namedtuple(\"SearchPackage\",\n \"package_id, options, settings, requires, outdated\")):\n\n def to_dict(self):\n return {\"id\": self.package_id, \"options\": self.options, \"settings\": self.settings,\n \"requires\": self.requires, \"outdated\": self.outdated}\n\n\nclass SearchRecorder(object):\n\n def __init__(self):\n self.error = False\n self.keyword = \"results\"\n self._info = OrderedDict()\n\n def add_recipe(self, remote_name, ref, with_packages=True):\n recipe = _SearchRecipe(ref)\n recipe.with_packages = with_packages\n if remote_name not in self._info:\n self._info[remote_name] = OrderedDict()\n self._info[remote_name][ref.full_repr()] = {\"recipe\": recipe, \"packages\": []}\n\n def add_package(self, remote_name, ref, package_id, options, settings, requires, outdated):\n sp = _SearchPackage(package_id, options, settings, requires, outdated)\n self._info[remote_name][ref.full_repr()][\"packages\"].append(sp)\n\n def get_info(self):\n info = {\"error\": self.error, self.keyword: []}\n\n for remote_name, recipe_packages in sorted(self._info.items()):\n remote_info = {\"remote\": remote_name, \"items\": []}\n for item in recipe_packages.values():\n recipe_info = item[\"recipe\"].to_dict()\n if item[\"recipe\"].with_packages:\n packages_info = [package.to_dict() for package in item[\"packages\"]]\n remote_info[\"items\"].append({\"recipe\": recipe_info, \"packages\": packages_info})\n else:\n remote_info[\"items\"].append({\"recipe\": recipe_info})\n info[self.keyword].append(remote_info)\n return info\n","sub_path":"conans/client/recorder/search_recorder.py","file_name":"search_recorder.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"63082380","text":"'''\n(1)输入一行字符(仅含英文字母),编写函数分别统计各个字符的个数,不区分大小写.\n(2)输入一行字符,编写函数统计出四类字母的数量: 英文字母、空格、数字和其它字符的个数。\n分别编写一个函数,完成上述功能。\n\n提示: 设d为一个dict对象,其\"值\"为\"键\"(字符)出现的次数.\n1.返回一个字典对象: d[keyvalue]\n2.如果当前字符为keyvalue,则 d[keyvalue] += 1 增加一次出现次数.\n'''\n\n#第一题\ndef count1(string):\n garge = {}\n for c in string:\n if c in garge:\n garge[c] += 1\n else:\n garge[c] = 1\n return garge\n\n'''\ns = input('输入:')\ngarge = count1(s.lower())\nprint(garge)\n'''\n\n#第2题\ndef count2(string):\n garge = {'Word': 0, 'Number': 0, 'Blank': 0, 'Other': 0}\n for c in string:\n if c.isalpha():\n garge['Word'] += 1\n elif c.isdigit():\n garge['Number'] += 1\n elif c.isspace():\n garge['Blank'] += 1\n else:\n garge['Other'] += 1\n return garge\n\ns = input('输入:') #123123qweewerwe() (_=)\nprint(count2(s))","sub_path":"Visual Studio/Python/字符统计.py","file_name":"字符统计.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"631874363","text":"import json\nimport sys\nimport time\n\nimport requests\nfrom git_wrapper import git_wrapper\n\n\ndef common_update():\n while is_running:\n s.update()\n s.add('.')\n s.commit('syncing')\n s.push()\n\n time.sleep(5)\n\ndef update():\n while is_running:\n s.update()\n\n time.sleep(1)\n\ndef git_commit():\n while is_running:\n s.add('.')\n s.commit('syncing')\n s.push()\n\n time.sleep(5)\n\ndef svn_commit():\n while is_running:\n l, is_modified = s.getNewFiles()\n\n for e in l:\n s.add(e)\n\n if is_modified:\n s.commit('syncing')\n\n time.sleep(1)\n\n\nassert len(sys.argv) == 2, \"usages: python3 SClient.py user.json\"\n\nwith open('user.json', 'r') as f:\n user_data = f.read()\n\nuser_data = json.loads(user_data)\n\nresp = requests.get(user_data['auth-url'],\n auth=(user_data['id'], user_data['password']))\n\nprint(resp.text)\nprint(len(resp.text))\nret = resp.text.split('\\\\n ')[0]\nprint(ret)\nassert ret == \"success\", \"Invalid url\"\n\ns = git_wrapper(user_data['id'], user_data['password'])\n # svn_wrapper(user_data['id'], user_data['password'])\nis_running = True\n\ns.checkout(url=user_data['repo-url'],\n dest=user_data['repo-dir'])\n\ncommon_update()\n\n'''\nupdater = Thread(target=update)\nupdater.start()\n\ncommitter = Thread(target=git_commit)\ncommitter.start()\n\nupdater.join()\ncommitter.join()\n'''\n","sub_path":"SClient.py","file_name":"SClient.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"185683388","text":"import random\nfrom XOR_task import *\nfrom enum import Enum\nimport matplotlib.pyplot as plt\nimport time\n\nclass Actions(Enum):\n Stand = 'S'\n Hit = 'H'\n Split = 'SP'\n Double = 'DD'\n \n\nclass BlackJack:\n DECK = []\n def __init__(self):\n self.deckCount = 8\n self.drawnCards = []\n BlackJack.DECK = BlackJack.getStaticDeck()\n self.bank = 200\n self.deck = BlackJack.DECK * self.deckCount\n random.shuffle(self.deck)\n \n plt.close()\n plt.ion()\n plt.show()\n self.bankHistory = []\n self.bet = 1\n\n def run(self, agent, plot=True):\n self.bank -= self.bet\n \n if len(self.deck) < (len(BlackJack.DECK) * self.deckCount)/2:\n self.deck = BlackJack.DECK * self.deckCount\n random.shuffle(self.deck)\n # print(\"New Deck . . .\")\n \n dealer_cards = []\n agent_cards = []\n\n # Initial Turn\n agent_cards.append(self.draw(self.deck))\n dealer_cards.append(self.draw(self.deck))\n agent_cards.append(self.draw(self.deck))\n dealer_cards.append(self.draw(self.deck))\n \n # print(f'Bank: {self.bank}')\n # print(f'Player hand: {agent_cards}')\n # print(f'Dealer hand: {dealer_cards}')\n\n # Player Dealing\n if BlackJack.countCards(agent_cards) == 21 and BlackJack.countCards(dealer_cards) == 21:\n self.bank += self.bet\n # print(\"Push Black Jack.\")\n return\n \n \n choice = agent.input(agent_cards, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n \n hand1 = hand2 = None\n \n if choice == Actions.Hit:\n while (BlackJack.countCards(agent_cards) < 21):\n agent_cards.append(self.draw(self.deck))\n # print(f'Player hand: {agent_cards}')\n if BlackJack.countCards(agent_cards) > 21:\n break\n choice = agent.input(agent_cards, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n elif choice == Actions.Double:\n agent_cards.append(self.draw(self.deck))\n self.bank -= self.bet\n # print(f'Player hand: {agent_cards}')\n elif choice == Actions.Split and agent_cards[0][0] == agent_cards[1][0]:\n hand1 = [agent_cards[0], self.draw(self.deck)]\n hand2 = [agent_cards[1], self.draw(self.deck)]\n self.bank -= self.bet\n \n # print(f'Player hand1: {hand1}')\n choice = agent.input(hand1, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if choice == Actions.Hit:\n while (BlackJack.countCards(hand1) < 21):\n hand1.append(self.draw(self.deck))\n # print(f'Player hand1: {hand1}')\n if BlackJack.countCards(hand1) > 21:\n break\n choice = agent.input(hand1, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n \n # print(f'Player hand2: {hand2}')\n choice = agent.input(hand2, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if choice == Actions.Hit:\n while (BlackJack.countCards(hand2) < 21):\n hand2.append(self.draw(self.deck))\n # print(f'Player hand2: {hand2}')\n if BlackJack.countCards(hand2) > 21:\n break\n choice = agent.input(hand2, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n \n \n # DealerDealing\n while BlackJack.countCards(dealer_cards) < 17:\n dealer_cards.append(self.draw(self.deck))\n # print(f'Dealer hand: {dealer_cards}')\n \n # Final Count\n for hand in [agent_cards, hand1, hand2]:\n if hand == None:\n continue\n if len(hand) == 2 and hand1 == None and BlackJack.countCards(hand) == 21:\n # print(\"BLACK JACK !!!!\")\n self.bank += self.bet*2.5\n elif BlackJack.countCards(hand) == BlackJack.countCards(dealer_cards):\n if choice == Actions.Double:\n self.bank += self.bet\n self.bank += self.bet\n elif 
BlackJack.countCards(hand) > 21:\n                pass  # busted hand: the stake is already lost\n            elif BlackJack.countCards(hand) > BlackJack.countCards(dealer_cards):\n                if choice == Actions.Double:\n                    self.bank += self.bet*2\n                self.bank += self.bet*2\n            elif BlackJack.countCards(dealer_cards) > 21:\n                if choice == Actions.Double:\n                    self.bank += self.bet*2\n                self.bank += self.bet*2\n        \n        # print(f'Final bank: {self.bank}')\n        self.bankHistory.append(self.bank)\n        if plot and len(self.bankHistory)%10 == 0:\n            plt.xlim(0, max(300, len(self.bankHistory)+(1-((len(self.bankHistory)%300)/300))*300))\n            plt.ylim(0, 300)\n            plt.grid(True)\n            plt.xlabel(\"matches played\")\n            plt.ylabel(\"bank\")\n            plt.plot(self.bankHistory, color='blue')\n            plt.show()\n            plt.pause(0.001)\n    \n    @staticmethod\n    def countCards(cards):\n        count = 0\n        aces = 0\n        for card in cards:\n            if card[0] in \"KQJT\":\n                count += 10\n            elif card[0] in \"23456789\":\n                count += int(card[0])\n            elif card[0] == \"A\":\n                aces += 1\n        for i in range(aces):\n            if count + 11 > 21:\n                count += 1\n            else:\n                count += 11\n        return count\n\n    def draw(self, deck):\n        card = deck.pop(0)\n        self.drawnCards.append(card)\n        return card\n\n    @staticmethod\n    def getStaticDeck():\n        deck = []\n        for card in \"A23456789TJQK\":\n            for sign in \"SCDH\":\n                deck.append(card + sign)\n        return deck\n\nclass BasicAgent:\n    def __init__(self) -> None:\n        pass\n    \n    def input(self, agent_cards, dealer_card):\n        # Table Strat\n        for i in range(len(X)):\n            c1 = BlackJack.countCards(agent_cards[0])\n            c2 = BlackJack.countCards(agent_cards[1])\n            row = BlackJack.countCards(dealer_card)\n            \n            if c1 == 11:\n                c1 = 1\n            if c2 == 11:\n                c2 = 1\n            if row == 11:\n                row = 1\n\n            if X[i][0] == c1 and X[i][1] == c2 and X[i][3] == row:\n                if y[i] == [1, 0, 0, 0]:\n                    return Actions.Stand\n                elif y[i] == [0, 1, 0, 0]:\n                    return Actions.Hit\n                elif y[i] == [0, 0, 1, 0]:\n                    return Actions.Split\n                elif y[i] == [0, 0, 0, 1]:\n                    return Actions.Double\n                else:\n                    assert False\n        # print(f'Agent Cards: {agent_cards}')\n        # print(f'Dealer Card: {dealer_card}')\n        assert False\n    \n    # Dealer Strat\n    #\n    # if BlackJack.countCards(agent_cards) < 17:\n    #     return Actions.Hit\n    # else:\n    #     return Actions.Stand\n    \n    # Manual Mode\n    #\n    # choice = input('>')\n    # if choice == 'H':\n    #     return Actions.Hit\n    # elif choice == 'S':\n    #     return Actions.Stand\n    # elif choice == 'SP':\n    #     return Actions.Split\n    # elif choice == 'DD':\n    #     return Actions.Double\n    # else:\n    #     print(f'Action \"{choice}\" not found.')\n    #     return Actions.Stand\n    \n    \nif __name__ == '__main__':\n    bj = BlackJack()\n    agent = BasicAgent()\n    \n    while bj.bank > 0:\n        bj.run(agent)\n    \n    plt.pause(0)","sub_path":"BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
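+# NOTE: a worked check of the ace handling in BlackJack.countCards above,
+# re-implemented inline so the snippet runs on its own.
+def count_cards(cards):
+    count = sum(10 if c[0] in 'KQJT' else int(c[0]) if c[0].isdigit() else 0
+                for c in cards)
+    for _ in [c for c in cards if c[0] == 'A']:
+        count += 1 if count + 11 > 21 else 11   # demote the ace when 11 would bust
+    return count
+
+print(count_cards(['AS', '7H']))        # 18: the ace counts as 11
+print(count_cards(['AS', 'KS', '5H']))  # 16: the ace is demoted to 1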
+{"seq_id":"207778287","text":"# Copyright (c) 2020 Patrick Hart, Julian Bernhard,\n# Klemens Esterle, Tobias Kessler\n# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\nimport numpy as np\n# BARK\nfrom bark.core.world.evaluation import \\\n EvaluatorGoalReached, EvaluatorCollisionEgoAgent, \\\n EvaluatorStepCount, EvaluatorDrivableArea\nfrom bark.runtime.commons.parameters import ParameterServer\nfrom bark.core.geometry import *\n# BARK-ML\nfrom bark_ml.evaluators.evaluator import StateEvaluator\n\n\nclass GoalReachedGuiding(StateEvaluator):\n def __init__(self,\n params=ParameterServer(),\n eval_agent=None):\n StateEvaluator.__init__(self, params)\n self._goal_reward = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"GoalReward\",\n \"Reward for reaching the goal.\",\n 1.]\n self._col_penalty = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"CollisionPenalty\",\n \"Reward given for a collisions.\",\n -1.]\n self._max_steps = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"MaxSteps\",\n \"Maximum steps per episode.\",\n 50]\n self._act_penalty = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"ActionPenalty\",\n \"Weight factor for penalizing actions\",\n 0.01]\n self._goal_dist = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"GoalDistance\",\n \"Weight factor for distance to goal\",\n 0.01]\n self._eval_agent = eval_agent\n self._goal_lane_corr = None\n\n def _add_evaluators(self):\n \"\"\"Evaluators that will be set in the BARK world\"\"\"\n self._evaluators[\"goal_reached\"] = EvaluatorGoalReached()\n self._evaluators[\"collision\"] = EvaluatorCollisionEgoAgent()\n self._evaluators[\"step_count\"] = EvaluatorStepCount()\n self._evaluators[\"drivable_area\"] = EvaluatorDrivableArea()\n\n def GetGoalLaneCorridorForGoal(self, observed_world):\n \"\"\"Returns the lanecorridor the goal is in\"\"\"\n if self._goal_lane_corr is not None:\n return self._goal_lane_corr\n ego_agent = observed_world.ego_agent\n goal_def = ego_agent.goal_definition\n goal_shape = goal_def.goal_shape\n rc = observed_world.ego_agent.road_corridor\n lane_corr = None\n for lc in rc.lane_corridors:\n if Collide(lc.polygon, goal_shape):\n lane_corr = lc\n return lane_corr\n\n def CalculateDistanceToGoal(self, observed_world, goal_lane_corr):\n \"\"\"Calculates the distance to the goal of the ego_agent\"\"\"\n goal_center_line = goal_lane_corr.center_line\n ego_agent = observed_world.ego_agent\n ego_agent_state = ego_agent.state\n distance_to_gaol = Distance(\n goal_center_line,\n Point2d(ego_agent_state[1], ego_agent_state[2]))\n return distance_to_gaol\n\n def CalculateGuidingReward(self, observed_world, action):\n \"\"\"Returns a guiding reward using the dist. 
to goal and penalized acts.\"\"\"\n guiding_reward = 0.\n goal_lane_corr = self.GetGoalLaneCorridorForGoal(observed_world)\n distance_to_goal = self.CalculateDistanceToGoal(observed_world, goal_lane_corr)\n guiding_reward -= self._goal_dist*distance_to_goal\n # NOTE: this will only work for continious actions\n if action is not None and type(action) is not int:\n accs = action[0]\n delta = action[1]\n guiding_reward -= self._act_penalty*(accs**2 + delta*+2)\n return guiding_reward\n\n def _evaluate(self, observed_world, eval_results, action):\n \"\"\"Returns information about the current world state\n \"\"\"\n done = False\n success = eval_results[\"goal_reached\"]\n collision = eval_results[\"collision\"] or eval_results[\"drivable_area\"]\n step_count = eval_results[\"step_count\"]\n # determine whether the simulation should terminate\n if success or collision or step_count > self._max_steps:\n done = True\n guiding_reward = self.CalculateGuidingReward(observed_world, action)\n # calculate reward\n reward = collision * self._col_penalty + \\\n success * self._goal_reward + guiding_reward\n return reward, done, eval_results\n \n def Reset(self, world):\n self._goal_lane_corr = None\n return super(GoalReachedGuiding, self).Reset(world)","sub_path":"bark_ml/evaluators/goal_reached_guiding.py","file_name":"goal_reached_guiding.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"31533946","text":"# Kaggle Python Tutorial on Machine Learning\n# Chapter 1\n# Exercise 6 - First prediction\n\n# Create a copy of test: test_one\ntest_one = test.copy(deep=True)\n\n# Initialize a Survived column to 0\ntest_one[\"Survived\"] = 0\n\n# Set Survived to 1 if Sex equals \"female\" and print the `Survived` column from `test_one`\ntest_one[\"Survived\"][test_one[\"Sex\"] == 'female'] = 1\nprint(test_one[\"Survived\"])\n","sub_path":"chapter1/06-firstprediction.py","file_name":"06-firstprediction.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"49662885","text":"# -*- coding: utf-8 -*-\n\"\"\" Configuration handling.\n\n References:\n - http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html\n - http://freedesktop.org/wiki/Software/pyxdg/\n - https://github.com/ActiveState/appdirs\n\"\"\"\n# Copyright © 2013 1&1 Internet AG\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, with_statement\n\nimport sys\nimport logging\nimport ConfigParser\n\n\nclass Configuration(object):\n \"\"\" Reads and manages the configuation.\n \"\"\"\n\n # Singleton instance\n instance = None\n\n\n @classmethod\n def create(cls, config_file=None):\n \"\"\" Return the default configuration.\n \"\"\"\n if cls.instance is None:\n cls.instance = cls(config_file)\n\n # Load config file, possibly overwriting the defaults\n cls.instance.load_ini()\n\n if config_file and config_file != cls.instance.config_file:\n raise RuntimeError(\"Configuration initialized a second time with a different file!\")\n\n return cls.instance\n\n\n def __init__(self, config_file=None):\n \"\"\" Initialize configuration.\n \"\"\"\n self.config_file = config_file\n\n # Set defaults\n #self.default(\"apt\", \"repositories\", \"primary\", list)\n #self.default(\"apt\", \"repositories\", \"secondary\", list)\n self._validate()\n\n\n def _validate(self):\n \"\"\" Validate a loaded configuration.\n \"\"\"\n #if isinstance(self.foobar, basestring):\n # try:\n # self.foobar = int(self.foobar, 10)\n # except (ValueError, TypeError), exc:\n # raise ValueError(\"Bad foobar %r: %s\" % (self.foobar, exc))\n\n\n def load_ini(self):\n \"\"\" Load the given .INI file.\n \"\"\"\n if not self.config_file:\n return\n\n # Load INI file\n ini_file = ConfigParser.SafeConfigParser()\n if not ini_file.read(self.config_file):\n raise ConfigParser.ParsingError(\"Global configuration file %r not found!\" % (\n self.config_file,\n ))\n\n \"\"\"\n # Make sure there's our global settings section\n if not ini_file.has_section(self.SECTION):\n raise ConfigParser.ParsingError(\"%r needs to have a [%s] section!\" % (\n self.config_file, self.SECTION,\n ))\n\n # Get the given values\n for key, val in ini_file.items(self.SECTION):\n # Ensure that all names are known (to prevent uncaught typos)\n if key not in self.KEYS:\n raise ConfigParser.ParsingError(\"%r has an unknown key %s in the [%s] section!\" % (\n self.config_file, key, self.SECTION,\n ))\n\n # Do some shell-like path expansion\n val = os.path.expanduser(os.path.expandvars(val))\n\n # Set as attribute for easy access\n setattr(self, key, val)\n \"\"\"\n\n self._validate()\n","sub_path":"src/infrascope/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123188730","text":"from modules.alarm.alarm_time_file_base import AlarmTimeFileBase\nfrom logging import Logger\nfrom setting import Setting\n\n\nclass BlockAlarmRise(AlarmTimeFileBase):\n \"\"\"description of class\"\"\"\n\n def __init__(self, logger: Logger, setting: Setting):\n \"\"\"Initializes (declare internal variables)\"\"\"\n super(BlockAlarmRise, self).__init__(logger, setting)\n self._start_r = None\n self._start_g = None\n self._start_b = None\n self._stop_r = None\n self._stop_g = None\n self._stop_b = None\n self._step_r = None\n self._step_g = None\n self._step_b = None\n self._current_r = None\n self._current_g = None\n self._current_b = None\n\n def update_display(self, screen, size, fore_color, back_color, blocks, current_time) -> None:\n try:\n if not self._is_alarm:\n return\n back_color = (self._current_r, self._current_g, self._current_b)\n screen.fill(back_color)\n for block in blocks:\n block.update_display(True, screen, size, self._fore_color, back_color, current_time)\n\n (self._current_r, self._step_r) = self._calculate_color_part(\n self._start_r,\n self._stop_r,\n self._step_r,\n self._current_r)\n (self._current_g, self._step_g) = self._calculate_color_part(\n self._start_g,\n self._stop_g,\n self._step_g,\n self._current_g)\n (self._current_b, self._step_b) = self._calculate_color_part(\n self._start_b,\n self._stop_b,\n self._step_b,\n self._current_b)\n\n except Exception as ex:\n self._logger.exception(ex)\n\n def init_draw(self):\n super(BlockAlarmRise, self).init_draw()\n (_, background_color, _, _) = self._setting.get_curret_setting()\n self._start_r = background_color[0]\n self._start_g = background_color[1]\n self._start_b = background_color[2]\n self._stop_r = self._back_color[0]\n self._stop_g = self._back_color[1]\n self._stop_b = self._back_color[2]\n self._step_r = (self._stop_r - self._start_r) / 20\n self._step_g = (self._stop_g - self._start_g) / 20\n self._step_b = (self._stop_b - self._start_b) / 20\n self._current_r = self._start_r\n self._current_g = self._start_g\n self._current_b = self._start_b\n\n def _calculate_color_part(self, start, stop, step, current):\n current += step\n if current > stop:\n step = -step\n current += step\n if current < start:\n step = -step\n current += step\n return (current, step)\n","sub_path":"modules/alarm/block_alarm_rise.py","file_name":"block_alarm_rise.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"52548546","text":"# -*- coding: utf-8 -*-\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'HOST': '{{ db_expo }}'.split(':')[0],\n 'PORT': '{{ db_expo }}'.split(':')[1],\n# 'HOST': '{{ db_hhservice_host }}',\n# 'PORT': '{{ db_hhservice_port }}',\n 'NAME': 'hhservice',\n 'USER': '{{ db_expo_u }}',\n 'PASSWORD': '{{ db_expo_p }}'\n }\n}\n\n\nVISITOR_LOG_CACHE_HOSTS = ({{ memcache_quotes }},)\nVISITOR_LOG_CACHE_PREFIX = 'expo.'\nVISITOR_LOG_CACHE_TTL = 300\nVISITOR_LOG_ENABLE = True\nVISITOR_LOG_ACTIVITY_TRESHOLD = 300\nVISITOR_LOG_STAND_ACTIVITY_TRESHOLD = 300\n\ngelf_handler = {'class': 'graypy.GELFHandler',\n 'host': '{{ graylog_host }}',\n 'port': 12201}\n\nhandler = {'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/hh-expo/hh-expo.log',\n 'level': 'DEBUG',\n 'formatter': 'verbose'}\n\nemail_handler = {'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/hh-expo/emails.log',\n 'level': 'DEBUG',\n 'formatter': 'verbose'}\n\nMEDIA_URL = 'http://hh.ru/i-expo/'\nUPLOAD_PUT_URL = 'http://{{ intbal_n_c1 }}:8024/i-expo/'\nUPLOAD_TIMEOUT = 500\nUPLOAD_GET_TIMEOUT = 500\n\nSTATIC_URL = 'http://i-expo.hh.ru/static/'\n\nMAX_UPLOAD_SIZE = ''\n\nMAILER_QUEUE_HOSTS = ({{ cf_mailer_queue_hosts }})\nMAILER_QUEUE_LOGIN = '{{ cf_mailer_queue_login }}'\nMAILER_QUEUE_PASSWORD = '{{ cf_mailer_queue_password }}'\nMAILER_QUEUE_ROUTING_KEYS = {\n 'LOW': 'mail.active.LOW',\n 'NORMAL': 'mail.active.NORMAL',\n 'HIGH': 'mail.active.HIGH',\n}\nMAILER_QUEUE_VIRTUAL_HOST = '/'\nMAILER_QUEUE_EXCHANGE = ''\nEMAIL_QUEUE_DELAY = 300\nCAREER_EMAIL_TEMPLATE = 'career_fair_base'\nMAILER_QUEUE_MAILING_TYPE = 'applicant_advertising_mailer'\nMAILER_QUEUE_MAILING_ID = 0\n\nDEFAULT_FROM_EMAIL = 'Ярмарка вакансий '\n\nAPPLICANT_FAIR_START = '13.02.2013 10:00'\nAPPLICANT_FAIR_FINISH = '14.02.2013 18:00'\nPRESENTATION_CONVERTATION_ATTEMPTS = 3\nIMAGE_RESIZE_QUALITY = 95\nPRESENTATION_DENSITY = '150' # quality for presentation file convertation( in dpi )\n\nBANNER_SERVICE_ENABLED = True\nBANNER_SERVICE_HOST = '//hhcdn.ru/pv'\n\nVACANCY_SEARCH_ENABLE = True\nVACANCY_SEARCH_PER_PAGE = 20\n\nTEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )),\n)\n\nTEXT_BLOCK_CACHE_TTL = 60 * 60 * 24 # 1 day\n\nSOCIAL_AUTH_VK_OAUTH2_KEY = '{{ advdream_social_auth_vk_oauth2_key }}'\nSOCIAL_AUTH_VK_OAUTH2_SECRET = '{{ advdream_social_auth_vk_oauth2_secret }}'\n\nSOCIAL_AUTH_FACEBOOK_KEY = '{{ advdream_social_auth_facebook_key }}'\nSOCIAL_AUTH_FACEBOOK_SECRET = '{{ advdream_social_auth_facebook_secret }}'\n\nSOCIAL_AUTH_HHRU_OAUTH2_KEY = '{{ advdream_social_auth_hhru_oauth2_key }}'\nSOCIAL_AUTH_HHRU_OAUTH2_SECRET = '{{ advdream_social_auth_hhru_oauth2_secret }}'\n\nSOCIAL_AUTH_MAILRU_OAUTH2_KEY = '{{ advdream_social_auth_mailru_oauth2_key }}'\nSOCIAL_AUTH_MAILRU_OAUTH2_SECRET = '{{ advdream_social_auth_mailru_oauth2_secret }}'\n\nBANNER_INDEX_MAIN_SITE = '146'\nBANNER_INDEX_MAIN_PLACE = '627'\nBANNER_INDEX_RIGHT_SITE = '146'\nBANNER_INDEX_RIGHT_PLACE = '623'\nBANNER_INDEX_FOOTER_SITE = '146'\nBANNER_INDEX_FOOTER_PLACE = '624'\nBANNER_INNER_RIGHT_SITE = '146'\nBANNER_INNER_RIGHT_PLACE = '623'\nBANNER_INNER_FOOTER_SITE = '146'\nBANNER_INNER_FOOTER_PLACE = '624'\n\nTEXT_BLOCK_CACHE_TTL = 60 * 60 * 24 # 1 day\nEXPO_REGION_CACHE_TTL = 60 * 60 * 24 # 1 day\n\nSESSION_COOKIE_DOMAIN = '.expo.hh.ru'\nFAQITEM_PER_PAGE = 
10\n","sub_path":"public/playbooks/roles/expo/templates/etc/hh-expo/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"291588282","text":"\"\"\"\nBuilds the infrastructure for Hip Edit backing services.\n\"\"\"\nfrom __future__ import print_function\nimport logging\nfrom os import path\nfrom hip_edit import activemq\nfrom hip_edit import cli_arg_parser\nfrom hip_edit import cf_template_builder\nfrom hip_edit import cf_driver\nfrom hip_edit import log\nfrom hip_edit.build_context import BuildContext\n\nLOGGER = log.get_stream_logger(__name__)\n\ndef main():\n \"\"\"\n Entry point\n \"\"\"\n cli_options = cli_arg_parser.services_arg_parser().parse_args()\n logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)\n if not cli_options.stack_down():\n if cli_options.stack_halt():\n if confirm(\"\"\"You are going to stop the ActveMQ instance and release the EIP forever.\n Is this what you want?\"\"\") != 'yes':\n LOGGER.info('No changes made.')\n return\n template = cf_template_builder.build(cli_options)\n else:\n if confirm(\"\"\"You are going to destroy all stack resources and\n this operation can not be done. Is this what you want?\"\"\") != 'yes':\n LOGGER.info('No changes made.')\n return\n template = None\n outputs = cf_driver.execute(cli_options, template)\n if outputs is None or cli_options.stack_down():\n return\n build_ctx = BuildContext()\n build_ctx.add('services', outputs).save()\n activemq_instance_id = build_ctx.get('MessageServerInstanceId', group_key='services')\n if cli_options.stack_up():\n activemq.check_instance_status(instance_id=activemq_instance_id)\n hostname = build_ctx.get('npm_config_messaging_host')\n outputs = activemq.configure(cli_options, hostname,\n templates_path=path.abspath('./artifacts/activemq'),\n distribution_type='bitnami')\n build_ctx.add(('services', 'activemq', 'users'), outputs).save()\n else:\n activemq.halt_instance(instance_id=activemq_instance_id)\n\n\n\ndef confirm(message, prompt=' ([no]/yes) '):\n \"\"\"Prints a message and returns user input.\"\"\"\n print(\"\\n\".join((s.strip() for s in message.split(\"\\n\"))), end='')\n return raw_input(prompt)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hip-edit-infra/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"118243247","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n#path =\"../frictionCompensator/velocities/test3/\"\npath =\"../testTorqueEncs/torques/leftArm/joint22/\"\n\nfor file in os.listdir(path):\n if file.endswith(\".txt\"):\n with open(path+file, 'r') as f:\n data = f.readlines()\n time = []\n velocity = []\n for line in data:\n numbers = line.split()\n time.append(float(numbers[1]))\n velocity.append(float(numbers[2]))\n time[:]=[x - time[0] for x in time]\n plt.plot(time, velocity, lw=4)\n\n\n accel=[]\n for j in range(len(velocity)-1):\n accel.append( float((velocity[j+1] - velocity[j])/(time[j+1] - time[j])) )\n accel.append(float((velocity[-1] - velocity[-2])/(time[-1] - time[-2])))\n plt.plot(time, accel, lw=4)\n plt.show()\n\n\n#labels\nplt.xlabel('time [s]', size=30)\n#plt.ylabel('velocity ' u\"\\u03C9\" ' [rad/s]', size=25)\nplt.ylabel('velocity [degrees/s]', size=30)\n\n#limits tuneados a mano\n#plt.xlim((0, 1))\nplt.xlim((0, 3.5))\n\nplt.xticks(size=25)\n# plt.ylim((100, y))\nplt.yticks(size=25)\n\n#plt.title('Velocity vs. Time curves', size=30)\n\n#show\nplt.show()\n","sub_path":"friction-gravity-compensation/friction/accel_stability.py","file_name":"accel_stability.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"131616492","text":"\"\"\"The following file is released under the Apache 2 Licence, see LICENCE.txt.\"\"\"\n\nimport setuptools\n\n\nwith open(\"README.md\") as fp:\n long_description = fp.read()\n\n\nsetuptools.setup(\n name=\"vpn_example\",\n version=\"0.0.1\",\n\n description=\"A Cloud Gurus Transit Gateway\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n author=\"Phil Basford\",\n\n package_dir={\"\": \"vpn_example\"},\n packages=setuptools.find_packages(where=\"vpn_example\"),\n\n install_requires=[\n \"aws-cdk.core==1.109.0\",\n \"aws-cdk.aws_ec2\",\n \"aws-cdk.aws_ecs\",\n \"aws-cdk.aws_ecs_patterns\",\n \"aws-cdk.aws_rds\",\n \"aws_cdk.aws_secretsmanager\"\n ],\n\n python_requires=\">=3.6\",\n\n classifiers=[\n \"Development Status :: 4 - Beta\",\n\n \"Intended Audience :: Developers\",\n\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Utilities\",\n\n \"Typing :: Typed\",\n ],\n)\n","sub_path":"vpn-example/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"338623324","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Documentation source:\n# - https://gbdev.gg8.se/wiki/articles/Sound_Controller\n\nclass PolynomialCounter:\n\n def __init__(self):\n self.i = 0\n self.shifted_divisor = 0\n\n def set_nr43(self,value):\n clock_shifted = value >> 4\n divisor = 0\n divisor = {\n 0: 8,\n 1: 16,\n 2: 32,\n 3: 48,\n 4: 64,\n 5: 80,\n 6: 96,\n 7: 112\n }.get(value & 0b00000111)\n self.shifted_divisor = divisor << clock_shifted\n self.i = 1\n\n def step(self, ticks):\n self.i -= ticks\n if self.i <= 0:\n self.i = self.shifted_divisor\n return True\n return False\n","sub_path":"vsgb/audio/polynomial_counter.py","file_name":"polynomial_counter.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"339984011","text":"# Copyright 2021 Nokia\n# Licensed under the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport secrets\nimport json\nfrom flask import Blueprint, render_template, flash, redirect, request\n\nimport a10.structures.constants\nimport a10.structures.identity\n\nimport a10.asvr.elements\nimport a10.asvr.results\nimport a10.asvr.policies\n\nfrom . import formatting\n\nelements_blueprint = Blueprint('elements', __name__, static_folder='../static', template_folder='../templates/')\n\nsecret = secrets.token_urlsafe(64)\nelements_blueprint.secret_key = secret\n\n\n@elements_blueprint.route(\"/elements\", methods=['GET'])\ndef elements():\n\tlrs=5 # default number of latest results if nothing else is specified\n\n\tif 'lrs' in request.args:\n\t\tlrs = int(request.args['lrs'])\n\n\tes = a10.asvr.elements.getElementsFull()\n\n\tfor e in es:\n\t\tprint(\"HERE\",e)\n\t\tres = a10.asvr.results.getLatestResults(e['itemid'], lrs)\n\t\tresultsummary = []\n\t\tfor r in res:\n\t\t\tsummarystr = {\n\t\t\t\t'verifiedAt': formatting.futc(r['verifiedAt']),\n\t\t\t\t'pid': r['policyID'],\n\t\t\t\t'pname': a10.asvr.policies.getPolicy(r['policyID']).msg()['name'],\n\t\t\t\t'res': r['result'],\n\t\t\t\t'rul': r['ruleName'],\n\t\t\t\t'rid': r['itemid']\n\t\t\t}\n\n\t\t\tresultsummary.append(summarystr)\n\n\t\te['summary'] = resultsummary\n\n\tes_sorted = sorted(es, key=lambda i: (i['name']))\n\n\treturn render_template('elements.html', elements=es_sorted)\n\n\n@elements_blueprint.route(\"/element/\", methods=['GET'])\ndef element(item_id):\n\tlrs=50 # default number of latest results if nothing else is specified\n\n\tif 'lrs' in request.args:\n\t\tlrs = int(request.args['lrs'])\n\n\te = a10.asvr.elements.getElement(item_id)\n\tevs = a10.asvr.expectedvalues.getExpectedValuesForElement(item_id)\n\tfor i in evs:\n\t\tp = a10.asvr.policies.getPolicy(i['policyID'])\n\t\tif p.rc()==a10.structures.constants.SUCCESS:\n\t\t\ti['policyname'] = p.msg()['name']\n\t\telse:\n\t\t\ti['policyname'] = \"POLICY DELETED\"\t\n\n\tresultsummary = []\n\tres = a10.asvr.results.getLatestResults(item_id, lrs)\n\n\tfor r in res:\n\t\tresultsummary.append({\n\t\t\t\t'verifiedAt': formatting.futc(r['verifiedAt']),\n\t\t\t\t'pid': r['policyID'],\n\t\t\t\t'pname': a10.asvr.policies.getPolicy(r['policyID']).msg()['name'],\n\t\t\t\t'res': r['result'],\n\t\t\t\t'rul': r['ruleName'],\n\t\t\t\t'msg': r['message'],\n\t\t\t\t'rid': r['itemid']\n\t\t\t})\n\n\tpp = json.dumps(e.msg(), sort_keys=True, indent=4)\n\treturn render_template(\"element.html\", e=e.msg(), evs=evs, rs=resultsummary, pp=pp)\n","sub_path":"u10/blueprints/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"4263545","text":"# -*- coding: utf-8 -*\n\"\"\"\n模型的训练控制器,核心成员有:model、reader、evaluate(待定)。核心方法有:\n0.运行时环境初始化\n1.网络初始化\n2.reader初始化\n3.模型训练\n4.模型评估\n5.模型保存:meta信息尽可能完整一些\n6.模型指标与模型网络结构可视化\n7.模型选择的策略\n\n--------------------------\n\n核心方法的调用顺序为:\n1.打印meta及version相关日志,便于问题追查\n2.鉴权\n3.初始化运行所需要的环境\n\"\"\"\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport paddle.distributed.fleet as fleet\nimport paddle.distributed.fleet.base.role_maker as role_maker\nimport paddle\nimport paddle.static as static\nfrom .. import version\nfrom ..common.rule import InstanceName\nfrom ..utils.util_helper import get_model_paths, save_meta_data, make_targz\n\n\nclass BaseStaticTrainer(object):\n def __init__(self, params, data_set_reader, model):\n \"\"\"\n :param params\n :param data_set_reader\n :param model\n \"\"\"\n self.params = params\n self.data_set_reader = data_set_reader\n self.model_class = model\n\n # 参数解析\n # 动态图or静态图\n self.enable_static = True\n self.is_recompute = self.params.get(\"is_recompute\", 0)\n if 'output_path' in self.params.keys() and self.params[\"output_path\"]:\n self.save_checkpoints_path = os.path.join(self.params[\"output_path\"], \"save_checkpoints\")\n self.save_inference_model_path = os.path.join(self.params[\"output_path\"], \"save_inference_model\")\n else:\n self.save_checkpoints_path = \"./output/save_checkpoints/\"\n self.save_inference_model_path = \"./output/save_inference_model/\"\n\n self.forward_train_output = {}\n self.fetch_list_train = []\n self.fetch_list_evaluate = []\n self.fetch_list_train_key = []\n self.fetch_list_evaluate_key = []\n\n self.parser_meta()\n self.use_fleet = False\n self.init_env_static()\n\n def do_train(self):\n \"\"\"\n 启动数据集循环,开始训练\n :return:\n \"\"\"\n raise NotImplementedError\n\n def do_evaluate(self, reader, phase, step):\n \"\"\"在当前的训练状态下,对某个测试集进行评估\n :param reader:待评估数据集\n :param phase:当前的运行阶段\n :param step:当前的运行步数\n \"\"\"\n raise NotImplementedError\n\n def do_visual(self):\n \"\"\"评估指标的可视化展示\n \"\"\"\n raise NotImplementedError\n\n def parser_meta(self):\n logging.info(\"parser meta ....\")\n model_meta_info = {}\n if self.params[\"load_checkpoint\"] or self.params[\"load_parameters\"]:\n model_meta_info = self.load_model_meta_info(\"net_model\")\n elif self.params[\"pre_train_model\"]:\n model_meta_info = self.load_model_meta_info(\"pre_train_model\")\n # 由外部json配置传入\n meta_param = {}\n extra_param = self.params.get(\"extra_param\", None)\n if extra_param:\n meta_param = extra_param.get(\"meta\", None)\n\n self.meta_dict = {\n \"framework_version\": version.full_version,\n \"model_type\": model_meta_info.get(\"model_type\", \"\"),\n \"pretrain_model_version\": model_meta_info.get(\"pretrain_model_version\", \"\"),\n \"pretrain_model_type\": model_meta_info.get(\"pretrain_model_type\", \"\"),\n \"job_type\": meta_param.get(\"job_type\", \"custom\"),\n \"net_type\": self.model_class.__class__.__name__,\n \"task_type\": \"train\",\n \"deploy_type\": 4,\n \"is_dynamic\": 0\n }\n return\n\n def init_env_static(self):\n \"\"\"\n 初始化静态图的运行时环境:包括:program、executor、fleet、cuda、place\n :return:\n \"\"\"\n logging.info(\"init environment on static mode......\")\n paddle.enable_static()\n\n # step1: init program\n self.startup_program = static.Program()\n self.train_program = static.Program()\n self.test_program = static.Program()\n self.evaluate_program = static.Program()\n self.save_inference_program = static.Program()\n\n random_seed = self.params.get(\"random_seed\", 0)\n if random_seed is not 
None:\n self.startup_program.random_seed = random_seed\n self.train_program.random_seed = random_seed\n self.test_program.random_seed = random_seed\n self.evaluate_program.random_seed = random_seed\n self.save_inference_program.random_seed = random_seed\n\n # step2: init run place、executor、fleet\n self.num_trainers = 1\n self.trainer_id = 0\n\n self.place_type = self.params.get(\"PADDLE_PLACE_TYPE\", os.getenv(\"PADDLE_PLACE_TYPE\", \"cpu\"))\n self.params[\"PADDLE_PLACE_TYPE\"] = self.place_type\n\n # executor执行器的一些参数设置\n self.use_fast_executor = self.params.get(\"use_fast_executor\", False)\n self.exe_strategy = paddle.static.ExecutionStrategy()\n self.exe_strategy.num_iteration_per_run = self.params.get(\"num_iteration_per_run\", 1)\n self.exe_strategy.num_iteration_per_drop_scope = self.params.get(\"num_iteration_per_drop_scope\", 10)\n\n self.build_strategy = paddle.static.BuildStrategy()\n\n if self.place_type == \"gpu\":\n logging.info(\"gpu place....\")\n gpus = os.getenv('FLAGS_selected_gpus', '0').split(\",\")\n self.gpu_id = int(gpus[0])\n self.run_place = paddle.CUDAPlace(int(gpus[0]))\n self.dev_count = len(gpus)\n self.exe_strategy.num_threads = self.dev_count\n self.use_cuda = True\n \"\"\"\n gpu fleet 使用三步骤:\n 1.导入依赖包:from paddle.distributed import fleet\n 2.初始化fleet环境:包括定义缺省的分布式策略,然后通过将参数is_collective设置为True,使训练架构设定为Collective架构。\n strategy = fleet.DistributedStrategy()\n fleet.init(is_collective=True, strategy=strategy)\n 3.使用distributed_optimizer设置分布式训练优化器\n optimizer = fleet.distributed_optimizer(optimizer)\n \"\"\"\n if self.params.get(\"PADDLE_IS_FLEET\", 0):\n fleet.init(is_collective=True)\n logging.info(\"fleet init ...\")\n self.use_fleet = True\n self.strategy = fleet.DistributedStrategy()\n self.strategy.execution_strategy = self.exe_strategy\n self.strategy.build_strategy = self.build_strategy\n # TODO nccl_comm_num 可以加快GPU之间的通信效率,建议单机设置为1,多机设置为2。\n # TODO 找个判断多机的方法,设置nccl_comm_num参数\n self.strategy.nccl_comm_num = 1\n self.strategy.sync_nccl_allreduce = True\n self.strategy.fuse_all_reduce_ops = True\n\n # amp设置\n self.use_amp = self.params.get(\"use_amp\", False)\n if self.use_amp:\n opt_params = self.model_class.model_params.get('optimization', None)\n init_loss_scaling = opt_params.get(\"init_loss_scaling\", 1.0)\n incr_every_n_steps = opt_params.get(\"incr_every_n_steps\", 1000)\n decr_every_n_nan_or_inf = opt_params.get(\"decr_every_n_nan_or_inf\", 2)\n incr_ratio = opt_params.get(\"incr_ratio\", 2.0)\n decr_ratio = opt_params.get(\"decr_ratio\", 0.8)\n\n self.strategy.amp = True\n self.strategy.amp_configs = {\n \"init_loss_scaling\": init_loss_scaling,\n \"decr_every_n_nan_or_inf\": decr_every_n_nan_or_inf,\n \"incr_every_n_steps\": incr_every_n_steps,\n \"incr_ratio\": incr_ratio,\n \"use_dynamic_loss_scaling\": True,\n \"decr_ratio\": decr_ratio,\n \"custom_white_list\": [],\n \"custom_black_list\": [],\n }\n\n fleet.init(is_collective=True, strategy=self.strategy)\n # 以下代码是为了打印日志,不影响训练\n trainer_id = fleet.worker_index()\n current_endpoint = os.getenv(\"PADDLE_CURRENT_ENDPOINT\")\n worker_endpoints = fleet.worker_endpoints()\n trainers_num = len(worker_endpoints)\n logging.debug(\"worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}\".format(\n worker_endpoints,\n trainers_num,\n current_endpoint,\n trainer_id))\n self.num_trainers = trainers_num\n self.trainer_id = trainer_id\n else:\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n\n elif self.place_type == \"xpu\":\n logging.info(\"xpu_place, support single 
device mode only\")\n xpus = os.getenv('FLAGS_selected_xpus', '0').split(\",\")\n # self.run_place = paddle.XPUPlace(int(xpus[0]))\n self.run_place = paddle.set_device(\"xpu:\" + xpus[0])\n self.dev_count = 1\n self.exe_strategy.num_threads = self.dev_count\n self.gpu_id = 0\n self.use_cuda = False\n logging.info(\"finish prepare xpu single deviece env\")\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n else:\n logging.info(\"cpu place....\")\n self.run_place = paddle.CPUPlace()\n self.dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n self.use_cuda = False\n self.gpu_id = 0\n self.exe_strategy.num_threads = self.dev_count\n \"\"\"\n cpu fleet 使用步骤\n https://fleetx.readthedocs.io/en/latest/paddle_fleet_rst/parameter_server/ps_quick_start.html\n 1.导入依赖:\n import paddle.distributed.fleet as fleet\n import paddle.distributed.fleet.base.role_maker as role_maker\n \n 2.定义分布式模式并初始化分布式训练环境,当前参数服务器模式只支持静态图模式\n 通过fleet.init()接口,用户可以定义训练相关的环境,注意此环境是用户预先在环境变量中配置好的,\n 包括:训练节点个数,服务节点个数,当前节点的序号,服务节点完整的IP:PORT列表等。\n paddle.enable_static()\n role = role_maker.PaddleCloudRoleMaker()\n fleet.init(role)\n \n 3.组网,加载reader\n model = init_net()\n reader = init_dataset_reader()\n \n 4.定义同步训练 Strategy 及 Optimizer\n optimizer = paddle.optimizer.SGD(learning_rate=0.0001)\n strategy = fleet.DistributedStrategy()\n strategy.a_sync = True\n optimizer = fleet.distributed_optimizer(optimizer, strategy)\n optimizer.minimize(model.cost)\n \n 5.训练\n \n \"\"\"\n if self.params.get(\"PADDLE_IS_FLEET\", 0):\n logging.info(\"int fleet parameter server mode in multi cpus....\")\n role = role_maker.PaddleCloudRoleMaker(is_collective=False)\n fleet.init(role)\n self.use_fleet = True\n else:\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n\n # step3: init executor with run place\n self.executor = static.Executor(self.run_place)\n\n # step4: init model net\n self.init_static_model_net()\n\n # step5: run executor\n self.executor.run(self.startup_program)\n\n # step6: load model params: checkpoints or pre_train_model\n if self.params[\"load_checkpoint\"] or self.params[\"load_parameters\"]:\n self.load_static_model_params(\"net_model\")\n elif self.params[\"pre_train_model\"]:\n self.load_static_model_params(\"pre_train_model\")\n\n # step7: init train_executor\n if self.use_fleet:\n self.train_exe = self.executor\n else:\n if self.place_type == \"xpu\":\n self.train_exe = self.executor\n else:\n # 单机模式下可以使用ParallelExecutor来提速\n self.train_exe = static.ParallelExecutor(\n use_cuda=self.use_cuda,\n loss_name=self.forward_train_output[InstanceName.LOSS].name,\n exec_strategy=self.exe_strategy,\n build_strategy=self.build_strategy,\n main_program=self.train_program,\n num_trainers=self.num_trainers,\n trainer_id=self.trainer_id)\n\n def init_static_model_net(self):\n \"\"\"init static model net\n \"\"\"\n logging.info(\"init_model_net.....\")\n self.init_static_train_net()\n if self.params[\"is_eval_dev\"]:\n self.evaluate_program = self.init_static_evaluate_net(self.data_set_reader.dev_reader,\n self.evaluate_program)\n if self.params[\"is_eval_test\"]:\n self.test_program = self.init_static_evaluate_net(self.data_set_reader.test_reader, self.test_program)\n self.init_static_save_inference_net()\n\n def init_static_train_net(self):\n \"\"\"\n 训练网络初始化,前向+后向\n :return:\n \"\"\"\n with static.program_guard(self.train_program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n self.data_set_reader.train_reader.dataset.create_reader()\n fields_dict = 
self.data_set_reader.train_reader.dataset.instance_fields_dict()\n self.model_class.structure()\n if getattr(self.model_class, 'param_attrs', None):\n self.model_class.set_param_attrs(self.train_program)\n self.forward_train_output = self.model_class.forward(fields_dict, phase=InstanceName.TRAINING)\n loss = self.forward_train_output[InstanceName.LOSS]\n self.model_class.set_optimizer()\n \n # 加入recompute功能\n if self.is_recompute:\n self.strategy.recompute = True\n self.strategy.recompute_configs = {\"checkpoints\": self.forward_train_output['checkpoints']}\n del self.forward_train_output[\"checkpoints\"]\n \n if self.use_fleet:\n self.optimizer = fleet.distributed_optimizer(self.model_class.optimizer, strategy=self.strategy)\n else:\n self.optimizer = self.model_class.optimizer\n\n self.optimizer.minimize(loss)\n\n if self.forward_train_output.__contains__(InstanceName.TARGET_FEED):\n self.forward_train_output.pop(InstanceName.TARGET_FEED)\n if self.forward_train_output.__contains__(InstanceName.TARGET_PREDICTS):\n self.forward_train_output.pop(InstanceName.TARGET_PREDICTS)\n # TODO:这里需要注意一下,或许有坑\n # self.forward_train_output.update(self.optimizer_output_dict)\n # 如果想获取学习率,加上下面这一行就能fetch出来\n self.forward_train_output.update({\"lr\": \"learning_rate_0\"})\n self.fetch_list_train = list(self.forward_train_output.values())\n self.fetch_list_train_key = list(self.forward_train_output.keys())\n\n def init_static_evaluate_net(self, reader, program):\n \"\"\"初始化评估过程的网络,网络只有前向\n :return:\n \"\"\"\n with static.program_guard(program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n reader.dataset.create_reader()\n fields_dict = reader.dataset.instance_fields_dict()\n self.model_class.structure()\n self.forward_evaluate_output = self.model_class.forward(fields_dict, phase=InstanceName.EVALUATE)\n if \"mems\" in self.forward_evaluate_output.keys():\n self.mems_eval = self.forward_evaluate_output[\"mems\"]\n del self.forward_evaluate_output[\"mems\"]\n\n if self.forward_evaluate_output.__contains__(InstanceName.TARGET_FEED):\n self.forward_evaluate_output.pop(InstanceName.TARGET_FEED)\n\n if self.forward_evaluate_output.__contains__(InstanceName.TARGET_PREDICTS):\n self.forward_evaluate_output.pop(InstanceName.TARGET_PREDICTS)\n\n self.fetch_list_evaluate = list(self.forward_evaluate_output.values())\n self.fetch_list_evaluate_key = list(self.forward_evaluate_output.keys())\n\n program = program.clone(for_test=True)\n return program\n\n def init_static_save_inference_net(self):\n \"\"\"初始化用来保存inference model的网络,只有前向,且是裁切过后的网络。\n :return:\n \"\"\"\n with static.program_guard(self.save_inference_program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n self.data_set_reader.predict_reader.dataset.create_reader()\n fields_dict = self.data_set_reader.predict_reader.dataset.instance_fields_dict()\n self.model_class.structure()\n forward_output_dict = self.model_class.forward(fields_dict, phase=InstanceName.SAVE_INFERENCE)\n feed_tensor = forward_output_dict[InstanceName.TARGET_FEED]\n target_feed_list = []\n for x in feed_tensor:\n target_feed_list.append(x.name)\n\n self.infer_dict = get_infer_data_meta(target_feed_list, fields_dict)\n self.feed_target_tensor = feed_tensor\n self.inference_output = forward_output_dict[InstanceName.TARGET_PREDICTS]\n\n self.save_inference_program = self.save_inference_program.clone(for_test=True)\n\n def load_static_model_params(self, params_type):\n \"\"\"\n \"\"\"\n logging.info(\"load_model_params on static mode....\")\n if params_type 
== \"net_model\":\n if self.params[\"load_checkpoint\"] and self.params[\"load_parameters\"]:\n raise ValueError(\n \"ERROR: config 'load_checkpoint' and 'load_parameters' \"\n \"both are set! Only one of them should be set. \"\n \"if you want warmstart checkpoint keep its learning_rate and moments, plese set 'load_checkpoint'. \"\n \"if you want warmstart checkpoint with only its parameters, and you want reset a new learning_rate \"\n \"by config, plese set 'load_parameters'\")\n if self.params[\"load_checkpoint\"]:\n original_path = self.params[\"load_checkpoint\"]\n init_checkpoint(exe=self.executor, init_checkpoint_path=original_path, main_program=self.train_program)\n elif self.params[\"load_parameters\"]:\n original_path = self.params[\"load_parameters\"]\n init_pretraining_params(exe=self.executor,\n pretraining_params_path=original_path, main_program=self.train_program)\n\n elif params_type == \"pre_train_model\":\n # pretrain_embedding_path = self.get_pretrain_embedding_path()\n for pre_train_model in self.params[\"pre_train_model\"]:\n logging.info(\"pre_train_model's name = %s\" % pre_train_model[\"name\"])\n params_path = pre_train_model[\"params_path\"]\n init_pretraining_params(exe=self.executor,\n pretraining_params_path=params_path,\n main_program=self.train_program)\n # self.save_model(0)\n # exit()\n\n def save_model(self, steps, save_checkpoint=True, save_inference=True):\n if self.enable_static:\n logging.info(\"save model on static....\")\n if save_checkpoint:\n self.save_checkpoint(self.executor, self.train_program, steps)\n if save_inference:\n self.save_inference(self.executor, self.feed_target_tensor, self.inference_output,\n self.save_inference_program, steps, self.infer_dict)\n else:\n logging.info(\"save model on dynamic....\")\n\n def save_checkpoint(self, exe, program, steps):\n \"\"\"\n :param exe:\n :param program:\n :param steps:\n :return:\n \"\"\"\n path_dict = get_model_paths(self.save_checkpoints_path, self.save_inference_model_path, steps)\n save_path = path_dict[\"checkpoints_model_path\"]\n # todo: 需要验证一下fleet的save和非fleet有没有区别\n paddle.fluid.io.save_persistables(exe, save_path, program)\n meta_path = path_dict[\"checkpoints_meta_path\"]\n save_meta_data(self.meta_dict, meta_path)\n if self.params.get(\"need_tar\", False):\n # 压缩为tar.gz\n errcode = make_targz(save_path + \".tar.gz\", save_path)\n if errcode == 0:\n shutil.rmtree(save_path)\n\n def save_inference(self, exe, feed_vars, target_vars, program, steps, data_dict):\n \"\"\"\n :param exe:\n :param feed_vars\n :param target_vars\n :param program:\n :param steps:\n :param data_dict:\n :return:\n \"\"\"\n path_dict = get_model_paths(self.save_checkpoints_path, self.save_inference_model_path, steps)\n save_path = os.path.join(path_dict[\"inference_model_path\"], \"wenxin\")\n # paddle.fluid.io.save_inference_model\n # paddle.static.save_inference_model\n paddle.static.save_inference_model(\n save_path,\n feed_vars,\n target_vars,\n exe,\n program=program,\n model_filename=\"model\",\n params_filename=\"params\")\n\n infer_meta_path = path_dict[\"inference_infer_meta_path\"]\n meta_path = path_dict[\"inference_meta_path\"]\n save_meta_data(data_dict, infer_meta_path)\n save_meta_data(self.meta_dict, meta_path)\n\n def load_model_meta_info(self, load_model):\n \"\"\"\n 获取模型的meta信息\n :param load_model:\n :return:\n \"\"\"\n meta_info = {}\n if load_model == \"net_model\":\n if self.params[\"load_checkpoint\"]:\n original_path = self.params[\"load_checkpoint\"]\n meta_info = 
parse_meta(original_path)\n elif self.params[\"load_parameters\"]:\n original_path = self.params[\"load_parameters\"]\n meta_info = parse_meta(original_path)\n elif load_model == \"pre_train_model\":\n for pre_train_model in self.params[\"pre_train_model\"]:\n logging.info(\"pre_train_model's name = %s\" % pre_train_model[\"name\"])\n params_path = os.path.dirname(pre_train_model[\"params_path\"])\n # original_path = params_path = os.path.dirname(pre_train_model[\"params_path\"])\n meta_info = parse_meta(params_path)\n return meta_info\n\n\ndef get_infer_data_meta(target_feed_list, fields_dict):\n \"\"\"\n :param target_feed_list:\n :param fields_dict:\n :return:\n \"\"\"\n infer_dict = {\"fields\": []}\n for name in target_feed_list:\n for k1, v1 in fields_dict.items(): # dict_keys(['text_a', 'label'])\n for k2, v2 in v1.items():\n if v2:\n for k3 in v2:\n # logging.info(k3)\n if v2[k3] and v2[k3].name == name:\n field_ele = \"%s#%s\" % (k1, k3)\n infer_dict[\"fields\"].append(field_ele)\n return infer_dict\n\n\ndef parse_meta(model_dir):\n \"\"\"\n :param model_dir:\n :return: meta_dict\n \"\"\"\n json_path = None\n meta_dict = {}\n for file in os.listdir(model_dir):\n if file.endswith(\".meta\"):\n json_path = file\n break\n try:\n if json_path:\n with open(os.path.join(model_dir, json_path), 'r') as json_file:\n model_info = json_file.read()\n meta_dict = json.loads(model_info)\n except Exception as e:\n logging.error(\"error parsing model.meta: %s\" % e)\n return meta_dict\n\n\ndef init_checkpoint(exe, init_checkpoint_path, main_program):\n \"\"\"Load checkpoint files.\n :param exe:\n :param init_checkpoint_path:\n :param main_program:\n :return:\n \"\"\"\n assert os.path.exists(init_checkpoint_path), \"[%s] can't be found.\" % init_checkpoint_path\n\n def existed_persistables(var):\n \"\"\"\n existed_persistables\n \"\"\"\n if not paddle.fluid.io.is_persistable(var):\n return False\n return os.path.exists(os.path.join(init_checkpoint_path, var.name))\n\n paddle.fluid.io.load_vars(exe, init_checkpoint_path, main_program=main_program, predicate=existed_persistables)\n logging.info(\"Load model from {}\".format(init_checkpoint_path))\n\n\ndef init_pretraining_params(exe, pretraining_params_path, main_program):\n \"\"\"\n :param exe:\n :param pretraining_params_path:\n :param main_program:\n :return:\n \"\"\"\n assert os.path.exists(pretraining_params_path), \"[%s] can't be found.\" % pretraining_params_path\n\n def existed_params(var):\n \"\"\"\n existed_params\n \"\"\"\n if not isinstance(var, paddle.fluid.framework.Parameter):\n return False\n return os.path.exists(os.path.join(pretraining_params_path, var.name))\n\n paddle.fluid.io.load_vars(exe, pretraining_params_path, main_program=main_program, predicate=existed_params)\n\n\n","sub_path":"erniekit/controller/static_trainer.py","file_name":"static_trainer.py","file_ext":"py","file_size_in_byte":26171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"513144393","text":"#!/usr/bin/env python\nimport os, sys, unittest\nfrom time import sleep\nimport PyCoralTest\n\n#============================================================================\n\ndef createTable():\n ###print MSGHDR, \"Recreate the table\"\n session = svc.connect( urlRW, coral.access_Update )\n session.transaction().start(False)\n session.nominalSchema().dropIfExistsTable( tableName )\n description = coral.TableDescription()\n description.setName( tableName)\n description.insertColumn( 'ID', 'int' )\n description.insertColumn( 'Data', 'float' ) # MySQL test fails with double?\n description.setPrimaryKey( 'ID' )\n tableHandle = session.nominalSchema().createTable( description )\n session.transaction().commit()\n sleep(1)\n\n#============================================================================\n\nclass PyCoralMiscellaneousBugsTest( PyCoralTest.TestCase ):\n\n #------------------------------------------------------------------------\n\n def setUp(self):\n # Call the base class method\n PyCoralTest.TestCase.setUp(self)\n print(\"\")\n # Use CORAL defaults for retrial parameters\n svc.configuration().setConnectionRetrialPeriod( retrialPeriod )\n svc.configuration().setConnectionRetrialTimeOut( retrialTimeOut )\n # Use CORAL defaults for connection sharing (enabled)\n svc.configuration().enableConnectionSharing()\n # Configure the connection service (see bug #71449)\n # - disable the CORAL connection pool cleanup\n # - connection timeout=0: \"idle\" connections are immediately \"expired\"\n svc.configuration().disablePoolAutomaticCleanUp()\n svc.configuration().setConnectionTimeOut(0)\n # Use CORAL defaults for RO transactions (serializable)\n if \"CORAL_ORA_SKIP_TRANS_READONLY\" in os.environ:\n del os.environ[\"CORAL_ORA_SKIP_TRANS_READONLY\"]\n\n #------------------------------------------------------------------------\n\n def tearDown(self):\n # Purge the connection pool after each test\n svc.purgeConnectionPool()\n # Call the base class method\n PyCoralTest.TestCase.tearDown(self)\n \n #------------------------------------------------------------------------\n\n # Test bug #61090 aka bug #76501\n def test010_bug61090(self):\n session = svc.connect( urlRW, coral.access_Update )\n # Fill table in R/W tx (do not delete bulkInserter yet)\n print(MSGHDR, \"Fill the table in R/W tx - do not delete bulkInserter\")\n session.transaction().start(False)\n editor=session.nominalSchema().tableHandle(tableName).dataEditor()\n editor.deleteRows(\"\",coral.AttributeList())\n bulkInserter = editor.bulkInsert( rowBuffer, 100 )\n for i in range(5):\n rowBuffer[\"ID\"].setData(i)\n rowBuffer[\"Data\"].setData(i+0.1*i)\n bulkInserter.processNextIteration()\n bulkInserter.flush()\n session.transaction().commit()\n # Read data in R/O tx (no need to reconnect R/O session to show bug)\n print(MSGHDR, \"Query the table in R/O tx\")\n session.transaction().start(True)\n query = session.nominalSchema().newQuery()\n query.addToTableList(tableName)\n query.setRowCacheSize(3)\n query.defineOutput(rowBuffer)\n cursor=query.execute()\n nrows = 0\n while cursor.next() :\n nrows = nrows + 1\n row = cursor.currentRow()\n print(MSGHDR, \"Current row:\", row)\n self.assertEqual( 5, nrows )\n cursor=None\n query=None\n session.transaction().commit()\n # Release the bulk operation outside a tx\n print(MSGHDR, \"Release bulkInserter outside tx\")\n crash=True\n ###crash=False # Flag to cause the crash or bypass it\n if not crash: session.transaction().start(True)\n bulkInserter = None # 
CRASH!\n if not crash: session.transaction().commit()\n\n#============================================================================\n\nif __name__ == '__main__':\n\n print(\"\")\n MSGHDR = \"+++ PYCORAL MIXBUGS TEST +++\"\n\n # Build the unique table name and the URLs\n tableName = \"PYCORALMIXBUGSTEST\"\n import PyCoralTest\n tableName = PyCoralTest.buildUniqueTableName( tableName )\n [urlRW,urlRO] = PyCoralTest.parseArguments()\n print(MSGHDR, \"Table name:\", tableName)\n print(MSGHDR, \"URL [RW,RO]:\", [urlRW,urlRO])\n\n # Bootstrap CORAL\n #os.environ['CORAL_MSGLEVEL']='Verbose'\n #os.environ['CORAL_MSGLEVEL']='Info'\n import coral\n print(MSGHDR, \"Instantiate the PyCoral connection service\")\n svc = coral.ConnectionService()\n\n # Save CORAL default retrial parameters as global variables\n retrialPeriod=svc.configuration().connectionRetrialPeriod()\n retrialTimeOut=svc.configuration().connectionRetrialTimeOut()\n\n # Prepare the row buffer for this test\n rowBuffer = coral.AttributeList()\n rowBuffer.extend(\"ID\",\"int\")\n rowBuffer.extend(\"Data\",\"float\")\n\n # Recreate the table\n print(MSGHDR, \"Create the test table\")\n createTable()\n\n # Start the unit test (can specify one specific test as cl argument)\n print(MSGHDR, \"Start the test suite\")\n unittest.main( testRunner =\n unittest.TextTestRunner(stream=sys.stdout,verbosity=2) )\n","sub_path":"PyCoral/tests/Python3/test_PyCoral_MiscellaneousBugs.py","file_name":"test_PyCoral_MiscellaneousBugs.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"156318861","text":"import os\nfrom gp import bayesian_optimisation\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score, KFold\nfrom sklearn.gaussian_process.kernels import Matern\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nimport pandas as pd\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.naive_bayes import GaussianNB\n\ntry: # if running in CLI\n cur_path = os.path.abspath(__file__)\nexcept NameError: # if running in IDE\n cur_path = os.getcwd()\n\nwhile cur_path.split('/')[-1] != 'bb_preds':\n cur_path = os.path.abspath(os.path.join(cur_path, os.pardir))\noutput_folder = os.path.join(cur_path, 'model_results')\n\ndef test_scaler(x, y):\n print('Searching for best scaler...')\n scores = []\n for scale in [StandardScaler(), MinMaxScaler(), RobustScaler()]:\n pipe = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108))])\n score = cross_val_score(pipe, x, y, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 46))\n scores.append(np.mean(score))\n if scores.index(max(scores)) == 0:\n print('Using Standard Scaler')\n return StandardScaler()\n elif scores.index(max(scores)) == 1:\n print('Using Min Max Scaler')\n return MinMaxScaler()\n elif scores.index(max(scores)) == 2:\n print('Using Robust Scaler')\n return RobustScaler()\n\ndef sample_loss_n_feats(parameters):\n feats = int(parameters[0])\n print('%s features' % (feats))\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = solver_, C = C_))])\n score = cross_val_score(model, x_data[feat_sigs[:feats]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 1108))\n print('----> score: %s' % np.mean(score))\n return np.mean(score)\n\ndef find_feats():\n print('Searching for best number of features...')\n bounds = np.array([[1, len(list(x_data))]])\n start = [[len(list(x_data))]]\n results = bayesian_optimisation(n_iters=5, \n sample_loss=sample_loss_n_feats, \n bounds=bounds,\n x0 = start,\n gp_params = {'kernel': Matern(), 'alpha': 1e-5, 'n_restarts_optimizer': 10, 'normalize_y': True})\n return int(results[0][list(results[1]).index(max(results[1]))])\n\ndef test_solver(x, y):\n print('Searching for best solver...')\n scores = []\n for slvr in ['liblinear', 'newton-cg', 'lbfgs', 'sag','saga']:\n pipe = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = slvr, C = C_))])\n score = cross_val_score(pipe, x, y, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 86))\n scores.append(np.mean(score))\n if scores.index(max(scores)) == 0:\n print('Using liblinear')\n return 'liblinear'\n elif scores.index(max(scores)) == 1:\n print('Using newton-cg')\n return 'newton-cg'\n elif scores.index(max(scores)) == 2:\n print('Using lbfgs')\n return 'lbfgs'\n elif scores.index(max(scores)) == 3:\n print('Using sag')\n return 'sag'\n elif scores.index(max(scores)) == 4:\n print('Using saga')\n return 'saga'\n \ndef sample_loss_c(parameters):\n c = 10**parameters[0]\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, C = c))])\n score = cross_val_score(model, x_data[feat_sigs[:features]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 88))\n print('----> score: %s' % np.mean(score))\n return np.mean(score)\n \ndef c_tuning():\n print('-- Beginning C Search')\n bounds = np.array([[-3, 3]])\n results 
= bayesian_optimisation(n_iters=5, \n sample_loss=sample_loss_c, \n bounds=bounds,\n gp_params = {'kernel': Matern(), 'alpha': 1e-5, 'n_restarts_optimizer': 10, 'normalize_y': True})\n print('Best C: %s, Best score: %s' % (results[0][list(results[1]).index(max(results[1]))][0], max(results[1]))) \n return 10**results[0][list(results[1]).index(max(results[1]))][0]\n\ndef execute(sa, od, X_data = None, Y_data = None):\n # declare the module-level names shared with the sample-loss helpers up front:\n # a global statement after an assignment to the same name is a SyntaxError in Python 3\n global x_data, y_data, scale, C_, solver_, feat_sigs, features\n x_data = X_data\n y_data = Y_data\n x_feats = list(x_data)\n \n scale = test_scaler(x_data, y_data) #minmax\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'w')\n f.write('scale: %s,'%(scale))\n f.close()\n \n C_ = 1\n solver_ = test_solver(x_data, y_data)\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('start solver: %s,'%(solver_))\n f.close()\n\n feat_sigs = list(x_data)\n features = len(feat_sigs)\n C_ = c_tuning()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('start C: %s,'%(C_))\n f.close()\n\n print('Starting feature ranking')\n sigs = f_regression(x_data, y_data)[1]\n indices = np.argsort(sigs)\n feat_sigs = [x_feats[i] for i in indices]\n features = find_feats()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('n feats: %s,'%(features))\n f.close()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('significant features: ')\n for line in feat_sigs[:features]:\n f.write('%s, '%(line))\n f.close()\n \n solver_ = test_solver(x_data[feat_sigs[:features]], y_data)\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final solver: %s,'%(solver_))\n f.close()\n\n C_ = c_tuning()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final C: %s,'%(C_))\n f.close()\n \n print('---Finalizing Log Model')\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = solver_, C = C_))]) \n tune_score = cross_val_score(model, x_data[feat_sigs[:features]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 88))\n print('...Log Model Finalized')\n tune_score = np.mean(tune_score)\n base_model = Pipeline([('scale',scale), ('clf',GaussianNB())])\n baseline_score = cross_val_score(base_model, x_data[feat_sigs], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 86))\n baseline_score = np.mean(baseline_score)\n improvement = (tune_score - baseline_score)/baseline_score\n print('%s percent improvement from baseline' % (improvement * 100))\n if improvement < 0:\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final score: XXX,')\n f.close()\n return 0\n else:\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final score: %s,'%(tune_score))\n f.close()\n return tune_score\n ","sub_path":"model_tuning/log_tuning.py","file_name":"log_tuning.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"113028108","text":"import os\nimport csv\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.apps import apps\n\n\nclass Command(BaseCommand):\n args = 'Model.csv'\n help = 'Import `Model`.csv into `Model` database.'\n\n def handle(self, *args, **options):\n if len(args) != 1:\n raise CommandError(\"Invalid Invocation. See help.\")\n\n csvPath = args[0]\n if not os.path.exists(csvPath):\n raise CommandError(\"%s doesnt exist.\" % csvPath)\n\n model, _ = os.path.splitext(os.path.basename(csvPath))\n Model = apps.get_model(\"message_sender\", model.title())\n if not Model:\n raise CommandError(\"%s Model doesn't exist'\")\n\n model_fields = [f.name for f in Model._meta.fields]\n fields_name = []\n with open(csvPath, 'rb') as csvFile:\n reader = csv.reader(csvFile, delimiter=',', quotechar=\"\\\"\")\n fields_name = reader.next()\n for i, _ in enumerate(fields_name):\n fields_name[i] = fields_name[i].lower()\n fields_name[i] = fields_name[i].replace(' ', '_')\n if not fields_name[i] in model_fields:\n raise CommandError(\"%s field doesn't exists in %s Model\" %\n (fields_name[i], Model))\n\n for row in reader:\n try:\n obj = Model()\n for i, field in enumerate(row):\n setattr(obj, fields_name[i], field)\n obj.save()\n except Exception as e:\n raise CommandError(e)\n","sub_path":"message_sender/management/commands/importcsv.py","file_name":"importcsv.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"162141995","text":"import urllib.parse \nfrom config import Config\nimport datetime,calendar\n\nconfig = Config()\nclass EventsModel:\n def __init__(self):\n self.url = self.setUrl(endpoint=config.getTestCasesEndPointURL('TestSuite_1'))\n self.ric = ''\n self.queryString = {\n \"eventTypes\":\"\",\n \"start\":\"\",\n \"end\":\"\",\n \"adjustments\":\"\",\n \"fields\":\"\",\n \"count\":\"\",\n \"sessions\": \"\"\n }\n\n def setUrl(self,protocol='http',endpoint=None,port=8080):\n return \"{protocol}://{endpoint}:{port}\".format(protocol=protocol,endpoint=endpoint,port=port)\n\n def getURL(self):\n keys = self.queryString.keys()\n for k in list(keys):\n if self.queryString[k]==\"\":\n self.queryString.pop(k)\n\n path = \"data/historical-pricing/v1/views/\"\n URL = \"{url}/{path}/{ric}?{query}\".format(url=self.url,path=path,ric=self.ric,query=urllib.parse.urlencode(self.queryString))\n return URL\n \n def setRic(self, ric):\n self.ric = ric\n \n def setEventTypes(self,eventTypes):\n self.queryString['eventTypes'] = eventTypes\n\n def setStartDate(self, startdate):\n self.queryString['start'] = startdate\n \n def setEndDate(self, enddate):\n self.queryString['end'] = enddate\n\n def setAdjustments(self, adjustment):\n self.queryString['adjustment'] = adjustment\n\n def setFields(self, fields):\n self.queryString['fields'] = fields\n \n def setCount(self,count):\n self.queryString['count'] = count\n\n def setSessions(self,sessions):\n self.queryString['sessions'] = sessions\n \n def setDateByStartEnd(self,start,end):\n startDate = start\n endDate = end\n if start != \"\":\n if start != \"now\":\n start = start.split('-')[1]\n if start[-1] == \"M\":\n monthToDayCount = 0\n now = datetime.datetime.now()\n for i in range(int(start[:-1]), 0, -1):\n monthRange = calendar.monthrange(now.year-(i/now.month), now.month - (i%now.month))[1]\n monthToDayCount += monthRange\n startDate = datetime.datetime.now() - datetime.timedelta(days=monthToDayCount)\n elif start[-1] == \"W\":\n startDate = datetime.datetime.now() - datetime.timedelta(weeks=int(start[:-1]))\n elif start[-1] == \"D\":\n startDate = datetime.datetime.now() - datetime.timedelta(days=int(start[:-1]))\n elif start[-1] == \"h\":\n startDate = datetime.datetime.now() - datetime.timedelta(hours=int(start[:-1]))\n elif start[-1] == \"m\":\n startDate = datetime.datetime.now() - datetime.timedelta(minutes=int(start[:-1]))\n elif start[-1] == 's':\n startDate = datetime.datetime.now() - datetime.timedelta(seconds=int(start[:-1]))\n else:\n startDate = datetime.datetime.now()\n elif start == \"now\":\n startDate = datetime.datetime.now()\n\n try:\n startDate = startDate.strftime('%Y-%m-%dT%H:%M:%SZ')\n except Exception as e:\n print(e)\n\n if end != \"\":\n if end != \"now\":\n end = end.split('-')[1]\n if end[-1] == \"M\":\n monthToDayCount = 0\n now = datetime.datetime.now()\n for i in range(int(end[:-1]), 0, -1):\n monthRange = calendar.monthrange(now.year - (i / now.month), now.month - (i % now.month))[1]\n monthToDayCount += monthRange\n endDate = datetime.datetime.now() - datetime.timedelta(days=monthToDayCount)\n elif end[-1] == \"W\":\n endDate = datetime.datetime.now() - datetime.timedelta(weeks=int(end[:-1]))\n elif end[-1] == \"D\":\n endDate = datetime.datetime.now() - datetime.timedelta(days=int(end[:-1]))\n elif end[-1] == \"h\":\n endDate = datetime.datetime.now() - datetime.timedelta(hours=int(end[:-1]))\n elif end[-1] == \"m\":\n endDate = datetime.datetime.now() - datetime.timedelta(minutes=int(end[:-1]))\n elif end[-1] 
== \"s\":\n endDate = datetime.datetime.now() - datetime.timedelta(seconds=int(end[:-1]))\n else:\n endDate = datetime.datetime.now()\n elif end == \"now\":\n endDate = datetime.datetime.now()\n\n try:\n endDate = endDate.strftime('%Y-%m-%dT%H:%M:%SZ')\n except Exception as e:\n print(e)\n pass\n\n self.setStartDate(startDate)\n self.setEndDate(endDate)\n \n ","sub_path":"Events.py","file_name":"Events.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"465252177","text":"import FWCore.ParameterSet.Config as cms\n\npfsim = cms.EDProducer(\n 'PFSimParticleProducer',\n hepmcSrc = cms.InputTag('generator'),\n genSrc = cms.InputTag('genParticles'),\n verbose = cms.untracked.bool( False )\n )\n\njets = cms.EDProducer(\n 'PFSimFastJetProducer',\n particleSrc = cms.InputTag('pfsim'),\n jetPtThreshold = cms.double(5.),\n )\n\npfsimSequence = cms.Sequence(\n pfsim +\n jets\n ) \n","sub_path":"CMGTools/PFSim/python/pfsim_cff.py","file_name":"pfsim_cff.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"156675403","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom functools import reduce\n\nimport copy\n\nfrom mahjong.constants import EAST, SOUTH, WEST, NORTH\nfrom mahjong.tile import TilesConverter\nfrom utils.settings_handler import settings\nfrom mahjong.ai.shanten import Shanten\n\nlogger = logging.getLogger('tenhou')\n\n\nclass Player(object):\n # the place where is player is sitting\n # always = 0 for our player\n seat = 0\n # where is sitting dealer, based on this information we can calculate player wind\n dealer_seat = 0\n # position based on scores\n position = 0\n scores = 0\n uma = 0\n\n name = ''\n rank = ''\n\n discards = []\n # tiles that were discarded after player's riichi\n safe_tiles = []\n tiles = []\n melds = []\n table = None\n last_draw = None\n in_tempai = False\n in_riichi = False\n in_defence_mode = False\n\n # system fields\n # for local games emulation\n _is_daburi = False\n _is_ippatsu = False\n\n def __init__(self, seat, dealer_seat, table, use_previous_ai_version=False):\n self.discards = []\n self.melds = []\n self.tiles = []\n self.safe_tiles = []\n self.seat = seat\n self.table = table\n self.dealer_seat = dealer_seat\n\n if use_previous_ai_version:\n try:\n from mahjong.ai.old_version import MainAI\n # project wasn't set up properly\n # we don't have old version\n except ImportError:\n logger.error('Wasn\\'t able to load old api version')\n from mahjong.ai.main import MainAI\n else:\n if settings.ENABLE_AI:\n from mahjong.ai.main import MainAI\n else:\n from mahjong.ai.random import MainAI\n\n self.ai = MainAI(table, self)\n\n def __str__(self):\n result = u'{0}'.format(self.name)\n if self.scores:\n result += u' ({:,d})'.format(int(self.scores))\n if self.uma:\n result += u' {0}'.format(self.uma)\n else:\n result += u' ({0})'.format(self.rank)\n return result\n\n # for calls in array\n def __repr__(self):\n return self.__str__()\n\n def erase_state(self):\n self.discards = []\n self.melds = []\n self.tiles = []\n self.safe_tiles = []\n\n self.last_draw = None\n self.in_tempai = False\n self.in_riichi = False\n self.in_defence_mode = False\n\n self.dealer_seat = 0\n\n self.ai.erase_state()\n\n self._is_daburi = False\n self._is_ippatsu = False\n\n def add_called_meld(self, meld):\n self.melds.append(meld)\n\n def add_discarded_tile(self, tile):\n self.discards.append(tile)\n\n def init_hand(self, tiles):\n self.tiles = tiles\n\n self.ai.determine_strategy()\n\n def draw_tile(self, tile):\n self.last_draw = tile\n self.tiles.append(tile)\n # we need sort it to have a better string presentation\n self.tiles = sorted(self.tiles)\n\n self.ai.determine_strategy()\n\n def discard_tile(self, tile=None):\n \"\"\"\n We can say what tile to discard\n input tile = None we will discard tile based on AI logic\n :param tile: 136 tiles format\n :return:\n \"\"\"\n # we can't use if tile, because of 0 tile\n if tile is not None:\n tile_to_discard = tile\n else:\n tile_to_discard = self.ai.discard_tile()\n\n if tile_to_discard != Shanten.AGARI_STATE:\n self.add_discarded_tile(tile_to_discard)\n self.tiles.remove(tile_to_discard)\n\n return tile_to_discard\n\n def can_call_riichi(self):\n return all([\n self.in_tempai,\n\n not self.in_riichi,\n not self.is_open_hand,\n\n self.scores >= 1000,\n self.table.count_of_remaining_tiles > 4\n ])\n\n def try_to_call_meld(self, tile, is_kamicha_discard):\n return self.ai.try_to_call_meld(tile, is_kamicha_discard)\n\n @property\n def player_wind(self):\n position = self.dealer_seat\n if position == 0:\n return EAST\n 
elif position == 1:\n return NORTH\n elif position == 2:\n return WEST\n else:\n return SOUTH\n\n @property\n def is_dealer(self):\n return self.seat == self.dealer_seat\n\n @property\n def is_open_hand(self):\n return len(self.melds) > 0\n\n @property\n def closed_hand(self):\n tiles = self.tiles[:]\n meld_tiles = [x.tiles for x in self.melds]\n if meld_tiles:\n meld_tiles = reduce(lambda z, y: z + y, [x.tiles for x in self.melds])\n return [item for item in tiles if item not in meld_tiles]\n\n @property\n def meld_tiles(self):\n \"\"\"\n Array of array with 34 tiles indices\n :return: array\n \"\"\"\n melds = [x.tiles for x in self.melds]\n melds = copy.deepcopy(melds)\n for meld in melds:\n meld[0] //= 4\n meld[1] //= 4\n meld[2] //= 4\n return melds\n\n def format_hand_for_print(self, tile):\n hand_string = '{} + {}'.format(\n TilesConverter.to_one_line_string(self.closed_hand),\n TilesConverter.to_one_line_string([tile])\n )\n if self.is_open_hand:\n melds = []\n for item in self.melds:\n melds.append('{}'.format(TilesConverter.to_one_line_string(item.tiles)))\n hand_string += ' [{}]'.format(', '.join(melds))\n return hand_string\n","sub_path":"project/mahjong/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"211494714","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Go through all Russian lemmas looking for headwords with secondary stress in them.\n\nimport pywikibot, re, sys, argparse\nimport unicodedata\n\nimport blib\nfrom blib import getparam, rmparam, tname, msg, site\n\nimport rulib\nimport runounlib\n\nGR = \"\\u0300\" # grave = ̀\n\nru_normal_head_templates = [\"ru-noun\", \"ru-proper noun\", \"ru-verb\", \"ru-adj\",\n \"ru-adv\", \"ru-phrase\", \"ru-noun form\", \"ru-diacritical mark\",\n \"ru-noun-alt-ё\", \"ru-adj-alt-ё\", \"ru-verb-alt-ё\"]\n\noverall_head_count = {}\ncat_head_count = {}\n\ndef has_secondary_stress(text):\n return GR in unicodedata.normalize(\"NFD\", str(text))\n\ndef output_heads_seen(overall=False):\n if overall:\n dic = overall_head_count\n msg(\"Overall templates seen:\")\n else:\n dic = cat_head_count\n msg(\"Templates seen per category:\")\n for head, count in sorted(dic.items(), key=lambda x:-x[1]):\n msg(\" %s = %s\" % (head, count))\n\ndef process_text_on_page(index, pagetitle, text):\n global args\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n notes = []\n\n parsed = blib.parse_text(text)\n found_page_head = False\n for t in parsed.filter_templates():\n found_this_head = False\n tn = tname(t)\n if tn in ru_normal_head_templates:\n heads = blib.fetch_param_chain(t, \"1\", \"head\")\n for head in heads:\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n elif tn == \"head\" and getparam(t, \"1\") == \"ru\":\n heads = blib.fetch_param_chain(t, \"head\", \"head\")\n for head in heads:\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n elif tn in [\"ru-noun+\", \"ru-proper noun+\", \"ru-noun-table\", \"ru-noun-old\"]:\n per_word_objs = runounlib.split_noun_decl_arg_sets(t, pagemsg)\n for per_word in per_word_objs:\n for arg_set in per_word:\n if has_secondary_stress(arg_set[1]):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (\n arg_set[1], str(t)))\n elif tn == \"ru-decl-adj\":\n head = getparam(t, \"1\")\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n\nparser = blib.create_argparser(\"Find Russian terms with secondary stress in the headword\",\n include_pagefile=True, include_stdin=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,\n default_cats=[\"Russian lemmas\", \"Russian non-lemma forms\"])\n","sub_path":"find_ru_headword_secondary_stress.py","file_name":"find_ru_headword_secondary_stress.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"253469087","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/6/19 17:05\n# @Author : Zcs\n# @File : send_log(logging).py\n\n# 使用tcp协议向日志服务器发送日志\nimport logging.handlers\nimport socket\n\nmsg = 'test'\nip = '192.168.205.141'\nport = 601\nlogger = logging.getLogger('SysLogger')\n# socktype -- tcp:socket.SOCK_STREAM -- udp:socket.SOCK_DGRAM\nfh = logging.handlers.SysLogHandler((ip, port), logging.handlers.SysLogHandler.LOG_AUTH, socktype=socket.SOCK_STREAM)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.warning(msg)\nfh.close()\n","sub_path":"my_log/send_log(logging).py","file_name":"send_log(logging).py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"101821326","text":"from django.db.models.base import ModelBase as BaseModelBase\nfrom django.db.models import Field\nfrom api.serializers import ModelSerializer\nfrom collections import defaultdict\nimport importlib\nfrom django.utils.translation import activate, deactivate, get_language, ugettext\n\n\nclass Registry(dict):\n pass\n\n\nregistry = Registry()\n\n\nclass AlreadyRegisteredException(Exception):\n pass\n\n\nclass ApiMeta(type):\n def __new__(cls, cls_name, super_classes, attrs):\n if cls_name != \"BaseApi\":\n return super().__new__(cls, cls_name, super_classes, attrs)()\n else:\n return super().__new__(cls, cls_name, super_classes, attrs)\n\n\nclass BaseApi(metaclass=ApiMeta):\n def __init__(self, *args, **kwargs):\n self.serializer_store = dict()\n self.extra_kwargs = getattr(self, \"extra_kwargs\", {})\n\n def contribute_to_class(self, model, name):\n self.model = model\n # try:\n setattr(model, name, self)\n # self.resource_name = getattr(self, \"resource_name\", self._resource_name)\n\n registry[self.resource_name] = self\n\n @property\n def base_name(self):\n lang = get_language()\n deactivate()\n name = ugettext(self.model._meta.verbose_name_plural)\n activate(lang)\n return name\n\n @property\n def url_name(self):\n return \"_\".join(self.base_name.lower().split())\n\n @property\n def resource_name(self):\n return \"-\".join(self.base_name.lower().split())\n\n @property\n def url_path(self):\n return self.resource_name\n\n def fields(self, key=\"default\", remove=[]):\n for field in self.model._meta.get_fields(include_hidden=False):\n\n name = field.name\n if not name in self.extra_kwargs:\n self.extra_kwargs.update({name: {}})\n\n extra_kwargs = {}\n\n if (\n hasattr(field, \"many_to_many\")\n and hasattr(field, \"one_to_many\")\n and (field.many_to_many or field.one_to_many)\n ):\n extra_kwargs.update({\"many\": True})\n\n model = getattr(field, \"related_model\", self.model)\n\n if not isinstance(field, Field):\n field = field.remote_field\n serializer_conf = getattr(field, \"related_serializers\", {})\n extra_kwargs.update({\"read_only\": True})\n\n else:\n serializer_conf = getattr(field, \"serializers\", {})\n\n if key in serializer_conf:\n yield name, field, serializer_conf.get(key), model, extra_kwargs\n\n def get_serializer(\n self, key,\n ):\n\n if key in self.serializer_store:\n return self.serializer_store.get(key)\n attrs = {}\n _fields = [\"pk\"]\n\n serializer_name = f\"{self.model.__name__}{key.title()}Serializer\"\n\n for name, field, conf, model, extra_kwargs in self.fields(key):\n self.extra_kwargs[name].update(extra_kwargs)\n _fields.append(name)\n if type(conf) == str:\n serializer = model.Api.get_serializer(conf)\n attrs.update({name: serializer(**extra_kwargs)})\n elif conf is not None:\n attrs.update({name: conf})\n _key = key\n\n class Meta:\n model = self.model\n fields = _fields\n key = _key\n\n serializer = type(serializer_name, (ModelSerializer,), {\"Meta\": Meta, **attrs})\n # self.serializer_store[key] = serializer\n return serializer\n\n @property\n def search_fields(self):\n return []\n\n def register_serializer(self, serializer):\n\n key = (\n getattr(\n serializer.Meta,\n \"key\",\n serializer.__name__.replace(self.model.__name__, \"\")\n .replace(\"Serializer\", \"\")\n .lower(),\n )\n or \"default\"\n )\n if key in self.serializer_store:\n raise AlreadyRegisteredException(\n f\"Serializer with key:{key} is already registered for {self.model.__name__} api\"\n )\n else:\n self.serializer_store[key] = serializer\n\n if not 
hasattr(serializer.Meta, \"key\"):\n setattr(serializer.Meta, \"key\", key)\n\n","sub_path":"{{cookiecutter.project_slug}}/api/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"575050907","text":"#!/usr/bin/env python\n\n'''\nA game to be played through a socket connection, arbitred by a controller\nusing GTP.\n\nAlternatively, the commands can be read and sent by the console.\n'''\nfrom GTPEngine import GTPEngine\nfrom optparse import OptionParser\nfrom socket import socket, AF_INET, SOCK_STREAM\nimport sys\nfrom pygo1963.model.PlayersFactory import ALPHA_BETA_KEY, HUMAN_KEY, RANDOM_KEY,\\\n create_player\nfrom pygo1963.view.View import View, GameView\nfrom pygo1963.view.main import view_loop\n\nclass NetworkGame():\n \"\"\" A Go game to be played through a remote connection. \"\"\"\n \n def __init__(self, engine, use_sockets=False, host=None, port=None):\n \n self.engine = engine\n \n self.next_move = None\n \n if options.use_sockets:\n logger.write('trying to make socket on %s, %d\\n' % (options.host,\n options.port)) \n self.sockobj = socket(AF_INET, SOCK_STREAM)\n self.sockobj.connect((options.host, options.port))\n \n logger.write('making socket file.\\n')\n self.in_file = self.out_file = self.sockobj.makefile() \n \n else:\n self.in_file = sys.stdin\n self.out_file = sys.stdout\n \n self.sockobj = None\n \n def __getattr__(self, name):\n \n if name == 'board':\n return self.engine.board\n else:\n raise AttributeError\n \n #TODO cerrar archivos cuando termina\n def play(self):\n \"\"\" Game loop. \"\"\"\n \n logger.write('starting play.\\n')\n while not self.engine.received_quit:\n \n cmd = ''\n while not cmd: \n cmd = self._preprocess_command(self.in_file.readline())\n logger.write('command read: ' + cmd)\n \n response = engine.process_command(cmd)\n logger.write('response sent: ' + response)\n \n self.out_file.write(response)\n self.out_file.flush()\n \n self.finish()\n\n def _preprocess_command(self, command):\n \"\"\" Removes control characters and comments from the command. \"\"\"\n \n #Remove control chars except HT and LF\n del_chars = ''.join(chr(n) for n in range(9) + range(11,32)) \n command = command.translate(None, del_chars)\n \n #Convert HT to spaces\n command.replace('\\t', ' ')\n \n #remove comments\n index = command.find('#')\n if index != -1:\n command = command[:index]\n \n #if its only whitespaces make it empty\n if command.isspace():\n return ''\n \n return command\n \n def finish(self):\n \"\"\" Makes the necessary cleanup after the game has finished. 
\"\"\"\n \n if self.sockobj:\n self.out_file.close()\n self.sockobj.close()\n \n\n\ndef parse_options():\n parser = OptionParser()\n parser.add_option('-c', '--color', action='store', dest='color',\n type='choice', choices=('b', 'w', 'black', 'white'), \n default='b')\n parser.add_option('-e', '--engine', action='store', dest='player',\n type='choice', choices=(ALPHA_BETA_KEY, HUMAN_KEY, RANDOM_KEY), \n default=ALPHA_BETA_KEY)\n parser.add_option('-s', '--use-sockets', action='store_true', \n dest='use_sockets', default=False)\n parser.add_option('-i', '--host', action='store', dest='host', \n default='localhost')\n parser.add_option('-p', '--port', action='store', dest='port', type=\"int\",\n default='50007')\n return parser.parse_args()[0]\n\n\n#MAIN PROGRAM\nif __name__ == \"__main__\":\n options = parse_options()\n \n view = View() \n \n logger = open('network.%s.log' % options.color, 'w')\n \n engine = GTPEngine(create_player(options.player, options.color, view.controller)) \n game = NetworkGame(engine, options.use_sockets, options.host, options.port)\n GameView(view, game)\n\n view_loop(view)\n \n logger.close()\n ","sub_path":"src/pygo1963/network_game/network_game.py","file_name":"network_game.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"249435306","text":"\n\"\"\" \nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \n\"\"\" \n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of visclaw.data.ClawPlotData.\n Output: a modified version of plotdata.\n \n \"\"\" \n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n # Figure for q[0]\n plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(211)'\n \n #plotaxes.xlimits = [0.,150.]\n plotaxes.ylimits = [-1.,1.0]\n plotaxes.title = 'Pressure'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 0\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':2,'markersize':5}\n \n\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(212)'\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = [-1.,1.]\n plotaxes.title = 'Velocity'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 1\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':3,'markersize':5}\n \n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via visclaw.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html'\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n \n","sub_path":"fvmbook/chap7/standing/setplot.py","file_name":"setplot.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"380711015","text":"from RobotArm import RobotArm\r\n\r\nrobotArm = RobotArm('exercise 11')\r\nrobotArm.speed = 3\r\n\r\n[robotArm.moveRight() for movement in range (8)] # begin helemaal aan de rechter kant!!!!!\r\nfor movement in range (9): # anders leg je misschien een wit blok op een ander wit blok\r\n robotArm.grab() # en dan kan het onderste witte blok niet verplaatst worden\r\n color = robotArm.scan()\r\n if color == 'white':\r\n robotArm.moveRight()\r\n robotArm.drop()\r\n [robotArm.moveLeft() for movement in range (2)]\r\n else:\r\n robotArm.drop()\r\n robotArm.moveLeft()\r\n\r\nrobotArm.wait()\r\nfrom RobotArm import RobotArm\r\n","sub_path":"ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"263425743","text":"\"\"\"\nA helper script to install `pandoc` to a user-writable\nlocation in their `PATH`.\n\"\"\"\nimport os\nimport tarfile\nimport tempfile\nimport subprocess\n\nurl = 'https://github.com/jgm/pandoc/releases/download/2.18/pandoc-2.18-linux-amd64.tar.gz'\n\nif __name__ == '__main__':\n # the values of PATH\n candidates = os.environ['PATH'].split(':')\n # locations that are writeable by current user\n writeable = list(c for c in candidates if\n os.access(c, os.W_OK))\n\n if len(writeable) == 0:\n raise ValueError('no writeable locations in path!')\n\n # we have multiple writeable locations in the user's path\n # rank the writeable locations by a metric\n # here we're just using the path length\n # this is totally arbitrary and could be whatever\n score = [len(w) for w in writeable]\n\n # take the writeable location with the lowest \"score\"\n target = writeable[score.index(min(score))]\n\n with tempfile.TemporaryDirectory() as d:\n subprocess.check_call(['wget', '-q', url, '-P', d])\n\n with tarfile.open(os.path.join(d, url.split('/')[-1])) as t:\n for member in t.getmembers():\n if member.name.split('/')[-1] == 'pandoc':\n data = t.extractfile(member).read()\n break\n\n if data is None or len(data) == 0:\n raise ValueError('unable to extract pandoc!')\n\n full = os.path.join(target, 'pandoc')\n print(f'writing `pandoc` to {full}')\n with open(full, 'wb') as f:\n f.write(data)\n\n # make binary executable\n os.chmod(full, 755)\n","sub_path":"docker/builds/pandoc.py","file_name":"pandoc.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"12752075","text":"#\n# @lc app=leetcode id=52 lang=python3\n#\n# [52] N-Queens II\n#\n# https://leetcode.com/problems/n-queens-ii/description/\n#\n# algorithms\n# Hard (52.66%)\n# Total Accepted: 103.8K\n# Total Submissions: 196.6K\n# Testcase Example: '4'\n#\n# The n-queens puzzle is the problem of placing n queens on an n×n chessboard\n# such that no two queens attack each other.\n#\n#\n#\n# Given an integer n, return the number of distinct solutions to the n-queens\n# puzzle.\n#\n# Example:\n#\n#\n# Input: 4\n# Output: 2\n# Explanation: There are two distinct solutions to the 4-queens puzzle as shown\n# below.\n# [\n# [\".Q..\", // Solution 1\n# \"...Q\",\n# \"Q...\",\n# \"..Q.\"],\n#\n# [\"..Q.\", // Solution 2\n# \"Q...\",\n# \"...Q\",\n# \".Q..\"]\n# ]\n#\n#\n#\nclass Solution:\n def totalNQueens(self, n: int) -> int:\n total = [0]\n q_list = [0 for _ in range(n)]\n\n def _total(n, q_list, row, total):\n if row == n:\n total[0] += 1\n return\n for col in range(n):\n if isConfict(q_list, row, col):\n q_list[row] = col\n _total(n, q_list, row + 1, total)\n\n def isConfict(q_list, row, col):\n for i in range(row):\n if q_list[i] == col - row + i:\n return False\n if q_list[i] == col + row - i:\n return False\n if q_list[i] == col:\n return False\n return True\n _total(n, q_list, 0, total)\n return total[0]\n\n# 经典的dps题目,记录一下\n","sub_path":"leetcode/52.n-queens-ii.py","file_name":"52.n-queens-ii.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"137191940","text":"from logging.config import dictConfig\r\nfrom os import path\r\n\r\n_bot_root = path.dirname(path.dirname(path.abspath(__file__)))\r\n\r\nBOT_TOKEN = None\r\nMOE_URL = \"http://localhost:3000/\"\r\nPATH_TO_BOT_DB: str = path.join(_bot_root, path.join(\"data\", \"bot_database.db\"))\r\nLOG_PATH = path.join(_bot_root, path.join(\"data\", \"bot.log\"))\r\nREQUEST_KWARGS = {\r\n \"proxy_url\": \"socks5://127.0.0.1:9050\"\r\n}\r\n_REQUEST_KWARGS_EXAMPLE = {\r\n \"proxy_url\": \"socks5 OR socks5h://URL_OF_THE_PROXY_SERVER:PROXY_PORT\",\r\n # Optional, if you need authentication:\r\n \"urllib3_proxy_kwargs\": {\r\n \"username\": \"PROXY_USER\",\r\n \"password\": \"PROXY_PASS\",\r\n }\r\n}\r\n\r\nLOGGING = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": True,\r\n \"formatters\": {\r\n \"base\": {\r\n \"format\": \"%(asctime)s %(levelname)s | %(pathname)s:%(funcName)s:%(lineno)d | %(message)s\",\r\n },\r\n },\r\n \"handlers\": {\r\n \"console\": {\r\n \"level\": \"INFO\",\r\n \"class\": \"logging.StreamHandler\",\r\n \"formatter\": \"base\"\r\n },\r\n \"file\": {\r\n \"level\": \"INFO\",\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"filename\": LOG_PATH,\r\n \"formatter\": \"base\",\r\n \"maxBytes\": 1024 * 1024 * 100,\r\n },\r\n },\r\n \"loggers\": {\r\n \"general\": {\r\n \"handlers\": [\"file\", \"console\"],\r\n \"level\": \"INFO\",\r\n },\r\n }\r\n}\r\n\r\ntry:\r\n from .local_settings import *\r\nexcept ImportError:\r\n pass\r\n\r\ndictConfig(LOGGING)\r\n","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"220082717","text":"# ###### /=================================================\\\n# ####### | CLASS EXAMPLE FOR \"COMPUTER SCIENCES\" (07JCJ**) |\n# #### \\ | https://github.com/squillero/computer-sciences |\n# ###G c\\ | |\n# ## _\\ | © 2020 Giovanni Squillero |\n# | _/ | Free for personal or classroom use. |\n# | _/ \\=================================================/\n\n\ndef safe_int(whatever):\n try:\n value = int(whatever)\n except ValueError as exception:\n print(f\"Yeuch: {str(exception)}\")\n value = 0\n return value\n\n\ndef main():\n while True:\n user_input = input(\"> \")\n value = safe_int(user_input)\n print(f\"Value is {value}\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/src/2020-21/20201207_try-execpt_1.py","file_name":"20201207_try-execpt_1.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"511233598","text":"from elements import *\n\nshape_dict = {\"process\" : \"oval\", \"file_store\" : \"rectangle\"}\n\n#\n# Object helper functions\n#\ndef get_elem_by_id(id):\n\tfor element in elements:\n\t\ttry:\n\t\t\tif element.id == id:\n\t\t\t\treturn element\n\t\texcept:\n\t\t\tcontinue\n\ndef get_connected_items(id):\n\tfor element in elements:\n\t\ttry:\n\t\t\tif element.id == id:\n\t\t\t\treturn element.connected_items\n\t\texcept:\n\t\t\tcontinue\n\ndef get_conn_elems(ein):\n\tconn_tag = ein.part_of\n\tconn_elems = []\n\tfor e in elements:\n\t\tif (e.part_of == conn_tag):\n\t\t\tconn_elems.append(e)\n\n\treturn conn_elems\n\n\t\n\n\ndef is_object(id):\n\tfor word in shape_dict.keys():\n\t\ttry:\n\t\t\tif get_elem_by_id(id).tag.find(word) != -1:\n\t\t\t\tif get_elem_by_id(id).tag.find(\"_text\") == -1:\n\t\t\t\t\treturn True\n\t\texcept:\n\t\t\tcontinue\n\treturn False\n\ndef is_text_object(id):\n\ttry:\n\t\tif get_elem_by_id(id).tag.find(\"_text\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_left_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"left_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_right_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"right_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_center_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"center_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_cline(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"connecting_line\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\tFalse\n\ndef is_a_connector(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.part_of.find(\"connector\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\n","sub_path":"build/lib.linux-i686-2.7/tmtool/items/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"487789176","text":"import thread\nimport os\nimport time\n\ndef countchar(pos):\n# updates lenlist, do not count EOL as part of line length\n\tglobal lenlist, nthread, readlock, file, listlock, threadlock\n\n\tindex = 0\n\n\treadlock.acquire()\n\tfile.seek(0)\n\tline = file.read(pos)\n\treadlock.release()\n\n\t# gets the line number (index) to start counting bytes for\n\tfor i in line:\n\t\tif i == '\\n':\n\t\t\tindex += 1\n\n\tfor i in range(splitnum):\n\t\t# place locks here because we want to make sure that the threads are reading from where they left off when suspended\n\t\treadlock.acquire()\n\t\tfile.seek(pos)\n\t\tchar = file.read(1)\n\t\treadlock.release()\n\n\t\tif char != '\\n':\n\t\t\tlistlock.acquire()\n\t\t\tlenlist[index] += 1\n\t\t\tlistlock.release()\n\t\telse: # goes to the next line\n\t\t\tindex += 1\n\n\t\tpos += 1\n\n\tthreadlock.acquire()\n\tnthread -= 1;\n\tthreadlock.release()\n\ndef linelengths(filenm, ntrh):\n# returns a Python list, the ith element of which is the number of characters in line i of the file.\n\tstartTime = time.time()\n\tglobal lenlist, nthread, splitnum, readlock, file, listlock, threadlock\n\n\treadlock = thread.allocate_lock()\n\tlistlock = thread.allocate_lock()\n\tthreadlock = thread.allocate_lock()\n\tnthread = ntrh\n\tfile = open(filenm, 'r')\n\tlenlist = sum(1 for i in file) * [0]\n\tnbytes = os.path.getsize(filenm)\n\tsplitnum = nbytes/ntrh\n\tstartpos = 0\n\n\tfor i in range(ntrh):\n\t\tif i != 0:\n\t\t\tstartpos += splitnum\n\t\tthread.start_new_thread(countchar, (startpos,))\n\n\twhile nthread > 0: # busy wait\n\t\tpass\n\n\tprint(\"Threaded version took %s seconds.\" % (time.time() - startTime))\n\t#return lenlist\n","sub_path":"HW2/Threaded.py","file_name":"Threaded.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"637819599","text":"import os\nimport pickle\nfrom sklearn.datasets.base import Bunch\nimport codecs\n#Bunch 类提供了一种key,value的对象形式\n#target_name 所有分类集的名称列表\n#label 每个文件的分类标签列表\n#filenames 文件路径\n#contents 分词后文件词向量形式\n\n'''\n本程序用来将文本变成词向量bunch对象的形式\n'''\n\ndef _readfile(path):\n\tfp = codecs.open(path, \"r\", 'utf-8')\n\tcontent = fp.read()\n\t# print(content)\n\tfp.close()\n\treturn content\n\n\ndef segment2Bunch():\n\t\n\t# wordbag_path=\"train_word_bag/train_set.dat\"\n\t# seg_path=\"train_corpus_seg/\"\n\t# wordbag_path=\"test_word_bag/test_set.dat\"\n\t# seg_path=\"test_seg/\"\n\n\ttrain_test_dict = {'train':[\"train_word_bag/train_set.dat\", \"train_corpus_seg/\"], \\\n\t\t\t\t\t'test':[\"test_word_bag/test_set.dat\", \"test_corpus_seg/\"]}\n\n\tfor i in train_test_dict.keys():\n\n\t\tbunch=Bunch(target_name=[],label=[],filenames=[],contents=[])\n\n\t\twordbag_path = train_test_dict[i][0]\n\t\tseg_path = train_test_dict[i][1]\n\t\t# print(wordbag_path, seg_path)\n\t\tcatelist = os.listdir(seg_path)\n\t\tbunch.target_name.extend(catelist)#将类别信息保存到Bunch对象\n\t\tfor mydir in catelist:\n\t\t\tclass_path = seg_path+mydir+\"/\"\n\t\t\tfile_list = os.listdir(class_path)\n\t\t\tfor file_path in file_list:\n\t\t\t\tfullname = class_path + file_path\n\t\t\t\tbunch.label.append(mydir)#保存当前文件的分类标签\n\t\t\t\tbunch.filenames.append(fullname)#保存当前文件的文件路径\n\t\t\t\tbunch.contents.append(_readfile(fullname).strip())#保存文件词向量\n\n\t\t#Bunch对象持久化\n\t\tfile_obj=open(wordbag_path,\"wb\")\n\t\tpickle.dump(bunch,file_obj)\n\t\tfile_obj.close()\n\n\tprint(\"构建文本对象结束\")\n\n\nif __name__ == '__main__':\n\tsegment2Bunch()\n","sub_path":"THU_naive_bayes/segment2Bunch.py","file_name":"segment2Bunch.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"322770955","text":"from pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport pickle\n\n\nNULL_ID = ObjectId(\"000000000000000000000000\")\n\n\nclass Database:\n def __init__(self, mongo_port = 27017):\n self.client = MongoClient('localhost', mongo_port)\n self.db = self.client.poker\n self.games = self.db.games\n self.meta = self.db.meta\n self.players = self.db.players\n self.features = self.db.features\n self.opponentmodels = self.db.opponentmodels\n\n self.meta.update({ '_id': 1 },\n { '$setOnInsert': {'lastProcessedGame': NULL_ID}, },\n upsert = True)\n\n def add_game(self, game):\n object_id = self.games.insert_one(game).inserted_id\n return object_id\n\n\n @property\n def last_processed_game(self):\n return self.meta.find_one({'_id': 1})['lastProcessedGame']\n\n\n @last_processed_game.setter\n def last_processed_game(self, game_id):\n self.meta.update({'_id':1}, {\"$set\": {'lastProcessedGame' : game_id}})\n\n\n def get_games(self):\n return self.games.find({'_id': {'$gt' : self.last_processed_game}})\n\n\n @property\n def unprocessed_game_count(self):\n return self.games.count({'_id': {'$gt' : self.last_processed_game}})\n\n\n def add_player_model(self, player_name, model):\n model_data = pickle.dumps(model)\n update = {\n '$set' : {'model' : pickle.dumps(model)},\n '$setOnInsert' : {'name' : player_name}\n }\n self.players.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_model(self, player_name):\n found = self.players.find_one({'name' : player_name})\n if found is not None:\n return pickle.loads(found['model'])\n else:\n return None\n\n\n def add_player_stat(self, player_name, stat_name, stat_value):\n update = {\n '$set' : {stat_name : stat_value},\n '$setOnInsert' : {'name' : player_name}\n }\n self.players.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_stat(self, player_name, stat_name):\n found = self.players.find_one({'name' : player_name}, {stat_name : 1})\n if found is not None:\n return found[stat_name]\n else:\n return None\n\n\n def add_player_features(self, player_name, inputs, responses):\n update = {\n '$set' : {'inputs' : inputs, 'responses':responses},\n '$setOnInsert' : {'name' : player_name}\n }\n self.features.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_features(self, player_name):\n return self.features.find_one({'name': player_name})\n","sub_path":"pokerbot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"258008884","text":"# Inspired from: https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_softmax.py\n\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\n\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/beginners\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nif __name__ == '__main__':\n np.random.seed(12345678)\n tf.set_random_seed(87654321)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dtype\", type=str, default='float32', help='Input and output dtype')\n parser.add_argument(\"--nbatch\", type=int, default=64, help='Batch size of the layer')\n parser.add_argument(\"--nin\", type=int, default=100, help='Input size of the layer')\n parser.add_argument(\"--nout\", type=int, default=10, help='Output size of the layer')\n parser.add_argument(\"--nsteps\", type=int, default=1000, help='Number of training steps')\n args = parser.parse_args()\n\n # Create the model\n x = tf.placeholder(args.dtype, [None, args.nin])\n W = tf.Variable(tf.zeros([args.nin, args.nout], dtype=args.dtype))\n b = tf.Variable(tf.zeros([args.nout], dtype=args.dtype))\n y = tf.matmul(x, W) + b\n\n # Define loss and optimizer\n y_ = tf.placeholder(args.dtype, [None, args.nout])\n\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n # outputs of 'y', and then average across the batch.\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # tf.global_variables_initializer().run()\n # Train\n for i in range(args.nsteps):\n data = np.random.normal(size=(args.nbatch, args.nin)).astype(args.dtype)\n target = np.zeros((args.nbatch, args.nout), dtype=args.dtype)\n target[np.arange(args.nbatch), np.random.randint(0, args.nout, args.nbatch)] = 1\n\n sess.run(train_step, feed_dict={x: data, y_: target})\n if (i + 1) % 100 == 0:\n print(\"Step %d/%d\" % (i + 1, args.nsteps))\n print('End')\n","sub_path":"tutorials/tensorflow/old_benchmark/_backup_benchmark_softmax.py","file_name":"_backup_benchmark_softmax.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"227341089","text":"def load_stock(filename):\n file = open(filename, \"r\")\n stock_list = []\n for line in file:\n line = line.strip()\n item = line.split(\",\")\n item[1] = int(item[1])\n item[2] = int(item[2])\n stock_list.append(item)\n file.close()\n stock_list.sort()\n\n return stock_list\n\ndef store_stock(stock_list):\n file = open(\"stock.txt\", \"w\")\n for line in stock_list:\n line[1] = str(line[1])\n line[2] = str(line[2])\n item = \",\".join(line) + \"\\n\"\n file.write(item)\n file.close()\n\ndef take_name(stock_list):\n while True:\n res = \" \"\n name = input(\"What you want to buy? >>>\")\n for item in stock_list:\n if item[0] == name:\n break\n if item[0] == name:\n break\n else:\n print (\"Sorry, we do not have a stock for \" + name + \".\")\n res = input(\"Do you want to buy other item? (y/n)>>>\")\n if res == \"n\" :\n break \n if res == \" \":\n return item\n else:\n return []\n \ndef take_quant(item):\n while True:\n res = \" \"\n try:\n qty = int(input(\"How many? >>>\"))\n if qty > item[1]:\n print (\"Sorry, we have only %5d items.\" % item[2])\n res = input(\"Would you buy? (y/n)>>>\")\n if res == \"y\":\n qty = item[1]\n break \n except:\n print (\"Type in a number. >>>\")\n if res == \"y\" or res == \" \":\n return qty\n else:\n return 0\n\ndef take_input(stock_list):\n item = take_name(stock_list)\n if item != []:\n quant = take_quant(item)\n else:\n quant = 0\n return item, quant\n \ndef sell(stock_list, sales_hist):\n item, quant = take_input(stock_list)\n if item == []:\n return\n item[1] -= quant\n amount = item[2] * quant\n print (\"item = \", item[0], \"; price = \", item[2], \"; quanity = \", quant, \\\n \"; amount = \", amount)\n sales_hist.append((item[0], item[2], quant, amount))\n \ndef print_stock(stock_list):\n print (\"\\n\", \" \" * 20 + \"STOCK REPORT\")\n print (\"Name price quatity amount\")\n for item in stock_list:\n print (\"%-10s %5d %5d %6d\" % (item[0], item[2], item[1], \\\n item[1] * item[2]))\n \n \ndef print_sales(sales_hist):\n print (\" \" * 20 + \"SALES REPORT\")\n print (\"Name price quatity amount\")\n for item in sales_hist:\n print (\"%-10s %5d %5d %6d\" % (item[0], item[1], item[2], item[3]))\n\"\"\"\nWhat would you like to do?\n S: Sell item \n P: Print stock\n R: Report sales\n E: Exit\nEnter your choice (S, P, R, or E)>>\n\"\"\"\ndef show_menu():\n print (\"\\n\", \"What would you like to do?\")\n print (\" S: Sell an item\")\n print (\" P: Print stock\")\n print (\" R: Report sales\")\n print (\" E: Exit\")\n return input (\"Enter your choice (S, P, R, or E))>>>\")\n\ndef input_error(s):\n print (s + \"?\" + \"I beg your pardon.\")\n \ndef main(): \n stock_list = load_stock(\"stock.txt\")\n sales_hist = []\n while True:\n s = show_menu()\n if s == \"E\":\n break\n elif s==\"S\":\n sell( stock_list, sales_hist)\n elif s ==\"P\":\n print_stock(stock_list)\n elif s == \"R\":\n print_sales(sales_hist)\n else:\n input_error(s)\n store_stock(stock_list)\n\nmain()\n","sub_path":"2020/20200410/homework05.py","file_name":"homework05.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"116130064","text":"#!/usr/bin/env python3\n\"\"\"\nGiven a sorted (in ascending order) integer array nums of n elements\nand a target value, write a function to search target in nums. If\ntarget exists, then return its index, otherwise return -1.\n\n\nExamples:\n\n Input: nums = [-1,0,3,5,9,12], target = 9\n Output: 4\n Explanation: 9 exists in nums and its index is 4\n\n\n Input: nums = [-1,0,3,5,9,12], target = 2\n Output: -1\n Explanation: 2 does not exist in nums so return -1\n \nNote:\n - You may assume that all elements in nums are unique.\n - n will be in the range [1, 10000].\n - The value of each element in nums will be in the range [-9999, 9999].\n\nReference:\n - https://leetcode.com/problems/binary-search/ (Easy)\n - https://www.geeksforgeeks.org/binary-search/\n\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n\n def search_v1(self, nums: List[int], target: int):\n \"\"\"Recursion.\"\"\"\n def helper(nums, target, L, R):\n if R < L :\n return -1\n else:\n mid = (L + R) // 2\n if target == nums[mid]:\n return mid\n elif target > nums[mid]:\n return helper(nums, target, mid+1, R)\n else:\n return helper(nums, target, L, mid-1)\n return helper(nums, target, 0, len(nums)-1)\n\n def search_v2(self, nums: List[int], target: int):\n \"\"\"Loop.\"\"\"\n L, R = 0, len(nums)-1\n while L <= R:\n mid = (L + R) // 2\n if target == nums[mid]:\n return mid\n elif target > nums[mid]:\n L = mid + 1\n else: \n R = mid - 1\n return -1\n\n\ndef main():\n a = [-1, 0, 3, 5, 9, 12]\n test_data = [\n [a, 9],\n [a, 2],\n [a, -1],\n ]\n\n sol = Solution()\n for arr, target in test_data:\n print(f\"# Input: {arr}, target = {target}\")\n print(\" - Output v1 = {}\".format(sol.search_v1(arr, target)))\n print(\" - Output v2 = {}\".format(sol.search_v2(arr, target)))\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"python3/sorting_and_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"467156959","text":"import torch\n\nclass Encoder(torch.nn.Module):\n def __init__(self, hidden_size, vocab_size, embedding_dim, num_layers=1, bidirectional=False):\n super(Encoder, self).__init__()\n self.hidden_size = hidden_size\n # self.embedding = torch.nn.Embedding.from_pretrained(weights)\n self.embedding = torch.nn.Embedding(vocab_size, embedding_dim)\n self.num_layers = num_layers\n self.bidirectional = bidirectional\n self.lstm = torch.nn.LSTM(embedding_dim, hidden_size, dropout=0.2, num_layers=num_layers, bidirectional=bidirectional)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n def forward(self, inputs, hidden):\n ips, lengths = inputs\n inputs = self.embedding(ips)\n packed_embedded = torch.nn.utils.rnn.pack_padded_sequence(inputs, lengths)\n outputs, hidden = self.lstm(packed_embedded, hidden)\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\n return outputs, hidden\n \n def init_hidden(self, batch_size):\n s = self.num_layers * (2 if self.bidirectional else 1)\n return (torch.zeros(size=(s, batch_size, self.hidden_size), device=self.device), torch.zeros(size=(s, batch_size, self.hidden_size), device=self.device))\n","sub_path":"Encoder.py","file_name":"Encoder.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"590578585","text":"import matplotlib.pyplot as plt\r\nimport neural_network as nn\r\nimport graphics as g\r\nimport random\r\nimport time\r\nimport math\r\n\r\n'''\r\n File name: main.py\r\n Author: Michael Berge\r\n Date created: 7/19/2018\r\n Date modified: 5/2/2020\r\n Python Version: 3.8.1\r\n'''\r\n\r\ndef main():\r\n num_i = 2\r\n num_h1 = 5\r\n num_h2 = 5\r\n num_o = 1\r\n args = [num_i, num_h1, num_o, num_h2]\r\n\r\n neural_network = nn.NeuralNetwork(args)\r\n start_time = time.time()\r\n plt.title(\"Training Data Improvement\")\r\n plt.xlabel(\"Iterations\")\r\n plt.ylabel(\"Guess\")\r\n\r\n l1 = []\r\n l2 = []\r\n l3 = []\r\n l4 = []\r\n\r\n # Number of training iterations\r\n epoch = 1000\r\n\r\n # Graphics object\r\n gr = g.Graphics(args)\r\n\r\n for i in range(epoch):\r\n file = open(\"training_data.txt\", \"r\")\r\n arr = []\r\n\r\n for j in range(4):\r\n str_ = file.readline()\r\n str_split = str_.split(\" \")\r\n arr.append(str_split)\r\n random.shuffle(arr)\r\n\r\n for j in range(4):\r\n input_ = [int(arr[j][0]), int(arr[j][1])]\r\n target = [int(arr[j][2].strip())]\r\n neural_network.train(input_, target, args, gr)\r\n\r\n if i % (epoch / 100) == 0:\r\n l1.append(neural_network.feed_forward([0, 0], args))\r\n l2.append(neural_network.feed_forward([1, 1], args))\r\n l3.append(neural_network.feed_forward([0, 1], args))\r\n l4.append(neural_network.feed_forward([1, 0], args))\r\n file.close()\r\n\r\n # Print progress bar\r\n print_progress_bar(i + 1, epoch, prefix='Progress:', suffix='Complete', length=50)\r\n\r\n # Calculate and display training time\r\n display_time(start_time)\r\n\r\n # testing data for the network\r\n print(\"[0, 0]: \" + str(round(neural_network.feed_forward([0, 0], args)[0])))\r\n print(\"[1, 1]: \" + str(round(neural_network.feed_forward([1, 1], args)[0])))\r\n print(\"[0, 1]: \" + str(round(neural_network.feed_forward([0, 1], args)[0])))\r\n print(\"[1, 0]: \" + str(round(neural_network.feed_forward([1, 0], args)[0])))\r\n\r\n # Plot points and display graph\r\n x = []\r\n for i in range(epoch):\r\n if i % (epoch / 100) == 0:\r\n x.append(i)\r\n plt.plot(x, l1, \"black\")\r\n plt.plot(x, l2, \"black\")\r\n plt.plot(x, l3, \"black\")\r\n plt.plot(x, l4, \"black\")\r\n plt.show()\r\n\r\n# Prints the progress bar for training data iterations\r\ndef print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filled_length = int(length * iteration // total)\r\n bar = fill * filled_length + '-' * (length - filled_length)\r\n print('%s [%s] %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\r\n # Print New line on Complete\r\n if iteration == total:\r\n print()\r\n\r\n# Displays the time from start_time to the time the function was called\r\ndef display_time(start_time):\r\n end_time = time.time()\r\n time_elapsed = end_time - start_time\r\n print(\"\\nTime Elapsed: \", end=\"\")\r\n if time_elapsed > 60:\r\n print(\"{:02d}\".format(math.floor(time_elapsed / 60)), end=\"\")\r\n print(\":{:02}\".format(round(time_elapsed % 60)), end=\"\\n\\n\")\r\n else:\r\n print(\"00:{:02d}\".format(round(time_elapsed % 60)), end=\"\\n\\n\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"554016938","text":"import copy\nimport pytest\n\nfrom uuid import uuid4\n\n\nORDER = [\n 'user', 'award', 'lab', 'static_section', 'higlass_view_config', 'page',\n 'ontology', 'ontology_term', 'file_format', 'badge', 'organism', 'gene',\n 'genomic_region', 'bio_feature', 'target', 'imaging_path', 'publication',\n 'publication_tracking', 'document', 'image', 'vendor', 'construct',\n 'modification', 'experiment_type', 'protocol', 'sop_map', 'biosample_cell_culture',\n 'individual_human', 'individual_mouse', 'individual_fly', 'individual_primate',\n 'individual_chicken', 'individual_zebrafish', 'biosource', 'antibody', 'enzyme',\n 'treatment_rnai', 'treatment_agent',\n 'biosample', 'quality_metric_fastqc', 'quality_metric_bamcheck', 'quality_metric_rnaseq',\n 'quality_metric_bamqc', 'quality_metric_pairsqc', 'quality_metric_margi',\n 'quality_metric_dedupqc_repliseq', 'quality_metric_chipseq', 'quality_metric_chipseq_v2', 'quality_metric_workflowrun',\n 'quality_metric_atacseq', 'quality_metric_rnaseq_madqc', 'quality_metric_qclist',\n 'microscope_setting_d1', 'microscope_setting_d2',\n 'microscope_setting_a1', 'microscope_setting_a2', 'file_fastq',\n 'file_processed', 'file_reference', 'file_calibration', 'file_microscopy',\n 'file_set', 'file_set_calibration', 'file_set_microscope_qc',\n 'file_vistrack', 'experiment_hi_c', 'experiment_capture_c',\n 'experiment_repliseq', 'experiment_atacseq', 'experiment_chiapet',\n 'experiment_damid', 'experiment_seq', 'experiment_tsaseq',\n 'experiment_mic', 'experiment_set', 'experiment_set_replicate',\n 'data_release_update', 'software', 'analysis_step', 'workflow',\n 'workflow_mapping', 'workflow_run_sbg', 'workflow_run_awsem',\n 'tracking_item', 'quality_metric_flag',\n 'summary_statistic', 'summary_statistic_hi_c', 'workflow_run',\n 'microscope_configuration', 'image_setting', 'quality_metric_mcool',\n 'ingestion_submission', 'file_other', 'filter_set'\n]\n\n\n@pytest.fixture\ndef wrangler_testapp(wrangler, app, external_tx, zsa_savepoints):\n return remote_user_testapp(app, wrangler['uuid'])\n\n\n@pytest.fixture\ndef submitter_testapp(submitter, app, external_tx, zsa_savepoints):\n return remote_user_testapp(app, submitter['uuid'])\n\n\n@pytest.fixture\ndef lab(testapp, award):\n item = {\n 'name': 'encode-lab',\n 'title': 'ENCODE lab',\n 'status': 'current',\n 'awards': [award['@id']]\n }\n return testapp.post_json('/lab', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef another_lab(testapp, award):\n item = {\n 'name': 'another-encode-lab',\n 'title': 'Another ENCODE lab',\n 'status': 'current',\n 'awards': [award['@id']]\n }\n return testapp.post_json('/lab', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef admin(testapp):\n item = {\n 'first_name': 'Test',\n 'last_name': 'Admin',\n 'email': 'admin@example.org',\n 'groups': ['admin'],\n 'status': 'current'\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n\n@pytest.fixture\ndef submitter(testapp, lab, award):\n item = {\n 'first_name': 'ENCODE',\n 'last_name': 'Submitter',\n 'email': 'encode_submitter@example.org',\n 'submits_for': [lab['@id']],\n 'viewing_groups': [award['viewing_group']],\n 'status': \"current\"\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n\n@pytest.fixture\ndef pi(testapp, lab, award):\n item = {\n 'first_name': 'ENCODE',\n 'last_name': 'PI',\n 'email': 'encode_pi@example.org',\n 'submits_for': 
[lab['@id']],\n 'viewing_groups': [award['viewing_group']],\n 'status': \"current\"\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n@pytest.fixture\ndef access_key(testapp, submitter):\n description = 'My programmatic key'\n item = {\n 'user': submitter['@id'],\n 'description': description,\n }\n res = testapp.post_json('/access_key', item)\n result = res.json['@graph'][0].copy()\n result['secret_access_key'] = res.json['secret_access_key']\n return result\n\n\n@pytest.fixture\ndef award(testapp):\n item = {\n 'name': 'encode3-award',\n 'description': 'ENCODE test award',\n 'viewing_group': '4DN',\n 'project': '4DN'\n }\n return testapp.post_json('/award', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_individual(testapp, award, lab, human):\n item = {\n \"accession\": \"4DNINOOOAAQ1\",\n \"age\": 53,\n \"age_units\": \"year\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'organism': human['@id'],\n \"ethnicity\": \"Caucasian\",\n \"health_status\": \"unknown\",\n \"life_stage\": \"adult\",\n \"sex\": \"female\",\n \"status\": \"released\",\n \"url\": \"http://ccr.coriell.org/Sections/BrowseCatalog/FamilyTypeSubDetail.aspx?PgId=402&fam=1463&coll=GM\",\n # \"uuid\": \"44d24e3f-bc5b-469a-8500-7ebd728f8ed5\"\n }\n return testapp.post_json('/individual_human', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef worthington_biochemical(testapp, award, lab):\n item = {\n \"title\": \"Worthington Biochemical\",\n \"name\": \"worthington-biochemical\",\n \"description\": \"\",\n \"url\": \"http://www.worthington-biochem.com\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'status': 'current'\n }\n return testapp.post_json('/vendor', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mboI(testapp, worthington_biochemical, lab, award):\n item = {\n \"name\": \"MboI\",\n \"enzyme_source\": worthington_biochemical['@id'],\n 'status': 'current',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/enzyme', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef lung_biosource(testapp, lab, award, lung_oterm):\n item = {\n \"biosource_type\": \"tissue\",\n 'tissue': lung_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef de_term(testapp, uberon_ont, lab, award):\n item = {\n \"term_id\": \"UBERON:0005439\",\n \"term_name\": \"definitive endoderm\",\n \"term_url\": \"http://purl.obolibrary.org/obo/UBERON_0005439\",\n \"source_ontologies\": [uberon_ont['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef biosample_cc_wo_diff(testapp, lab, award):\n item = {\n \"culture_start_date\": \"2018-01-01\",\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef tissue_biosample(testapp, lung_biosource, lab, award):\n item = {\n 'description': \"Tissue Biosample\",\n 'biosource': [lung_biosource['uuid']],\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosample', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef protocol_data(lab, award):\n return {'description': 'A Protocol',\n 'protocol_type': 'Experimental protocol',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n\n\n@pytest.fixture\ndef protocol(testapp, protocol_data):\n return testapp.post_json('/protocol', protocol_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef 
so_ont(testapp):\n return testapp.post_json('/ontology', {'ontology_name': 'SO'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_term(testapp, so_ont):\n gterm = {\n 'uuid': '7bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000704', 'term_name': 'gene',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef region_term(testapp, so_ont):\n gterm = {\n 'uuid': '6bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000001', 'term_name': 'region',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef protein_term(testapp, so_ont):\n gterm = {\n 'uuid': '8bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000104', 'term_name': 'polypeptide',\n 'preferred_name': 'protein',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef transcript_term(testapp, so_ont):\n gterm = {\n 'uuid': '5bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000673', 'term_name': 'transcript',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef component_term(testapp, so_ont):\n gterm = {\n 'uuid': '4bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'GO:0005575', 'term_name': 'cellular_component',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef cell_line_term(testapp, ontology):\n item = {\n \"is_slim_for\": \"cell\",\n \"namespace\": \"http://www.ebi.ac.uk/efo\",\n \"term_id\": \"EFO:0000322\",\n \"term_name\": \"cell line\",\n \"uuid\": \"111189bc-8535-4448-903e-854af460a233\",\n \"source_ontologies\": [ontology['@id']],\n \"term_url\": \"http://www.ebi.ac.uk/efo/EFO_0000322\"\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef f123_oterm(testapp, ontology, cell_line_term):\n item = {\n \"uuid\": \"530036bc-8535-4448-903e-854af460b254\",\n \"term_name\": \"F123-CASTx129\",\n \"term_id\": \"EFO:0009319\",\n \"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef gm12878_oterm(testapp, ontology, cell_line_term):\n item = {\n \"uuid\": \"530056bc-8535-4448-903e-854af460b111\",\n \"term_name\": \"GM12878\",\n \"term_id\": \"EFO:0002784\",\n \"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef thousandgen_oterm_data(ontology, cell_line_term):\n return {\"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]}\n\n\n@pytest.fixture\ndef thousandgen_oterms(testapp, thousandgen_oterm_data):\n oterms = []\n names = {'HG12345': 'EFO:999998', 'GM12345': 'EFO:999999'}\n for tn, tid in names.items():\n thousandgen_oterm_data['term_name'] = tn\n thousandgen_oterm_data['term_id'] = tid\n oterms.append(testapp.post_json('/ontology_term', thousandgen_oterm_data).json['@graph'][0])\n return oterms\n\n\n@pytest.fixture\ndef b_lymphocyte_oterm(testapp, uberon_ont):\n item = {\n \"term_name\": \"lymphocyte of B lineage\",\n \"term_id\": \"CL:0000945\",\n \"preferred_name\": \"B-lymphocyte\",\n \"source_ontologies\": [uberon_ont['@id']],\n }\n return 
testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef F123_biosource(testapp, lab, award, f123_oterm):\n item = {\n \"accession\": \"4DNSROOOAAQ2\",\n \"biosource_type\": \"stem cell\",\n \"cell_line\": f123_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef GM12878_biosource(testapp, lab, award, gm12878_oterm, b_lymphocyte_oterm):\n item = {\n \"accession\": \"4DNSROOOAAQ1\",\n \"biosource_type\": \"immortalized cell line\",\n \"cell_line\": gm12878_oterm['@id'],\n \"tissue\": b_lymphocyte_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef tier1_biosource(testapp, protocol, lab, award, gm12878_oterm):\n item = {\n 'description': 'Tier 1 cell line Biosource',\n 'biosource_type': 'immortalized cell line',\n 'cell_line': gm12878_oterm['@id'],\n 'SOP_cell_line': protocol['@id'],\n 'cell_line_tier': 'Tier 1',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_biosource(testapp, human_individual, worthington_biochemical, gm12878_oterm, lab, award):\n item = {\n \"description\": \"GM12878 cells\",\n \"biosource_type\": \"immortalized cell line\",\n \"individual\": human_individual['@id'],\n \"cell_line\": gm12878_oterm['@id'],\n \"biosource_vendor\": worthington_biochemical['@id'],\n \"status\": \"current\",\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_data():\n return {\n 'uuid': '7745b647-ff15-4ff3-9ced-b897d4e2983c',\n 'name': 'human',\n 'scientific_name': 'Homo sapiens',\n 'taxon_id': '9606',\n 'genome_assembly': 'GRCh38'\n }\n\n\n@pytest.fixture\ndef human(testapp, human_data):\n return testapp.post_json('/organism', human_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef mouse(testapp):\n item = {\n 'uuid': '3413218c-3d86-498b-a0a2-9a406638e786',\n 'name': 'mouse',\n 'scientific_name': 'Mus musculus',\n 'taxon_id': '10090',\n 'genome_assembly': 'GRCm38'\n }\n return testapp.post_json('/organism', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mouse_individual(testapp, mouse, lab, award):\n item = {\n 'uuid': '4731442b-f283-4fdf-ad8a-a69cf5a7c68a',\n \"age\": 53,\n \"age_units\": \"day\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'organism': mouse['@id'],\n \"mouse_strain\": \"Balb-c\",\n \"mouse_life_stage\": \"adult\",\n \"sex\": \"female\",\n }\n return testapp.post_json('/individual_mouse', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef organism(human):\n return human\n\n\n@pytest.fixture\ndef experiment_set(testapp, lab, award):\n item = {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'experimentset_type': 'replicates',\n 'status': 'in review by lab'\n }\n return testapp.post_json('/experiment_set', item).json['@graph'][0]\n\n\n# fixtures for testing calculated experiment_sets property in experiment_set\n# and also for _update method of experiment_set_replicate (and experiment_set)\n@pytest.fixture\ndef experiment(testapp, experiment_data):\n return testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef experiment_data(lab, award, human_biosample, mboI, exp_types):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'biosample': human_biosample['@id'],\n 'experiment_type': 
exp_types['hic']['@id'],\n 'digestion_enzyme': mboI['@id'],\n 'status': 'in review by lab'\n }\n\n\n@pytest.fixture\ndef exp_types(testapp, lab, award):\n experiment_types = {}\n title_dict = {\n 'hic': ('in situ Hi-C', [\"ExperimentHiC\"]),\n 'microc': ('Micro-C', [\"ExperimentHiC\"]),\n 'capc': ('Capture Hi-C', [\"ExperimentCaptureC\"]),\n 'rnaseq': ('RNA-seq', [\"ExperimentSeq\"]),\n 'fish': ('DNA FISH', [\"ExperimentMic\"]),\n 'dnase': ('DNase Hi-C', [\"ExperimentHiC\"]),\n 'dam': ('DamID-seq', [\"ExperimentDamid\"]),\n 'chia': ('ChIA-PET', [\"ExperimentChiapet\"]),\n 'repliseq': ('2-stage Repli-seq', [\"ExperimentRepliseq\"]),\n 'multi': ('Multi-stage Repli-seq', [\"ExperimentRepliseq\"]),\n 'chipseq': ('ChIP-seq', [\"ExperimentSeq\"]),\n 'dilution': ('Dilution Hi-C', [\"ExperimentHiC\"]),\n 'atacseq': ('ATAC-seq', [\"ExperimentAtacseq\"]),\n 'tsaseq': ('TSA-seq', [\"ExperimentTsaseq\"])\n }\n for k, v in title_dict.items():\n data = {\n 'uuid': str(uuid4()),\n 'title': v[0],\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'released',\n 'valid_item_types': v[1]\n }\n experiment_types[k] = testapp.post_json('/experiment_type', data, status=201).json['@graph'][0]\n return experiment_types\n\n\n@pytest.fixture\ndef experiment_project_release(testapp, lab, award, human_biosample, exp_types):\n item = {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'biosample': human_biosample['@id'],\n 'experiment_type': exp_types['microc']['@id'],\n 'status': 'released to project'\n }\n return testapp.post_json('/experiment_hi_c', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef base_experiment(testapp, experiment_data):\n return testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef experiments(testapp, experiment_data):\n expts = []\n for i in range(4):\n experiment_data['description'] = 'Experiment ' + str(i)\n expts.append(testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0])\n return expts\n\n\n@pytest.fixture\ndef rep_set_data(lab, award):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'description': 'Test replicate set',\n }\n\n\n@pytest.fixture\ndef empty_replicate_set(testapp, rep_set_data):\n return testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef two_experiment_replicate_set(testapp, rep_set_data, experiments):\n rep_set_data['description'] = 'Two one BioRep Experiment Replicate Set'\n rep_set_data['replicate_exps'] = [\n {'replicate_exp': experiments[0]['@id'],\n 'bio_rep_no': 1,\n 'tec_rep_no': 1},\n {'replicate_exp': experiments[1]['@id'],\n 'bio_rep_no': 1,\n 'tec_rep_no': 2}\n ]\n return testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef file_formats(testapp, lab, award):\n formats = {}\n ef_format_info = {\n 'pairs_px2': {'standard_file_extension': 'pairs.gz.px2',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'pairsam_px2': {'standard_file_extension': 'sam.pairs.gz.px2',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'bai': {'standard_file_extension': 'bam.bai',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'beddb': {\"standard_file_extension\": \"beddb\",\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]},\n }\n format_info = {\n 'fastq': {'standard_file_extension': 'fastq.gz',\n 'other_allowed_extensions': ['fq.gz'],\n \"valid_item_types\": [\"FileFastq\"]},\n 'pairs': {'standard_file_extension': 'pairs.gz',\n \"extrafile_formats\": ['pairs_px2', 'pairsam_px2'],\n 
\"valid_item_types\": [\"FileProcessed\"]},\n 'bam': {'standard_file_extension': 'bam',\n 'extrafile_formats': ['bai'],\n \"valid_item_types\": [\"FileProcessed\"]},\n 'mcool': {'standard_file_extension': 'mcool',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'tiff': {'standard_file_extension': 'tiff',\n 'other_allowed_extensions': ['tif'],\n \"valid_item_types\": [\"FileMicroscopy\", \"FileCalibration\"]},\n 'zip': {'standard_file_extension': 'zip',\n \"valid_item_types\": [\"FileProcessed\", \"FileMicroscopy\", \"FileCalibration\"]},\n 'chromsizes': {'standard_file_extension': 'chrom.sizes',\n \"valid_item_types\": [\"FileReference\"]},\n 'other': {'standard_file_extension': '',\n \"valid_item_types\": [\"FileProcessed\", \"FileMicroscopy\",\n \"FileReference\", \"FileCalibration\"]},\n 'bw': {'standard_file_extension': 'bw',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'bg': {'standard_file_extension': 'bedGraph.gz',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'bigbed': {'standard_file_extension': 'bb',\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]},\n 'bed': {\"standard_file_extension\": \"bed.gz\",\n \"extrafile_formats\": ['beddb'],\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]}\n }\n\n for eff, info in ef_format_info.items():\n info['file_format'] = eff\n info['uuid'] = str(uuid4())\n info['lab'] = lab['@id']\n info['award'] = award['@id']\n formats[eff] = testapp.post_json('/file_format', info, status=201).json['@graph'][0]\n for ff, info in format_info.items():\n info['file_format'] = ff\n info['uuid'] = str(uuid4())\n if info.get('extrafile_formats'):\n eff2add = []\n for eff in info.get('extrafile_formats'):\n eff2add.append(formats[eff].get('@id'))\n info['extrafile_formats'] = eff2add\n info['lab'] = lab['@id']\n info['award'] = award['@id']\n formats[ff] = testapp.post_json('/file_format', info, status=201).json['@graph'][0]\n return formats\n\n\n@pytest.fixture\ndef file(testapp, lab, award, file_formats):\n item = {\n 'file_format': file_formats.get('fastq').get('@id'),\n 'md5sum': 'd41d8cd98f00b204e9800998ecf8427e',\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'uploaded', # avoid s3 upload codepath\n }\n return testapp.post_json('/file_fastq', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef file_fastq(testapp, lab, award, file_formats):\n item = {\n 'file_format': file_formats.get('fastq').get('@id'),\n 'md5sum': 'd41d8cd9f00b204e9800998ecf8427e',\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'uploaded', # avoid s3 upload codepath\n }\n return testapp.post_json('/file_fastq', item).json['@graph'][0]\n\n\nRED_DOT = \"\"\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA\nAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO\n9TXL0Y4OHwAAAABJRU5ErkJggg==\"\"\"\n\n\n@pytest.fixture\ndef attachment():\n return {'download': 'red-dot.png', 'href': RED_DOT}\n\n\n@pytest.fixture\ndef image_data(attachment, lab, award):\n return {\n 'attachment': attachment,\n 'caption': 'Test image',\n 'award': award['uuid'],\n 'lab': lab['uuid'],\n }\n\n\n@pytest.fixture\ndef image(testapp, image_data):\n return testapp.post_json('/image', image_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef rnai(testapp, lab, award):\n item = {\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'target_sequence': 'TATATGGGGAA',\n 'rnai_type': 'shRNA',\n }\n return testapp.post_json('/treatment_rnai', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef construct(testapp, lab, award):\n 
item = {\n 'name': 'Awesome_Construct',\n 'construct_type': 'tagging construct',\n 'protein_tags': ['eGFP, C-terminal'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/construct', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef publication(testapp, lab, award):\n item = {\n 'uuid': '8312fc0c-b241-4cb2-9b01-1438910550ad',\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'ID': \"PMID:22955616\",\n }\n return testapp.post_json('/publication', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef publication_tracking(testapp, lab, award):\n item = {\n 'uuid': '8312fc0c-b241-4cb2-9b01-1438910550ac',\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'PMID': \"PMID:12345678\",\n }\n return testapp.post_json('/publication_tracking', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef software(testapp, lab, award):\n # TODO: ASK_ANDY do we want software_type to be an array?\n item = {\n \"name\": \"FastQC\",\n \"software_type\": [\"indexer\", ],\n \"version\": \"1\",\n 'lab': lab['@id'],\n 'award': award['@id']\n }\n return testapp.post_json('/software', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef analysis_step(testapp, software, lab, award):\n item = {\n 'name': 'fastqc',\n \"software_used\": software['@id'],\n \"version\": \"1\",\n 'lab': lab['@id'],\n 'award': award['@id']\n }\n return testapp.post_json('/analysis_step', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef document(testapp, lab, award):\n item = {\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/document', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_sbg(testapp, lab, award, workflow_bam):\n item = {'run_platform': 'SBG',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'sbg_import_ids': [u'TBCKPdzfUE9DpvtzO6yb9yoIvO81RaZd'],\n 'award': award['@id'],\n 'sbg_task_id': '1235',\n 'lab': lab['@id'],\n 'sbg_mounted_volume_ids': ['4dn_s32gkz1s7x', '4dn_s33xkquabu'],\n 'run_status': 'started',\n }\n return testapp.post_json('/workflow_run_sbg', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_awsem(testapp, lab, award, workflow_bam):\n item = {'run_platform': 'AWSEM',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'award': award['@id'],\n 'awsem_job_id': '1235',\n 'lab': lab['@id'],\n 'run_status': 'started',\n }\n return testapp.post_json('/workflow_run_awsem', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_json(testapp, lab, award, workflow_bam):\n return {'run_platform': 'SBG',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'sbg_import_ids': [u'TBCKPdzfUE9DpvtzO6yb9yoIvO81RaZd'],\n 'award': award['@id'],\n 'sbg_task_id': '1235',\n 'lab': lab['@id'],\n 'sbg_mounted_volume_ids': ['4dn_s32gkz1s7x', '4dn_s33xkquabu'],\n 'run_status': 'started',\n }\n\n\n@pytest.fixture\ndef workflow_run_awsem_json(testapp, lab, award, workflow_bam):\n return {'run_platform': 'AWSEM',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'award': award['@id'],\n 'awsem_job_id': '1235',\n 'lab': lab['@id'],\n 'run_status': 'started',\n }\n\n\n@pytest.fixture\ndef human_biosample(testapp, human_biosource, lab, award):\n item = {\n \"description\": \"GM12878 prepared for Hi-C\",\n \"biosource\": [human_biosource['@id'], ],\n \"status\": \"in review by lab\",\n 'award': award['@id'],\n 'lab': lab['@id']\n # 
\"biosample_protocols\": [\"131106bc-8535-4448-903e-854af460b212\"],\n # \"modifications\": [\"431106bc-8535-4448-903e-854af460b254\"],\n # \"treatments\": [\"686b362f-4eb6-4a9c-8173-3ab267307e3b\"]\n }\n return testapp.post_json('/biosample', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef software_bam(testapp, lab, award):\n # TODO: ASK_ANDY do we want software_type to be an array?\n item = {\n \"name\": \"Aligner\",\n \"software_type\": [\"indexer\", ],\n \"version\": \"1\",\n 'lab': lab['@id'],\n 'award': award['@id']\n }\n return testapp.post_json('/software', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_bam(testapp, lab, award):\n item = {\n 'title': \"test workflow\",\n 'name': \"test_workflow\",\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/workflow', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_mapping(testapp, workflow_bam, lab, award):\n item = {\n \"name\": \"test mapping\",\n \"workflow_name\": \"test workflow name\",\n \"workflow\": workflow_bam['@id'],\n \"data_input_type\": \"experiment\",\n 'lab': lab['@id'],\n 'award': award['@id'],\n \"workflow_parameters\": [\n {\"parameter\": \"bowtie_index\", \"value\": \"some value\"}\n ],\n \"experiment_parameters\": [\n {\"parameter\": \"biosample.biosource.individual.organism\", \"value\": \"mouse\"}\n ],\n \"workflow_parameters\": [\n {\"parameter\": \"genome_version\", \"value\": \"mm9\"}\n ]\n }\n return testapp.post_json('/workflow_mapping', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef basic_genomic_region(testapp, lab, award):\n item = {\n \"genome_assembly\": \"GRCh38\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef genome_info(lab, award):\n return {\n \"genome_assembly\": \"GRCh38\",\n \"chromosome\": \"X\",\n \"start_coordinate\": 1,\n \"end_coordinate\": 3,\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n\n\n@pytest.fixture\ndef genomic_region_w_chrloc(testapp, genome_info):\n return testapp.post_json('/genomic_region', genome_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef genomic_region_2(testapp, genome_info):\n genome_info['chromosome'] = '9'\n genome_info['start_coordinate'] = 50\n genome_info['start_coordinate'] = 300\n return testapp.post_json('/genomic_region', genome_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_genes(testapp, lab, award):\n item = {\n \"targeted_genes\": [\"eeny\", \"meeny\"],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_w_alias(testapp, target_w_genes):\n return testapp.patch_json(target_w_genes['@id'], {'aliases': ['lab:test_targ']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_gr_w_alias(testapp, target_w_region):\n return testapp.patch_json(target_w_region['@id'], {'aliases': ['lab:test_targ_gr']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_agr_w_alias(testapp, another_target_w_region):\n return testapp.patch_json(another_target_w_region['@id'], {'aliases': ['lab:test_another_gr']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_item(testapp, lab, award):\n return testapp.post_json('/gene', {'lab': lab['@id'], 'award': award['@id'], 'geneid': '5885'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_bio_feature(testapp, lab, award, gene_term, gene_item):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Gene BioFeature',\n 'feature_type': 
gene_term['@id'],\n 'relevant_genes': [gene_item['@id']]}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef prot_bio_feature(testapp, lab, award, protein_term, gene_item):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Protein BioFeature',\n 'feature_type': protein_term['@id'],\n 'relevant_genes': [gene_item['@id']]}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef biofeat_w_alias(testapp, gene_bio_feature):\n return testapp.patch_json(gene_bio_feature['@id'], {'aliases': ['lab:test_targ_bf']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef gr_biofeat_w_alias(testapp, genomic_region_bio_feature):\n return testapp.patch_json(\n genomic_region_bio_feature['@id'], {'aliases': ['lab:test_targ_gr_bf']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef some_genomic_region(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCh38',\n 'chromosome': '1',\n 'start_coordinate': 17,\n 'end_coordinate': 544}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef vague_genomic_region(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCm38',\n 'chromosome': '5',\n 'start_location': 'beginning',\n 'end_location': 'centromere'}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef vague_genomic_region_w_desc(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCm38',\n 'chromosome': '5',\n 'start_location': 'beginning',\n 'end_location': 'centromere',\n 'location_description': 'gene X enhancer'}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef basic_region_bio_feature(testapp, lab, award, region_term):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Region BioFeature with minimal info',\n 'feature_type': region_term['@id']}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef genomic_region_bio_feature(testapp, lab, award, region_term, some_genomic_region):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Region BioFeature',\n 'feature_type': region_term['@id'],\n 'genome_location': [some_genomic_region['@id']]}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_region(testapp, genomic_region_w_chrloc, lab, award):\n item = {\n \"targeted_genome_regions\": [genomic_region_w_chrloc['@id']],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef another_target_w_region(testapp, genomic_region_2, lab, award):\n item = {\n \"targeted_genome_regions\": [genomic_region_2['@id']],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_desc(testapp, lab, award):\n item = {\n \"description\": \"I'm a region\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_basic_info(lab, award):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'description': 'minimal modification',\n 'modification_type': 'Crispr',\n }\n\n\n@pytest.fixture\ndef basic_modification(testapp, mod_basic_info):\n return 
testapp.post_json('/modification', mod_basic_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_genomic_change(testapp, mod_basic_info):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with genomic change'\n mod['genomic_change'] = \"deletion\"\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_target(testapp, mod_basic_info, gene_bio_feature):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with target'\n mod['target_of_mod'] = [gene_bio_feature['@id']]\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_change_and_target(testapp, mod_basic_info, gene_bio_feature):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with target and genomic change'\n mod['target_of_mod'] = [gene_bio_feature['@id']]\n mod['genomic_change'] = \"deletion\"\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef uberon_ont(testapp):\n return testapp.post_json('/ontology', {'ontology_name': 'Uberon'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef ontology(testapp):\n data = {\n \"uuid\": \"530006bc-8535-4448-903e-854af460b254\",\n \"ontology_name\": \"Experimental Factor Ontology\",\n \"ontology_url\": \"http://www.ebi.ac.uk/efo/\",\n \"download_url\": \"http://sourceforge.net/p/efo/code/HEAD/tree/trunk/src/efoinowl/InferredEFOOWLview/EFO_inferred.owl?format=raw\",\n \"namespace_url\": \"http://www.ebi.ac.uk/efo/\",\n \"ontology_prefix\": \"EFO\",\n \"description\": \"The description\",\n \"notes\": \"The download\",\n }\n return testapp.post_json('/ontology', data).json['@graph'][0]\n\n\n@pytest.fixture\ndef oterm(uberon_ont):\n return {\n \"uuid\": \"530036bc-8535-4448-903e-854af460b222\",\n \"preferred_name\": \"preferred lung name\",\n \"term_name\": \"lung\",\n \"term_id\": \"UBERON:0002048\",\n \"term_url\": \"http://purl.obolibrary.org/obo/UBERON_0002048\",\n \"source_ontologies\": [uberon_ont['@id']]\n }\n\n\n@pytest.fixture\ndef lung_oterm(oterm, testapp):\n return testapp.post_json('/ontology_term', oterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef quality_metric_fastqc(testapp, award, lab):\n item = {\n \"uuid\": \"ed80c2a5-ae55-459b-ba1d-7b0971ce2613\",\n \"award\": award['@id'],\n \"lab\": lab['@id']\n }\n return testapp.post_json('/quality_metric_fastqc', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef quality_metric_pairsqc(testapp, lab, award):\n item = {\n 'uuid': 'fdc5ca7f-35bc-421e-ab1f-00f9e5146041',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/quality_metric_pairsqc', item).json['@graph'][0]\n","sub_path":"src/encoded/tests/datafixtures.py","file_name":"datafixtures.py","file_ext":"py","file_size_in_byte":37392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
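A hypothetical test consuming the fixtures above (not in the original file); pytest injects each fixture by parameter name, so a test module sitting next to these fixtures can combine the posted items directly. How the linked biosample is serialized in the POST response is an assumption:

```python
# Hypothetical test; assumes it can resolve the fixtures above by name.
def test_experiment_links_posted_biosample(experiment, human_biosample):
    # experiment_data wires the biosample in by @id, so the posted item
    # is expected to echo that link back (assumption about serialization).
    assert experiment['biosample'] == human_biosample['@id']
```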
+{"seq_id":"539933907","text":"from PIL import Image\nimport time\nimport numpy as np\nimport binascii\nimport os\nimport traceback\nimport re\nimport statistics\nimport collections\nfrom collections import Counter\n\n#Excel\nimport openpyxl\n\n#Other directories\nimport sys \nsys.path.append(\"../../../Analysis/PSNR+SSIM/\")\nfrom PSNRSSIM import returnValues\n\n\n#Matrix for Ordered dither\nthe_2x2 = np.array([[0,2],\n\t\t\t\t[3,1]])\nthe_2x2 = np.divide(the_2x2,4)\ntiled = np.tile(the_2x2,(256,256))\n\nlength =8\n\ndef greyScale(image):\n\timageArray = np.array(image, 'float64')\t\t\t#Image to numpy array\n\timageArray = np.divide(imageArray, 256)\t\t\t#Divides image values by the range of pixel values. 256 for 8 bit images\n\n\ti,j = 0,0\n\t\n\t#Message to embed and conversion to binary\n\tmessage = \"Why do programmers always mix up Halloween and Christmas? Because 31 OCT = 25 DEC!\"\n\tmessage = bin(int.from_bytes(message.encode('utf-8', 'surrogatepass'), 'big'))[2:]\n\tmessage = message.zfill(8*((len(message) + 7)//8))\n\t\n\tvariableY=0\n\tcounter =0\n\tfor x in range(height):\n\t\tfor y in range(0,width,length):\t\t\t\n\t\t\ttheGroup = imageArray[x,y:y+length]\n\t\t\t\t\n\t\t\n\t\t\twcounter = len([i for i in halftoneValue(np.copy(theGroup),x,y) if i > 128]) \t\t\t\t#Count number of white pixels in the halftoned group\n\t\t\t\n\t\t\tc = length-2*wcounter \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Broadens the groups complexity value\n\t\t\tt = int((length*2)/5)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Defines the threshold to compare against the complexity\n\t\t\t\n\t\t\t\n\t\t\tif((0-t) <= float(c) <= 0+t):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#If the complexity is within the threshold\n\t\t\t\tif(j < len(message)):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For iterating through the message\n\t\t\t\t\tei = minimum_error(theGroup, message[j], length, c,t,x,y)\t\t\t\t\t\t\t#Get the minimum error to change the greyscale values so that when halftoned, embeds the message bit\n\t\t\t\t\tembeddedGroup = [x+y for x,y in zip(theGroup,ei)]\t\t\t\t\t\t\t\t#Creates a group for the combination of the selected group added with the error added\n\n\t\t\t\t\twcounter1 = len([i for i in halftoneValue(np.copy(embeddedGroup),x,y) if i > 128])\t#Count number of white pixels in the halftoned embedded group\n\t\t\t\t\td = length-2*wcounter1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Again, broadens the groups complexity value\n\t\t\t\t\n\t\t\t\t\tif((0-t) <= float(d) <= 0+t):\t\t\t\t\t\t\t\t\t\t\t\t\t#If the new embedded group is within the complexity range...\n\t\t\t\t\t\tfor g in range(0,length,1):\n\t\t\t\t\t\t\timageArray[x,y+g] = embeddedGroup[g]\t\t\t\t\t\t\t\t\t#Update the imageArray to store the greyscale values\n\n\t\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t#Halftone the group\n\n\t\t\t\t\t\tj += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Iterate through the message by 1\n\t\t\t\t\telse:\n\t\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t#Halftone the group\n\n\t\t\t\telse:\n\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t\t#Halftone the group\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t\t\t#Halftone the group\n\t\t\t\t\n\t\t\n\treturn Image.fromarray(np.array(imageArray,'uint8'))\n\n\n#Find the minimum change to embed the message\ndef minimum_error(theGroup, messageBit, length,c,t,x,y):\n\teu, ed = [0]*length, [0]*length\n\tn = 1\n\n\tk = 0\n\twhile((halftoneValue([x+y for x,y in zip(theGroup,eu)],x,y).count(255)) % (2**n) != int(messageBit)):\t\t#While the number of 
white pixels from halftoning mod 2 is not equal to the message bit (1 or 0)\n\t\teu[k] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to eu position k\n\t\tk = (k+1)%length \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to k\n\n\n\tk = 0\n\twhile(halftoneValue([x+y for x,y in zip(theGroup,ed)],x,y).count(255) % (2**n) != int(messageBit)):\t\t\t#While the number of white pixels from halftoning mod 2 is not equal to the message bit (1 or 0)\n\t\ted[k] -= 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Take away 1 from ed position k\n\t\tk = (k+1)%length \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to k\n\n\n\tif(sum(eu) <= sum(abs(v) for v in ed)):\t\t#Compare eu against ed as whole vectors. Whichever has the smallest total change gets assigned to be returned\n\t\tei = eu\n\telse:\n\t\tei = ed\n\n\tembeddedGroup = [x+y for x,y in zip(theGroup,ei)]\n\twcounter1 = len([i for i in halftoneValue(np.copy(embeddedGroup),x,y) if i > 128])\n\td = length-2*wcounter1\n\n\tif((0-t) <= float(d) <= 0+t):\t\t\t\t#If the complexity value is within the threshold...\n\t\treturn ei \t\t\t\t\t\t\t\t#Return the error variance\n\telse:\t\t\t\t\t\t\t\t\t\t#If not, fall back to the other error direction\n\t\tif(sum(eu) <= sum(abs(v) for v in ed)):\n\t\t\tei = ed\n\t\telse:\n\t\t\tei = eu\n\n\treturn ei\n\n\n#Halftone value is what the group halftone values are\ndef halftoneValue(theGroup,x,y):\n\tfor i in range(0, len(theGroup)):\n\t\ttheGroup[i] = (255 if (theGroup[i]>tiled[x,y]) else 0)\n\t\ty+=1\n\n\treturn theGroup\n\n\n#Halftone group is halftoning the group and distributing to other pixels \ndef halftoneGroup(x,y,imageArray, length):\n\tfor i in range(0, length):\n\t\timageArray[x,y] = (255 if (imageArray[x,y]>tiled[x,y]) else 0)\n\t\ty+=1\n\n\treturn imageArray\n\n\n#Extract the message from the embedded halftoned image\ndef extraction(image):\n\timageArray = np.array(image, 'float64')\t\t\t\t\t\t\t\t#Image to array\n\tmessage = []\n\n\tfor x in range(height):\n\t\tfor y in range(0,width,length):\n\t\t\ttheGroup = imageArray[x,y:y+length]\t\t\t\t\t\t\t#Select the group\n\t\t\n\t\t\twcounter = len([i for i in theGroup if i > 128]) \t\t\t#Number of white pixels in the group\n\n\t\t\tc = length-2*wcounter \t\t\t\t\t\t\t\t\t\t#Broaden the complexity value\n\t\t\tt = int((length*2)/5)\t\t\t\t\t\t\t\t\t\t#Threshold\n\t\t\tn = 1\n\t\t\t\n\t\t\tif((0-t) <= float(c) <= 0+t):\t\t\t\t\t\t\t\t#If the complexity is within the threshold\n\t\t\t\t\n\t\t\t\tmessageBit = wcounter % (2**n)\t\t\t\t\t\t\t#Get a 1 or a 0 from the white number count\n\t\t\t\tmessage.append(str(messageBit))\t\t\t\t\t\t\t#Add the bit to the message list\n\t\n\n\textractedMessage = ''.join(message)\t\t\t\t\t\t\t\t\t#Join the extracted bits together\n\tfinalMessage =[]\n\tfor i in range(0, len(extractedMessage), 8):\t\t\t\t\t\t#For every 8 bits (makes a character)\n\t\tmessage = int(extractedMessage[i:i+8],2)\n\t\tcharacter = message.to_bytes((message.bit_length() + 7)//8, 'big').decode('utf-8', 'ignore')\t\t#Convert to ASCII\n\t\tif(re.match(r'[\\w ?=]+', character)):\t\t\t\t\t\t\t#If the character is part of the regex\n\t\t\tfinalMessage.append(character)\t\t\t\t\t\t\t\t#Add it to the final message list\n\tprint(''.join(finalMessage)[:84])\n\tprint()\n\tanalyse(finalMessage)\t\t\t\t\t\t\t\t\t\t\t\t#Analyse the final message\n\ndef analyse(decryptMessage):\n\tmessage = \"Why do programmers always mix up Halloween 
and Christmas? Because 31 OCT = 25 DEC!\"\t\t#Original message\n\tmessage = list(message)\t\t\t\t\t\t#Make the original message a list\n\n\tvalue = 0\n\tfor item in message:\n\t\tif item in decryptMessage:\t\t\t\t\t\t#Compare the two lists\n\t\t\tvalue +=1\n\n\tmessagePercents.append(((value/84)*100))\t\t\t#Get percentage of extracted message against the original message\n\n\n\nembedTimes = []\ndecryptTimes = []\npsnrValues = []\nssimValues = []\nmessagePercents = []\n\n\n#Processes every file in the original images folder\nfileList = []\nfor file in os.listdir(\"../../../Images/Original/\"):\n\tfileList.append(file[:-4])\t\t\t\t\t\t\t#Remove the file extension so\nfileList = sorted(fileList, key=int)\t\t\t\t\t#it can be sorted by int\n\nfor file in fileList:\t\t\t\t\t\t\t\t\t#For every file in the sorted file list\n\tfilename = os.fsdecode(file)\n\tfilename+=\".png\"\t\t\t\t\t\t\t\t\t#Add png file extension. Converts any file format to png\n\t\n\n\timage = Image.open(\"../../../Images/Original/\"+filename)\t\t\t\t\t\t\t\t\t#Open file to embed\n\toriginal = Image.open(\"../../../Images/Basic Halftone/Ordered/2x2/\"+filename)\t\t#For comparing against original\n\tprint(filename)\n\n\twidth, height = image.size\t\t\t\t\t\t\t#PIL reports (width, height)\n\tstart_time = time.time()\n\timageConverted = greyScale(image)\n\tembedTime = time.time() - start_time\n\t\t\n\timageConverted.save(\"../../../Images/Embedded/3. Greyscale Text/Ordered/2x2/\"+filename)\n\n\n\timageDecode = Image.open(\"../../../Images/Embedded/3. Greyscale Text/Ordered/2x2/\"+filename)\n\tstart_time = time.time()\n\textraction(imageDecode)\n\tdecryptTime = time.time() - start_time\n\n\tpsnr, ssim = returnValues(original,imageConverted)\t\t#Send original and processed image to get PSNR and SSIM values\n\tpsnrValues.append(psnr)\n\tssimValues.append(ssim)\n\tembedTimes.append(embedTime)\n\tdecryptTimes.append(decryptTime)\n\n\nexcel_document = openpyxl.load_workbook(\"../../../../Data/Data.xlsx\")\t#Open excel\nsheet = (excel_document['Greyscale Embed'])\t\t\t\t\t\t\t\t#Selects sheet\n\n#Input values to the sheet\nmultiple_cells = sheet['T4' : 'T51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = psnrValues[value]\n\nmultiple_cells = sheet['U4' : 'U51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = ssimValues[value]\n\nmultiple_cells = sheet['V4' : 'V51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = embedTimes[value]\n\nmultiple_cells = sheet['W4' : 'W51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = decryptTimes[value]\n\nmultiple_cells = sheet['X4' : 'X51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = messagePercents[value]\n#End of inputting values\n\n\nexcel_document.save(\"../../../../Data/Data.xlsx\")","sub_path":"Algorithms/Embedding/3. Greyscale Text/Ordered Greyscale/Greyscale 2x2.py","file_name":"Greyscale 2x2.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
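A standalone sketch of the 2x2 ordered-dither threshold the record above tiles over the whole image (pixel values invented for illustration): each normalised greyscale value is compared against the Bayer matrix entry at its position.

```python
import numpy as np

# Normalised 2x2 Bayer matrix, as in the record above.
bayer2 = np.array([[0, 2],
                   [3, 1]]) / 4.0          # [[0.0, 0.5], [0.75, 0.25]]

pixels = np.array([[0.2, 0.8],
                   [0.6, 0.4]])            # greyscale values in [0, 1)

halftone = np.where(pixels > bayer2, 255, 0)
print(halftone)
# [[255 255]
#  [  0 255]]
```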
+{"seq_id":"182355852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFile name: quad_mdl.py\nAuthor: Daniel Hulse\nCreated: June 2019\nDescription: A fault model of a multi-rotor drone.\n\"\"\"\n\nimport networkx as nx\nimport numpy as np\n\nimport auxfunctions as aux\nimport faultprop as fp\n\n#Declare time range to run model over\ntimes=[0,3, 55]\n\n##Define flows for model\nclass EE:\n def __init__(self,name):\n self.rate=1.0\n self.effort=1.0\n def status(self):\n status={'rate':self.rate, 'effort':self.effort}\n return status.copy()\n \nclass Force:\n def __init__(self,name):\n self.flowtype='Force'\n self.name=name\n self.value=1.0\n def status(self):\n status={'value':self.value}\n return status.copy()\n\nclass ME:\n def __init__(self,name):\n self.flowtype='ME'\n self.name=name\n self.rate=1.0\n self.effort=1.0\n self.nominal={'rate':1.0, 'effort':1.0}\n def status(self):\n status={'rate':self.rate, 'effort':self.effort}\n return status.copy() \n\nclass Sig:\n def __init__(self,name):\n self.flowtype='Sig'\n self.name=name\n self.forward=0.0\n self.upward=0.0\n def status(self):\n status={'forward':self.forward, 'upward':self.upward}\n return status.copy() \n\nclass DOF:\n def __init__(self,name):\n self.flowtype='DOF'\n self.name=name\n self.stab=1.0\n self.vertvel=0.0\n self.planvel=0.0\n self.uppwr=0.0\n self.planpwr=0.0\n def status(self):\n status={'stab':self.stab, 'vertvel':self.vertvel, 'planvel':self.planvel, 'planpwr':self.planpwr, 'uppwr':self.uppwr}\n return status.copy() \nclass Land:\n def __init__(self,name):\n self.flowtype='Land'\n self.name=name\n self.stat='landed'\n self.area='start'\n self.nominal={'status':'landed', 'area':'start'}\n def status(self):\n status={'status':self.stat, 'area':self.area}\n return status.copy() \n\nclass Env:\n def __init__(self,name):\n self.flowtype='Env'\n self.name=name\n self.elev=0.0\n self.x=0.0\n self.y=0.0\n self.start=[0.0,0.0]\n self.start_xw=5\n self.start_yw=5\n self.start_area=aux.square(self.start,self.start_xw, self.start_yw)\n self.flyelev=30\n self.poi_center=[0,150]\n self.poi_xw=50\n self.poi_yw=50\n self.poi_area=aux.square(self.poi_center, self.poi_xw, self.poi_yw)\n self.dang_center=[0,150]\n self.dang_xw=150\n self.dang_yw=150\n self.dang_area=aux.square(self.dang_center, self.dang_xw, self.dang_yw)\n self.safe1_center=[-25,100]\n self.safe1_xw=10\n self.safe1_yw=10\n self.safe1_area=aux.square(self.safe1_center, self.safe1_xw, self.safe1_yw)\n self.safe2_center=[25,50]\n self.safe2_xw=10\n self.safe2_yw=10\n self.safe2_area=aux.square(self.safe2_center, self.safe2_xw, self.safe2_yw)\n self.nominal={'elev':1.0, 'x':1.0, 'y':1.0}\n def status(self):\n status={'elev':self.elev, 'x':self.x, 'y':self.y}\n return status.copy()\n\nclass Direc:\n def __init__(self,name):\n self.flowtype='Dir'\n self.name=name\n self.traj=[0,0,0]\n self.power=1\n self.nominal={'x': self.traj[0], 'y': self.traj[1], 'z': self.traj[2], 'power': 1}\n def status(self):\n status={'x': self.traj[0], 'y': self.traj[1], 'z': self.traj[2], 'power': self.power}\n return status.copy()\n\nclass storeEE:\n def __init__(self, name,EEout, FS):\n self.type='function'\n self.EEout=EEout\n self.FS=FS\n self.effstate=1.0\n self.ratestate=1.0\n self.soc=2000\n self.faultmodes={'short':{'rate':'moderate', 'rcost':'major'}, \\\n 'degr':{'rate':'moderate', 'rcost':'minor'}, \\\n 'break':{'rate':'common', 'rcost':'moderate'}, \\\n 'nocharge':{'rate':'moderate','rcost':'minor'}, \\\n 'lowcharge':{'rate':'moderate','rcost':'minor'}}\n self.faults=set(['nom'])\n def 
condfaults(self):\n if self.FS.value<1.0:\n self.faults.update(['break'])\n if self.EEout.rate>2:\n self.faults.add('break')\n if self.soc<20:\n self.faults.add('lowcharge')\n if self.soc<1:\n self.faults.remove('lowcharge')\n self.faults.add('nocharge')\n return 0\n def behavior(self, time):\n if self.faults.intersection(set(['short'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['break'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['degr'])):\n self.effstate=0.5\n \n if self.faults.intersection(set(['nocharge'])):\n self.soc=0.0\n self.effstate=0.0\n \n self.EEout.effort=self.effstate\n self.soc=self.soc-self.EEout.rate*time\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass distEE:\n def __init__(self,EEin,EEmot,EEctl,FS):\n self.useprop=1.0\n self.type='function'\n self.EEin=EEin\n self.EEmot=EEmot\n self.EEctl=EEctl\n self.FS=FS\n self.effstate=1.0\n self.ratestate=1.0\n self.faultmodes={'short':{'rate':'moderate', 'rcost':'major'}, \\\n 'degr':{'rate':'moderate', 'rcost':'minor'}, \\\n 'break':{'rate':'common', 'rcost':'moderate'}}\n self.faults=set(['nom'])\n def condfaults(self):\n if self.FS.value<0.5:\n self.faults.update(['break'])\n if max(self.EEmot.rate,self.EEctl.rate)>2:\n self.faults.add('break') \n def behavior(self, time):\n if self.faults.intersection(set(['short'])):\n self.ratestate=np.inf\n self.effstate=0.0\n elif self.faults.intersection(set(['break'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['degr'])):\n self.effstate=0.5\n self.EEin.rate=self.ratestate*self.EEin.effort\n self.EEmot.effort=self.effstate*self.EEin.effort\n self.EEctl.effort=self.effstate*self.EEin.effort\n \n self.EEin.rate=aux.m2to1([ self.EEin.effort, self.ratestate, max(self.EEmot.rate,self.EEctl.rate)])\n \n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass engageLand:\n def __init__(self,name, Forcein, Forceout):\n self.useprop=1.0\n self.name=name\n self.type='function'\n self.forcein=Forcein\n self.forceout=Forceout\n self.fstate=1.0\n self.faultmodes={'break':{'rate':'moderate', 'rcost':'major'}, \\\n 'deform':{'rate':'moderate', 'rcost':'minor'}, }\n self.faults=set(['nom'])\n def condfaults(self):\n if self.forceout.value<-1.4:\n self.faults.update(['break'])\n elif self.forceout.value<-1.2:\n self.faults.update(['deform'])\n def behavior(self, time):\n if self.faults.intersection(set(['break'])):\n self.fstate=4.0\n elif self.faults.intersection(set(['deform'])):\n self.fstate=2.0\n else:\n self.fstate=1.0\n \n self.forceout.value=self.fstate*min([-2.0,self.forcein.value])*0.2\n \n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass holdPayload:\n def __init__(self,name, Force_gr,Force_air, Force_struct):\n self.name=name\n self.useprop=1.0\n self.type='function'\n self.FG=Force_gr\n self.FA=Force_air\n self.FS=Force_struct\n self.fstate=1.0\n self.faultmodes={'break':{'rate':'moderate', 'rcost':'major'}, \\\n 'deform':{'rate':'moderate', 'rcost':'minor'}, }\n self.faults=set(['nom'])\n def condfaults(self):\n if abs(self.FG.value)>1.6:\n self.faults.update(['break'])\n elif abs(self.FG.value)>1.4:\n self.faults.update(['deform'])\n def behavior(self, time):\n if self.faults.intersection(set(['break'])):\n self.fstate=0.0\n elif 
self.faults.intersection(set(['deform'])):\n self.fstate=0.5\n else:\n self.fstate=1.0\n self.FA.value=self.fstate\n self.FS.value=self.fstate\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n \nclass affectDOF:\n def __init__(self, name, EEin, Ctlin, DOFout,Force, archtype):\n self.type='function'\n self.EEin=EEin\n self.Ctlin=Ctlin\n self.DOF=DOFout\n self.Force=Force\n self.archtype=archtype\n self.faultmodes={}\n if archtype=='quad':\n LineRF=line('RF')\n LineLF=line('LF')\n LineLR=line('LR')\n LineRR=line('RR')\n self.lines=[LineRF,LineLF,LineLR, LineRR]\n self.upward=[1,1,1,1]\n self.forward=[0.5,0.5,-0.5,-0.5]\n for lin in self.lines:\n self.faultmodes.update(lin.faultmodes) \n self.faults={'nom'}\n def behavior(self, time):\n Air={}\n EEin={}\n #injects faults into lines\n for lin in self.lines:\n for fault in self.faults:\n if fault in lin.faultmodes:\n lin.faults.update([fault])\n \n ind=self.lines.index(lin)\n cmds={'up':self.upward[ind], 'for':self.forward[ind]}\n lin.behavior(self.EEin.effort, self.Ctlin, cmds, self.Force.value)\n self.faults.update(lin.faults) \n Air[lin.name]=lin.Airout\n EEin[lin.name]=lin.EE_in\n \n if any(value==np.inf for value in EEin.values()):\n self.EEin.rate=np.inf\n elif any(value!=0.0 for value in EEin.values()):\n self.EEin.rate=np.max(list(EEin.values()))\n else:\n self.EEin.rate=0.0\n \n if all(value==1.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==0.5 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==2.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==0.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif any(value==0.0 for value in Air.values()):\n self.DOF.stab=0.0\n elif any(value>2.5 for value in Air.values()):\n self.DOF.stab=0.0\n Airs=list(Air.values())\n #if not(self.Force.value==1.0):\n # self.DOF.stab=self.Force.value\n \n self.DOF.uppwr=np.mean(Airs)\n \n list1=Airs[:len(Airs)//2]\n list2=Airs[len(Airs)//2:]\n vect=np.array([list1,list2])\n self.DOF.planpwr=np.sum(vect[0]-vect[1])/3\n \n #need to expand on this, add directional velocity, etc\n return\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.behavior(time)\n return \n\nclass line:\n def __init__(self, name):\n self.type='component'\n self.name=name \n self.elecstate=1.0\n self.elecstate_in=1.0\n self.ctlstate=1.0\n self.mechstate=1.0\n self.propstate=1.0\n self.Airout=1.0\n self.faultmodes={name+'short':{'rate':'moderate', 'rcost':'major'}, \\\n name+'openc':{'rate':'moderate', 'rcost':'major'}, \\\n name+'ctlup':{'rate':'moderate', 'rcost':'minor'}, \\\n name+'ctldn':{'rate':'moderate', 'rcost':'minor'}, \\\n name+'ctlbreak':{'rate':'common', 'rcost':'moderate'}, \\\n name+'mechbreak':{'rate':'common', 'rcost':'moderate'}, \\\n name+'mechfriction':{'rate':'common', 'rcost':'moderate'}, \\\n name+'propwarp':{'rate':'veryrare', 'rcost':'replacement'}, \\\n name+'propstuck':{'rate':'veryrare', 'rcost':'replacement'}, \\\n name+'propbreak':{'rate':'veryrare', 'rcost':'replacement'}\n }\n self.faults=set(['nom'])\n def behavior(self, EEin, Ctlin, cmds, Force):\n \n if Force<=0.0:\n self.faults.update([self.name+'mechbreak', self.name+'propbreak'])\n elif Force<=0.5:\n self.faults.update([self.name+'mechfriction'])\n \n if self.faults.intersection(set([self.name+'short'])):\n self.elecstate=0.0\n self.elecstate_in=np.inf\n elif self.faults.intersection(set([self.name+'openc'])):\n 
self.elecstate=0.0\n            self.elecstate_in=0.0\n        if self.faults.intersection(set([self.name+'ctlbreak'])):\n            self.ctlstate=0.0\n        elif self.faults.intersection(set([self.name+'ctldn'])):\n            self.ctlstate=0.5\n        elif self.faults.intersection(set([self.name+'ctlup'])):\n            self.ctlstate=2.0\n        if self.faults.intersection(set([self.name+'mechbreak'])):\n            self.mechstate=0.0\n        elif self.faults.intersection(set([self.name+'mechfriction'])):\n            self.mechstate=0.5\n            self.elecstate_in=2.0\n        if self.faults.intersection(set([self.name+'propstuck'])):\n            self.propstate=0.0\n            self.mechstate=0.0\n            self.elecstate_in=4.0\n        elif self.faults.intersection(set([self.name+'propbreak'])):\n            self.propstate=0.0\n        elif self.faults.intersection(set([self.name+'propwarp'])):\n            self.propstate=0.5\n        \n        self.Airout=aux.m2to1([EEin,self.elecstate,Ctlin.upward*cmds['up']+Ctlin.forward*cmds['for'],self.ctlstate,self.mechstate,self.propstate])\n        self.EE_in=aux.m2to1([EEin,self.elecstate_in]) \n        \nclass ctlDOF:\n    def __init__(self, name,EEin, Dir, Ctl, DOFs, FS):\n        self.type='function'\n        self.EEin=EEin\n        self.Ctl=Ctl\n        self.Dir=Dir\n        self.DOFs=DOFs\n        self.FS=FS\n        self.vel=0.0\n        self.t1=0\n        self.ctlstate=1.0\n        self.faultmodes={'noctl':{'rate':'rare', 'rcost':'high'}, \\\n                         'degctl':{'rate':'rare', 'rcost':'high'}}\n        self.faults=set(['nom'])\n    def condfaults(self):\n        if self.FS.value<0.5:\n            self.faults.update(['noctl'])\n    def behavior(self, time):\n        if self.faults.intersection(set(['noctl'])):\n            self.ctlstate=0.0\n        elif self.faults.intersection(set(['degctl'])):\n            self.ctlstate=0.5\n        \n        if time>self.t1:\n            self.vel=self.DOFs.vertvel\n            self.t1=time\n        \n        upthrottle=1.0\n        \n        if self.Dir.traj[2]>=1:\n            upthrottle=1.5\n        elif self.Dir.traj[2]>0 and self.Dir.traj[2]<1:\n            upthrottle= 0.5 * self.Dir.traj[2] + 1.0\n        elif self.Dir.traj[2]==0:\n            damp=np.sign(self.vel)\n            damp2=damp*min(1.0, np.power(self.vel, 2))\n            upthrottle=1.0-0.2*damp2\n        elif self.Dir.traj[2]<=0.0 and self.Dir.traj[2]>-1.0:\n            maxdesc=-0.5\n            damp=min(1.0, np.power(self.vel-maxdesc, 2))\n            upthrottle=0.75+0.4*damp\n        elif self.Dir.traj[2]<=-1.0:\n            maxdesc=-5.0\n            damp=min(0.75, np.power(self.vel-maxdesc, 2))\n            upthrottle=0.75+0.15*damp\n        \n        if self.Dir.traj[0]==0 and self.Dir.traj[1]==0:\n            forwardthrottle=0.0\n        else:\n            forwardthrottle=1.0\n        \n        pwr=self.Dir.power\n        self.Ctl.forward=self.EEin.effort*self.ctlstate*forwardthrottle*pwr\n        self.Ctl.upward=self.EEin.effort*self.ctlstate*upthrottle*pwr\n\n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        self.condfaults()\n        self.faults.update(faults)\n        self.behavior(time)\n\nclass planpath:\n    def __init__(self, name,EEin, Env, Dir, FS):\n        self.type='function'\n        self.EEin=EEin\n        self.Env=Env\n        self.Dir=Dir\n        self.FS=FS\n        self.mode='taxi'\n        self.faultmodes={'noloc':{'rate':'rare', 'rcost':'high'}, \\\n                         'degloc':{'rate':'rare', 'rcost':'high'}}\n        self.faults=set(['nom'])\n    def condfaults(self):\n        if self.FS.value<0.5:\n            self.faults.update(['noloc'])\n    def behavior(self, t):\n        \n        if t<1:\n            self.mode='taxi'\n        elif self.mode=='taxi' and t<2:\n            self.mode='climb'\n        elif self.mode=='climb' and self.Env.elev>=50:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y==0 and t<20:\n            self.mode='forward'\n        elif self.mode=='forward' and self.Env.y>50:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y>50:\n            self.mode='backward'\n        elif self.mode=='backward' and self.Env.y<0:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y<0:\n            self.mode='descend'\n        elif self.mode=='descend' and self.Env.elev<10:\n            self.mode='land'\n        elif self.mode=='land' and self.Env.elev<1:\n            self.mode='taxi'\n        \n        if self.mode=='taxi':\n            self.Dir.power=0.0\n        elif self.mode=='takeoff':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,1]\n        elif self.mode=='climb':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,1]\n        elif self.mode=='hover':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,0]\n        elif self.mode=='forward':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,1,0]\n        elif self.mode=='backward':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,-1,0]\n        elif self.mode=='descend':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,-1]\n        elif self.mode=='land':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,-0.1]\n        \n        if self.faults.intersection(set(['noloc'])):\n            self.Dir.traj=[0,0,0]\n        elif self.faults.intersection(set(['degloc'])):\n            self.Dir.traj=[0,0,-1]\n        if self.EEin.effort<0.5:\n            self.Dir.power=0.0\n            self.Dir.traj=[0,0,0]\n        \n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        self.condfaults()\n        self.faults.update(faults)\n        self.behavior(time)\n\nclass trajectory:\n    def __init__(self, name, Env, DOF, Land, Dir, Force_LG):\n        self.type='environment'\n        self.Env=Env\n        self.DOF=DOF\n        self.Land=Land\n        self.Dir=Dir\n        self.Force_LG=Force_LG\n        self.lasttime=0\n        self.t1=0.0\n        self.faultmodes={'nom':{'rate':'common', 'rcost':'NA'}, }\n        self.faults=set(['nom'])\n    def condfaults(self):\n        return 0\n    def behavior(self, time):\n        maxvel=20.0\n        maxpvel=5.0\n        \n        if self.Env.elev<=0.0:\n            self.Force_LG.value=min(-2.0, (self.DOF.vertvel-self.DOF.planvel)/3)\n            flight=0.0\n        else:\n            self.Force_LG.value=0.0\n            flight=1.0\n        \n        if time>self.t1:\n            sign=np.sign(self.DOF.vertvel)\n            damp=-0.02*sign*np.power(self.DOF.vertvel, 2)-0.1*self.DOF.vertvel\n            acc=10*(self.DOF.uppwr-flight)\n            self.DOF.vertvel=self.DOF.vertvel+acc+damp\n            if self.Env.elev<=0.0:\n                self.DOF.vertvel=max(0,self.DOF.vertvel)\n            self.t1=time\n        \n        self.DOF.planvel=flight*maxpvel*self.DOF.planpwr\n        \n        self.Env.elev=max(0.0, self.Env.elev+self.DOF.vertvel)\n        self.Env.x=self.Env.x+self.DOF.planvel*self.Dir.traj[0]\n        self.Env.y=self.Env.y+self.DOF.planvel*self.Dir.traj[1]\n        \n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        if time>self.lasttime:\n            self.behavior(time)\n            self.lasttime=time\n        self.condfaults()\n\n##future: try to automate this part so you don't have to do it in a weird order\ndef initialize():\n    \n    #initialize graph\n    g=nx.DiGraph()\n    \n    Force_ST=Force('Force_ST')\n    EE_1=EE('EE_1')\n    StoreEE=storeEE('StoreEE',EE_1, Force_ST)\n    g.add_node('StoreEE', obj=StoreEE)\n    \n    EEmot=EE('EEmot')\n    EEctl=EE('EEctl')\n    \n    DistEE=distEE(EE_1,EEmot,EEctl, Force_ST)\n    g.add_node('DistEE', obj=DistEE)\n    g.add_edge('StoreEE','DistEE', EE_1=EE_1)\n    \n    Ctl1=Sig('Ctl1')\n    DOFs=DOF('DOFs')\n    \n    Force_Air=Force('Force_Air')\n    AffectDOF=affectDOF('AffectDOF',EEmot,Ctl1,DOFs,Force_Air, 'quad')\n    g.add_node('AffectDOF', obj=AffectDOF)\n    Dir1=Direc('Dir1')\n    CtlDOF=ctlDOF('CtlDOF',EEctl, Dir1, Ctl1, DOFs, Force_ST)\n    g.add_node('CtlDOF', obj=CtlDOF)\n    g.add_edge('DistEE','AffectDOF', EEmot=EEmot)\n    g.add_edge('DistEE','CtlDOF', EEctl=EEctl)\n    g.add_edge('CtlDOF','AffectDOF', Ctl1=Ctl1,DOFs=DOFs)\n\n    Env1=Env('Env1')\n    Planpath=planpath('Planpath',EEctl, Env1,Dir1, Force_ST)\n    g.add_node('Planpath', obj=Planpath)\n    g.add_edge('DistEE','Planpath', EEctl=EEctl)\n    g.add_edge('Planpath','CtlDOF', Dir1=Dir1)\n    \n    Land1=Land('Land')\n    Force_GR=Force('Force_GR')\n    Force_LG=Force('Force_LG')\n    Trajectory=trajectory('Trajectory',Env1,DOFs,Land1,Dir1, Force_GR)\n    g.add_node('Trajectory', obj=Trajectory)\n    g.add_edge('Trajectory','AffectDOF',DOFs=DOFs)\n    g.add_edge('Planpath', 'Trajectory', Dir1=Dir1, Env1=Env1)\n    \n    \n    EngageLand=engageLand('EngageLand',Force_GR, Force_LG)\n    g.add_node('EngageLand', obj=EngageLand)\n    g.add_edge('Trajectory', 'EngageLand', Force_GR=Force_GR)\n    \n    \n    HoldPayload=holdPayload('HoldPayload',Force_LG, Force_Air, Force_ST)\n    g.add_node('HoldPayload', obj=HoldPayload)\n    g.add_edge('EngageLand','HoldPayload', Force_LG=Force_LG)\n    g.add_edge('HoldPayload', 'AffectDOF', Force_Air=Force_Air)\n    g.add_edge('HoldPayload', 'StoreEE', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'DistEE', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'Planpath', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'CtlDOF', Force_ST=Force_ST)\n    \n    return g\n\n#def environment(DOF,t):\n#    if DOF.stab\n    \ndef findclassification(g, endfaults, endflows, scen):\n    \n    Env=fp.getflow('Env1', g)\n    \n    #may need to redo this\n    if aux.inrange(Env.start_area, Env.x, Env.y):\n        landloc='nominal'\n        area=1\n    elif aux.inrange(Env.safe1_area, Env.x, Env.y) or aux.inrange(Env.safe2_area, Env.x, Env.y):\n        landloc='emsafe'\n        area=1000\n    elif aux.inrange(Env.dang_area, Env.x, Env.y):\n        landloc='emdang'\n        area=100000\n    else:\n        landloc='emunsanc'\n        area=10000\n    \n    repaircosts=fp.listfaultsprops(endfaults, g, 'rcost')\n    maxcost=aux.textmax(repaircosts.values())\n    \n    if maxcost=='major':\n        repcost=10000\n    elif maxcost=='moderate':\n        repcost=3000\n    elif maxcost=='minor':\n        repcost=500\n    elif maxcost=='replacement':\n        repcost=250\n    else:\n        repcost=0\n\n    totcost=repcost+area\n    \n    rate=1e-6\n    \n    expcost=totcost*rate*1e5\n    \n    return {'rate':rate, 'cost': totcost, 'expected cost': expcost}","sub_path":"quad_mdl.py","file_name":"quad_mdl.py","file_ext":"py","file_size_in_byte":23412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"151754866","text":"#!/usr/bin/python3\n\n# student test file for HW4\n\nimport unittest\nfrom hw4 import *\n\n#====================================\n\nTIMEOUT_SHORT = 1\nTIMEOUT_LONG = 10\n\nclass tester_most_common_char(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(most_common_char('AVX is a feature in modern CPUs that allows one instruction to affect multiple units. vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvectors'), 'v')\n\t\tself.assertIn(most_common_char('aabbaabb'), ['a','b'])\n\n\nclass tester_alphabet_finder(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\ttest = 'qwertyuiopASDFGHJKLzxcvbnm insensitive paella'\n\t\tresult = test[:26]\n\t\tself.assertEqual(alphabet_finder(test), result)\n\n\t\ttest = 'aardvarks are cool!'\n\t\tresult = None\n\t\tself.assertEqual(alphabet_finder(test), result)\n\n\nclass tester_longest_unique_subarray(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(tuple(longest_unique_subarray([1, 2, 3, 1, 4, 5, 6])), (1, 6))\n\t\tself.assertEqual(tuple(longest_unique_subarray(list(range(10)))), (0, 10))\n\n\nclass tester_string_my_one_true_love(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertTrue(string_my_one_true_love('abcbabcdcdda'))\n\t\tself.assertTrue(string_my_one_true_love('aaabbbcccddde'))\n\t\tself.assertFalse(string_my_one_true_love('aaabbbcccdddeeffgg'))\n\n\nclass tester_alive_people(unittest.TestCase):\n\t# O(k log k) time, O(k) space\n\t# under certain circumstances O(k) solution MIGHT exist?@timeout_decorator.timeout(TIMEOUT_SHORT)\n\tdef test__given(self):\n\t\tself.assertEqual(alive_people([[1920, 80], [1940, 22], [1961, 10]]), 1961)\n\n\nclass tester_three_sum(unittest.TestCase):\n\t# O(k^2) time and space\n\tdef _transform(self, result):\n\t\tresult = list(result)\n\t\t\n\t\tfor i in range(len(result)):\n\t\t\tresult[i] = sorted(list(result[i]))\n\t\t\n\t\tresult.sort()\n\t\t\n\t\treturn result\n\t\n\tdef test__given(self):\n\t\tresult = three_sum([-1, 0, 1, 2, -1, -4], 0)\n\t\tex = [[-1, 0, 1], [-1, -1, 2]]\n\t\t\n\t\tresult = self._transform(result)\n\t\tex = self._transform(ex)\n\t\t\n\t\tself.assertEqual(result, ex)\n\n\nclass tester_happy_numbers(unittest.TestCase):\n\t# O(k log k) time, O(log k) space\n\tdef test__given(self):\n\t\tself.assertEqual(happy_numbers(8), 2468 // 1234)\n\t\tself.assertEqual(happy_numbers(15), 4)\n\n\nclass tester_zero_sum_subarray(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(tuple(zero_sum_subarray([0, 1, 2, 3, 4, 5])), (0, 1))\n\t\tself.assertEqual(tuple(zero_sum_subarray([10, 20, -20, 3, 21, 2, -6])), (1, 2))\n\n\n#===================================\n# BOILERPLATE CODE\n\n# suppress stdout, but keep stderr since that's what unittest uses\n# https://stackoverflow.com/questions/30715337\n\nfrom io import StringIO\nimport sys\n\nclass ReplaceStd(object):\n\t\"\"\" Let's make it pythonic. 
\"\"\"\n\n\tdef __init__(self):\n\t\tself.stdout = None\n\n\tdef __enter__(self):\n\t\tself.stdout = sys.stdout\n\t\tsys.stdout = StringIO()\n\n\tdef __exit__(self, type, value, traceback):\n\t\tsys.stdout.close()\n\t\tsys.stdout = self.stdout\n\nif __name__ == \"__main__\":\n\twith ReplaceStd():\n\t\tunittest.main(module=__name__, buffer=True, exit=False)\n","sub_path":"hw4-test.py","file_name":"hw4-test.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"414558906","text":"bigram={}\nfile1 = open(\"G:/file1.txt\", \"r\", encoding='utf-8')\nfile2 = open(\"G:/file2.txt\", \"r\", encoding='utf-8')\nline1 = file1.readlines()\nline2= file1.readlines()\n#print(line1)\ndef gramma(line,i):\n for l in range(2,int(len(str(line)))-i-2):\n t='' \n for q in range(0,i):\n t=t+str(str(line)[l+q])\n\n if len(t)>1:\n\n if t in bigram:\n bigram[t]=bigram[t]+1;\n else:\n bigram[t]=1;\ngramma(line1,3)\n\nnew_d={}\nfor k in sorted(bigram, key=len, reverse=False):\n new_d[k] = bigram[k]\nprint(new_d)\ngramma(line2,5)\n","sub_path":"cp_4/shyshkin_fb-73_vitrovich_fb-73_cp4/4_1.py","file_name":"4_1.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"307789996","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#fff47d\"\nFONT_NAME = \"Courier\"\nWORK = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps=0\ntimer= None\n\n# ---------------------------- TIMER RESET ------------------------------- # \ndef reset_timer():\n window.after_cancel(timer)\n canvas.itemconfig(timer_text, text=\"00:00\")\n check_marks.config(text=\"\")\n timer_label.config(text=\"Timer\",fg=PINK)\n global reps\n reps=0\n\n# ---------------------------- TIMER MECHANISM ------------------------------- # \ndef start_timer():\n global reps\n reps+=1\n work_sec=WORK*60\n short_break_sec=SHORT_BREAK_MIN * 60\n long_break_sec=LONG_BREAK_MIN * 60\n\n if reps % 8 == 0:\n countdown(long_break_sec)\n timer_label.config(text=\"Break\",fg=RED)\n elif reps % 2 == 0:\n countdown(short_break_sec)\n timer_label.config(text=\"Break\", fg=PINK)\n else:\n countdown(work_sec)\n timer_label.config(text=\"Work\", fg=GREEN)\n\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\ndef countdown(count):\n count_min=math.floor(count / 60)\n count_sec=count % 60\n if count_sec <10:\n count_sec =f\"0{count_sec}\"\n canvas.itemconfig(timer_text,text=f\"{count_min}:{count_sec}\")\n if count > 0:\n global timer\n timer=window.after(1000, countdown, count-1)\n else:\n start_timer()\n marks=\"\"\n for i in range(math.floor(reps/2)):\n marks+=\"✔\"\n check_marks.config(text=marks)\n\n# ---------------------------- UI SETUP ------------------------------- #\nwindow=Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100,pady=50,bg=YELLOW)\ncanvas=Canvas(width=200,height=224,bg=YELLOW,highlightthickness=0)\ntomato=PhotoImage(file=\"tomato.png\")\ncanvas.create_image(100,112,image=tomato)\ntimer_text=canvas.create_text(100,130,text=\"00:00\",fill=\"white\",font=(FONT_NAME,35,\"bold\"))\ncanvas.grid(column=1,row=1)\ntimer_label=Label(text=\"Timer\",bg=YELLOW,fg=GREEN,font=(FONT_NAME,50,\"normal\"))\ntimer_label.grid(column=1,row=0)\nstart_button=Button(text=\"Start\",highlightthickness=0,command=start_timer)\nstart_button.grid(column=0,row=2)\n\nreset_button=Button(text=\"Reset\",highlightthickness=0,command=reset_timer)\nreset_button.grid(column=2,row=2)\n\ncheck_marks=Label(text=\"\",font=(24),fg=GREEN,bg=YELLOW)\ncheck_marks.grid(column=1,row=3)\n\n\n\n\n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"547787583","text":"import numpy as np\n\ndef insertion_sort(a):\n if len(a) == 1:\n return a\n\n result = np.copy(a)\n for i in range(1,len(a)):\n x = result[i]\n j = i - 1\n while j >= 0 and result[j] > x:\n result[j+1] = result[j]\n j-=1\n\n result[j+1] = x\n\n return result\n\n\na = np.random.random(10000)\nassert np.all(insertion_sort(a) == np.sort(a))","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"608419927","text":"import unittest\nfrom multipageform import MultipageFormFactory, FileField\nfrom django import forms\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\n\nclass SimpleForm1(forms.Form):\n req_1 = forms.CharField(required=True)\n req_2 = forms.CharField(required=True)\n unreq_file = FileField(required=False)\n\n\nclass SimpleForm2(forms.Form):\n req_3 = forms.CharField(required=True)\n\n\nclass TestFileField_SimpleWorkflow(unittest.TestCase):\n def setUp(self):\n self.formCls = MultipageFormFactory(SimpleForm1)\n self.testFile = SimpleUploadedFile(\"/1/2/3/filename.txt\", \"Preved\\nMedved\")\n\n def test_Simple(self):\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"})\n self.assertEqual(form.is_valid(), True)\n self.assertEqual(form.cleaned_data['unreq_file'], None)\n\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"}, {'unreq_file': self.testFile})\n self.assertEqual(form.is_valid(), True)\n self.assertEqual(SimpleUploadedFile, type(form.cleaned_data['unreq_file']))\n\nimport StringIO\n\n\nclass TestFileField_2PagesWorkflow(unittest.TestCase):\n def setUp(self):\n self.formCls = MultipageFormFactory(SimpleForm1, SimpleForm2)\n self.testFile = SimpleUploadedFile(\"/1/2/3/filename.txt\", \"Preved\\nMedved\", \"image/jpeg\")\n # self.testFile.file = StringIO.StringIO(self.testFile.file)\n # raise Exception(self.testFile.__dict__)\n\n def test_Simple(self):\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"}, {'unreq_file': self.testFile})\n self.assertEqual(form.is_valid(), False)\n\n step, form = form.current_step()\n self.assertEqual(step, 1)\n\n new_post = form.cleaned_data\n new_post['unreq_file'] = form.forms[0].fields['unreq_file'].saved_filename\n\n form = self.formCls(new_post)\n self.assertEqual(form.is_valid(), False)\n step, form = form.current_step()\n self.assertEqual(step, 1)\n\n new_post = form.cleaned_data\n new_post['unreq_file'] = form.forms[0].fields['unreq_file'].saved_filename\n\n new_post['req_3'] = 'req 3'\n\n assert new_post['unreq_file']\n\n form = self.formCls(new_post)\n\n self.assertEqual(form.is_valid(), True)\n\n data = form.cleaned_data\n assert data\n self.assertEqual(data['unreq_file']._name, \"filename.txt\")\n self.assertEqual(data['unreq_file'].content_type, \"image/jpeg\")\n","sub_path":"multipageform/tests/filefield.py","file_name":"filefield.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"338224276","text":"#!/usr/bin/env python3\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n\nimport tensorflow as tf\n\nfrom config import *\n\ndef clear_dir(d):\n if os.path.isdir(d):\n os.system('rm -r \"%s\"' % d)\n os.mkdir(d)\n\nclear_dir('images')\nclear_dir('models')\nclear_dir('logs')\n\nwith tf.device('/gpu:1'):\n import model_config\n model = model_config.get_model()\n\n import weight_visualizer\n\n import inputs\n flow_train = inputs.get_data()\n\n def vis_weight(epoch, logs):\n epoch += 1\n if epoch % 20 == 0:\n weight_visualizer.visualize(model, 'conv0_%04d.png' % epoch)\n if epoch % 500 == 0:\n fn = 'models/weight_%04d.h5' % epoch\n print('\\nSaving weights to: ' + fn)\n model.save_weights(fn)\n\n try:\n from keras.optimizers import SGD, Adam\n from keras.callbacks import LambdaCallback, TensorBoard\n if BASE is not None:\n print('Loading weights.')\n model.load_weights(BASE, by_name=True)\n\n model.compile(\n optimizer=Adam(),\n #optimizer=SGD(1e-4, decay=1e-3, momentum=0.9),\n loss='mse'\n )\n\n model.fit_generator(flow_train,\n MINI_EPOCH, EPOCH, verbose=True,\n callbacks=[LambdaCallback(on_epoch_end=vis_weight),\n TensorBoard()],\n validation_data=inputs.get_data(), validation_steps=16)\n\n except KeyboardInterrupt:\n print('Halted.')\n\n model.save_weights(SAVETO)\n print('Saved to '+SAVETO)\n\n","sub_path":"autoencoder-wta/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"586562890","text":"class Person:\n def __init__(self, name, birth_year, gender, father=None, mother=None):\n self.name = name\n self.birth_year = birth_year\n self.gender = gender\n if father:\n self.add_parent(father)\n if mother:\n self.add_parent(mother)\n self.kids = []\n\n def add_parent(self, parent):\n if isinstance(parent, Person):\n if parent.gender == 'F':\n self.mother = parent\n else:\n self.father = parent\n parent.kids.append(self)\n\n def children(self, gender='both'):\n if not gender == 'both':\n return list(filter(lambda person: person.gender == gender,\n self.kids))\n else:\n return self.kids\n\n def get_siblings_by_gender(self, gender):\n if hasattr(self, \"mother\"):\n if hasattr(self, \"father\"):\n siblings = list(set(self.mother.children(gender) +\n self.father.children(gender)))\n else:\n siblings = self.mother.children(gender)\n elif hasattr(self, \"father\"):\n siblings = self.father.children(gender)\n else:\n siblings = []\n\n return list(set(siblings) - {self})\n\n def get_brothers(self):\n return self.get_siblings_by_gender('M')\n\n def get_sisters(self):\n return self.get_siblings_by_gender('F')\n\n def is_direct_successor(self, other_person):\n return other_person in self.children()\n","sub_path":"task3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"161496198","text":"import cv2\r\n\r\nvideo = cv2.VideoCapture(0)\r\nfaceCascade = cv2.CascadeClassifier(\"dataset/haarcascade_frontalface_default.xml\")\r\nsmileCascade = cv2.CascadeClassifier(\"dataset/haarcascade_smile.xml\")\r\n\r\nwhile True:\r\n success, img = video.read()\r\n grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = faceCascade.detectMultiScale(grayImg, 1.1, 4)\r\n count = 500\r\n keyPressed = cv2.waitKey(1)\r\n for x, y, w, h in faces:\r\n smiles = smileCascade.detectMultiScale(grayImg, 1.8, 15)\r\n for x, y, w, h in smiles:\r\n print(\"Image \" + str(count) + \"Saved\")\r\n path = 'SavedImages\\\\' + str(count) + '.jpg'\r\n cv2.imwrite(path, img)\r\n count += 1\r\n if count >= 503:\r\n break\r\n\r\n cv2.imshow('live video', img)\r\n if keyPressed & 0xFF == ord('q'):\r\n break\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"28128029","text":"n = int(input())\nA=[]*n\nA = list(map(int, input().split()))\n\ndef findmax(A):\n s=[0]*len(A)\n s[0]=A[0]\n for i in range(1,len(A)):\n s[i]=max(s[i-1]+A[i],A[i])\n return max(s)\nprint(findmax(A))","sub_path":"자료구조_알고리즘설계해석/최대 구간 합 계산.py","file_name":"최대 구간 합 계산.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"19698531","text":"import numpy as np\nimport gym\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Flatten, Input, merge\nfrom keras.layers.advanced_activations import LeakyReLU, ELU\nfrom keras.layers.noise import GaussianNoise\nfrom keras.optimizers import RMSprop, Adam, Nadam\n\nfrom rl.agents import ContinuousDQNAgent\nfrom rl.memory import SequentialMemory\nfrom rl.random import OrnsteinUhlenbeckProcess\nfrom rl.core import Processor\n\nfrom rl.callbacks import FileLogger, ModelIntervalCheckpoint\n\nfrom osim.env import *\n\nimport argparse\nimport math\n\n\nclass PendulumProcessor(Processor):\n def process_reward(self, reward):\n # The magnitude of the reward can be important. Since each step yields a relatively\n # high reward, we reduce the magnitude by two orders.\n return reward\n\n# Command line parameters\nparser = argparse.ArgumentParser(description='Train or test deep neural net motor controller')\nparser.add_argument('--train', dest='train', action='store_true', default=True)\nparser.add_argument('--test', dest='train', action='store_false', default=True)\nparser.add_argument('--steps', dest='steps', action='store', default=500000)\nparser.add_argument('--visualize', dest='visualize', action='store_true', default=False)\nparser.add_argument('--start_weights', dest='start_weights', action='store', default=\"best/ddpg_elu_rew2_best_actor.h5f\")\nparser.add_argument('--model', dest='model', action='store', default=\"CDQN/cdqn_Gait_from_ddpg.h5f\")\nparser.add_argument('--sigma', dest='sigma', action='store', default=0.25)\nparser.add_argument('--theta', dest='theta', action='store', default=0.15)\nparser.add_argument('--gamma', dest='gamma', action='store', default=0.99)\nparser.add_argument('--rseed', dest='rseed', action='store', default=53, type=int)\nargs = parser.parse_args()\n\nENV_NAME = 'Pendulum-v0'\ngym.undo_logger_setup()\n\n\n# Get the environment and extract the number of actions.\nenv = GaitEnv(args.visualize)\n#env = gym.make(ENV_NAME)\nprint (\"Random seed: %i\\n\", args.rseed)\nnp.random.seed(args.rseed)\nrandom.seed(args.rseed)\nenv.seed(args.rseed)\n\nassert len(env.action_space.shape) == 1\nnb_actions = env.action_space.shape[0]\n\n# Total number of steps in training\nnallsteps = args.steps\n\ninit = 'lecun_uniform'\n \n# Build all necessary models: V, mu, and L networks.\nV_model = Sequential()\nV_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nV_model.add(GaussianNoise(0.01)) # add to the command line!\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(1))\nV_model.add(Activation('linear'))\nprint(V_model.summary())\n\nmu_model = Sequential()\nmu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nmu_model.add(GaussianNoise(0.01)) # add to the command line!\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(nb_actions, init = init))\nmu_model.add(GaussianNoise(0.01))\nmu_model.add(Activation('sigmoid'))\nprint(mu_model.summary())\n\naction_input = Input(shape=(nb_actions,), name='action_input')\nobservation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\nx = merge([action_input, Flatten()(observation_input)], mode='concat')\nx = GaussianNoise(0.01)(x)\nx 
= Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(((nb_actions * nb_actions + nb_actions) / 2))(x)\nx = Activation('sigmoid')(x)\nL_model = Model(input=[action_input, observation_input], output=x)\nprint(L_model.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nprocessor = PendulumProcessor()\nmemory = SequentialMemory(limit=100000, window_length=1)\nrandom_process = OrnsteinUhlenbeckProcess(theta=float(args.theta), mu=0., sigma=float(args.sigma), size=nb_actions)\n\n#mu_model.load_weights('best/ddpg_elu_rew2_best_actor.h5f')\nagent = ContinuousDQNAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,\n memory=memory, nb_steps_warmup=100, random_process=random_process,\n gamma=float(args.gamma), target_model_update=1e-3, delta_clip=2., \n processor=processor)\nagent.compile(Nadam(lr=.001, clipnorm=2.), metrics=['mae'])\n\nif args.train:\n# agent.load_weights(args.start_weights)\n checkpoint_weights_filename = 'CDQN_train/cdqn_Gait_from_ddpg_{step}.h5f'\n# log_filename = 'CDQN/cdqn_from_ddpg_{}.json'.format('Gait')\n log_filename = 'CDQN/cdqn_{}_from_ddpg.json'.format('Gait')\n callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=10000)]\n callbacks += [FileLogger(log_filename, interval=10000)]\n agent.fit(env, callbacks=callbacks, nb_steps=nallsteps, visualize=False, verbose=1, nb_max_episode_steps=1000)\n agent.save_weights(args.model, overwrite=True)\n\nif not args.train:\n agent.load_weights(args.model)\n agent.test(env, nb_episodes=3, visualize=False, nb_max_episode_steps=500) \n","sub_path":"scripts/cdqn.py","file_name":"cdqn.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"397941145","text":"from django.http import HttpResponse\nfrom django.conf import settings\n\nclass Surf(object):\n def __init__(self, request):\n self.session= request.session\n cart= self.session.get(settings.SURF_SESSION_ID)\n if not cart:\n cart=self.session[settings.SURF_SESSION_ID]= {'page_counter':0}\n self.cart = cart\n\n def add(self):\n n=self.cart['page_counter']+1\n self.cart['page_counter']=n\n self.save()\n\n def save(self):\n self.session[settings.SURF_SESSION_ID] = self.cart\n self.session.modified = True\n\n def check(self):\n counter=self.cart['page_counter']\n print(counter)\n if int(counter) > 5:\n return False\n else:\n return True\n\n def clear(self):\n self.cart['page_counter']=0\n self.save()\n\n\n\n","sub_path":"apply/surfing.py","file_name":"surfing.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"638049672","text":"from django.urls import path\nfrom .views import *\n\napp_name=\"main\"\nurlpatterns = [\n path('', showmain, name=\"showmain\"), \n path('faq/',faq,name = 'faq'),\n path('contact/',contact,name='contact'),\n path('academic/', academic, name=\"academic\"),\n path('volunteer/', volunteer, name=\"volunteer\"),\n path('research/', research, name=\"research\"),\n path('art/', art, name=\"art\"),\n path('fresh/', fresh, name=\"fresh\"),\n path('performance/', performance, name=\"performance\"),\n path('atheletic/', atheletic, name=\"atheletic\"),\n path('social/',social,name='social'),\n\n # likelion\n path('likelion/',likelion,name='likelion'),\n path('likelion/',likelion_detail,name='likelion_detail'),\n path('likelion/new/',likelion_new,name='likelion_new'),\n path('likelion/create/',likelion_create,name='likelion_create'),\n path('likelion/edit/',likelion_edit,name='likelion_edit'),\n path('likelion/update/',likelion_update,name='likelion_update'),\n path('likelion/delete/',likelion_delete,name='likelion_delete'),\n \n # cafein\n path('cafein/',cafein,name='cafein'),\n path('cafein/',cafein_detail,name='cafein_detail'),\n path('cafein/new/',cafein_new,name='cafein_new'),\n path('cafein/create/',cafein_create,name='cafein_create'),\n path('cafein/edit/',cafein_edit,name='cafein_edit'),\n path('cafein/update/',cafein_update,name='cafein_update'),\n path('cafein/delete/',cafein_delete,name='cafein_delete'),\n\n # dna\n path('dna/',dna,name='dna'),\n path('dna/',dna_detail,name='dna_detail'),\n path('dna/new/',dna_new,name='dna_new'),\n path('dna/create/',dna_create,name='dna_create'),\n path('dna/edit/',dna_edit,name='dna_edit'),\n path('dna/update/',dna_update,name='dna_update'),\n path('dna/delete/',dna_delete,name='dna_delete'),\n\n # dussa\n path('dussa/',dussa,name='dussa'),\n path('dussa/',dussa_detail,name='dussa_detail'),\n path('dussa/new/',dussa_new,name='dussa_new'),\n path('dussa/create/',dussa_create,name='dussa_create'),\n path('dussa/edit/',dussa_edit,name='dussa_edit'),\n path('dussa/update/',dussa_update,name='dussa_update'),\n path('dussa/delete/',dussa_delete,name='dussa_delete'),\n\n # kcc\n path('kcc/',kcc,name='kcc'),\n path('kcc/',kcc_detail,name='kcc_detail'),\n path('kcc/new/',kcc_new,name='kcc_new'),\n path('kcc/create/',kcc_create,name='kcc_create'),\n path('kcc/edit/',kcc_edit,name='kcc_edit'),\n path('kcc/update/',kcc_update,name='kcc_update'),\n path('kcc/delete/',kcc_delete,name='kcc_delete'),\n\n # mecs\n path('mecs/',mecs,name='mecs'),\n path('mecs/',mecs_detail,name='mecs_detail'),\n path('mecs/new/',mecs_new,name='mecs_new'),\n path('mecs/create/',mecs_create,name='mecs_create'),\n path('mecs/edit/',mecs_edit,name='mecs_edit'),\n path('mecs/update/',mecs_update,name='mecs_update'),\n path('mecs/delete/',mecs_delete,name='mecs_delete'),\n\n # nsa\n path('nsa/',nsa,name='nsa'),\n path('nsa/',nsa_detail,name='nsa_detail'),\n path('nsa/new/',nsa_new,name='nsa_new'),\n path('nsa/create/',nsa_create,name='nsa_create'),\n path('nsa/edit/',nsa_edit,name='nsa_edit'),\n path('nsa/update/',nsa_update,name='nsa_update'),\n path('nsa/delete/',nsa_delete,name='nsa_delete'),\n\n # marx\n path('marx/',marx,name='marx'),\n path('marx/',marx_detail,name='marx_detail'),\n path('marx/new/',marx_new,name='marx_new'),\n path('marx/create/',marx_create,name='marx_create'),\n path('marx/edit/',marx_edit,name='marx_edit'),\n path('marx/update/',marx_update,name='marx_update'),\n path('marx/delete/',marx_delete,name='marx_delete'),\n\n # management\n 
path('management/',management,name='management'),\n    path('management/',management_detail,name='management_detail'),\n    path('management/new/',management_new,name='management_new'),\n    path('management/create/',management_create,name='management_create'),\n    path('management/edit/',management_edit,name='management_edit'),\n    path('management/update/',management_update,name='management_update'),\n    path('management/delete/',management_delete,name='management_delete'),\n\n    # economy\n    path('economy/',economy,name='economy'),\n    path('economy/',economy_detail,name='economy_detail'),\n    path('economy/new/',economy_new,name='economy_new'),\n    path('economy/create/',economy_create,name='economy_create'),\n    path('economy/edit/',economy_edit,name='economy_edit'),\n    path('economy/update/',economy_update,name='economy_update'),\n    path('economy/delete/',economy_delete,name='economy_delete'),\n\n    # international\n    path('international/',international,name='international'),\n    path('international/',international_detail,name='international_detail'),\n    path('international/new/',international_new,name='international_new'),\n    path('international/create/',international_create,name='international_create'),\n    path('international/edit/',international_edit,name='international_edit'),\n    path('international/update/',international_update,name='international_update'),\n    path('international/delete/',international_delete,name='international_delete'),\n\n    # politics\n    path('politics/',politics,name='politics'),\n    path('politics/',politics_detail,name='politics_detail'),\n    path('politics/new/',politics_new,name='politics_new'),\n    path('politics/create/',politics_create,name='politics_create'),\n    path('politics/edit/',politics_edit,name='politics_edit'),\n    path('politics/update/',politics_update,name='politics_update'),\n    path('politics/delete/',politics_delete,name='politics_delete'),\n\n    # kusa\n    path('kusa/',kusa,name='kusa'),\n    path('kusa/',kusa_detail,name='kusa_detail'),\n    path('kusa/new/',kusa_new,name='kusa_new'),\n    path('kusa/create/',kusa_create,name='kusa_create'),\n    path('kusa/edit/',kusa_edit,name='kusa_edit'),\n    path('kusa/update/',kusa_update,name='kusa_update'),\n    path('kusa/delete/',kusa_delete,name='kusa_delete'),\n\n    # rich\n    path('rich/',rich,name='rich'),\n    path('rich/',rich_detail,name='rich_detail'),\n    path('rich/new/',rich_new,name='rich_new'),\n    path('rich/create/',rich_create,name='rich_create'),\n    path('rich/edit/',rich_edit,name='rich_edit'),\n    path('rich/update/',rich_update,name='rich_update'),\n    path('rich/delete/',rich_delete,name='rich_delete'),\n\n    # unsa\n    path('unsa/',unsa,name='unsa'),\n    path('unsa/',unsa_detail,name='unsa_detail'),\n    path('unsa/new/',unsa_new,name='unsa_new'),\n    path('unsa/create/',unsa_create,name='unsa_create'),\n    path('unsa/edit/',unsa_edit,name='unsa_edit'),\n    path('unsa/update/',unsa_update,name='unsa_update'),\n    path('unsa/delete/',unsa_delete,name='unsa_delete'),\n\n    # frontier\n    path('frontier/',frontier,name='frontier'),\n    path('frontier/',frontier_detail,name='frontier_detail'),\n    path('frontier/new/',frontier_new,name='frontier_new'),\n    path('frontier/create/',frontier_create,name='frontier_create'),\n    path('frontier/edit/',frontier_edit,name='frontier_edit'),\n    
path('frontier/update/',frontier_update,name='frontier_update'),\n path('frontier/delete/',frontier_delete,name='frontier_delete'),\n\n # buddha\n path('buddha/',buddha,name='buddha'),\n path('buddha/',buddha_detail,name='buddha_detail'),\n path('buddha/new/',buddha_new,name='buddha_new'),\n path('buddha/create/',buddha_create,name='buddha_create'),\n path('buddha/edit/',buddha_edit,name='buddha_edit'),\n path('buddha/update/',buddha_update,name='buddha_update'),\n path('buddha/delete/',buddha_delete,name='buddha_delete'),\n\n #ajax\n path('ajax/',ajax,name='ajax'),\n path('ajax/',ajax_detail,name='ajax_detail'),\n path('ajax/new/',ajax_new,name='ajax_new'),\n path('ajax/create/',ajax_create,name='ajax_create'),\n path('ajax/edit/',ajax_edit,name='ajax_edit'),\n path('ajax/update/',ajax_update,name='ajax_update'),\n path('ajax/delete/',ajax_delete,name='ajax_delete'),\n\n\n #hola\n path('hola/',hola,name='hola'),\n path('hola/',hola_detail,name='hola_detail'),\n path('hola/new/',hola_new,name='hola_new'),\n path('hola/create/',hola_create,name='hola_create'),\n path('hola/edit/',hola_edit,name='hola_edit'),\n path('hola/update/',hola_update,name='hola_update'),\n path('hola/delete/',hola_delete,name='hola_delete'),\n\n #odc\n path('odc/',odc,name='odc'),\n path('odc/',odc_detail,name='odc_detail'),\n path('odc/new/',odc_new,name='odc_new'),\n path('odc/create/',odc_create,name='odc_create'),\n path('odc/edit/',odc_edit,name='odc_edit'),\n path('odc/update/',odc_update,name='odc_update'),\n path('odc/delete/',odc_delete,name='odc_delete'),\n\n #opus\n path('opus/',opus,name='opus'),\n path('opus/',opus_detail,name='opus_detail'),\n path('opus/new/',opus_new,name='opus_new'),\n path('opus/create/',opus_create,name='opus_create'),\n path('opus/edit/',opus_edit,name='opus_edit'),\n path('opus/update/',opus_update,name='opus_update'),\n path('opus/delete/',opus_delete,name='opus_delete'),\n\n #drama\n path('drama/',drama,name='drama'),\n path('drama/',drama_detail,name='drama_detail'),\n path('drama/new/',drama_new,name='drama_new'),\n path('drama/create/',drama_create,name='drama_create'),\n path('drama/edit/',drama_edit,name='drama_edit'),\n path('drama/update/',drama_update,name='drama_update'),\n path('drama/delete/',drama_delete,name='drama_delete'),\n\n #lotus\n path('lotus/',lotus,name='lotus'),\n path('lotus/',lotus_detail,name='lotus_detail'),\n path('lotus/new/',lotus_new,name='lotus_new'),\n path('lotus/create/',lotus_create,name='lotus_create'),\n path('lotus/edit/',lotus_edit,name='lotus_edit'),\n path('lotus/update/',lotus_update,name='lotus_update'),\n path('lotus/delete/',lotus_delete,name='lotus_delete'),\n\n #cloud\n path('cloud/',cloud,name='cloud'),\n path('cloud/',cloud_detail,name='cloud_detail'),\n path('cloud/new/',cloud_new,name='cloud_new'),\n path('cloud/create/',cloud_create,name='cloud_create'),\n path('cloud/edit/',cloud_edit,name='cloud_edit'),\n path('cloud/update/',cloud_update,name='cloud_update'),\n path('cloud/delete/',cloud_delete,name='cloud_delete'),\n\n #arirang\n path('arirang/',arirang,name='arirang'),\n path('arirang/',arirang_detail,name='arirang_detail'),\n path('arirang/new/',arirang_new,name='arirang_new'),\n path('arirang/create/',arirang_create,name='arirang_create'),\n path('arirang/edit/',arirang_edit,name='arirang_edit'),\n path('arirang/update/',arirang_update,name='arirang_update'),\n path('arirang/delete/',arirang_delete,name='arirang_delete'),\n\n #eumsem\n path('eumsem/',eumsem,name='eumsem'),\n 
path('eumsem/',eumsem_detail,name='eumsem_detail'),\n path('eumsem/new/',eumsem_new,name='eumsem_new'),\n path('eumsem/create/',eumsem_create,name='eumsem_create'),\n path('eumsem/edit/',eumsem_edit,name='eumsem_edit'),\n path('eumsem/update/',eumsem_update,name='eumsem_update'),\n path('eumsem/delete/',eumsem_delete,name='eumsem_delete'),\n\n #fearless\n path('fearless/',fearless,name='fearless'),\n path('fearless/',fearless_detail,name='fearless_detail'),\n path('fearless/new/',fearless_new,name='fearless_new'),\n path('fearless/create/',fearless_create,name='fearless_create'),\n path('fearless/edit/',fearless_edit,name='fearless_edit'),\n path('fearless/update/',fearless_update,name='fearless_update'),\n path('fearless/delete/',fearless_delete,name='fearless_delete'),\n\n #yeoul\n path('yeoul/',yeoul,name='yeoul'),\n path('yeoul/',yeoul_detail,name='yeoul_detail'),\n path('yeoul/new/',yeoul_new,name='yeoul_new'),\n path('yeoul/create/',yeoul_create,name='yeoul_create'),\n path('yeoul/edit/',yeoul_edit,name='yeoul_edit'),\n path('yeoul/update/',yeoul_update,name='yeoul_update'),\n path('yeoul/delete/',yeoul_delete,name='yeoul_delete'),\n\n # elephente\n path('elephente/',elephente,name='elephente'),\n path('elephente/',elephente_detail,name='elephente_detail'),\n path('elephente/new/',elephente_new,name='elephente_new'),\n path('elephente/create/',elephente_create,name='elephente_create'),\n path('elephente/edit/',elephente_edit,name='elephente_edit'),\n path('elephente/update/',elephente_update,name='elephente_update'),\n path('elephente/delete/',elephente_delete,name='elephente_delete'),\n\n # doomchit\n path('doomchit/',doomchit,name='doomchit'),\n path('doomchit/',doomchit_detail,name='doomchit_detail'),\n path('doomchit/new/',doomchit_new,name='doomchit_new'),\n path('doomchit/create/',doomchit_create,name='doomchit_create'),\n path('doomchit/edit/',doomchit_edit,name='doomchit_edit'),\n path('doomchit/update/',doomchit_update,name='doomchit_update'),\n path('doomchit/delete/',doomchit_delete,name='doomchit_delete'),\n\n # enactus\n path('enactus/',enactus,name='enactus'),\n path('enactus/',enactus_detail,name='enactus_detail'),\n path('enactus/new/',enactus_new,name='enactus_new'),\n path('enactus/create/',enactus_create,name='enactus_create'),\n path('enactus/edit/',enactus_edit,name='enactus_edit'),\n path('enactus/update/',enactus_update,name='enactus_update'),\n path('enactus/delete/',enactus_delete,name='enactus_delete'),\n\n # jam\n path('jam/',jam,name='jam'),\n path('jam/',jam_detail,name='jam_detail'),\n path('jam/new/',jam_new,name='jam_new'),\n path('jam/create/',jam_create,name='jam_create'),\n path('jam/edit/',jam_edit,name='jam_edit'),\n path('jam/update/',jam_update,name='jam_update'),\n path('jam/delete/',jam_delete,name='jam_delete'),\n \n # qud\n path('qud/',qud,name='qud'),\n path('qud/',qud_detail,name='qud_detail'),\n path('qud/new/',qud_new,name='qud_new'),\n path('qud/create/',qud_create,name='qud_create'),\n path('qud/edit/',qud_edit,name='qud_edit'),\n path('qud/update/',qud_update,name='qud_update'),\n path('qud/delete/',qud_delete,name='qud_delete'),\n\n # elf\n path('elf/',elf,name='elf'),\n path('elf/',elf_detail,name='elf_detail'),\n path('elf/new/',elf_new,name='elf_new'),\n path('elf/create/',elf_create,name='elf_create'),\n path('elf/edit/',elf_edit,name='elf_edit'),\n path('elf/update/',elf_update,name='elf_update'),\n path('elf/delete/',elf_delete,name='elf_delete'),\n \n # rcy\n path('rcy/',rcy,name='rcy'),\n 
path('rcy/',rcy_detail,name='rcy_detail'),\n path('rcy/new/',rcy_new,name='rcy_new'),\n path('rcy/create/',rcy_create,name='rcy_create'),\n path('rcy/edit/',rcy_edit,name='rcy_edit'),\n path('rcy/update/',rcy_update,name='rcy_update'),\n path('rcy/delete/',rcy_delete,name='rcy_delete'),\n\n # road\n path('road/',road,name='road'),\n path('road/',road_detail,name='road_detail'),\n path('road/new/',road_new,name='road_new'),\n path('road/create/',road_create,name='road_create'),\n path('road/edit/',road_edit,name='road_edit'),\n path('road/update/',road_update,name='road_update'),\n path('road/delete/',road_delete,name='road_delete'),\n\n # hand\n path('hand/',hand,name='hand'),\n path('hand/',hand_detail,name='hand_detail'),\n path('hand/new/',hand_new,name='hand_new'),\n path('hand/create/',hand_create,name='hand_create'),\n path('hand/edit/',hand_edit,name='hand_edit'),\n path('hand/update/',hand_update,name='hand_update'),\n path('hand/delete/',hand_delete,name='hand_delete'),\n\n # neighbor\n path('neighbor/',neighbor,name='neighbor'),\n path('neighbor/',neighbor_detail,name='neighbor_detail'),\n path('neighbor/new/',neighbor_new,name='neighbor_new'),\n path('neighbor/create/',neighbor_create,name='neighbor_create'),\n path('neighbor/edit/',neighbor_edit,name='neighbor_edit'),\n path('neighbor/update/',neighbor_update,name='neighbor_update'),\n path('neighbor/delete/',neighbor_delete,name='neighbor_delete'),\n\n # painters\n path('painters/',painters,name='painters'),\n path('painters/',painters_detail,name='painters_detail'),\n path('painters/new/',painters_new,name='painters_new'),\n path('painters/create/',painters_create,name='painters_create'),\n path('painters/edit/',painters_edit,name='painters_edit'),\n path('painters/update/',painters_update,name='painters_update'),\n path('painters/delete/',painters_delete,name='painters_delete'),\n\n # green\n path('green/',green,name='green'),\n path('green/',green_detail,name='green_detail'),\n path('green/new/',green_new,name='green_new'),\n path('green/create/',green_create,name='green_create'),\n path('green/edit/',green_edit,name='green_edit'),\n path('green/update/',green_update,name='green_update'),\n path('green/delete/',green_delete,name='green_delete'),\n\n # korean\n path('korean/',korean,name='korean'),\n path('korean/',korean_detail,name='korean_detail'),\n path('korean/new/',korean_new,name='korean_new'),\n path('korean/create/',korean_create,name='korean_create'),\n path('korean/edit/',korean_edit,name='korean_edit'),\n path('korean/update/',korean_update,name='korean_update'),\n path('korean/delete/',korean_delete,name='korean_delete'),\n\n # draw\n path('draw/',draw,name='draw'),\n path('draw/',draw_detail,name='draw_detail'),\n path('draw/new/',draw_new,name='draw_new'),\n path('draw/create/',draw_create,name='draw_create'),\n path('draw/edit/',draw_edit,name='draw_edit'),\n path('draw/update/',draw_update,name='draw_update'),\n path('draw/delete/',draw_delete,name='draw_delete'),\n\n # literal\n path('literal/',literal,name='literal'),\n path('literal/',literal_detail,name='literal_detail'),\n path('literal/new/',literal_new,name='literal_new'),\n path('literal/create/',literal_create,name='literal_create'),\n path('literal/edit/',literal_edit,name='literal_edit'),\n path('literal/update/',literal_update,name='literal_update'),\n path('literal/delete/',literal_delete,name='literal_delete'),\n\n # calligraphy\n path('calligraphy/',calligraphy,name='calligraphy'),\n 
path('calligraphy/detail/',calligraphy_detail,name='calligraphy_detail'),\n path('calligraphy/new/',calligraphy_new,name='calligraphy_new'),\n path('calligraphy/create/',calligraphy_create,name='calligraphy_create'),\n path('calligraphy/edit/',calligraphy_edit,name='calligraphy_edit'),\n path('calligraphy/update/',calligraphy_update,name='calligraphy_update'),\n path('calligraphy/delete/',calligraphy_delete,name='calligraphy_delete'),\n\n # circle\n path('circle/',circle,name='circle'),\n path('circle/detail/',circle_detail,name='circle_detail'),\n path('circle/new/',circle_new,name='circle_new'),\n path('circle/create/',circle_create,name='circle_create'),\n path('circle/edit/',circle_edit,name='circle_edit'),\n path('circle/update/',circle_update,name='circle_update'),\n path('circle/delete/',circle_delete,name='circle_delete'),\n\n # stone\n path('stone/',stone,name='stone'),\n path('stone/detail/',stone_detail,name='stone_detail'),\n path('stone/new/',stone_new,name='stone_new'),\n path('stone/create/',stone_create,name='stone_create'),\n path('stone/edit/',stone_edit,name='stone_edit'),\n path('stone/update/',stone_update,name='stone_update'),\n path('stone/delete/',stone_delete,name='stone_delete'),\n\n # cartoon\n path('cartoon/',cartoon,name='cartoon'),\n path('cartoon/detail/',cartoon_detail,name='cartoon_detail'),\n path('cartoon/new/',cartoon_new,name='cartoon_new'),\n path('cartoon/create/',cartoon_create,name='cartoon_create'),\n path('cartoon/edit/',cartoon_edit,name='cartoon_edit'),\n path('cartoon/update/',cartoon_update,name='cartoon_update'),\n path('cartoon/delete/',cartoon_delete,name='cartoon_delete'),\n\n # rush\n path('rush/',rush,name='rush'),\n path('rush/detail/',rush_detail,name='rush_detail'),\n path('rush/new/',rush_new,name='rush_new'),\n path('rush/create/',rush_create,name='rush_create'),\n path('rush/edit/',rush_edit,name='rush_edit'),\n path('rush/update/',rush_update,name='rush_update'),\n path('rush/delete/',rush_delete,name='rush_delete'),\n\n # dust\n path('dust/',dust,name='dust'),\n path('dust/detail/',dust_detail,name='dust_detail'),\n path('dust/new/',dust_new,name='dust_new'),\n path('dust/create/',dust_create,name='dust_create'),\n path('dust/edit/',dust_edit,name='dust_edit'),\n path('dust/update/',dust_update,name='dust_update'),\n path('dust/delete/',dust_delete,name='dust_delete'),\n\n # cave\n path('cave/',cave,name='cave'),\n path('cave/detail/',cave_detail,name='cave_detail'),\n path('cave/new/',cave_new,name='cave_new'),\n path('cave/create/',cave_create,name='cave_create'),\n path('cave/edit/',cave_edit,name='cave_edit'),\n path('cave/update/',cave_update,name='cave_update'),\n path('cave/delete/',cave_delete,name='cave_delete'),\n\n # action\n path('action/',action,name='action'),\n path('action/detail/',action_detail,name='action_detail'),\n path('action/new/',action_new,name='action_new'),\n path('action/create/',action_create,name='action_create'),\n path('action/edit/',action_edit,name='action_edit'),\n path('action/update/',action_update,name='action_update'),\n path('action/delete/',action_delete,name='action_delete'),\n\n # wind\n path('wind/',wind,name='wind'),\n path('wind/detail/',wind_detail,name='wind_detail'),\n path('wind/new/',wind_new,name='wind_new'),\n path('wind/create/',wind_create,name='wind_create'),\n path('wind/edit/',wind_edit,name='wind_edit'),\n path('wind/update/',wind_update,name='wind_update'),\n path('wind/delete/',wind_delete,name='wind_delete'),\n\n # mountain\n path('mountain/',mountain,name='mountain'),\n path('mountain/detail/',mountain_detail,name='mountain_detail'),\n 
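# Aside: the same seven-route block repeats verbatim for every club in this\n # file. A sketch of a DRYer layout (club_paths and CLUBS are hypothetical\n # names, not part of the original project; it assumes every club's seven view\n # functions are importable into this module under these exact names):\n #\n # def club_paths(prefix, view_ns):\n #     """Build the standard seven CRUD routes for one club prefix."""\n #     return [\n #         path(f'{prefix}/', view_ns[prefix], name=prefix),\n #         path(f'{prefix}/detail/', view_ns[f'{prefix}_detail'], name=f'{prefix}_detail'),\n #         path(f'{prefix}/new/', view_ns[f'{prefix}_new'], name=f'{prefix}_new'),\n #         path(f'{prefix}/create/', view_ns[f'{prefix}_create'], name=f'{prefix}_create'),\n #         path(f'{prefix}/edit/', view_ns[f'{prefix}_edit'], name=f'{prefix}_edit'),\n #         path(f'{prefix}/update/', view_ns[f'{prefix}_update'], name=f'{prefix}_update'),\n #         path(f'{prefix}/delete/', view_ns[f'{prefix}_delete'], name=f'{prefix}_delete'),\n #     ]\n #\n # urlpatterns = [p for club in CLUBS for p in club_paths(club, globals())]\n 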
path('mountain/new/',mountain_new,name='mountain_new'),\n path('mountain/create/',mountain_create,name='mountain_create'),\n path('mountain/edit/',mountain_edit,name='mountain_edit'),\n path('mountain/update/',mountain_update,name='mountain_update'),\n path('mountain/delete/',mountain_delete,name='mountain_delete'),\n\n # water\n path('water/',water,name='water'),\n path('water/detail/',water_detail,name='water_detail'),\n path('water/new/',water_new,name='water_new'),\n path('water/create/',water_create,name='water_create'),\n path('water/edit/',water_edit,name='water_edit'),\n path('water/update/',water_update,name='water_update'),\n path('water/delete/',water_delete,name='water_delete'),\n\n # courtist\n path('courtist/',courtist,name='courtist'),\n path('courtist/detail/',courtist_detail,name='courtist_detail'),\n path('courtist/new/',courtist_new,name='courtist_new'),\n path('courtist/create/',courtist_create,name='courtist_create'),\n path('courtist/edit/